\begin{document}
\title[On a problem of J.~Matkowski and J.~Wesołowski,~II]{On a problem of Janusz Matkowski and Jacek Wesołowski,~II}
\selectlanguage{polish}
\author[J. Morawiec]{Janusz Morawiec}
\address{Instytut Matematyki{}\\
Uniwersytet Śląski{}\\
Bankowa 14, PL-40-007 Katowice{}\\
Poland}
\email{[email protected]}
\author[T. Zürcher]{Thomas Zürcher}
\address{Instytut Matematyki{}\\
Uniwersytet Śląski{}\\
Bankowa 14, PL-40-007 Katowice{}\\
Poland}
\email{[email protected]}
\selectlanguage{english}
\subjclass{Primary 39B12, Secondary 37A05}
\keywords{functional equations; probabilistic iterated function systems; singular functions; absolutely continuous functions}
\begin{abstract}
We continue our study started in \cite{MZ} of the
functional equation
\begin{equation*}
\varphi(x)=\sum_{n=0}^{N}\varphi(f_n(x))-\sum_{n=0}^{N}\varphi(f_n(0))
\end{equation*}
and its increasing and continuous solutions $\varphi\colon[0,1]\to[0,1]$
such that $\varphi(0)=0$ and $\varphi(1)=1$. In this paper we assume that $f_0,\ldots,f_N\colon[0,1]\to[0,1]$ are strictly increasing contractions such that
\begin{equation*}
0\leq f_0(0)<f_0(1)\leq f_1(0)<\cdots <f_{N-1}(1)\leq f_N(0)<f_N(1)\leq 1
\end{equation*}
and at least one of these weak inequalities is strict.
\end{abstract}
\maketitle
\section{Introduction}\label{introduction}
Fix $N\in\mathbb N$ and strictly increasing contractions $f_0,\ldots,f_N\colon[0,1]\to[0,1]$ such that
\begin{equation}\label{0<1}
0\leq f_0(0)<f_0(1)\leq f_1(0)<\cdots<f_{N-1}(1)\leq f_N(0)<f_N(1)\leq 1.
\end{equation}
We continue our study of the existence of solutions $\varphi$ of the functional equation
\begin{equation}\label{e}
\varphi(x)=\sum_{n=0}^{N}\varphi(f_n(x))-\sum_{n=0}^{N}\varphi(f_n(0))
\tag{$\textsf{E}$}
\end{equation}
in the class $\mathcal C$ consisting of all increasing and continuous functions $\varphi\colon [0,1]\to [0,1]$ satisfying the following boundary conditions
\begin{equation}\label{cond}
\varphi(0)=0\hspace{3ex}\hbox{ and }\hspace{3ex}\varphi(1)=1.
\end{equation}
In this paper we assume, in contrast to~\cite{MZ}, that
\begin{equation}\label{[0,1]}
\bigcup_{n=0}^{N}\big[f_n(0),f_n(1)\big]\neq [0,1].
\end{equation}
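For instance, for $N=1$ one can take $f_0(x)=\frac{x}{3}$ and $f_1(x)=\frac{x+2}{3}$; then \eqref{0<1} and \eqref{[0,1]} hold, because $\big[f_0(0),f_0(1)\big]\cup\big[f_1(0),f_1(1)\big]=\big[0,\tfrac13\big]\cup\big[\tfrac23,1\big]$, and the attractor constructed in \cref{Basic} below is the classical Cantor ternary set.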
\section{Preliminaries}\label{Preliminaries}
Throughout this paper for all $k\in\mathbb N$ and $n_1,\ldots,n_k\in\{0,\ldots,N\}$ we denote the composition $f_{n_1}\circ\cdots\circ f_{n_k}$ by $f_{n_1,\ldots,n_k}$. Moreover, we extend the notation to the case $k=0$ by letting $f_{n_1,\ldots,n_0}$ be the identity.
We begin with three lemmas. The proof of the first one is very easy, so we omit it.
\begin{lemma}\label{lem21}
Fix $m\in\mathbb N$ and nonnegative real numbers $\alpha_1,\ldots,\alpha_m$ such that $\sum_{i=1}^m\alpha_i=1$. If $\varphi_1,\ldots,\varphi_m\in\mathcal C$, then
$\sum_{i=1}^m\alpha_i\varphi_i\in\mathcal C$.
\end{lemma}
\begin{lemma}\label{lem22}
If $\varphi\in\mathcal C$, then $\varphi(f_0(0))=0$ and $\varphi(f_N(1))=1$.
\end{lemma}
\begin{proof}
By \eqref{cond}, \eqref{e}, \eqref{0<1}, and the monotonicity of $\varphi$ we have
\begin{equation*}
\begin{split}
1&=\varphi(1)=\varphi(f_N(1))-\varphi(f_0(0))
+\sum_{n=1}^{N}\big[\varphi(f_{n-1}(1))-\varphi(f_n(0))\big]\\
&\leq\varphi(f_N(1))-\varphi(f_0(0)).
\end{split}
\end{equation*}
As the image of~$[0,1]$ under~$\varphi$ lies in~$[0,1]$, we infer that $\varphi(f_0(0))=0$ and $\varphi(f_N(1))=1$.
\end{proof}
Now we want to show that if all the contractions $f_0,\ldots, f_N$ are \emph{nonsingular} (i.e.\ $f_0^{-1}(A),\ldots,f_N^{-1}(A)$ have Lebesgue measure zero for every set $A\subset [0,1]$ of Lebesgue measure zero\footnote{See~\cite{LM1994}. Note also that as the inverses of the contractions exist and are continuous and increasing, being nonsingular is equivalent to the inverses being absolutely continuous, see for example Theorem~7.1.38 in \cite{KaKr96}.}), then the class $\mathcal C$ is determined by two of its subclasses $\mathcal C_a$ and $\mathcal C_s$ of all absolutely continuous and all singular functions, respectively. Repeating directly the proof of Remark~2.2 from~\cite{MZ} with the use of \cref{lem22} we get the following result.
\begin{lemma}\label{lem23}
Assume that all the contractions $f_0,\ldots, f_N$ are nonsingular. Then, both the absolutely continuous and the singular parts\footnote{The parts are unique up to a constant. For definiteness, we choose them such that both of them map $0$~to~$0$.} of every element from $\mathcal C$ satisfy $\eqref{e}$ for every $x\in[0,1]$.
\end{lemma}
By the monotonicity of $f_0$ and $f_N$, it is easy to prove that the sequence $(f_{\scriptstyle\underbrace{0,\ldots,0}_{k}}(0))_{k\in\mathbb N}$ is increasing and the sequence
$(f_{\scriptstyle\underbrace{N,\ldots,N}_{k}}(1))_{k\in\mathbb N}$ is decreasing. Hence both are convergent. Put
\begin{equation*}\label{endpoints}
\ensuremath{\mathbf{0}}=\lim_{k\to\infty}f_{\scriptstyle\underbrace{0,\ldots,0}_{k}}(0)
\quad\hbox{ and }\quad
\ensuremath{\mathbf{1}}=\lim_{k\to\infty}f_{\scriptstyle\underbrace{N,\ldots,N}_{k}}(1).
\end{equation*}
It is clear that $\ensuremath{\mathbf{0}}$ is the unique fixed point of $f_0$ and $\ensuremath{\mathbf{1}}$ is the unique fixed point of $f_N$, i.e.
\begin{equation}\label{fixed}
f_0(\ensuremath{\mathbf{0}})=\ensuremath{\mathbf{0}}\quad\hbox{ and }\quad f_N(\ensuremath{\mathbf{1}})=\ensuremath{\mathbf{1}}.
\end{equation}
Moreover,
\begin{equation*}
\ensuremath{\mathbf{1}}=\lim_{k\to\infty}f_{\scriptstyle\underbrace{N,\ldots,N}_{k}}(0),
\end{equation*}
because for every $k\in\mathbb N$ we have $|f_{\scriptstyle\underbrace{N,\ldots,N}_{k}}(1)-f_{\scriptstyle\underbrace{N,\ldots,N}_{k}}(0)|\leq c^k$, where $c\in(0,1)$ is a Lipschitz constant of $f_N$.
\begin{lemma}\label{lem24}
Assume that $\varphi\in\mathcal C$. Then $\varphi(\ensuremath{\mathbf{0}})=0$ and $\varphi(\ensuremath{\mathbf{1}})=1$.
\end{lemma}
\begin{proof}
We first prove that $\varphi(\ensuremath{\mathbf{0}})=0$.
By \cref{lem22} we have $\varphi(f_0(0))=0$. Fix $k\in\mathbb N$ and assume inductively that $\varphi(f_{\scriptstyle \underbrace{0,\ldots,0}_{k}}(0))=0$. Applying the induction hypothesis, \eqref{e}, \mbox{\vphantom{g}\cref{lem22}} and the monotonicity of $f_0,\ldots,f_N$ and $\varphi$, we get
\begin{equation*}
\begin{split}
0&=\varphi(f_{\scriptstyle \underbrace{0,\ldots,0}_{k}}(0))
=\sum_{n=0}^{N}\varphi(f_n(f_{\scriptstyle \underbrace{0,\ldots,0}_{k}}(0)))-\sum_{n=0}^{N}\varphi(f_n(0))\\
&=\varphi(f_{\scriptstyle \underbrace{0,\ldots,0}_{k+1}}(0))
+\sum_{n=1}^{N}\varphi(f_n(f_{\scriptstyle \underbrace{0,\ldots,0}_{k}}(0)))
-\sum_{n=1}^{N}\varphi(f_n(0))\\
&\geq\varphi(f_{\scriptstyle \underbrace{0,\ldots,0}_{k+1}}(0))\geq 0.
\end{split}
\end{equation*}
Hence $\varphi(f_{\scriptstyle \underbrace{0,\ldots,0}_{k+1}}(0))=0$.
Now the continuity of $\varphi$ gives
\begin{equation*}
\varphi(\ensuremath{\mathbf{0}})=\lim_{k\to\infty}\varphi(f_{\scriptstyle \underbrace{0,\ldots,0}_{k}}(0))=0.
\end{equation*}
To prove that $\varphi(\ensuremath{\mathbf{1}})=1$ observe first that by \eqref{0<1} and the monotonicity of $\varphi$ we have $\varphi(f_n(1))\leq\varphi(f_{n+1}(0))$ for every $n\in\{0,\ldots,N-1\}$. We want to show that
\begin{equation}\label{i2}
\varphi(f_n(1))=\varphi(f_{n+1}(0))
\end{equation}
for every $n\in\{0,\ldots,N-1\}$. Suppose that, contrary to our claim, there exists $m\in\{0,\ldots,N-1\}$ such that $\varphi(f_m(1))<\varphi(f_{m+1}(0))$. Then, using \cref{lem22} and arguing as in its proof, we obtain
\begin{equation*}
\begin{split}
1&=\varphi(1)=
\sum_{n=0}^{N-1}\varphi(f_n(1))+1-\sum_{n=0}^{N}\varphi(f_n(0))\\
&<\sum_{n=0}^{N-1}\varphi(f_{n+1}(0))+1-\sum_{n=0}^{N}\varphi(f_n(0))
=1-\varphi(f_0(0))=1,
\end{split}
\end{equation*}
a contradiction.
Now we show by induction that
\begin{equation}\label{i3}
\varphi(f_{\scriptstyle \underbrace{N,\ldots,N}_{k}}(1))=1
\end{equation}
for all $k\in\mathbb N$.
The first step of the induction holds due to \cref{lem22}. Fix $k\in\mathbb N$ and assume that \eqref{i3} holds. Then applying \eqref{i3}, \eqref{e}, \cref{lem22}, \eqref{i2} and the monotonicity of $f_0,\ldots,f_N$ and $\varphi$ we get
\begin{equation*}
\begin{split}
1&=\varphi(f_{\scriptstyle \underbrace{N,\ldots,N}_{k}}(1))=
\sum_{n=0}^{N}\varphi(f_n(f_{\scriptstyle \underbrace{N,\ldots,N}_{k}}(1)))-\sum_{n=1}^{N}\varphi(f_n(0))\\
&\leq \sum_{n=0}^{N-1}\varphi(f_{n}(f_{\scriptstyle \underbrace{N,\ldots,N}_{k}}(1)))+\varphi(f_{\scriptstyle \underbrace{N,\ldots,N}_{k+1}}(1))-
\sum_{n=0}^{N-1}\varphi(f_n(1))\\
&\leq\varphi(f_{\scriptstyle \underbrace{N,\ldots,N}_{k+1}}(1))\leq 1.
\end{split}
\end{equation*}
Hence $\varphi(f_{\scriptstyle \underbrace{N,\ldots,N}_{k+1}}(1))=1$.
Finally, passing with $k$ to infinity in~\eqref{i3} and using the continuity of $\varphi$ we obtain $\varphi(\ensuremath{\mathbf{1}})=1$.
\end{proof}
\section{Basic property of solutions}\label{Basic}
Define recursively a sequence $(A_k)_{k\in\mathbb N}$ of subsets of the interval $[0,1]$ as follows:
\begin{equation*}
A_0=[0,1]
\quad\hbox{ and }\quad
A_k=\bigcup_{n=0}^{N}f_n(A_{k-1})\quad\hbox{for every }k\in\mathbb N.
\end{equation*}
By~\eqref{[0,1]} we have $A_1=\bigcup_{n=0}^{N}\big[f_n(0),f_n(1)\big]\varsubsetneq A_0$; moreover, a witness of this strict inclusion can be chosen different from $0$~and~$1$. This, jointly with an easy induction, shows that $A_{k+1}\varsubsetneq A_k$ for every $k\in\mathbb N$, again with a witness of the strict inclusion different from $0$~and~$1$. Put
\begin{equation*}
A_*=\bigcap_{k\in\mathbb N}A_k.
\end{equation*}
It is clear that $A_*$ is compact and
\begin{equation}\label{structure of invariant set}
A_*=\bigcup_{n=0}^{N}f_n(A_*).
\end{equation}
We will show that the just constructed set $A_*$, called the \emph{attractor} of the iterated function system $\{f_0,\ldots,f_N\}$ (see~\cite{B1988}), is
a \emph{Cantor-like set}, i.e.\ an uncountable, nowhere dense and perfect subset of $\mathbb R$ (see~\cite{WiHa1993}); that $A_*$ is uncountable and nowhere dense follows from its construction. Moreover, we will see in \cref{perfect} that $A_*$ is perfect and in \cref{ex34} that it is of Lebesgue measure zero if $f_0,\ldots,f_N$ are similitudes, whereas in the general case $A_*$ can have positive Lebesgue measure (see \cite{MZPositive}).
From the construction we have
\begin{equation*}
A_*=\bigcap_{k\in\mathbb N}\left(\bigcup_{n_1,\ldots,n_k\in\{0,\ldots,N\}}
\big[f_{n_1,\ldots,n_k}(0),f_{n_1,\ldots,n_k}(1)\big]\right).
\end{equation*}
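The sets $A_k$ are easy to list explicitly. The following minimal numerical sketch (our illustration only, not taken from~\cite{MZ}; the concrete maps $f_0(x)=x/3$, $f_1(x)=(x+2)/3$ and all identifiers are sample choices) prints the intervals whose union is $A_k$.
\begin{verbatim}
# Illustrative sketch: the intervals [f_{n_1,...,n_k}(0), f_{n_1,...,n_k}(1)]
# whose union is A_k, for the sample maps f_0(x) = x/3, f_1(x) = (x+2)/3
# (for which A_* is the Cantor ternary set).

def contraction(n, x):
    """The two sample contractions of the iterated function system."""
    return x / 3 if n == 0 else (x + 2) / 3

def level(k):
    """Return the sorted list of intervals whose union is A_k."""
    intervals = [(0.0, 1.0)]                 # A_0 = [0, 1]
    for _ in range(k):                       # A_{j+1} = f_0(A_j) union f_1(A_j)
        intervals = [(contraction(n, a), contraction(n, b))
                     for (a, b) in intervals for n in (0, 1)]
    return sorted(intervals)

if __name__ == "__main__":
    for a, b in level(3):
        print(f"[{a:.4f}, {b:.4f}]")         # 8 intervals of length 1/27
\end{verbatim}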
Whenever a point $x$ can be written as
\begin{equation}\label{x}
x=\lim_{k\to\infty}f_{x_1,\ldots,x_k}(0)=\lim_{k\to\infty}f_{x_1,\ldots,x_k}(1),
\end{equation}
we say that $x$ has an \emph{address}\footnote{We have come across the term \emph{coding} as well.} (see~\cite{B1988}).
\begin{lemma}\label{attractor and addresses}
The set~$A_*$ is exactly the set of points in~$[0,1]$ that have an address.
\end{lemma}
\begin{proof}
Let $x\in A_*$. Then for every $k\in \field{N}$ there exist $x_1^k,\ldots,x_k^k\in \{0,\ldots,N\}$ such that $x\in[f_{x_1^k,\ldots,x_k^k}(0),f_{x_1^k,\ldots,x_k^k}(1)]$. The entries $x_m^n$ need not agree with $x_m^l$ for different $l$ and $n$; however, as each $x_m^l$ is chosen from the finite set $\{0,\ldots,N\}$, a Cantor diagonal argument yields a single sequence that is an address of $x$.
Conversely, it is easy to see that every sequence $(x_k)_{k\in\mathbb N}$ of elements of $\{0,\ldots,N\}$ is an address of a point from the set $A_*$.
\end{proof}
Note that
\begin{equation}\label{01inA}
\ensuremath{\mathbf{0}}=\min A_*\quad\hbox{ and }\quad \ensuremath{\mathbf{1}}=\max A_*.
\end{equation}
Since $A_*$ is a closed set, it follows that $[\ensuremath{\mathbf{0}},\ensuremath{\mathbf{1}}]\setminus A_*$ is an open set. Moreover,
\begin{equation}\label{A*}
[\ensuremath{\mathbf{0}},\ensuremath{\mathbf{1}}]\setminus A_*=\bigcup_{k\in\mathbb N}\mathop{\bigcup_{0\leq n_1,\ldots,n_{k-1}\leq N}}_{0\leq n_k\leq N-1}\big(f_{n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}),f_{n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}})\big)
\end{equation}
and for all $k\in\mathbb N$, $n_1,\ldots,n_{k-1}\in\{0,\ldots,N\}$ and $n_k\in\{0,\ldots,N-1\}$ the interval $(f_{n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}),f_{n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}}))$ is a connected component of the set $[\ensuremath{\mathbf{0}},\ensuremath{\mathbf{1}}]\setminus A_*$.
Now we are in a position to show that any $\varphi\in\mathcal C$ is constant on the closure of each connected component of the set $[0,1]\setminus A_*$. We do it in two steps.
\begin{lemma}\label{lem31}
Assume that $\varphi\in\mathcal C$. Then:
\begin{enumerate}
\item\label{restriction phi to 0} $\varphi|_{[0,\ensuremath{\mathbf{0}}]}=0$;
\item\label{restriction phi to 1} $\varphi|_{[\ensuremath{\mathbf{1}},1]}=1$;
\item\label{phi constant} $\varphi|_{[f_n(\ensuremath{\mathbf{1}}),f_{n+1}(\ensuremath{\mathbf{0}})]}$ is constant for every $n\in\{0,\ldots,N-1\}$.
\end{enumerate}
\end{lemma}
\begin{proof}
To prove (\ref{restriction phi to 0})~and~(\ref{restriction phi to 1}) it is enough to apply \cref{lem24} jointly with the monotonicity of $\varphi$.
Let us tackle (\ref{phi constant}). According to~\eqref{i2} and to the monotonicity of $f_0,\ldots,f_N$ and $\varphi$, we see that $\varphi(f_n(\ensuremath{\mathbf{1}}))\leq\varphi(f_{n+1}(\ensuremath{\mathbf{0}}))$.
Suppose that, contrary to our claim, there exists $m\in\{0,\ldots,N-1\}$ such that $\varphi(f_m(\ensuremath{\mathbf{1}}))<\varphi(f_{m+1}(\ensuremath{\mathbf{0}}))$. Then, using \cref{lem24}, \eqref{e}, and the first equality of \eqref{fixed} we get
\begin{equation*}
\begin{split}
1&=\varphi(\ensuremath{\mathbf{1}})=
\sum_{n=0}^{N-1}\varphi(f_n(\ensuremath{\mathbf{1}}))+\varphi(f_N(\ensuremath{\mathbf{1}}))-\sum_{n=0}^{N}\varphi(f_n(0))\\
&<\sum_{n=0}^{N-1}\varphi(f_{n+1}(\ensuremath{\mathbf{0}}))+1-\sum_{n=0}^{N}\varphi(f_n(0))
=\varphi(\ensuremath{\mathbf{0}})-\varphi(f_0(\ensuremath{\mathbf{0}}))+1=1,
\end{split}
\end{equation*}
a contradiction.
\end{proof}
\begin{lemma}\label{lem32}
Assume that $\varphi\in\mathcal C$. Then for all $k\in\mathbb N$, $n_1,\ldots,n_{k-1}\in\{0,\ldots,N\}$ and $n_k\in\{0,\ldots,N-1\}$ there exists $c_{n_1,\ldots,n_k}\in[0,1]$ such that
\begin{equation}\label{const}
\varphi|_{[f_{n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}),f_{n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}})]}=c_{n_1,\ldots,n_k}.
\end{equation}
\end{lemma}
\begin{proof}
We proceed by induction on $k$.
The first step of the induction is implied by assertion~(\ref{phi constant}) of \cref{lem31}.
Fix $k\in\mathbb N$, $n_1,\ldots,n_{k-1}\in\{0,\ldots,N\}$, $n_k\in\{0,\ldots,N-1\}$ and assume that there exists $c_{n_1,\ldots,n_k}\in[0,1]$ such that \eqref{const} holds.
Then \eqref{const}, \eqref{e} and the monotonicity of $f_0,\ldots,f_N$ and $\varphi$ imply
\begin{equation*}
\begin{split}
c_{n_1,\ldots,n_k}&=\varphi(f_{n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}))
=\sum_{n=0}^{N}\varphi(f_{n,n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}))-\sum_{n=0}^{N}\varphi(f_n(0))\\
&\leq\sum_{n=0}^{N}\varphi(f_{n,n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}}))-
\sum_{n=0}^{N}\varphi(f_n(0))=\varphi(f_{n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}}))\\
&=c_{n_1,\ldots,n_k}.
\end{split}
\end{equation*}
Hence
\begin{equation*}
\sum_{n=0}^{N}\varphi(f_{n,n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}))=
\sum_{n=0}^{N}\varphi(f_{n,n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}})),
\end{equation*}
and applying again the monotonicity of $f_0,\ldots,f_N$ and $\varphi$, we obtain
\begin{equation*}
\varphi(f_{n,n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}))=\varphi(f_{n,n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}}))
\end{equation*}
for every $n\in\{0,\ldots,N\}$.
\end{proof}
Combining \cref{lem31,lem32} with \eqref{A*}, we get the following result.
\begin{theorem}\label{thm33}
If the set $A_*$ has Lebesgue measure zero, then $\mathcal C=\mathcal C_s$.
\end{theorem}
We now give an example of contractions $f_0,\ldots, f_N$ for which the set $A_*$ is of Lebesgue measure zero.
\begin{example}\label{ex34}
Assume additionally to our assumptions in the introduction that $f_0,\ldots,f_N$ are similitudes, i.e.\
\begin{equation*}
f_n(x)=(\beta_n-\alpha_n)x+\alpha_n
\end{equation*}
for all $x\in[0,1]$ and $n\in\{0,\ldots,N\}$, where
\begin{equation*}
0\leq\alpha_0<\beta_0\leq\alpha_1<\beta_1\leq\cdots\leq\alpha_N<\beta_N\leq 1
\quad\hbox{and}\quad
\bigcup_{n=0}^{N}\big[\alpha_n,\beta_n\big]\neq [0,1].
\end{equation*}
Clearly, \eqref{0<1} and \eqref{[0,1]} hold. Denote by $l$ the Lebesgue measure on the real line and put $d=l(A_0\setminus A_1)$. By a simple induction we get $l(A_k\setminus A_{k+1})=d(1-d)^k$ for every integer $k\geq 0$. From \eqref{0<1} and \eqref{[0,1]} we infer that $d\in(0,1)$ and hence that
\begin{equation*}
l(A_*)=1-\sum_{k=0}^{\infty}l(A_k\setminus A_{k+1})=1-\frac{d}{1-(1-d)}=0.
\end{equation*}
\end{example}
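For instance, for $N=1$, $f_0(x)=\frac{x}{3}$ and $f_1(x)=\frac{x+2}{3}$ (so that $A_*$ is the Cantor ternary set) we have $d=\frac13$ and $l(A_k)=\big(\frac23\big)^k$, whence $l(A_k\setminus A_{k+1})=\frac13\big(\frac23\big)^k$ and $l(A_*)=0$.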
We finish this section with one more property of the set $A_*$.
\begin{theorem}\label{perfect}
The set $A_*$ is perfect.
\end{theorem}
\begin{proof}
We know from its definition that $A_*$ is closed, and it is nonempty by \eqref{01inA}.
Let $x\in A_*$ and fix an address of $x$, i.e.\ a sequence $(x_k)_{k\in\mathbb N}$ of elements of $\{0,\ldots,N\}$ satisfying~\eqref{x}; we can choose such a sequence according to \cref{attractor and addresses}. To complete the proof, we need to show that in each neighbourhood of~$x$ we can find some element belonging to $A_*\setminus\{x\}$.
Fix $\varepsilon>0$ and $m\in \mathbb N$ so large that $L^{m-1}<\varepsilon$, where $L\in(0,1)$ is a common Lipschitz constant for the contractions $f_0,\ldots,f_N$. Define a sequence $(y_k)_{k\in\mathbb N}$ by putting
$y_k=x_k$ for all $k\neq m$ and choosing arbitrarily $y_m\in\{0,\ldots,N\}\setminus\{x_m\}$. Then
\begin{equation*}
y=\lim_{k\to\infty}f_{y_1,\ldots,y_k}(0)\in A_*.
\end{equation*}
Since all considered contractions are injective and the addresses of points $x$ and $y$ differ only in the $m$-th coordinate, it follows that $y\neq x$. Moreover,
\begin{equation*}
\begin{split}
|x-y|&=\lim_{k\to\infty}|f_{x_1,\ldots,x_{m-1}}(f_{x_m,\ldots,x_k}(0))-f_{x_1,\ldots,x_{m-1}}(f_{y_m,\ldots,y_k}(0))|\\
&\leq L^{m-1}\lim_{k\to\infty}|f_{x_m,\ldots,x_k}(0)-f_{y_m,\ldots,y_k}(0)|\leq
L^{m-1}<\varepsilon.
\end{split}
\end{equation*}
The proof is complete.
\end{proof}
\section{Existence of solutions}\label{Existence}
In the previous section we discussed the behaviour of functions belonging to the class $\mathcal C$, but so far we do not know whether $\mathcal C$ contains any function at all.
In this section we show that $\mathcal C\neq\emptyset$.
Fix positive real numbers $p_0,\ldots,p_N$ such that
\begin{equation}\label{prob}
\sum_{n=0}^{N}p_n=1.
\end{equation}
Then there exists a unique Borel probability measure $\mu$ such that
\begin{equation}\label{inv}
\mu(B)=\sum_{n=0}^{N}p_n\mu(f_n^{-1}(B))
\end{equation}
for every Borel set $B\subset [0,1]$ (see \cite{H1981}; cf.\ \cite{F1997}).
From now on the letter $\mu$ will be reserved for the unique Borel probability measure satisfying~\eqref{inv} for every Borel set $B\subset [0,1]$.
Now we are interested in some properties of the measure $\mu$ that will be needed later. We begin with a folklore lemma; for its proof the reader can consult \cite{LaMy1994}.
\begin{lemma}\label{lem41}
The measure $\mu$ is either singular or absolutely continuous with respect to the Lebesgue measure on $\mathbb R$.
\end{lemma}
To formulate the next lemma, which is also well-known (see e.g.\ \cite{B1988}), we recall that the support of the measure $\mu$ is the set $\supp\mu$ of all points $x\in[0,1]$ such that $\mu([x-\varepsilon,x+\varepsilon])>0$ for every $\varepsilon>0$.
\begin{lemma}\label{lem42}
We have $\supp\mu=A_*$. In particular, $\mu([0,1]\setminus A_*)=0$.
\end{lemma}
\begin{lemma}\label{lem43}
The measure $\mu$ is continuous.
\end{lemma}
\begin{proof}
To prove that $\mu$ is continuous it is enough to show $\mu(\{x\})=0$ for every $x\in [0,1]$.
Fix $x\in [0,1]$.
If $x\not\in A_*$, then $\mu(\{x\})=0$ by \cref{lem42}. Hence we may assume that $x\in A_*$; choose an address of~$x$, that is, a sequence $(x_k)_{k\in\mathbb N}$ of elements of $\{0,\ldots,N\}$ such that \eqref{x} holds. Note that the monotonicity of $f_0,\ldots,f_N$ implies
\begin{equation}\label{xx}
f_{x_1,\ldots,x_k}(0)\leq f_{x_1,\ldots,x_{k+1}}(0)\leq x\leq
f_{x_1,\ldots,x_{k+1}}(1)\leq f_{x_1,\ldots,x_k}(1)
\end{equation}
for every $k\in\mathbb N$.
First we want to prove that
\begin{equation}\label{f_n(0)}
\mu\big(\{f_n(0)\}\big)=\mu\big(\{f_n(1)\}\big)=0
\end{equation}
for every $n\in\{0,\ldots,N\}$.
We begin with proving that
\begin{equation}\label{01}
\mu(\{0\})=\mu(\{1\})=0.
\end{equation}
If $f_0(0)>0$, then~\eqref{inv} gives
\begin{equation*}
\mu(\{0\})=\sum_{n=0}^{N}p_n\mu(\{f_n^{-1}(0)\})=
\sum_{n=0}^{N}p_n\mu(\emptyset)=0.
\end{equation*}
If $f_0(0)=0$, then~\eqref{inv} yields
\begin{equation*}
\mu(\{0\})=\sum_{n=0}^{N}p_n\mu(\{f_n^{-1}(0)\})=
p_0\mu(\{0\})+\sum_{n=1}^{N}p_n\mu(\emptyset)=p_0\mu(\{0\}),
\end{equation*}
and since $p_0\in(0,1)$ we conclude that $\mu(\{0\})=0$.
In the same way, considering two cases ($f_N(1)<1$ and $f_N(1)=1$) and using~\eqref{inv} jointly with the fact that $p_N\in(0,1)$ in the second case, we get $\mu(\{1\})=0$.
Using~\eqref{inv}, \eqref{0<1} and \eqref{01} we obtain
\begin{equation*}
\mu(\{f_0(0)\})=\sum_{n=0}^{N}p_n\mu(\{f_n^{-1}(f_0(0))\})=p_0\mu(\{0\})=0,
\end{equation*}
\begin{equation*}
\mu(\{f_N(1)\})=\sum_{n=0}^{N}p_n\mu(\{f_n^{-1}(f_N(1))\})=p_N\mu(\{1\})=0
\end{equation*}
and
\begin{equation*}
\begin{split}
\mu(\{f_m(1),f_{m+1}(0)\})&= \sum_{n=0}^{N}p_n\mu(\{f_n^{-1}(f_m(1)),f_n^{-1}(f_{m+1}(0))\})\\
&\leq 2(p_m\mu(\{1\})+p_{m+1}\mu(\{0\}))=0
\end{split}
\end{equation*}
for every $m\in\{0,\ldots,N-1\}$.
By \eqref{f_n(0)}~and~\eqref{0<1}, equality~\eqref{inv} implies
\begin{equation}\label{mu(B)}
\mu(f_n(B))=p_n\mu(B)
\end{equation}
for all $n\in\{0,\ldots,N\}$ and Borel sets $B\subset[0,1]$.
Finally, applying \eqref{xx} and $k$ times equality~\eqref{mu(B)} jointly with the fact that $\mu$ is a probability measure we get
\begin{equation*}
\begin{split}
\mu(\{x\})&=\mu\left(\bigcap_{k\in\mathbb N} \big[f_{x_1,\ldots,x_k}(0), f_{x_1,\ldots,x_k}(1)\big]\right)\\
&=\lim_{k\to\infty}\mu\left(\big[f_{x_1,\ldots,x_k}(0), f_{x_1,\ldots,x_k}(1)\big]\right)\\
&=\lim_{k\to\infty}\prod_{i=1}^k p_{x_i}
\leq\lim_{k\to\infty}{\left(\max\{p_0,\ldots,p_N\}\right)}^k=0.
\end{split}
\end{equation*}
The proof is complete.
\end{proof}
Combining \cref{lem42,lem43} with~\eqref{A*} we get the following corollary.
\begin{corollary}\label{cor44}
The measure $\mu$ vanishes on each of the following intervals:
$[0,\ensuremath{\mathbf{0}}]$, $[\ensuremath{\mathbf{1}},1]$, $[f_{n_1,\ldots,n_k}(\ensuremath{\mathbf{1}}),f_{n_1,\ldots,n_k+1}(\ensuremath{\mathbf{0}})]$ with $k\in\mathbb N$, $n_1,\ldots,n_{k-1}\in\{0,\ldots,N\}$ and $n_k\in\{0,\ldots,N-1\}$.
\end{corollary}
Define the function $\varphi\colon[0,1]\to[0,1]$ by
\begin{equation*}
\varphi(x)=\mu([0,x]).
\end{equation*}
From now on the letter $\varphi$ will be reserved for the just defined function.
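Although $\mu$, and hence $\varphi$, is defined only implicitly by \eqref{inv}, it can be approximated numerically. The following sketch (our illustration only; the similitudes, the weights and all identifiers are sample choices) iterates the relation $\mu_k([0,x])=\sum_{n=0}^{N}p_n\mu_{k-1}\big(f_n^{-1}([0,x])\big)$ starting from Lebesgue measure; for the sample parameters the printed values stabilise at the Cantor function.
\begin{verbatim}
# Illustrative sketch: approximate phi(x) = mu([0, x]) by iterating
# mu_k([0, x]) = sum_n p_n * mu_{k-1}(f_n^{-1}([0, x])),
# starting from Lebesgue measure.  The similitudes are encoded by their
# intervals [f_n(0), f_n(1)].

def phi_approx(x, k, p, intervals):
    """Value at x of the k-th iterate of the distribution function."""
    if k == 0:
        return x                   # distribution function of Lebesgue measure
    total = 0.0
    for p_n, (a, b) in zip(p, intervals):
        if x >= b:                 # f_n^{-1}([0, x]) = [0, 1]
            total += p_n
        elif x > a:                # f_n^{-1}([0, x]) = [0, f_n^{-1}(x)]
            total += p_n * phi_approx((x - a) / (b - a), k - 1, p, intervals)
    return total

if __name__ == "__main__":
    # sample choice: f_0(x) = x/3, f_1(x) = (x+2)/3, p_0 = p_1 = 1/2,
    # for which phi is the classical Cantor function.
    print(phi_approx(0.75, 30, (0.5, 0.5), ((0.0, 1/3), (2/3, 1.0))))  # ~ 2/3
\end{verbatim}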
Repeating the proof of Theorem~3.3 from~\cite{MZ} we get the following result.
\begin{theorem}\label{thm41}
Either $\varphi\in\mathcal C_a$ or $\varphi\in\mathcal C_s$.
\end{theorem}
As a consequence of \cref{thm41}, we have $\varphi\in\mathcal C$. \cref{lem42} implies that $\varphi$ cannot be constant on an open interval having nonempty intersection with the attractor $A_*$. Therefore, all the constants $c_{n_1,\ldots,n_k}$ occurring in the assertion of \cref{lem32} (associated with the above constructed $\varphi$) are pairwise different and belong to the open interval $(0,1)$.
We finish this section by giving a precise formula for $\varphi$.
\begin{theorem}\label{formula}
Assume that $x\in[0,1]$.
\begin{enumerate}[label=(\roman*),ref=(\roman*)]
\item If $x\in[0,\ensuremath{\mathbf{0}}]$, then $\varphi(x)=0$.\label{phi is zero}
\item If $x\in[\ensuremath{\mathbf{1}},1]$, then $\varphi(x)=1$.\label{phi is one}
\item If $x\in A_*$ and $(x_l)_{l\in\mathbb N}$ is an address of $x$, then
\begin{equation*}
\varphi(x)=\sum_{l=1}^{\infty}\sign(x_l)\left[\prod_{n=0}^{N}
p_n^{\#\{i\in\{1,\ldots,l-1\}:x_i=n\}}\cdot
\sum_{n=0}^{x_l-1}p_n\right].\label{formula for phi if address}
\end{equation*}
\item If $x\in[f_{x_1,\ldots,x_k}(\ensuremath{\mathbf{1}}),f_{x_1,\ldots,x_k+1}(\ensuremath{\mathbf{0}})]$ with $k\in\mathbb N$, $x_1,\ldots,x_{k-1}\in\{0,\ldots,N\}$ and $x_k\in\{0,\ldots,N-1\}$, then
\begin{equation*}
\begin{split}
\varphi(x)=&\sum_{l=1}^{k}\sign(x_l)\left[ \prod_{n=0}^{N}p_n^{\#\{i\in\{1,\ldots,l-1\}:x_i=n\}}\cdot
\sum_{n=0}^{x_l-1}p_n\right]\\
&+\prod_{n=0}^{N}p_n^{\#\{i\in\{1,\ldots,k\}:x_i=n\}}.
\end{split}
\end{equation*}\label{formula for phi if not address}
\end{enumerate}
\end{theorem}
\begin{proof}
Assertions \cref{phi is zero} and \cref{phi is one} are trivially implied by assertions \eqref{restriction phi to 0} and \eqref{restriction phi to 1} of \cref{lem31}. The proof of assertion~\cref{formula for phi if address} follows very closely the proof of Theorem 3.6 from \cite{MZ}, so we omit it. Assertion~\cref{formula for phi if not address} is a consequence of \cref{lem32}, the fact that
$(x_1,\ldots,x_k,N,\ldots)$ is the address of the point $f_{x_1,\ldots,x_k}(\ensuremath{\mathbf{1}})$ and assertion~\cref{formula for phi if address}; indeed
\begin{equation*}
\begin{split}
\varphi(x)&=\varphi(f_{x_1,\ldots,x_k}(\ensuremath{\mathbf{1}}))=\sum_{l=1}^{k}\sign(x_l)\left[ \prod_{n=0}^{N}p_n^{\#\{i\in\{1,\ldots,l-1\}:x_i=n\}}\cdot
\sum_{n=0}^{x_l-1}p_n\right]\\
&\hspace{3ex}+\sum_{l=k+1}^{\infty} \left[\prod_{n=0}^{N}p_n^{\#\{i\in\{1,\ldots,k\}:x_i=n\}}p_N^{l-k-1}\right]
\cdot\big(1-p_N\big)\\
&=\sum_{l=1}^{k}\sign(x_l)\left[ \prod_{n=0}^{N}p_n^{\#\{i\in\{1,\ldots,l-1\}:x_i=n\}}\cdot
\sum_{n=0}^{x_l-1}p_n\right]\\
&\hspace{3ex}+\prod_{n=0}^{N}p_n^{\#\{i\in\{1,\ldots,k\}:x_i=n\}}.
\end{split}
\end{equation*}
This finishes the proof.
\end{proof}
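The series in assertion~\cref{formula for phi if address} is easy to evaluate numerically, since the tail omitted after the first $k$ terms is at most $\prod_{i=1}^{k}p_{x_i}$. The following sketch (our illustration only; all identifiers are ours) computes the corresponding partial sums from a truncated address.
\begin{verbatim}
# Illustrative sketch: partial sums of the series in assertion (iii),
# computed from a finite initial segment (x_1, ..., x_k) of an address.

def phi_from_address(address, p):
    """Partial sum; the omitted tail is at most prod_i p[x_i]."""
    value, weight = 0.0, 1.0   # weight = prod_n p_n^{#{i < l : x_i = n}}
    for x_l in address:
        value += weight * sum(p[:x_l])   # empty sum for x_l = 0 (sign factor)
        weight *= p[x_l]
    return value

if __name__ == "__main__":
    # Sample check: f_0(x) = x/3, f_1(x) = (x+2)/3, p_0 = p_1 = 1/2; the point
    # with address (1, 0, 1, 0, ...) is 3/4, where the Cantor function is 2/3.
    print(phi_from_address((1, 0) * 12, (0.5, 0.5)))   # about 2/3
\end{verbatim}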
\section{More about the class \texorpdfstring{$\mathcal C$}{C}}\label{More}
As we have seen in \cref{thm41}, with each sequence $\big(p_0,\ldots,p_N\big)$ of positive real numbers satisfying~\eqref{prob} we have associated a continuous increasing surjective solution $\varphi_{p_0,\ldots,p_N}\colon[0,1]\to[0,1]$ of equation~\eqref{e}. We denote by $\mathcal W$ the set of all these solutions. The main purpose of this section is to prove the following result.
\begin{theorem}\label{thm51}
The set $\mathcal W$ is linearly independent and its convex hull is contained in $\mathcal C$.
\end{theorem}
\subsection{Proof of Theorem~\ref{thm51}}
The statement concerning the convex hull follows from \cref{lem21}.
The proof for the independence will be divided into several lemmas. Before we formulate the first one, note that for every $y\in A_*$ equality~\cref{structure of invariant set} guarantees that there exists at least one $n\in\{0,\ldots,N\}$ such that $y\in f_n(A_*)$. Therefore, we can define a transformation $T\colon A_*\to [0,1]$ by putting
\begin{equation*}
T(y)=f_{n(y)}^{-1}(y),
\end{equation*}
where
\begin{equation*}
n(y)=\max\{n\in\{0,\ldots,N\}:y\in f_n(A_*)\}.
\end{equation*}
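For the sample system $f_0(x)=x/3$, $f_1(x)=(x+2)/3$ the transformation $T$ takes a simple explicit form; on addresses it deletes the first entry, which will be made precise in \cref{sigma pi T commute} below. The following sketch (our illustration only; all identifiers are ours) demonstrates this behaviour.
\begin{verbatim}
# Illustrative sketch of T for the sample system f_0(x) = x/3, f_1(x) = (x+2)/3,
# whose attractor A_* is the Cantor ternary set.

def T(y):
    """T(y) = f_{n(y)}^{-1}(y), where n(y) is maximal with y in f_{n(y)}(A_*)."""
    return 3 * y - 2 if y >= 2 / 3 else 3 * y

def point_with_address(address):
    """lim f_{x_1,...,x_k}(0), evaluated for a finite truncation of the address."""
    x = 0.0
    for digit in reversed(address):
        x = x / 3 if digit == 0 else (x + 2) / 3
    return x

if __name__ == "__main__":
    addr = (1, 0, 1, 1, 0, 1, 0, 0, 1, 0)
    # T shifts the address: the two printed values coincide up to rounding.
    print(T(point_with_address(addr)), point_with_address(addr[1:]))
\end{verbatim}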
\begin{lemma}\label{measure preserving}
The transformation $T$ maps $A_*$ into $A_*$ and it is measure preserving for $\mu$.
\end{lemma}
\begin{proof}
To see that $T(A_*)\subset A_*$ we fix $y\in A_*$. Then the injectivity of~$f_{n(y)}$ implies that there is exactly one $x\in A_*$ such that $y=f_{n(y)}(x)$. Thus $T(y)=x\in A_*$.
Now we prove that $T$ is measure preserving for $\mu$.
Fix a Borel set $B\subset A_*$. As $A_*\subset \bigcup_{n=0}^N [f_n(0),f_n(1)]$, we have
\begin{equation*}
T^{-1}(B)=\bigcup_{n=0}^N {\{y\in [f_n(0),f_n(1)]\cap A_*: T(y)\in B\}}.
\end{equation*}
Then using \cref{lem43} jointly with \eqref{0<1} and the fact that the set~$\bigcup_{i=0}^Nf_i^{-1}(y)$ contains just one element in the case where $y\in (f_n(0),f_n(1))$ and at most two elements in the case where $y\in\{f_n(0),f_n(1)\}$, we obtain
\begin{dmath*}
\mu(T^{-1}(B))=\sum_{n=0}^N\mu({\{y\in [f_n(0),f_n(1)]\cap A_*: T(y)\in B\}})
=\sum_{n=0}^N\mu({\{y\in [f_n(0),f_n(1)]\cap A_*: f_{n}^{-1}(y)\in B\}})
=\sum_{n=0}^N\mu({\{y\in [f_n(0),f_n(1)]\cap A_*: y\in f_n(B)\}}).
\end{dmath*}
Next note that $f_n(B)\subset f_n(A_*)\subset A_*$ for every $n\in\{0,\ldots,N\}$. Thus
\begin{dmath*}
\mu(T^{-1}(B))=\sum_{n=0}^N\mu(f_n(B)).
\end{dmath*}
Finally, according to \cref{mu(B)} we conclude that
\begin{dmath*}
\mu(T^{-1}(B))=\sum_{n=0}^{N}p_n\mu(B)=\mu(B),
\end{dmath*}
and the proof is complete.
\end{proof}
By \cref{attractor and addresses} the points in $A_*$ are exactly the ones that have an address. The next lemma shows that we might run into slight problems with the uniqueness of the addresses if
\begin{equation}\label{Nb}
f_0(0)=0,\quad f_N(1)=1\quad\hbox{ and }\quad N_b\neq\emptyset,
\end{equation}
where
\begin{equation*}
N_b=\{n\in \{0,\ldots,N-1\}: f_n(1)=f_{n+1}(0)\}.
\end{equation*}
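For instance, for $N=2$ and the similitudes $f_0(x)=\frac{x}{4}$, $f_1(x)=\frac{x+1}{4}$, $f_2(x)=\frac{x+3}{4}$, conditions \eqref{0<1} and \eqref{[0,1]} hold, condition \eqref{Nb} is satisfied with $N_b=\{0\}$, and the point $\frac14=f_0(1)=f_1(0)$ has the two different addresses $(0,2,2,2,\ldots)$ and $(1,0,0,0,\ldots)$.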
\begin{lemma}\label{uniqueness}
$ $
\begin{enumerate}
\item\label{at most two addresses} Every point from $A_*$ has at most two addresses, and if a point from $A_*$ has two addresses, then \eqref{Nb} holds and exactly one of the addresses belongs to the set
\begin{equation*}
Z_b=\left\{(x_k)_{k\in\mathbb N}\in \{0,\ldots,N\}^{\mathbb N}:\exists_{n\in\mathbb N}(x_{n}\in N_b\hbox{ and }x_k=N\hbox{ for every }k>n)\right\}.
\end{equation*}
\item\label{additional address} If \eqref{Nb} holds and a point from $A_*$ has an address belonging to the set $Z_b$, then it also has an address not belonging to the set $Z_b$.
\item\label{exactly one address} Every point from $A_*$ has exactly one address if and only if \eqref{Nb} does not hold.
\end{enumerate}
\end{lemma}
\begin{proof}
(\ref{at most two addresses}) Assume that $(x_k)_{k\in\mathbb{N}}$ and $(y_k)_{k\in\mathbb{N}}$ are two different addresses of a point $x\in A_*$. Put
\begin{equation*}
m=\min\{k\in\mathbb{N}:x_k\neq y_k\}
\end{equation*}
and assume, without loss of generality, that $x_m<y_m$. Then according to \eqref{x} and \eqref{0<1} we have
\begin{dmath*}
x=f_{x_1,\ldots,x_{m}}\left(\lim_{k\to\infty}f_{x_{m+1},\ldots,x_{k}}(1)\right)
\leq f_{x_1,\ldots,x_m}(1)\leq f_{y_1,\ldots,y_m}(0)\leq f_{y_1,\ldots,y_m}\left(\lim_{k\to\infty} f_{y_{m+1},\ldots,y_{k}}(0)\right)=x.
\end{dmath*}
Thus $f_{x_1,\ldots,x_m}(1)=f_{y_1,\ldots,y_m}(0)$, and hence $f_{x_m}(1)=f_{y_m}(0)\in A_*$. Finally, making use of \eqref{0<1}, we conclude that $x_m\in N_b$, $f_0(0)=0$ and $f_N(1)=1$. In consequence \eqref{Nb} holds, $(x_k)_{k\in\mathbb{N}}\in Z_b$ and $(y_k)_{k\in\mathbb{N}}\not\in Z_b$. Moreover, if we assumed that $x$ has a third address $(z_k)_{k\in\mathbb{N}}$, different from both the first ones, we would have $(z_k)_{k\in\mathbb{N}}\in Z_b\setminus\{(x_k)_{k\in\mathbb{N}}\}$, which is impossible.
(\ref{additional address}) Assume that \eqref{Nb} holds and let $x\in A_*$ be a point having an address $(x_k)_{k\in\mathbb{N}}\in Z_b$. Then there is $m\in\mathbb N$ such that $x_m\in N_b$ and $x=f_{x_1,\ldots,x_m}(\ensuremath{\mathbf{1}})$. Applying now \eqref{Nb} we get
\begin{equation*}
x=f_{x_1,\ldots,x_m}(1)=f_{x_1,\ldots,x_m+1}(0)=f_{x_1,\ldots,x_m+1}(\ensuremath{\mathbf{0}}),
\end{equation*}
which shows that $x$ has an address not belonging to the set $Z_b$.
(\ref{exactly one address}) Assertion (\ref{at most two addresses}) implies that if \eqref{Nb} does not hold, then every point from $A_*$ has exactly one address.
Assume now that every point from $A_*$ has exactly one address and suppose that, on the contrary, \eqref{Nb} holds. Then $0=\ensuremath{\mathbf{0}}$ and $1=\ensuremath{\mathbf{1}}$. Fix $n\in N_b$ and put $x=f_n(\ensuremath{\mathbf{1}})$. Then $x\in A_*$ and $f_n(\ensuremath{\mathbf{1}})=f_n(1)=f_{n+1}(0)=f_{n+1}(\ensuremath{\mathbf{0}})$, which jointly with \eqref{x} implies that $x$ has two different addresses, a contradiction.
\end{proof}
We now define a map $\pi\colon \{0,\ldots,N\}^{\field{N}}\setminus Z_b\to A_*$ by putting
\begin{equation*}
\pi((x_k)_{k\in\mathbb N})=\lim_{k\to\infty}f_{x_1,\ldots,x_k}(0),
\end{equation*}
where $Z_b$ is as in \cref{uniqueness} in the case where \eqref{Nb} holds and $Z_b=\emptyset$ in the case where \eqref{Nb} does not hold.
\begin{lemma}\label{pi is a bijection}
The map $\pi$ is a bijection.
\end{lemma}
\begin{proof}
It is enough to apply \cref{uniqueness} and \cite[Theorem 1 in Chapter 4.2]{B1988}.
\end{proof}
Denote by $\sigma$ the \emph{Bernoulli shift}, i.e.\ the map from $\{0,\ldots,N\}^{\field{N}}$ into itself defined by
\begin{equation*}
\sigma\big((x_k)_{k\in\mathbb N}\big)=(x_{k+1})_{k\in\mathbb N}.
\end{equation*}
\begin{lemma}\label{sigma pi T commute}
For every $n\in \field{N}$ we have
\begin{equation*}
\sigma^{-n}\circ \pi^{-1}=\pi^{-1}\circ T^{-n}.
\end{equation*}
\end{lemma}
\begin{proof}
We begin by proving that we have
\begin{equation}\label{diagram commutes}
\pi\circ\sigma=T\circ \pi
\end{equation}
on $\{0,\ldots,N\}^{\field{N}}\setminus Z_b$.
First, we note that $\sigma(\{0,\ldots,N\}^{\field{N}}\setminus Z_b)\subset\{0,\ldots,N\}^{\field{N}}\setminus Z_b$.
Fix $(x_k)_{k\in\mathbb N}\in \{0,\ldots,N\}^{\field{N}}\setminus Z_b$ and put
$z=\lim_{k\to\infty}f_{x_2,\ldots,x_k}(0)$.
Then
\begin{dmath*}
\pi(\sigma((x_k)_{k\in\mathbb N}))=\lim_{k\to\infty}f_{x_2,\ldots,x_k}(0)=z,
\end{dmath*}
and
\begin{dmath*}
T(\pi((x_k)_{k\in\mathbb N}))
=T\left(\lim_{k\to\infty}f_{x_1,\ldots,x_k}(0)\right)
=T\left(f_{x_1}(z)\right)=f_{n(f_{x_1}(z))}^{-1}(f_{x_1}(z)).
\end{dmath*}
Since $z\in A_*$, we have $f_{x_1}(z)\in f_{x_1}(A_*)$, and so
\begin{equation*}
x_1\leq n(f_{x_1}(z)).
\end{equation*}
Suppose towards a contradiction that $x_1<n(f_{x_1}(z))$. Then $f_{x_1}(z)\in f_{x_1+1}(A_*)$, and by \eqref{0<1} we have $z=1$. Therefore,
$f_{x_1}(1)=f_{x_1+1}(0)$ and $x_k=N$ for every $k\geq 2$, which is impossible as $(x_k)_{k\in\mathbb N}\not\in Z_b$.
In consequence $x_1=n(f_{x_1}(z))$. Hence $f_{n(f_{x_1}(z))}^{-1}(f_{x_1}(z))=z$, which yields that \eqref{diagram commutes} holds.
To complete the proof it is enough to proceed by induction with the use of \cref{diagram commutes}.
\end{proof}
Let us consider now the measure $\field{P}_{p_0,\ldots,p_N}$ defined on~$\{0,\ldots,N\}$ by
\begin{equation*}
\field{P}_{p_0,\ldots,p_N}(\{k\})=p_k.
\end{equation*}
Note that $\field{P}_{p_0,\ldots,p_N}$ is a probability measure by \eqref{prob}.
Further, we let $\field{P}$ be the product measure on $\{0,\ldots,N\}^{\field{N}}$ of $\mathbb N$ copies of the measure $\field{P}_{p_0,\ldots,p_N}$. It is known that the Bernoulli shift is strong-mixing for~$\field{P}$ (see e.g. \cite[Problem~4.3]{biswas2014ergodic} or \cite[Exercise 2.7.9]{EW2011}).
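Recall that a measure preserving transformation $S$ is \emph{strong-mixing} for a probability measure $\nu$ if $\lim_{m\to\infty}\nu(S^{-m}(A)\cap B)=\nu(A)\nu(B)$ for all measurable sets $A$ and $B$; this is the property of $\sigma$ that will be used in the proof of \cref{T is strong-mixing}.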
\begin{lemma}\label{nu is mupi}
If $B\subset\{0,\ldots,N\}^{\field{N}}\setminus Z_b$ is a Borel set, then
$\pi(B)$ is a Borel set and
\begin{equation}\label{Pmu}
\field{P}(B)=\mu(\pi(B)).
\end{equation}
\end{lemma}
\begin{proof}
We first prove that if $B\subset\{0,\ldots,N\}^{\field{N}}\setminus Z_b$ is a Borel set, then $\pi(B)$ is a Borel set as well.
As the Borel $\sigma$-algebra of $\{0,\ldots,N\}^{\field{N}}\setminus Z_b$ is generated by sets of the form
\begin{equation}\label{cylinder}
B=\left(\{x_1\}\times \cdots\times \{x_m\}\times \{0,\ldots,N\}^{\field{N}}\right)\setminus Z_b,
\end{equation}
where $m\in\mathbb N$ and $x_1,\ldots,x_m\in\{0,\ldots,N\}$, it is sufficient to show that $\pi(B)$ is a Borel set for every set of the form \eqref{cylinder}.
Fix a set $B$ of the form \eqref{cylinder} with $m\in\mathbb N$ and $x_1,\ldots,x_m\in\{0,\ldots,N\}$. If either \eqref{Nb} does not hold or \eqref{Nb} holds and $x_m\not\in N_b$, then by \cref{pi is a bijection} we have
\begin{dmath}\label{piB1}
\pi(B)=f_{x_1,\ldots,x_m}\left(\pi\left(\{0,\ldots,N\}^{\field{N}}\setminus Z_b\right)\right)=f_{x_1,\ldots,x_m}(A_*).
\end{dmath}
If \eqref{Nb} holds and $x_m\in N_b$, then by \cref{pi is a bijection} we have
\begin{dmath}\label{piB2}
\pi(B)=f_{x_1,\ldots,x_m}\left(\pi\left((\{0,\ldots,N\}^{\field{N}}\setminus Z_b)\setminus \{N\}^{\field{N}}\right)\right)=f_{x_1,\ldots,x_m}(A_*)\setminus f_{x_1,\ldots,x_m}(\{\ensuremath{\mathbf{1}}\}).
\end{dmath}
Since $A_*$ and $\{\ensuremath{\mathbf{1}}\}$ are compact sets and $f_0,\ldots,f_N$ are contractions, it follows that $f_{x_1,\ldots,x_m}(A_*)$ and $f_{x_1,\ldots,x_m}(\{\ensuremath{\mathbf{1}}\})$ are compact sets. In consequence, in both of the considered cases the set $\pi(B)$ is Borel.
Now we prove that \eqref{Pmu} holds for every Borel set $B\subset\{0,\ldots,N\}^{\field{N}}\setminus Z_b$.
Since two Borel probability measures defined on $\{0,\ldots,N\}^{\field{N}}\setminus Z_b$ that agree on cylinders are equal, it suffices to show that \eqref{Pmu} holds for every cylinder $B\subset\{0,\ldots,N\}^{\field{N}}\setminus Z_b$. Moreover, by the additivity of the measures, we only need to show that \eqref{Pmu} holds for every set of the form \eqref{cylinder}, where $m\in\mathbb N$ and $x_1,\ldots,x_m\in\{0,\ldots,N\}$.
Fix a set $B$ of the form \eqref{cylinder} with $m\in\mathbb N$ and $x_1,\ldots,x_m\in\{0,\ldots,N\}$. Then either \eqref{piB1} or \eqref{piB2} is satisfied, and by \cref{lem43,lem42} we see that in both cases we have
\begin{equation*}
\mu\left(f_{x_1,\ldots,x_m}^{-1}(\pi(B))\right)=\mu(A_*)=1.
\end{equation*}
This jointly with \cref{mu(B)} yields
\begin{dmath*}
\mu(\pi(B))=\mu\left(f_{x_1}\left(\cdots\left(f_{x_m}\left(f_{x_1,\ldots,x_m}^{-1}(\pi(B))\right)\right)\cdots\right)\right)=\prod_{i=1}^mp_{x_i}.
\end{dmath*}
Finally, note that $\field{P}(B)=\prod_{i=1}^mp_{x_i}$.
\end{proof}
\begin{lemma}\label{T is strong-mixing}
The transformation~$T$ is strong-mixing for~$\mu$.
\end{lemma}
\begin{proof}
The transformation $T$ is measure preserving for $\mu$ by \cref{measure preserving}. To prove that it is strong-mixing for~$\mu$, fix two Borel sets $A,B\subset A_*$. Then using \cref{pi is a bijection,nu is mupi}, the fact that the Bernoulli shift is strong-mixing for~$\field{P}$ and \cref{sigma pi T commute} we get
\begin{dmath*}
\mu(A)\mu(B)=\mu(\pi(\pi^{-1}(A)))\mu(\pi(\pi^{-1}(B)))
=\field{P}(\pi^{-1}(A))\field{P}(\pi^{-1}(B))
=\lim_{m\to\infty}\field{P}(\sigma^{-m}(\pi^{-1}(A))\cap\pi^{-1}(B))
=\lim_{m\to \infty}\field{P}(\pi^{-1}(T^{-m}(A))\cap \pi^{-1}(B))
=\lim_{m\to \infty}\field{P}(\pi^{-1}(T^{-m}(A)\cap B))
=\lim_{m\to \infty}\mu(T^{-m}(A)\cap B).
\end{dmath*}
The proof is complete.
\end{proof}
Denote by $\mathcal{M}^T(A_*)$ the set of all Borel probability measures, defined on the $\sigma$-algebra of all Borel subsets of the interval $[0,1]$, that are supported on $A_*$ and make the transformation $T$ measure preserving. Note that $\mu\in \mathcal{M}^T(A_*)$ by \cref{lem42,measure preserving}.
\begin{lemma}\label{mutually singular then independent}
Every family of pairwise mutually singular measures belonging to the set $\mathcal{M}^T(A_*)$ is linearly independent.
\end{lemma}
\begin{proof}
Fix $m\in\mathbb N\setminus\{1\}$, pairwise mutually singular measures $\mu_1,\ldots,\mu_m\in \mathcal{M}^T(A_*)$, numbers $\alpha_1,\ldots,\alpha_m\in \mathbb R\setminus\{0\}$ and assume by contradiction that
\begin{equation*}\label{independent}
\sum_{i=1}^m \alpha_i\mu_i=0.
\end{equation*}
Since the measures are pairwise mutually singular, for all $i,j\in\{1,\ldots,m\}$ with $i\not=j$ there are Borel sets $A_i^j$ and $A_j^i=X\setminus A_i^j$, where $X=[0,1]$, such that $\mu_i(A_i^j)=\mu_j(A_j^i)=0$. Put $A_m=\bigcup_{i=1}^{m-1}A_m^i$. Then
\begin{equation*}
0\leq \mu_m(A_m)=\mu_m\left(\bigcup_{i=1}^{m-1}A_m^i\right)\leq \sum_{i=1}^{m-1}\mu_m(A_m^i)=0
\end{equation*}
and for every $j\in\{1,\ldots,m-1\}$ we have
\begin{dmath*}
1\geq \mu_j(A_m)=\mu_j\left(X\setminus \bigcap_{i=1}^{m-1}A_i^m\right)
\geq \mu_j(X\setminus A_j^m)\geq \mu_j(X)-\mu_j(A_j^m)=1.
\end{dmath*}
In consequence
\begin{equation*}
\alpha_m=\sum_{i=1}^{m}\alpha_i\mu_i(X\setminus A_m)=0,
\end{equation*}
which contradicts the assumption that $\alpha_m\neq0$. The proof is complete.
\end{proof}
Now we are in a position to prove that the set $\mathcal{W}$ is linearly independent.
Fix $m\in\mathbb N$ and $m$ different functions from $\mathcal{W}$, and consider the corresponding measures $\mu_1,\ldots,\mu_m\in \mathcal{M}^T(A_*)$. From \cref{T is strong-mixing} we infer that the transformation~$T$ is ergodic for each of these measures. Thus $\mu_1,\ldots,\mu_m$ are extreme points of the set $\mathcal{M}^T(A_*)$ (see \cite[Theorem~4.4]{EW2011} or \cite[Proposition 12.4]{P2001}), and hence they are pairwise mutually singular. Since a linear relation among the fixed functions yields the same linear relation among the measures $\mu_1,\ldots,\mu_m$, invoking \cref{mutually singular then independent} gives the claim.
\subsection{An application of Theorem~\ref{thm51}}
In \cite{M1985} Janusz Matkowski posed a problem asking about the existence of nonlinear monotonic and continuous solutions $\Phi\colon[0,1]\to\mathbb R$ of a very particular case of the equation
\begin{equation}\label{e1}
\Phi(x)=\sum_{n=0}^{N}\Phi(f_n(x))-\sum_{n=1}^{N}\Phi(f_n(0)).
\end{equation}
Motivated by this problem, denote by $\mathcal M$ the vector space spanned by ${\mathcal W}\cup\{\mathds{1}\}$, where $\mathds{1}$ denotes the constant function that equals $1$ on $[0,1]$. Note that by \cref{thm51} and the fact that $\phi(0)=0$ for each $\phi\in\mathcal W$, the set ${\mathcal W}\cup\{\mathds{1}\}$ is a basis of $\mathcal M$.
\begin{prop}\label{prop52}
Every function belonging to $\mathcal M$ is a continuous solution of equation
\eqref{e1}. Moreover, if $\phi_1,\ldots,\phi_m\in{\mathcal W}$ and $\alpha_1,\ldots,\alpha_m\in\mathbb R$ are of the same sign, then the function $\sum_{i=1}^m\alpha_{i}\phi_{i}+\alpha_{0}$ is monotone for every $\alpha_{0}\in\mathbb R$.
\end{prop}
\begin{proof}
Fix $\Phi\in\mathcal M$. Then there exist $\alpha_0,\ldots,\alpha_m\in\mathbb R$ and $\phi_1,\ldots,\phi_m\in{\mathcal W}$ such that $\Phi=\sum_{i=1}^m\alpha_{i}\phi_{i}+\alpha_{0}$. Obviously, $\Phi$ is continuous. According to the first assertion of~\cref{lem22} we see that $\phi_i(f_0(0))=0$ for every \mbox{$i\in\{1,\ldots,m\}$}, and hence applying also \eqref{e}, we obtain
\begin{equation*}
\begin{split}
\sum_{n=0}^{N}\Phi(f_n(x))-\sum_{n=1}^{N}\Phi(f_n(0))&=
\sum_{n=0}^{N}\left(\sum_{i=1}^m\alpha_{i}\phi_{i}(f_n(x))+\alpha_{0}\right)\\
&\hspace*{3ex}-\sum_{n=1}^{N}\left(\sum_{i=1}^m\alpha_{i}\phi_{i}(f_n(0))+\alpha_{0}\right)\\
&=\sum_{i=1}^m\alpha_{i}\left(\sum_{n=0}^{N}\phi_{i}(f_n(x))-\sum_{n=0}^{N}\phi_{i}(f_n(0))\right)+\alpha_{0}\\
&=\sum_{i=1}^m\alpha_{i}\phi_{i}(x)+\alpha_{0}=\Phi(x)
\end{split}
\end{equation*}
for every $x\in[0,1]$.
The moreover part of the assertion is clear.
\end{proof}
\end{document}
\begin{document}
\title[Equivariant main conjecture]{On the Equivariant Main Conjecture of Iwasawa Theory}
\author{Malte Witte}
\address
{
Mathematisches Institut\\
Universit\"at Leipzig\\
Postfach 920\\
D-04109 Leipzig\\
Germany
}
\email{[email protected]}
\subjclass{Primary 11R23; Secondary 11R18, 19F27, 11R42}
\date{\today}
\begin{abstract}
Refining results of David Burns and Cornelius Greither,
respectively Annette Huber and Guido Kings, we formulate and prove
an equivariant version of the main conjecture of Iwasawa theory
for abelian number fields.
\end{abstract}
\maketitle
\section*{Introduction}
The main conjecture of Iwasawa theory for an abelian number field
in its classical formulation describes the Galois-module structure
of the class groups in the limit over the intermediate fields of
its cyclotomic $\Zp$-extension. The eigenspace of this limit with
respect to a Dirichlet character $\chi$ associated to the ground
field is related to the corresponding $p$-adic $L$-function or to
the eigenspace of the group of global units modulo cyclotomic
units, depending on the parity of $\chi$. The conjecture was
proved in 1986 by B.~Mazur and A.~Wiles \cite{MW1}.
Recently, D.~Burns and C.~Greither \cite{BG1} deduced an
equivariant version of the main conjecture as the key to their
proof of the equivariant Tamagawa number conjecture. Here,
`equivariant' refers to the fact that one retains the full Galois
module structure instead of decomposing the modules by characters.
A.~Huber and G.~Kings \cite{HK1} also use a variant of the Iwasawa
main conjecture in their proof of the Tamagawa number conjecture
for Dirichlet motives. It consists, like the classical
formulation, of a separate statement for each Dirichlet character.
In particular, it is weaker than the formulation in \cite{BG1}.
In the present article, we use this statement and the Theorem of
Ferrero-Washington to reprove the equivariant conjecture of
\cite{BG1} in a slightly more general form.
Fix an odd prime $p$ and let $(K_{\infty},\rho,U)$ be a triple
consisting of the cyclotomic extension $K_\infty$ of an abelian
number field, a one-dimensional representation $\rho$ of
$\GAL(\algclosure{\Rat}/\Rat)$ on a finite extension $\ValR$ of
$\Zp$, and an open subscheme $U$ of $\SPEC \Int$, subject to the
condition that the ramification index in $K_\infty/\Rat$ of every
point in $U$ is prime to $p$.
Like \cite{BG1} and \cite{HK1} we use continuous \'etale
cohomology to assign to each of such triples a complex
$\cmplx{R}=\RightD\IwaSect(U_{K_\infty},\ValR(\rho))$ of modules
over the profinite group ring
$\Omega=\power{\ValR}{\GAL(K_\infty/\Rat)}$. Further, we define a
cyclotomic element $c(U_{K_\infty},\rho)$ in the first cohomology
module of $\cmplx{R}$ and a $p$-adic $L$-element
$L(U_{K_\infty},\rho^{-1}\cyclchar)$ in the quotient ring of
$\Omega$. Here, $\cyclchar$ denotes the cyclotomic character. The
quotient $\cmplx{R}/\Omega c(U_{K_\infty},\rho)$ turns out to be a
perfect complex that is torsion, i.e. acyclic after base change to
the quotient ring. Note that this is no longer true if we drop the
condition on the type of ramification in $U$.
Using the determinant functor of F.~Knudsen and D.~Mumford we can
attach to each perfect torsion complex $\cmplx{P}$ an invertible
fractional ideal of $\Omega$ called the characteristic ideal of
$\cmplx{P}$. Our main result then reads as follows.
\begin{thm}[see Theorem \ref{thm:Main Theorem}]\label{thm:premain}\
\begin{description}
\item[(i) Vanishing of the $\mu$-invariant] Let $\ideal{p}$ be a
prime ideal of codimension $1$ of $\Omega$, with $p\in\ideal{p}$.
Then $(\cmplx{R}/\Omega c(U_{K_\infty},\rho))_{\ideal{p}}$ is
acyclic.
\item[(ii) Iwasawa main conjecture]
$L(U_{K_\infty},\rho^{-1}\cyclchar)$ generates the characteristic
ideal of $\cmplx{R}/\Omega c(U_{K_\infty},\rho)$.
\end{description}
\end{thm}
The formulation of the main conjecture in \cite{BG1} corresponds
to Theorem~\ref{thm:premain}.(ii) for all triples
$(\CyclF{np^{\infty}},\cyclchar^r,\SPEC \algebra{\Int}{1/np})$,
the version in \cite{HK1} to triples
$(\Rat_\infty,\chi\cyclchar^r,\SPEC \algebra{\Int}{1/p})$. Here,
$r$ and $n$ are integers, $\Rat_\infty$ is the $\Zp$-extension of
$\Rat$ and $\chi$ is any Dirichlet character.
The relation to the classical Iwasawa main conjecture is
established by the fact that the first and second cohomology
modules of the complex $\cmplx{R}/\Omega c(U_{K_\infty},\rho)$ for
$\rho=\cyclchar$ are essentially given by the limit of the
$p$-primary parts of the global units modulo cyclotomic units,
respectively of the class groups, taken over the intermediate
fields of $K_{\infty}/\Rat$. Using
$\cmplx{R}/\Omega c(U_{K_\infty},\rho)$ in lieu of these
classical objects leads to a smoother formulation of the
conjecture that circumvents the problems usually connected to $p$
dividing the order of $\GAL(K/\Rat)$ (see the discussion in
\cite{HK1}).
The main idea of the proof of Theorem~\ref{thm:premain} is
essentially the same as in \cite{BG1}. However, we can clarify the
argument considerably by using the result of \cite{HK1}.
Originally, D.~Burns and C.~Greither derived their theorem from
the result of B.~Mazur and A.~Wiles. This approach necessitates
some rather involved deduction steps to deal with the first
cohomology group of $\cmplx{R}/\Omega c(U_{K_\infty},\rho)$, in
particular for Theorem~\ref{thm:premain}.(i). (Note that this step
of the argument in \cite{BG1} contains an inaccuracy that was
later corrected in the appendix of \cite{Fl1}.) The additional
strength of the main conjecture in \cite{HK1} allows us to present
a comparatively quick proof of this part of the theorem. Recall
that A.~Huber and G.~Kings do not use the result of \cite{MW1}.
Instead, they give an independent proof of their statement, using
the Euler system approach of V.~A.~Kolyvagin and K. Rubin
\cite{Ru1}.
As in \cite{BG1} and \cite{HK1}, we do not treat the case $p=2$, but
this gap has meanwhile been filled by M.~Flach in \cite{Fl1}.
The article is organised as follows. In
Section~\ref{sec:CharIdeals} we introduce the characteristic ideal
of a perfect torsion complex. Section~\ref{sec:ProfGroupRing}
consists of a collection of algebraic properties of $\Omega$ that
turn out to be useful in the later sections.
The definition of the complex
$\RightD\IwaSect(U_{K_\infty},\ValR(\rho))$ is given in
Section~\ref{sec:IwasawaCohomology}. In the subsequent section we
calculate its cohomology modules in the special case
$\rho=\cyclchar$, which is closely related to classical Iwasawa
theory.
To deal with the ramification of $\rho$ we need an explicit
description of the relative cohomology modules associated to
closed subschemes of $U$. This is achieved in
Section~\ref{sec:LocalFactors}.
In Section~\ref{sec:CyclotomicElements} we extend the classical
construction of cyclotomic elements and $L$-elements to our
setting. The final section is devoted to the proof of the main
theorem.
\section{Characteristic Ideals}\label{sec:CharIdeals}
The notion of the characteristic ideal of a perfect torsion
complex is a variant of the usual determinant functor of
F.~Knudsen and D.~Mumford \cite{KM1}. It is less flexible than the
latter, but easier to handle.
Let $R$ be any commutative ring (with unit) and denote by
$\quotient{R}$ the total ring of fractions of $R$. Further, we
write $\cart(R)$ for the abelian group of invertible fractional
ideals, i.e. $R$-submodules $I$ of $\quotient{R}$ which are
locally free of rank $1$ and which satisfy
$\quotient{R}\tensorp{R}I=\quotient{R}$.
We can view $\cart$ as a functor from the category of commutative
rings to abelian groups if we restrict the morphisms of the former
to the following class.
\begin{defn}
We call a ring homomorphism $\phi: R\To S$ extendable if it
extends to a homomorphism $\quotient{R}\To\quotient{S}$, also
denoted by $\phi$.
\end{defn}
Examples of extendable homomorphisms include all flat
homomorphisms and all integral extensions.
If $\phi: R\To S$ is extendable, then $\cart(\phi)$ is given by
$$
\cart(\phi)(I)=\phi(I)S
$$
for all $I\in\cart(R)$.
Assume that $R$ is noetherian. Then an element of $\cart(R)$ is
uniquely determined by the following local conditions.
\begin{prop}\label{prop:comparison of ideals}
Let $R$ be noetherian and $I,J\in\cart(R)$. Then $I=J$ if and only
if $IR_\ideal{p}=JR_\ideal{p}$ for all nonzerodivisors $r$ and all
primes $\ideal{p}$ associated to $R/rR$. These are exactly the
primes of codimension $1$ if $R$ is Cohen-Macaulay.
\end{prop}
\begin{proof}
This follows by the same argument as \cite{Ei1}, Prop. 11.3.
\end{proof}
\begin{defn}
We call a complex $\cmplx{P}$ of $R$-modules a torsion complex if
$\quotient{R}\tensorp{R}\cmplx{P}$ is acyclic. $\cmplx{P}$ is
called perfect if it is quasi-isomorphic to a bounded complex of
finitely generated projective $R$-modules.
\end{defn}
Let $\DET_R \cmplx{P}$ denote the determinant of $\cmplx{P}$
according to F.~Knudsen and D.~Mumford \cite{KM1}. If $\cmplx{P}$
is a perfect torsion complex, then the natural isomorphism
$$\quotient{R}\tensorp{R}\DET_R\cmplx{P}=\quotient{R}$$
allows us to view $\DET_R\cmplx{P}$ as an invertible fractional
ideal of $R$.
\begin{defn}
We call $\CHAR \cmplx{P} =(\DET_R\cmplx{P})^{-1}\in\cart(R)$ the
characteristic ideal of $\cmplx{P}$.
\end{defn}
The characteristic ideal enjoys the following properties.
\begin{prop}\label{prop:properties of char}
Let $\cmplx{P}$ be a perfect torsion complex of $R$-modules.
\begin{enumerate}
\item $\CHAR \cmplx{P}$ depends only on the quasi-isomorphism
class of $\cmplx{P}$.
\item $\CHAR \cmplx{P}[1]=(\CHAR \cmplx P)^{-1}$.
\item If $\cmplx{P}_1\To\cmplx{P}_2\To\cmplx{P}_3$ is a
distinguished triangle of perfect torsion complexes in the derived
category, then
$$\CHAR \cmplx{P}_2=\CHAR\cmplx{P}_1\CHAR\cmplx{P}_3.$$
\item If $\phi:R\To S$ is an extendable homomorphism, then
$\LeftD\phi_*(\cmplx{P})=S\dtensorp{R}\cmplx{P}$ is a perfect
torsion complex of $S$-modules and
$$\CHAR\LeftD\phi_*(\cmplx{P})=\cart(\phi)(\CHAR\cmplx{P}).$$
\item If the cohomology modules of $\cmplx{P}$ are themselves
perfect, i.e. of finite $\Tor{}{}$-dimension, then
$$\CHAR\cmplx{P}=\prod_{n\in\Int}(\CHAR \HF^n\cmplx{P})^{(-1)^n}.$$
\item If $R$ is a noetherian and normal domain and $M$ any torsion
module of finite projective dimension (considered as complex
concentrated in degree $0$), then $\CHAR M$ coincides with the
content of $M$, as defined in \cite{Bu1}, VII, \S4.5. In
particular, if $R=\power{\Zp}{T}$, then $\CHAR M$ is the
characteristic ideal of Iwasawa theory.
\end{enumerate}
\end{prop}
\begin{proof}
Everything follows easily from the corresponding properties of the
determinant functor, as given in \cite{KM1}.
\end{proof}
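For instance, if $R=\power{\Zp}{T}$ and $f\in R$ is a nonzero element, then $M=R/fR$ is a perfect torsion module and $\CHAR M=fR$ by (vi), while for the complex $\cmplx{P}=(R\xrightarrow{f}R)$ concentrated in degrees $0$ and $1$ properties (ii) and (v) give $\CHAR\cmplx{P}=f^{-1}R$.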
\begin{rem}
If $R$ is not reduced, then the usual determinant functor is
additive only for the class of `true' triangles. In the following,
we will only consider reduced rings. However, note that in our
setting, (iii) is indeed true for arbitrary distinguished
triangles. The reason is that one can always replace the
distinguished triangle by a true triangle of strictly perfect
torsion complexes, the particular choice of which, according to
(i), does not matter. For the determinant functor, it is this
non-canonical choice that causes trouble.
\end{rem}
\begin{rem}
One can also deduce Proposition \ref{prop:properties of char} from
the results of \cite{BreuBurns} on the more sophisticated notion
of the refined Euler characteristic. To this end, note that for a
perfect torsion complex $\cmplx{P}$, the only trivialisation is
the zero map and $\CHAR \cmplx{P}$ is the image of
$-\chi(\cmplx{P},0)$ under the natural homomorphism
$K_0(R,\quotient{R})\To\cart(R)$.
\end{rem}
\section{The Profinite Group Ring of a
$\Zp$-Extension}\label{sec:ProfGroupRing}
In this section we will assemble some useful facts about
cyclotomic $\Zp$-extensions and profinite group rings. A large
part of the material can also be found in \cite{BG1}, \S6.1.
Throughout this article, $p$ will denote a fixed odd prime. Let
$\Rat_\infty$ be the unique $\Zp$-extension of $\Rat$. The
cyclotomic $\Zp$-extension of a number field is given by
$K_\infty=K\Rat_\infty$. We shall always make the additional
assumption that $K$ is an abelian extension of $\Rat$. The Theorem
of Kronecker-Weber then shows that there exists the following
distinguished choice of subfields of $K_\infty$.
\begin{defn}
Let $K_0\subset K_\infty$ be the subfield that is uniquely
determined by the following two properties.
\begin{enumerate}
\item
$\GAL(K_\infty/\Rat)=\GAL(K_0/\Rat)\times\GAL(\Rat_\infty/\Rat)$,
\item $p^2$ does not divide the conductor of $K_0$.
\end{enumerate}
We write $K_n$ for the subfield of $K_\infty$ of degree $p^n$ over
$K_0$.
\end{defn}
Let $\ValR$ be the valuation ring of an arbitrary finite extension
of $\Rat_p$ and write
$$
\Omega=\power{\ValR}{\GAL(K_\infty/\Rat)}=\varprojlim_n\algebra{\ValR}{\GAL(K_n/\Rat)}
$$
for the profinite group ring with coefficients in $\ValR$. Assume
for simplicity that $\ValR$ contains all values of the characters
of $\GAL(K_0/\Rat)$. If $P_\infty$ is the maximal $p$-extension of
$\Rat$ inside $K_\infty$, then
$$
\Omega\isomorph\prod_{\theta}\power{\ValR}{\GAL(P_\infty/\Rat)},
$$
where the product runs through the characters $\theta$ of
$\GAL(K_\infty/P_\infty)$. Observe that
$\power{\ValR}{\GAL(P_\infty/\Rat)}$ is a local Cohen-Macaulay
ring of Krull dimension $2$, but it is not regular unless
$P_\infty=\Rat_\infty$.
The normalisation of $\Omega$ in its total quotient ring
$\quotient{\Omega}$ is given by
$$
\widetilde{\Omega}\isomorph\prod_{\chi}\power{\ValR}{\GAL(\Rat_\infty/\Rat)}.
$$
Here, $\chi$ runs through the characters of $\GAL(K_0/\Rat)$. Note
that
$$
\algebra{\Omega}{1/p}=\algebra{\widetilde{\Omega}}{1/p}.
$$
The prime ideals $\ideal{p}$ of codimension $1$ of $\Omega$ with
$p\in\ideal{p}$ play a special role in our considerations. Recall
that a torsion module $M$ over $\power{\Zp}{T}$ has vanishing
Iwasawa $\mu$-invariant if $M$ is finitely generated as
$\Zp$-module. We generalise this as follows.
\begin{lem}\label{lem:criterion for vanishing}
Let $M$ be an $\Omega$-module which is finitely generated as
$\ValR$-module. Then $M_{\ideal{p}}=0$ for all prime ideals
$\ideal{p}$ of codimension $1$ containing $p$.
\end{lem}
\begin{proof}
We can view $M$ as a module over
$$
\power{\Zp}{T}\isomorph\power{\Zp}{\GAL(K_\infty/K_0)}
$$
via the natural inclusion
$$
i:\power{\Zp}{\GAL(K_\infty/K_0)}\hookrightarrow\power{\ValR}{\GAL(K_\infty/\Rat)}.
$$
The structure theorem for $\power{\Zp}{T}$-modules (\cite{Wa1},
Prop. 13.19) shows that $M_{(p)}=0$, since $\power{\Zp}{T}/(p^n)$
is not finitely generated over $\Zp$ for integers $n\geq1$. The
statement for $\power{\ValR}{\GAL(K_\infty/\Rat)}$ follows because
$i^{-1}(\ideal{p})=(p)$.
\end{proof}
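We note in passing why, for a finitely generated torsion $\power{\Zp}{T}$-module, being finitely generated over $\Zp$ is indeed equivalent to the vanishing of the classical $\mu$-invariant: by the Weierstrass preparation theorem, $\power{\Zp}{T}/(f)$ is a free $\Zp$-module of rank $\deg f$ whenever $f$ is a distinguished polynomial, whereas $\power{\Zp}{T}/(p^k)$ is not finitely generated over $\Zp$ for $k\geq1$; now apply the structure theorem cited above.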
We will now determine the group of invertible ideals of $\Omega$.
Since $\Omega$ is semilocal, it is given by
$$
\cart(\Omega)=\mult{\quotient{\Omega}}/\mult{\Omega}.
$$
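Here we use the standard fact that an invertible module over a semilocal ring is free of rank one; consequently every invertible $\Omega$-submodule of $\quotient{\Omega}$ is of the form $x\Omega$ with $x\in\mult{\quotient{\Omega}}$, which yields the above description of $\cart(\Omega)$.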
In our main statement we compare two elements of $\cart(\Omega)$.
If $\ValR'$ is a faithfully flat extension of $\ValR$, e.g. the
valuation ring of a finite extension of $\quotient{\ValR}$, then
the induced map
$$
\cart(\Omega)\To\cart(\ValR'\tensorp{\ValR}\Omega)
$$
is injective. Therefore, the above assumption that $\ValR$
contains the values of the characters of $\GAL(K_0/\Rat)$ is
no restriction for our purposes. (Alternatively, it can be
circumvented by using components instead of characters, as in
\cite{MW1}.)
If $\Rat_\infty\subset L_\infty \subset K_\infty$ is any
intermediate extension, we write
$$
\prKL[K_{\infty}/L_{\infty}]:\power{\ValR}{\GAL(K_{\infty}/\Rat)}\To\power{\ValR}{\GAL(L_\infty/\Rat)}
$$
for the natural projection. Note that the ring homomorphism
$\prKL[K_{\infty}/L_{\infty}]$ is extendable. Indeed, the induced
map $\widetilde{\prKL}$ between the normalisations of both rings
is extendable for almost trivial reasons. Since the inclusion
$\Omega\To\widetilde{\Omega}$ is extendable and maps zero divisors
to zero divisors, it follows that $\prKL$ is extendable as well.
\section{Iwasawa Cohomology}\label{sec:IwasawaCohomology}
Consider an open subscheme $U$ of $\SPEC \Int$ and let $S$ denote
its closed complement. If $F/\Rat$ is a finite field extension, we
set
$$
U_F=U\times\SPEC \Order[F],\qquad S_F=S\times\SPEC \Order[F],
$$
where $\Order[F]$ denotes the ring of integers of $F$. Write
$$
j_F: \SPEC F\To U_F
$$
for the inclusion of the generic point. As before, $K_\infty$ will
denote the cyclotomic $\Zp$-extension of an abelian number field.
Let $M(\rho)$ be a finitely generated $\ValR$-module $M$ together
with a continuous representation
$$
\rho: \GAL(\algclosure{\Rat}/\Rat)\To\AUT_{\ValR}M
$$
(where we give $\AUT_{\ValR}M$ its profinite topology). Let
further
$$
\iota: \GAL(\algclosure{\Rat}/\Rat)\To\mult{\Omega}, \quad
\iota(g)=\bar{g}^{-1}\in\GAL(K_{\infty}/\Rat)
$$
denote the contragredient of the natural representation. The
$\algebra{\Omega}{\GAL(\algclosure{\Rat}/\Rat)}$-module
$$
\Ind M(\rho)=\Omega(\iota)\tensorp{\ValR}M(\rho)
$$
gives rise to a projective system of \'etale sheaves
$$
j_{\Rat*}\Ind M(\rho)=
\bigl(j_{\Rat*}\bigl(\algebra{\ValR/p^n\ValR}{\GAL(K_n/\Rat)}(\iota)\tensorp{\ValR}M(\rho)
\bigr)\bigr)_{n=1}^{\infty}
$$
on $U$. (We reemphasise that the action of
$\GAL(\algclosure{\Rat}/\Rat)$ on the module
$\algebra{\ValR/p^n\ValR}{\GAL(K_n/\Rat)}(\iota)$ is given by
$\iota$, i.e. $\algebra{\ValR/p^n\ValR}{\GAL(K_n/\Rat)}$ is
considered as trivial $\GAL(\algclosure{\Rat}/\Rat)$-module.)
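Spelled out, the $\GAL(\algclosure{\Rat}/\Rat)$-action on $\Ind M(\rho)$ is the diagonal one,
$$
g\cdot(\omega\tensor m)=\bar{g}^{-1}\omega\tensor\rho(g)m\qquad (\omega\in\Omega,\ m\in M),
$$
with $\bar{g}$ the image of $g$ in $\GAL(K_\infty/\Rat)$; it commutes with the $\Omega$-module structure, which is given by multiplication on the first factor.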
\begin{defn}\label{defn:Iwasawa complex}
We define the Iwasawa complex of $M(\rho)$ over $U$ to be the
cochain complex of continuous \'etale cohomology
$$
\RightD\IwaSect(U_{K_{\infty}},M(\rho))=\RightD(\varprojlim_n\etSect)(U,j_{\Rat
*}\Ind M(\rho)),
$$
as constructed by U.~Jannsen in \cite{Ja1}. If $Z$ is a closed
subscheme of $U$, we define
$$
\RightD\IwaSect(U_{K_{\infty}},Z,M(\rho))=
\RightD(\varprojlim_n{\etSect})(U,Z,j_{\Rat *}\Ind M(\rho))
$$
to be the complex of continuous \'etale cohomology with support in
$Z$. These complexes are to be understood as objects of the
derived category of $\Omega$-modules. Their $i$-th cohomology
modules will be denoted by $\IwaH^i(U_{K_\infty},M(\rho))$,
respectively $\IwaH^i(U_{K_\infty},Z,M(\rho))$.
\end{defn}
\begin{rem}
Alternatively, it should also be possible to use the formalism of
T.~Ekedahl \cite{Ek}.
\end{rem}
Here are some basic properties of
$\RightD\IwaSect(U_{K_{\infty}},M(\rho))$.
\begin{prop}\label{prop:IwaCohomology}
Assume $p\notin U$.
\begin{enumerate}
\item For all $i\in\Int$,
$$
\IwaH^i(U_{K_\infty},M(\rho))= \varprojlim_n\etH^i(U_{K_n},j_{K_n
*}M(\rho)),
$$
where the limit is taken with respect to the corestriction maps.
\item In particular,
$$
\IwaH^0(U_{K_\infty},M(\rho))=0.
$$
\end{enumerate}
\end{prop}
\begin{proof}
By \cite{Mi2}, Theorem~II.2.13 the modules
$\etH^i(U_{K_n},j_{K_n*}M(\rho))$ are finite. The asserted
equality in (i) follows by \cite{Ja1}, Proposition~1.6 and
Lemma~1.15.
It remains to verify that $\IwaH^0(U_{K_\infty},M(\rho))=0$. As
$M$ is noetherian, there is an $n_0$ such that the inflation map
\begin{multline*}
\etH^0(U_{K_{n_0}},j_{K_{n_0}*}M(\rho))=\\
M(\rho)^{\GAL(\algclosure{\Rat}/K_{n_0})}\hookrightarrow M(\rho)^{\GAL(\algclosure{\Rat}/K_n)}\\
=\etH^0(U_{K_n},j_{K_n*}M(\rho))
\end{multline*}
is the identity for $n\geq n_0$ and therefore, the corestriction
map is multiplication by $p^{n-n_0}$. Hence, the limit over the
corestriction maps vanishes.
\end{proof}
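Note that the last step of the proof uses only the following elementary observation: if $A$ is a finite $\ValR$-module and the transition maps of a projective system $\cdots\To A\To A$ are given by multiplication by $p$, then the limit equals $\bigcap_{k}p^kA=0$.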
If both $U_{K_{\infty}}$ and $\rho$ are unramified over $U$, then
all sheaves in the projective system $j_{\Rat*}\Ind M(\rho)$ are
locally constant. Under these circumstances one can identify
$\RightD\IwaSect(U_{K_{\infty}},M(\rho))$ with the complex of
continuous cochains of the topological $\fG(U)$-module $\Ind
M(\rho)$, where $\fG(U)$ denotes the \'etale fundamental group of
$U$ (see Proposition~II.2.9 of \cite{Mi2}). This setting has been
extensively explored by J.~Nekov{\'a}{\v{r}} in \cite{Nek1}. We
recall some of the consequences.
\begin{prop}\label{prop:cohom dim}
Assume that $U_{K_{\infty}}$ and $\rho$ are unramified over $U$.
Then $\RightD\IwaSect(U_{K_{\infty}},M(\rho))$ is acyclic outside
degrees $1$ and $2$.
\end{prop}
\begin{proof}
The cohomological $p$-dimension of $\fG(U)$ is $2$ (see
\cite{Ne1}, Theorem~8.3.19).
\end{proof}
\begin{prop}\label{prop:tensor products}
Let $M(\rho)$ be free as an $\ValR$-module and let $W$ be a
finitely generated $\Omega$-module. Assume that $U_{K_{\infty}}$
and $\rho$ are unramified over $U$. Then there exists a natural
quasi-isomorphism
$$
W\dtensorp{\Omega}\RightD\IwaSect(U_{K_\infty},M(\rho))=
\RightD(\varprojlim_n\etSect)(U,j_{\Rat *}(W\tensorp{\Omega}\Ind
M(\rho))).
$$
\end{prop}
\begin{proof}
See \cite{Nek1}, Proposition~3.4.4.
\end{proof}
Observe that for any
$\rho:\GAL(\algclosure{\Rat}/\Rat)\To\mult{\ValR}$ we can find a
cyclotomic $\Zp$-extension $K_{\infty}$ of an abelian number field such that $\rho$
factors through $\GAL(K_\infty/\Rat)$. We will then denote by
$$
\Tw{\rho}:\power{\ValR}{\GAL(K_{\infty}/\Rat)}\To\power{\ValR}{\GAL(K_\infty/\Rat)}
$$
the ring automorphism that maps $g\in\GAL(K_\infty/\Rat)$ to
$\rho(g)g$. For any ring homomorphism $f:R\To S$ and any
$R$-module $M$ we write
$$
f_*M=S\tensorp{R}M
$$
for the base extension to $S$.
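For later use we record the simplest instance of the twist: since $\Tw{\rho}$ is $\ValR$-linear with $\Tw{\rho}(g)=\rho(g)g$, we have
$$
\Tw{\rho}(1-g)=1-\rho(g)g
$$
for every $g\in\GAL(K_\infty/\Rat)$; elements essentially of this shape will reappear as the Euler factors of Section~\ref{sec:CyclotomicElements}.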
\begin{prop}\label{prop:base change comps}
Let $M(\rho)$ be free as an $\ValR$-module. Assume that $\rho$ is
unramified outside a finite set of primes. Choose $U$ such that
for any $l\in U$ the ramification index of $l$ in
$K_{\infty}/\Rat$ is prime to $p$. Then
\begin{enumerate}
\item $\RightD\IwaSect(U_{K_\infty},M(\rho))$ is perfect,
\item for any intermediate field $\Rat_\infty\subset
L_\infty\subset K_\infty$ there exists a natural quasi-isomorphism
$$
\LeftD\prKL[K_{\infty}/L_{\infty}*]\RightD\IwaSect(U_{K_\infty},M(\rho))=
\RightD\IwaSect(U_{L_\infty},M(\rho)),
$$
\item for any $\chi:\GAL(K_\infty/\Rat)\To\mult{\ValR}$ there
exists a natural quasi-isomorphism
$$
\Tw{\chi*}\RightD\IwaSect(U_{K_\infty},M(\rho))=
\RightD\IwaSect(U_{K_\infty},M(\chi^{-1}\rho)).
$$
\end{enumerate}
\end{prop}
\begin{proof}
Let $V$ be an open subscheme of $U$ such that both $\rho$ and
$V_{K_\infty}$ are unramified over $V$. The localisation triangle
$$
\RightD\IwaSect(U_{K_\infty},U-V,M(\rho))\To
\RightD\IwaSect(U_{K_\infty},M(\rho))\To
\RightD\IwaSect(V_{K_\infty},M(\rho))
$$
(\cite{Ja1}, 3.6) implies that to prove (i), it is sufficient to
show that the two outer complexes are perfect. The right complex
is immediately seen to be perfect by Proposition~\ref{prop:tensor
products}. We will prove in Proposition~\ref{prop:local factors}
that the left complex is perfect as well.
By Remark \ref{rem:Local factors tensor} and the localisation
triangle it also suffices to prove (ii) and (iii) for the scheme
$V$. Claim (ii) then follows directly from the above proposition.
For (iii) it remains to notice that
\begin{align*}
\Tw{\chi*}\Ind M(\rho)&\To \Ind M(\chi^{-1}\rho), \\
1\tensor w\tensor m&\mapsto \Tw{\chi}(w)\tensor m\qquad (
w\in\Omega(\iota), m\in M(\rho))
\end{align*}
is a $\GAL(\algclosure{\Rat}/\Rat)$-equivariant isomorphism of
$\Omega$-modules.
\end{proof}
\section{Cohomology of
$\ValR(\cyclchar)$}\label{sec:Zp1Cohomology}
In this section we will calculate the cohomology of the
one-dimensional representation $\ValR(\cyclchar)$ given by the
cyclotomic character
$$
\cyclchar: \GAL(\algclosure{\Rat}/\Rat)\To\mult{\Zp}.
$$
The following proposition establishes the link to the objects of
classical Iwasawa theory.
\begin{prop}\label{prop:cohom of Zp(1)}
Let $U$ be an open subscheme of $X=\SPEC \Int$ such that $p$ lies
in the complement $S$ of $U$.
\begin{enumerate}
\item There exists a canonical isomorphism of $\Omega$-modules
$$
\IwaH^1(U_{K_\infty},\ValR(\cyclchar))=\varprojlim_n
\ValR\tensorp{\Int}\Gm(U_{K_n}),
$$
where $\Gm$ denotes the multiplicative group.
\item The following
sequence of $\Omega$-modules is exact
\begin{gather*}
0\To\varprojlim_n\ValR\tensorp{\Int}\Gm(X_{K_n})\To
\IwaH^1(U_{K_\infty},\ValR(\cyclchar))\\
\To\varprojlim_n\etH^0(S_{K_n},\ValR)
\To\varprojlim_n\ValR\tensorp{\Int}\Pic(X_{K_n})\To\\
\IwaH^2(U_{K_\infty},\ValR(\cyclchar))
\To\varprojlim_n\etH^1(S_{K_n},\ValR)\To\ValR\To 0.
\end{gather*}
\end{enumerate}
\end{prop}
\begin{proof}
This is proved in the same way as \cite{BG1}, Proposition~5.1. The
idea is to combine the calculation of the cohomology groups of
$\Gm$ in \cite{Mi2}, Proposition~II.2.1, with the Kummer exact
sequence on $U_{K_n}$ and then to pass to the limit. The last term
can then be identified as the tensor product of
$$
\Zp=\varprojlim_n \KER
(\etH^3(X_{K_n},\Gm)\xrightarrow{p^n}\etH^3(X_{K_n},\Gm))
$$
with $\ValR$.
\end{proof}
This result is complemented by the following
\begin{prop}\label{prop:gysin factors}
There exist (non-canonical) isomorphisms of $\Omega$-modules
\begin{align*}
\varprojlim_n\etH^0(S_{K_n},\ValR)
&\isomorph\algebra{\ValR}{\GAL(K_{\infty}/\Rat)/D_p}\\
\varprojlim_n\etH^1(S_{K_n},\ValR) &\isomorph\Dsum_{l\in
S}\algebra{\ValR}{\GAL(K_{\infty}/\Rat)/D_l},
\end{align*}
where $D_l$ denotes the decomposition subgroup of the prime $l$ in
$\GAL(K_{\infty}/\Rat)$.
\end{prop}
\begin{proof}
One of the fundamental properties of $\Zp$-extensions is the fact
that $K_\infty/K_0$ is unramified outside the primes over $p$ (see
\cite{Wa1}, Prop. 13.2). For cyclotomic $\Zp$-extensions one also
knows that there exists a number $n_0$ such that all primes over
$p$ are totally ramified in $K_{\infty}/K_{n_0}$ and such that
none of the primes in $S_{K_{n_0}}$ splits in $K_{\infty}/K_{n_0}$
(see \cite{Wa1}, Ex. 13.2). In particular, $S_{K_\infty}\To
S_{K_n}$ is a homeomorphism for $n\geq n_0$.
On the other hand,
$$
\etH^0(S_{K_n},\ValR)\isomorph\Dsum_{v\in
S_{K_n}}\ValR\isomorph\etH^1(S_{K_n},\ValR).
$$
An elementary calculation shows that the corestriction map
$$
\etH^i(S_{K_{n+1}},\ValR)\To\etH^i(S_{K_n},\ValR)
$$
for $n\geq n_0$ is the identity for $i=1$, respectively the
multiplication by the residue degree of $v$ on the $v$-component
of $\etH^0(S_{K_{n+1}},\ValR)$ for $i=0$. But the residue degree
is $1$ or $p$ depending on whether $v$ lies over $p$ or not. Now
pass to the limit. The choice of an element of $S_{K_\infty}$ for
each prime in $S$ induces the desired isomorphisms.
\end{proof}
\begin{cor}\label{cor:local factors Zp(1)}
Let $T$ be a closed subscheme of $U$. Then the complex
$\RightD\IwaSect(U_{K_\infty},T,\ValR(\cyclchar))$ is acyclic
outside degree $3$ and
$$
\IwaH^3(U_{K_{\infty}},T,\ValR(\cyclchar))\isomorph\Dsum_{l\in
T}\algebra{\ValR}{\GAL(K_{\infty}/\Rat)/D_l}.
$$
\end{cor}
\begin{proof}
Easy application of the snake lemma.
\end{proof}
\section{Local Factors}\label{sec:LocalFactors}
In this section we shall examine the relative cohomology complexes
$\RightD\IwaSect(U_{K_\infty},S,M(\rho))$ for arbitrary continuous
$\GAL(\algclosure{\Rat}/\Rat)$-representations $M(\rho)$, where
$M$ is any finitely generated $\ValR$-module. This will also
complete the proof of Proposition~\ref{prop:base change comps}.
Our aim is to extend Corollary \ref{cor:local factors Zp(1)} as
follows.
\begin{prop}\label{prop:local factors}
Let $U$ be an open subscheme of $\SPEC \algebra{\Int}{1/p}$, $S$ a
closed subscheme of $U$. Then
\begin{enumerate}
\item $\RightD\IwaSect(U_{K_\infty},S,M(\rho))$ is acyclic outside
degree $3$;
\item $\IwaH^3(U_{K_\infty},S,M(\rho))$ is a finitely generated
$\ValR$-module;
\item if for all $l\in S$ the prime $p$ does not divide the
ramification index of $l$ in $K_\infty/\Rat$, then
$\RightD\IwaSect(U_{K_\infty},S,M(\rho))$ is a perfect torsion
complex of $\power{\ValR}{\GAL(K_{\infty}/\Rat)}$-modules.
\end{enumerate}
\emph{Supplement:} If $K_{\infty}/\Rat$ is a $p$-extension,
$M=\ValR$, and $\rho$ is ramified over a prime $l$ that is
unramified in $K_{\infty}/\Rat$, then
$$
\CHAR \RightD\IwaSect(U_{K_\infty},l,\ValR(\rho))=(1).
$$
\end{prop}
\begin{proof}
As in the proof of Proposition~\ref{prop:gysin factors}, we fix a
number $m$ such that none of the primes in $S_{K_{m}}$ splits or
ramifies in $K_{\infty}/K_{m}$.
Write $v_m\in S_{K_m}$ for the image of a point $v\in
S_{K_\infty}$. From \cite{Ja1}, Prop. 3.8, we obtain an
isomorphism of $\power{\ValR}{\GAL(K_\infty/K_{m})}$-modules
\begin{multline*}
\IwaH^i(U_{K_\infty},S,M(\rho))=\\
\Dsum_{v\in S_{K_\infty}}\RightD^i(\varprojlim\etSect)(\SPEC
\Order[v_m]^h,v_m,j_{K_{m} *}\Ind[K_{\infty}/K_{m}]M(\rho)),
\end{multline*}
where $\Order[v_m]^h$ is the henselisation of the local ring at
$v_m$ and the limit is taken over the projective system $j_{K_{m}
*}\Ind[K_{\infty}/K_{m}]M(\rho)$.
We will now use the connection between \'{e}tale and Galois
cohomology. Fix a $v\in S_{K_\infty}$ and let $G_{v_m}$ denote the
absolute Galois group of $\quotient{\Order[v_m]^h}$, $I_{v_m}$ its
inertia subgroup and $g_{v_m}=G_{v_m}/I_{v_m}$ the Galois group of
the residue field of $v_m$. For any profinite group $G$, let
$\cd_p G$ denote the cohomological $p$-dimension, i.e. the largest
number $i$ such that the $i$-th group cohomology functor is
non-trivial on finite $p$-torsion $G$-modules. We have
\begin{align*}
\cd_p G_{v_m}&=2,\\
\cd_p I_{v_m}=\cd_p g_{v_m}&=1
\end{align*}
(see \cite{Ne1}, Prop. 3.3.4, Prop. 7.1.8). Further, it is well
known that for these groups, the cohomology groups of finite
$p$-torsion modules will again be finite. In particular, we may
interchange projective limits and continuous cohomology functors
during the subsequent considerations.
By \cite{Mi2}, Proposition~II.1.1.(b) and \cite{Mi1}, Ex. II.3.15
it follows that
\begin{multline*}
\RightD^i(\varprojlim\etSect)(\SPEC \Order[v_m]^h,j_{K_{m}
*}\Ind[K_{\infty}/K_{m}]M(\rho))=\\
\Hc^i(g_{v_m},H^0(I_{v_m},\Ind[K_{\infty}/K_{m}]M(\rho))).
\end{multline*}
Comparing the localisation sequence for
$$
\SPEC \quotient{\Order[v_m]^h}\hookrightarrow\SPEC
\Order[v_m]^h\hookleftarrow v_m
$$
with the Hochschild-Serre spectral sequence for $I_{v_m}\subset
G_{v_m}$ we obtain from the above
\begin{multline*}
\RightD^i(\varprojlim\etSect)(\SPEC \Order[v_m]^h,v_m,j_{K_{m}
*}\Ind[K_{\infty}/K_{m}]M(\rho))=\\
\Hc^{i-2}(g_{v_m},\Hc^1(I_{v_m},\Ind[K_{\infty}/K_{m}]M(\rho))).
\end{multline*}
As $v_m$ is unramified in $K_{\infty}/K_{m}$, the action of
$I_{v_m}$ on $\power{\ValR}{\GAL(K_{\infty}/K_{m})}(\iota)$ is
trivial and hence,
$$
\Hc^1(I_{v_m},\Ind[K_{\infty}/K_{m}]M(\rho))=
\Ind[K_{\infty}/K_{m}]\Hc^1(I_{v_m},M(\rho)).
$$
Observe that $\Hc^1(I_{v_m},M(\rho))$ is a finitely generated
$\ValR$-module.
To finish the proof of Proposition~\ref{prop:local factors}, (i)
and (ii), it remains to verify the following
\begin{lem}
Let $N(\tau)$ be a finitely generated $\ValR$-module $N$ together
with a continuous representation $\tau:g_{v_m}\To\AUT_{\ValR}N$.
Then
\begin{enumerate}
\item $\Hc^0(g_{v_m},\Ind[K_\infty/K_{m}]N(\tau))=0$,
\item $\Hc^1(g_{v_m},\Ind[K_\infty/K_{m}]N(\tau))$ is a finitely
generated $\ValR$-module.
\end{enumerate}
\end{lem}
\begin{proof}
By our assumption on $m$ we have $I_{v_n}=I_{v_m}$ and
$G_{v_n}/G_{v_m}=\GAL(K_n/K_{m})$ for $n\geq m$. Thus,
$$
\Hc^i(g_{v_m},\Ind[K_{\infty}/K_{m}]N(\tau))=\varprojlim_n\Hc^i(g_{v_n},N(\tau)).
$$
The same argument as in Proposition~\ref{prop:IwaCohomology}.(ii)
implies that this term vanishes for $i=0$. This proves (i). Claim
(ii) follows because $\Hc^1(g_{v_n},N(\tau))$ is a quotient of $N$
for all $n$.
\end{proof}
We now prove Proposition~\ref{prop:local factors}, (iii). After
decomposing by characters we may assume that $K_{\infty}$ is a
$p$-extension. In particular, $l\in S$ is unramified. By the same
argument as above, replacing $K_{m}$ by $\Rat$, we obtain
$$
\IwaH^3(U_{K_\infty},l,M(\rho))= \Hc^1(g_l,\Hc^1(I_l,\Ind
M(\rho))).
$$
Write $\Omega=\power{\ValR}{\GAL(K_{\infty}/\Rat)}$ and note that
$I_l$ acts trivially on $\Omega(\iota)$. Consequently,
$$
\Hc^1(I_l,\Ind M(\rho))=\Ind N(\tau),
$$
where $N=\Hc^1(I_l,M(\rho))$ is a finitely generated
$\ValR$-module and $\tau:g_l\to\AUT_{\ValR}N$ is the induced
representation. Recall that $g_l$ is topologically generated by
the geometric Frobenius element $\geoF_l$. By (i) the sequence
$$
0\To \Omega\tensorp{\ValR}N \xrightarrow{\id -
\iota(\geoF_l)\tensor\tau(\geoF_l)}\Omega\tensorp{\ValR}N\To\IwaH^3(U_{K_\infty},l,M(\rho))\To
0
$$
is exact. As $\ValR$ is regular, $\Omega\tensorp{\ValR}N$ is
perfect as an $\Omega$-complex; hence, so is
$\IwaH^3(U_{K_\infty},l,M(\rho))$. The latter module is
$\Omega$-torsion, because it is a finitely generated
$\ValR$-module. This proves (iii).
To prove the supplement it suffices to recall that
$$
N=(M(\rho)^{R_l})_{T_l},
$$
where $R_l$ is the ramification subgroup and $T_l=I_l/R_l$. If
$M=\ValR$ and the restriction of $\rho$ to $I_l$ is non-trivial,
then this module is clearly $\ValR$-torsion. By
Proposition~\ref{prop:properties of char},(iii) and (v), applied
to the above exact sequence, we obtain
$$
\CHAR \RightD\IwaSect(U_{K_\infty},l,M(\rho))=\CHAR
(\Omega\tensorp{\ValR}N)\CHAR^{-1}(\Omega\tensorp{\ValR}N)=(1).
$$
This finishes the proof of Proposition~\ref{prop:local factors}.
\end{proof}
\begin{rem}\label{rem:Local factors tensor}
Let $W$ be a finitely generated $\Omega$-module. If either $W$ or
$\Hc^1(I_l,M(\rho))$ is flat as an $\ValR$-module, then
\begin{multline*}
W\dtensorp{\Omega}\RightD\IwaSect(U_{K_\infty},l,\Ind[K_{\infty}/\Rat]M(\rho))\isomorph\\
\Hc^1(g_l,W\tensorp{\Omega}\Ind[K_{\infty}/\Rat]\Hc^1(I_l,M(\rho)))\\
\isomorph\RightD\IwaSect(U_{K_\infty},l,W\tensorp{\Omega}\Ind[K_{\infty}/\Rat]M(\rho)).
\end{multline*}
This is not true without the additional flatness assumption.
\end{rem}
\section{Cyclotomic Elements and
$L$-Elements}\label{sec:CyclotomicElements}
The aim of this section is to assign to each admissible triple
$(K_\infty,\rho,U)$ given by
\begin{itemize}
\item a cyclotomic $\Zp$-extension $K_\infty$ of an abelian number
field,
\item a representation
$\rho:\GAL(\algclosure{\Rat}/\Rat)\To\mult{\ValR}$,
\item an open subscheme $U$ of $\SPEC \Int$ that does not
contain any prime whose ramification index in $K_\infty/\Rat$
is divisible by $p$
\end{itemize}
an $L$-element
$L(U_{K_\infty},\rho^{-1}\cyclchar)\in\mult{\quotient{\power{\ValR}{\GAL(K_\infty/\Rat)}}}$
and a cyclotomic element $c(U_{K_\infty},\rho)\in
\IwaH^1(U_{K_\infty},\ValR(\rho))$.
If $\rho$ and $U_{K_\infty}$ are unramified over $U$, then our
definition follows along the lines of the classical construction
(see \cite{Wa1}, \S7.2, respectively \cite{Ru1}, \S3.2). By the
Theorem of Kronecker and Weber there exists a number $f$ such that
\begin{itemize}
\item the set of prime divisors of $f$ is equal to the
complement of $U$ in $\SPEC \algebra{\Int}{1/p}$,
\item $K_\infty\subset \CyclF{fp^{\infty}}$,
\item $\rho$ factors through $\GAL(\CyclF{fp^{\infty}}/\Rat)$.
\end{itemize}
Let $\geoF_l$ denote the geometric Frobenius element and set
$$
\geoF_a=\prod_{l \text{ prime}}\geoF_l^{v_l(a)}
$$
for each positive integer $a$. The Stickelberger elements
$$
\Stickel{fp^k}=\sum_{\substack{0<a<fp^k\\
\ggt{a}{fp}=1}}(\frac{a}{fp^k}-\frac{1}{2})\geoF_{a}\in\algebra{\Rat_p}{\GAL(\CyclF{fp^k}/\Rat)}
$$
are compatible under the projection maps induced by
$$
\GAL(\CyclF{fp^{k+1}}/\Rat)\To\GAL(\CyclF{fp^k}/\Rat)
$$
and define an element
$$
\Stickel{fp^{\infty}}\in\quotient{\power{\ValR}{\GAL(\CyclF{fp^{\infty}}/\Rat)}}.
$$
Further, we fix for each number $k$ a primitive $k$-th root of unity
$\pru{k}$ such that $\pru{ks}^s=\pru{k}$. By
Proposition~\ref{prop:cohom of Zp(1)} we may regard the system
$c_{fp^{\infty}}=(1-\pru{fp^k})_{k=0}^{\infty}$ as an element of
$\IwaH^1(U_{\CyclF{fp^{\infty}}},\ValR(\cyclchar))$.
\begin{defn}
Let $\rho$ and $U_{K_\infty}$ be unramified over $U$. Denote by
$\pr{+}$ and $\pr{-}$ the projectors onto the $(+1)$-eigenspace,
respectively the $(-1)$-eigenspace of the complex conjugation and
choose $f$ as above. We set
$$
L(U_{K_\infty},\rho^{-1}\cyclchar)=
\prKL[\CyclF{fp^{\infty}}/K_\infty]\Tw{\rho^{-1}\cyclchar}(\pr{+}-\pr{-}\Stickel{fp^\infty}).
$$
The cyclotomic element $c(U_{K_\infty},\rho)$ is defined to be the
image of $1\tensor\pr{+}c_{fp^{\infty}}$ under the homomorphism
$$
(\prKL[\CyclF{fp^{\infty}}/K_\infty]\Tw{\rho^{-1}\cyclchar})_*
\IwaH^1(U_{\CyclF{fp^{\infty}}},\ValR(\cyclchar))\To
\IwaH^1(U_{K_{\infty}},\ValR(\rho)).
$$
For any $l\in U$ we denote by
$$
E_l(K_\infty,\rho^{-1}\cyclchar)=
1-\prKL[\CyclF{fp^{\infty}}/K_\infty]\Tw{\rho^{-1}\cyclchar}(\geoF_l)
$$
the Euler factor at $l$.
\end{defn}
We will now extend this definition to arbitrary admissible triples
$(K_{\infty},\rho,U)$. Let $P_{\infty}$ be the maximal
$p$-extension inside $K_\infty$. We may decompose
$\power{\ValR}{\GAL(K_\infty/\Rat)}$ by the characters of
$\GAL(K_\infty/P_\infty)$. The $L$-elements and cyclotomic
elements for $K_\infty$ are then completely determined by their
projections onto the components of the corresponding decomposition
of $\mult{\quotient{\power{\ValR}{\GAL(K_\infty/\Rat)}}}$,
respectively $\IwaH^1(U_{K_\infty},\ValR(\rho))$. Consequently, it
suffices to consider triples $(P_{\infty},\rho,U)$ with
$P_{\infty}$ a $p$-extension. Note that every ramification index
is now a power of $p$. By assumption, $U_{P_\infty}$ is therefore
an unramified cover of $U$. In other words, we only need to deal
with the ramification of $\rho$.
Let $V\subset U$ be the maximal open subscheme such that $\rho$ is
unramified over $V$ (note that the complement is a finite set
because $\rho$ is one-dimensional) and set
\begin{align*}
L(U_{P_\infty},\rho^{-1}\cyclchar)&=L(V_{P_\infty},\rho^{-1}\cyclchar),\\
c(U_{P_\infty},\rho)&=c(V_{P_\infty},\rho),\\
\intertext{and for any $l\in U-V$}
E_l(P_\infty,\rho^{-1}\cyclchar)=1.
\end{align*}
It is easy to check that this definition is consistent with the
previous construction. Moreover, we have
\begin{prop}\label{prop:compatibilities}
Let $(K_{\infty},\rho,U)$ be any admissible triple.
\begin{enumerate}
\item The elements $L(U_{K_\infty},\rho^{-1}\cyclchar)$,
$c(U_{K_\infty},\rho)$, and $E_l(K_{\infty},\rho^{-1}\cyclchar)$
are compatible under the projection maps
$\prKL[K_{\infty}/L_\infty]$ and under twists by continuous
characters $\GAL(K_{\infty}/\Rat)\To\mult{\ValR}$.
\item Let $V\subset U$ be an open subscheme with closed complement
$T=U-V$. Then
\begin{align*}
L(V_{K_{\infty}},\rho)&=
\Tw{\rho}(\pr{+})+\Tw{\rho}(\pr{-})L(U_{K_{\infty}},\rho)
\prod_{l\in T}E_l(K_{\infty},\rho),\\
c(V_{K_\infty},\rho)&= c(U_{K_\infty},\rho)\prod_{l\in
T}E_l(K_{\infty},\rho^{-1}\cyclchar).
\end{align*}
\end{enumerate}
\end{prop}
\begin{proof}
This either follows from the corresponding compatibility properties
of $\Stickel{fp^{\infty}}$ and $c_{fp^{\infty}}$, or is true by
definition.
\end{proof}
We want to sketch briefly the connection between our $L$-elements
and the Kubota-Leopoldt $L$-function $L_p(s,\chi)$. Let
$K_\infty=\Rat_\infty$ and $U=\SPEC \algebra{\Int}{1/p}$.
According to the decomposition $\mult{\ValR}=\mu\times F$ into the
torsion group of roots of unity $\mu$ and the torsion-free
$\Zp$-module $F$ we can write
$$
\rho=\rho_f\rho_\infty,
$$
where $\rho_f$ takes values in $\mu$ and
$$
\rho_\infty:\GAL(\Rat_\infty/\Rat)\To F
$$
is a continuous group homomorphism.
Let $N$ be the conductor of $\rho_f$ and
$$
\rec:\GAL(\CyclF{N}/\Rat)\To\mult{(\Int/N\Int)}
$$
the isomorphism which maps the geometric Frobenius $\geoF_l$ to
$l$, when $l$ is prime to $N$. Further, note that the number
$$
s=\frac{\log_p\rho_\infty(\gamma)}{\log_p\big((\cyclchar)_{\infty}(\gamma)\big)}\in
\log_p F\subset\Cp
$$
does not depend on the choice of a nontrivial
$\gamma\in\GAL(\Rat_\infty/\Rat)$. Hence, we may write
$\rho_\infty=(\cyclchar)_\infty^s$.
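The independence is easily checked: fix a topological generator $\gamma_0$ of $\GAL(\Rat_\infty/\Rat)$; every nontrivial element is then of the form $\gamma_0^a$ with $a\in\Zp$, $a\neq0$, and since $\rho_\infty$ and $(\cyclchar)_\infty$ are continuous homomorphisms,
$$
\frac{\log_p\rho_\infty(\gamma_0^a)}{\log_p\big((\cyclchar)_{\infty}(\gamma_0^a)\big)}
=\frac{a\log_p\rho_\infty(\gamma_0)}{a\log_p\big((\cyclchar)_{\infty}(\gamma_0)\big)}
=\frac{\log_p\rho_\infty(\gamma_0)}{\log_p\big((\cyclchar)_{\infty}(\gamma_0)\big)}.
$$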
\begin{prop}\label{prop:evaluation of L-element}
Let $\rho=(\cyclchar)_{\infty}^s\rho_f$ be an even one-dimensional
representation. Then
$$
\prKL[\Rat_\infty/\Rat](L(U_{\Rat_\infty},\cyclchar\rho))=L_p(1+s,\rho_f\conjunct\rec^{-1}).
$$
\end{prop}
\begin{proof}
See \cite{Wa1}, Theorem~7.10, but observe that the identification
$$
\GAL(\CyclF{N}/\Rat)\To\mult{(\Int/N\Int)}
$$
used in loc. cit. is given by $1/\rec$. In particular,
$w\conjunct\rec=(\cyclchar)_f^{-1}$, where $w$ denotes the
Teichm\"uller character.
\end{proof}
\begin{rem}\label{rem:relation to f}
Let $N$ be prime to $p$ and let
$\chi:\mult{(\Int/Np\Int)}\To\mult{\ValR}$ be an odd Dirichlet
character of conductor $N$ or $Np$ (i.e. $\chi$ is of the first
kind). Set $U=\SPEC\algebra{\Int}{1/p}$ and let
$$
\beta:\power{\ValR}{\GAL(\Rat_\infty/\Rat)}\To\power{\ValR}{T}
$$
be the isomorphism that maps $\geoF_{1+Np}^{-1}$ to $T+1$. By
construction we then have
$$
f(T,\chi\omega)=\beta(L(U_{\Rat_{\infty}},\chi\conjunct\rec)),
$$
where $f(T,\chi\omega)$ is the element introduced in \cite{Wa1},
\S7.2.
\end{rem}
\begin{rem}
Let $\chi$ be a finite character and $k$ an integer.
Proposition~\ref{prop:evaluation of L-element} implies that our
element $L(U_{\Rat_\infty},\cyclchar^{1-k}\chi)$, with $U=\SPEC
\algebra{\Int}{1/p}$, coincides with the $p$-adic $L$-function
$\mathcal{L}_p(\chi,1-k)$ used in \cite{HK1}. However, note that
there is a sign error in the definition of this function. The
correct definition should read as follows (in the notation of loc.
cit.). For all $\widetilde{\mathcal{O}}_p$ and all characters
$\tau: \Gamma\To\widetilde{\mathcal{O}}_p^*$ of finite order,
$$
\tau(\mathcal{L}_p(\chi,1-k))=(1-\chi\tau(p)p^{k-1})L(\chi\tau,1-k).
$$
\end{rem}
\begin{rem}
The elements $c(U_{K_\infty},\rho)$ depend on the choice of the
system of roots of unity $(\pru{k})$, but the submodule of
$\IwaH^1(U_{K_\infty},\ValR(\rho))$ generated by
$c(U_{K_\infty},\rho)$ does not. This is the actual object we are
interested in.
\end{rem}
The Theorem of Ferrero-Washington can be rephrased as the
statement that the $\mu$-invariants of the $p$-adic $L$-functions
vanish. The following proposition translates this formulation to
our setting.
\begin{prop}\label{prop:mu=0 for L-elements}
Let $(K_{\infty},\rho,U)$ be an admissible triple and let
$l\in\SPEC\Int$ be a prime whose ramification index in $K_{\infty}/\Rat$ is prime to $p$.
Then $L(U_{K_\infty},\rho)$ and $E_l(K_\infty,\rho)$ map to units
in $\power{\ValR}{\GAL(K_\infty/\Rat)}_{\ideal{p}}$ for each prime
ideal $\ideal{p}$ of codimension $1$ with $p\in\ideal{p}$.
\end{prop}
\begin{proof}
By Proposition~\ref{prop:compatibilities}.(ii) we may assume that
$U_{K_\infty}$ and $\rho$ are unramified over $U$. Choose $f$ as
above and observe that
$$
(1-(1+fp)\geoF_{1+fp})\Stickel{fp^{\infty}}
\in\power{\ValR}{\GAL(\CyclF{fp^{\infty}}/\Rat)}.
$$
Define
$$
h(U_{K_\infty},\rho)=1-(1+fp)\prKL[\CyclF{fp^{\infty}}/K_\infty]\Tw{\rho}(\geoF_{1+fp}).
$$
We need to show that none of $h(U_{K_\infty},\rho)$,
$hL(U_{K_\infty},\rho)$, or $E_l(K_\infty,\rho)$ is contained in
$\ideal{p}$. For this, we can replace
$\power{\ValR}{\GAL(K_\infty/\Rat)}$ by its normalisation and then
decompose by the characters of $\GAL(K_0/\Rat)$. Hence, we may
assume that $K_\infty=\Rat_\infty$. In particular, $\ideal{p}$ is
the radical of $(p)$. After twisting by an appropriate character
of $\GAL(\Rat_\infty/\Rat)$ we may further require that
$\rho=\chi\conjunct\rec^{-1}$ for a Dirichlet character $\chi$ of
the first kind.
Obviously, $E_l(\Rat_\infty,\chi)$ and $h(U_{\Rat_\infty},\chi)$
are prime to $p$ (note that the images of $\geoF_l$ and
$\geoF_{1+fp}$ are nontrivial in $\GAL(\Rat_\infty/\Rat)$). If
$\chi$ is even, then $L(U_{\Rat_\infty},\chi)=1$. If $\chi$ is odd,
the claim for $hL(U_{\Rat_\infty},\chi)$ is by Remark
\ref{rem:relation to f} equivalent to the vanishing of the
$\mu$-invariant of $f(T,\chi\omega)$, hence to the Theorem of
Ferrero-Washington (\cite{Wa1}, \S7.5, respectively \S16.2).
\end{proof}
\section{The Main Theorem} \label{sec:main}
Let $(K_\infty,\rho,U)$ be an admissible triple in the sense of
Section~\ref{sec:CyclotomicElements} and set
$$
\Omega=\power{\ValR}{\GAL(K_\infty/\Rat)},
$$
where $\ValR$ is the valuation ring of a finite extension of
$\Rat_p$. As explained in Section~\ref{sec:ProfGroupRing}, we may
assume without loss of generality that $\ValR$ contains the values
of all characters of $\GAL(K_0/\Rat)$.
Before we state our main theorem we will explain how to modify the
Iwasawa complex $\RightD\IwaSect(U_{K_\infty},\ValR(\rho))$ (see
Definition \ref{defn:Iwasawa complex}) by the cyclotomic element
$$
c(U_{K_\infty},\rho)\in\IwaH^1(U_{K_\infty},\ValR(\rho))
$$
introduced in the preceding section.
Recall that $\RightD\IwaSect(U_{K_\infty},\ValR(\rho))$ is acyclic
in degree $0$ (see Proposition~\ref{prop:IwaCohomology}.(ii)). In
particular, there exists a unique morphism
$$
\Omega
c(U_{K_\infty},\rho)[-1]\To\RightD\IwaSect(U_{K_\infty},\ValR(\rho))
$$
in the derived category that induces the natural inclusion on
cohomology.
\begin{defn}
Denote by $\Rc(U_{K_\infty},\ValR(\rho))$ the complex (unique up
to quasi-isomorphism) fitting into the following distinguished
triangle
$$
\Omega
c(U_{K_\infty},\rho)[-1]\To\RightD\IwaSect(U_{K_\infty},\ValR(\rho))\To\Rc(U_{K_\infty},\ValR(\rho)).
$$
\end{defn}
\begin{lem}\label{lem:torsion+free}\
\begin{enumerate}
\item $\Rc(U_{K_\infty},\ValR(\rho))$ is a perfect torsion complex
of $\Omega$-modules.
\item If $K_\infty$ is totally real and $\rho$ is odd, then
$\Omega c(U_{K_\infty},\rho)$ is a free $\Omega$-module of rank
$1$.
\end{enumerate}
\end{lem}
\begin{proof}
In Proposition~\ref{prop:base change comps}.(i) we have already
confirmed that the complex
$\RightD\IwaSect(U_{K_\infty},\ValR(\rho))$ is perfect. Hence, it
suffices to prove (ii) and that the complex in (i) is torsion.
Let $\widetilde{\Omega}$ denote the normalisation of $\Omega$ in
its total quotient ring $\quotient{\Omega}$. On the one hand we
have
$$
\quotient{\Omega}\tensorp{\Omega}\IwaH^i(U_{K_\infty},\ValR(\rho))=
\quotient{\Omega}\tensorp{\widetilde{\Omega}}
\Big(\Dsum_{\chi}\IwaH^i(U_{\Rat_\infty},\ValR(\chi^{-1}\rho))\Big),
$$
where the sum runs over all characters $\chi$ of $\GAL(K_0/\Rat)$;
on the other hand $\Omega
c(U_{K_\infty},\rho)\subset\IwaH^1(U_{K_\infty},\ValR(\rho))$ is a
free $\Omega$-module of rank $1$ if and only if
$\quotient{\Omega}c(U_{K_\infty},\rho)$ is a free
$\quotient{\Omega}$-module of rank $1$. This module decomposes as
$$
\quotient{\Omega}c(U_{K_\infty},\rho)=
\Dsum_{\chi}\quotient{\power{\ValR}{\GAL(\Rat_\infty/\Rat)}}c(U_{\Rat_\infty},\chi^{-1}\rho).
$$
Observe hereby that $\rho$ and $\chi^{-1}\rho$ have the same
parity if $K_\infty$ is totally real. Hence, it is enough to
consider the case $K_\infty=\Rat_\infty$. By
Proposition~\ref{prop:local factors} and
Proposition~\ref{prop:compatibilities} we may replace $U$ by
$\SPEC \algebra{\Int}{1/p}$, noting that the Euler factors
$E_l(\Rat_{\infty},\rho)$ are nonzero divisors. We are now reduced
to the statement of \cite{HK1}, Proposition~4.2.1. Observe that
the proof of (ii) in this situation uses K.~Kato's explicit
reciprocity law as an essential ingredient.
\end{proof}
\begin{cor}\label{cor:modified_compatibility}
The compatibility properties of Proposition~\ref{prop:base change
comps} hold for $\ValR(\rho)$, with $\RightD\IwaSect$ replaced by
$\Rc$.
\end{cor}
\begin{proof}
Easy consequence of Proposition~\ref{prop:compatibilities} and the
above lemma.
\end{proof}
We are now ready to formulate and prove our main result.
\begin{thm}\label{thm:Main Theorem}
Let $p$ be an odd prime, $K_\infty$ the cyclotomic $\Zp$-extension
of an abelian number field, $\rho:\GAL(\algclosure{\Rat}/\Rat)\To\mult{\ValR}$ a
continuous representation, and $U$ an open subscheme of $\SPEC
\Int$ such that the ramification index in $K_{\infty}/\Rat$ of
every place in $U$ is prime to $p$ (so that $(K_\infty,\rho,U)$ is an admissible triple). Then
\begin{enumerate}
\item $\Rc(U_{K_\infty},\ValR(\rho))_{\ideal{p}}$ is acyclic for
all primes $\ideal{p}$ of codimension~$1$ of
$\power{\ValR}{\GAL(K_\infty/\Rat)}$ that contain $p$.
\item The characteristic ideal of $\Rc(U_{K_\infty},\ValR(\rho))$
is generated by the $L$-element
$L(U_{K_\infty},\rho^{-1}\cyclchar)$.
\end{enumerate}
\end{thm}
\begin{proof}
By the subsequent lemma we are allowed to enlarge or shrink the
scheme $U$ at our discretion.
\begin{lem}\label{lem:indep of Euler-f}
Let $V\subset U$ be an open subscheme of $U$. Then both statements
of Theorem~\ref{thm:Main Theorem} hold for $U$ if and only if they
hold for $V$.
\end{lem}
\begin{proof}
Set $T=U-V$ and define $\cmplx{C}$ by the following distinguished
triangle
$$
\Omega
c(V_{K_\infty},\rho)[-1]\To\RightD\IwaSect(U_{K_\infty},\ValR(\rho))\To\cmplx{C},
$$
where the first map is induced by the inclusion
$$
\Omega
c(V_{K_\infty},\rho)\hookrightarrow\IwaH^1(V_{K_\infty},\ValR(\rho))
=\IwaH^1(U_{K_\infty},\ValR(\rho)).
$$
We obtain the following two triangles of perfect torsion complexes
\begin{gather*}
\RightD\IwaSect(U_{K_\infty},T,\ValR(\rho))\To\cmplx{C}\To\Rc(V_{K_\infty},\ValR(\rho))\\
\cmplx{C}\To\Rc(U_{K_\infty},\ValR(\rho))\To \Omega
c(U_{K_\infty},\rho)/\Omega c(V_{K_\infty},\rho).
\end{gather*}
Proposition~\ref{prop:compatibilities} implies
$$
\Omega c(U_{K_\infty},\rho)/\Omega c(V_{K_\infty},\rho)=
\Tw{\rho^{-1}\cyclchar}(\pr{+})\Big(\Omega\Big/\prod_{l\in
T}E_l(K_\infty,\rho^{-1}\cyclchar)\Omega\Big)
$$
and since $E_l(K_\infty,\rho^{-1}\cyclchar)$ is a unit of
$\Omega_\ideal{p}$ for any prime ideal $\ideal{p}$ of codimension
$1$ with $p\in\ideal{p}$ (see Proposition~\ref{prop:mu=0 for
L-elements}) it follows that
$$
\big(\Omega c(U_{K_\infty},\rho)/\Omega
c(V_{K_\infty},\rho)\big)_{\ideal{p}}=0.
$$
On the other hand, we know that
$\RightD\IwaSect(U_{K_\infty},T,\ValR(\rho))$ is acyclic outside
degree $3$ and that $\IwaH^3(U_{K_\infty},T,\ValR(\rho))$ is
finitely generated as $\ValR$-module (see
Proposition~\ref{prop:local factors}). Therefore,
$$
\RightD\IwaSect(U_{K_\infty},T,\ValR(\rho))_{\ideal{p}}\isomorph 0
$$
by Lemma~\ref{lem:criterion for vanishing}. This implies the
equivalence for part (i) of Theorem~\ref{thm:Main Theorem}.
By using the multiplicativity of the characteristic ideal, the
equivalence for part (ii) is reduced to proving that
$$
\CHAR
\IwaH^3(U_{K_\infty},l,\ValR(\rho))=E_l(K_\infty,\rho^{-1}\cyclchar)\Omega
$$
for $l\in T$. After decomposing by characters we may assume that
$K_\infty$ is a $p$-extension. For those primes over which $\rho$
is ramified the equality is implied by the supplement to
Proposition~\ref{prop:local factors}. For the remaining primes
choose $f$ as in Section~\ref{sec:CyclotomicElements}. From
Corollary \ref{cor:local factors Zp(1)} we obtain
\begin{multline*}
\CHAR \IwaH^3(U_{K_\infty},l,\ValR(\rho))=\\
\CHAR
(\prKL[\CyclF{fp^{\infty}}/K_{\infty}]\Tw{\rho^{-1}\cyclchar})_*
\algebra{\ValR}{\GAL(\CyclF{fp^{\infty}}/\Rat)/D_l}\\
=E_l(K_\infty,\rho^{-1}\cyclchar)\Omega.
\end{multline*}
\end{proof}
From the formulation of the main conjecture in \cite{HK1} we can
deduce the following weaker instance of Theorem~\ref{thm:Main
Theorem}.(ii).
\begin{lem}\label{lem:main normalisiert}
Let $\phi:\Omega\To\widetilde{\Omega}$ be the normalisation of
$\Omega$. Then
$$
\CHAR\LeftD\phi_*\Rc(U_{K_\infty},\ValR(\rho))=L(U_{K_\infty},\rho^{-1}\cyclchar)\widetilde{\Omega}.
$$
\end{lem}
\begin{proof}
We may decompose by the characters of $\GAL(K_0/\Rat)$. Thus, we
may assume that $K_\infty=\Rat_\infty$. By Lemma~\ref{lem:indep of
Euler-f} we can further reduce to $U=\SPEC \algebra{\Int}{1/p}$.
If $\rho$ is odd, then $L(U_{\Rat_\infty},\rho^{-1}\cyclchar)=1$
by definition. On the other hand,
$$\CHAR \Rc(U_{\Rat_\infty},\ValR(\rho))=(1)$$
by \cite{HK1}, Theorem~4.2.2. If $\rho$ is even, then
$c(U_{\Rat_\infty},\rho)=0$ and by \cite{HK1}, Theorem~4.2.4,
$$
\CHAR\RightD\IwaSect(U_{\Rat_\infty},\ValR(\rho))
=L(U_{\Rat_\infty},\rho^{-1}\cyclchar)\power{\ValR}{\GAL(\Rat_{\infty}/\Rat)}.
$$
Originally, both theorems only deal with the case that $\rho$ is a
finite character times an integral power of $\cyclchar$, but the
general case follows easily by twisting.
\end{proof}
We will now turn to the proof of Theorem~\ref{thm:Main
Theorem}.(i). A large portion of it can be dealt with by the
following lemma. Here, we use the Theorem of Ferrero-Washington
for the second time (see Proposition~\ref{prop:mu=0 for
L-elements}).
\begin{lem}\label{lem:even part}
$\Tw{\rho^{-1}\cyclchar}(\pr{-})\IwaH^1(U_{K_\infty},\ValR(\rho))$
and $\IwaH^2(U_{K_\infty},\ValR(\rho))$ are finitely generated as
$\ValR$-modules.
\end{lem}
\begin{proof}
By Corollary \ref{cor:modified_compatibility} we may enlarge
$K_\infty$ such that $\rho$ factors through $\GAL(K_\infty/\Rat)$.
Further, nothing changes if we then twist by $\rho^{-1}\cyclchar$.
Hence, we may assume $\rho=\cyclchar$.
Set $X=\SPEC\Int$, $S=X-U$. By the Theorem of Ferrero-Washington
(\cite{Wa1}, Theorem~7.15) the module
$$
\varprojlim_n \ValR\tensorp{\Int}\Pic(X_{K_n})
$$
is finitely generated over $\ValR$. By Proposition~\ref{prop:gysin
factors} this is also true for the modules
$$
\varprojlim_n \etH^i(S_{K_n},\ValR).
$$
Further, it is an elementary fact of the theory of cyclotomic
fields that
$$
\card{(\Gm(X_{K_n})/\mu(X_{K_n})\pr{+}\Gm(X_{K_n}))}\leq 2,
$$
where $\mu$ denotes the sheaf of roots of unity (see \cite{Wa1},
Theorem~4.12). In particular, as $p$ was assumed to be an odd
prime,
$$
\varprojlim_n \ValR\tensorp{\Int}\pr{-}\Gm(X_{K_n})= \varprojlim_n
\ValR\tensorp{\Int}\mu(X_{K_n}).
$$
This module is obviously finitely generated over $\ValR$ as well.
Now use the exact sequence of Proposition~\ref{prop:cohom of
Zp(1)}.(ii).
\end{proof}
After decomposition by characters we may assume that $K_\infty$ is
a $p$-extension; in particular, totally real. Additionally, we may
shrink $U$ by Lemma~\ref{lem:indep of Euler-f} such that
$U_{K_\infty}$ and $\rho$ are unramified over $U$. The case that
$\rho$ is even has already been settled by the above lemma. The
key to the remaining case is the following
\begin{lem}\label{lem:key lemma}
Let $K_\infty$ be a $p$-extension, $\rho$ be odd, and let both be
unramified over $U$. Write $\ideal{p}$ for the prime ideal of
$\Omega$ with $p\in\ideal{p}$ and $\CODIM \ideal{p}=1$. Then there
exists a nonzero divisor $x$ of $\Omega_\ideal{p}$ and a
quasi-isomorphism
$$
\Rc(U_{K_\infty},\ValR(\rho))_{\ideal{p}} \isomorph
\Omega_{\ideal{p}}/x\Omega_{\ideal{p}}[-1].
$$
\end{lem}
\begin{proof}
We will first show that
$\RightD\IwaSect(U_{K_\infty},\ValR(\rho))$ is quasi-isomorphic to
a complex $\cmplx{P}$ of finitely generated projective
$\Omega$-modules with $P^i=0$ for $i\notin\set{1,2}$. By
Proposition~\ref{prop:cohom dim} we can achieve that $P^i=0$ for
$i>2$.
Recall that in the present situation, $\Omega$ is a local ring.
Let $k$ be the residue field of $\Omega$.
Proposition~\ref{prop:tensor products} implies that
$$
k\dtensorp{\Omega}\RightD\IwaSect(U_{K_\infty},\ValR(\rho))=
\RightD\etSect(U,k\tensorp{\Omega}\Ind\ValR(\rho)).
$$
Since $K_\infty$ is totally real and $\rho$ is odd, every lift of
the complex conjugation will act by multiplication by $-1$ on
$\Ind\ValR(\rho)\tensorp{\Omega}k$; consequently,
$$
\etH^0(U,k\tensorp{\Omega}\Ind\ValR(\rho))=0.
$$
Thus, we can choose $P^i=0$ for $i<1$ as well.
Lemma~\ref{lem:even part} then implies that the complex
$\RightD\IwaSect(U_{K_\infty},\ValR(\rho))_{\ideal{p}}$ is
quasi-isomorphic to a free $\Omega_{\ideal{p}}$-module sitting in
degree $1$. The claim follows since $\Omega c(U_{K_\infty},\rho)$
is free of rank $1$ and $\Rc(U_{K_\infty},\ValR(\rho))$ is
torsion by Lemma~\ref{lem:torsion+free}.
\end{proof}
Putting this and Lemma~\ref{lem:main normalisiert} together we see
that in the situation of Lemma~\ref{lem:key lemma}, the invertible
ideals of the normalisation of $\Omega_\ideal{p}$ generated by
$x^{-1}$, respectively $L(U_{K_\infty},\rho^{-1}\cyclchar)$,
agree. But
$$
L(U_{K_\infty},\rho^{-1}\cyclchar)=1;
$$
hence, $x$ is a unit in the normalisation of $\Omega_{\ideal{p}}$
and therefore a unit in $\Omega_{\ideal{p}}$ itself. This finishes
the proof of Theorem~\ref{thm:Main Theorem}.(i).
At last, we complete the proof of Theorem~\ref{thm:Main
Theorem}.(ii). Let $(K_\infty,\rho,U)$ be any admissible triple.
By Proposition~\ref{prop:comparison of ideals} it suffices to show
that
$$
\CHAR \Rc(U_{K_\infty},\ValR(\rho))_{\ideal{p}}=
L(U_{K_\infty},\rho^{-1}\cyclchar)\Omega_{\ideal{p}}
$$
for all prime ideals $\ideal{p}$ of codimension $1$. In
Lemma~\ref{lem:main normalisiert} we have already proved this for
those $\ideal{p}$ that do not contain $p$. By
Theorem~\ref{thm:Main Theorem}.(i) and Proposition~\ref{prop:mu=0
for L-elements} the equality also holds for the remaining primes.
\end{proof}
\end{document}
\begin{document}
\title{String homology, and closed geodesics on manifolds which are elliptic spaces}
\begin{abstract}
Let $M$ be a closed simply connected smooth manifold. Let $\mathbb{F}_p$ be the finite field with $p$ elements where $p> 0$ is a prime integer. Suppose that $M$ is an $\mathbb{F}_p$-elliptic space in the sense of [FHT91a]. We prove that if the cohomology algebra $H^*(M, \mathbb{F}_p)$ cannot be generated (as an algebra) by one element, then any Riemannian metric on $M$ has an infinite number of geometrically distinct closed geodesics. The starting point is a classical theorem of Gromoll and Meyer [GM69]. The proof uses string homology, in particular the spectral sequence of [CJY04], the main theorem of [McC87], and the structure theorem for elliptic Hopf algebras over $\mathbb{F}_p$ from [FHT91a].
\end{abstract}
\section{Introduction}
We work over a ground field $\mathbb{F}$ and use $\mathbb{F}$ as the coefficients of homology and cohomology. Our main applications are in the case where this ground field is the finite field $\mathbb{F}_p$ with $p$ elements, where $p > 0$ is a prime integer.
Let $\mathrm{H}\mathrm{L}_*(M)$ denote the string homology algebra of a closed, simply connected manifold $M$. String homology is a graded commutative
$\mathbb{F}$-algebra defined as follows. Let $LM$ be the free loop space of $M$. In [CS99], Chas and Sullivan define the {\em string product}
$$
H_p(LM) \otimes H_q(LM) \to H_{p+q-n}(LM)
$$
where $n$ is the dimension of $M$. This product is studied from the point of view of homotopy theory in [CJ02]. The {\em string homology algebra} is defined by setting $\mathrm{H}\mathrm{L}_s(M) = H_{s+n}(LM;\mathbb{F})$ and taking the string product as the multiplication. It is proved in both [CS99] and [CJ02] that the string product makes $\mathrm{H}\mathrm{L}_*(M)$ into a graded commutative $\mathbb{F}$-algebra.
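We note that with this regrading the string product becomes degree-preserving (on ordinary homology it lowers degree by $n$), and that the image of the fundamental class of $M$ under the inclusion of the constant loops $M \subset LM$ represents the unit of $\mathrm{H}\mathrm{L}_*(M)$.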
Our main result about string homology is the following theorem. In the statement $\Omega X$ refers to the based loop space of $X$.
\begin{theorem}
Let $M$ be a simply connected, closed manifold. Suppose there is a constant $C$ and an integer $K$ such that
$$
\sum_{i \leq n} \dim H_i(\Omega M; \mathbb{F}_p) \leq Cn^K
$$
for all $n$. Let $K_0$ be the minimal exponent which can occur in such a bound. Then the string homology algebra $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$ contains a polynomial algebra $P$ over $\mathbb{F}_p$ on $K_0$ generators and $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$ is a finitely generated free module over $P$.
\end{theorem}
If $H_*(\Omega M; \mathbb{F}_p)$ satisfies the growth hypotheses in the statement of this theorem, then we say that $H_*(\Omega M; \mathbb{F}_p)$ has {\it polynomial growth}. The main application of this theorem is the following result.
\begin{theorem}
Let $M$ be a simply connected, closed manifold. Suppose $H_*(\Omega M; \mathbb{F}_p)$ has polynomial growth and the algebra $H^*(M; \mathbb{F}_p)$ cannot be generated by one element. Then for any metric on $M$, there is an infinite number of geometrically distinct closed geodesics on $M$.
\end{theorem}
To obtain this result from Theorem 1.1 we use the Gromoll-Meyer theorem relating closed geodesics and the topology of the free loop space. A metric on $M$ defines a function, the {\em energy function}, on $LM$ given by
$$
\gamma \mapsto \displaystyle \int_{S^1} \langle \gamma'(t), \gamma'(t)\rangle\, dt.
$$
If $\gamma\colon S^1 \to M$ is a closed geodesic parametrized by arc length then $\gamma$ is a critical point of the energy function, as is the loop $\gamma_n$ defined by $\gamma_n(z) = \gamma(z^n)$. Furthermore every critical point of the energy function is of the form $\gamma_n$ where $\gamma$ is a closed geodesic parametrized by arc length [B56].
The circle $S^1$ acts on $LM$ by rotating loops and the energy function is $S^1$-invariant. It follows that any closed geodesic $\gamma$ parametrized by arc length generates an infinite number of critical $S^1$ orbits of the energy function. In general these orbits will not be isolated but if there are only a finite number of geometrically distinct closed geodesics these orbits will be isolated.
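Note in this connection that for any loop $\gamma$ the iterates satisfy $E(\gamma_n) = n^2 E(\gamma)$, so for a non-constant closed geodesic $\gamma$ the critical orbits of the iterates $\gamma_n$ lie on pairwise distinct critical levels.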
We use the following terminology for graded vector spaces. If each $V_i$ is finite dimensional we say $V$ has {\em finite type}. If $V$ has finite type then we say it has {\em finite dimension} if $\dim V_i$ is zero for all but a finite number of $i$, {\em infinite dimension} if $\dim V_i$ is non-zero for an infinite number of $i$, and {\em doubly infinite dimension} if the sequence of numbers $\dim V_i$ is unbounded. Note that doubly infinite dimension is the same as polynomial growth with minimal exponent at least $2$. Using Morse-Bott theory, Gromoll and Meyer showed in [GM69] that the relation between critical points of the energy function and closed geodesics leads to the following theorem.
\begin{theorem}
Let $M$ be a simply connected closed manifold. If $H_*(LM; \mathbb{F})$ has doubly infinite dimension for some field $\mathbb{F}$, then for any metric on $M$ there is an infinite number of geometrically distinct closed geodesics on $M$.
\end{theorem}
If $\pi_1(M)$ is finite, then we can apply this theorem to the universal cover $\tilde{M}$ of $M$. If $\pi_1(M)$ is infinite and $\pi_1(M)$ has an infinite number of conjugacy classes, then $LM$ has an infinite number of components. Given a metric on $M$ we can choose a minimiser of the energy function in each component of $LM$ and it follows that this metric has an infinite number of closed geodesics [BaThZ81]. This leaves the case where $\pi_1(M)$ is infinite but only has a finite number of conjugacy classes. Very little is known about this case [BngHi84].
In [V-PS76] Sullivan and Vigu\'e-Poirrier took up the case where $\mathbb{F} = \mathbb{Q}$ and, as an application of the theory of minimal models in rational homotopy, proved the following theorem.
\begin{theorem}
Suppose $M$ is a closed, simply connected manifold and the algebra $H^*(M;\mathbb{Q}) $ is not generated by one element. Then $H_*(LM; \mathbb{Q})$ is doubly infinite.
\end{theorem}
A key ingredient in the proof of Theorem 1.2 from Theorem 1.1 is the following theorem from [McC87].
\begin{theorem}
Let $X$ be a simply connected space such that the algebra $H^*(X; \mathbb{F}_p)$ cannot be generated by one element. Then $H_*(\Omega X; \mathbb{F}_p)$ is doubly infinite.
\end{theorem}
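For instance, $H_*(\Omega S^{2n+1}; \mathbb{F}_p) \cong \mathbb{F}_p[u_{2n}]$ has all Betti numbers at most $1$, so it has infinite but not doubly infinite dimension, consistent with the fact that $H^*(S^{2n+1}; \mathbb{F}_p)$ is generated by one element; on the other hand, for $X = S^3 \times S^3$ one gets $H_*(\Omega X; \mathbb{F}_p) \cong \mathbb{F}_p[u_2] \otimes \mathbb{F}_p[v_2]$, whose Betti numbers grow linearly, so it is doubly infinite, as the theorem predicts.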
The main idea which led to this paper is to use string homology with coefficients in $\mathbb{F}_p$ to convert Theorem 1.5 into a result about string homology. The first step in this process is to use the spectral sequence of [CJY04] to relate string homology and the homology of the based loop space. The second is to use the structure theorems for elliptic Hopf algebras over $\mathbb{F}_p$ from [FHT91a] to control the input into this spectral sequence.
This paper is set out as follows. In \S2 we deal with those aspects of string homology our main results require. The primary objective in \S2 is to prove Theorem 2.1. In \S3 we give applications of Theorem 2.1. For example we explain how this theorem applies to the main examples of [McCZ]. In \S4 we summarize the results from [FHT91a] we need and complete the proof of the main theorems. Finally in \S5 we give applications of the main theorem to homogeneous spaces.
Both authors would like to acknowledge the support of the Isaac Newton Institute in Cambridge during the Grothendieck-Teichm\"uller Groups, Deformation and Operads (GDO) programme in 2013 where this project began.
\section{String homology}
In [CJY04, Theorem 1], it is shown that there is a multiplicative second quadrant spectral sequence
$(E_r^{s,t}, d_r^{s,t})$ with
$$
d_r^{s,t}\colon E_r^{s,t} \to E_r^{s-r, t+r-1},
\qquad
E_2^{s,t} = H^{-s}(M)\otimes H_t(\Omega M),
$$
and converging to $\mathrm{H}\mathrm{L}_*(M)$.
Here second quadrant means that $E_r^{s,t}$ is zero if $s > 0$ or $t < 0$. Multiplicative means that each term $E_r^{s,t}$ is a bigraded algebra, $d_r^{s,t}$ is a bigraded derivation of the product, and the $E_{\infty}$ term of the spectral sequence is the bigraded algebra associated to a filtration of $\mathrm{H}\mathrm{L}_*(M)$. The edge homomorphism $h\colon \mathrm{H}\mathrm{L}_*(M) \to E_{\infty}^{0,*} \subseteq H_*(\Omega M)$ is the natural algebra homomorphism $h\colon \mathrm{H}\mathrm{L}_*(M) \to H_*(\Omega M)$. This gives us a method of relating the algebras $H_*(\Omega M)$ and $\mathrm{H}\mathrm{L}_*(M)$.
The simplest way to think of this spectral sequence is to use the string topology spectrum $\mathcal{S}(M) = LM^{-TM}$ introduced in [CJ04]. The skeletal filtration of $M$ induces a filtration of $LM$ using the evaluation map $LM \to M$, and this in turn induces a filtration of $\mathcal{S}(M)$. The spectral sequence is the spectral sequence obtained from this filtration of $\mathcal{S}(M)$.
Our main application of this spectral sequence is the following theorem.
\begin{theorem}
Let $M$ be a closed oriented manifold. Then $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$ contains a polynomial algebra over $\mathbb{F}_p$ on $k$ generators if and only if the centre of $H_*(\Omega M; \mathbb{F}_p)$ contains a polynomial algebra over $\mathbb{F}_p$ on $k$ generators.
\end{theorem}
The first step is to prove the following lemma.
\begin{lemma}
Let $M$ be a closed manifold. The kernel of the ring homomorphism $h\colon \mathrm{H}\mathrm{L}_*(M) \to H_*(\Omega M)$ is a nilpotent ideal.
\end{lemma}
\begin{proof}
Let
$$
0 = F^{-n-1} \subseteq F^{-n} \subseteq \dots \subseteq F^0 = \mathrm{H}\mathrm{L}_*(M)
$$
be the (negatively indexed) filtration of $\mathrm{H}\mathrm{L}_*(M)$ coming from the CJY spectral sequence. Here $n$ is the dimension of the manifold $M$. Then
$$
F^{-i} F^{-j}\subseteq F^{-i-j}
$$
and so $(F^{-1})^{n+1} = 0$. The lemma follows since $F^{-1}$ is exactly the kernel of the edge homomorphism of this spectral sequence.
\end{proof}
Next we give a very simple but very useful lemma.
\begin{lemma}
Suppose $M$ is a closed, simply connected manifold of dimension $n$. Let $C$ be the centre of the algebra $H_*(\Omega M; \mathbb{F}_p)$. Then for any $x \in C$
$$
x^{p^{n-2}} \in \mathrm{im}(h\colon \mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p) \to H_*(\Omega M; \mathbb{F}_p)).
$$
\end{lemma}
\begin{proof}
Because $h$ is the edge homomorphism in the CJY spectral sequence we know that an element $y \in H_*(\Omega M; \mathbb{F}_p) = E_2^{0,*}$ is in the image of $h$ if and only if it is an infinite cycle in this spectral sequence. Let $x \in H_*(\Omega M; \mathbb{F}_p) = E_2^{0, *}$ be a central element. Now $x$ may or may not be a cycle for $d_2$ in the CJY spectral sequence. But $d_2$ is a derivation and $x$ is central so it follows that
$$
d_2 x^p = px^{p-1} d_2 x.
$$
Since the ground field is $\mathbb{F}_p$ we have that $d_2 x^p = 0$. It may or may not be the case that $x^p$ is a cycle for $d_3$ but the same argument shows that $x^{p^2} = (x^p)^p$ is a cycle for $d_3$. Because $M$ has dimension $n$, $d_r = 0$ for $r \geq n + 1$. Since $M$ is simply connected $H^1(M; \mathbb{F}_p) = H^{n-1}(M; \mathbb{F}_p) = 0$. It follows that there are at most $n-2$ differentials on $E_2^{0,*}$ that could be non-zero, starting with $d_2$. Repeating this argument at most $n-2$ times shows that $x^{p^{n-2}}\in E_2^{0,*}$ is an infinite cycle and it follows that $x^{p^{n-2}}$ is in the image of $h$.
\end{proof}
We will also need the following result of [FTV].
\begin{theorem}
The image of $h\colon \mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p) \to H_*(\Omega M; \mathbb{F}_p)$ is contained in the centre of $H_*(\Omega M; \mathbb{F}_p)$.
\end{theorem}
To prove Theorem 2.1 we simply combine the previous three results.
\begin{proof}[Proof of Theorem 2.1]
The kernel of $h\colon \mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p) \to H_*(\Omega M; \mathbb{F}_p)$ is a nilpotent ideal, and the image of $h$ is contained in the centre of $H_*(\Omega M; \mathbb{F}_p)$. Since a polynomial algebra contains no nonzero nilpotent elements, $h$ is injective on any polynomial subalgebra of $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$. So if $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$ contains a polynomial algebra on $k$ generators, then so does the centre of $H_*(\Omega M; \mathbb{F}_p)$. On the other hand, if the centre of $H_*(\Omega M; \mathbb{F}_p)$ contains the polynomial algebra $\mathbb{F}_p[x_1, \dots , x_k]$, then Lemma 2.3 shows that every element of the sub-algebra of the $E_2$-term of the CJY spectral sequence
$$
\mathbb{F}_p[(x_1)^{p^{n-2}}, \dots, (x_k)^{p^{n-2}}] \subset H_*(\Omega M; \mathbb{F}_p) = E_2^{0, *}
$$
is an infinite cycle. It follows that $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$ contains a polynomial algebra on $k$ generators.
\end{proof}
\section{Applications of Theorem 2.1}
\subsection{Sphere bundles over spheres}
Let $Q = Q_{2n, e}$ denote the sphere bundle
$$
S^{2n-1} \to Q \to S^{2n}
$$
with Euler class $e \in \mathbb{Z}$. We choose an orientation of $S^{2n}$ to identify the Euler class with an integer. We prove the following result.
\begin{proposition}
If $e \neq 0$, for any metric on $Q = Q_{2n, e}$, there is an infinite number of closed geodesics on $Q$.
\end{proposition}
Notice that a sphere bundle over a sphere $M$ not of the form $Q_{2n,e}$ with $e \neq 0$ has a rational cohomology ring that cannot be generated by one element. Therefore the theorem of Sullivan and Vigu\'e-Poirrier, Theorem 1.4, shows that any metric on $M$ has an infinite number of closed geodesics. However, $Q_{2n,e}$ is a rational homology sphere if $e \neq 0$.
\begin{proof}[Proof of Proposition 3.1]
Choose a prime $p$ such that $p$ divides $e$. Standard basic calculations in algebraic topology show that
$$
H^*(Q; \mathbb{F}_p) = E[a_{2n-1}, b_{2n}], \mbox{\rm\ and\ }
H_*(\Omega Q; \mathbb{F}_p) = P[u_{2n-2}, v_{2n-1}].
$$
Here $E$ denotes the exterior algebra over $\mathbb{F}_p$ and $P$ the polynomial algebra over $\mathbb{F}_p$. The subscripts are the degrees of the elements. If $p = 2$, then the algebra $P[u_{2n-2}, v_{2n-1}]$ is not graded commutative since $v_{2n-1}^2 \neq 0$. However the centre of $H_*(\Omega Q; \mathbb{F}_p)$ is precisely $P[u_{2n-2}, v_{2n-1}^2]$. Theorem 2.1 shows that $\mathrm{H}\mathrm{L}_*(Q)$ contains a polynomial algebra on two generators and so $H_*(LQ; \mathbb{F}_p)$ has doubly infinite dimension. The Gromoll-Meyer theorem shows that for any metric on $Q$, there is an infinite number of distinct closed geodesics.
\end{proof}
\subsection{The Grassmannian of oriented two planes in $\mathbb{R}^{2n+1}$}
Let $G_2^+(\mathbb{R}^{2n+1})$ denote the Grassmannian of oriented $2$-planes in $\mathbb{R}^{2n+1}$. Recall the following two calculations from the theory of characteristic classes.
\begin{enumerate}
\item
Suppose $2$ is a unit in the coefficient field $\mathbb{F}$. Then
$$
H^*(G_2^+(\mathbb{R}^{2n+1}); \mathbb{F}) = P[x_2]/(x_2^{2n}),
$$
\item
$$
H^*(G_2^+(\mathbb{R}^{2n+1}); \mathbb{F}_2) = P[x_2]/(x_2^{n})\otimes E(y_{2n}).
$$
\end{enumerate}
So the algebra $H^*(G_2^+(\mathbb{R}^{2n+1}); \mathbb{F}_p)$ can be generated by a single generator for $p\neq 2$, but in the
case $p = 2$ it requires at least two generators.
Another standard calculation in algebraic topology shows that
$$
H_*(\Omega G_2^+(\mathbb{R}^{2n+1}); \mathbb{F}_2) = E(u_1) \otimes P[v_{2n-2}] \otimes P[w_{2n-1}] \cong H_*(\Omega(\mathbb{C}P^n \times S^{2n}); \mathbb{F}_2).
$$
Evidently this contains a central polynomial algebra generated by two elements. The following theorem then
follows from Theorem 2.1 and the Gromoll-Meyer theorem, using $\mathbb{F}_2$ coefficients.
\begin{theorem}
Any metric on $G_2^+(\mathbb{R}^{2n+1})$ has an infinite number of closed geodesics.
\end{theorem}
\subsection{The list of examples from [McCZ]}
There is a list in [McCZ], based on the work of [O63],
consisting of one representative from each diffeomorphism class of homogeneous spaces $G/K$, where $G$ is a compact connected Lie group and $K$ is a connected closed subgroup, with two properties:
\begin{itemize}
\item
$G/K$ is not diffeomorphic to a sphere, a real, complex, or quaternionic projective space, nor is it diffeomorphic to the Cayley projective plane.
\item
The algebra $H^*(G/K; \mathbb{Q})$ is generated by one element.
\end{itemize}
In other words, it is the list of examples of homogeneous spaces to which we would like to apply the theorem of Gromoll-Meyer, but cannot do so over the ground field $\mathbb{Q}$.
This list contains two infinite families.
\begin{itemize}
\item
The Stiefel manifold $V_2(\mathbb{R}^{2n+1})$ of two frames in $\mathbb{R}^{2n+1}$. This is a $(2n-1)$-sphere bundle over $S^{2n}$ with Euler class $2$, and Proposition 3.1 shows that any metric on $V_2(\mathbb{R}^{2n+1})$ has an infinite number of geometrically distinct closed geodesics.
\item
The Grassmannian of oriented $2$-planes in $\mathbb{R}^{2n+1}$. Theorem 3.2 shows that any metric on this manifold has an infinite number of geometrically distinct closed geodesics.
\end{itemize}
There are another $7$ homogeneous spaces on this list. The first two are $SU(3)/SO(3)$ and $Sp(2)/SU(2)$, and the other $5$ are homogeneous spaces for $G_2$. It is possible to go through these $7$ examples by direct calculations with loop spaces. However, we will deal with them in \S 5 as examples of our main theorem.
\def\Hopfquot{/\hskip -3pt/}
\section{The proofs of Theorem 1.1 and Theorem 1.2}
We next need results contained in a series of inter-related papers by F\'elix, Halperin, Lemaire, and Thomas on the homology of based loop spaces. We give a brief summary of the results we need.
\subsection{Elliptic Hopf algebras}
Let $\Gamma$ be a graded Hopf algebra over the ground field $\mathbb{F}$. The {\em lower central series} of $\Gamma$ is the sequence
$$
\Gamma = \Gamma^{(0)} \supset \Gamma^{(1)} \supset \Gamma^{(2)} \supset \cdots \supset \Gamma^{(n)} \supset \cdots
$$
where
$\Gamma^{(i+1)} = [\Gamma, \Gamma^{(i)}]$.
By definition $\Gamma$ is {\em nilpotent} if $\Gamma^{(s)} = \mathbb{F}$ for some $s$. Although the definition of the $\Gamma^{(i)}$ depends only on the algebra structure of $\Gamma$, it is straightforward to check that the $\Gamma^{(i)}$ are normal sub Hopf algebras of $\Gamma$.
We say that $\Gamma$ is {\em connected} if $\Gamma_i = 0$ when $i < 0$ and $\Gamma_0 = \mathbb{F}$, and that $\Gamma$ is {\em finitely generated} if it is finitely generated as an algebra. From [FHT91a] we have the following definition.
\begin{definition}
Fix a ground field $\mathbb{F}$. A Hopf algebra $\Gamma$ over $\mathbb{F}$ is {\bf elliptic} if it is connected, co-commutative, finitely generated, and nilpotent.
\end{definition}
Note that the only part of the definition of an elliptic Hopf algebra which refers to the coproduct is the condition that it is co-commutative.
Here are some examples. In these examples we assume that the Hopf algebras in question are connected and co-commutative over a fixed ground field $\mathbb{F}$.
\begin{enumerate}
\item
If $\Gamma$ is a finite dimensional Hopf algebra, then $\Gamma$ is elliptic. To prove this, first note that since $\Gamma$ is connected, $\Gamma^{(i)}$ is $(i+1)$-connected. Since $\Gamma$ is finite dimensional, it follows that $\Gamma^{(i)} = \mathbb{F}$ for sufficiently large $i$. So $\Gamma$ is nilpotent. Since $\Gamma$ is finite dimensional, it is finitely generated.
\item
If $\Gamma$ is commutative, then $\Gamma$ is elliptic if and only if $\Gamma$ is finitely generated.
\item
Let $L$ be a Lie algebra. Let $U(L)$ be the universal enveloping algebra of $L$. This becomes a Hopf algebra by defining the coproduct to be the unique coproduct which makes the elements of $L$ primitive. Then $U(L)$ is an elliptic Hopf algebra if and only if $L$ is a finitely generated nilpotent Lie algebra.
\end{enumerate}
The structure theorem for elliptic Hopf algebras proved in [FHT91a] tells us that essentially these examples generate the class of all elliptic Hopf algebras by taking extensions.
\begin{theorem}
Let $\mathbb{F}$ be a field and let $\Gamma$ be a connected, finitely generated, co-commutative Hopf algebra over $\mathbb{F}$.
\begin{itemize}
\item
If $\mathbb{F}$ has characteristic zero, then $\Gamma$ is elliptic if and only if $\Gamma = U(L)$ where $L$ is a finitely generated, nilpotent Lie algebra over $\mathbb{F}$.
\item
If $\mathbb{F}$ has characteristic $p \neq 0$, then $\Gamma$ is elliptic if and only if it contains a finitely generated, central sub Hopf algebra $C$, such that $\Gamma\Hopfquot C$ is a finite dimensional algebra.
\end{itemize}
\end{theorem}
The statement of the second clause of the theorem is not quite the same as the statement (ii) in Theorem~B of [FHT91a], but it is easily seen to be equivalent to it. From [MM65] we know that $\Gamma$ is isomorphic to $C \otimes \Gamma\Hopfquot C$ as a $C$-algebra. Since $C$ is finitely generated and commutative, it follows from a theorem of Borel [MM65] that as an algebra $C$ is isomorphic to $P\otimes A$, where $P$ is a polynomial algebra over $\mathbb{F}$ in a finite number of variables and $A$ is a finite dimensional algebra. It follows that $\Gamma$ is isomorphic to $P \otimes A \otimes \Gamma\Hopfquot C$ as a $P$-module. Since both $A$ and $\Gamma\Hopfquot C$ are finite dimensional, it follows that $\Gamma$ is a finitely generated free module over $P$. This is the condition given in [FHT91a].
\subsection{Depth and the Gorenstein condition}
Let $A$ be a graded augmented algebra over the ground field $\mathbb{F}$. We will assume that $A$ is connected. We can form the vector spaces
$$
\mathrm{Ext}^{i,j}_A(\mathbb{F}, A).
$$
The {\em depth} of $A$, $\mathrm{depth}\, A$, is defined as follows:
$$
\mathrm{depth}\, A = \inf \{s \mid \mathrm{Ext}^{s, *}_A(\mathbb{F}, A) \neq 0 \}.
$$
If $n = \mathrm{depth}\, A$, then $\mathrm{Ext} ^{s,t}_A(\mathbb{F}, A) = 0$ for $s < n$ and there is an integer $t$ such that $\mathrm{Ext}^{n, t}_A(\mathbb{F},A) \neq 0$. In particular the depth of $A$ could be infinite, and this means that $\mathrm{Ext} ^{s,t}_A(\mathbb{F}, A) = 0$ for all $(s,t)$.
The graded algebra $A$ is {\em Gorenstein} if there is a pair of integers $(n,m)$ such that
\begin{itemize}
\item
$\mathrm{Ext}_A^{s,t}(\mathbb{F},A) = 0$ if $(s,t) \neq (n,m)$,
\item
$\mathrm{Ext}_A^{n,m}(\mathbb{F},A) = \mathbb{F}$.
\end{itemize}
The definition of depth and the Gorenstein condition first appear in classical commutative ring theory.
It is straightforward to check that,
\begin{itemize}
\item
$\mathrm{depth}\, A\otimes B = \mathrm{depth}\, A + \mathrm{depth}\, B$,
\item
$A \otimes B$ is Gorenstein if and only if both $A$ and $B$ are Gorenstein.
\end{itemize}
In the case of a polynomial algebra $\mathbb{F}[x]$ with one generator of degree $k$
$$
\mathrm{Ext}^{1,k}_{\mathbb{F}[x]}(\mathbb{F}, \mathbb{F}[x]) = \mathbb{F}, \quad \mathrm{Ext}^{s,t}_{\mathbb{F}[x]}(\mathbb{F}, \mathbb{F}[x]) = 0 \quad \mbox{\rm for } (s,t) \neq (1, k).
$$
In the case where $A = \mathbb{F}[x]/(x^n)$ is a truncated polynomial algebra with generator of degree $k$, we have
$$
\mathrm{Ext}^{0, -k(n-1)}_A(\mathbb{F}, A) = \mathbb{F}, \quad \mathrm{Ext}^{s,t}_A(\mathbb{F}, A) = 0 \quad \mbox{\rm for } (s,t) \neq (0, -k(n-1)).
$$
The most elementary method for doing these calculations is to use the minimal resolution of $\mathbb{F}$ over $\mathbb{F}[x]$ and the minimal resolution of $\mathbb{F}$ over $\mathbb{F}[x]/(x^n)$.
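In outline: over $\mathbb{F}[x]$ the minimal free resolution of $\mathbb{F}$ is
$$
0 \to \mathbb{F}[x] \stackrel{\cdot x}{\longrightarrow} \mathbb{F}[x] \to \mathbb{F} \to 0,
$$
and applying $\mathrm{Hom}_{\mathbb{F}[x]}(-,\mathbb{F}[x])$ gives the two-term complex $\mathbb{F}[x] \stackrel{\cdot x}{\longrightarrow} \mathbb{F}[x]$, whose only cohomology is the cokernel $\mathbb{F}[x]/(x) \cong \mathbb{F}$ in homological degree $1$. Over $A = \mathbb{F}[x]/(x^n)$ the minimal resolution of $\mathbb{F}$ is $2$-periodic,
$$
\cdots \stackrel{\cdot x}{\longrightarrow} A \stackrel{\cdot x^{n-1}}{\longrightarrow} A \stackrel{\cdot x}{\longrightarrow} A \to \mathbb{F} \to 0,
$$
and applying $\mathrm{Hom}_A(-,A)$ gives the complex $A \stackrel{\cdot x}{\longrightarrow} A \stackrel{\cdot x^{n-1}}{\longrightarrow} A \stackrel{\cdot x}{\longrightarrow} \cdots$, whose cohomology is the socle $(x^{n-1}) \cong \mathbb{F}$ in homological degree $0$ and vanishes in positive degrees, since there the kernel of each map coincides with the image of the preceding one. The internal degrees are the ones recorded in the displays above, up to the sign conventions used for the internal grading.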
It follows that both the algebras $\mathbb{F}[x]$ and $\mathbb{F}[x]/(x^n)$ are Gorenstein, and
$$
\mathrm{depth}\, \mathbb{F}[x] = 1, \quad \mathrm{depth}\, \mathbb{F}[x]/(x^n) = 0.
$$
The following lemma is contained in [FHT88] (see Proposition~1.7).
\begin{lemma}
Suppose $A$ is an infinite tensor product of algebras. Then the depth of $A$ is infinite.
\end{lemma}
Suppose $\Gamma$ is a connected Hopf algebra that is commutative as an algebra. By a theorem of Borel [MM65, Theorem 7.11], it follows that
$\Gamma$ is isomorphic as an algebra to a tensor product of polynomial algebras and truncated polynomial algebras. If $\Gamma$ is not finitely generated, then Lemma 4.3 shows that $\Gamma$ has infinite depth. If $\Gamma$ is finitely generated, then it has finite depth and it is isomorphic to $P \otimes A$, where $P$ is a polynomial algebra in $m = \mathrm{depth}\, \Gamma$ variables and $A$ is a finite tensor product of truncated polynomial algebras. This proves Theorem 4.2 in the case where $\Gamma$ is commutative. One way to think of the proof of Theorem 4.2 is that it works by reducing the general case to the commutative case, using the condition that $\Gamma$ is nilpotent.
The relevance of depth and Gorenstein to topology comes from results in [FHLT89] and [FHT88], which we state as the following theorem.
\begin{theorem}
Let $X$ be a simply connected finite complex.
\begin{enumerate}
\item
The Hopf algebra $H_*(\Omega X;\mathbb{F})$ has finite depth. In fact, $\mathrm{depth}\, H_*(\Omega X;\mathbb{F}) \leq \mbox{\rm LSCat}\, X$, where $\mbox{\rm LSCat}\, X$ denotes
the Lyusternik-Schnirelman category of $X$.
\item
If the Hopf algebra $H_*(\Omega X;\mathbb{F})$ is Gorenstein, then $X$ is a Poincar\'e duality space.
\end{enumerate}
\end{theorem}
In [FHT88] F\'elix, Halperin, and Thomas extend the Gorenstein condition to differential graded algebras and show that a finite complex $X$ is a Poincar\'e duality space if and only if the cochain algebra $S^*(X;\mathbb{F})$ is a Gorenstein differential graded algebra. While it is true that if $H^*(X;\mathbb{F})$ is Gorenstein then so is $S^*(X;\mathbb{F})$, the reverse implication is not true -- see Examples 3.3 of [FHT88].
If $X$ is a finite complex, then we know that $H_*(\Omega X;\mathbb{F})$ has finite type and finite depth. The following theorem gives some useful practical ways to deduce, in addition, that $H_*(\Omega X;\mathbb{F})$ is elliptic. For the proof see Theorem C of [FHT91a].
\begin{theorem}
Suppose $\Gamma$ is a connected, cocommutative Hopf algebra over $\mathbb{F}$ of finite type and $\Gamma$ has finite depth. Then the following are equivalent:
\begin{enumerate}
\item $\Gamma$ is elliptic,
\item $\Gamma$ is nilpotent,
\item $\Gamma$ has polynomial growth,
\item $\Gamma$ is Gorenstein.
\end{enumerate}
\end{theorem}
\subsection{The proof of Theorem 1.1.}
If $M$ is a closed, connected, oriented manifold of finite dimension, then $H_*(\Omega M; \mathbb{F}_p)$ is connected and cocommutative, and it has finite type and finite depth. We are assuming it has polynomial growth. It follows from Theorem 4.5 that $H_*(\Omega M; \mathbb{F}_p)$ is elliptic. Therefore, from Theorem 4.2, it is a finitely generated free module over a central sub-algebra $P$ that is a polynomial algebra on a finite number, say $l$, of variables. It follows that $H_*(\Omega M; \mathbb{F}_p)$ has polynomial growth with exponent $l$, and indeed $l$ is the minimal exponent which can occur in the inequality for polynomial growth. In the notation of Theorem 1.1, $l = K_0$. This proves Theorem 1.1.
\subsection{The proof of Theorem 1.2.}
It follows from Theorem 4.2 that if $\Gamma$ is an elliptic Hopf algebra over $\mathbb{F}_p$, then $\Gamma$ is doubly infinite if and only if the centre of $\Gamma$ contains a polynomial algebra on two generators.
Now let $M$ be a simply connected closed manifold satisfying the hypotheses of Theorem 1.2. Then, as in the proof of Theorem 1.1, it follows that $H_*(\Omega M; \mathbb{F}_p)$ is an elliptic Hopf algebra. Suppose in addition that the algebra ${H}^*(M; \mathbb{F}_p)$ cannot be generated by one element. From Theorem 1.5, it follows that $H_*(\Omega M; \mathbb{F}_p)$ is doubly infinite and so the centre of $H_*(\Omega M; \mathbb{F}_p)$ contains a polynomial algebra on two generators. By Theorem 2.1 it follows that $\mathrm{H}\mathrm{L}_*(M; \mathbb{F}_p)$ contains a polynomial algebra on two generators and therefore $H_*(LM; \mathbb{F}_p)$ is doubly infinite. The Gromoll-Meyer theorem, Theorem 1.3, completes the proof.
\section{Application to homogeneous spaces.}
The following theorem is Example 3.2 in [FHT93]:
\begin{theorem}
Let $G$ be a simply connected, compact Lie group and $K$, a connected, closed subgroup of $G$.
Then the homogeneous space $G/K$ is $\mathbb{F}_p$-elliptic for any prime $p$.
\end{theorem}
The proof uses the fibration
$$
\Omega G \to \Omega (G/K) \to K
$$
for which
the fundamental group $\pi_1(K)$ acts trivially on the groups $H_*(\Omega G; \mathbb{F}_p)$.
Then a Leray-Serre spectral sequence argument may be applied because $K$ and $\Omega G$ are both elliptic and
hence have polynomial growth.
Now return to the list from [McCZ]. The $7$ examples of homogeneous spaces in this list not covered by Proposition 3.1 and Theorem 3.2 are $\mathbb{F}_p$-elliptic spaces for any prime $p$ by Theorem 5.1. Furthermore, in each case, there is a prime $p$ such that the cohomology algebra of the homogeneous space cannot be generated by a single element. Therefore, by Theorem 1.2, any metric has an infinite number of geometrically distinct closed geodesics.
\centerline{\textsc{References}}
\noindent
[BaThZ81] W.~Ballmann, G.~Thorbergsson, W.~Ziller, Closed geodesics and the fundamental group, Duke
Math. J. {\bf 48}(1981), 585--588.
\noindent
[BngHi84] V.~Bangert, N.~Hingston, Closed geodesics on manifolds with infinite abelian fundamental group,
J. Differential Geom. {\bf 19}(1984), 277--282.
\noindent
[B56] R.~Bott, On the iteration of closed geodesics and the Sturm intersection theory,
Comm.~Pure Appl.~Math. {\bf 9}(1956), 171--206.
\noindent
[CS99] M.~Chas, D.~Sullivan, String Topology, preprint: {\tt math.GT/9911159}, 1999.
\noindent
[CJ02] R.L.~Cohen, J.D.S.~Jones, A homotopy theoretic realization of string topology, Math.~Ann.
{\bf 324} (2002), 773--798.
\noindent
[CJY04] R.L.~Cohen, J.D.S.~Jones, J.~Yan, The loop homology algebra of spheres and projective spaces,
Categorical decomposition techniques in algebraic topology (Isle of Skye 2001)
Prog.~Math {\bf 215} (2004), 77--92.
\noindent
[F00] Y.~F\'elix, Croissance polynomiale de certains espaces de lacets.
Acad. Roy. Belg. Cl. Sci. M\'em. Collect. 8o (3) 17 (2000), 79 pp.
\noindent
[FHT88] Y.~F\'elix, S.~Halperin, J.C.~Thomas, Gorenstein spaces, Adv. Math. {\bf 71}(1988), 92--112.
\noindent
[FHLT89] Y.~F\'elix, S.~Halperin, J.-M.~Lemaire, J.-C.~Thomas, Mod p loop space homology, Inv.~Math. {\bf 95}(1989),
247--262.
\noindent
[FHT91a] Y.~F\'elix, S.~Halperin, J.C.~Thomas, Elliptic Hopf algebras, J.~London Math.~Soc. (2) {\bf 43}(1991), 545--555.
\noindent
[FHT91b] Y.~F\'elix, S.~Halperin, J.-C.~Thomas, Elliptic spaces, Bull.~Amer.~Math.~Soc. {\bf 25}(1991), 69--73.
\noindent
[FHT93] Y.~F\'elix, S.~Halperin, J.-C.~Thomas, Elliptic spaces. II, Enseign.~Math. {\bf 39}(1993), 25--32.
\noindent
[FTV] Y.~F\'elix, J.C.~Thomas, M.~Vigu\'e-Poirrier, Rational string topology,
J.~Eur.~Math.~Soc. {\bf 9}(2007), 123--156.
\noindent
[GM69] D.~Gromoll, W.~Meyer, Periodic geodesics on compact Riemannian manifolds,
J.~Diff.~Geom. {\bf 3} (1969), 493--510.
\noindent
[L11] F.~Laudenbach, A note on the Chas-Sullivan product, L'Enseignement Math. {\bf 57}(2011), 1--19.
\noindent
[McC87] J.~McCleary, On the mod $p$ Betti numbers of loop spaces, Invent.~Math. {\bf 87}(1987), 643--654.
\noindent
[McCZ] J.~McCleary, W.~Ziller, On the free loop space of homogeneous spaces. Amer.~J.~Math. {\bf 109}(1987), 765--781.
Corrections to: ``On the free loop space of homogeneous spaces'' Amer.~J.~Math. {\bf 113}(1991), 375--377.
\noindent
[MM65] J.~W.~Milnor, J.~C.~Moore, On the structure of Hopf algebras, Ann.~of Math. (2) {\bf 81}(1965), 211--264.
\noindent
[O63] A.~L.~Oni\v{s}\v{c}ik, Transitive compact transformation groups, Mat.~Sb. {\bf 60}(1963), 447--485 [Russian].
\noindent
[Sp67] M.~Spivak, Spaces satisfying Poincar\'e duality, Topology {\bf 6}(1967), 77--101.
\noindent
[V-PS76] M.~Vigu\'e-Poirrier, D.~Sullivan, The homology theory of the closed geodesic problem,
J.~Diff.~Geom. {\bf 11} (1976), 633--644.
\end{document}
\begin{document}
\begin{frontmatter}
\title{A constraint-separation principle in model predictive control\thanksref{footnoteinfo}}
\thanks[footnoteinfo]{This paper was not presented at any IFAC
meeting. Corresponding author U.~V.~Kalabi\'{c}.}
\author[merl]{Uro\v{s} V.~Kalabi\'{c}}\ead{[email protected]}~
\author[umich]{Ilya V.~Kolmanovsky}\ead{[email protected]}
\address[merl]{Mitsubishi Electric Research Laboratories, 201 Broadway, Cambridge, MA 02139}
\address[umich]{Department of Aerospace Engineering, University of Michigan, 1320 Beal Avenue, Ann Arbor, MI 48109}
\begin{keyword}
Constrained systems; model predictive control; constrained control system design.
\end{keyword}
\begin{abstract}
In this brief, we consider the constrained optimization problem underpinning model predictive control (MPC). We show that this problem can be decomposed into an unconstrained optimization problem with the same cost function as the original problem and a constrained optimization problem with a modified cost function and dynamics that have been precompensated according to the solution of the unconstrained problem. In the case of linear systems subject to a quadratic cost, the unconstrained problem has the familiar LQR solution and the constrained problem reduces to a minimum-norm projection. This implies that solving linear MPC problems is equivalent to precompensating a system using LQR and applying MPC to penalize only the control input. We propose to call this a constraint-separation principle and discuss the utility of both constraint separation and general decomposition in the design of MPC schemes and the development of numerical solvers for MPC problems.
\end{abstract}
\end{frontmatter}
\section{Introduction}
Model predictive control (MPC) is an optimization-based framework for determining constraint-admissible, stabilizing control inputs to open-loop control systems \citep{rawlings_book}. Conventionally, MPC is applied according to the schematic in Fig.~\ref{fig:MPC_ol} and presented as a constraint-enforcing, feedback control scheme,
which can simultaneously stabilize a system and enforce constraints on that system.
This is in contrast to other constraint-enforcing schemes, such as reference governors \citep{rg_survey}, which are only used to enforce constraints in precompensated systems, and anti-windup schemes \citep{anti_windup} and barrier-function methods \citep{barrier_fn}, which are used to modify stabilizing control designs in order to enforce constraints.
In this paper, we propound the perspective that
MPC is not substantially different from the alternative schemes. This is because the optimization problem solved by MPC can be decomposed into two separate optimization problems, the solution to one ensuring stability of the inner-loop, and the solution to the other computing an outer-loop modification that enforces constraints.
In the case of linear systems subject to a quadratic penalty, the inner-loop compensation has the familiar closed-form solution of the discrete-time linear-quadratic regulator (LQR) \citep{dorato71} and the constraint-enforcing, outer-loop problem is reduced to a minimum-norm projection,
without a terminal cost term.
The above implies that linear MPC can be interpreted as an add-on, constraint enforcing mechanism similar to the extended command governor (ECG) \citep{gilbertong11}.
This was first shown by \citet{kouvaritakis2000} for the time-invariant case, i.e., where the dynamics are time-invariant and the terminal cost is obtained as the solution to the discrete-time, infinite-horizon LQR problem, and the benefits of this approach for numerical implementation were discussed by \citet{kouvaritakis02,rossiter_book,rossiter10}. Here we show the same result in the case of linear time-varying systems, which is important
because numerical strategies for nonlinear MPC problems often involve sequentially solving linear time-varying MPC problems.
We refer to the result as a constraint-separation principle and discuss its implications.
In addition to the above result, we consider decomposition in the general nonlinear setting. We show that constraint separation is not generally possible in the case of nonlinear MPC problems, since the initial MPC problem is decomposed into an unconstrained problem requiring a convenient closed-form solution, which does not generally exist, and another MPC problem that does not necessarily have a convenient structure. Nevertheless, we show that if the MPC problem has a locally linear-quadratic structure, then constraint separation holds locally.
The paper is structured as follows. Section 2 derives the decomposition of nonlinear MPC into stabilizing and constraint-enforcing optimization problems. Section 3 derives the constraint-separation principle for locally linear MPC problems. Section 4 discusses the implications of the constraint-separation principle. Section 5 is the conclusion.
\tikzstyle{block} = [draw, fill=white, rectangle,
minimum height=3em, minimum width=6em, font=\sffamily]
\tikzstyle{sum} = [draw, fill=white, circle]
\tikzstyle{input} = [coordinate]
\tikzstyle{output} = [coordinate]
\tikzstyle{pinstyle} = [pin edge={to-,thin,black}]
\begin{figure}
\caption{MPC applied to an open-loop plant}
\label{fig:MPC_ol}
\end{figure}
\section{MPC optimization problem}
The optimization problem we consider is given by \cite{rawlings_book},
\begin{subequations}\label{equ:mpc_prob}
\begin{align}
\min_u &~ V_f(x_N) + \sum_{k = 0}^{N-1} L_k(x_k,u_k), \label{equ:cost} \\
\text{sub.~to} &~x_{k+1} = f_k(x_k,u_k), \\
&~(x_k,u_k) \in \mathcal C_k,~\forall k \in \mathbb Z_N, \label{equ:ineq1} \\
&~x_N \in \mathcal X_N, \label{equ:ineq2}
\end{align}
\end{subequations}
where $x_0$ is given, $f_k: \mathbb R^n \times \mathbb R^m \to \mathbb R^n$ is continuous, $f_k(0,0) = 0$, and $\mathcal C_k$ is closed for all $k\in\mathbb Z_N$ where $\mathbb Z_N$ is the set of the first $N$ non-negative integers. The cost functions $V_f:\mathbb R^n \to \mathbb R$ and $L_k:\mathbb R^n \times \mathbb R^m \to \mathbb R$ are continuous and locally bounded, and satisfy the following properties,
\begin{align*}
&L_k(0,0) = V_f(0) = 0, \\
&L_k(x,u) \geq \alpha(\|u\|),~V_f(x) \geq 0,
\end{align*}
for all $x \in \mathbb R^n$, $u \in \mathbb R^m$, $k\in \mathbb Z_N$, where $\alpha$ is a $\mathcal K_\infty$-function \citep{rawlings_book}. The assumptions are required
to ensure the existence of a solution to the optimization problem.
We note that our subsequent results do not depend on other, standard assumptions found in the MPC literature \citep{rawlings_book}, like those which ensure that the solution is recursively feasible and results in a stabilizing control law.
We also note that we make no additional assumptions on the geometry of the sets $\mathcal C_k$ such as, for example, convexity.
We define the sets,
\begin{align*}
&\bar{\mathcal C}_k := \{(x,u): (x,u) \in \mathcal C_k,~f_k(x,u) \in \mathcal X_{k+1}\}, \\
&\bar{\mathcal C}_k(x) := \{u: (x,u) \in \bar{\mathcal C}_k\},
\end{align*}
for all $k \in \mathbb Z_N$, where $\mathcal X_k := \operatorname{Proj}_{\mathbb R^n}{\mathcal C}_k$.
These sets are closed for all $x \in \mathbb R^n$ due to the closedness of $\mathcal C_k$ and $\mathcal X_{k+1}$ and the continuity of $f_k$.
A sequence of control inputs $u^*$
solving \eqref{equ:mpc_prob} satisfies \citep{rawlings_book},
\begin{subequations}
\begin{equation}\label{equ:u_opt}
u_k^* \in \underset{u \in \bar{\mathcal C}_k(x_k^*)}{\arg\min}~ V_{k+1}(f_k(x_k^*,u)) + L_k(x_k^*,u), \\
\end{equation}
for all $k \in \mathbb Z_N$, with $x_{k+1}^* = f_k(x_k^*,u_k^*)$ and $x_0^* = x_0$, where $V_k$ satisfies the Bellman equation,
\begin{equation}\label{equ:bell1}
V_k(x) = \min_{u \in \bar{\mathcal C}_k(x)} V_{k+1}(f_k(x,u)) + L_k(x,u),
\end{equation}
\end{subequations}
with domain $\bar{\mathcal X}_k := \operatorname{Proj}_{\mathbb R^n}\bar{\mathcal C}_k$, and $V_N = V_f|_{\mathcal X_N}$.
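For intuition, when the state and input sets are finite the recursion \eqref{equ:bell1} can be evaluated exactly by enumeration. The following sketch is a toy example of our own, with made-up dynamics, costs, and constraint set rather than data from any application; it tabulates $V_k$ backward in time over the admissible inputs and then rolls the resulting policy forward.
\begin{verbatim}
import math

S = range(-2, 3)                 # finite state grid
U = (-1, 0, 1)                   # finite input set
N = 4                            # horizon

f = lambda x, u: x + u           # dynamics f_k
L = lambda x, u: x * x + u * u   # stage cost L_k, L(0,0)=0 and L >= u^2
Vf = lambda x: x * x             # terminal cost V_f
C = lambda x, u: abs(x) <= 2     # constraint set C_k (state box)
XN = set(S)                      # terminal set X_N

# Backward Bellman recursion over admissible inputs; states outside S are
# treated as inadmissible, which encodes f_k(x,u) lying in the next domain.
V = {x: (Vf(x) if x in XN else math.inf) for x in S}
policy = []
for k in reversed(range(N)):
    Vk, pik = {}, {}
    for x in S:
        best, best_u = math.inf, None
        for u in U:
            xn = f(x, u)
            if C(x, u) and xn in V and V[xn] < math.inf:
                val = V[xn] + L(x, u)
                if val < best:
                    best, best_u = val, u
        Vk[x], pik[x] = best, best_u
    V, policy = Vk, [pik] + policy

# Roll the optimal policy forward from an initial state.
x = 2
for k in range(N):
    u = policy[k][x]
    print("k =", k, " x =", x, " u =", u)
    x = f(x, u)
\end{verbatim}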
Consider the optimization problem \eqref{equ:mpc_prob} without inequality constraints \eqref{equ:ineq1}-\eqref{equ:ineq2}. A sequence of control inputs $\tilde u^*$
solving this problem satisfies,
\begin{subequations}
\begin{equation}\label{equ:u_opt_unc}
\tilde u_k^* \in \underset{u}{\arg\min}~ \tilde V_{k+1}(f_k(\tilde x_k^*,u)) + L_k(\tilde x_k^*,u), \\
\end{equation}
for all $k \in \mathbb Z_N$, with $\tilde x_{k+1}^* = f_k(\tilde x_k^*,\tilde u_k^*)$ and $\tilde x_0^* = x_0$, where $\tilde V_k$ satisfies the Bellman equation,
\begin{equation}
\tilde V_k(x) = \min_{u}~\tilde V_{k+1}(f_k(x,u)) + L_k(x,u),
\end{equation}
\end{subequations}
and $\tilde V_N = V_f$.
We call $\tilde V_k$ an unconstrained value function and distinguish it from the corresponding value function $V_k$.
A solution to the unconstrained problem defines a feedback law $\kappa_k:\mathbb R^n \to \mathbb R^m$ satisfying,
\begin{equation}
\kappa_k(\tilde x_k^*) = \tilde u_k^*,
\end{equation}
where $\tilde u_k^*$ is a minimizer of \eqref{equ:u_opt_unc} and can be arbitrarily chosen.
The unconstrained value function therefore satisfies,
\begin{align}
\tilde V_k(x) = \tilde V_{k+1}(f_k(x,\kappa_k(x)))+L_k(x,\kappa_k(x)).
\end{align}
Consider the optimization problem,
\begin{subequations}\label{equ:mpc_prob_mod}
\begin{align}
\min_{\hat u} &~ \sum_{k = 0}^{N-1} \Delta \tilde V_k(x_k,\hat u_k), \\
\text{sub.~to} &~x_{k+1} = \hat f_k(x_k,\hat u_k), \\
&~(x_k,\kappa_k(x_k)+\hat u_k) \in \mathcal C_k,~\forall k \in \mathbb Z_N, \label{equ:ineq1_mod} \\
&~x_N \in \mathcal X_N, \label{equ:ineq2_mod}
\end{align}
\end{subequations}
where $x_0$ is given and,
\begin{equation}\label{equ:deltaV}
\Delta \tilde V_k(x_k,\hat u_k) = \tilde V_{k+1}(\hat f_k(x_k,\hat u_k))+\hat L_k(x_k,\hat u_k)-\tilde V_k(x_k),
\end{equation}
with $\hat f_k(x,\hat u) = f_k(x,\kappa_k(x)+\hat u)$ and $\hat L_k(x,\hat u) = L_k(x,\kappa_k(x)+\hat u)$.
Note that no element used in the construction of the objective function \eqref{equ:deltaV} depends on constraints.
We are now ready to state the main result.
\begin{thm}\label{thm:1}
Let the sequence of control inputs $\hat u^*$
solve the optimization problem \eqref{equ:mpc_prob_mod}. Then the sequence of control inputs $u^*$
satisfying,
\begin{equation}
u_k^* = \kappa_k(\hat x_k^*) + \hat u_k^*,
\end{equation}
for all $k \in \mathbb Z_N$, solves the optimization problem \eqref{equ:mpc_prob},
where $\hat x_{k+1}^* = \hat f_k(\hat x_k^*,\hat u_k^*)$ and $\hat x_0^* = x_0$.
\end{thm}
\begin{pf}
The sequence $\hat u^*$
satisfies,
\begin{subequations}
\begin{equation}\label{equ:u_mod_satisfies}
\hat u_k^* \in \underset{u \in \bar{\mathcal C}_k(\hat x_k^*)-\{\kappa_k(\hat x_k^*)\}}{\arg\min} \hat V_{k+1}(\hat f_k(\hat x_k^*,u)) + \Delta \tilde V_k(\hat x_k^*,u),
\end{equation}
where $\hat V_k$ satisfies the Bellman equation,
\begin{equation}\label{equ:deltaV_Bellman}
\hat V_k(x) = \min_{u \in \bar{\mathcal C}_k(x)-\{\kappa_k(x)\}} \hat V_{k+1}(\hat f_k(x,u)) + \Delta \tilde V_k(x,u),
\end{equation}
\end{subequations}
with domain $\operatorname{Proj}_{\mathbb R^n}\left(\bar{\mathcal C}_k - \{(0,\kappa_k(x))\}\right) = \operatorname{Proj}_{\mathbb R^n}\bar{\mathcal C}_k = \bar{\mathcal X}_k$, and $\hat V_N = 0|_{\mathcal X_N}$.
Let $\check V_k = \tilde V_k + \hat V_k$,~$k\in\mathbb Z_{N+1}$. Then, according to \eqref{equ:deltaV},
\begin{equation}\label{equ:long_express}
\tilde V_k(x) + \hat V_{k+1}(\hat f_k(x,u)) + \Delta\tilde V_k(x,u) = \check V_{k+1}(\hat f_k(x,u)) + \hat L_k(x,u).
\end{equation}
According to \eqref{equ:deltaV_Bellman} and \eqref{equ:long_express},
\begin{align}
\check V_k(x) &= \min_{u \in \bar{\mathcal C}_k(x)-\{\kappa_k(x)\}} \check V_{k+1}(\hat f_k(x,u)) + \hat L_k(x,u) \nonumber \\
&= \min_{u \in \bar{\mathcal C}_k(x)-\{\kappa_k(x)\}} \check V_{k+1}(f_k(x,\kappa_k(x)+u))
+ L_k(x,\kappa_k(x)+u). \label{equ:bell_check}
\end{align}
Fix $k \in \mathbb Z_N$ and assume $\check V_{k+1} = V_{k+1}$. Comparing \eqref{equ:bell1} to \eqref{equ:bell_check}, we see that they are equivalent and therefore $\check V_k = V_k$. Since $\hat V_N = 0$, we have $\check V_N = V_N$. Therefore $\check V_k = V_k$ for all $k \in \mathbb Z_{N+1}$.
According to \eqref{equ:u_mod_satisfies} and \eqref{equ:long_express},
\begin{align}
\hat u_k^* &\in \underset{u \in \bar{\mathcal C}_k(\hat x_k^*)-\{\kappa_k(\hat x_k^*)\}}{\arg\min} \check V_{k+1}(\hat f_k(\hat x_k^*,u)) + \hat L_k(\hat x_k^*,u) \nonumber \\
&= - \kappa_k(\hat x_k^*) + \underset{u \in \bar{\mathcal C}_k(\hat x_k^*)}{\arg\min}~ V_{k+1}(f_k(\hat x_k^*,u)) + L_k(\hat x_k^*,u), \label{equ:u_express}
\end{align}
in which the second expression was obtained by performing a change of variables $\kappa_k(\hat x_k^*) + u \mapsto u$.
Fix $k \in \mathbb Z_N$ and assume $\hat x_k^* = x_k^*$. Then the minimizer expressions in \eqref{equ:u_opt} and \eqref{equ:u_express} are equivalent, implying that there exists $u_k^*$ minimizing \eqref{equ:u_opt} where $u_k^* = \kappa_k(\hat x_k^*) + \hat u_k^*$
and $\hat x_{k+1}^* = f_k(\hat x_k^*,\kappa_k(\hat x_k^*)+\hat u_k^*) = f_k(x_k^*,u_k^*) = x_{k+1}^*$. Since $\hat x_0^* = x_0 = x_0^*$, we deduce that there exists a sequence $u^*$
solving \eqref{equ:mpc_prob} and satisfying $u_k^* = \kappa_k(\hat x_k^*) + \hat u_k^*$ for all $k \in \mathbb Z_N$. \qed
\end{pf}
\section{Linear MPC optimization problem}
In practical application, the result of Theorem \ref{thm:1} is most useful in instances where there exists an analytical solution to the unconstrained version of the optimization problem \eqref{equ:mpc_prob}, or one in which the feedback law $\kappa_k(x_k)$ is conveniently parametrizable in terms of the state $x_k$. This is in particular true in the case of LQR and we consider this case in further detail by assuming a locally linear-quadratic structure to the problem \eqref{equ:mpc_prob},
\begin{equation}\label{equ:lin_approx}
\begin{split}
f_k(x,u) &= A_kx + B_ku + o(\|(x,u)\|), \\
L_k(x,u) &= \frac{1}{2}\begin{bmatrix}x^{\text T} & u^{\text T}\end{bmatrix}\begin{bmatrix}Q_k & N_k \\ N_k^{\text T} & R_k\end{bmatrix}\begin{bmatrix}x \\ u\end{bmatrix} + o(\|(x,u)\|^2), \\
V_f(x) &= \frac{1}{2}x^{\text T}P_fx + o(\|x\|^2).
\end{split}
\end{equation}
We assume $P_f$ is positive definite and introduce,
\begin{subequations}
\begin{align}
K_{k} &= -(R_{k}+B_{k}^{\text T}P_{k+1}B_{k})^{-1}(B_{k}^{\text T}P_{k+1}A_{k}+N_{k}^{\text T}), \label{equ:K} \\
P_{k} &= A_{k}^{\text T}P_{k+1}A_{k} + Q_{k} + (A_{k}^{\text T}P_{k+1}B_{k}+N_{k})K_{k}, \label{equ:P}
\end{align}
\end{subequations}
defined for $k \in \mathbb Z_N$, with $P_N = P_f$, and assume that $P_k$ and $R_{k}+B_{k}^{\text T}P_{k+1}B_{k}$ are positive definite for all $k \in \mathbb Z_N$.
\begin{lem}
Consider the optimization problem \eqref{equ:mpc_prob} and assume that $f_k$, $L_k$, and $V_f$ are of the forms given in \eqref{equ:lin_approx}. Then,
\begin{equation}
\Delta \tilde V_k(x,\hat u) = \frac{1}{2}\hat u^{\text T}(R_k + B_k^{\mathrm T}P_{k+1}B_k)\hat u + o(\|(x,\hat u)\|^2).
\end{equation}
\end{lem}
\begin{pf}
Fix $k \in \mathbb Z_N$ and assume $\tilde V_{k+1}(x) = \frac{1}{2}x^{\text T}P_{k+1}x + o(\|x\|^2)$. Then,
\begin{equation}\label{equ:linV_min}
\tilde V_{k}(x) = \min_{u} \frac{1}{2}(A_kx+B_ku)^{\text T}P_{k+1}(A_kx+B_ku) + \frac{1}{2}\begin{bmatrix}x^{\text T} & u^{\text T}\end{bmatrix}\begin{bmatrix}Q_k & N_k \\ N_k^{\text T} & R_k\end{bmatrix}\begin{bmatrix}x \\ u\end{bmatrix} + o(\|(x,u)\|^2).
\end{equation}
As shown by \cite{dorato71}, the unique minimizer of \eqref{equ:linV_min} is given by $u = K_kx + o(\|x\|)$ and,
\begin{equation}\label{equ:linV}
\tilde V_{k}(x) = \frac{1}{2}x^{\text T}P_{k}x + o(\|x\|^2).
\end{equation}
Since $\tilde V_N = V_f$, we can deduce that \eqref{equ:linV} is true for all $k \in \mathbb Z_N$. According to \eqref{equ:deltaV},
\begin{equation*}
\Delta \tilde V_k(x,\hat u) = \frac{1}{2}\begin{bmatrix}x^{\text T} & \hat u^{\text T}\end{bmatrix}
\begin{bmatrix}
\hat Q_{k} & \hat N_{k} \\ \hat N_{k}^{\text T} & \hat R_{k}
\end{bmatrix}\begin{bmatrix}x \\ \hat u\end{bmatrix} + o(\|(x,\hat u)\|^2),
\end{equation*}
where,
\begin{align*}
\hat Q_k &= \hat A_k^{\text T}P_{k+1}\hat A_k + Q_k + K_k^{\text T}N_k^{\text T} + N_kK_k + K_k^{\text T}R_kK_k - P_k, \\
\hat N_k &= \hat A_k^{\text T}P_{k+1} B_k + N_k + K_k^{\text T}R_k, \\
\hat R_k &= B_k^{\text T}P_{k+1} B_k + R_k,
\end{align*}
and $\hat A_k = A_k+B_kK_k$.
Note that \eqref{equ:K} and \eqref{equ:P} imply that $\hat N_k = 0$ and $\hat Q_k = 0$, respectively.
\qed
\end{pf}
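As a quick numerical illustration of the recursion \eqref{equ:K}--\eqref{equ:P} and of the cancellations $\hat N_k = 0$ and $\hat Q_k = 0$ established in the proof above, the following script (a sketch only; the system matrices and stage weights are randomly generated placeholders rather than data from any particular plant) runs the backward pass and checks both identities to numerical precision.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, m, N = 3, 2, 8

# Placeholder problem data: random dynamics and positive-definite stage weights.
A = [0.3 * rng.standard_normal((n, n)) for _ in range(N)]
B = [rng.standard_normal((n, m)) for _ in range(N)]
Q, Nc, R = [], [], []   # Nc holds the cross-term weights N_k
for _ in range(N):
    G = rng.standard_normal((n + m, n + m))
    W = G.T @ G + 0.1 * np.eye(n + m)
    Q.append(W[:n, :n]); Nc.append(W[:n, n:]); R.append(W[n:, n:])
Pf = np.eye(n)

# Backward recursion for the gains K_k and cost-to-go matrices P_k.
P = [None] * (N + 1); K = [None] * N
P[N] = Pf
for k in range(N - 1, -1, -1):
    S = R[k] + B[k].T @ P[k + 1] @ B[k]
    K[k] = -np.linalg.solve(S, B[k].T @ P[k + 1] @ A[k] + Nc[k].T)
    P[k] = A[k].T @ P[k + 1] @ A[k] + Q[k] + (A[k].T @ P[k + 1] @ B[k] + Nc[k]) @ K[k]

# Check that the modified stage cost has no state or cross terms.
for k in range(N):
    Ah = A[k] + B[k] @ K[k]
    Qh = (Ah.T @ P[k + 1] @ Ah + Q[k] + K[k].T @ Nc[k].T + Nc[k] @ K[k]
          + K[k].T @ R[k] @ K[k] - P[k])
    Nh = Ah.T @ P[k + 1] @ B[k] + Nc[k] + K[k].T @ R[k]
    tol = 1e-9 * (1.0 + np.linalg.norm(P[k]))
    assert np.linalg.norm(Qh) < tol and np.linalg.norm(Nh) < tol
print("hat Q_k = 0 and hat N_k = 0 for all k, as claimed.")
\end{verbatim}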
Consider the following optimization problem,
\begin{subequations}\label{equ:mpc_prob_lin}
\begin{align}
\min_{\hat u} &~ \frac{1}{2}\sum_{k = 0}^{N-1} \hat u_k^{\text T}(R_k+B_k^{\text T}P_{k+1}B_k)\hat u_k, \label{equ:cost_lin}\\
\text{sub.~to} &~x_{k+1} = \hat A_kx_k+B_k\hat u_k, \\
&~(x_k,K_kx_k+\hat u_k) \in \mathcal C_k,~\forall k \in \mathbb Z_N, \label{equ:ineq1_lin} \\
&~x_N \in \mathcal X_N, \label{equ:ineq2_lin}
\end{align}
\end{subequations}
with $x_0$ given.
The following result is a straightforward application of Theorem \ref{thm:1}.
\begin{cor}\label{cor:3}
Consider \eqref{equ:mpc_prob} and assume that $f_k$, $L_k$, and $V_f$ are of the forms given in \eqref{equ:lin_approx} with no residual terms, i.e., $o = 0$.
Let the sequence of control inputs $\hat u'$
solve \eqref{equ:mpc_prob_lin}. Then the sequence of control inputs $u'$
satisfying,
\begin{equation}
u_k' = K_k\hat x_k'+\hat u_k',
\end{equation}
for all $k\in\mathbb Z_N$, solves the optimization problem \eqref{equ:mpc_prob}, where $\hat x_{k+1}' = \hat A_k\hat x_k'+B_k\hat u_k'$ and $\hat x_0' = x_0$. \qed
\end{cor}
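To make Corollary \ref{cor:3} concrete, the following sketch applies the decomposition to a toy example of our own: a discretized double integrator with box constraints on the state and the input, with the outer problem \eqref{equ:mpc_prob_lin} posed and solved using the cvxpy modelling package. None of these modelling choices are part of the result; they only illustrate it. Note that only the offset $\hat u$ is penalized, while the state and the total input enter only through the constraints.
\begin{verbatim}
import numpy as np
import cvxpy as cp

# Toy data: discretized double integrator with box constraints (illustrative only).
n, m, N = 2, 1, 15
A = np.array([[1.0, 0.1], [0.0, 1.0]])
B = np.array([[0.005], [0.1]])
Qw, Rw, Pf = np.eye(n), 0.1 * np.eye(m), np.eye(n)
x_max, u_max = np.array([1.0, 0.5]), 2.0
x0 = np.array([0.9, 0.0])

# Backward LQR recursion (time-invariant data, time-varying gains).
P = [None] * (N + 1); K = [None] * N
P[N] = Pf
for k in range(N - 1, -1, -1):
    S = Rw + B.T @ P[k + 1] @ B
    K[k] = -np.linalg.solve(S, B.T @ P[k + 1] @ A)
    P[k] = A.T @ P[k + 1] @ A + Qw + A.T @ P[k + 1] @ B @ K[k]

# Outer problem: minimum-norm projection penalizing only the offset u_hat.
x = cp.Variable((N + 1, n))
u_hat = cp.Variable((N, m))
cost, constr = 0, [x[0] == x0]
for k in range(N):
    S = Rw + B.T @ P[k + 1] @ B
    cost += 0.5 * cp.quad_form(u_hat[k], S)
    u_k = K[k] @ x[k] + u_hat[k]          # total input applied to the plant
    constr += [x[k + 1] == A @ x[k] + B @ u_k,
               cp.abs(x[k]) <= x_max, cp.abs(u_k) <= u_max]
constr += [cp.abs(x[N]) <= x_max]
cp.Problem(cp.Minimize(cost), constr).solve()
print("norm of the constraint-enforcing offset:", np.linalg.norm(u_hat.value))
\end{verbatim}
When no constraint is active, the optimal offset is identically zero and the applied input reduces to the LQR feedback $u_k = K_k x_k$, which is precisely the content of the separation.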
\section{Constraint-separation principle}
We refer to the result of Corollary \ref{cor:3} as a constraint-separation principle, because it separates constraint enforcement from stabilization in MPC.
The result implies that any locally linear-quadratic, open-loop MPC problem can be locally restructured as a closed-loop, constraint-enforcing, minimum-norm projection problem,
where the feedback gain of the closed-loop controller is the optimal gain obtained by solving the unconstrained, open-loop MPC problem.
Importantly, this equivalence shows that linear MPC can be interpreted as an application of a constraint-enforcement scheme to an already precompensated system, an interpretation usually associated with reference and command governors \citep{rg_survey}; apart from the fact that governors act on reference commands, this is supposedly what significantly distinguishes those methods from MPC. It also shows that the extended command governor (ECG) \citep{gilbertong11}, if applied as an offset to a control input similarly to the approach taken by \citet{rossiter10}, can be viewed as a generalization of linear MPC, because the problem \eqref{equ:mpc_prob_lin} is a special case of the conventional ECG optimization with reference input set to $0$.\footnote{Details establishing the link between ECGs and MPC are available in Section 5 of \citet{gilbertong11}.}
The result also provides a method by which to choose the MPC penalty function when applying the decomposition technique to precompensated systems. For example, given a sequence of gains $K_k$, the penalty matrix should be set to $R_k+B_k^{\text T}P_{k+1}B_k$, where $R_k$ and $P_k$
satisfy the solution of an inverse LQR problem obtained, for example, using methods of \citet{kreindler72,dicairano10}. In the nonlinear case, a solution to a general, inverse optimal control problem could be used to determine a cost function for an MPC controller with the cost function $\Delta V_k$ being determined according to the results of the main theorem.
Additionally, as explored by \citet{kouvaritakis2000,kouvaritakis02,rossiter_book}, the result provides a potential simplification to numerical approaches to MPC problems. As discussed by \cite{numerical_mpc}, most conventional numerical approaches to nonlinear MPC problems either apply a sequential quadratic programming approach, where the corollary is obviously useful, or an interior point approach, where the main theorem is more useful since precompensation at least allows for better numerical conditioning as it prevents the closed-loop maps $\hat f_k$ from blowing up.
To the best of our knowledge, most solvers applied to MPC, although allowing for the option to precompensate according to the optimal feedback, e.g., \citet{mpt}, do not fully take advantage of the separation principle presented. For example, they do not decompose the local problem into what it essentially consists of: an LQR problem and a minimum-norm projection onto a closed set. They instead allow for a user to precompensate according to any feedback when the choice of optimal feedback has greater utility, as it guarantees no error when constraints are inactive.
The discussion thus far has focused on the special case where the MPC problem is locally linear-quadratic. We note that the main result is applicable more generally, as represented by the schematic in Fig.~\ref{fig:MPC_cl}, which we have shown to be equivalent to Fig.~\ref{fig:MPC_ol}. The constraint-separation principle does not necessarily hold in the general case,
when the problem is not locally linear-quadratic, e.g., when there does not exist a continuously differentiable stabilizing feedback $\kappa_k$ or the cost function $V_k$ is not continuously differentiable; in this case, it is not guaranteed that
the penalty function $\Delta \tilde V_k$ is locally independent of $x$. Nevertheless, the generality of the main result is remarkable: it can be applied whenever a solution to the unconstrained problem is known, up to a few minor technical conditions on the constraints and the penalty function. However, note that constraint enforcement does not necessarily simplify to a minimum-norm projection in this case.
Now note the practical use of decomposition in certifying MPC controllers. A decomposed controller is more straightforward to certify because an unconstrained problem is simpler than the same problem with constraints; it is therefore easier to certify the stabilizing component of the decomposed MPC problem. This makes it easier to ensure stability and attractiveness during online operation, limiting the need for a complex certification process, such as that of \citep{garoche18}, to the constraint-enforcement component.
Taken together, the above discussion suggests that it is often, if not always, desirable to decompose the design in the manner derived in this manuscript, whenever the feedback controller $\kappa_k$ and the closed-loop state update $\hat f_k$ can be effectively parametrized. This approach can, for example, improve the design of neural-net-based MPC \citep{saintdonat91} by decomposing the problem into the solution of a simpler, unconstrained, approximate dynamic programming problem \citep{bertsekas_dp}, and a more difficult, constrained optimization problem.
Of course, this discussion has only superficially considered the practical use of decomposition in nonlinear MPC. We feel, however, that it represents a promising direction for future research.
\begin{figure}
\caption{MPC decomposed into an optimal feedback controller and a constraint-enforcing scheme}
\label{fig:MPC_cl}
\end{figure}
\section{Conclusion}
In this brief, we derived a constraint-separation principle for MPC problems. The results show that MPC problems can be decomposed into the solution of an unconstrained, open-loop problem and a constrained, closed-loop problem without a terminal cost, which may simplify MPC problems when the unconstrained, stabilizing feedback can be represented explicitly by a closed-form solution or a parametrization. This is particularly true for linear MPC problems, for which the stabilizing feedback is given as the solution to the well-known LQR problem.
The result is significant because it shows that MPC design is equivalent to a two-step approach, which first stabilizes the system and then implements constraint protection in the outer loop. It is also significant because it can be used to improve numerical solutions to both linear and nonlinear MPC problems.
\end{document}
\begin{document}
\begin{abstract}
Semibiproducts of monoids are introduced here as a common generalization to biproducts (of abelian groups) and to semidirect products (of groups) for exploring a wide class of monoid extensions. More generally, abstract semi\-bi\-products exist in any concrete category over sets in which map addition is meaningful thus reinterpreting Mac Lane's relative biproducts. In the pointed case they give rise to a special class of extensions called semibiproduct extensions. Not all monoid extensions are semibiproduct extensions but all group extensions are. A categorical equivalence is established between the category of pointed semibiproducts of monoids and the category of pointed monoid action systems, a new category of actions that emerges from the equivalence. The main difference to classical extension theory is that semibiproduct extensions are treated in the same way as split extensions, even though the section map may fail to be a homomorphism. A list with all the 14 semibiproduct extensions of 2-element monoids is provided.
\end{abstract}
\keywords{Semibiproduct, biproduct, semidirect product of groups and monoids, pointed semibiproduct, semibiproduct extension, pointed monoid action system, Schreier extension}
\maketitle
\section{Introduction}
Biproducts were introduced by Mac Lane in his book \emph{Homology} to study split extensions in the context of abelian categories. Semidirect products are appropriate to study group split extensions. Although these concepts have been thoroughly developed over the last decades in the context of protomodular and semi-abelian categories \cite{BB,Bourn,BournJanelidze,semiabelian}, the notion of \emph{relative biproduct} introduced by Mac Lane to study relative split extensions seems to have been forgotten (see \cite{Homology}, p.~263). On the other hand, much work has been done in extending the tools and techniques from groups \cite{MacLane2} to monoids \cite{DB.NMF.AM.MS.13, NMF14,DB.NMF.AM.MS.16,Faul,Fleischer,Ganci,Leech,NMF et all,Wells} and even more general settings \cite{GranJanelidzeSobral}. However, as has been observed several times, it is not a straightforward task to take a well-known result in the category of groups (or any other semi-abelian category) and materialize it in the category of monoids, not to mention more general situations. We will argue that a convenient reformulation of relative biproduct (called \emph{semibiproduct}) can be used to study group and monoid extensions within a unified framework. Even though semidirect products are suitable to describe all group split extensions, they fail to capture those group extensions that do not split.
The key observation behind semibiproducts, in reinterpreting relative biproducts (see \cite{Homology}, diagram (5.2), p.~263 and compare with diagram $(\ref{diag: biproduct in a Mag-category})$ in Definition \ref{def: semibiproduct}), is that although an extension may fail to split as a monoid extension or as a group extension, it necessarily splits as an extension of pointed sets.
The main result (Theorem \ref{thm: equivalence}) establishes an equivalence of categories between pointed semibiproducts of monoids (Definition \ref{def: pointed semibiproduct of monoids}) and pointed monoid action systems (Definition \ref{def: pseudo-action}). The 14 classes of non-isomorphic pointed semibiproducts of 2-element monoids are listed in Section \ref{sec: eg}. We start with some motivation in Section \ref{sec: motivation}, introduce $\mathbf{Mag}$-categories and semibiproducts in Section \ref{Sec: Mag-categories}, restrict to the pointed case in Section~\ref{sec: stability} while studying some stability properties and pointing out some differences and similarities between groups, monoids and unitary magmas. From Section \ref{Sec: Sbp} on we work towards the main result and restrict our attention to monoids.
\section{Motivation}\label{sec: motivation}
It is well known that a split extension of groups
\[\xymatrix{X\ar[r]^k & A \ar[r]^{p} & B,} \]
with a specified section $s\colon{B\to A}$, can be completed into a diagram of the form
\[\xymatrix{X\ar@<-.5ex>[r]_{k} & A\ar@<-.5ex>@{..>}[l]_{q}\ar@<.5ex>[r]^{p} & B \ar@{->}@<.5ex>[l]^{s},}\]
in which $q\colon{A\to X}$ is the map uniquely determined by the formula $kq(a)=a-sp(a)$, $a\in A$. Furthermore, the information needed to reconstruct the split extension as a semidirect product is encoded in the map $\varphi\colon{B\times X\to X}$, uniquely determined as $\varphi(b,x)=q(s(b)+k(x))$. Writing the element $\varphi(b,x)\in X$ as $b\cdot x$, we see that $k(b\cdot x)$ is equal to $s(b)+k(x)-s(b)$ and that the group $A$ is recovered as the semidirect product $X\rtimes_{\varphi}B$. In the event that the section $s$, while being a zero-preserving map, is not a group homomorphism, the classical treatment of group extensions prescribes a different procedure (see e.g.\ \cite{Northcott}, p.~238). However, the results obtained here suggest that non-split extensions may be treated similarly to split extensions, and moreover the same approach carries over straightforwardly into the context of monoids.
Indeed, when $s$ is not a homomorphism, in addition to the map $\varphi$, we get a map $\gamma\colon{B\times B\to X}$, determined as $\gamma(b,b')=q(s(b)+s(b'))$ and the group $A$ is recovered as the set $X\times B$ with group operation
\[(x,b)+(x',b')=(x+\varphi(b,x')+\gamma(b,b'),b+b')\]
defined for every $x,x'\in X$ and $b,b'\in B$. Note that $X$ need not be commutative. However, instead of simply saying that $\varphi$ is an action and that $\gamma$ is a factor system, we have to consider two maps $\varphi$ and $\gamma$ which in conjunction turn the set $X\times B$ into a group with a prescribed operation (Section \ref{Sec: Act}). This is precisely what we call a semibiproduct of groups. Observe that when $s$ is a homomorphism, it reduces to the usual notion of semidirect product.
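As a concrete illustration of these formulas, consider the non-split extension of $\mathbb{Z}_2$ by $\mathbb{Z}_2$ given by $\mathbb{Z}_4$ (a toy example of our own, written additively), with $k(x)=2x$, $p(a)=a \bmod 2$ and the set-theoretic section $s(b)=b$, which is not a homomorphism. The short script below (a sketch only) computes $q$, $\varphi$ and $\gamma$ from the formulas above and checks that the prescribed operation on $X\times B$ reproduces the group $\mathbb{Z}_4$.
\begin{verbatim}
# The non-split group extension Z_2 -> Z_4 -> Z_2, written additively.
# k(x) = 2x, p(a) = a mod 2, set-theoretic section s(b) = b (not a homomorphism),
# and q determined by kq(a) = a - sp(a).
X, B, A = range(2), range(2), range(4)
k = lambda x: (2 * x) % 4
p = lambda a: a % 2
s = lambda b: b % 4
q = lambda a: ((a - s(p(a))) % 4) // 2

phi = {(b, x): q((s(b) + k(x)) % 4) for b in B for x in X}      # phi(b, x)
gam = {(b, b2): q((s(b) + s(b2)) % 4) for b in B for b2 in B}   # gamma(b, b')

def add(left, right):
    (x, b), (x2, b2) = left, right
    return ((x + phi[(b, x2)] + gam[(b, b2)]) % 2, (b + b2) % 2)

# The map (x, b) |-> k(x) + s(b) is a bijection X x B -> A and turns "add" into
# the addition of Z_4, so the semibiproduct recovers the extension.
iso = lambda pair: (k(pair[0]) + s(pair[1])) % 4
pairs = [(x, b) for x in X for b in B]
assert sorted(iso(c) for c in pairs) == list(A)
assert all(iso(add(c, d)) == (iso(c) + iso(d)) % 4 for c in pairs for d in pairs)
print("phi =", phi)
print("gamma =", gam)
\end{verbatim}
Here $\varphi$ turns out to be the trivial action and $\gamma(1,1)=1$ is the familiar factor set of this extension; since $s$ is not a homomorphism here, $\gamma$ is not identically zero and the semibiproduct is not a semidirect product.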
Almost every step in the treatment of groups is carried over into the context of monoids. However, while in groups all extensions are obtained as semibiproducts, in monoids we have to restrict our attention to those for which there exists a section $s$ and a retraction $q$ satisfying the condition $a=kq(a)+sp(a)$ for all $a\in A$ (Section \ref{Sec: Sbp}). Consequently, in addition to the maps $\varphi$ and $\gamma$ obtained as in groups, a new map $\rho\colon{X\times B\to X}$, determined as $\rho(x,b)=q(k(x)+s(b))$ needs to be taken into consideration. Hence, the monoid $A$ is recovered as the set $\{(x,b)\in X\times B\mid\rho(x,b)=x\}$ with operation
\begin{equation}\label{eq: operation}
(x,b)+(x',b')=(\rho(x+\varphi(b,x')+\gamma(b,b'),b+b'),b+b')
\end{equation}
which is defined for every $x,x'\in X$ and $b,b'\in B$.
\section{$\mathbf{Mag}$-categories and semibiproducts}\label{Sec: Mag-categories}
Let $\mathbf{Mag}$ denote the category of magmas and magma homomorphisms and let $U\colon{\mathbf{Mag}\to \mathbf{Set}}$ be the forgetful functor into the category of sets and maps. By a $\mathbf{Mag}$-category we mean a category $\mathbf{C}$ together with a bifunctor $\mathrm{map}\colon{\mathbf{C}^{\text{op}}\times \mathbf{C}\to \mathbf{Mag}}$ and a natural inclusion $\varepsilon\colon{\hom_{\mathbf{C}}\to U\circ \mathrm{map}}$. If $\mathbf{C}$ is a concrete category over sets in which a meaningful map addition is available then a bifunctor $\mathrm{map}$ is obtained as follows. For every pair of objects $(A,B)$ in $\mathbf{C}$, $\mathrm{map}(A,B)$ is the magma of underlying maps from object $A$ to object $B$ equipped with component-wise addition. In particular $\mathrm{map}(A,B)$ contains $\hom_{\mathbf{C}}(A,B)$ as a subset since $$\varepsilon_{A,B}\colon{\hom_{\mathbf{C}}(A,B)\to U(\mathrm{map}(A,B))}$$ is required to be a natural inclusion. As expected the category $\mathbf{Mag}$ is a $\mathbf{Mag}$-category with $\mathrm{map}(A,B)$ the magma of all maps from $U(A)$ to $U(B)$. If $f$ is a magma homomorphism from $A$ to $B$ then $\varepsilon_{A,B}(f)$ is nothing but $f$ considered as a map between the underlying sets of $A$ and $B$. In the same way the categories of groups, abelian groups, monoids and commutative monoids are $\mathbf{Mag}$-categories. However, there is a significant distinction between the Ab-category of abelian groups, the linear category of commutative monoids and the Mag-categories of groups or monoids. If $A$ is an object in an Ab-category then $\hom(A,A)$ is a ring. If $A$ is an object in a linear category then $\hom(A,A)$ is a semiring. In contrast, if $A$ is a group (or a monoid) then $\hom(A,A)$ is a subset of the near-ring $\mathrm{map}(A,A)$.
\begin{definition}\label{def: semibiproduct}
Let $(\mathbf{C},\mathrm{map},\varepsilon)$ be a $\mathbf{Mag}$-category. A \emph{semibiproduct} is a tuple $(X,A,B,p,k,q,s)$ represented as a diagram of the shape
\begin{equation}
\label{diag: biproduct in a Mag-category}
\xymatrix{X\ar@<-.5ex>[r]_{k} & A\ar@<-.5ex>@{..>}[l]_{q}\ar@<.5ex>[r]^{p} & B \ar@{..>}@<.5ex>[l]^{s}}
\end{equation}
in which $p\colon{A\to B}$ and $k\colon{X\to A}$ are morphisms in $\mathbf{C}$, whereas $q\in \mathrm{map}(A,X)$ and $s\in \mathrm{map}(B,A)$. Furthermore, the following conditions are satisfied:
\begin{eqnarray}
ps={1_B},\label{eq: biproduct1}\\ qk={1_{X}},\label{eq: biproduct2}\\
kq+sp={1_A}.\label{eq: biproduct3}
\end{eqnarray}
\end{definition}
There is an obvious abuse of notation in the previous conditions. This is justified because we will be mostly concerned with the case in which $\mathbf{C}$ is the category of monoids and $\mathrm{map}(A,B)$ is the set of zero-preserving maps. In rigorous terms, condition $ps=1_B$ should have been written as $\mathrm{map}(1_B,p)(s)=\varepsilon_{B,B}(1_B)$ whereas condition $qk=1_X$
should have been written as $\mathrm{map}(k,1_X)(q)=\varepsilon_{X,X}(1_X)$. In the same way the condition $\mathrm{map}(1_A,k)(q)+\mathrm{map}(p,1_A)(s)=\varepsilon_{A,A}(1_A)$ should have been written in the place of $kq+sp=1_A$.
For the moment we will not develop this concept further but rather concentrate our attention on the concrete cases of groups and monoids.
\section{Stability properties of pointed semibiproducts}\label{sec: stability}
From now on the category $\mathbf{C}$ is assumed to be either the category of groups or the category of monoids (occasionally we will refer to the category of unitary magmas) and $\mathrm{map}(A,B)$ is the magma of zero-preserving maps with component-wise addition.
In each case the category $\mathbf{C}$ is pointed and the composition of maps is well defined. It will be convenient to consider \emph{pointed semibiproducts} by requiring two further conditions, namely
\begin{eqnarray}
pk=0_{X,B},\quad qs=0_{B,X}.
\end{eqnarray}
However, as it is well known, in the case of groups this distinction is irrelevant.
\begin{proposition} Every semibiproduct of groups is pointed.
\end{proposition}
\begin{proof}
We have $pk=p1_Ak=p(kq+sp)k=pkqk+pspk=pk+pk$, and $s=1_As=(kq+sp)s=kqs+sps=kqs+s$. Hence we may conclude that $pk=0$ and $kqs=0$. Since $k$ is a monomorphism, $qs=0$.
\end{proof}
The previous proof also shows that a semibiproduct of monoids is pointed as soon as the monoid $A$ admits right cancellation. Clearly, this is not a general fact.
\begin{proposition}
Let $A$ be a monoid. The tuple $(A,A,A,1_A,1_A,1_A,1_A)$ is a semibiproduct of monoids if and only if $A$ is an idempotent monoid.
\end{proposition}
\begin{proof}
Condition $(\ref{eq: biproduct3})$ in this case becomes $a=a+a$ for all $a\in A$.
\end{proof}
Every pointed semibiproduct of monoids has an underlying exact sequence.
\begin{proposition}\label{thm:kernel of p}\label{thm:cokernel of k}
Let $(X,A,B,p,k,q,s)$ be a pointed semibiproduct of monoids. The sequence
\[\xymatrix{X\ar[r]^k & A \ar[r]^{p} & B} \] is an exact sequence.
\end{proposition}
\begin{proof}
Let $f\colon{Z\to A}$ be a morphism such that $pf=0$. Then the map $\bar{f}=qf$ is a homomorphism
\begin{eqnarray*}
qf(z+z')&=&q(fz+fz')=q(kqf(z)+spf(z)+kqf(z')+spf(z'))\\
&=& q(kqf(z)+0+kqf(z')+0)\\
&=& qk(qf(z)+qf(z'))=qf(z)+qf(z')
\end{eqnarray*}
and it satisfies $k\bar{f}=f$ because $kqf=kqf+spf=(kq+sp)f=f$, where $spf=0$ since $pf=0$ and $s$ preserves zero. Moreover, $\bar{f}$ is unique with the property $k\bar{f}=f$: indeed, if $k\bar{f}=f$ then $qk\bar{f}=qf$ and hence $\bar{f}=qf$. This means that $k$ is the kernel of $p$.
Let $g\colon{A\to Y}$ be a morphism and suppose that $gk=0$. It follows that $g=gsp$,
\begin{equation*}
g=g1_A=g(kq+sp)=gkq+gsp=0+gsp=gsp,
\end{equation*}
and consequently the map $\bar{g}=gs$ is a homomorphism, indeed
\begin{eqnarray*}
gs(b)+gs(b')=g(sb+sb')=gsp(sb+sb')=gs(b+b').
\end{eqnarray*}
To see that $\bar{g}=gs$ is the unique morphism with the property $\bar{g}p=g$, note that $gsp=g$ was shown above, and that any $\bar{g}$ with $\bar{g}p=g$ satisfies $\bar{g}=\bar{g}ps=gs$. Hence $p$ is the cokernel of $k$ and the sequence is exact.
\end{proof}
The following results show that pointed semibiproducts are stable under pullback and in particular split semibiproducts of monoids are stable under composition.
\begin{proposition}\label{thm:stable under pullback} Pointed semibiproducts of monoids are stable under pullback.
\end{proposition}
\begin{proof}
Let $(X,A,B,p,k,q,s)$ be a pointed semibiproduct of monoids displayed as the bottom row in the following diagram which is obtained by taking the pullback of $p$ along an arbitrary morphism $h\colon{C\to B}$, with induced morphism $\langle k,0 \rangle$ and map $\langle sh,1 \rangle$,
\begin{eqnarray}
\vcenter{\xymatrix{X\ar@<-.5ex>[r]_(.35){\langle k,0 \rangle}\ar@{=}[d]_{} & A\times_B C\ar@{->}@<0ex>[d]^{\pi_1}\ar@<-.5ex>@{..>}[l]_(.6){q\pi_1}\ar@<.5ex>[r]^(.6){\pi_2} & C\ar@{->}[d]^{h} \ar@{->}@<.5ex>@{..>}[l]^(.35){\langle sh,1\rangle}\\
X\ar@<-.5ex>[r]_{k} & A \ar@<-.5ex>@{..>}[l]_{q}\ar@<.5ex>[r]^{p} & B \ar@{->}@<.5ex>@{..>}[l]^{s}.}}
\end{eqnarray}
We have to show that the top row is a pointed semibiproduct of monoids. By construction we have $\pi_2\langle sh,1\rangle=1_C$, $\pi_2\langle k,0\rangle=0$, $q\pi_1\langle sh,1\rangle=qsh=0$, $q\pi_1\langle k,0\rangle=qk=1_X$. It remains to prove the identity
\[(a,c)=(kq(a),0)+(sh(c),c)=(kq(a)+sh(c),c)\]
for every $a\in A$ and $c\in C$ with $p(a)=h(c)$, which follows from $a=kq(a)+sp(a)=kq(a)+sh(c)$.
\end{proof}
The previous results are stated at the level of monoids but are easily extended to unitary magmas. The particular case of semidirect products has been considered in \cite{GranJanelidzeSobral} and the notion of composable pair of pointed semibiproducts is borrowed from there.
We say that a pointed semibiproduct $(X,A,B,p,k,q,s)$ can be composed with a pointed semibiproduct $(C,B,D,p',k',q',s')$ if the tuple $$(A\times_B C,A,D,p'p,\pi_1,q'',ss'),$$ in which $q''$ is such that $\pi_1q''=kq+sk'q'p$ and $\pi_2q''=q'p$, is a pointed semibiproduct.
Note that in the case of groups the map $q$ is uniquely determined as $q(a)=a-sp(a)$ for all $a\in A$. However this is not the case for monoids nor for unitary magmas.
\begin{proposition}
A pointed semibiproduct of monoids $(X,A,B,p,k,q,s)$ can be composed with $(C,B,D,p',k',q',s')$, another pointed semibiproduct of monoids, if and only if the map $s$ is equal to the map $sk'q'+ss'p'$.
\end{proposition}
\begin{proof}
Let us observe that the tuple $(A\times_B C,A,D,p'p,\pi_1,q'',ss')$ is a pointed semibiproduct if and only if $\pi_1q''+ss'p'p=1_A$.
Indeed, the kernel of the composite $p'p$ is obtained by taking the pullback of $p$ along $k'$, the kernel of $p'$, as illustrated
\begin{eqnarray}
\vcenter{\xymatrix{ & A\times_B C\ar@{->}@<0ex>[d]_{\pi_1}\ar@<.5ex>[r]^(.6){\pi_2} & C\ar@{->}[d]_{k'} \ar@{->}@<.5ex>@{..>}[l]^(.35){\langle sk',1\rangle}\\
X\ar@<-.5ex>[r]_{k} & A \ar@<-.5ex>@{..>}[l]_{q}\ar@<.5ex>[r]^{p} \ar[d]_{p'p} & B \ar@{->}@<.5ex>@{..>}[l]^{s} \ar[d]_{p'}\\
& D\ar@{=}[r] & D.}}
\end{eqnarray}
In order to obtain a pointed semibiproduct we complete the diagram with a map $q''$ such that $\pi_1q''=kq+sk'q'p$ and $\pi_2q''=q'p$, as illustrated
\begin{eqnarray}
\vcenter{\xymatrix{ & A\times_B C\ar@{->}@<-0.5ex>[d]_{\pi_1}\ar@<.5ex>[r]^(.6){\pi_2} & C\ar@{->}[d]_{k'} \ar@{->}@<.5ex>@{..>}[l]^(.35){\langle sk',1\rangle}\\
X\ar@<-.5ex>[r]_{k} & A \ar@<-.5ex>@{..>}[l]_{q} \ar@<-.5ex>@{..>}[u]_{q''} \ar@<.5ex>[r]^{p} \ar@<-.5ex>[d]_{p'p} & B \ar@{->}@<.5ex>@{..>}[l]^{s} \ar[d]_{p'}\\
& D \ar@<-.5ex>@{..>}[u]_{ss'} \ar@{=}[r] & D.}}
\end{eqnarray}
The map $q''$ is well defined since $p(kq+sk'q'p)=pkq+psk'q'p=k'q'p$. Moreover, $p'pss'=1_D$, $p'p\pi_1=p'k'\pi_2=0$, $q''ss'=0$ and we observe
\begin{eqnarray*}
q''\pi_1 &=&\langle kq+sk'q'p,q'p \rangle \pi_1\\
&=&\langle kq\pi_1+sk'q'p\pi_1,q'p\pi_1 \rangle \\
&=&\langle kq\pi_1+sk'q'k'\pi_2,q'k'\pi_2 \rangle \\
&=&\langle kq\pi_1+sp\pi_1,\pi_2 \rangle \\
&=&\langle \pi_1,\pi_2 \rangle =1_{A\times_B C}.
\end{eqnarray*}
It remains to analyse the condition $\pi_1q''+ss'p'p=1_A$. If $s=sk'q'+ss'p'$ then $sk'q'p+ss'p'p=sp$, so $\pi_1q''+ss'p'p=kq+sp=1_A$. Conversely, if $\pi_1q''+ss'p'p=1_A$ then $kq+sk'q'p+ss'p'p=1_A$; composing with $s$ on the right and using $kqs=0$ and $ps=1_B$ gives $sk'q'+ss'p'=s$.
\end{proof}
Note that associativity is used to convert $(kq+sk'q'p)+ss'p'p$ into $kq+(sk'q'p+ss'p'p)$. Moreover, if the map $s$ is a homomorphism then the condition $s=sk'q'+ss'p'$ holds automatically, since $sk'q'+ss'p'=s(k'q'+s'p')=s1_B=s$. A pointed semibiproduct $(X,A,B,p,k,q,s)$ in which the map $s$ is a homomorphism is called a pointed split semibiproduct.
This means that pointed split semibiproducts of monoids are stable under composition.
\section{The category of pointed semibiproducts of monoids}\label{Sec: Sbp}
The purpose of this section is to introduce the category of pointed semibiproducts of monoids, denoted $\mathbf{Psb}$.
\begin{definition}\label{def: pointed semibiproduct of monoids}
A \emph{pointed semibiproduct of monoids} consists of
a tuple $(X,A,B,p,k,q,s)$ that can also be represented as a diagram of the shape
\begin{equation}
\label{diag: biproduct}
\xymatrix{X\ar@<-.5ex>[r]_{k} & A\ar@<-.5ex>@{..>}[l]_{q}\ar@<.5ex>[r]^{p} & B \ar@{..>}@<.5ex>[l]^{s}}
\end{equation}
in which $X$, $A$ and $B$ are monoids (not necessarily commutative), $p$, $k$, are monoid homomorphisms, while $q$ and $s$ are zero-preserving maps. Moreover, the following conditions are satisfied:
\begin{eqnarray}
ps&=&1_B\\
qk&=&1_X\\
kq+sp&=&1_A\\
pk&=&0_{X,B}\\
qs&=&0_{B,X}.
\end{eqnarray}
\end{definition}
A morphism in $\mathbf{Psb}$, from the object $(X,A,B,p,k,q,s)$ to the object $(X',A',B',p',k',q',s')$, is a triple $(f_1,f_2,f_3)$, displayed as
\begin{equation}\label{diag:morphism of semi-biproduct}
\vcenter{\xymatrix{X\ar@<-.5ex>[r]_{k}\ar@{->}[d]_{f_1} & A\ar@{->}@<0ex>[d]^{f_2}\ar@<-.5ex>@{..>}[l]_{q}\ar@<.5ex>[r]^{p} & B\ar@{->}[d]^{f_3} \ar@{->}@<.5ex>@{..>}[l]^{s}\\
X'\ar@<-.5ex>[r]_{k'} & A' \ar@<-.5ex>@{..>}[l]_{q'}\ar@<.5ex>[r]^{p'} & B' \ar@{->}@<.5ex>@{..>}[l]^{s'}}}
\end{equation}
in which $f_1$, $f_2$ and $f_3$ are monoid homomorphisms and moreover the following conditions are satisfied: $f_2k=k'f_1$, $p'f_2=f_3p$, $f_2s=s'f_3$, $q'f_2=f_1q$.
\begin{theorem}\label{thm: a+a' and associativity}
Let $(X,A,B,p,k,q,s)$ be a pointed semibiproduct of monoids. For every $a,a'\in A$ the element $a+a'\in A$ can be written in terms of $q(a)$, $q(a')$, $p(a)$ and $p(a')$ as
\begin{equation}\label{eq: a+a'}
k(q(a)+q(sp(a)+ kq(a'))+q(sp(a)+ sp(a')))+s(p(a)+p(a')).
\end{equation}
\end{theorem}
\begin{proof}
We observe:
\begin{eqnarray*}
a+a'&=& kqa+(spa+kqa')+spa' \qquad(kq+sp=1)\\
&=& kqa+kq(spa+kqa')+sp(spa+kqa')+spa' \\
&=& kqa+kq(spa+kqa')+spa+spa' \qquad( ps=1,pk=0)\\
&=& kqa+kq(spa+kqa')+kq(spa+spa')+sp(spa+spa')\\
&=& kqa+kq(spa+kqa')+kq(spa+spa')+s(pa+pa')\\
&=& k(qa+q(spa+kqa')+q(spa+spa'))+sp(a+a').
\end{eqnarray*}
\end{proof}
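As a concrete illustration (a small sketch, not taken from the text), the following Python snippet checks formula $(\ref{eq: a+a'})$ in the pointed semibiproduct $\mathbb{Z}_2\to\mathbb{Z}_4\to\mathbb{Z}_2$ of abelian groups, viewed as monoids, with $k(x)=2x$, $p(a)=a\bmod 2$, $s(b)=b$ and $q(a)=(a-s(p(a)))/2$; here $q$ and $s$ are zero-preserving maps but not homomorphisms.
\begin{verbatim}
from itertools import product

addA = lambda a, b: (a + b) % 4      # A = Z_4
addX = lambda x, y: (x + y) % 2      # X = Z_2
addB = lambda x, y: (x + y) % 2      # B = Z_2

def p(a): return a % 2               # homomorphism A -> B
def k(x): return (2 * x) % 4         # homomorphism X -> A
def s(b): return b                   # zero-preserving section of p
def q(a): return (a - s(p(a))) // 2  # zero-preserving retraction of k

# the pointed semibiproduct identities
assert all(p(s(b)) == b for b in range(2))
assert all(q(k(x)) == x for x in range(2))
assert all(addA(k(q(a)), s(p(a))) == a for a in range(4))
assert all(p(k(x)) == 0 for x in range(2))
assert all(q(s(b)) == 0 for b in range(2))

# formula (eq: a+a') expressing a + a' through q and p
for a, a2 in product(range(4), repeat=2):
    x = addX(q(a), addX(q(addA(s(p(a)), k(q(a2)))),
                        q(addA(s(p(a)), s(p(a2))))))
    assert addA(k(x), s(addB(p(a), p(a2)))) == addA(a, a2)
print("formula verified on Z_4")
\end{verbatim}
Since $\mathbb{Z}_4$ is a non-split extension of $\mathbb{Z}_2$ by $\mathbb{Z}_2$, the map $s$ cannot be chosen to be a homomorphism, yet the formula reconstructs the addition of $A=\mathbb{Z}_4$ from $q$ and $p$ as in Theorem \ref{thm: a+a' and associativity}.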
The previous result suggests a transport of structure from the monoid $A$ to the set $X\times B$, as motivated by formula $(\ref{eq: operation})$ in Section \ref{sec: motivation}. However, as we will see, in order to keep an isomorphism with $A$ we need to restrict the set $X\times B$ to those pairs $(x,b)$ for which there exists $a\in A$ such that $x=q(a)$ and $b=p(a)$.
\section{The category of pointed monoid action systems}\label{Sec: Act}
The purpose of this section is to introduce the category of pointed monoid action systems, which will be denoted as $\mathbf{A}ct$. This category is obtained by requiring the existence of a categorical equivalence between $\mathbf{A}ct$ and $\mathbf{Psb}$ (see Theorem \ref{thm: equivalence}).
\begin{definition}\label{def: pseudo-action}
A \emph{pointed monoid action system} is a five-tuple $$(X,B,\rho,\varphi,\gamma)$$ in which $X$ and $B$ are monoids, $\rho\colon{X\times B\to X}$, $\varphi\colon{B\times X\to X}$, $\gamma\colon{B\times B\to X}$ are maps such that the following conditions are satisfied for every $x\in X$ and $b,b'\in B$:
\begin{eqnarray}
\rho(x,0)=x,\quad \rho(0,b)=0\label{eq:act01}\\
\varphi(0,x)=x,\quad \varphi(b,0)=0\label{eq:act02}\\
\gamma(b,0)=0=\gamma(0,b)\label{eq:act03}\\
\rho(x,b)=\rho(\rho(x,b),b)\label{eq:act04}\\
\varphi(b,x)=\rho(\varphi(b,x),b)\label{eq:act05}\\
\gamma(b,b')=\rho(\gamma(b,b'),b+b')\label{eq:act06}
\end{eqnarray}
and moreover the following condition holds for every $x,x',x''\in X$ and $b,b',b''\in B$,
\begin{eqnarray}\label{eq:act07}
\rho(\rho(x+\varphi(b,x')+\gamma(b,b'),b+b')+\varphi(b+b',x'')+\gamma(b+b',b''),b''')=\nonumber\\
=\rho(x+\varphi(b,\rho(x'+\varphi(b', x'') + \gamma(b', b''),{b'+b''}))+\gamma(b, b'+b''),{b'''})\quad
\end{eqnarray}
where $b'''=b+b'+b''$.
\end{definition}
A morphism of pointed monoid action systems, say from $(X,B,\rho,\varphi,\gamma)$ to $(X',B',\rho',\varphi',\gamma')$ is a pair $(f,g)$ of monoid homomorphisms, with $f\colon{X\to X'}$ and $g\colon{B\to B'}$ such that for every $x\in X$ and $b,b'\in B$
\begin{eqnarray}
f(\rho(x,b))&=&\rho'(f(x),{g(b)}),\label{eq:act08}\\
f(\varphi(b, x))&=&\varphi'(g(b), {f(x)}),\label{eq:act09}\\
f(\gamma(b, b'))&=&\gamma'(g(b), g(b')).\label{eq:act10}
\end{eqnarray}
\begin{theorem}\label{thm: functor R syntehic construction}
There exists a functor $R\colon{\mathbf{A}ct\to \mathbf{Mon}}$ such that for every morphism in $\mathbf{A}ct$, say $(f,g)\colon{(X,B,\rho,\varphi,\gamma)\to (X',B',\rho',\varphi',\gamma')}$, the diagram
\begin{equation}\label{diag:semi-biproduct with R}
\xymatrix{X\ar@<-.5ex>[r]_(.3){\langle 1,0\rangle}\ar@{->}[d]_{f} & R(X,B,\rho,\varphi,\gamma)\ar@{->}@<.0ex>[d]^{R(f,g)}\ar@<-.5ex>@{..>}[l]_(.7){\pi_X}\ar@<.5ex>[r]^(.7){\pi_B} & B\ar@{->}[d]^{g} \ar@{->}@<.5ex>@{..>}[l]^(.3){\langle 0,1\rangle}\\
X'\ar@<-.5ex>[r]_(.3){\langle 1,0 \rangle} & R{(X',B',{\rho',\varphi',\gamma'})} \ar@<-.5ex>@{..>}[l]_(.7){\pi_{X}}\ar@<.5ex>[r]^(.7){\pi_B} & B' \ar@{->}@<.5ex>@{..>}[l]^(.3){{\langle 0 ,1 \rangle}}}
\end{equation}
is a morphism in $\mathbf{Psb}$.
\end{theorem}
The functor $R$ realizes a pointed monoid action system $(X,B,\rho,\varphi,\gamma)$ as a synthetic semibiproduct diagram
\begin{equation}\label{diag:synthetic semi-biproduct}
\xymatrix{X\ar@<-.5ex>[r]_(.5){\langle 1,0\rangle} & R\ar@<-.5ex>@{..>}[l]_(.5){\pi_X}\ar@<.5ex>[r]^(.5){\pi_B} & B \ar@{..>}@<.5ex>[l]^(.5){\langle 0,1\rangle}}
\end{equation}
in which $R=R(X,B,{\rho,\varphi,\gamma})=\{(x,b)\in X\times B\mid \rho(x,b)=x\}$ is equipped with the binary synthetic operation
\begin{equation}\label{eq: semibiproduct sunthetic operation}
(x,b)+(x',b')=(\rho(x+\varphi(b,x')+\gamma(b,b'),b+b'),b+b')
\end{equation}
which is well defined for every $x,x'\in X$ and $b,b'\in B$ due to condition $(\ref{eq:act04})$ and is associative due to condition $(\ref{eq:act07})$. It is clear that $\pi_B$ is a monoid homomorphism and due to conditions $(\ref{eq:act01})$--$(\ref{eq:act03})$ we see that the maps $\langle 1,0 \rangle $ and $\langle 0 ,1\rangle $ are well defined and moreover $\langle 1,0\rangle $ is a monoid homomorphism. Finally, we observe that a pair $(x,b)\in X\times B$ is in $R$ if and only if $(x,b)=(x,0)+(0,b)$. Further details can be found in the preprint \cite{NMF.20a-of}.
\section{The equivalence}
In order to establish a categorical equivalence between $\mathbf{A}ct$ and $\mathbf{Psb}$ we need a procedure to associate a pointed monoid action system to every pointed semibiproduct of monoids in a functorial manner.
\begin{theorem}\label{thm: pseudo-actions}
Let $(X,A,B,p,k,q,s)$ be an object in $\mathbf{Psb}$. The system $(X,B,\rho,\varphi,\gamma)$ with
\begin{eqnarray}
\rho(x,b)=q(k(x)+s(b))\\
\varphi(b,x)=q(s(b)+k(x))\\
\gamma(b,b')=q(s(b)+s(b'))
\end{eqnarray}
is an object in $\mathbf{A}ct$.
Moreover, if $(f_1,f_2,f_3)$ is a morphism in $\mathbf{Psb}$ then $(f_1,f_3)$ is a morphism in $\mathbf{A}ct$.
\end{theorem}
\begin{proof}
To see that the system $(X,B,\rho,\varphi,\gamma)$ is a well defined object in $\mathbf{A}ct$ we recall that $q$ and $s$ are zero-preserving maps and hence conditions $(\ref{eq:act01})$--$(\ref{eq:act03})$ are satisfied. Conditions $(\ref{eq:act04})$--$(\ref{eq:act06})$ are obtained by applying the map $q$ to both sides of equations
\begin{eqnarray*}
k(x)+s(b)=kq(k(x)+s(b))+s(b)\\
s(b)+k(x)=kq(s(b)+k(x))+s(b)\\
s(b)+s(b')=kq(s(b)+s(b'))+s(b+b')
\end{eqnarray*}
which hold because $(X,A,B,p,k,q,s)$ is a pointed semibiproduct of monoids. Condition $(\ref{eq:act07})$ follows from Theorem \ref{thm: a+a' and associativity} applied with $a=k(x)+s(b)$ and $a'=k(x')+s(b')+k(x'')+s(b'')$ on the one hand, and with $a=k(x)+s(b)+k(x')+s(b')$ and $a'=k(x'')+s(b'')$ on the other. Moreover, the pair $(f_1,f_3)$ is a morphism of actions as soon as the triple $(f_1,f_2,f_3)$ is a morphism of semibiproducts; indeed, we have
\begin{eqnarray*}
f_1(\rho(x,b))&=&f_1q(k(x)+s(b))=q'f_2(k(x)+s(b))\\
&=&q'(k'f_1(x)+s'f_3(b))=\rho'(f_1(x),f_3(b))
\end{eqnarray*}
and similarly for $\varphi$ and $\gamma$ thus proving conditions $(\ref{eq:act08})$--$(\ref{eq:act10})$.
\end{proof}
The previous result describes a functor from the category of pointed semibipro\-ducts of monoids into the category of pointed monoid action systems; let us denote it by $P\colon{\mathbf{Psb}\to\mathbf{A}ct}$. The synthetic construction of Theorem \ref{thm: functor R syntehic construction} produces a functor in the other direction, which we denote by $Q\colon{\mathbf{A}ct\to\mathbf{Psb}}$. We will see that $PQ=1$ whereas $QP\cong 1$.
\begin{theorem}\label{thm: equivalence}
The categories $\mathbf{Psb}$ and $\mathbf{A}ct$ are equivalent.
\end{theorem}
\begin{proof}
Theorem \ref{thm: pseudo-actions} tells us that $P(X,A,B,p,k,q,s)=(X,B,\rho,\varphi,\gamma)$ and $P(f_1,f_2,f_3)=(f_1,f_3)$ is a functor from $\mathbf{Psb}$ to $\mathbf{A}ct$ whereas Theorem \ref{thm: functor R syntehic construction} gives a functor $Q$ in the other direction. It is clear that $Q(X,B,\rho,\varphi,\gamma)=(X,R,B,\pi_B,\langle 1,0\rangle,\pi_X,\langle 0,1\rangle)$ is the synthetic realization displayed in $(\ref{diag:synthetic semi-biproduct})$ and hence it is a pointed semibiproduct. Moreover $Q(f,g)=(f,R(f,g),g)$ with $R(f,g)$ illustrated as in $(\ref{diag:semi-biproduct with R})$ and defined as $R(f,g)(x,b)=(f(x),g(b))$ is clearly a morphism of semibiproducts.
We observe that $PQ(X,B,\rho,\varphi,\gamma)=(X,B,\rho,\varphi,\gamma)$ due to conditions $(\ref{eq:act05})$ and $(\ref{eq:act06})$. This proves $PQ=1$. In order to prove $QP\cong 1$ we need to specify natural isomorphisms $\alpha$ and $\beta$ as illustrated
\begin{equation}
\vcenter{\xymatrix{A\ar@<.5ex>[r]^(.3){\alpha_A}\ar[d]_{f_2} & RP(X,A,B,p,k,q,s)\ar@<.5ex>[l]^(.7){\beta_A}\ar[d]^{R(f_1,f_3)}\\
A' \ar@<.5ex>[r]^(.3){\alpha_{A'}} & RP(X',A',B',p',k',q',s')\ar@<.5ex>[l]^(.7){\beta_{A'}} }}
\end{equation}
and show that they are compatible with diagrams $(\ref{diag:morphism of semi-biproduct})$ and $(\ref{diag:semi-biproduct with R})$. Indeed it is a routine calculation to check that $\alpha(a)=(q(a),p(a))$ and $\beta(x,b)=k(x)+s(b)$ are well defined natural isomorphisms compatible with semibiproducts. Further details can be found in the preprint \cite{NMF.20a-of}.
\end{proof}
\section{Examples}\label{sec: eg}
Several examples can be found in the preprint \cite{NMF.20a-of}. Here we list all the possible pointed semibiproducts of monoids $(X,A,B,p,k,q,s)$ in which $X$ and $B$ are monoids with two elements. This particular case is interesting because it gives a simple list with all the possible components of an action system $(X,B,\rho,\varphi,\gamma)$. The equivalence of Theorem \ref{thm: equivalence} then gives us an easy way of checking all the possibilities. Let us denote by $M$ and $G$ the two monoids with two elements, with $M$ the idempotent monoid and $G$ the group, both expressed in terms of multiplication tables as
\[M=\begin{pmatrix}
1 & 2 \\
2 & 2
\end{pmatrix},
\quad
G=\begin{pmatrix}
1 & 2 \\
2 & 1
\end{pmatrix}.\]
Note that we are using multiplicative notation so that $2\cdot 2=2$ in $M$, whereas in $G$ we have $2\cdot 2=1$. Due to restrictions $(\ref{eq:act01})$--$(\ref{eq:act03})$ we have the following two possibilities for each component $\rho$, $\varphi$ and $\gamma$:
\[\rho_0=\begin{pmatrix}
1 & 1 \\
2 & 2
\end{pmatrix}
,
\quad
\rho_1=\begin{pmatrix}
1 & 1 \\
2 & 1
\end{pmatrix},\]
\[\varphi_0=\begin{pmatrix}
1 & 2 \\
1 & 2
\end{pmatrix},
\quad
\varphi_1=\begin{pmatrix}
1 & 2 \\
1 & 1
\end{pmatrix},\]
\[\gamma_0=\begin{pmatrix}
1 & 1 \\
1 & 1
\end{pmatrix},
\quad
\gamma_1=\begin{pmatrix}
1 & 1 \\
1 & 2
\end{pmatrix}.\]
The following list shows, via the equivalence of Theorem \ref{thm: equivalence}, all 14 possible pointed semibiproducts of monoids $(X,A,B,p,k,q,s)$ in which $X$ and $B$ are either $M$ or $G$.
\begin{multicols}{2}
\begin{enumerate}
\item $(G,G,\rho_0,\varphi_0,\gamma_0)$
\item $(G,G,\rho_0,\varphi_0,\gamma_1)$
\item $(G,M,\rho_0,\varphi_0,\gamma_0)$
\item $(G,M,\rho_0,\varphi_0,\gamma_1)$
\item $(G,M,\rho_0,\varphi_1,\gamma_0)$
\item $(G,M,\rho_1,\varphi_1,\gamma_0)$
\item $(M,G,\rho_0,\varphi_0,\gamma_0)$
\item $(M,G,\rho_0,\varphi_0,\gamma_1)$
\item $(M,G,\rho_1,\varphi_1,\gamma_1)$
\item $(M,M,\rho_0,\varphi_0,\gamma_0)$
\item $(M,M,\rho_0,\varphi_0,\gamma_1)$
\item $(M,M,\rho_0,\varphi_1,\gamma_0)$
\item $(M,M,\rho_0,\varphi_1,\gamma_1)$
\item $(M,M,\rho_1,\varphi_1,\gamma_0)$
\end{enumerate}
\end{multicols}
Note that the cases with $\gamma_0$ correspond to split extensions while the cases with $\rho_0$ correspond to Schreier extensions. The cases with $\rho_1$ correspond to $R=\{(1,1),(1,2),(2,1)\}$: the pair $(2,2)$ fails to be in $R$ because $\rho_1(2,2)=1\neq 2$. If we interpret $\varphi$ as an action, then $\varphi_0$ is the trivial action whereas $\varphi_1$ is a non-trivial action.
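The list above can also be checked mechanically. The following Python sketch (not part of the paper) enumerates, for $X,B\in\{M,G\}$, all triples $(\rho,\varphi,\gamma)$ allowed by $(\ref{eq:act01})$--$(\ref{eq:act03})$ and keeps those satisfying $(\ref{eq:act04})$--$(\ref{eq:act07})$; by the equivalence of Theorem \ref{thm: equivalence} this is exactly an enumeration of the pointed semibiproducts in question, and it should recover precisely the 14 systems listed above (in some order).
\begin{verbatim}
from itertools import product

# Two-element monoids, elements 1 (identity) and 2, as in the tables above.
M = {(1, 1): 1, (1, 2): 2, (2, 1): 2, (2, 2): 2}   # idempotent monoid
G = {(1, 1): 1, (1, 2): 2, (2, 1): 2, (2, 2): 1}   # two-element group
name = {id(M): 'M', id(G): 'G'}

# Maps respecting (act01)-(act03); only the value at (2,2) is free.
rhos   = [{(1, 1): 1, (1, 2): 1, (2, 1): 2, (2, 2): v} for v in (2, 1)]
phis   = [{(1, 1): 1, (1, 2): 2, (2, 1): 1, (2, 2): v} for v in (2, 1)]
gammas = [{(1, 1): 1, (1, 2): 1, (2, 1): 1, (2, 2): v} for v in (1, 2)]

def ok(X, B, r, f, g):
    E = (1, 2)
    for x, b in product(E, E):                       # (act04), (act05)
        if r[r[x, b], b] != r[x, b] or r[f[b, x], b] != f[b, x]:
            return False
    for b, b2 in product(E, E):                      # (act06)
        if r[g[b, b2], B[b, b2]] != g[b, b2]:
            return False
    for x, x2, x3, b, b2, b3 in product(E, repeat=6):    # (act07)
        bb, cc = B[b, b2], B[b2, b3]
        bbb = B[bb, b3]
        l1 = r[X[X[x, f[b, x2]], g[b, b2]], bb]
        lhs = r[X[X[l1, f[bb, x3]], g[bb, b3]], bbb]
        r1 = r[X[X[x2, f[b2, x3]], g[b2, b3]], cc]
        rhs = r[X[X[x, f[b, r1]], g[b, cc]], bbb]
        if lhs != rhs:
            return False
    return True

count = 0
for X, B in product((M, G), repeat=2):
    for r, f, g in product(rhos, phis, gammas):
        if ok(X, B, r, f, g):
            count += 1
            print(name[id(X)], name[id(B)],
                  'rho_%d phi_%d gamma_%d'
                  % (r[2, 2] == 1, f[2, 2] == 1, g[2, 2] == 2))
print(count)   # expected to print 14, matching the list above
\end{verbatim}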
\section{Conclusion}
A new tool has been introduced for the study of monoid extensions, from which a new notion of action has emerged in order to establish the categorical equivalence of Theorem \ref{thm: equivalence}. A clear drawback of this approach is the necessity of handling morphisms and maps at the same level.
We have solved the problem by extending the hom-functor through an appropriate profunctor (Definition \ref{def: semibiproduct}). Other possible solutions would consider maps as an extra structure in higher dimensions \cite{Brown,NMF.15,NMF.20a-in} or as imaginary morphisms \cite{BZ,MontoliRodeloLinden}.
Developing further a categorical framework in which to study semibiproducts seems desirable due to several important cases occurring in different settings. For example, semibiproduct extensions can be studied in the context of preordered monoids \cite{NMF.20b} and preordered groups \cite{Preord}, where the maps $q$ and $s$ are required to be monotone maps rather than zero-preserving maps. The context of topological monoids \cite{Ganci}, with $q$ and $s$ required to be continuous maps, would also be worth studying.
\end{document}
\begin{document}
\subjclass[2020]{Primary 35K05,
46E30; Secondary 26A42}
\keywords{Heat equation, Lebesgue space}
\date{Preprint January 2, 2023.}
\title[Heat equation]{Sharp norm estimates for the classical heat equation}
\author{Erik Talvila}
\address{Department of Mathematics \& Statistics\\
University of the Fraser Valley\\
Abbotsford, BC Canada V2S 7M8}
\email{[email protected]}
\begin{abstract}
Sharp estimates of solutions of the classical heat equation are proved
in $L^p$ norms on the real line.
\end{abstract}
\maketitle
\section{Introduction}\label{sectionintroduction}
In this paper we give sharp estimates of solutions of the classical heat equation on the real line
with initial value data that is in an
$L^p$ space
($1\leq p\leq\infty$).
For $u\!:\!{\mathbb R}\times(0,\infty)\to{\mathbb R}$ write $u_t(x)=u(x,t)$.
The classical problem of the heat equation on the real line is, given a function
$f\in L^p$ for some $1\leq p\leq\infty$, find a function
$u\!:\! {\mathbb R}\times (0,\infty)\to{\mathbb R}$ such that
$u_t\in C^2({\mathbb R})$ for each $t>0$, $u(x,\cdot)\in C^1((0,\infty))$ for each $x\in{\mathbb R}$ and
\begin{align}
&\frac{\partial^2u(x,t)}{\partial x^2}-\frac{\partial u(x,t)}{\partial t}=0
\text{ for each } (x,t)\in{\mathbb R}\times(0,\infty)\label{heatpde}\\
&\lim_{t\to 0^+}\norm{u_t-f}_p=0.\label{Lpic}
\end{align}
If $p=\infty$ then $f$ is also assumed to be continuous.
A solution
is given by
the convolution $u_t(x)=f\ast\Theta_t(x)=\int^\infty_{-\infty} f(x-y)\Theta_t(y)\,dy$
where the Gauss--Weierstrass heat kernel
is $\Theta_t(x)=\exp(-x^2/(4t))/(2\sqrt{\pi t})$.
For example, see \cite{follandpde}. Under suitable growth conditions on $u$
the solution is unique.
See \cite{hirschmanwidder} and \cite{widderbook}. References
\cite{cannon} and \cite{widderbook} contain many results on the classical heat equation, including
extensive bibliographies.
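Indeed, the kernel itself satisfies \eqref{heatpde}, since a direct computation gives
\begin{equation*}
\frac{\partial^2\Theta_t(x)}{\partial x^2}
=\left(\frac{x^2}{4t^2}-\frac{1}{2t}\right)\Theta_t(x)
=\frac{\partial\Theta_t(x)}{\partial t};
\end{equation*}
differentiating under the integral sign, which is justified by the Gaussian decay of the kernel and its derivatives, then yields \eqref{heatpde} for $u_t=f\ast\Theta_t$.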
The heat kernel has the following properties. Let $t>0$ and let $s\not=0$ such
that $1/s+1/t>0$. Then
\begin{align}
\Theta_t\ast\Theta_s&=\Theta_{t+s}\label{thetaconvolution}\\
\norm{\Theta_t}_q&=\frac{\alpha_q}{t^{(1-1/q)/2}} \text{ where } \alpha_q=\left\{\begin{array}{cl}
1, & q=1\\
\frac{1}{(2\sqrt{\pi})^{1-1/q} \,q^{1/(2q)}}, & 1<q<\infty\\
\frac{1}{2\sqrt{\pi}}, & q=\infty.
\end{array}
\right.\label{Thetaqnormalpha}
\end{align}
The last of these follows from the probability integral $\int^\infty_{-\infty} e^{-x^2}\,dx=\sqrt{\pi}$.
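Indeed, for $1<q<\infty$,
\begin{equation*}
\norm{\Theta_t}_q^q=\frac{1}{(2\sqrt{\pi t}\,)^{q}}\int^\infty_{-\infty} e^{-qx^2/(4t)}\,dx
=\frac{1}{(2\sqrt{\pi t}\,)^{q}}\sqrt{\frac{4\pi t}{q}}
=\frac{(2\sqrt{\pi t}\,)^{1-q}}{\sqrt{q}},
\end{equation*}
so that $\norm{\Theta_t}_q=(2\sqrt{\pi})^{-(1-1/q)}q^{-1/(2q)}t^{-(1-1/q)/2}=\alpha_q t^{-(1-1/q)/2}$; the cases $q=1$ and $q=\infty$ are immediate.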
\begin{theorem}\label{theoremLpestimates}
Let $1\leq p\leq\infty$ and $f\in L^p$.\\
(a) If $p\leq s\leq\infty$ then $f\ast\Theta_t\in L^s$.\\
(b)
Let $q,r\in[1,\infty]$ be
such that $1/p+1/q=1+1/r$.
There is a constant $K_{p,q}$ such
that $\norm{f\ast\Theta_t}_r\leq K_{p,q}\norm{f}_p \,t^{-(1-1/q)/2}$ for all $t>0$. The estimate
is sharp in the sense that if $\psi\!:\!(0,\infty)\to(0,\infty)$ is such that $\psi(t)=o(t^{-(1-1/q)/2})$
as $t\to 0^+$ or $t\to\infty$ then there is $G\in L^p$ such that $\norm{G\ast\Theta_t}_r/\psi(t)$ is
not bounded as $t\to 0^+$ or $t\to\infty$. The constant is
$K_{p,q}=(c_pc_q/c_r)^{1/2}\alpha_q$, where $c_p=p^{1/p}/(p')^{1/p'}$ and $p, p'$ are
conjugate exponents; it cannot be replaced with any smaller number.\\
(c) If $1\leq s<p$ then
$f\ast\Theta_t$ need not be in $L^s$.
\end{theorem}
When $r=p$ and $q=1$ the inequality in part (b) reads $\norm{f\ast\Theta_t}_p\leq\norm{f}_p$.
When $r=\infty$ then $p$ and $q$ are conjugates and
the inequality in part (b) reads $\norm{f\ast\Theta_t}_\infty\leq K_{p,q}\norm{f}_p\,t^{-1/(2p)}$.
The condition for sharpness in Young's inequality is that both functions be Gaussians.
This fact is
exploited in the proof of part (b).
See \cite[p.~99]{liebloss}, \cite{beckner} and \cite{toscani}.
Our proof also uses ideas from
\cite[Theorem~9.2, p.~195]{hirschmanwidder} and \cite[pp.~115-120]{axler}.
The estimates are known, for example \cite[Proposition~3.1]{iwabuchi}, but we have not been able to find a proof in the
literature that they are sharp.
\begin{proof}
(a), (b) Young's inequality gives
\begin{equation}
\norm{f\ast\Theta_t}_r\leq C_{p,q}\norm{f}_p\norm{\Theta_t}_q =\frac{C_{p,q}\norm{f}_p\alpha_q}{
t^{(1-1/q)/2}},\label{normyounginequality}
\end{equation}
where $\alpha_q$ is given in \eqref{Thetaqnormalpha}.
The sharp constant, given in \cite[p.~99]{liebloss},
is $C_{p,q}=(c_pc_q/c_r)^{1/2}$ where $c_p=p^{1/p}/(p')^{1/p'}$ with $p, p'$ being
conjugate exponents. Note that $c_1=c_{\infty}=1$. Also, $0<C_{p,q}\leq 1$. We then
take $K_{p,q}=C_{p,q}\alpha_q$.
To show the estimate $\norm{u_t}_r=O(t^{-(1-1/q)/2})$ is sharp as $t\to 0^+$ and $t\to\infty$, let
$\psi$ be as in the statement of the theorem. Fix $p\leq r\leq\infty$. Define the family of linear operators
$S_t\!:\! L^p\to L^r$ by $S_t[f](x)=f\ast \Theta_t(x)/\psi(t)$. The estimate
$\norm{S_t[f]}_r\leq K_{p,q}\norm{f}_pt^{-(1-1/q)/2}/\psi(t)$ shows that, for each $t>0$, $S_t$
is a bounded linear operator. Let $f_t=\Theta_t$. Then, from \eqref{thetaconvolution} and
\eqref{Thetaqnormalpha},
$$
\frac{\norm{S_t[f_t]}_r}{\norm{f_t}_p}=
\frac{\norm{\Theta_t\ast\Theta_t}_r}{\psi(t)\norm{\Theta_t}_p}
=\frac{\norm{\Theta_{2t}}_r}{\psi(t)\norm{\Theta_t}_p}
=\frac{\alpha_r}{\alpha_p 2^{(1-1/r)/2}\psi(t)t^{(1-1/q)/2}}.
$$
This is not bounded in the limit $t\to 0^+$. Hence, $S_t$ is not uniformly bounded.
By the Uniform Boundedness Principle it is not pointwise bounded. Therefore, there is a function
$f\in L^p$ such that $\norm{f\ast\Theta_t}_r\not=O(\psi(t))$ as $t\to 0^+$. Hence the growth estimate
$\norm{f\ast\Theta_t}_r=O(t^{-(1-1/q)/2})$ as $t\to 0^+$ is sharp. Similarly for sharpness
as $t\to\infty$.
We now show that the constant $K_{p,q}$ cannot be reduced.
A calculation shows we have equality in \eqref{normyounginequality}
when $f=\Theta^\beta_t$ and
$\beta$ is given
by the equation
\begin{equation}
\frac{\beta^{1-1/q}}{(\beta +1)^{1-1/r}}
=
\frac{c_pc_q}{c_r}\left(\frac{\alpha_p\alpha_q}{\alpha_r}\right)^2
=
\left(1-\frac{1}{p}\right)^{1-1/p}\left(1-\frac{1}{q}\right)^{1-1/q}
\left(1-\frac{1}{r}\right)^{-(1-1/r)}.\label{youngequality}
\end{equation}
First consider the case $p\not=1$ and $q\not=1$.
Notice that $1-1/r=(1-1/q)+(1-1/p)>1-1/q$. Let
$g(x)=x^A(x+1)^{-B}$ with $B>A>0$.
Then $g$ is
strictly increasing on $(0,A/(B-A))$ and strictly decreasing for $x> A/(B-A)$ so there is a unique
maximum for $g$ at $A/(B-A)$. Put $A=1-1/q$ and $B=1-1/r$. Then
$$
g\left(\frac{A}{B-A}\right) = \frac{\beta^{1-1/q}}{(\beta +1)^{1-1/r}}
= \left(1-\frac{1}{p}\right)^{1-1/p}\left(1-\frac{1}{q}\right)^{1-1/q}
\left(1-\frac{1}{r}\right)^{-(1-1/r)}.
$$
Hence, \eqref{youngequality} has a unique positive solution for $\beta$ given by
$\beta=(1-1/q)/(1-1/p)$.
If $p=1$ then $q=r$. In this case, \eqref{youngequality} reduces to
$(1+1/\beta)^{1-1/q}=1$ and the solution is given in the limit $\beta\to\infty$. Sharpness
of \eqref{normyounginequality} is then given in this limit. It can also be seen that taking $f$ to be the Dirac
distribution gives equality.
If $q=1$ then $p=r$. Now, \eqref{youngequality} reduces to $(\beta+1)^{1-1/p}=1$, so $\beta=0$, and sharpness is obtained in the limit $\beta\to0^+$; formally, there is equality in \eqref{normyounginequality} when $f=1$.
If $p=q=r=1$ then there is equality in \eqref{normyounginequality} for each $\beta>0$.
Hence, the constant in \eqref{normyounginequality} is sharp.
(c) Suppose $f\geq 0$ and $f$ is decreasing on $[c,\infty)$ for some $c\in{\mathbb R}$. Let $x>c$. Then
\begin{eqnarray*}
f\ast\Theta_t(x) & \geq & \int_c^x f(y)\Theta_t(x-y)\,dy
\geq f(x)\int_c^x \Theta_t(x-y)\,dy\\
& = & \frac{f(x)}{\sqrt{\pi}}\int_0^{(x-c)/(2\sqrt{t})} e^{-y^2}\,dy
\sim f(x)/2 \quad \text{ as } x \to \infty.
\end{eqnarray*}
Now put $f(x)=1/[x^{1/p}\log^2(x)]$ for $x\geq e$ and $f(x)=0$ otherwise; for $p=\infty$ replace $x^{1/p}$ by $1$. Then $f\in L^p$, while the estimate above shows that $f\ast\Theta_t(x)$ is bounded below by a positive multiple of $f(x)$ for all large $x$, so that $f\ast\Theta_t\not\in L^s$ whenever $1\leq s<p$.
\end{proof}
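As a quick numerical sanity check (a sketch, not part of the argument), the following Python snippet uses the closed-form norms \eqref{Thetaqnormalpha} and the semigroup property \eqref{thetaconvolution} to confirm equality in \eqref{normyounginequality} for Gaussian data $f=\Theta_{t/\beta}$ (a constant multiple of $\Theta_t^\beta$, so equality is preserved) with $\beta=(1-1/q)/(1-1/p)$:
\begin{verbatim}
import math

SQPI = 2.0 * math.sqrt(math.pi)

def alpha(q):               # alpha_q as in the display above, 1 <= q <= inf
    return 1.0 / (SQPI ** (1.0 - 1.0 / q) * q ** (1.0 / (2.0 * q)))

def theta_norm(t, q):       # ||Theta_t||_q
    return alpha(q) * t ** (-(1.0 - 1.0 / q) / 2.0)

def c(p):                   # c_p = p^{1/p} / p'^{1/p'}, with c_1 = c_inf = 1
    if p == 1.0 or math.isinf(p):
        return 1.0
    pp = p / (p - 1.0)
    return p ** (1.0 / p) / pp ** (1.0 / pp)

p, q, t = 1.5, 2.5, 0.7
r = 1.0 / (1.0 / p + 1.0 / q - 1.0)
K = math.sqrt(c(p) * c(q) / c(r)) * alpha(q)
beta = (1.0 - 1.0 / q) / (1.0 - 1.0 / p)
s = t / beta
lhs = theta_norm(s + t, r)          # ||Theta_s * Theta_t||_r
rhs = K * theta_norm(s, p) * t ** (-(1.0 - 1.0 / q) / 2.0)
print(lhs, rhs)                     # the two values should agree
\end{verbatim}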
\end{document}
\begin{document}
\title[On a formula for the PI-exponent]{On a formula for the PI-exponent
of Lie algebras}
\author{A.\,S.~Gordienko}
\address{Vrije Universiteit Brussel, Belgium}
\email{[email protected]}
\keywords{Lie algebra, polynomial identity, derivation, Hopf algebra, $H$-module algebra,
codimension, cocharacter, Young diagram, affine algebraic group.}
\begin{abstract}
We prove that one of the conditions in M.\,V.~Zaicev's formula for the PI-exponent
and in its natural generalization for the Hopf PI-exponent,
can be weakened. Using the modification of the formula, we prove that if a finite dimensional semisimple Lie algebra acts by derivations on
a finite dimensional Lie algebra over a field of characteristic $0$,
then the differential PI-exponent coincides with the ordinary one.
Analogously, the exponent
of polynomial $G$-identities of a finite dimensional Lie algebra
with a rational action of a connected reductive affine algebraic group $G$
by automorphisms,
coincides with the ordinary PI-exponent. In addition, we
provide a simple formula
for the Hopf PI-exponent and prove the existence of the Hopf PI-exponent itself
for $H$-module Lie algebras whose solvable radical is nilpotent, assuming
only the $H$-invariance of the radical, i.e.
under weaker assumptions on the $H$-action than in
the general case. As a consequence, we show that the analog of Amitsur's
conjecture holds for $G$-codimensions of all finite dimensional
Lie $G$-algebras whose solvable radical is nilpotent, for an arbitrary group $G$.
\end{abstract}
\subjclass[2010]{Primary 17B01; Secondary 17B10, 17B40, 16T05, 20C30, 14L17.}
\thanks{
Supported by Fonds Wetenschappelijk Onderzoek~--- Vlaanderen Pegasus Marie Curie post doctoral fellowship (Belgium).
}
\maketitle
\section{Introduction}
The intensive study of polynomial identities and their numeric invariants
revealed the strong connection of the invariants with the structure of an algebra~\cite{Bahturin, DrenKurs, ZaiGia, ZaiLie}. If an algebra is endowed with a grading, an action of a Lie algebra
by derivations, an action of a group by automorphisms and anti-automorphisms, or an action of a Hopf algebra, it is natural to consider graded, differential, $G$- or $H$-identities~\cite{BahtZaiGradedExp,
BahtZaiSehgal, BereleHopf, Kharchenko}.
In 2002, M.\,V.~Zaicev~\cite{ZaiLie} proved a formula
for the PI-exponent of finite dimensional Lie algebras
over an algebraically closed field of characteristic $0$. It can be shown~\cite{ASGordienko2, ASGordienko5, GordienkoKochetov}
that, under some assumptions, the natural generalization of the formula (see Subsection~\ref{SubsectionOrigFormulaHPIexp}) holds for the exponent
of graded, differential, $G$-, and $H$-identities too.
In Subsection~\ref{SubsectionModificationHPIexp} we prove that one of the conditions can be weakened,
which makes the formula easier to apply.
In~\cite{GordienkoKochetov}, the authors showed
that if a connected reductive affine algebraic group~$G$
acts on a finite dimensional associative algebra~$A$
rationally by automorphisms,
then the exponent of $G$-identities coincides with the ordinary PI-exponent of $A$.
Also, if a finite dimensional semisimple Lie algebra acts on
a finite dimensional associative algebra by derivations,
then the differential PI-exponent coincides with the ordinary one.
Using the modification of M.\,V.~Zaicev's formula, we prove the
analogous results for finite dimensional Lie algebras (Theorems~\ref{TheoremLieDerPIexpEqual}
and~\ref{TheoremLieGConnPIexpEqual} in Section~\ref{SectionApplLieDer}).
In Section~\ref{SectionHRTheSame} we consider finite dimensional $H$-module Lie algebras~$L$
such that the solvable radical of $L$ is nilpotent and $H$-invariant. We prove the analog of Amitsur's conjecture for such algebras $L$
and provide a simple formula for the Hopf PI-exponent of $L$.
\section{Polynomial $H$-identities and their codimensions}\label{SectionDerH}
Let $H$ be a Hopf algebra over a field $F$.
An algebra $A$
over $F$
is an \textit{$H$-module algebra}
or an \textit{algebra with an $H$-action},
if $A$ is endowed with a homomorphism $H \to \End_F(A)$ such that
$h(ab)=(h_{(1)}a)(h_{(2)}b)$
for all $h \in H$, $a,b \in A$. Here we use Sweedler's notation
$\Delta h = h_{(1)} \otimes h_{(2)}$ where $\Delta$ is the comultiplication
in $H$.
\begin{example}
If $M$ is an $H$-module, then $\End_F(M)$ is an associative $H$-module
algebra where $(h\psi)(v)=h_{(1)}\psi((Sh_{(2)})v)$ for all $h\in H$, $\psi \in \End_F(M)$, and $v\in M$.
(Here $S$ is the antipode of $H$.)
\end{example}
We refer the reader to~\cite{Abe, Danara, Montgomery, Sweedler}
for an account
of Hopf algebras and algebras with Hopf algebra actions.
Let $F \lbrace X \rbrace$ be the absolutely free nonassociative algebra
on the set $X := \lbrace x_1, x_2, x_3, \ldots \rbrace$.
Then $F \lbrace X \rbrace = \bigoplus_{n=1}^\infty F \lbrace X \rbrace^{(n)}$
where $F \lbrace X \rbrace^{(n)}$ is the linear span of all monomials of total degree $n$.
Let $H$ be a Hopf algebra over a field $F$. Consider the algebra $$F \lbrace X | H\rbrace
:= \bigoplus_{n=1}^\infty H^{{}\otimes n} \otimes F \lbrace X \rbrace^{(n)}$$
with the multiplication $(u_1 \otimes w_1)(u_2 \otimes w_2):=(u_1 \otimes u_2) \otimes w_1w_2$
for all $u_1 \in H^{{}\otimes j}$, $u_2 \in H^{{}\otimes k}$,
$w_1 \in F \lbrace X \rbrace^{(j)}$, $w_2 \in F \lbrace X \rbrace^{(k)}$.
We use the notation $$x^{h_1}_{i_1}
x^{h_2}_{i_2}\ldots x^{h_n}_{i_n} := (h_1 \otimes h_2 \otimes \ldots \otimes h_n) \otimes x_{i_1}
x_{i_2}\ldots x_{i_n}$$ (the arrangements of brackets on $x_{i_j}$ and on $x^{h_j}_{i_j}$
are the same). Here $h_1 \otimes h_2 \otimes \ldots \otimes h_n \in H^{{}\otimes n}$,
$x_{i_1} x_{i_2}\ldots x_{i_n} \in F \lbrace X \rbrace^{(n)}$.
Note that if $(\gamma_\beta)_{\beta \in \Lambda}$ is a basis in $H$,
then $F \lbrace X | H\rbrace$ is isomorphic to the absolutely free nonassociative algebra over $F$ with free formal generators $x_i^{\gamma_\beta}$, $\beta \in \Lambda$, $i \in \mathbb N$.
Define on $F \lbrace X | H\rbrace$ the structure of a left $H$-module
by $$h\,(x^{h_1}_{i_1}
x^{h_2}_{i_2}\ldots x^{h_n}_{i_n})=x^{h_{(1)}h_1}_{i_1}
x^{h_{(2)}h_2}_{i_2}\ldots x^{h_{(n)}h_n}_{i_n},$$
where $h_{(1)}\otimes h_{(2)} \otimes \ldots \otimes h_{(n)}$
is the image of $h$ under the comultiplication $\Delta$
applied $(n-1)$ times, $h\in H$. Then $F \lbrace X | H\rbrace$ is \textit{the absolutely free $H$-module nonassociative algebra} on $X$, i.e. for each map $\psi \colon X \to A$ where $A$ is an $H$-module algebra,
there exists a unique homomorphism $\bar\psi \colon
F \lbrace X | H\rbrace \to A$ of algebras and $H$-modules, such that $\bar\psi\bigl|_X=\psi$.
Here we identify $X$ with the set $\lbrace x^1_j \mid j \in \mathbb N\rbrace \subset F \lbrace X | H\rbrace$.
Consider the $H$-invariant ideal $I$ in $F\lbrace X | H \rbrace$
generated by the set \begin{equation}\label{EqSetOfHGen}
\bigl\lbrace u(vw)+v(wu)+w(uv) \mid u,v,w \in F\lbrace X | H \rbrace\bigr\rbrace \cup\bigl\lbrace u^2 \mid u \in F\lbrace X | H \rbrace\bigr\rbrace.
\end{equation}
Then $L(X | H) := F\lbrace X | H \rbrace/I$
is \textit{the free $H$-module Lie algebra}
on $X$, i.e. for any $H$-module Lie algebra $L$
and a map $\psi \colon X \to L$, there exists a unique homomorphism $\bar\psi \colon L(X | H) \to L$
of algebras and $H$-modules such that $\bar\psi\bigl|_X =\psi$.
We refer to the elements of $L(X | H)$ as \textit{Lie $H$-polynomials}
and use the commutator notation for the multiplication.
\begin{remark} If $H$ is cocommutative and $\ch F \ne 2$, then $L(X | H)$ is the ordinary
free Lie algebra with free generators $x_i^{\gamma_\beta}$, $\beta \in \Lambda$, $i \in \mathbb N$
where $(\gamma_\beta)_{\beta \in \Lambda}$ is a basis in $H$, since the ordinary ideal of
$F\lbrace X | H \rbrace$ generated by~(\ref{EqSetOfHGen})
is already $H$-invariant.
However, if $h_{(1)} \otimes h_{(2)} \ne h_{(2)} \otimes h_{(1)}$ for some $h \in H$,
we still have $$[x^{h_{(1)}}_i, x^{h_{(2)}}_j]=h[x_i, x_j]=-h[x_j, x_i]=-[x^{h_{(1)}}_j, x^{h_{(2)}}_i]
= [x^{h_{(2)}}_i, x^{h_{(1)}}_j]$$ in $L(X | H)$ for all $i,j \in\mathbb N$,
i.e. in the case $h_{(1)} \otimes h_{(2)} \ne h_{(2)} \otimes h_{(1)}$ the algebra $L(X | H)$ is not free as an ordinary Lie algebra.
\end{remark}
Let $L$ be an $H$-module Lie algebra for
some Hopf algebra $H$ over a field $F$.
An $H$-polynomial
$f \in L ( X | H )$
is an \textit{$H$-identity} of $L$ if $\psi(f)=0$
for all homomorphisms $\psi \colon L(X|H) \to L$
of algebras and $H$-modules. In other words, $f(x_1, x_2, \ldots, x_n)$
is a polynomial $H$-identity of $L$
if and only if $f(a_1, a_2, \ldots, a_n)=0$ for any $a_i \in L$.
In this case we write $f \equiv 0$.
The set $\Id^H(L)$ of all polynomial $H$-identities
of $L$ is an $H$-invariant ideal of $L(X|H)$.
Denote by $V^H_n$ the space of all multilinear Lie $H$-polynomials
in $x_1, \ldots, x_n$, $n\in\mathbb N$, i.e.
$$V^{H}_n = \langle [x^{h_1}_{\sigma(1)},
x^{h_2}_{\sigma(2)}, \ldots, x^{h_n}_{\sigma(n)}]
\mid h_i \in H, \sigma\in S_n \rangle_F \subset L( X | H ).$$
(All long commutators in the article are left-normed, although this is not important
in this particular case in virtue of the Jacobi identity.)
The number $c^H_n(L):=\dim\left(\frac{V^H_n}{V^H_n \cap \Id^H(L)}\right)$
is called the $n$th \textit{codimension of polynomial $H$-identities}
or the $n$th \textit{$H$-codimension} of $L$.
The analog of Amitsur's conjecture for $H$-codimensions of $L$ can be formulated
as follows.
\begin{conjecture} There exists
$\PIexp^H(L):=\lim\limits_{n\to\infty}
\sqrt[n]{c^H_n(L)} \in \mathbb Z_+$.
\end{conjecture}
We call $\PIexp^H(L)$ the \textit{Hopf PI-exponent} of $L$.
Here we list three important particular cases:
\begin{example} Every algebra $L$ is an $H$-module algebra
for $H=F$. In this case the $H$-action is trivial and we get ordinary polynomial identities and their codimensions. (See the original definition e.g. in~\cite{Bahturin}.) We write $c_n(L):= c_n^F(L)$, $\Id(L):= \Id^F(L)$,
$V_n(L):=V_n^F(L)$, $\PIexp(L)=\PIexp^F(L)$.
\end{example}
\begin{example}
If $H=FG$ where $FG$ is the group algebra of a group $G$, then
an $H$-module algebra $L$ is an algebra with a $G$-action by automorphisms.
In this case we get \textit{polynomial $G$-identities} and
\textit{$G$-codimensions}.
We write $c^G_n(L):= c_n^{FG}(L)$, $\Id^G(L):= \Id^{FG}(L)$,
$V^G_n(L):=V_n^{FG}(L)$, $\PIexp^G(L)=\PIexp^{FG}(L)$.
Note that one can consider $G$-actions not only by automorphisms, but by anti-automorphisms too
and define polynomial $G$-identities and
$G$-codimensions in this case as well. (See e.g.~\cite[Section~1.2]{ASGordienko5}.)
\end{example}
\begin{example} If $H=U(\mathfrak g)$ where $U(\mathfrak g)$ is the universal enveloping
algebra of a Lie algebra $\mathfrak g$, then an $H$-module algebra is an algebra
with a $\mathfrak g$-action by derivations. The corresponding $H$-identities are called
\textit{differential identities} or \textit{polynomial identities with derivations}.
\end{example}
\section{Two formulas for the Hopf PI-exponent}
\subsection{$H$-nice Lie algebras}\label{SubsectionHnice}
The analog of Amitsur's conjecture was proved~\cite{ASGordienko5} for a wide class of $H$-module Lie algebras
that we call $H$-nice (see the definition below). The class of $H$-nice algebras includes finite dimensional semisimple $H$-module Lie algebras,
finite dimensional $H$-module Lie algebras for finite dimensional semisimple Hopf algebras $H$,
finite dimensional Lie algebras with a rational action of a reductive affine algebraic group
by automorphisms, and finite dimensional Lie algebras graded by an Abelian group
(see~\cite{ASGordienko5}).
Let $L$ be a finite dimensional $H$-module Lie algebra
where $H$ is a Hopf algebra over an algebraically closed field $F$
of characteristic $0$.
We say that $L$ is \textit{$H$-nice} if either $L$ is semisimple or the following conditions hold:
\begin{enumerate}
\item \label{ConditionRNinv}
the nilpotent radical $N$ and the solvable radical $R$ of $L$ are $H$-invariant;
\item \label{ConditionLevi} \textit{(Levi decomposition)}
there exists an $H$-invariant maximal semisimple subalgebra $B \subseteq L$ such that
$L=B\oplus R$ (direct sum of $H$-modules);
\item \label{ConditionWedderburn} \textit{(Wedderburn~--- Mal'cev decompositions)}
for any $H$-submodule $W \subseteq L$ and associative $H$-module subalgebra $A_1 \subseteq \End_F(W)$,
the Jacobson radical $J(A_1)$ is $H$-invariant and
there exists an $H$-invariant maximal semisimple associative subalgebra $\tilde A_1 \subseteq A_1$
such that $A_1 = \tilde A_1 \oplus J(A_1)$ (direct sum of $H$-submodules);
\item \label{ConditionLComplHred}
for any $H$-invariant Lie subalgebra $L_0 \subseteq \mathfrak{gl}(L)$
such that $L_0$ is an $H$-module algebra and $L$ is a completely reducible $L_0$-module disregarding $H$-action, $L$ is a completely reducible $(H,L_0)$-module.
\end{enumerate}
\subsection{Original formula}\label{SubsectionOrigFormulaHPIexp}
Let $L$ be an $H$-nice Lie algebra over an algebraically closed field $F$ of characteristic $0$.
Fix some Levi decomposition $L=B\oplus R$ (direct sum of $H$-submodules).
Consider $H$-invariant ideals $I_1, I_2, \ldots, I_r$,
$J_1, J_2, \ldots, J_r$, $r \in \mathbb Z_+$, of the algebra $L$ such that $J_k \subseteq I_k$,
satisfying the conditions
\begin{enumerate}
\item $I_k/J_k$ is an irreducible $(H,L)$-module;
\item for any $H$-invariant $B$-submodules $T_k$
such that $I_k = J_k\oplus T_k$, there exist numbers
$q_i \geqslant 0$ such that $$\bigl[[T_1, \underbrace{L, \ldots, L}_{q_1}], [T_2, \underbrace{L, \ldots, L}_{q_2}], \ldots, [T_r,
\underbrace{L, \ldots, L}_{q_r}]\bigr] \ne 0.$$
\end{enumerate}
Let $M$ be an $L$-module. Denote by $\Ann M$ its annihilator in $L$.
Let $$d(L, H) := \max \left(\dim \frac{L}{\Ann(I_1/J_1) \cap \dots \cap \Ann(I_r/J_r)}
\right)$$
where the maximum is found among all $r \in \mathbb Z_+$ and all $I_1, \ldots, I_r$, $J_1, \ldots, J_r$
satisfying Conditions 1--2.
In~\cite[Theorem~9, see also Section~1.8]{ASGordienko5}
the following theorem is proved:
\begin{theorem}\label{TheoremMainH}
Let $L$ be a non-nilpotent $H$-nice Lie algebra over an algebraically closed field $F$ of characteristic $0$. Then there exist constants $C_1, C_2 > 0$, $r_1, r_2 \in \mathbb R$ such that $$C_1 n^{r_1} d^n \leqslant c^{H}_n(L) \leqslant C_2 n^{r_2} d^n\text{ for all }n \in \mathbb N.$$ Here $d:=d(L, H)$.
\end{theorem}
In particular, there exists $\PIexp^H(L)=d(L, H)\in \mathbb Z_+$.
\subsection{Modification}\label{SubsectionModificationHPIexp}
Let $L$ be an $H$-nice Lie algebra.
By~\cite[Lemma~10]{ASGordienko5}, $L=B\oplus S \oplus N$
for some $H$-submodule $S\subseteq R$ such that $[B, S] = 0$.
Consider the associative subalgebra $A_0$ in $\End_F(L)$
generated by $\ad S$. Note that $A_0$ is an $H$-module algebra
since $S$ is $H$-invariant. By Condition~\ref{ConditionWedderburn}
of Subsection~\ref{SubsectionHnice},
$A_0 = \tilde A_0 \oplus J(A_0)$ (direct sum of $H$-submodules)
where $\tilde A_0$ is a maximal semisimple subalgebra of $A_0$.
(If $L$ is semisimple, $A_0=\tilde A_0=0$.)
\begin{lemma}\label{LemmaA0DirectSumOfFields}
$\tilde A_0=Fe_1 \oplus \dots \oplus Fe_q$ (direct sum of ideals)
for some idempotents $e_i \in A_0$.
\end{lemma}
\begin{proof}
Since $R$ is solvable, by Lie's theorem, there exists a basis of $L$
such that the matrices of all operators $\ad a$, $a\in R$, are upper triangular. Denote the corresponding isomorphism $\End_F(L) \to M_s(F)$ of algebras by $\psi$ where $s := \dim L$. Since $\psi(\ad R) \subseteq UT_s(F)$,
we have $\psi(A_0) \subseteq UT_s(F)$ where $UT_s(F)$ is the associative algebra of
upper triangular $s\times s$ matrices. However, $$UT_s(F) = Fe_{11}\oplus Fe_{22}\oplus
\dots\oplus Fe_{ss}\oplus \tilde N$$
where $$\tilde N := \langle e_{ij} \mid 1 \leqslant i < j \leqslant s \rangle_F$$
is a nilpotent ideal. Since $\psi$ is injective, $A_0$ embeds into $UT_s(F)$ and therefore contains no subalgebra isomorphic to $M_2(F)$. Hence
$\tilde A_0=Fe_1 \oplus \dots \oplus Fe_q$ (direct sum of ideals)
for some idempotents $e_i \in A_0$.
\end{proof}
Since $[B,S]=0$ and $e_i$ are polynomials in $\ad a$, $a \in S$, we have $[\ad B, \tilde A_0]=0$.
The semisimplicity of $B$ implies $(\ad B) \cap \tilde A_0 = \lbrace 0 \rbrace$.
Now we treat $(\ad B)\oplus \tilde A_0$ as an $H$-module Lie algebra.
\begin{lemma}\label{LemmaLHBA0ComplReducible}
$L$ is a completely reducible $(\ad B)\oplus \tilde A_0$- and $(H, (\ad B)\oplus \tilde A_0)$-module.
\end{lemma}
\begin{proof} If $L$ is semisimple, then $L=B_1 \oplus \ldots \oplus B_s$
(direct sum of $H$-invariant ideals)
for some $H$-simple Lie algebras $B_i$ (see~\cite[Theorem~6]{ASGordienko4}),
and $L$ is a completely reducible $(H, (\ad B)\oplus \tilde A_0)$-module.
Suppose now that $L$ satisfies Conditions~\ref{ConditionRNinv}--\ref{ConditionLComplHred}
of Subsection~\ref{SubsectionHnice}.
Note that $e_i$ are commuting diagonalizable operators
on $L$. Hence they have a common basis of eigenvectors,
and $L=\bigoplus_j W_j$ where $W_j$ are the intersections of eigenspaces of $e_i$.
Each $e_i$ commutes with the operators from $\ad B$. Thus $W_j$ are $(\ad B)$-submodules.
Recall that $B$ is semisimple. Therefore, $W_j$ is the direct sum of irreducible $(\ad B)$-submodules.
Since $e_i$ act on each $W_j$ as scalar operators, $L$ is the direct sum of
irreducible $(\ad B)\oplus \tilde A_0$-submodules.
Now Condition~\ref{ConditionLComplHred}
of Subsection~\ref{SubsectionHnice} implies the lemma.
\end{proof}
We replace Condition~2 of Subsection~\ref{SubsectionOrigFormulaHPIexp} with Condition~2' below:
\begin{enumerate}
\item[(2')] there exist $H$-invariant $(\ad B)\oplus \tilde A_0$-submodules $T_k$, $I_k = J_k\oplus T_k$,
and numbers
$q_i \geqslant 0$ such that $$\bigl[[T_1, \underbrace{L, \ldots, L}_{q_1}], [T_2, \underbrace{L, \ldots, L}_{q_2}], \ldots, [T_r,
\underbrace{L, \ldots, L}_{q_r}]\bigr] \ne 0.$$
\end{enumerate}
Define $$d'(L, H) := \max \left(\dim \frac{L}{\Ann(I_1/J_1) \cap \dots \cap \Ann(I_r/J_r)}
\right)$$
where the maximum is found among all $r \in \mathbb Z_+$ and all $I_1, \ldots, I_r$, $J_1, \ldots, J_r$
satisfying Conditions 1 and 2'.
\begin{theorem}\label{Theoremddprime} Let $L$ be an $H$-nice Lie algebra over an algebraically closed field $F$ of characteristic $0$.
Then $\PIexp^H(L)=d'(L, H)$.
\end{theorem}
\begin{proof} Clearly, $d'(L, H) \geqslant d(L, H) = \PIexp^H(L)$ since, by Lemma~\ref{LemmaLHBA0ComplReducible}, $L$ is a completely reducible $(H,(\ad B)\oplus \tilde A_0)$-module
and we can always choose $H$-invariant $(\ad B)\oplus \tilde A_0$-submodules $T_k$ such that $I_k = J_k \oplus T_k$.
If $L$ is semisimple, then~\cite[Example~7]{ASGordienko5} implies $d'(L, H) = d(L, H)$.
Hence we may assume that $L$ satisfies Conditions~\ref{ConditionRNinv}--\ref{ConditionLComplHred}
of Subsection~\ref{SubsectionHnice}.
We prove that there exist $r \in \mathbb R$, $C > 0$ such that $c_n^H(L) \geqslant C n^r (d'(L, H))^n$
for all $n\in \mathbb N$. We
take $H$-invariant ideals $I_1, \ldots, I_r$ and $J_1, \ldots, J_r$
satisfying Conditions 1 and 2' such that
$\dim \frac{L}{\Ann(I_1/J_1) \cap \dots \cap \Ann(I_r/J_r)}
=d'(L, H)$. Then we choose $H$-invariant $(\ad B)\oplus \tilde A_0$-submodules $\tilde T_k$, $I_k = J_k\oplus \tilde T_k$,
such that $$\bigl[[\tilde T_1, \underbrace{L, \ldots, L}_{q_1}], [\tilde T_2, \underbrace{L, \ldots, L}_{q_2}], \ldots, [\tilde T_r,
\underbrace{L, \ldots, L}_{q_r}]\bigr] \ne 0$$
for some numbers $q_i \geqslant 0$. Now we
repeat the arguments of~\cite[Section~6]{ASGordienko5}
with the following changes. (We use the notation from~\cite[Section~6]{ASGordienko5}.) Instead of using Lemma~15, we choose
$c_{ij}\in \tilde A_0$ and $d_{ij}\in J(A_0)$ such that
each $\ad a_{ij} = c_{ij}+d_{ij}$. Note that, by the second part of the proof of~\cite[Lemma~5]{ASGordienko5} for $W=S$ and $M=L$, we have $J(A_0)\subseteq J(A)$
where $A$ is the associative subalgebra of $\End_F(L)$ generated by the operators from $H$
and $\ad L$. Hence $d_{ij}\in J(A)$.
Moreover, the $\tilde T_k$ that we have chosen by Condition 2' are $H$-invariant $\tilde B$-submodules, and we use them in~\cite[Lemma~17]{ASGordienko5}.
The rest of the proof is the same as in~\cite[Section~6]{ASGordienko5}.
Finally, we have $\PIexp^H(L)\geqslant d'(L, H)$, and the theorem is proved.
\end{proof}
\section{Lie $G$-algebras and Lie algebras with derivations}\label{SectionApplLieDer}
In~\cite[Theorem~7]{GordienkoKochetov}, the authors proved the existence of the differential PI-exponent
for finite dimensional Lie algebras with an action of a finite dimensional semisimple Lie algebra by derivations. Here we prove that the differential PI-exponent coincides with the ordinary one.
\begin{theorem}\label{TheoremddprimeDiff}
Let $L$ be a finite dimensional Lie algebra
over an algebraically closed field $F$ of characteristic $0$. Suppose a
Lie algebra $\mathfrak g$ is acting on $L$ by derivations, and $L$ is an $U(\mathfrak g)$-nice algebra.
Then $\PIexp(L) = \PIexp^{U(\mathfrak g)}(L)$.
\end{theorem}
\begin{remark}
If a reductive affine algebraic group $G$ is rationally acting on $L$ by automorphisms,
then $L$ is an $FG$-nice algebra~\cite[Example~6]{ASGordienko5}.
Hence if $G$ is connected and $\mathfrak g$ is the Lie algebra of $G$, then by~\cite[Theorems 13.1 and 13.2]{HumphreysAlgGr}, $L$ is an $U(\mathfrak g)$-nice algebra.
In particular, a finite dimensional Lie algebra $L$ with an action of a
finite dimensional semisimple Lie algebra~$\mathfrak g$ by derivations is always an $U(\mathfrak g)$-nice algebra, since there exists a simply connected semisimple affine algebraic group $G$ rationally acting on $L$ by automorphisms, such that $\mathfrak g$ is the Lie algebra of $G$
and the $\mathfrak g$-action is the differential of the $G$-action (see e.g.~\cite[Chapter XVIII, Theorem 5.1]{Hochschild} and~\cite[Theorem~3]{GordienkoKochetov}).
\end{remark}
\begin{proof}[Proof of Theorem~\ref{TheoremddprimeDiff}]
By Theorems~\ref{TheoremMainH} and~\ref{Theoremddprime}, there exist $\PIexp(L)=d'(L, F)$
and $\PIexp^{U(\mathfrak g)}(L)=d'(L, U(\mathfrak g))$.
If we treat differential and ordinary multilinear Lie polynomials as
multilinear functions on $L$, we obtain $c_n(L) \leqslant c^{U(\mathfrak g)}_n(L)$ for all $n \in\mathbb N$.
Hence $\PIexp(L) \leqslant \PIexp^{U(\mathfrak g)}(L)$.
Suppose that $\mathfrak g$-invariant ideals $I_1, I_2, \ldots, I_r$ and
$J_1, J_2, \ldots, J_r$, $r \in \mathbb Z_+$, of the algebra $L$ with $J_k \subseteq I_k$
satisfy Conditions 1 and 2' for $H=U(\mathfrak g)$.
By Condition 2', there exist $\mathfrak g$-invariant $(\ad B)\oplus \tilde A_0$-submodules $T_k$, $I_k = J_k\oplus T_k$, and numbers $q_i \geqslant 0$ such that $$\bigl[[T_1, \underbrace{L, \ldots, L}_{q_1}], [T_2, \underbrace{L, \ldots, L}_{q_2}], \ldots, [T_r,
\underbrace{L, \ldots, L}_{q_r}]\bigr] \ne 0.$$
By Lemma~\ref{LemmaLHBA0ComplReducible}, $L$ is a completely reducible $(\ad B)\oplus \tilde A_0$-module.
Hence $T_k=T_{k1}\oplus T_{k2}\oplus \ldots \oplus T_{kn_k}$
for some irreducible $(\ad B)\oplus \tilde A_0$-submodules $T_{kj}$.
Therefore we can choose $1 \leqslant j_k \leqslant n_k$ such that
$$\bigl[[T_{1j_1}, \underbrace{L, \ldots, L}_{q_1}], [T_{2j_2}, \underbrace{L, \ldots, L}_{q_2}], \ldots, [T_{rj_r},
\underbrace{L, \ldots, L}_{q_r}]\bigr] \ne 0.$$
Let $\tilde I_k = T_{kj_k}\oplus J_k$.
We claim that $\tilde I_k$ is an ideal in $L$ and $\Ann(\tilde I_k / J_k)=\Ann(I_k/J_k)$
for all $1 \leqslant k \leqslant r$.
Denote by $L_0$, $B_0$, $R_0$, $\mathfrak g_0$, respectively, the images
of $L$, $B$, $R$, $\mathfrak g$ in $\mathfrak{gl}(I_k/J_k)$.
Note that $B_0$ and $R_0$
are, respectively, semisimple and solvable. Hence $L_0=B_0\oplus R_0$
(direct sum of $\mathfrak g$-submodules) where $\mathfrak g$-action
on $\mathfrak{gl}(I_k/J_k)$ is induced from the $\mathfrak g$-action
on $I_k/J_k$ and corresponds to the adjoint action of $\mathfrak g_0$
on $\mathfrak{gl}(I_k/J_k)$.
In particular, $R_0$ is a solvable ideal of $(L_0+\mathfrak g_0)$
and $B_0$ is an ideal
of $(B_0 + \mathfrak g_0)$.
Note that $I_k/J_k$ is an irreducible $(L_0+\mathfrak g_0)$-module. By~E.~Cartan's theorem~\cite[Proposition~1.4.11]{GotoGrosshans}, $L_0+\mathfrak g_0 = B_1
\oplus R_1$ (direct sum of ideals) where $B_1$ is semisimple and $R_1$ is either zero
or equal to the center $Z(\mathfrak{gl}(I_k/J_k))$ consisting of scalar operators.
Considering the resulting projection $(L_0+\mathfrak g_0) \to R_1$,
we obtain $B_0 \subseteq B_1$. Since $R_0 \subseteq R_1$ consists of scalar operators,
$B_0$ is an ideal of $(L_0+\mathfrak g_0)$ and $B_1$.
Since $\tilde I_k/J_k$ is an irreducible $(\ad B)\oplus \tilde A_0$-module and $\tilde A_0$ is acting on $I_k/J_k$ by scalar operators,
$\tilde I_k/J_k$ is an irreducible $B_0$- and $L$-module. In particular, $\tilde I_k$ is an ideal.
If $\Ann(\tilde I_k/J_k)\ne \Ann(I_k/J_k)$, then $a\tilde I_k/J_k=0$
for some $a \in L_0 \cong L/\Ann(I_k/J_k)$, $a\ne 0$.
Let $\varphi \colon L_0 \to \mathfrak{gl}(\tilde I_{k}/J_k)$
be the corresponding action and $a = b + c$
where $b\in B_0$, $c \in R_0$.
Then $\varphi(b)=-\varphi(c)$ is a scalar operator on $\tilde I_{k}/J_k$.
Hence $\varphi(b)$ belongs to the center of the semisimple
algebra $\varphi(B_0)$. Thus $\varphi(b)=\varphi(c)=0$, $b\ne 0$.
Recall that $B_1$ is a semisimple algebra.
Therefore $B_1 = B_0 \oplus B_2$ (direct sum of ideals)
for some $B_2$. Since $R_1$ consists of scalar operators, $I_k/J_k$ is an irreducible $B_1$-module and
we have $$I_k/J_k = \sum_{\substack{a_i \in B_2,\\ \alpha\in\mathbb Z_+}} a_1 \ldots a_\alpha\ \tilde I_k/J_k.$$
Now $[b, B_2]=0$ and $b\,\tilde I_{k}/J_k=0$ imply $bI_k/J_k=0$, hence $b=0$, a contradiction.
Hence $\Ann(\tilde I_k/J_k)=\Ann(I_k/J_k)$.
Note that $\tilde I_1, \tilde I_2, \ldots, \tilde I_r$,
$J_1, J_2, \ldots, J_r$ satisfy Conditions 1 and 2' for $H=F$, i.e. for the case of ordinary
polynomial identities. Moreover, $$\dim \frac{L}{\Ann(I_1/J_1) \cap \dots \cap \Ann(I_r/J_r)}
= \dim \frac{L}{\Ann(\tilde I_1/J_1) \cap \dots \cap \Ann(\tilde I_r/J_r)}.$$
Hence $\PIexp^{U(\mathfrak g)}(L)=\PIexp(L)$.
\end{proof}
Analogs for associative algebras of Theorems~\ref{TheoremLieDerPIexpEqual} and~\ref{TheoremLieGConnPIexpEqual} below
were proved in~\cite[Theorems 15 and 16]{GordienkoKochetov}.
\begin{theorem}\label{TheoremLieDerPIexpEqual}
Let $L$ be a finite dimensional Lie algebra
over a field $F$ of characteristic $0$. Suppose a finite dimensional
semisimple Lie algebra $\mathfrak g$ acts on $L$ by derivations.
Then $\PIexp^{U(\mathfrak g)}(L)=\PIexp(L)$.
\end{theorem}
\begin{proof}
$H$-codimensions do not change upon an extension of the base field.
The proof is analogous to the cases of ordinary codimensions of
associative~\cite[Theorem~4.1.9]{ZaiGia} and
Lie algebras~\cite[Section~2]{ZaiLie}.
Thus without loss of generality we may assume
$F$ to be algebraically closed.
Now we use Theorem~\ref{TheoremddprimeDiff} and the remark after it.
\end{proof}
\begin{remark}
Theorem~\ref{TheoremLieDerPIexpEqual} implies that ordinary and differential codimensions have similar asymptotic behavior; however, the codimensions themselves may be different.
Consider the adjoint action of $\mathfrak{sl}_2(F)$ on itself. Then $c_1(\mathfrak{sl}_2(F))=1
< c_1^{U(\mathfrak{sl}_2(F))}(\mathfrak{sl}_2(F))$
since $x_1^{e_{11}-e_{22}}$ and $x_1^{e_{12}}$ are linearly independent modulo $\Id^{U(\mathfrak{sl}_2(F))}(\mathfrak{sl}_2(F))$.
\end{remark}
\begin{theorem}\label{TheoremLieGConnPIexpEqual}
Let $L$ be a finite dimensional Lie algebra
over an algebraically closed field $F$ of characteristic $0$. Suppose a connected reductive
affine algebraic group $G$ is rationally acting on $L$ by automorphisms.
Then $\PIexp^{G}(L)=\PIexp(L)$.
\end{theorem}
\begin{proof}
Note that the Lie algebra $\mathfrak g$ of the group $G$ is acting on $L$ by derivations.
By~\cite[Lemma~5]{GordienkoKochetov}, $c_n^{U(\mathfrak g)}(L)=c_n^{G}(L)$ for all $n\in\mathbb N$.
Hence Theorem~\ref{TheoremddprimeDiff} implies $\PIexp^{G}(L)=\PIexp(L)$.
\end{proof}
\begin{remark} In Theorem~\ref{TheoremLieGConnPIexpEqual} one could consider the case
when $G$ is acting by anti-automorphisms too. However, in this case $G = G_0 \cup G_1$, $G_0 \cap G_1 = \varnothing$, where the elements of $G_0$ are acting on $L$ by automorphisms and the elements
of $G_1$ are acting by anti-automorphisms. Since $G_0$ and $G_1$ are defined by polynomial
equations, they are closed subsets in $G$. Recall that $G$ is connected.
Therefore $G_1 = \varnothing$ and $G$ must act by automorphisms only.
\end{remark}
\section{Lie algebras with $R=N$}\label{SectionHRTheSame}
\subsection{Formulation of the theorem}
If the solvable radical of an $H$-module Lie algebra $L$ is nilpotent, we
do not require $L$ to satisfy Conditions~\ref{ConditionLevi}--\ref{ConditionLComplHred} in the definition of an $H$-nice algebra (see Subsection~\ref{SubsectionHnice}). Moreover, the formula for the Hopf PI-exponent is simpler than in the general case (Subsections~\ref{SubsectionOrigFormulaHPIexp} and~\ref{SubsectionModificationHPIexp}).
\begin{theorem}\label{TheoremMainNRSame} Let $L$ be a finite dimensional non-nilpotent
$H$-module Lie algebra
where $H$ is a Hopf algebra over a field $F$ of characteristic $0$.
Suppose that the solvable radical of $L$ coincides with the nilpotent
radical $N$ of $L$ and $N$ is an $H$-submodule.
Then there exist constants $d\in\mathbb N$, $C_1, C_2 > 0$, $r_1, r_2 \in \mathbb R$ such that $$C_1 n^{r_1} d^n \leqslant c^{H}_n(L) \leqslant C_2 n^{r_2} d^n\text{ for all }n \in \mathbb N.$$
Moreover, if $F$ is algebraically closed, the constant $d$ is defined as follows.
Let $$L/N = B_1 \oplus \ldots \oplus B_q \text{ (direct sum of $H$-invariant ideals)}$$
where $B_i$ are $H$-simple Lie algebras and let $\varkappa \colon L/N \to L$
be any homomorphism of algebras (not necessarily $H$-linear) such that $\pi\varkappa = \id_{L/N}$ where $\pi \colon L \to L/N$ is the natural projection.
Then $$d= \max\left( \dim\bigl(B_{i_1}\oplus B_{i_2} \oplus \ldots \oplus B_{i_r}\bigr)
\mathbin{\Bigl|} r \geqslant 1,\ \bigl[[ H\varkappa(B_{i_1}), \underbrace{L, \ldots, L}_{q_1}], \right.$$ \begin{equation}\label{EqdRNSame}\left. [ H\varkappa(B_{i_2}), \underbrace{L, \ldots, L}_{q_2}], \ldots, [ H\varkappa(B_{i_r}),
\underbrace{L, \ldots, L}_{q_r}]\bigr] \ne 0 \text{ for some } q_i \geqslant 0 \right).\end{equation}
\end{theorem}
\begin{remark}
If $L$ is nilpotent, i.e. $[x_1, \ldots, x_p]\equiv 0$ for some $p\in\mathbb N$, then
$V^{H}_n \subseteq \Id^{H}(L)$ and $c^H_n(L)=0$ for all $n \geqslant p$.
\end{remark}
Theorem~\ref{TheoremMainNRSame} will be proved at the end of Subsection~\ref{SubsectionNRSameLowerBound}.
\begin{corollary}
The analog of Amitsur's conjecture holds
for such codimensions.
\end{corollary}
\begin{remark}
The existence of a decomposition $L/N = B_1 \oplus \ldots \oplus B_q$ (direct sum of $H$-invariant ideals)
where $B_i$ are $H$-simple Lie algebras, follows from~\cite[Theorem~6]{ASGordienko4}.
The existence of the map $\varkappa$ follows from the ordinary Levi theorem.
\end{remark}
\begin{remark}
Note that by~\cite[Lemma~9]{GordienkoKochetov},
every differential simple algebra is simple.
By~\cite[Lemma~10]{GordienkoKochetov}, a $G$-simple algebra is simple
for a rational action of a connected affine algebraic group~$G$.
Therefore, Theorem~\ref{TheoremMainNRSame} yields another proof of Theorems~\ref{TheoremLieDerPIexpEqual}
and~\ref{TheoremLieGConnPIexpEqual} for the case $R=N$ since in the conditions of the latter theorems there exists an $H$-invariant Levi decomposition and we can choose $\varkappa$ to be a homomorphism of $H$-modules.
\end{remark}
\begin{corollary}
Let $L$ be a finite dimensional non-nilpotent
Lie algebra over a field $F$ of characteristic $0$ with an action of a group $G$ by automorphisms and anti-automorphisms. Suppose that the solvable radical of $L$ coincides with the nilpotent radical $N$ of $L$.
Then there exist constants $d\in\mathbb N$, $C_1, C_2 > 0$, $r_1, r_2 \in \mathbb R$ such that $$C_1 n^{r_1} d^n \leqslant c^{G}_n(L) \leqslant C_2 n^{r_2} d^n\text{ for all }n \in \mathbb N.$$
\end{corollary}
\begin{proof}
By~\cite[Lemma~28]{ASGordienko5}, we may assume that $G$ is acting by automorphisms only. Now we notice that
radicals are invariant under all automorphisms. Hence we may apply Theorem~\ref{TheoremMainNRSame}.
\end{proof}
\begin{corollary}
Let $L$ be a finite dimensional non-nilpotent
Lie algebra over a field $F$ of characteristic $0$ with an action of a Lie algebra $\mathfrak g$ by derivations. Suppose that the solvable radical of $L$ coincides with the nilpotent radical $N$ of $L$. Then there exist constants $d\in\mathbb N$, $C_1, C_2 > 0$, $r_1, r_2 \in \mathbb R$ such that $$C_1 n^{r_1} d^n \leqslant c^{U(\mathfrak g)}_n(L) \leqslant C_2 n^{r_2} d^n\text{ for all }n \in \mathbb N.$$
\end{corollary}
\begin{proof}
By~\cite[Chapter~III, Section~6, Theorem~7]{JacobsonLie}, the
radical is invariant under all derivations. Hence we may apply Theorem~\ref{TheoremMainNRSame}.
\end{proof}
The algebra in the example below has no $G$-invariant Levi decomposition (see~\cite[Example~12]{ASGordienko4}); nevertheless, it satisfies the analog of Amitsur's conjecture.
\begin{example}[Yuri Bahturin]\label{ExampleGnoninvLevi}
Let $F$ be a field of characteristic $0$ and let $$L = \left\lbrace\left(\begin{array}{cc} C & D \\
0 & 0
\end{array}\right) \mathrel{\biggl|} C \in \mathfrak{sl}_m(F), D\in M_m(F)\right\rbrace
\subseteq \mathfrak{sl}_{2m}(F),$$ $m \geqslant 2$.
Consider $\varphi \in \Aut(L)$ where
$$\varphi\left(\begin{array}{cc} C & D \\
0 & 0
\end{array}\right)=\left(\begin{array}{cc} C & C+D \\
0 & 0
\end{array}\right).$$
Then $L$ is a Lie algebra with an action of the group $G=\langle \varphi \rangle
\cong \mathbb Z$ by automorphisms
and there exist constants $C_1, C_2 > 0$, $r_1, r_2 \in \mathbb R$ such that $$C_1 n^{r_1} (m^2-1)^n \leqslant c^G_n(L) \leqslant C_2 n^{r_2} (m^2-1)^n\text{ for all }n \in \mathbb N.$$
\end{example}
\begin{proof}
$G$-codimensions do not change upon an extension of the base field.
The proof is analogous to the cases of ordinary codimensions of
associative~\cite[Theorem~4.1.9]{ZaiGia} and
Lie algebras~\cite[Section~2]{ZaiLie}. Moreover, upon an extension of $F$, $L$
remains an algebra of the same type.
Thus without loss of generality we may assume
$F$ to be algebraically closed.
Note that
$$N=\left\lbrace\left(\begin{array}{cc} 0 & D \\
0 & 0
\end{array}\right) \mathrel{\biggl|} D\in M_m(F)\right\rbrace
$$
is the solvable (and nilpotent) radical of $L$ and $L/N \cong \mathfrak{sl}_m(F)$
is a simple Lie algebra. Hence $\PIexp^G(L)=\dim\mathfrak{sl}_m(F)= m^2-1$
by Theorem~\ref{TheoremMainNRSame}.
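Indeed, applying Theorem~\ref{TheoremMainNRSame} to the $FG$-module algebra $L$, we have $q=1$ and $B_1\cong\mathfrak{sl}_m(F)$, and the condition defining $d$ in~(\ref{EqdRNSame}) already holds for $r=1$ and $q_1=0$ since $FG\varkappa(B_1)\supseteq\varkappa(B_1)\ne 0$; hence $d=\dim B_1 = m^2-1$.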
\end{proof}
The algebra in the example below has no $L$-invariant Levi decomposition (see~\cite[Example~13]{ASGordienko4}); nevertheless, it satisfies the analog of Amitsur's conjecture.
\begin{example}
Let $L$ be the Lie algebra from Example~\ref{ExampleGnoninvLevi}.
Consider the adjoint action of $L$ on itself by derivations.
Then there exist constants $C_1, C_2 > 0$, $r_1, r_2 \in \mathbb R$ such that $$C_1 n^{r_1} (m^{2}-1)^n \leqslant c^{U(L)}_n(L) \leqslant C_2 n^{r_2} (m^2-1)^{n}\text{ for all }n \in \mathbb N.$$
\end{example}
\begin{proof} Again, without loss of generality we may assume
$F$ to be algebraically closed. Since $L/N \cong \mathfrak{sl}_m(F)$
is a simple Lie algebra, $\PIexp^{U(L)}(L)=\dim\mathfrak{sl}_m(F)= m^2-1$
by Theorem~\ref{TheoremMainNRSame}.
\end{proof}
\subsection{$S_n$-cocharacters and upper bound}
One of the main tools in the investigation of polynomial
identities is provided by the representation theory of symmetric groups.
Let $L$ be an $H$-module Lie algebra
over a field $F$ of characteristic $0$.
The symmetric group $S_n$ acts
on the spaces $\frac {V^H_n}{V^H_{n}
\cap \Id^H(L)}$
by permuting the variables.
Irreducible $FS_n$-modules are described by partitions
$\lambda=(\lambda_1, \ldots, \lambda_s)\vdash n$ and their
Young diagrams $D_\lambda$.
The character $\chi^H_n(L)$ of the
$FS_n$-module $\frac {V^H_n}{V^H_n
\cap \Id^H(L)}$ is
called the $n$th
\textit{cocharacter} of polynomial $H$-identities of $L$.
We can rewrite $\chi^H_n(L)$ as
a sum $$\chi^H_n(L)=\sum_{\lambda \vdash n}
m(L, H, \lambda)\chi(\lambda)$$ of
irreducible characters $\chi(\lambda)$.
Let $e_{T_{\lambda}}=a_{T_{\lambda}} b_{T_{\lambda}}$
and
$e^{*}_{T_{\lambda}}=b_{T_{\lambda}} a_{T_{\lambda}}$
where
$a_{T_{\lambda}} = \sum_{\pi \in R_{T_\lambda}} \pi$
and
$b_{T_{\lambda}} = \sum_{\sigma \in C_{T_\lambda}}
(\sign \sigma) \sigma$,
be Young symmetrizers corresponding to a Young tableau~$T_\lambda$.
Then $M(\lambda) = FS_n e_{T_\lambda} \cong FS_n e^{*}_{T_\lambda}$
is an irreducible $FS_n$-module corresponding to
a partition~$\lambda \vdash n$.
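For instance, for $\lambda=(2,1)\vdash 3$ and the Young tableau $T_\lambda$ with first row $1,2$ and second row $3$ we have $R_{T_\lambda}=\{1,(12)\}$ and $C_{T_\lambda}=\{1,(13)\}$, so
$$e_{T_\lambda}=a_{T_\lambda}b_{T_\lambda}=\bigl(1+(12)\bigr)\bigl(1-(13)\bigr)=1+(12)-(13)-(12)(13),$$
and $M(\lambda)=FS_3\,e_{T_\lambda}$ is the two-dimensional irreducible $FS_3$-module.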
We refer the reader to~\cite{Bahturin, DrenKurs, ZaiGia}
for an account
of $S_n$-representations and their applications to polynomial
identities.
In the next two lemmas we consider
a finite dimensional
$H$-module Lie algebra $L$ with an $H$-invariant nilpotent ideal $N$
where $H$ is a Hopf algebra over a field $F$ of characteristic $0$
and $N^p=0$
for some $p\in\mathbb N$.
Fix a decomposition $L/N = B_1 \oplus \ldots \oplus B_q $
where $B_i$ are some subspaces.
Let $\varkappa \colon L/N \to L$
be an $F$-linear map such that $\pi\varkappa = \id_{L/N}$ where $\pi \colon L \to L/N$
is the natural projection. Define the number $d$ by~(\ref{EqdRNSame}).
\begin{lemma}\label{LemmaNRSameUpperCochar}
Let $n\in\mathbb N$ and $\lambda = (\lambda_1, \ldots, \lambda_s) \vdash n$. If $\sum_{k=d+1}^s \lambda_k \geqslant p$, then $m(L, H, \lambda)=0$.
\end{lemma}
\begin{proof}
It is sufficient to prove that $e^{*}_{T_\lambda}f \in \Id^H(L)$ for all $f \in V_n$ and for all Young tableaux $T_\lambda$ corresponding to $\lambda$.
Fix a basis in $L$ that is a union of bases of~$\varkappa(B_1),\ldots, \varkappa(B_q)$ and~$N$.
Since $e^{*}_{T_\lambda}f$ is multilinear, it is sufficient to prove that $e^{*}_{T_\lambda}f$
vanishes under all evaluations on basis elements.
Fix some substitution of basis elements and choose $1 \leqslant i_1,\ldots,i_r \leqslant q$
such that all the substituted elements belong to $\varkappa(B_{i_1})\oplus \ldots \oplus \varkappa(B_{i_r}) \oplus N$ and, for each $j$, at least one of the substituted elements belongs to $\varkappa(B_{i_j})$.
Then we may assume that $\dim(B_{i_1}\oplus \ldots \oplus B_{i_r}) \leqslant d$,
since otherwise $e^{*}_{T_\lambda}f$ is zero by the definition of $d$.
Note that
$e^{*}_{T_\lambda} = b_{T_\lambda} a_{T_\lambda}$
and $b_{T_\lambda}$ alternates the variables of each column
of $T_\lambda$. Hence if $e^{*}_{T_\lambda} f$ does not vanish, this implies that different basis elements
are substituted for the variables of each column.
Therefore, at least $\sum_{k=d+1}^s \lambda_k \geqslant p$ elements must be taken from $N$.
Since $N^p = 0$, we have $e^{*}_{T_\lambda} f \in \Id^H(L)$.
\end{proof}
\begin{lemma}\label{LemmaNRSameUpper}
If $d > 0$, then there exist constants $C_2 > 0$, $r_2 \in \mathbb R$
such that $c^H_n(L) \leqslant C_2 n^{r_2} d^n$
for all $n \in \mathbb N$. In the case $d=0$, the algebra $L$ is nilpotent.
\end{lemma}
\begin{proof}
Lemma~\ref{LemmaNRSameUpperCochar} and~\cite[Lemmas~6.2.4, 6.2.5]{ZaiGia}
imply
$$
\sum_{m(L,H, \lambda)\ne 0} \dim M(\lambda) \leqslant C_3 n^{r_3} d^n
$$
for some constants $C_3, r_3 > 0$.
Together with \cite[Theorem~12]{ASGordienko5} this inequality yields the upper bound.
\end{proof}
\subsection{Lower bound}\label{SubsectionNRSameLowerBound}
Lemma~\ref{LemmaHRSameLowerPolynomial} below is a version of~\cite[Lemma~20]{ASGordienko5}
adapted for our case.
\begin{lemma}\label{LemmaHRSameLowerPolynomial}
Suppose that $F$ is an algebraically closed field of characteristic $0$ and
let $L$, $N$, $\varkappa$, $B_i$, and $d$ be the same as in Theorem~\ref{TheoremMainNRSame}.
If $d > 0$, then there exists a number $n_0 \in \mathbb N$ such that for every $n\geqslant n_0$
there exist disjoint subsets $X_1$, \ldots, $X_{2k} \subseteq \lbrace x_1, \ldots, x_n
\rbrace$, $k := \left[\frac{n-n_0}{2d}\right]$,
$|X_1| = \ldots = |X_{2k}|=d$ and a polynomial $f \in V^H_n \backslash
\Id^H(L)$ alternating in the variables of each set $X_j$.
\end{lemma}
\begin{proof} Without loss of generality,
we may assume that $d = \dim(B_1 \oplus B_2 \oplus \ldots \oplus B_r)$
where
$\bigl[[ H\varkappa(B_1), a_{11}, \ldots, a_{1q_1}], [ H\varkappa(B_2), a_{21}, \ldots, a_{2q_2}], \ldots, [ H\varkappa(B_r),
a_{r1}, \ldots, a_{rq_r}]\bigr] \ne 0$ for some $q_i\geqslant 0$
and $a_{kj}\in L$.
Since $N$ is nilpotent, we can increase the $q_i$ by adding to $\lbrace a_{ij} \rbrace$
sufficiently many elements of $N$ in such a way that
$$\bigl[[ {\gamma_1}\varkappa(b_1), a_{11}, \ldots, a_{1q_1}], [ {\gamma_2}\varkappa(b_2), a_{21}, \ldots, a_{2q_2}], \ldots, [{\gamma_r}\varkappa(b_r),
a_{r1}, \ldots, a_{rq_r}]\bigr] \ne 0$$ for some $q_i\geqslant 0$, $b_i \in B_i$, $\gamma_i \in H$,
however
\begin{equation}\label{Eqbazero}\bigl[[ \tilde b_1, a_{11}, \ldots, a_{1q_1}], [ \tilde b_2, a_{21}, \ldots, a_{2q_2}], \ldots, [\tilde b_r,
a_{r1}, \ldots, a_{rq_r}]\bigr] = 0 \end{equation} for all $t_i \geqslant 0$, $\tilde b_i\in [H\varkappa(B_i), \underbrace{L, \ldots, L}_{t_i}]$
such that $\tilde b_j\in [H\varkappa(B_j), L, \ldots, L, N, L, \ldots, L]$ for at least one $j$.
Recall that $\varkappa$ is a homomorphism of algebras.
Moreover, $\pi(h\varkappa(a)-\varkappa(ha))=0$
implies $h\varkappa(a)-\varkappa(ha) \in N$ for all $a\in L/N$ and $h\in H$.
Hence, by~(\ref{Eqbazero}), if we replace $\varkappa(b_i)$ in $$\bigl[[ \gamma_1\varkappa(b_1), a_{11}, \ldots, a_{1q_1}], [ \gamma_2\varkappa(b_2), a_{21}, \ldots, a_{2q_2}], \ldots, [ \gamma_r\varkappa(b_r),
a_{r1}, \ldots, a_{rq_r}]\bigr]$$ with the commutator of $\varkappa(b_i)$ and an expression involving $\varkappa$, the map $\varkappa$
will behave like a homomorphism of $H$-modules.
We will exploit this property further.
In virtue of~\cite[Theorem~11]{ASGordienko5},
there exist constants $m_i \in \mathbb Z_+$
such that for any $k$ there exist
multilinear associative $H$-polynomials $f_i$ of degree $(2kd_i + m_i)$,
$d_i := \dim B_i$,
alternating in the variables from disjoint sets
$X^{(i)}_{\ell}$, $1 \leqslant \ell \leqslant 2k$, $|X^{(i)}_{\ell}|=d_i$,
such that each $f_i$ does not vanish under some evaluation in $\ad B_i$.
Since $B_i$ is an irreducible $(H, \ad B_i)$-module, by the Density Theorem,
$\End_F(B_i)$ is generated by the operators from~$H$ and~$\ad B_i$.
Note that $\End_F(B_i) \cong M_{d_i}(F)$.
Thus every matrix unit $e^{(i)}_{j\ell} \in M_{d_i}(F)$ can be
represented as a polynomial in operators from $H$
and $\ad B_i$. Choose such polynomials
for all $i$ and all matrix units. Denote by $m_0$ the maximal degree of those
polynomials.
Let $n_0 := r(2m_0+1)+ \sum_{i=1}^r (m_i+q_i)$.
Now we choose $f_i$ for $k = \left[\frac{n-n_0}{2d}\right]$.
In addition, we choose $\tilde f_1$ for $\tilde k = \left[\frac{n-2kd-m_1}{2d_1}\right]+1$
and $B_{i_1}$ using~\cite[Theorem~11]{ASGordienko5} once again. The polynomials $f_i$ will deliver us the required alternations. However, the total degree of the product may be less than $n$. We will use $\tilde f_1$ to increase the number of variables and obtain a polynomial of degree $n$.
By~\cite[Theorem~11]{ASGordienko5},
there exist $\bar x_{i1}, \ldots, \bar x_{i, 2k d_i+m_i} \in B_i$
such that $$f_i(\ad \bar x_{i1}, \ldots, \ad \bar x_{i, 2k d_i+m_i})\ne 0,$$
and $\bar x_1, \ldots, \bar x_{2\tilde k d_1+m_1} \in B_1$
such that $\tilde f_1(\ad \bar x_1, \ldots, \ad \bar x_{2\tilde k d_1+m_1}) \ne 0$.
Hence $$e^{(i)}_{\ell_i \ell_i} f_i(\ad \bar x_{i1}, \ldots, \ad \bar x_{i, 2k d_i+m_i})
e^{(i)}_{s_i s_i} \ne 0$$
and $$e^{(1)}_{\tilde\ell \tilde\ell}\tilde f_1(\ad \bar x_1, \ldots, \ad \bar x_{2\tilde k d_1+m_1})
e^{(1)}_{\tilde s \tilde s} \ne 0$$
for some matrix units $e^{(i)}_{\ell_i \ell_i},
e^{(i)}_{s_i s_i} \in \End_F(B_i)$, $1 \leqslant \ell_i, s_i \leqslant d_i$,
$e^{(1)}_{\tilde\ell \tilde\ell}, e^{(1)}_{\tilde s \tilde s} \in \End_F(B_1)$, $1 \leqslant \tilde \ell,
\tilde s \leqslant d_1$.
Thus $$\sum_{\ell=1}^{d_i}
e^{(i)}_{\ell \ell_i} f_i(\ad \bar x_{i1}, \ldots, \ad \bar x_{i, 2k d_i+m_i})
e^{(i)}_{s_i \ell}$$ is a nonzero scalar operator in $\End_F(B_i)$.
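To see this, write $X := f_i(\ad \bar x_{i1}, \ldots, \ad \bar x_{i, 2k d_i+m_i})$ and let $X_{\ell_i s_i}$ be the $(\ell_i,s_i)$ entry of the matrix of $X$ with respect to the chosen matrix units. Then $e^{(i)}_{\ell_i \ell_i} X e^{(i)}_{s_i s_i} = X_{\ell_i s_i}\, e^{(i)}_{\ell_i s_i} \ne 0$, and
$$\sum_{\ell=1}^{d_i} e^{(i)}_{\ell \ell_i} X e^{(i)}_{s_i \ell}
= \sum_{\ell=1}^{d_i} X_{\ell_i s_i}\, e^{(i)}_{\ell \ell}
= X_{\ell_i s_i} \id_{B_i}.$$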
Hence
$$ [[\gamma_1\varkappa\left(\sum_{\ell=1}^{d_1}
e^{(1)}_{\ell \ell_1} f_1(\ad \bar x_{11}, \ldots, \ad \bar x_{1,2k d_1+m_1})
e^{(1)}_{s_1 \tilde \ell} \tilde f_1(\ad \bar x_1, \ldots, \ad \bar x_{2\tilde k d_1+m_1})
e^{(1)}_{\tilde s \ell}b_1\right), a_{11}, \ldots, a_{1q_1}],$$
$$ [\gamma_2\varkappa\left(\sum_{\ell=1}^{d_2}
e^{(2)}_{\ell \ell_2} f_2(\ad \bar x_{21}, \ldots, \ad \bar x_{2,2k d_2+m_2})
e^{(2)}_{s_2 \ell}b_2\right), a_{21}, \ldots, a_{2q_2}],
\ldots, $$
$$
[\gamma_r\varkappa\left(\sum_{\ell=1}^{d_r}
e^{(r)}_{\ell \ell_r} f_r(\ad \bar x_{r1}, \ldots, \ad \bar x_{r, 2k d_r+m_r})
e^{(r)}_{s_r \ell}b_r\right), a_{r1}, \ldots, a_{rq_r}]]\ne 0.$$
Now we rewrite
$e^{(i)}_{\ell j}$ as polynomials in elements of $\ad B_i$
and $H$.
Using linearity of the expression in $e^{(i)}_{\ell j}$,
we can replace $e^{(i)}_{\ell j}$ with the products
of elements from $\ad B_i$
and $H$, and the expression will not vanish
for some choice of the products. By the definition
of an $H$-module algebra,
$h(\ad a )b=\ad (h_{(1)}a)(h_{(2)}b)$
for all $h\in H$ and $a, b \in L$. Hence
we can move all elements from $H$ to the right.
As we have mentioned, $\varkappa$ is a homomorphism of algebras and, by~(\ref{Eqbazero}), behaves like
a homomorphism of $H$-modules. Hence
we get
$$ a_0 := \biggl[\Bigl[\gamma_1\Bigl[\bar y_{11}, [\bar y_{12}, \ldots
[\bar y_{1 \alpha_1},
$$ $$
(f_1(\ad \varkappa(\bar x_{11}), \ldots, \ad \varkappa(\bar x_{1, 2k d_1+m_1})))^{h_1}
[\bar w_{11}, [\bar w_{12}, \ldots, [\bar w_{1 \theta_1},$$ $$
(\tilde f_1(\ad \varkappa(\bar x_1), \ldots, \ad \varkappa(\bar x_{2\tilde k d_1+m_1})))^{\tilde h}
[\bar w_{1}, [\bar w_{2}, \ldots, [\bar w_{\tilde \theta},
\varkappa({h'_1}b_1)]\ldots \Bigr],
a_{11}, \ldots, a_{1q_1}\Bigr],$$
$$\Bigl[\gamma_2\Bigl[\bar y_{21}, [\bar y_{22}, \ldots
[\bar y_{2 \alpha_2}, $$ $$
(f_2(\ad \varkappa(\bar x_{21}), \ldots, \ad \varkappa(\bar x_{2, 2k d_2+m_2})))^{h_2}
[\bar w_{21}, [\bar w_{22}, \ldots, [\bar w_{2 \theta_2},
\varkappa({h'_2}b_2)]\ldots \Bigr],
a_{21}, \ldots, a_{2q_2}\Bigr],
\ldots, $$
$$\Bigl[\gamma_r\Bigl[\bar y_{r1}, [\bar y_{r2}, \ldots,
[\bar y_{r \alpha_r}, $$ $$
(f_r(\ad \varkappa(\bar x_{r1}), \ldots, \ad \varkappa(\bar x_{r, 2k d_r+m_r})))^{h_r}
[\bar w_{r1}, [\bar w_{r2}, \ldots, [\bar w_{r \theta_r}, \varkappa({h'_r}b_r)]\ldots \Bigr],
a_{r1}, \ldots, a_{rq_r}\Bigr]\biggr]\ne 0$$
for some $0 \leqslant \alpha_i, \theta_i, \tilde \theta \leqslant m_0$,
\quad $h_i, h'_i, \tilde h \in H$,\quad $\bar y_{ij}, \bar w_{ij} \in \varkappa(B_i)$,
\quad $\bar w_j \in \varkappa(B_1)$.
We assume that each $f_i$ is a polynomial in $x_{i1}, \ldots,
x_{i,2k d_i+m_i}$ and $\tilde f_1$ is a polynomial in $x_1, \ldots, x_{2\tilde k d_1 + m_1}$.
Denote $X_\ell := \bigcup_{i=1}^{r} X^{(i)}_{\ell}$
where $f_i$ is alternating in the variables of each $X^{(i)}_{\ell}$.
Let $\Alt_\ell$ be the operator of alternation
in the variables from $X_\ell$.
Consider
$$\hat f :=
\Alt_1 \Alt_2 \ldots \Alt_{2k} \biggl[\Bigl[\gamma_1\Bigl[y_{11}, [y_{12}, \ldots
[y_{1 \alpha_1},
$$ $$
(f_1(\ad x_{11}, \ldots, \ad x_{1, 2k d_1+m_1}))^{h_1}
[w_{11}, [w_{12}, \ldots, [w_{1 \theta_1},$$ $$
(\tilde f_1(\ad x_1, \ldots, \ad x_{2\tilde k d_1+m_1}))^{\tilde h}
[w_{1}, [w_{2}, \ldots, [w_{\tilde \theta},
z_1]\ldots \Bigr],
u_{11}, \ldots, u_{1q_1}\Bigr],$$
$$\Bigl[\gamma_2\Bigl[y_{21}, [y_{22}, \ldots
[y_{2 \alpha_2}, $$ $$
(f_2(\ad x_{21}, \ldots, \ad x_{2, 2k d_2+m_2}))^{h_2}
[w_{21}, [w_{22}, \ldots, [w_{2 \theta_2},
z_2]\ldots \Bigr],
u_{21}, \ldots, u_{2q_2}\Bigr],
\ldots, $$
$$\Bigl[\gamma_r\Bigl[y_{r1}, [y_{r2}, \ldots,
[y_{r \alpha_r}, $$ $$
(f_r(\ad x_{r1}, \ldots, \ad x_{r, 2k d_r+m_r}))^{h_r}
[w_{r1}, [w_{r2}, \ldots, [w_{r \theta_r}, z_r]\ldots \Bigr],
u_{r1}, \ldots, u_{rq_r}\Bigr]\biggr].$$
Then the value of $\hat f$
under the substitution
$z_i=\varkappa({h'_i}b_i)$, $u_{i\ell}=a_{i\ell}$,
$x_{i\ell}=\varkappa(\bar x_{i\ell})$, $x_i = \varkappa(\bar x_i)$, $y_{i\ell}=\bar y_{i\ell}$, $w_{i\ell}=\bar w_{i\ell}$, $w_i = \bar w_i$
equals $(d_1!)^{2k} \ldots (d_r!)^{2k} a_0 \ne 0$
since $f_i$ are alternating in the variables of each $X^{(i)}_{\ell}$, $[B_i, B_\ell] = 0$ for $i \ne \ell$,
and $\varkappa$ is a homomorphism of algebras.
Hence $$f_0 :=
\Alt_1 \Alt_2 \ldots \Alt_{2k} \biggl[\Bigl[\gamma_1\Bigl[y_{11}, [y_{12}, \ldots
[y_{1 \alpha_1},
$$ $$
(f_1(\ad x_{11}, \ldots, \ad x_{1, 2k d_1+m_1}))^{h_1}
[w_{11}, [w_{12}, \ldots, [w_{1 \theta_1},
z_1]\ldots \Bigr],
u_{11}, \ldots, u_{1q_1}\Bigr],$$
$$\Bigl[\gamma_2\Bigl[y_{21}, [y_{22}, \ldots
[y_{2 \alpha_2}, $$ $$
(f_2(\ad x_{21}, \ldots, \ad x_{2, 2k d_2+m_2}))^{h_2}
[w_{21}, [w_{22}, \ldots, [w_{2 \theta_2},
z_2]\ldots \Bigr],
u_{21}, \ldots, u_{2q_2}\Bigr],
\ldots, $$
$$\Bigl[\gamma_r\Bigl[y_{r1}, [y_{r2}, \ldots,
[y_{r \alpha_r}, $$ $$
(f_r(\ad x_{r1}, \ldots, \ad x_{r, 2k d_r+m_r}))^{h_r}
[w_{r1}, [w_{r2}, \ldots, [w_{r \theta_r}, z_r]\ldots \Bigr],
u_{r1}, \ldots, u_{rq_r}\Bigr]\biggr]$$
does not vanish under the substitution
$$z_1 = (\tilde f_1(\ad \varkappa(\bar x_1), \ldots, \ad \varkappa(\bar x_{2\tilde k d_1+m_1})))^{\tilde h}
[\bar w_{1}, [\bar w_{2}, \ldots, [\bar w_{\tilde \theta},
\varkappa(h'_1 b_1)]\ldots],$$
$z_i=\varkappa(h'_i b_i)$ for $2 \leqslant i \leqslant r$; $u_{i\ell}=a_{i\ell}$,
$x_{i\ell}=\varkappa(\bar x_{i\ell})$, $y_{i\ell}=\bar y_{i\ell}$, $w_{i\ell}=\bar w_{i\ell}$.
Note that $f_0 \in V_{\tilde n}^H$,
$\tilde n: = 2kd +r+ \sum_{i=1}^r (m_i + q_i + \alpha_i+\theta_i)
\leqslant n$. If $n=\tilde n$, then we take $f:=f_0$.
Suppose $n > \tilde n$.
Note that $(\tilde f_1(\ad \varkappa(\bar x_1), \ldots, \ad \varkappa(\bar x_{2\tilde k d_1+m_1})))^{\tilde h}
[\bar w_{1}, [\bar w_{2}, \ldots, [\bar w_{\tilde \theta},
\varkappa(h'_1 b_1)]\ldots]$ is a linear combination of long commutators.
Each of these commutators contains at least $2\tilde k d_1+m_1+1 > n-\tilde n+1$
elements of $L$.
Hence $ f_0$ does not vanish under a substitution
$z_1 = [\bar v_1, [\bar v_2, [\ldots, [\bar v_\theta, \varkappa(h'_1 b_1)]\ldots]$
for some $\theta \geqslant n-\tilde n$, $\bar v_i \in L$;
$z_i=\varkappa(h'_i b_i)$ for $2 \leqslant i \leqslant r$; $u_{i\ell}=a_{i\ell}$,
$x_{i\ell}=\varkappa(\bar x_{i\ell})$, $y_{i\ell}=\bar y_{i\ell}$,
$w_{i\ell}=\bar w_{i\ell}$.
Therefore, $$f := \Alt_1 \Alt_2 \ldots \Alt_{2k} \biggl[\Bigl[\gamma_1\Bigl[y_{11}, [y_{12}, \ldots
[y_{1 \alpha_1},
$$ $$
(f_1(\ad x_{11}, \ldots, \ad x_{1, 2k d_1+m_1}))^{h_1}
[w_{11}, [w_{12}, \ldots, [w_{1 \theta_1},$$
$$
\bigl[v_1, [v_2, [\ldots, [v_{n-\tilde n}, z_1]\ldots\bigr]\ldots \Bigr],
u_{11}, \ldots, u_{1q_1}\Bigr],$$
$$\Bigl[\gamma_2\Bigl[y_{21}, [y_{22}, \ldots
[y_{2 \alpha_2}, $$ $$
(f_2(\ad x_{21}, \ldots, \ad x_{2, 2k d_2+m_2}))^{h_2}
[w_{21}, [w_{22}, \ldots, [w_{2 \theta_2},
z_2]\ldots \Bigr],
u_{21}, \ldots, u_{2q_2}\Bigr],
\ldots, $$
$$\Bigl[\gamma_r\Bigl[y_{r1}, [y_{r2}, \ldots,
[y_{r \alpha_r}, $$ $$
(f_r(\ad x_{r1}, \ldots, \ad x_{r, 2k d_r+m_r}))^{h_r}
[w_{r1}, [w_{r2}, \ldots, [w_{r \theta_r}, z_r]\ldots \Bigr],
u_{r1}, \ldots, u_{rq_r}\Bigr]\biggr]$$
does not vanish under the substitution
$v_\ell = \bar v_\ell$, $1 \leqslant \ell \leqslant n-\tilde n$,
$$z_1 = [\bar v_{n-\tilde n +1}, [\bar v_{n-\tilde n +2}, [\ldots, [\bar v_\theta, \varkappa(h'_1 b_1)]\ldots];$$
$z_i=\varkappa(h'_i b_i)$ for $2 \leqslant i \leqslant r$; $u_{i\ell}=a_{i\ell}$,
$x_{i\ell}=\varkappa(\bar x_{i\ell})$, $y_{i\ell}=\bar y_{i\ell}$, $w_{i\ell}=\bar w_{i\ell}$.
Note that $f \in V_n^H$ and satisfies all the conditions of the lemma.
\end{proof}
Lemma~\ref{LemmaHRSameCochar} is an analog of~\cite[Lemma~21]{ASGordienko5}.
\begin{lemma}\label{LemmaHRSameCochar} Let
$k, n_0$ be the numbers from
Lemma~\ref{LemmaHRSameLowerPolynomial}. Then for every $n \geqslant n_0$ there exists
a partition $\lambda = (\lambda_1, \ldots, \lambda_s) \vdash n$,
$\lambda_i \geqslant 2k-p$ for every $1 \leqslant i \leqslant d$,
with $m(L, H, \lambda) \ne 0$.
Here $p \in \mathbb N$ is a number such that $N^p=0$.
\end{lemma}
\begin{proof}
Consider the polynomial $f$ from Lemma~\ref{LemmaHRSameLowerPolynomial}.
It is sufficient to prove that $e^*_{T_\lambda} f \notin \Id^H(L)$
for some tableau $T_\lambda$ of the desired shape $\lambda$.
It is known that $FS_n = \bigoplus_{\lambda,T_\lambda} FS_n e^{*}_{T_\lambda}$ where the summation
runs over the set of all standard tableaux $T_\lambda$,
$\lambda \vdash n$. Thus $FS_n f = \sum_{\lambda,T_\lambda} FS_n e^{*}_{T_\lambda}f
\not\subseteq \Id^H(L)$ and $e^{*}_{T_\lambda} f \notin \Id^H(L)$ for some $\lambda \vdash n$.
We claim that $\lambda$ is of the desired shape.
It is sufficient to prove that
$\lambda_d \geqslant 2k-p$, since
$\lambda_i \geqslant \lambda_d$ for every $1 \leqslant i \leqslant d$.
Each row of $T_\lambda$ includes the numbers
of no more than one variable from each $X_i$:
indeed, $e^{*}_{T_\lambda} = b_{T_\lambda} a_{T_\lambda}$
and $a_{T_\lambda}$ symmetrizes the variables of each row, while $f$ is alternating in the variables of each $X_i$, so if some row contained two variables of the same $X_i$, we would get $a_{T_\lambda}f=0$ and hence $e^{*}_{T_\lambda}f=0$.
Thus $\sum_{i=1}^{d-1} \lambda_i \leqslant 2k(d-1) + (n-2kd) = n-2k$.
In virtue of Lemma~\ref{LemmaNRSameUpperCochar},
$\sum_{i=1}^d \lambda_i \geqslant n-p$. Therefore
$\lambda_d \geqslant 2k-p$.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{TheoremMainNRSame}]
Let $K \supset F$ be an extension of the field $F$.
Then $$(L \otimes_F K)/(N \otimes_F K) \cong (L/N) \otimes_F K$$
is again a semisimple Lie algebra and $N \otimes_F K$ is still nilpotent.
As we have already mentioned, $H$-codimensions do not change upon an extension of $F$.
Hence we may assume $F$ to be algebraically closed.
The Young diagram~$D_\lambda$ from Lemma~\ref{LemmaHRSameCochar} contains
the rectangular subdiagram~$D_\mu$, $\mu=(\underbrace{2k-p, \ldots, 2k-p}_d)$.
The branching rule for $S_n$ implies that if we consider the restriction of
$S_n$-action on $M(\lambda)$ to $S_{n-1}$, then
$M(\lambda)$ becomes the direct sum of all non-isomorphic
$FS_{n-1}$-modules $M(\nu)$, $\nu \vdash (n-1)$, where each $D_\nu$ is obtained
from $D_\lambda$ by deleting one box. In particular,
$\dim M(\nu) \leqslant \dim M(\lambda)$.
Applying the rule $(n-d(2k-p))$ times, we obtain $\dim M(\mu) \leqslant \dim M(\lambda)$.
By the hook formula, $$\dim M(\mu) = \frac{(d(2k-p))!}{\prod_{i,j} h_{ij}}$$
where $h_{ij}$ is the length of the hook with edge in $(i, j)$.
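For example, for the square diagram $D_\mu$ with $\mu=(2,2)$ the hook lengths are $3,2,2,1$ and $\dim M(\mu)=\frac{4!}{3\cdot 2\cdot 2\cdot 1}=2$. For the rectangular diagram $\mu=(\underbrace{2k-p, \ldots, 2k-p}_d)$ every hook length is at most $(2k-p)+(d-1)$, and the product of the hook lengths in each of the $d$ rows is a product of $2k-p$ distinct positive integers not exceeding $2k-p+d-1$, hence at most $(2k-p+d)!$. This yields the bound $\dim M(\mu) \geqslant \frac{(d(2k-p))!}{((2k-p+d)!)^d}$ used in the estimate below.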
By Stirling's formula,
$$c_n^H(L)\geqslant \dim M(\lambda) \geqslant \dim M(\mu) \geqslant \frac{(d(2k-p))!}{((2k-p+d)!)^d}
\sim $$ $$\frac{
\sqrt{2\pi d(2k-p)} \left(\frac{d(2k-p)}{e}\right)^{d(2k-p)}
}
{
\left(\sqrt{2\pi (2k-p+d)}
\left(\frac{2k-p+d}{e}\right)^{2k-p+d}\right)^d
} \sim C_4 k^{r_4} d^{2kd}$$
for some constants $C_4 > 0$, $r_4 \in \mathbb Q$,
as $k \to \infty$.
Since $k = \left[\frac{n-n_0}{2d}\right]$,
this gives the lower bound.
The upper bound has been proved in Lemma~\ref{LemmaNRSameUpper}.
\end{proof}
\end{document}
\begin{document}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{conjecture}{Conjecture}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}{Definition}
\newtheorem{assumption}{Assumption}
\newcommand{\remove}[1]{}
\title{A classical one-way function to confound quantum adversaries}
\author{Cristopher Moore\\ \texttt{[email protected]}\\ University of New Mexico
\\ and the Santa Fe Institute \and Alexander Russell\\ \texttt{[email protected]}\\ University of Connecticut \and Umesh Vazirani\\ \texttt{[email protected]}\\ U. C. Berkeley}
\maketitle
\abstract{The promise of quantum computation and its consequences
for complexity-theoretic cryptography motivates an
immediate search for cryptosystems which can be implemented with
current technology, but which remain secure even in the presence of
quantum computers. Inspired by recent negative results pertaining to
the nonabelian hidden subgroup problem, we present here a classical
algebraic function $f_V(M)$ of a matrix $M$ which we believe is a
one-way function
secure against quantum attacks. Specifically, inverting
$f_V$ reduces naturally to solving a hidden subgroup problem over the
general linear group (which is at least as hard as the hidden subgroup problem
over the symmetric group). We also demonstrate a reduction from Graph
Isomorphism to the problem of inverting $f_V$;
unlike Graph Isomorphism, however, the function $f_V$ is
random self-reducible and therefore uniformly hard.
These results suggest that, unlike Shor's algorithm for the discrete logarithm---which
is, so far, the only successful quantum attack on a classical one-way function---quantum attacks
based on the hidden subgroup problem are unlikely to work.
We also show that reconstructing any entry of $M$, or the trace of $M$, with
nonnegligible advantage is essentially as hard as inverting $f_V$.
Finally, $f_V$ can be efficiently computed, and the number of output
bits is less than $1+\epsilon$ times the number of input bits for any
$\epsilon > 0$.}
\pagenumbering{arabic}
\section{Introduction}
When a quantum computer is finally built, perhaps its most important
practical impact will be on modern cryptography, thanks to Shor's
celebrated quantum algorithms for factoring and discrete
logs~\cite{Shor:1997:PTA} (and a sequence of followup results).
Quantum cryptography provides a partial recourse, though its scope is
limited by ``no-go'' theorems such as the impossibility of quantum bit
commitment, as well as extravagant physical infrastructure
requirements. A plausible route to a more acceptable antidote was
suggested in a result contemporaneous with Shor's paper, showing that
quantum computers require exponential time to invert a random
permutation in a black box model~\cite{BBBV:1997:BBBV}. Since a random
permutation is a standard abstraction for a one-way function, this
result suggested the possibility of creating classical cryptography
that is resistant to quantum cryptanalysis. The practical challenge is
to design a function $f: \Sigma^n \rightarrow \Sigma^m$ that can be computed
very efficiently by a classical computer, while providing
credible evidence that inversion is difficult even
with a quantum computer. It is also desirable that $f$ be nonexpansive,
i.e., that $m$ not be much larger than $n$. This is the goal of this paper.
Our task is facilitated by new insights obtained over the last
few years into the limits of quantum algorithms for
the non-abelian hidden subgroup problem (HSP).
A series of negative
results~\cite{Hallgren:2000:NSR,Grigni:2001:QMA,Moore:2005:SGF}
culminating in Hallgren, et
al.~\cite{Hallgren:2006:LQC} shows
that for sufficiently non-abelian groups the HSP
is hard for quantum computers in the sense that any quantum
algorithm using the coset state framework requires exponential
time unless it makes highly entangled measurements of
$\Omega(\log |G|)$ registers. Very few algorithmic models for
highly-entangled measurements are known; one
of the few proposals for carrying out such measurements efficiently is
a ``quantum sieve,'' developed by Kuperberg~\cite{K} for the HSP on the
dihedral group. However, a recent result of Moore, Russell, and
\'Sniady~\cite{MRS:Impossibility} shows that no such approach yields an efficient algorithm
over the symmetric groups. In fact, for the cases relevant to Graph Isomorphism, algorithms of this form cannot even do much better than the best known
classical algorithms. This forms the basis of our main assumption about the
limitations of quantum algorithms.
Our function, which we denote $f_V$, is parametrized by a list of
vectors $V=\{\vec{v}_1,\vec{v}_2,\ldots,\vec{v}_m\}$; we will choose each $\vec{v}_i$ independently and uniformly
at random from ${\mathbb F}_q^n$, where $q$ is some small prime. Then given $M
\in {\rm GL}_n({\mathbb F}_q)$, that is, an invertible $n \times n$ matrix over ${\mathbb F}_q$,
we define $f_V(M)$ as the collection
\[
MV = \{ M\vec{v} \mid \vec{v} \in V\}\enspace.
\]
However, $f_V$ returns this collection as an unordered set (say,
sorted in lexicographic order). In other words, we know that each $\vec{w}
\in f_V(M)$ is $M\vec{v}$ for some $\vec{v} \in V$, but we do not know with what
permutation the $\vec{v}$s and $\vec{w}$s correspond.
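As a toy illustration (with parameters far too small to offer any security), take $q=3$, $n=2$, $m=3$,
\[
M=\begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix},
\qquad
V=\left\{ \begin{pmatrix}1\\0\end{pmatrix},\ \begin{pmatrix}0\\1\end{pmatrix},\ \begin{pmatrix}1\\2\end{pmatrix} \right\}.
\]
Multiplying each element of $V$ by $M$ over ${\mathbb F}_3$ gives the vectors $(1,0)^t$, $(1,1)^t$, and $(0,2)^t$, so $f_V(M)$ is the lexicographically sorted collection $\{(0,2)^t,\,(1,0)^t,\,(1,1)^t\}$; the pairing between the $\vec{v}$s and their images is deliberately not part of the output.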
In Section~\ref{sec:injective}, we show that $f_V$ is one-to-one with
high probability in $V$ whenever $m$ is slightly larger than $n$, say $m = n +
O(\ln^2 n)$. Also, clearly $f_V$
can be computed very efficiently, in time $M(n)$, the time to multiply
two $n \times n$ matrices. As a function of the input length $k =
n^2$, the time is essentially $\sqrt{M(k)}$.
In Section~\ref{sec:hardness}, we point out that the natural reduction of inverting $f_V$ to a
hidden subgroup (or hidden shift) problem results in hidden subgroup
problems on the general linear group ${\rm GL}_n$. This group contains
the symmetric group $S_n$ as a subgroup, and its HSP appears resistant
to all known quantum techniques. Moreover, we reduce the Graph
Isomorphism problem to the problem of inverting $f_V$. This
implies that no quantum attack analogous to Shor's algorithm for
the discrete logarithm can succeed, unless there is an efficient
quantum algorithm for Graph Isomorphism.
We stress that unlike Graph Isomorphism, for which there is no known way to generate
hard random instances, inverting $f_V$ is uniformly hard because of the following simple
observation: for any matrix $A$, we have $f_V(AM) = A f_V(M)$. By choosing $A$ randomly,
this allows us to map a fixed instance $f_V(M)$ to a random one with the same $V$.
It follows that, for any fixed $V$, if $f_V$ can be inverted on
even a $1/\textrm{poly}(n)$ fraction of matrices $M$, then there is a
probabilistic algorithm that inverts it on arbitrary inputs $M$.
A similar though more complicated assertion can be made about uniform
hardness with respect to choice of $V$ (see
Section~\ref{sec:hard-core}).
Moreover, we show in Section~\ref{sec:hard-core} that
reconstructing partial information about $f_V^{-1}(x)$ is almost as hard as inverting $f_V$.
Specifically, assuming that $f_V$ is a one-way function, we show that \emph{any
entry of $M$ is hard to recover in any basis}, though this requires a
quasipolynomial hardness assumption on $f_V$. We observe, also, that
$\textbf{tr} \,M$, the trace of $M$, is hard to recover even under typical
super-polynomial hardness assumptions.
It remains an open question whether we can embed a trapdoor in $f_V$
or a suitable modification. We should point out that there are some
classical cryptosystems that are not known to be breakable by a quantum
computer---lattice-based cryptosystems such as the
Ajtai-Dwork~\cite{Ajtai:1997:PKC} cryptosystem and their subsequent
improvements due to Regev~\cite{Regev:2004:NLB}, and the McEliece
cryptosystem~\cite{McEliece:Public}. Indeed, Regev's improvement in
the efficiency of lattice-based cryptosystems is based on a quantum
reduction---thus the increased efficiency is predicated on
resistance of the cryptosystem to quantum attacks! Evidence of
quantum intractability for this cryptosystem comes from the
relationship between finding short vectors and the dihedral hidden
subgroup problem~\cite{Regev:2004:QCL}. In particular, even though
single register Fourier sampling is information-theoretically
sufficient to reconstruct the hidden subgroup, the classical
reconstruction problem is as hard as Subset Sum. On the other hand,
quantum reconstruction is not ruled out, and Kuperberg's quantum
sieve~\cite{K} provides what may be thought of as a mildly
subexponential quantum reconstruction algorithm.
The evidence for quantum intractability for the one-way function proposed
here is stronger: single register Fourier sampling is provably
insufficient, highly-entangled measurements on polynomially many registers is
necessary, and no Kuperberg-like approach can yield an efficient
algorithm. The design of efficient cryptographic primitives resistant
to quantum attack is a pressing practical problem whose solution can
have an enormous impact on the practice of cryptography long before a
quantum computer is physically realized. A program to create such
primitives must necessarily rely on insights into the limits of
quantum algorithms, and this paper explores consequences of the
strongest such insights we have about the limits of quantum
algorithms.
\paragraph{Notation.} As above, we let ${\mathbb F} = {\mathbb F}_q$ denote the finite field with $q$ elements, $q$ a fixed prime. We let ${\rm GL}_n({\mathbb F}_q)$ (abbreviated ${\rm GL}_n$ when the context is clear) denote the collection of invertible $n \times n$ matrices over ${\mathbb F}_q$. Similarly ${\rm End}_n = {\rm End}_n({\mathbb F}_q)$ denotes the set of all $n \times n$ matrices. If $M \in {\rm End}_n$ and $V \subset {\mathbb F}_q^n$, we let $M V$ denote the collection $\{ M\vec{v} \mid \vec{v} \in V\}$.
\section{The function is one-to-one}
\label{sec:injective}
Our first theorem shows that when $m$ is slightly larger than $n$,
then $f_V$ is a one-to-one function with high probability. We have
made only desultory attempts to optimize the rate at which $\delta =
m-n$ must grow for the theorem to hold.
\begin{theorem}
There is a constant $A$ such that if $m=n+\delta$ where $\delta \ge A
\ln^2 n$, then $f_V$ is one-to-one with high probability in $V$.
\end{theorem}
\begin{proof} If there are two matrices $M, M'$ such that $MV=M'V$,
then $KV=V$ where $K=M^{-1} M'$. In other words, there is a
permutation $\pi \in S_m$ such that $K\vec{v}_i = \vec{v}_{\pi(i)}$ for all $i$.
We will show that with high probability the identity matrix is the only matrix
with this property, and therefore that $M=M'$.
Let us call a particular permutation $\pi \in S_m$ \emph{consistent}
if there is a $K$ such that $K\vec{v}_i = \vec{v}_{\pi(i)}$ for all $i$, and let
$\textrm{Cons}_\pi$ be this event. We will show that
\[ \Pr\left[ \bigvee_{\pi \ne 1} \textrm{Cons}_\pi \right] = o(1) \enspace. \]
i.e., with high probability the only consistent permutation is the identity $\pi=1$.
Given a fixed $\pi$, we determine an order on $V$ as follows. First,
we sort the cycles of $\pi$ in order of increasing length, starting
with the fixed points. We break ties by assigning each cycle an index
equal to the smallest $i$ such that $\vec{v}_i$ appears in it and putting
cycles with the smallest index first. Then, we rotate each cycle so
that the $\vec{v}_i$ with smallest $i$ in that cycle comes first. The
details here are irrelevant; all that matters is that each $\pi$
determines an order on $V$ with the properties that the vectors
corresponding to fixed points come first, and that groups of vectors
corresponding to cycles of $\pi$ are contiguous.
Now fix a constant $C$, and let $L_\pi$ consist of the first
$n+\delta-C \ln n$ vectors in $V$ according to this order. Let
$\textrm{Spans}_\pi$ be the event that $L_\pi$ spans the entire space
${\mathbb F}_q^n$. Then the union bound gives
\begin{align*}
\Pr\left[ \bigvee_{\pi \ne 1} \textrm{Cons}_\pi \right]
&\le \sum_{\pi \ne 1} \Pr\left[ \textrm{Cons}_\pi \!\mid\! \textrm{Spans}_\pi \right]
+ \Pr\left[ \bigvee_{\pi} \overline{\textrm{Spans}_\pi} \right] \\
\end{align*}
To bound the conditional probability $\Pr[\textrm{Cons}_\pi \!\mid\!
\textrm{Spans}_\pi]$, note that if $L_\pi$ spans the entire space, then $K$ is
determined by the images of the vectors in $L_\pi$. Therefore, if all
the vectors in $L_\pi$ are fixed by $K$, then $K$ is the identity matrix and $\pi=1$.
On the other hand, we have sorted $V$ so that the fixed vectors come
first, so if $\pi \ne 1$ none of the $C \ln n$ vectors outside
$L_\pi$ can be fixed. We expose these vectors in sorted order. For
each $\vec{v}_i \notin L_\pi$ which is not the first in its cycle, the
probability that $\vec{v}_i$ is the image under $K$ of its predecessor
$\vec{v}_{\pi^{-1}(i)}$ is $q^{-n}$ since $\vec{v}_i$ is uniformly random. These
events are independent and each of these cycles is of length at least
$2$, so the probability that $K \vec{v}_i = \vec{v}_{\pi(i)}$ for all $\vec{v}_i \notin
L$ is at most $q^{-(C/2) n \ln n}$. Summing over all $(n+\delta)!$
permutations $\pi$ and assuming for simplicity that $\delta \le n$ (a
condition which we can easily remove), the conditional probability
that any $\pi \ne 1$ is consistent is at most
\[ (2n)! \,q^{-(C/2) n \ln n} = n^{O(1)} (2/{\rm e})^{2n} n^{(2-(C/2) \ln q)n} \]
which is $o(1)$ if
\begin{equation}
\label{eq:c1}
C \ge 4/\ln q \enspace .
\end{equation}
Now we bound the probability that $\textrm{Spans}_\pi$ fails to hold for any
$\pi$ by proving that with high probability $V$ contains no subsets
$L$ of size $n+\delta-C \ln n$ which do not span the entire space. By
Markov's inequality, the probability that a given such $L$ does not
span the space is at most the expected number of nonzero vectors $\vec{u}$
which are perpendicular to all $\vec{v} \in L$. Since the $\vec{v} \in V$ are
uniformly random, for any fixed $\vec{u}$ the inner product $\vec{u} \cdot \vec{v}$ is
zero with probability $1/q$. Thus this expectation is
\[
(q^n-1)/q^{n+\delta-C \ln n}
< q^{-\delta+C \ln n}
= n^{O(1)} n^{-(A \ln q) \ln n}
\]
where we used $\delta = A \ln^2 n$. The number of subsets of size $n+\delta-C \ln n$ is
\[
{n+\delta \choose C \ln n}
< (2n)^{C \ln n}
= n^{O(1)} n^{C \ln n}
\]
where we again assume for simplicity that $\delta \le n$. So, by the union bound, the probability that there is a non-spanning subset of size $n+\delta-C \ln n$ is at most
$n^{O(1)} n^{(C-A \ln q) \ln n}$
which is $o(1)$ if
\begin{equation}
\label{eq:c2}
A > C / \ln q \enspace .
\end{equation}
In order to satisfy~\eqref{eq:c1} and~\eqref{eq:c2}, we set, say, $C =
4/\ln q$ and $A = 5/\ln^2 q$. Then with high probability, the
identity permutation $1$ is the only consistent one. Finally, note
that $V$ spans the entire space with overwhelming probability; and in
this case, if $K\vec{v} = \vec{v}$ for all $\vec{v}$ in $V$, then $K$ must be the
identity.
\end{proof}
\section{Evidence for immunity against hidden subgroup attacks}
\label{sec:hardness}
In this section we relate the hardness of our function to several
fundamental problems in the area of quantum computation. Our
principal hardness result, suggesting that $f_V$ can resist the
quantum attacks which Shor applied so dramatically to factoring and
discrete log, shows that Graph Isomorphism can be reduced to the
problem of inverting $f_V$. Our current belief, based on a series of
negative results, is that Graph Isomorphism, and more generally the
HSP on groups like $S_n$ and ${\rm GL}_n$ which have exponentially
high-dimensional representations, is hard for quantum computers. If
this belief is correct, then $f_V$ cannot be efficiently inverted by
such methods. We observe, also, that inverting $f_V$ can be reduced to
natural hidden shift and hidden subgroup problems on the group
${\rm GL}_n$.
We begin by reducing the problem of inverting $f_V$ to the Hidden
Shift Problem on the group ${\rm GL}_n$. Given a group $G$, an instance of
a Hidden Shift problem consists of two functions $f_1, f_2:G \to S$,
with the promise that $f_2(g) = f_1(gs)$ for some shift $s \in G$.
Now, given $V$ and $f_V(M) = MV$, we can define two functions $f_1,
f_2: {\rm GL}_n \to S$ where $S$ is the set of unordered lists of vectors
in ${\mathbb F}_q^n$. Namely, we define
\[ f_1(N) = NV \; \mbox{and} \; f_2(N) = N f_V(M) = NMV \enspace . \]
Then $f_1(N) = f_V(N)$ and $f_2(N) = f_V(NM) = f_1(NM)$, and $M$ is
the hidden shift.
Now, given a Hidden Shift Problem on a group $G$ where the functions
$f_1,f_2$ are one-to-one, we can reduce it to a Hidden Subgroup
Problem on a larger group, namely the wreath product $G \wr \mathbb{Z}_2$.
This group is the semidirect product $(G \times G) \rtimes \mathbb{Z}_2$,
where we extend $G \times G$ with an involution which exchanges the
two copies of $G$. We denote its elements $(g_1,g_2,z)$, where those
with $z=0$ form the normal subgroup which fixes the two copies of $G$,
and those with $z=1$ form its nontrivial coset which exchanges them.
Recall that an instance of the Hidden Subgroup Problem consists of a
function $f:G \to S$ with the promise that, for some subgroup $H$,
$f(x)=f(y)$ if and only if $x=yh$ for some $h \in H$. Given a Hidden
Shift Problem with functions $f_1,f_2:G \to S$, define the following
function $f:G \wr \mathbb{Z}_2 \to S^2$:
\begin{align*}
f(g_1,g_2,0) &= (f_1(g_1), f_2(g_2)) \\
f(g_1,g_2,1) &= (f_2(g_2), f_1(g_1))
\end{align*}
Now suppose that $f_2(g)=f_1(gs)$ and let $\alpha$ be the involution
$(s^{-1},s,1)$. If multiplication in $G \wr \mathbb{Z}_2$ is defined so that
$(g_1,g_2,0)\cdot \alpha = (g_2 s, g_1 s^{-1}, 1)$, then $f$'s hidden
subgroup is the order-2 subgroup $H=\{1,\alpha\}$. (Indeed, the
canonical reduction of Graph Isomorphism to the Hidden Subgroup
Problem over $S_n \wr \mathbb{Z}_2$ is exactly of this type, where
$\alpha=(\pi^{-1},\pi,1)$ exchanges the two graphs and $\pi$ is the
isomorphism between them.) Finally, we point out that ${\rm GL}_{2n}$
contains a copy of ${\rm GL}_n \wr \mathbb{Z}_2$: namely, the subgroup consisting
of matrices of the form
\[
\begin{pmatrix} g_1 & 0 \\ 0 & g_2 \end{pmatrix}
\; \mbox{or} \;
\begin{pmatrix} 0 & g_1 \\ g_2 & 0 \end{pmatrix}
\]
where $g_1, g_2 \in {\rm GL}_n$. Thus the problem of inverting $f_V$ reduces to the Hidden Shift and Hidden Subgroup Problems in ${\rm GL}_n$ and ${\rm GL}_{2n}$ respectively.
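Returning to the wreath-product construction above, one readily checks that $f$ is constant on left cosets of $H=\{1,\alpha\}$: using the multiplication rule $(g_1,g_2,0)\cdot \alpha = (g_2 s, g_1 s^{-1}, 1)$ together with $f_2(g)=f_1(gs)$,
\[
f\bigl((g_1,g_2,0)\cdot\alpha\bigr) = \bigl(f_2(g_1 s^{-1}),\, f_1(g_2 s)\bigr) = \bigl(f_1(g_1),\, f_2(g_2)\bigr) = f(g_1,g_2,0) \enspace ,
\]
and since $f_1$ and $f_2$ are one-to-one, $f$ takes distinct values on distinct cosets.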
Now, we give a reduction from Graph Isomorphism to the problem of
inverting $f_V$. Specifically, we reduce the decision problem of
telling whether two graphs $G_1, G_2$ are isomorphic to the decision
problem of telling, given $V$ and $W$, whether there is a matrix $M$
such that $MV=W$, and hence whether $W$ is in the image of $f_V$. The
same construction reduces the promise problem of finding the
isomorphism between two isomorphic graphs to the problem of finding
$M=f_V^{-1}(W)$.
The reduction is quite simple. Given a graph $G_1$ with $n$ vertices
and $m$ edges, $V$ will consist of $n+m$ vectors in ${\mathbb F}_q^n$. We
identify each vertex $u$ with a basis vector $\vec{u}$, which we include in $V$,
and for each edge $(u,v)$ we include the vector $\vec{u}+\vec{v}$. We construct
$W$ from $G_2$ similarly.
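As a small illustration, let $G_1$ be the path on the vertices $\{1,2,3\}$ with edges $(1,2)$ and $(2,3)$, and let $G_2$ have edges $(1,3)$ and $(2,3)$. Writing $\vec{e}_1,\vec{e}_2,\vec{e}_3$ for the standard basis vectors, the construction gives
\[
V=\{\vec{e}_1,\,\vec{e}_2,\,\vec{e}_3,\,\vec{e}_1+\vec{e}_2,\,\vec{e}_2+\vec{e}_3\}
\quad\mbox{and}\quad
W=\{\vec{e}_1,\,\vec{e}_2,\,\vec{e}_3,\,\vec{e}_1+\vec{e}_3,\,\vec{e}_2+\vec{e}_3\},
\]
and the permutation matrix of the isomorphism $1\mapsto 1$, $2\mapsto 3$, $3\mapsto 2$ maps $V$ to $W$.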
Clearly $G_1 \cong G_2$ if and only if $MV=W$ for some permutation
matrix $M$. First we show that, if $q \ge 3$, any $M$ such that
$MV=W$ is necessarily a permutation matrix. To see this, note that
since each vertex of $G_1$ gets mapped to a vertex or an edge of
$G_2$, each column of $M$ is zero except for one or two $1$s. But in
${\mathbb F}_q^n$ with $q \ge 3$, the sum of two such vectors has at least two
nonzero components, so no edge of $G_1$ can be mapped to a vertex of
$G_2$. It follows that every vertex of $G_1$ is mapped to a vertex of
$G_2$, so $M$ is a permutation matrix.
In the case $q=2$, it is possible that $M$ is not a permutation
matrix, and that some vertices get mapped to edges and vice versa.
However, $M$'s existence still implies that $G_1$ and $G_2$ are
isomorphic, and allows us to easily determine the isomorphism $\pi$
between them.
Let us call a vertex of $G_1$ ``green'' or ``red'' if it is mapped to
a vertex or an edge, respectively, and consider a vertex $w$ of $G_2$.
Since $M^{-1}w$ is either a vertex or an edge, either there is a green
vertex $u$ such that $M\vec{u} = \vec{w}$, or there is a red vertex $u$ with a
unique green neighbor $v$ such that $M\vec{u}=\vec{w}+M\vec{v}$ and so $M(\vec{u}+\vec{v})= \vec{w}$. In
either case, define $\pi(u)=w$; since $\pi$ is one-to-one, it follows
that \emph{every} red vertex has a unique green neighbor.
It remains to check that $\pi$ is an isomorphism. Denote the set of
edges of $G_1$ and $G_2$ as $E_1$ and $E_2$ respectively, and suppose
that $(u,v) \in E_1$. If $u$ and $v$ are green, then
$M(\vec{u}+\vec{v})=\vec{\pi(u)}+\vec{\pi(v)}$. If $u$ is red and $v$ is its unique green
neighbor, then $M\vec{u}=\vec{\pi(u)}+\vec{\pi(v)}$. Finally, if $u$ and $v$ are both
red, they must have the same green neighbor $t$ since otherwise
$M(\vec{u}+\vec{v})$ would be the sum of four basis vectors; then $M(\vec{u}+\vec{v}) =
\vec{\pi(u)}+\vec{\pi(v)}+2 \vec{\pi(t)} = \vec{\pi(u)}+\vec{\pi(v)}$. In each case, since
$\vec{\pi(u)}+\vec{\pi(v)} \in W$ we have $(\pi(u),\pi(v)) \in E_2$, and this
completes the proof.
\section{Uniformity of hardness, amplification, and hard-core predicates}
\label{sec:hard-core}
\paragraph{Self-reducibility and uniform hardness.}
As we pointed out in the Introduction, our function has a simple symmetry
which causes it to be self-reducible from the worst case to the random case:
for any fixed $V$, we have $f_V(AM) = A f_V(M)$. It follows by standard
amplification that, for any fixed $V$, if $f_V$ can be inverted on even a $1/\textrm{poly}(n)$ fraction of
matrices $M$ then it can be inverted with probability $1 - e^{-\textrm{poly}(n)}$ on any particular $M$.
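In more detail, the standard amplification argument can be sketched as follows. Suppose we are given an algorithm $\mathcal{A}$ that inverts $f_V$ on a $1/\textrm{poly}(n)$ fraction of matrices. Given a target value $W=f_V(M)$, repeatedly choose $A \in {\rm GL}_n$ uniformly at random and run $\mathcal{A}$ on the instance $AW = f_V(AM)$; whenever it returns some $N$ with $NV=AW$, output $A^{-1}N$, which is a preimage of $W$ (and equals $M$ whenever $f_V$ is one-to-one). Since $AM$ is uniformly distributed in ${\rm GL}_n$, each trial succeeds with probability at least $1/\textrm{poly}(n)$, and so $\textrm{poly}(n)$ independent trials fail only with probability $e^{-\textrm{poly}(n)}$.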
We can define uniform hardness with respect to $V$ using another obvious symmetry,
\[ f_{BV}(M) = f_V(MB) \enspace . \]
Let us say that $V \sim V'$ if there is a $B \in {\rm GL}_n$ such that $V'=BV$. This is clearly an
equivalence relation; we will call the equivalence class containing $V$ its \emph{orbit},
and denote it $[V]$. Then a similar argument shows that inverting $f_V$ is uniformly hard
within each orbit: namely, if $f_V$
can be inverted on even a $1/\textrm{poly}(n)$ fraction of
matrices $M$ and vectors $V' \in [V]$ then it can be
inverted with probability $1 - e^{-\textrm{poly}(n)}$ on any particular
$M$ and $V' \in [V]$.
A priori, even if it is hard to invert $f_V$, one might hope to recover
partial information about $M$ from its image $f_V(M)$, such as its trace
or a single entry in some basis. In this section, we show that this is essentially as hard
as recovering all of $M$. Therefore, under reasonable hardness assumptions regarding $f_V$,
these goals are also impossible for quantum computers to carry out efficiently.
\paragraph{Hard-core predicates.}
A hard-core predicate is an efficient description of a bit of information that is concealed by a given one-way function. Specifically, if $\{ f_n : D_n \rightarrow R_n \}$ is a family of one-way functions, then an \emph{$s(n)$-hard-core predicate} is a polynomial time computable family of functions $\{ b_n : D_n \rightarrow \{0,1\}\}$ so that for any algorithm $A$ running in time $s(n)$, for sufficiently large $n$,
$$
\left| \Pr_{f_n,w}[A(f_n(w)) = b_n(w)] - \frac{1}{2} \right| \leq \frac{1}{s(n)}\enspace.
$$
Our goal here is to show that every individual entry of $M$ is a hard-core bit in any basis; in particular, \emph{recovering any entry of $M$ is as hard as inverting $f_V$}. We also point out that recovering the trace of $M$ is as hard as inverting $f_V$.
We begin by formalizing the notions of hardness we require for the function $f_V$.
\begin{assumption}[$t(n)$-hardness]\label{assume:poly-hardness}
For each $n \geq 1$, let $m = m(n) = (1+ \epsilon)n$ for some constant $\epsilon > 0$, let $M$ be a uniformly random element of ${\rm GL}_n({\mathbb F})$, and let $V$ be a collection of $m$ independently and uniformly selected elements of ${\mathbb F}^n$. Then for all quantum algorithms $A$ running in time $t(n)$,
\[
\Pr_{V,M}[A(M(V), V) = M] \leq \frac{1}{t(n)}\enspace.
\]
\end{assumption}
We devote the remainder of this section to showing the following two theorems.
\begin{theorem}
If $f_V$ is quasipolynomially hard (that is, $t(n)$-hard for every $t(n) = 2^{\log^{O(1)} n}$) then every entry of $M$ (in any basis) is a quasipolynomially hard-core predicate.
\end{theorem}
\begin{theorem}
If $f_V$ is polynomially hard (that is, $t(n)$-hard for every $t(n) = n^{O(1)}$) then the trace $\textbf{tr}: {\rm GL}_n({\mathbb F}) \rightarrow {\mathbb F}$ is a polynomially hard-core predicate.
\end{theorem}
\subsection{The bilinear predicate: every matrix entry is hard}
Given two basis vectors $\vec{a}$ and $\vec{b}$, the corresponding matrix element
can be written as an inner product $\langle \vec{a}, M\vec{b}\rangle$.
We will show that if $f_V$ is quasipolynomially hard,
then this function is a hard-core predicate for $f_V$
for any fixed nonzero $\vec{a}, \vec{b} \in {\mathbb F}^n$.
Specifically, given an algorithm $P$ running in time $2^{\log^{O(1)} n}$
for which
\[
\Pr_{V,M} \bigl[ P(f_V(M), V) = \langle \vec{a}, M\vec{b}\rangle \bigr] \geq 1/2 + \epsilon
\;\mbox{ with }\;
\epsilon = 2^{-\log^{O(1)} n}
\enspace ,
\]
we show how to invert $f_V$ on a $2^{-\log^{O(1)} n}$ fraction of its
inputs $M$, which would contradict the assumption that $f_V$ is quasipolynomially hard.
To simplify the exposition, we will fix $q$ to be $2$ in this section,
and write ${\mathbb F}={\mathbb F}_2$. We rely on the Goldreich-Levin theorem~\cite{Goldreich:1989:HCP};
for larger prime $q$, we rely on its generalization to arbitrary finite fields by
Goldreich, Rubinfeld, and Sudan~\cite{GoldreichRS1995}.
Initially, we wish to focus attention on certain ``good'' choices of $V$, where
the algorithm $P$ is a good predictor for $\langle \vec{a}, M \vec{b} \rangle$.
Recall that $[V]$ denotes the orbit of $V$ under multiplication
by elements of ${\rm GL}_n$.
Define an element $V$ to be ``good'' if
\begin{equation}
\label{eq:good}
\Pr_{V' \in [V],M}\bigl[ P(f_{V'}(M), V') = \langle \vec{a}, M\vec{b}\rangle \bigr] \geq \frac{1}{2} + \frac{\epsilon}{2}\enspace.
\end{equation}
It is easy to show that at least an $\epsilon/2$ fraction of $V$ must be good in this sense; we fix a specific such $V$ for the remainder of the proof, and show how to invert the function $f_V$ in this case.
We first show how to use $P$ to implement an algorithm for any fixed $M$,
which takes as input $\vec{x},\vec{y} \in {\mathbb F}^n$ (and $(f_{V} (M), V)$) and outputs
$\langle \vec{x}, M\vec{y}\rangle$ correctly on a $1/2+\epsilon/2$ fraction of
$\vec{x},\vec{y}$. First note that for two matrices $A, B \in {\rm GL}_n$,
the pair $(f_{BV}(AMB^{-1}), BV) = (AMV, BV)$ can be computed efficiently from
$(f_{V}(M), V)=(MV,V)$ by left-multiplying $MV$ and $V$ by $A$ and
$B$ respectively. Defining $T(A, B) = P(f_{BV}(AMB^{-1}), BV)$, we may then
rewrite~{\rm e}qref{eq:good} in terms of $T(\cdot,\cdot)$:
\begin{equation}
\label{eq:good-matrix}
\Pr_{A, B \in {\rm GL}_n({\mathbb F})} [T(A, B) = \langle \vec{a}, AMB^{-1}\vec{b}\rangle] \geq \frac{1}{2} + \frac{\epsilon}{2}\enspace.
\end{equation}
Finally, for a pair of vectors $\vec{x}, \vec{y} \in {\mathbb F}^n$, define $t(\vec{x}, \vec{y}) = T(A, B)$, where $A$ and $B$ are random elements of ${\rm GL}_n({\mathbb F})$ for which $A^t \vec{a} = \vec{x}$ and $B^{-1} \vec{b} = \vec{y}$, so that $\langle \vec{a}, AMB^{-1} \vec{b}\rangle = \langle \vec{x}, M \vec{y} \rangle$. Rewriting~\eqref{eq:good-matrix}, we conclude:
\begin{equation}
\Pr_{\vec{x}, \vec{y} \in {\mathbb F}^n} [t(\vec{x}, \vec{y}) = \langle \vec{x}, M \vec{y} \rangle] \geq \frac{1}{2} + \frac{\epsilon}{2}\enspace.
\end{equation}
Let us call a vector $\vec{x} \in {\mathbb F}^n$ \emph{$\ell$-good} if
$\Pr_{\vec{y} \in {\mathbb F}^n} [t(\vec{x}, \vec{y}) = \langle \vec{x}, M \vec{y}\rangle] \geq 1/2 + \epsilon/4$. It
follows that a uniformly selected $\vec{x}$ is $\ell$-good with probability
at least $\epsilon/4$. Note, furthermore, that if $\vec{x}$ is a fixed
$\ell$-good element of ${\mathbb F}^n$, then the Goldreich-Levin construction~\cite{Goldreich:1989:HCP}
can be used to determine $\langle \vec{x}, M \vec{y} \rangle$ for all $\vec{y} \in {\mathbb F}^n$
(in time polynomial in $n$ and $\epsilon^{-1}$). In particular, this
determines an entire row of $M$ when expressed in a basis containing $\vec{x}$.
We consider now a family $G$, consisting of $2 \log m$ vectors
selected independently and uniformly in ${\mathbb F}^n$. We say that $G$ is
$\ell$-good if this is true of each of its elements, a favorable event
that occurs with probability at least $(\epsilon/4)^{2 \log m}$.
Furthermore, the probability that $G$ contains a linearly
dependent set of vectors is no more than $2 \log(m) \cdot 2^{-n +
2\log m} = 2^{-\Omega(n)}$. (This can be seen by selecting the
elements of $G$ in order and bounding the unlikely event that an
element falls into the span of the previously chosen vectors.) Thus
\[
\Pr[ G~\text{is $\ell$-good} \wedge G~\text{is linearly independent}]
\geq (\epsilon/4)^{2\log m} - e^{-\Omega(n)}
\enspace .
\]
Now, for each $\vec{g} \in G$, application of the Goldreich-Levin construction to each component of $\vec{g}$
(reconstructing $\langle \vec{g}, M \vec{y}\rangle$ for all $\vec{y}$) determines
$\langle \vec{g}, M \vec{v} \rangle$ for each $\vec{v} \in V$ and $\vec{g} \in G$. Therefore,
in this case we can reconstruct $2 \log m$ ``generalized rows'' of $M$.
Observe that if the elements of $V$ (and hence $W=M(V)$) are
considered to be selected independently and uniformly at random,
\emph{and} independently of $G$, then
the probability that two elements $w$ and $w'$ of $W$ have the
property that $\langle \vec{g}, \vec{w} \rangle = \langle \vec{g}, \vec{w}' \rangle$ for all
$\vec{g} \in G$ is $2^{-2 \log m}$. Let $\Pi_G: {\mathbb F}^n \rightarrow {\mathbb F}^{2 \log m}$
denote the projection onto the space spanned by the vectors in
$G$. In particular, this information would appear to determine the
bijection $b_M: V \rightarrow W$ effected by the action of $M$ on
$V$. This intuitive argument is misleading, as written, since the
notion of $\ell$-good depends on $V$ (and so on $W$) via the
arbitrary predicting algorithm $P$. Instead, our
goal below will be to show that the total number of permutations of
the set $W$ under which $\Pi_G$ is invariant is small enough that we
can exhaustively search them to uncover the bijection $b_M$ and hence
the linear operator $M$.
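As a rough illustration of why such projection-preserving permutations are typically few, the following small Python simulation (our own sketch, not part of the argument) samples the elements of $W$ uniformly and independently, as in the expectation computation below, and counts the permutations of $W$ that preserve the projection $\Pi_G$; the parameters $n=24$ and $m=16$ are arbitrary choices for the demonstration.
\begin{verbatim}
# A small simulation (illustration only, not part of the argument): sample W
# and G over F_2^n with the elements of W uniform and independent, and count
# the permutations of W that preserve the projection Pi_G.
import math
import random
from collections import Counter
from math import factorial

def inner(u, v):
    # inner product over F_2, with vectors represented as 0/1 lists
    return sum(a & b for a, b in zip(u, v)) % 2

def rand_vec(n):
    return [random.randint(0, 1) for _ in range(n)]

n, m, trials = 24, 16, 200
g_size = int(2 * math.log2(m))   # |G| = 2 log m
total = 0
for _ in range(trials):
    G = [rand_vec(n) for _ in range(g_size)]
    W = [rand_vec(n) for _ in range(m)]
    projections = [tuple(inner(g, w) for g in G) for w in W]
    # a permutation of W preserves Pi_G iff it only permutes elements with
    # identical projections, so their number is the product of factorials
    # of the collision-class sizes
    count = 1
    for c in Counter(projections).values():
        count *= factorial(c)
    total += count
print("average number of projection-preserving permutations:", total / trials)
print("compare with sqrt(m) =", math.sqrt(m))
\end{verbatim}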
Consider random (and independent) selection of $G$, $V$, and $M$ (so
that $W = M(V)$ is also determined) with no extra conditioning except
that $G$ be linearly independent. Let $I_G$ denote the collection of
permutations $\phi: W \rightarrow W$ with the property that $\Pi_G w =
\Pi_G \phi(w)$, for all $w \in W$. We will show below that
${\rm E}_{V,M,G}[|I_G|] = O(\sqrt{m})$. Then Markov's inequality
will allow us to bound the probability that $|I_G|$ exceeds
$(4/\epsilon)^{O(\log n)}$. To round out the proof we will show
that the chance that $V$ is good and that $G$ is $\ell$-good is
much higher than this failure probability, thereby concluding that
there is a significant chance that $V$ is good, $G$ is
$\ell$-good, and that $|I_G| = (4/\epsilon)^{O(\log n)}$.
As the elements of $W$ are selected
independently (and uniformly) in ${\mathbb F}^n$, each $\Pi_G w$ is an
independent, uniform element of ${\mathbb F}^{|G|}$. Fixing a permutation
$\phi$, let $\lambda_1, \lambda_2, \ldots$ be the lengths of its
cycles, arranged in nonincreasing order. The probability that the
elements of $W$ in each of these cycles are mapped to the same element
under $\Pi_G$ is no more than $\prod_i (2^{-|G|})^{\lambda_i -1} =
(m^{-2})^{\tau(\phi)}$, where $\tau(\phi) = \sum_i (\lambda_i -
1)$ is also the minimum number of transpositions required to write
$\phi$.
This quantity is bounded by the lemma below. Its proof uses the machinery of exponential generating functions, and is relegated to Appendix~\ref{app:egf}.
\begin{lemma}
\label{lem:egf}
Let $0 < z < 1/k$. Then
\begin{equation}
\label{eq:qn}
q_k(z) = \sum_{\pi \in S_k} z^{t(\pi)} = O(\sqrt{k}) \,\frac{e^{-k}}{(1-zk)^{1/z}} \enspace .
\end{equation}
\end{lemma}
In light of this bound, the expectation of $|I_G|$, the number of permutations $\phi$ under which $\Pi_G$ is invariant, is no more than
$$
\sum_{\phi \in S_m} \left(\frac{1}{m^2}\right)^{\tau(\phi)} = O(\sqrt{m}) \frac{e^{-m}}{(1 - 1/m)^{m^2}}\enspace.
$$
As $- \ln(1 - x) = x + x^2/2 + x^3/3 + \ldots$, we have
$$
e^{-m} \cdot (1 - 1/m)^{-m^2} = \exp(-m + m^2[ 1/m + (1/m)^2/2 + O(1/m^3)]) = O(1)\enspace.
$$
Thus ${\rm E}[|I_G|] = O(\sqrt{m})$.
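For small $m$, the quantity bounding ${\rm E}[|I_G|]$ can also be evaluated by brute force; the following sketch (ours, purely a sanity check and not part of the argument) computes $\sum_{\phi \in S_m} (1/m^2)^{\tau(\phi)}$ exactly and checks that it stays below $\sqrt{m}$.
\begin{verbatim}
# Brute-force sanity check: evaluate the sum over S_m of (1/m^2)^{tau(phi)},
# where tau(phi) = m - (number of cycles of phi), and compare it with sqrt(m).
from itertools import permutations
from math import sqrt

def num_cycles(perm):
    # count the cycles of a permutation given in one-line notation
    seen, cycles = set(), 0
    for start in range(len(perm)):
        if start not in seen:
            cycles += 1
            j = start
            while j not in seen:
                seen.add(j)
                j = perm[j]
    return cycles

for m in range(2, 9):
    total = sum((1.0 / m ** 2) ** (m - num_cycles(p))
                for p in permutations(range(m)))
    print(m, round(total, 4), "<=", round(sqrt(m), 4), total <= sqrt(m))
\end{verbatim}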
Putting the pieces together, with $M$, $V$, and $G$ selected as above,
$$
\Pr_{V, M, G} [ (\text{$V$ is good}) \wedge (\text{$G$ is both $\ell$-good and linearly independent})] \geq \frac{\epsilon}{2} \cdot \left(\frac{\epsilon}{4}\right)^{2 \log m} \geq \left(\frac{\epsilon}{4}\right)^{1 + 2 \log m}\enspace.
$$
As ${\rm E}_{V,M,G}[|I_G|] = O(\sqrt{m})$, by Markov's inequality there is a constant $c$ so that
$$
\Pr_{V,M,G}\left[|I_G| \geq c \sqrt{m} (4/\epsilon)^{2 \log m}\right] \leq \frac{1}{2} \cdot \left(\frac{\epsilon}{4}\right)^{1 + 2 \log m}\enspace.
$$
Thus, with probability at least $(1/2) (\epsilon/4)^{(1 + 2 \log m)}$, $V$ is good, $G$ is $\ell$-good, and there are $(4/\epsilon)^{O(\log n)}$ permutations of $W$ that fix $\Pi_G$. These permutations determine a set of no more than $(4/\epsilon)^{O(\log n)}$ mappings between $V$ and $W$ consistent with $M$; these can be exhaustively searched in time $\text{poly}(n) \cdot (4/\epsilon)^{O(\log n)}$, which is quasipolynomial when $\epsilon^{-1}$ is.
We conclude this section with a proof that, even if $\epsilon^{-1}$ is only polynomial in $n$, hardness with respect to quasipolynomial time is the most we can hope for in the case of the bilinear predicate (in the absence of further information about the preimage). First, choose a subspace $S$ of ${\mathbb F}^n$ with dimension $\dim S = \log_2 n$. Now consider an oracle $P(a,b)$ defined as follows. If either $a$ or $b$ is orthogonal to $S$, then $P(a,b) = \langle a, Mb \rangle$, but if neither of them is orthogonal to $S$, then $P(a,b)$ is uniform in ${\mathbb F}$. Since a uniform vector in ${\mathbb F}^n$ is orthogonal to $S$ with probability $1/n$, it follows that $P(a,b)$ is correct with probability $1/2 + \epsilon$ where $\epsilon$ is on the order of $1/n$.
Now choose a basis for ${\mathbb F}^n$, and let $S$ be the subspace generated by the first $\dim S$ basis vectors. It is clear that this oracle gives us no information whatever regarding the matrix elements in the $\dim S \times \dim S$ minor at the upper left-hand corner of $M$. Therefore, we are forced to try all possible values for the elements of this minor by exhaustive search, and this takes $2^{(\dim S)^2} = 2^{\log^2 n}$ time.
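To make the counterexample concrete, here is a small sketch (ours, for ${\mathbb F} = {\mathbb F}_2$ and $n = 16$; the matrix $M$ is sampled uniformly rather than from ${\rm GL}_n$, which does not affect the advantage estimate) of such an oracle together with an empirical estimate of its advantage.
\begin{verbatim}
# Illustration only: the oracle P answers <a, Mb> whenever a or b is
# orthogonal to S (spanned by the first dim_s standard basis vectors),
# and a random bit otherwise; we then estimate its advantage empirically.
import random

n = 16
dim_s = 4                     # dim S = log_2 n
M = [[random.randint(0, 1) for _ in range(n)] for _ in range(n)]  # uniform, not forced invertible

def orthogonal_to_S(v):
    # v is orthogonal to S exactly when its first dim_s coordinates vanish
    return all(x == 0 for x in v[:dim_s])

def bilinear(a, b):
    # <a, Mb> over F_2
    Mb = [sum(M[i][j] & b[j] for j in range(n)) % 2 for i in range(n)]
    return sum(x & y for x, y in zip(a, Mb)) % 2

def P(a, b):
    if orthogonal_to_S(a) or orthogonal_to_S(b):
        return bilinear(a, b)
    return random.randint(0, 1)   # useless answer when neither is orthogonal to S

trials, correct = 10000, 0
for _ in range(trials):
    a = [random.randint(0, 1) for _ in range(n)]
    b = [random.randint(0, 1) for _ in range(n)]
    correct += P(a, b) == bilinear(a, b)
print("empirical advantage:", correct / trials - 0.5, " compare with 1/n =", 1 / n)
\end{verbatim}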
\subsection{The trace predicate}
The proof that the trace predicate is hard is a direct consequence of the Goldreich-Levin theorem~\cite{Goldreich:1989:HCP} and its generalization to arbitrary finite fields by Goldreich, Rubinfeld, and Sudan~\cite{GoldreichRS1995}. Specifically, consider the trace $\textbf{tr}: {\rm GL}_n({\mathbb F}) \rightarrow {\mathbb F}$. Suppose now that there is a polynomial-time quantum algorithm $P$ so that for $M$ selected uniformly at random in ${\rm GL}_n$ and $V$ a collection of $m$ independent and uniform vectors of ${\mathbb F}^n$,
\[
\Pr_{V,M} \bigl[ P(f_V(M),V) = \textbf{tr}(M) \bigr] \geq \frac{1}{2} + \epsilon\enspace,
\]
where $\epsilon = n^{-O(1)}$. It follows that for at least an $\epsilon/2$ fraction of the $V$, when selected as above, we have
\[
\Pr_{M} \bigl[ P(f_V(M),V) = \textbf{tr}(M) \bigr] \geq \frac{1}{2} + \frac{\epsilon}{2}\enspace.
\]
We show how to invert $f_V$ for such ``good'' $V$; as these occur with probability $\epsilon/2$, this would contradict the assumption that $f_V$ is polynomially hard. For the remainder of the proof we fix a specific $V$ satisfying the equation above.
Again note that for any matrix $N \in {\rm GL}_n$, the collection $f_V(NM)=NMV$ can be computed in polynomial time from $f_V(M)$, simply by left-multiplying the collection $f_V(M)=MV$ by $N$. In particular, given $f_V(M)$, the function $T: {\rm GL}_n({\mathbb F}) \rightarrow {\mathbb F}$ given by $T(N) = P(f_V(NM),V)$ can be computed in polynomial time and has the property that
\begin{equation}
\label{eq:over-GL}
\Pr_N \bigl[ T(N) = \textbf{tr}(NM) \bigr] \geq \frac{1}{2} + \epsilon\enspace.
\end{equation}
Now, for a fixed matrix $C$, the function $\ell_C: M \mapsto \textbf{tr}(CM)$ is a linear function and, moreover, all linear combinations of the entries of $M$
can be written in this way. In light of this, note that if the guarantee~\eqref{eq:over-GL} could be arranged with the matrix $C$ being selected uniformly at random from the collection of \emph{all} matrices (rather than the invertible ones), we could immediately apply the Goldreich-Levin~\cite{Goldreich:1989:HCP} construction at this point to recover $M$. This ``oracle'' $T$ can, however, be extended to an oracle $\tilde{T}$ defined on the family of all matrices $C$ by simply assigning random values to the singular matrices $C \not\in {\rm GL}_n$, in which case with constant probability (over the selection of random values for this oracle),
\begin{equation}
\label{eq:over-End}
\Pr_N \bigl[ \tilde{T}(N) = \textbf{tr}(NM) \bigr] \geq \frac{1}{2} + \alpha_p(n) \epsilon\enspace,
\end{equation}
where
\[
\alpha_p(n) = \prod_{i = 0}^{n-1} \left( 1 - \frac{1}{p^{n-i}}\right) \geq \prod_{i = 1}^{\infty} \left( 1 - \frac{1}{2^{i}}\right) \approx 0.2888
\]
is the probability that a random $n \times n$ matrix over ${\mathbb F}_p$ is invertible. In this case, when $p = 2$ the Goldreich-Levin theorem can be applied directly:
\begin{theorem}[\cite{Goldreich:1989:HCP}]
Let $g: {\mathbb F}_2^n \rightarrow {\mathbb F}_2$ be a function so that for some $h \in {\mathbb F}_2^n$, $\Pr_{x \in {\mathbb F}_2^n} \left[ g(x) = \langle x, h \rangle \right] \geq \frac{1}{2} + \epsilon$ and let $c \geq 0$. Then there is a randomized algorithm running in time $\text{poly}(n, \epsilon^{-1})$ (and making no more than $\text{poly}(n, \epsilon^{-1})$ black-box queries to $g$) that determines $h$ with probability $1 - 1/n^c$.
\end{theorem}
\noindent
When $p > 2$, one has to apply the generalization of~\cite{Goldreich:1989:HCP} to arbitrary finite fields by Goldreich, Rubinfeld, and Sudan~\cite{GoldreichRS1995}.
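The observation that every linear combination of the entries of $M$ arises as $\textbf{tr}(CM)$ can be checked directly in the simplest case: querying $C = E_{ij}$, the matrix with a single $1$ in position $(i,j)$, returns the entry $M_{ji}$. The following tiny sketch (ours, with an exact oracle over ${\mathbb F}_2$ and a uniformly random $0/1$ matrix, not necessarily invertible, which is irrelevant for this identity) verifies this.
\begin{verbatim}
# A small check of the observation above: over F_2, tr(E_ij M) equals M_ji,
# so an exact oracle for M |-> tr(C M) recovers M entry by entry.
import random

n = 6
M = [[random.randint(0, 1) for _ in range(n)] for _ in range(n)]

def trace_prod(C, A):
    # tr(C A) over F_2: sum over i of (C A)_{ii} = sum_{i,k} C[i][k] * A[k][i]
    return sum(C[i][k] & A[k][i] for i in range(n) for k in range(n)) % 2

for i in range(n):
    for j in range(n):
        E = [[1 if (r, c) == (i, j) else 0 for c in range(n)] for r in range(n)]
        assert trace_prod(E, M) == M[j][i]
print("tr(E_ij M) = M_ji verified for a random 0/1 matrix M")
\end{verbatim}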
\newcommand{\etalchar}[1]{$^{#1}$}
\begin{thebibliography}{HMR{\etalchar{+}}06}
\bibitem[AD97]{Ajtai:1997:PKC}
Mikl{\'o}s Ajtai and Cynthia Dwork.
\newblock A public-key cryptosystem with worst-case\slash average-case
equivalence.
\newblock In {ACM}, editor, {\em Proceedings of the 29th annual {ACM} Symposium
on the Theory of Computing}, pages 284--293, New York, NY, USA, 1997. ACM
Press.
\bibitem[BBBV97]{BBBV:1997:BBBV}
Charles Bennett, Ethan Bernstein, Gilles Brassard, and Umesh Vazirani.
\newblock Strengths and weaknesses of quantum computation.
\newblock {\em SIAM Journal on Computing}, 26(5):1510--1523, October 1997.
\bibitem[GL89]{Goldreich:1989:HCP}
O.~Goldreich and L.~A. Levin.
\newblock A hard-core predicate for all one-way functions.
\newblock In {ACM}, editor, {\em Proceedings of the twenty-first annual {ACM}
Symposium on Theory of Computing, Seattle, Washington, May 15--17, 1989},
pages 25--32, New York, NY, USA, 1989. ACM Press.
\newblock ACM order no. 508890.
\bibitem[GRS95]{GoldreichRS1995}
O.~Goldreich, R.~Rubinfeld, and M.~Sudan.
\newblock Learning polynomials with queries: the highly noisy case.
\newblock In {\em Proceedings of the 36th Annual Symposium on Foundations of Computer Science}, pages 294--303, Milwaukee, WI, October 1995.
\bibitem[GSVV01]{Grigni:2001:QMA}
Michelangelo Grigni, Leonard Schulman, Monica Vazirani, and Umesh Vazirani.
\newblock Quantum mechanical algorithms for the nonabelian hidden subgroup
problem.
\newblock In {ACM}, editor, {\em Proceedings of the 33rd Annual ACM Symposium
on Theory of Computing}, pages 68--74, New York, NY, USA, 2001. ACM Press.
\bibitem[HMR{\etalchar{+}}06]{Hallgren:2006:LQC}
Sean Hallgren, Cristopher Moore, Martin R{\"o}tteler, Alexander Russell, and
Pranab Sen.
\newblock Limitations of quantum coset states for graph isomorphism.
\newblock In {ACM}, editor, {\em Proceedings of the 38th Annual {ACM} Symposium
on Theory of Computing}, pages 604--617, New York, NY, USA, 2006. ACM Press.
\bibitem[HRTS00]{Hallgren:2000:NSR}
Sean Hallgren, Alexander Russell, and Amnon Ta-Shma.
\newblock Normal subgroup reconstruction and quantum computation using group
representations.
\newblock In {ACM}, editor, {\em Proceedings of the 32nd annual {ACM} Symposium
on Theory of Computing}, pages 627--635, New York, NY, USA, 2000.
ACM Press.
\bibitem[KNV02]{Kashefi:2002:KNV}
E.~Kashefi, H.~Nishimura, and V.~Vedral.
\newblock On quantum one-way permutations.
\newblock {\em Quantum Information and Computation}, 5:379, 2002.
\bibitem[K05]{K}
Greg Kuperberg.
\newblock A subexponential-time quantum algorithm for the dihedral hidden
subgroup problem.
\newblock {\em SIAM Journal on Computing}, 35(1):170--188, 2005.
\bibitem[{McE}78]{McEliece:Public}
R.~J. {McEliece}.
\newblock A public-key cryptosystem based on algebraic coding theory.
\newblock Technical Report 42-44, Jet Propulsion Lab, Pasadena, CA, 1978.
\bibitem[MR06]{MR:Impossibility}
Cristopher Moore and Alexander Russell.
\newblock On the impossibility of a quantum sieve algorithm for graph isomorphism.
\newblock Technical Report quant-ph/0609138, arXiv.org e-Print archive, 2006.
\bibitem[MRS06]{MRS:Impossibility}
Cristopher Moore, Alexander Russell, and Piotr \'{S}niady.
\newblock On the impossibility of a quantum sieve algorithm for graph isomorphism: unconditional results.
\newblock Technical Report quant-ph/0612089, arXiv.org e-Print archive, 2006.
\bibitem[MRS05]{Moore:2005:SGF}
Cristopher Moore, Alexander Russell, and Leonard Schulman.
\newblock The symmetric group defies {Fourier} sampling.
\newblock In {\em Proceedings of the 46th Symposium on Foundations of Computer
Science}, pages 479--488, 2005.
\bibitem[Reg04a]{Regev:2004:NLB}
Oded Regev.
\newblock New lattice-based cryptographic constructions.
\newblock {\em Journal of the ACM}, 51(6):899--942, November 2004.
\bibitem[Reg04b]{Regev:2004:QCL}
Oded Regev.
\newblock Quantum Computation and Lattice Problems.
\newblock {\em SIAM Journal on Computing}, 33(3):738--760, 2004.
\bibitem[Sho97]{Shor:1997:PTA}
Peter~W. Shor.
\newblock Polynomial-time algorithms for prime factorization and discrete
logarithms on a quantum computer.
\newblock {\em SIAM Journal on Computing}, 26(5):1484--1509, October 1997.
\bibitem[Sta97]{Stanley:EC1}
Richard~P. Stanley.
\newblock {\em Enumerative Combinatorics}, volume~I.
\newblock Cambridge University Press, 1997.
\bibitem[Wil94]{Wilf94}
Herbert Wilf.
\newblock {\em Generatingfunctionology}.
\newblock Academic Press, 1994.
\end{thebibliography}
\pagebreak
\appendix
\section{Proof of Lemma~\ref{lem:egf}}
\label{app:egf}
\begin{figure}
\begin{center}
\includegraphics[width=4in]{proof-chickens}
\end{center}
\caption{Two of the authors hard at work chalking up the proof of Lemma~\ref{lem:egf} on an asphalt driveway.}
\label{fig:proof-details}
\hrule
\end{figure}
Recall that Lemma~\ref{lem:egf} asserts that if $0 < z < 1/k$, then
\begin{equation}
\label{eq:qn-recap}
q_k(z) = \sum_{\pi \in S_k} z^{t(\pi)} = O(\sqrt{k}) \,\frac{e^{-k}}{(1-zk)^{1/z}} \enspace .
\end{equation}
\begin{proof}[Proof of Lemma~\ref{lem:egf}]
Consider the exponential generating function
\[ g(y,z) = \sum_{m=0}^\infty
\frac{y^m}{m!} \,q_m(z) {\rm e}nspace . \]
Using the techniques of~\cite[Chapter 3]{Wilf94}, we can write this as a product
over all $k$ of contributions from the $(k-1)!$ possible $k$-cycles, including
fixed points. Since each such cycle contributes $k$ to $m$ and $k-1$ to
$t(\pi)$, and since there are $(k-1)!$ $k$-cycles on a given set of $k$ objects,
it follows (cf. Figure~\ref{fig:proof-details}) that
\[
g(y,z) = \prod_{k=1}^\infty \exp\!\left( \frac{y^k z^{k-1}}{k} \right)
= \exp\!\left( \sum_{k=1}^\infty \frac{y^k z^{k-1}}{k} \right)
= \exp\!\left( -\frac{1}{z} \ln (1-yz) \right)
= \frac{1}{(1-yz)^{1/z}} \enspace .
\]
Now note that $e^{-k} g(k,z)$ is the expectation of $q_m(z)$, where $m$ is
Poisson-distributed with mean $k$. Since $q_m(z) > 0$, this expectation is at
least $q_k(z)$ times the probability that $m=k$, which is $e^{-k} k^k / k! =
(1-o(1)) / \sqrt{2 \pi k}$. Thus we have \[ q_k(z) \le (1+o(1)) \sqrt{2 \pi k}
\cdot e^{-k} g(k,z) \]
which concludes the proof.
\end{proof}
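As a numerical sanity check of this bound (ours, not part of the proof), one can compare a brute-force evaluation of $q_k(z)$ with the explicit right-hand side $\sqrt{2\pi k}\,e^{-k}(1-zk)^{-1/z}$ obtained above, for small $k$ and a few admissible values of $z$.
\begin{verbatim}
# Brute-force check (illustration only): for small k and 0 < z < 1/k, compare
# q_k(z) = sum over S_k of z^{t(pi)} with sqrt(2 pi k) * e^{-k} * (1-zk)^{-1/z}.
from itertools import permutations
from math import sqrt, pi, exp

def t(perm):
    # minimum number of transpositions: k minus the number of cycles
    seen, cycles = set(), 0
    for start in range(len(perm)):
        if start not in seen:
            cycles += 1
            j = start
            while j not in seen:
                seen.add(j)
                j = perm[j]
    return len(perm) - cycles

for k in range(2, 8):
    for z in (0.2 / k, 0.5 / k, 0.9 / k):   # any 0 < z < 1/k will do
        q = sum(z ** t(p) for p in permutations(range(k)))
        bound = sqrt(2 * pi * k) * exp(-k) * (1 - z * k) ** (-1.0 / z)
        print(k, round(z, 3), round(q, 3), "<=", round(bound, 3), q <= bound)
\end{verbatim}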
\end{document}
\begin{document}
\date{}
\title{Gallai-Ramsey numbers for monochromatic $K_4^{+}$ or $K_{3}$
}
\author{\small Xueli Su, Yan Liu\\
\small School of Mathematical Sciences, South China Normal University,\\
\small Guangzhou, 510631, P.R. China \thanks{This work is supported
by the Scientific Research Fund of the Science and Technology Program of Guangzhou, China (authorized in 2019) and by the Qinghai Province Natural Science Foundation (No.~2020-ZJ-924).
Correspondence should be addressed to Yan
Liu (e-mail: [email protected]).}}
\maketitle
\setcounter{theorem}{0}
\begin{abstract}
A Gallai $k$-coloring is a $k$-edge coloring of a complete graph in which
there are no rainbow triangles. For two given graphs $H, G$
and two positive integers $k,s$ with $s\leq k$, the
$k$-colored Gallai-Ramsey number $gr_{k}(K_{3}: s\cdot H,~
(k-s)\cdot G)$ is the minimum integer $n$ such that every Gallai
$k$-colored $K_{n}$ contains a monochromatic copy of $H$ colored
by one of the first $s$ colors or a monochromatic copy of $G$
colored by one of the remaining $k-s$ colors. In this paper, we
determine the value of this Gallai-Ramsey number in the case that
$H=K_{4}^{+}$ and $G=K_{3}$. Thus the Gallai-Ramsey number
$gr_{k}(K_{3}: K_{4}^{+})$ is obtained.
\noindent {\bf Key words:} Gallai coloring, Rainbow triangle, Gallai-Ramsey number, Gallai partition.
\end{abstract}
\section{Introduction}
All graphs considered in this paper are finite, simple and undirected. For a graph $G$, we use $|G|$ to denote the number of vertices of $G$, called the \emph{order} of $G$.
The complete graph of order $n$ is denoted by $K_{n}$.
For a subset $S\subseteq V(G)$, let $G[S]$ be the subgraph of $G$ induced by $S$.
For two disjoint subsets $A$ and $B$ of $V(G)$, $E_{G}(A, B)=\{ab\in E(G) ~|~ a\in A, b\in B\}$. Let $G_1=(V_1,E_1)$ and $G_2=(V_2,E_2)$ be two graphs. The \emph{union} of $G_1$ and $G_2$, denoted by $G_{1}+G_{2}$, is the graph with the vertex set $V_1\bigcup V_2$ and the edge set $E_1\bigcup E_2$. The \emph{join} of $G_1$ and $G_2$, denoted by $G_1\vee G_2$, is the graph obtained from $G_1+G_2$ by adding all edges joining each vertex of $G_1$ and each vertex of $G_2$.
For any positive integer $k$, we write $[k]$ for the set $\{1, 2, \cdots, k\}$.
An edge coloring of a graph is called \emph{monochromatic} if all edges are colored by the same color. An edge-colored graph is called \emph{rainbow} if no two edges are colored by the same color.
Given graphs $H_{1}$ and $H_{2}$, the classical Ramsey number $R(H_{1}, H_{2})$ is the smallest integer $n$ such that for any 2-edge coloring of $K_{n}$ with red and blue, there exists a red copy of $H_{1}$ or a blue copy of $H_{2}$.
A \emph{sharpness example} of the Ramsey number $R(H_{1}, H_{2})$, denoted by $C_{(H_{1}, H_{2})}$, is a 2-edge colored $K_{R(H_{1}, H_{2})-1}$ with red and blue such that there are neither red copies of $H_{1}$ nor blue copies of $H_{2}$. Given graphs $H_{1}, H_{2}, \cdots, H_{k}$, the multicolor Ramsey number $R(H_{1}, H_{2}, \cdots, H_{k})$ is the smallest positive integer $n$ such that for every $k$-edge colored $K_{n}$ with the color set $[k]$, there exists some $i\in [k]$ such that $K_{n}$ contains a monochromatic copy of $H_{i}$ colored by $i$.
The multicolor Ramsey number is an obvious generalization of the classical Ramsey number.
When $H=H_{1}=\cdots=H_{k}$, we simply denote $R(H_{1}, \cdots, H_{k})$ by $R_{k}(H)$. The problem of computing Ramsey numbers is notoriously difficult.
For more information on classical Ramsey number, we refer the readers to ~\cite{PRCRT, RRA, RBJR}.
In this paper, we study Ramsey numbers of Gallai colorings. A \emph{Gallai-coloring} is an edge coloring of a complete graph with no rainbow triangle. Gallai colorings naturally arise in several areas including: information theory \cite{JG}; the study of partially ordered sets, as in Gallai's original paper \cite{Gallai} (his result was restated in \cite{GyarfasSimonyi} in the terminology of graphs); and the study of perfect graphs \cite{KJLA}. More information on this topic can be found in \cite{JAJT, SCKR}.
A Gallai $k$-coloring is a Gallai-coloring that uses at most $k$ colors.
Given a positive integer $k$ and graphs $H_{1}, H_{2}, \cdots, H_{k}$, the \emph{Gallai-Ramsey number} $gr_{k}(K_{3}: H_{1}, H_{2}, \cdots, H_{k})$ is the smallest integer $n$ such that every Gallai $k$-colored $K_{n}$ contains a monochromatic copy of $H_{i}$ in color $i$ for some $i\in [k]$.
Clearly, $gr_{k}(K_{3}: H_{1}, H_{2}, \cdots, H_{k})\leq R(H_{1}, H_{2}, \cdots, H_{k})$ for any $k$ and $gr_{2}(K_{3}: H_{1}, H_{2})=R(H_{1}, H_{2})$.
When $H=H_{1}=\cdots=H_{k}$, we simply denote $gr_{k}(K_{3}: H_{1}, H_{2}, \cdots, H_{k})$ by $gr_{k}(K_{3}: H)$.
When $H=H_{1}=\cdots=H_{s} (0\leq s\leq k)$ and $G=H_{s+1}=\cdots=H_{k}$, we use the following shorthand notation $$gr_{k}(K_{3}:s\cdot H, (k-s)\cdot G)=gr_{k}(K_{3}:\underbrace{H, \cdots, H}_{s ~\text{times}}, \underbrace{G, \cdots, G}_{(k-s) ~\text{times}}).$$
The authors in \cite{HColton} and \cite{WMG} determined the Gallai-Ramsey number $gr_{k}(K_{3}:s\cdot K_{4}, (k-s)\cdot K_{3})$ and $gr_{k}(K_{3}:s\cdot K_{3}, (k-s)\cdot C_{4})$, respectively. In this paper, we investigate the Gallai-Ramsey number $gr_{k}(K_{3}:s\cdot K_{4}^{+}, (k-s)\cdot K_{3}),$ where $K_{4}^{+}=K_{1}\vee(K_{3}+K_{1})$.
We will prove the following result in Section 2.
\begin{theorem}\label{Thm:K4+K3}
Let $k$ be a positive integer and $s$ an integer such that $0\leq s\leq k$. Then
$$
gr_k(K_{3}:s\cdot K_{4}^{+}, (k-s)\cdot K_{3})=\begin{cases}
17^{\frac{s}{2}}\cdot 5^{\frac{k-s}{2}}+1, &\text{if $s$ and $(k-s)$ are both even,}\\
2\cdot17^{\frac{s}{2}}\cdot 5^{\frac{k-s-1}{2}}+1, &\text{if $s$ is even and $(k-s)$ is odd,}\\
4\cdot 17^{\frac{k-1}{2}}+1, &\text{if $s=k$ and $k$ is odd,}\\
8\cdot17^{\frac{s-1}{2}}\cdot 5^{\frac{k-s-1}{2}}+1, &\text{if $s$ and $(k-s)$ are both odd,}\\
16\cdot17^{\frac{s-1}{2}}\cdot 5^{\frac{k-s-2}{2}}+1, &\text{if $s<k$ and $s$ is odd and $(k-s)$ is even.}
\end{cases}
$$
\end{theorem}
Setting $s=k$ and $s=0$ in Theorem~\ref{Thm:K4+K3} yields the following Theorem~\ref{Thm:K4+} and Theorem~\ref{Thm:K3}, respectively. So Theorem~\ref{Thm:K4+} and Theorem~\ref{Thm:K3} can be seen as two corollaries of Theorem~\ref{Thm:K4+K3}.
\begin{theorem}\label{Thm:K4+}
For a positive integer $k$,
$$
gr_k(K_{3}:K_{4}^{+})=\begin{cases}
17^{\frac{k}{2}}+1, &\text{if $k$ is even,}\\
4\cdot 17^{\frac{k-1}{2}}+1, &\text{if $k$ is odd.}
\end{cases}
$$
\end{theorem}
\begin{theorem}{\upshape \cite{Fre, AGAS}}\label{Thm:K3}
For a positive integer $k$,
$$
gr_k(K_{3}:K_{3})=\begin{cases}
5^{\frac{k}{2}}+1, &\text{if $k$ is even,}\\
2\cdot 5^{\frac{k-1}{2}}+1, &\text{if $k$ is odd.}
\end{cases}
$$
\end{theorem}
To prove Theorem~\ref{Thm:K4+K3}, the following theorem is useful.
\begin{theorem}{\upshape \cite{Gallai, GyarfasSimonyi, CameronEdmonds}} {\upshape (Gallai-partition)}\label{Thm:G-Part}
For any Gallai-coloring of a complete graph $G$, there exists a partition of $V(G)$ into at least two parts such that there are at most two colors on the edges between the parts and only one color on the edges between each pair of parts. The partition is called a \emph{Gallai-partition}.
\end{theorem}
Given a Gallai-partition $(V_{1}, V_{2}, \cdots, V_{t})$ of a Gallai-colored complete graph $G$, let $H_{i}=G[V_{i}]$, $h_{i}\in V_{i}$ for each $i\in [t]$ and $R=G\left[\{h_{1}, h_{2}, \cdots, h_{t}\}\right]$.
Then $R$ is said to be the \emph{reduced graph} of $G$ corresponding to the given Gallai-partition. By Theorem~\ref{Thm:G-Part}, the edges of the reduced graph $R$ are colored by at most two colors.
\section{Proof of Theorem~\ref{Thm:K4+K3}}
First, recall some known classical Ramsey numbers of $K_{4}^{+}$ and $K_{3}$.
\begin{lemma}{\upshape \cite{Clm, HHIM, SPR}}\label{Lem:k4+K3}
\begin{align*}
R(K_{3}, K_{3})=6,~~~ R(K_{4}^{+}, K_{3})=R(K_{3}, K_{4}^{+})=9,~~~ R(K_{4}^{+}, K_{4}^{+})=18.
\end{align*}
\end{lemma}
For the sake of notation, now we define functions $f_{i}$ for $i\in [5]$ as follows.
\begin{align*}
f_{1}(k, s)&=17^{\frac{s}{2}}\cdot 5^{\frac{k-s}{2}},\\
f_{2}(k, s)&=2\cdot17^{\frac{s}{2}}\cdot 5^{\frac{k-s-1}{2}},\\
f_{3}(k, s)&=4\cdot 17^{\frac{k-1}{2}},\\
f_{4}(k, s)&=8\cdot17^{\frac{s-1}{2}}\cdot 5^{\frac{k-s-1}{2}},\\
f_{5}(k, s)&=16\cdot17^{\frac{s-1}{2}}\cdot 5^{\frac{k-s-2}{2}}.
\end{align*}
Let $$
f(k, s)=\begin{cases}
f_{1}(k, s), &\text{if $s$ and $(k-s)$ are both even,}\\
f_{2}(k, s), &\text{if $s$ is even and $(k-s)$ is odd,}\\
f_{3}(k, s), &\text{if $s=k$ and $k$ is odd,}\\
f_{4}(k, s), &\text{if $s$ and $(k-s)$ are both odd,}\\
f_{5}(k, s), &\text{if $s<k$, $s$ is odd and $(k-s)$ is even.}
\end{cases}
$$
It is easy to check that
\begin{equation*}
2f(k-1, s)
=\begin{cases}
2f_{2}(k-1, s), &\text{if $s$ and $(k-s)$ are both even,}\\
2f_{1}(k-1, s), &\text{if $s$ is even and $(k-s)$ is odd,}\\
2f_{3}(k-1, s), &\text{if $s=k-1$ and $k$ is even,}\\
2f_{5}(k-1, s), &\text{if $s<k-1$, $s$ and $(k-s)$ are both odd,}\\
2f_{4}(k-1, s), &\text{if $s$ is odd and $(k-s)$ is even,}
\end{cases}
\leq f(k, s),
\tag{1}
\end{equation*}
\begin{equation*}
5f(k-2, s)
=\begin{cases}
5f_{1}(k-2, s), &\text{if $s$ and $(k-s)$ are both even,}\\
5f_{2}(k-2, s), &\text{if $s$ is even and $(k-s)$ is odd,}\\
5f_{3}(k-2, s), &\text{if $s=k-2$ and $k$ is odd,}\\
5f_{4}(k-2, s), &\text{if $s$ and $(k-s)$ are both odd,}\\
5f_{5}(k-2, s), &\text{if $s<k-2$ and $s$ is odd and $(k-s)$ is even,}
\end{cases}
\leq f(k, s),
\tag{2}
\end{equation*}
and that the following inequalities hold.
\begin{equation*}
f(k-1, s-1)\le \frac{5}{16}f(k, s),
\tag{3}
\end{equation*}
\begin{equation*}
f(k-2, s-1)\le \frac{1}{8}f(k, s),
\tag{4}
\end{equation*}
\begin{equation*}
f(k, s-1)+f(k-1, s-1)\leq f(k, s).
\tag{5}
\end{equation*}
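These inequalities are straightforward to verify case by case; as a quick numerical aid (ours, not part of the proof), the following short script implements the case definition of $f(k, s)$ and spot-checks (3)--(5) for small parameters.
\begin{verbatim}
# Numerical spot-check (not part of the proof) of the case definition of
# f(k, s) and of inequalities (3)-(5) above, for small values of k and s.
def f(k, s):
    assert 0 <= s <= k
    r = k - s
    if s % 2 == 0 and r % 2 == 0:
        return 17 ** (s // 2) * 5 ** (r // 2)                    # f_1
    if s % 2 == 0 and r % 2 == 1:
        return 2 * 17 ** (s // 2) * 5 ** ((r - 1) // 2)          # f_2
    if s == k:                                                   # here k is odd
        return 4 * 17 ** ((k - 1) // 2)                          # f_3
    if r % 2 == 1:
        return 8 * 17 ** ((s - 1) // 2) * 5 ** ((r - 1) // 2)    # f_4
    return 16 * 17 ** ((s - 1) // 2) * 5 ** ((r - 2) // 2)       # f_5

for k in range(2, 25):
    for s in range(1, k + 1):
        assert 16 * f(k - 1, s - 1) <= 5 * f(k, s)               # (3)
        if s <= k - 1:
            assert 8 * f(k - 2, s - 1) <= f(k, s)                # (4)
        assert f(k, s - 1) + f(k - 1, s - 1) <= f(k, s)          # (5)
print("inequalities (3)-(5) hold for all 2 <= k < 25, 1 <= s <= k")
\end{verbatim}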
Now we prove Theorem~\ref{Thm:K4+K3}.
\begin{proof}
We first prove that $gr_k(K_{3}:s\cdot K_{4}^{+}, (k-s)\cdot K_{3})\geq f(k, s)+1$ by constructing a Gallai $k$-colored complete graph with order $f(k, s)$ which contains neither monochromatic copy of $K_{4}^{+}$ colored by one of the first $s$ colors nor monochromatic copy of $K_{3}$ colored by one of the remaining $k-s$ colors.
For this construction, we use the sharpness example of classical Ramsey results.
Let $Q_{1}=C_{(K_{3}, K_{3})}$, $Q_{2}=C_{(K_{4}^{+}, K_{4}^{+})}$ and $Q_{3}=C_{(K_{4}^{+}, K_{3})}$.
Then by Lemma~\ref{Lem:k4+K3}, $Q_{1}$ is a 2-edge colored $K_{5}$, $Q_{2}$ is a 2-edge colored $K_{17}$ and $Q_{3}$ is a 2-edge colored $K_{8}$.
We construct our sharpness example by taking blow-ups of these sharpness examples $Q_{i}$ ($i\in\{1, 2, 3\}$). A \emph{blow-up} of an edge-colored graph $G$ on a graph $H$ is a new graph obtained from $G$ by replacing each vertex of $G$ with a copy of $H$ and replacing each edge $e$ of $G$ with a monochromatic complete bipartite graph $(V(H), V(H))$ in the same color as $e$.
By induction, suppose that we have constructed $G_{0}, G_{1}, \cdots, G_{i}$, where $G_{0}$ is a single vertex and $G_{i}$ is colored by the color set $[i]$ such that $G_{i}$ has no monochromatic copy of $K_{4}^{+}$ colored by one of the first $s$ colors and no monochromatic copy of $K_{3}$ colored by one of the remaining $i-s$ colors.
If $i=k$, then the construction is completed.
Otherwise, we consider the following cases.
{\bf Case a.} If $i\leq s-2$, then construct $G_{i+2}$ by a blow-up of $Q_{2}$ colored by the two colors $i+1$ and $i+2$ on $G_{i}$. Then $G_{i+2}$ has no monochromatic copy of $K_{4}^{+}$ in the first $i+2$ colors.
{\bf Case b.} If $i= s-1$ and $k=s$, then construct $G_{i+1}$ by a blow-up of $K_{4}$ colored by the color $i+1$ on $G_{i}$. Then $G_{i+1}$ has no monochromatic copy of $K_{4}^{+}$ in the first $i+1$ colors.
{\bf Case c.} If $i= s-1$ and $k>s$, then construct $G_{i+2}$ by a blow-up of $Q_{3}$ colored by the two colors $i+1$ and $i+2$ on $G_{i}$. Then $G_{i+2}$ contains neither monochromatic copy of $K_{4}^{+}$ colored by one of the first $i+1$ colors nor monochromatic copy of $K_{3}$ colored by the color $i+2$.
{\bf Case d.} If $i\geq s$ and $i=k-1$, then construct $G_{i+1}$ by a blow-up of $K_{2}$ colored by the color $i+1$ on $G_{i}$. Then $G_{i+1}$ contains neither monochromatic copy of $K_{4}^{+}$ colored by one of the first $s$ colors nor monochromatic copy of $K_{3}$ colored by one of the remaining $k-s$ colors.
{\bf Case e.} If $i\geq s$ and $i\leq k-2$, then construct $G_{i+2}$ by a blow-up of $Q_{1}$ colored by the two colors $i+1$ and $i+2$ on $G_{i}$. Then $G_{i+2}$ contains neither monochromatic copy of $K_{4}^{+}$ colored by one of the first $s$ colors nor monochromatic copy of $K_{3}$ colored by one of the remaining $i+2-s$ colors.
By the above construction, it is clear that $G_{k}$ is Gallai $k$-colored and contains neither monochromatic copy of $K_{4}^{+}$ in any of the first $s$ colors nor monochromatic copy of $K_{3}$ in any of the remaining $k-s$ colors.
If $s$ and $k-s$ are both even, then by Case a, we can first construct $G_{s}$ of order $17^{\frac{s}{2}}$. Next by Case e, we continue to construct $G_{s+2}, G_{s+4}, \cdots, G_{k}$. So we can get $|G_{k}|=f_{1}(k, s)$.
If $s$ is even and $k-s$ is odd, then by Case a, we can first construct $G_{s}$ of order $17^{\frac{s}{2}}$. Next by Case e, we continue to construct $G_{s+2}, G_{s+4}, \cdots, G_{k-1}$ and we can get $|G_{k-1}|=17^{\frac{s}{2}}\cdot5^{\frac{k-s-1}{2}}$.
Finally by Case d, we can construct $G_{k}$ of order $f_{2}(k, s)$. Similarly, we can get that $|G_{k}|=f(k, s)$ in the remaining three cases.
Therefore, $$gr_k(K_{3}:s\cdot K_{4}^{+}, (k-s)\cdot K_{3})\geq f(k, s)+1.$$
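The bookkeeping in Cases a--e can also be traced mechanically; the following sketch (ours, an illustration only) accumulates the blow-up factors used in the construction and confirms that the order of $G_{k}$ equals $f(k, s)$.
\begin{verbatim}
# Illustration only: follow Cases a-e of the construction, multiplying the
# order of G_i by 17, 4, 8, 5 or 2 according to which blow-up is applied,
# and compare the final order of G_k with f(k, s).
def f(k, s):
    r = k - s
    if s % 2 == 0:
        return 17 ** (s // 2) * 5 ** (r // 2) if r % 2 == 0 \
            else 2 * 17 ** (s // 2) * 5 ** ((r - 1) // 2)
    if s == k:
        return 4 * 17 ** ((k - 1) // 2)
    if r % 2 == 1:
        return 8 * 17 ** ((s - 1) // 2) * 5 ** ((r - 1) // 2)
    return 16 * 17 ** ((s - 1) // 2) * 5 ** ((r - 2) // 2)

def construction_order(k, s):
    order, i = 1, 0                    # G_0 is a single vertex
    while i <= s - 2:                  # Case a: blow-up of Q_2 (order 17)
        order, i = 17 * order, i + 2
    if i == s - 1 and k == s:          # Case b: blow-up of K_4
        order, i = 4 * order, i + 1
    elif i == s - 1:                   # Case c (k > s): blow-up of Q_3 (order 8)
        order, i = 8 * order, i + 2
    while i <= k - 2:                  # Case e: blow-up of Q_1 (order 5)
        order, i = 5 * order, i + 2
    if i == k - 1:                     # Case d: blow-up of K_2
        order, i = 2 * order, i + 1
    return order

for k in range(1, 16):
    for s in range(0, k + 1):
        assert construction_order(k, s) == f(k, s), (k, s)
print("construction order equals f(k, s) for all 1 <= k < 16")
\end{verbatim}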
Now we prove that $gr_k(K_{3}:s\cdot K_{4}^{+}, (k-s)\cdot
K_{3})\leq f(k, s)+1$ by induction on $k+s$. Let $n=f(k, s)+1$ and
$G$ be a Gallai $k$-colored complete graph of order $n$. The
statement is trivial in the case that $k=1$. The statement holds
in the case that $k=2$ by Lemma~\ref{Lem:k4+K3}. The statement
holds in the case that $s=0$ by Theorem~\ref{Thm:K3}. So we can
assume that $s\geq1$ and $k\geq3$, and the statement holds for any
$s^{'}$ and $k^{'}$ such that $s^{'}+k^{'}<s+k$. Then $f(k, s)\geq 16$ and $n\ge 17$.
Suppose, to the contrary, that $G$ contains neither monochromatic
copy of $K_{4}^{+}$ in any of the first $s$ colors nor
monochromatic copy of $K_{3}$ in any of the remaining $k-s$
colors. By Theorem~\ref{Thm:G-Part}, there exists a
Gallai-partition of $V(G)$. Choose a Gallai-partition with the
smallest number of parts, say $(V_{1},
V_{2}, \cdots, V_{t})$ and let $H_{i}=G[V_{i}]$ for $i\in [t]$. Then $t\ge 2$. Choose
one vertex $h_{i}\in V_{i}$ and set $R=G[\{h_{1}, h_{2}, \cdots,
h_{t}\}]$. Then $R$ is a reduced graph of $G$ colored by at most
two colors.
We first consider the case that $t=2$. W.L.O.G., suppose that the
color on the edges between two parts is red. If red is in the last
$k-s$ colors (so $s<k$), then $H_{1}$ and $H_{2}$ both have no red
edges, otherwise, there exists a red $K_{3}$, a contradiction.
Hence $|H_{i}|\leq gr_{k-1}(K_{3}:s\cdot K_{4}^{+}, (k-s-1)\cdot
K_{3})-1$ for each $i\in [2]$. By the induction hypothesis and
(1),
$$|G|=|H_{1}|+|H_{2}|\leq 2f(k-1, s)\leq f(k, s)<n,$$
a contradiction. If red is in the first $s$ colors (W.L.O.G.,
suppose that red is the $s$th color), then $H_{1}$ has no red
edges or $H_{2}$ has no red edges, otherwise, there exists a red
$K_{4}^{+}$ in $G$, a contradiction. W.L.O.G., suppose that $H_{1}$
has no red edges. Then $|H_{1}|\leq gr_{k-1}(K_{3}:(s-1)\cdot
K_{4}^{+}, (k-s)\cdot K_{3})-1$. If $H_{2}$ has a red $K_{3}$,
then we can find a red $K_{4}^{+}$ in $G$, a contradiction. Hence
$H_{2}$ contains no monochromatic copy of $K_{4}^{+}$ in any of
the first $s-1$ colors and no monochromatic copy of $K_{3}$ in any
of the remaining $(k-s+1)$ colors. Hence $|H_{2}|\leq
gr_{k}(K_{3}:(s-1)\cdot K_{4}^{+}, (k-s+1)\cdot K_{3})-1$. By the
induction hypothesis and (5), we get that
$$|G|=|H_{1}|+|H_{2}|\leq f(k-1, s-1)+f(k, s-1)\leq f(k, s)<n,$$ a contradiction.
Then we can assume that $t\ge 3$ and since $t$ is smallest, $R$ is colored by exactly two
colors. Suppose that the two colors appeared in the
Gallai-partition $(V_{1}, V_{2}, \cdots, V_{t})$ are red and blue,
that is, the reduced graph $R$ is colored by red and blue. If the
edge $h_{i}h_{j}$ is red in $R$, then $h_{j}$ is said to be a red
neighbor of $h_{i}$. Let $N_{r}(h_{i})=\{$all red neighbors of
$h_{i}\}$, named the red neighborhood of $h_{i}$, and
$N_{b}(h_{i})$ be the blue neighborhood of $h_{i}$ symmetrically.
For each vertex $h_{i} \in V(R)$ $(i\in [t])$, let
$d_{r}(h_{i})=|N_{r}(h_{i})|$, named the red degree of $h_{i}$ in
$R$, and $d_{b}(h_{i})$ be the blue degree of $h_{i}$ in $R$. Then
$d_{r}(h_{i})+d_{b}(h_{i})=t-1$. If there exists one part (say
$V_{1}$) such that all edges joining $V_{1}$ to the other parts
are colored by the same color, then we can find a new
Gallai-partition with two parts $(V_{1}, V_{2}\bigcup\cdots
\bigcup V_{t})$, which contradicts the minimality of $t$. It
follows that $t\neq 3$ and that the following fact holds.
\begin{fact}\label{fact:dwi}
For any $h_{i} \in V(R)$, we have that $d_{r}(h_{i})\geq1$ and $d_{b}(h_{i})\geq1$.
\end{fact}
Now we can assume that $t\ge 4$. We consider the following three cases.
\begin{case}\label{case:s>=0}
Both red and blue are in the last $k-s$ colors (so $s\leq k-2$).
\end{case}
This means that there is neither red $K_{3}$ nor blue $K_{3}$ in
$G$. Since $R(K_{3}, K_{3})=6$, we have $t\leq5$. By
Fact~\ref{fact:dwi}, $H_{i}$ contains neither red edges nor blue
edges for each $i$. Then $|H_{i}|\leq gr_{k-2}(K_{3}:s\cdot
K_{4}^{+}, (k-s-2)\cdot K_{3})-1$ for $1\leq i\leq t$. By the
induction hypothesis and (2), we get that
\begin{eqnarray*}
|G|=\sum_{i=1}^{t}|H_{i}| \leq t\cdot f(k-2, s) \leq 5f(k-2,
s)\leq f(k, s) < n,
\end{eqnarray*}
a contradiction.
In the following, we only need to consider the case that red or blue is among the first
$s$ colors. Then we have the following facts.
\begin{fact}\label{fact:red k4} If red is in the first
$s$ colors, then for any $p\in [t-1]$ and any $p$ parts $V_{j_{1}},
\cdots, V_{j_{p}}$, $G[V_{j_{1}}\cup\cdots\cup V_{j_{p}}]$ has no
red $K_{4}$, and the statement holds for blue symmetrically.
\end{fact}
Otherwise, suppose that there is a red $K_{4}$ in
$G[V_{j_{1}}\cup\cdots\cup V_{j_{p}}]$. If there exists one red
edge in $E_{G}(V_{j_{1}}\cup\cdots\cup V_{j_{p}}, V(G)\setminus
(V_{j_{1}}\cup\cdots\cup V_{j_{p}}))$, then $G$ contains a red
$K_{4}^{+}$, a contradiction. It follows that all edges in
$E_{G}(V_{j_{1}}\cup\cdots\cup V_{j_{p}}, V(G)\setminus
(V_{j_{1}}\cup\cdots\cup V_{j_{p}}))$ are blue. Then
$(V_{j_{1}}\cup\cdots\cup V_{j_{p}}, V(G)\setminus
(V_{j_{1}}\cup\cdots\cup V_{j_{p}}))$ is a new Gallai-partition
which contradicts the facts that $t\geq4$ and $t$ is smallest.
By Fact~\ref{fact:dwi} and Fact~\ref{fact:red k4}, we have the following facts.
\begin{fact}\label{new}
If red is in the first $s$ colors, then every $H_{i}$ has no
red $K_3$, and the statement holds
for blue symmetrically.
\end{fact}
\begin{fact}\label{fact:red k3}
If red is in the first $s$ colors and $h_{i}$ is contained
in a red $K_{3}$ of $R$, then $H_{i}$ has no
red edge, and the statement holds
for blue symmetrically.
\end{fact}
\begin{fact}\label{fact:red edges}
If red is in the first
$s$ colors, and $H_{i}$ and $H_{j}$ both contain red edges, then the edges $E_{G}(V_{i}, V_{j})$ are blue, that is, edge $h_{i}h_{j}$ is blue in $R$, and the statement holds for blue symmetrically.
\end{fact}
Let $I_{r}=\{h_{i}\in V(R)$ : $H_{i}$ contains at least one red
edge$\}$ and $I_{b}=\{h_{i}\in V(R)$ : $H_{i}$ contains at least one
blue edge$\}$. Clearly by Fact~\ref{fact:red edges}, if red is in the first
$s$ colors, then the induced subgraph of $R$ by $I_{r}$ is a blue complete graph and if blue is in the first
$s$ colors, then the induced subgraph of $R$ by $I_{b}$ is a red complete graph.
\begin{fact}\label{fact:VbVr} If red and blue are both in the first $s$
colors, then $|I_{b}\cap I_{r}|\leq 1$.
\end{fact}
Suppose, to the contrary, that $h_{i}, h_{j}\in I_{b}\cap
I_{r}$. We know that $h_{i}h_{j}$ is either red or blue. Then we
can find either a red $K_{4}$ or a blue $K_{4}$ in $G[V_{i}\cup V_{j}]$,
which contradicts Fact~\ref{fact:red k4}.
Suppose that $|H_{i}|\geq 2$ for each $i\in [l]$ and $|H_{j}|=1$
for each $j$ with $l+1\leq j\leq t$. Then $l\geq1$; otherwise
$R=G$, so $G$ is 2-edge colored, which contradicts $k\geq3$.
Let $p_{0}$ be the number of $H_{i}$ ($i\in [l]$) which
contains neither red nor blue edges, $p_{1}$ the number of $H_{i}$
which contains either red or blue edges and $p_{2}$ the number of
$H_{i}$ which contains both red and blue edges. Then
$l=p_{0}+p_{1}+p_{2}$, $p_{2}=|I_{b}\cap I_{r}|$ and
$p_{1}+p_{2}=|I_{b}\cup I_{r}|$.
\begin{case}\label{case:s>=1}
Exactly one of red and blue is in the first $s$ colors and the other is in the last $k-s$ colors (so $s\leq k-1$).
\end{case}
W.L.O.G., suppose that red appears in the first $s$ colors, blue
appears in the last $k-s$ colors and red is the $s$th color. This
means that there is neither red $K_{4}^{+}$ nor blue $K_{3}$ in
$G$. Then by
Fact~\ref{fact:dwi}, we can get that every $H_{i}$ contains no
blue edge, which implies that $p_{2}=0$. Since $R(K_{4}^{+}, K_{3})=9$, we have that $t\leq8$.
\begin{claim}\label{claim:p1}
$p_{1}\leq 2$.
\end{claim}
Otherwise, suppose that $p_{1}\geq 3$. Then W.L.O.G., suppose that $H_{1}$, $H_{2}$ and $H_{3}$ are three corresponding subgraphs which contain red edges.
By Fact~\ref{fact:red edges}, $R[\{h_{1}, h_{2}, h_{3}\}]$ is a blue $K_{3}$. So $G$ has a blue $K_{3}$, a contradiction.
\begin{claim}\label{claim:G}
$$
|G|\leq\left(\frac{1}{8}p_{0}+\frac{5}{16}p_{1}\right)f(k, s)+(t-l).
$$
\end{claim}
\begin{proof}
First let $i\in [l]$ and suppose that $H_{i}$ contains no red edges. Then $H_{i}$ is colored with at most $k-2$ colors and contains neither a monochromatic $K_{4}^{+}$ in any of the first $s-1$ colors nor a monochromatic $K_{3}$ in any of the remaining $k-s-1$ colors.
Hence by the induction hypothesis and (4),
\begin{eqnarray*}
|H_{i}|
&\leq gr_{k-2}(K_{3}:(s-1)\cdot K_{4}^{+}, (k-s-1)\cdot K_{3})-1\\
&= f(k-2, s-1)
\leq\frac{1}{8}~f(k, s)\label{con:1}.
\end{eqnarray*}
Next suppose that $H_{i}$ contains a red edge.
Then $H_{i}$ is colored with exactly $k-1$ colors and by Fact~\ref{new}, $H_{i}$ contains neither monochromatic $K_{4}^{+}$ in one of the first $s-1$ colors nor monochromatic $K_{3}$ in one of the remaining $k-s$ colors.
Hence by the induction hypothesis and (3),
\begin{eqnarray*}
|H_{i}|\leq gr_{k-1}(K_{3}:(s-1)\cdot K_{4}^{+}, (k-s)\cdot K_{3})-1=f(k-1, s-1)\leq\frac{5}{16}~f(k, s)\label{con:2}.
\end{eqnarray*}
Therefore by the above inequalities, we get that
\begin{eqnarray*}
|G|\leq\left(\frac{1}{8}p_{0}+\frac{5}{16}p_{1}\right)f(k, s)+(t-l).
\end{eqnarray*}
\end{proof}
We now consider subcases based on the values of $l$ and $t$.
\begin{subcase}\label{case:t<=5}
$t\leq 5$.
\end{subcase}
By Claim~\ref{claim:p1}, $p_1\le 2$. Since $p_0+p_1=l\le t$ and by
Claim~\ref{claim:G}, we have that
\begin{eqnarray*}
|G|\leq\left(\frac{1\times3}{8}+\frac{5\times2}{16}\right)f(k, s)
< n,
\end{eqnarray*} a contradiction.
\begin{subcase}\label{case:l<=3}
$l\leq 2$ and $6\leq t\leq 8$.
\end{subcase}
Then by Claim~\ref{claim:p1} and Claim~\ref{claim:G}, we can get
that
\begin{eqnarray*}
|G|
&\leq&\left(\frac{1\times0}{8}+\frac{5\times2}{16}\right)f(k, s)+(t-2)\\
&\leq&\frac{5}{8}f(k, s)+6\\
&\leq&f(k, s) < n,
\end{eqnarray*} a contradiction.
\begin{subcase}\label{case:l>=4}
$l\geq 3$ and $6\leq t\leq 8$.
\end{subcase}
\begin{claim}\label{claim:3p1}
In this case, $p_{1}\leq1$. Further, if $t\ge 7$, then $p_{1}=0$.
\end{claim}
\begin{proof}
First, to the contrary, we can assume
that $H_{1}$ and $H_{2}$ both contain a red edge, and each other
$H_{i}$ contains no red edge by Claim~\ref{claim:p1}. Then by
Fact~\ref{fact:red edges}, we have that the edges
in $E_{G}(V_{1},V_{2})$ are blue. If there are two blue edges in $\{h_{1}h_{i} \mid 3\le i\le
t\}$, then we can find either a blue triangle $h_{1}h_ph_q$ ($2\le p<q\le t$), which contradicts the fact that $G$ has no blue $K_{3}$, or a red $K_3$ on $h_2h_ph_q$ ($3\le p<q\le t$), which contradicts Fact~\ref{fact:red k3}.
So we can assume that all edges in
$\{h_{1}h_{i} \mid 4\le i\le t\}$ are red. It follows that we can find either
a red triangle $h_1h_ph_q$ ($4\le p<q\le
t$), which contradicts Fact~\ref{fact:red k3}, or
a blue $K_3$ on $h_4h_5h_6$, a contradiction. Secondly, let
$t\ge 7$. To the contrary, we can assume that
$H_{1}$ contains a red edge and each other $H_{i}$ contains no red
edge. Then by Fact~\ref{fact:red k3} and since $G$ has no blue
$K_{3}$, there are at most two red edges in $\{h_{1}h_{i} \mid 2\le
i\le t\}$. So we can assume that all edges in $\{h_{1}h_{i} \mid
4\le i\le t\}$ are blue. It follows that we can find either a blue
$K_3$ on $h_1h_ph_q$, where $4\le p<q\le t$, a
contradiction, or a red $K_4$ induced by
$\{h_4,h_5,h_6,h_7\}$, which contradicts Fact~\ref{fact:red
k4}.
\end{proof}
Thus, if $t=6$, then $p_{1}\leq1$. By Claim~\ref{claim:G}, we have that
\begin{eqnarray*}
|G|
&\leq&\left(\frac{5\times1}{16}+\frac{l-1}{8}\right)f(k, s)+(6-l)\\
&=&\frac{2l+3}{16}f(k, s)+(6-l)<f(k, s)<n,
\end{eqnarray*} a contradiction.
If $ t\ge 7$, then by Claim~\ref{claim:3p1} and
Claim~\ref{claim:G}, we have that $p_{1}=0$ and
\begin{eqnarray*}
|G|\leq \frac{l}{8}f(k, s)+(t-l)
\leq f(k, s)
< n,
\end{eqnarray*} a contradiction. The proof of Case~\ref{case:s>=1} is completed.
\begin{case}\label{case:s>=2}
Red and blue are both in the first $s$ colors (so $s\geq 2$).
\end{case}
This means that there exists neither red $K_{4}^{+}$ nor blue $K_{4}^{+}$ in
$G$. W.L.O.G., suppose that red and blue are the $(s-1)$th and
$s$th color, respectively. Since $R(K_{4}^{+}, K_{4}^{+})=18$, we know that
$t\leq 17$. Since $s\ge 2$ and $k\ge 3$, $f(k, s)\ge 35$. First we prove some claims.
\begin{claim}\label{claim:drbwi}
For any $h_{i} \in V(R)$, we have $d_{r}(h_{i})\leq8$ and $d_{b}(h_{i})\leq8$ in $R$.
\end{claim}
W.L.O.G., suppose, to the contrary, that $d_{r}(h_{i})\geq9$. Since $R(K_{3},
K_{4}^{+})=9$, then the subgraph of $R$ induced by $N_{r}(h_{i})$
contains either a red $K_{3}$ or a blue $K_{4}^{+}$. So $R$
contains a red $K_{4}^{+}$ or blue $K_{4}^{+}$. Thus $G$ contains
also, a contradiction.
\begin{claim}\label{claim:drbwi2}
If $d_{r}(h_{i})\geq4$, then $h_{i}\notin I_{r}$, and if $d_{b}(h_{i})\geq4$, then $h_{i}\notin I_{b}$.
\end{claim}
Suppose that $d_{r}(h_{i})\geq4$. To the contrary, suppose that
$H_{i}$ contains a red edge. If the induced subgraph
$R[N_{r}(h_{i})]$ contains a red edge, say $h_{p}h_{q}$, then we
find a red $K_3$ which is $h_{i}h_{p}h_{q}$, which contradicts
with Fact~\ref{fact:red k3}. Otherwise, $R[N_{r}(h_{i})]$ contains
a blue $K_{4}$. So $G[\bigcup_{h_p\in N_{r}(h_{i})}V_{p}]$ contains
a blue $K_{4}$, which contradicts with Fact~\ref{fact:red k4}. So we have
that $h_{i}\notin I_{r}$. The proof for blue is as same as the above one for red
symmetrically.
\begin{claim}\label{claim:Vb+Vr}
$|I_{b}|+|I_{r}|\leq 4$.
\end{claim}
If there is a vertex $h_{i}\in I_{b}\cap I_{r}$, then by
Fact~\ref{fact:red k3}, $h_{i}$ is contained in neither a red
$K_{3}$ nor a blue $K_{3}$ in $R$. By Fact~\ref{fact:red edges},
we know that the induced subgraph of $R$ by $I_{r}$ is a blue
complete graph and the induced subgraph of $R$ by $I_{b}$ is a red
complete graph. It follows that $|I_{b}|\leq 2$ and $|I_{r}|\leq
2$.
Now we can assume that $I_{b}\cap I_{r}=\emptyset$ by
Fact~\ref{fact:VbVr}. To the contrary, suppose that
$|I_{b}|+|I_{r}|\ge 5$. If $|I_{b}|\geq 4$, then by
Fact~\ref{fact:red edges}, the subgraph $R[I_{b}]$ contains a red
$K_{4}$, which contradicts with Fact~\ref{fact:red k4}. Then
$|I_{b}|\leq 3$. By the same reasons, we know that $|I_{r}|\leq
3$. Then $|I_{b}|=3$ or $|I_{r}|=3$. W.L.O.G., suppose that
$|I_{b}|=3$. Then $|I_{r}|\ge 2$. Let $I_{b}=\{h_1, h_2, h_3\}$
and $h_4, h_5\in I_{r}$. By Fact~\ref{fact:red edges}, $h_1h_2h_3$
is a red triangle and $h_4h_5$ is a blue edge in $R$. It is easy
to check that there exists a red triangle $h_ph_ih_j$ such that $p\in
\{4, 5\}$ and $i,j\in [3]$ or a blue $K_3=h_ih_4h_5$ such that
$i\in [3]$, which contradicts with Fact~\ref{fact:red k3}.
\begin{claim}\label{claim:G2}
$$
|G|\leq \left(\frac{1}{17}p_{0}+\frac{5}{2\times17}p_{1}+\frac{5}{17}p_{2}\right)f(k, s)+(t-l).
$$
\end{claim}
\begin{proof}
First suppose that $H_{i}$($i\leq l$) contains neither red nor blue edges.
This means that $H_{i}$ is colored with exactly $k-2$ colors and satisfies that it has neither monochromatic $K_{4}^{+}$ in one of the first $s-2$ colors nor monochromatic $K_{3}$ in one of the remaining $k-s$ colors.
It is easy to check that $$\frac{f_{j}(k-2, s-2)}{f_{j}(k, s)}=\frac{1}{17},$$ for any $1\leq j\leq 5$.
Hence by the induction hypothesis,
\begin{equation*}|H_{i}|\leq gr_{k-2}(K_{3}:(s-2)\cdot K_{4}^{+}, (k-s)\cdot K_{3})-1=f(k-2, s-2)=\frac{1}{17}f(k, s).\tag{6}\end{equation*}
Next suppose that $H_{i}$ contains no red edges but contains blue edges.
This means that $H_{i}$ is colored with exactly $k-1$ colors and satisfies that it contains neither monochromatic $K_{4}^{+}$ in one of the first $s-2$ colors nor monochromatic $K_{3}$ in one of the remaining $k-s+1$ colors by Fact~\ref{new}.
It is easy to check that $$\frac{f_{2}(k-1, s-2)}{f_{1}(k, s)}=\frac{f_{3}(k-1, s-2)}{f_{3}(k, s)}=\frac{f_{5}(k-1, s-2)}{f_{4}(k, s)}=\frac{2}{17},$$ $$\frac{f_{1}(k-1, s-2)}{f_{2}(k, s)}=\frac{f_{4}(k-1, s-2)}{f_{5}(k, s)}=\frac{5}{2\times17},$$
So by the induction hypothesis,
\begin{equation*}|H_{i}|\leq gr_{k-1}(K_{3}:(s-2)\cdot K_{4}^{+}, (k-s+1)\cdot K_{3})-1=f(k-1, s-2)\leq\frac{5}{2\times17}f(k, s).\tag{7}\end{equation*}
The same inequality holds if $H_{i}$ contains no blue edges but contains red edges.
Finally suppose that $H_{i}$ contains both red and blue edges.
This means that $H_{i}$ is colored with all $k$ colors and satisfies that it contains neither monochromatic $K_{4}^{+}$ in one of the first $s-2$ colors nor monochromatic $K_{3}$ in one of the remaining $k-s+2$ colors by Fact~\ref{new}.
It is easy to check that $\frac{f(k, s-2)}{f(k, s)}\leq\frac{5}{17},$
so by the induction hypothesis,
\begin{equation*}|H_{i}|\leq gr_{k}(K_{3}:(s-2)\cdot K_{4}^{+}, (k-s+2)\cdot K_{3})-1=f(k, s-2)\leq\frac{5}{17}f(k, s).\tag{8}\end{equation*}
Combining inequalities (6)--(8), we have the following inequality
\begin{eqnarray*}
|G|\leq \left(\frac{1}{17}p_{0}+\frac{5}{2\times17}p_{1}+\frac{5}{17}p_{2}\right)f(k, s)+(t-l).
\end{eqnarray*}
\end{proof}
We now consider subcases based on the value of $l$ and $t$.
\begin{subcase}\label{case:13=<t<=17}
$13\leq t\leq 17$.
\end{subcase}
By Claim~\ref{claim:drbwi}, $d_{r}(h_{i})\leq8$ and $d_{b}(h_{i})\leq8$ in $R$ for any $i\in [l]$.
Since $d_{r}(h_{i})+d_{b}(h_{i})=t-1$, we have that $d_{b}(h_{i})\geq4$ and $d_{r}(h_{i})\geq4$ in $R$.
Then by Claim~\ref{claim:drbwi2}, every $H_{i}$ contains neither red nor blue edge.
So $p_{2}=p_{1}=0$. Thus $p_{0}=l$. By Claim~\ref{claim:G2}, we have that
\begin{eqnarray*}
|G|\leq \frac{l}{17}~f(k, s)+(t-l)< f(k, s)+1=n,
\end{eqnarray*}
a contradiction.
Next, we consider the case that $4\leq t\leq 12$. By
Fact~\ref{fact:VbVr}, we have that $p_{2}\leq1$.
\begin{subcase}\label{case:1=<l<=3}
$l\leq 3$.
\end{subcase}
Then $p_{1}\leq2$ if $p_{2}=1$ and $p_{1}\leq3$ if $p_{2}=0$. Hence by Claim~\ref{claim:G2}, we get
\begin{eqnarray*}
|G|
&\leq&\begin{cases}
\frac{10}{17}~f(k, s)+(t-3), &\text{if $p_{2}=1$ and $p_{1}\leq2$,}\\
\frac{15}{34}~f(k, s)+(t-3), &\text{if $p_{2}=0$ and $p_{1}\leq3$}\\
\end{cases}\\
&\leq &f(k, s)< n,
\end{eqnarray*}
a contradiction.
\begin{subcase}\label{case:4=<l<=10}
$4\leq l\leq10$.
\end{subcase}
First suppose that $p_{2}=1$. It follows that $t\leq7$. Otherwise, for any $h_{i} \in V(R)$, we have that $d_{r}(h_{i})\ge 4$ or $d_{b}(h_{i})\ge 4$ in $R$ since $d_{r}(h_{i})+d_{b}(h_{i})=t-1$. Then by
Claim~\ref{claim:drbwi2}, every $H_{i}$ contains no red
edge or contains no blue edge, which contradicts the assumption that $p_{2}=1$. By Claim~\ref{claim:Vb+Vr}, we have that $p_{1}\leq2$. Thus, by
Claim~\ref{claim:G2}, we have that
\begin{eqnarray*}
|G|\leq \frac{14}{17}~f(k, s)< n,
\end{eqnarray*} a contradiction.
Now we assume that $p_{2}=0$. By Claim~\ref{claim:Vb+Vr}, we have that $p_{1}\leq4$.
This means that
\begin{eqnarray*}
|G|\leq \left[\frac{5}{2\times17}p_{1}+\frac{1}{17}(l-p_{1})\right]f(k, s)+(t-l)
\leq\frac{16}{17}~f(k, s)+2\leq f(k, s)
< n,
\end{eqnarray*} a contradiction.
\begin{subcase}\label{case:l=11}
$l\ge 11$.
\end{subcase}
Then $11\le l\le t\le 12$. Hence $d_{r}(h_{i})\geq4$ or
$d_{b}(h_{i})\geq4$ for each $i\in [t]$. It follows that $p_{2}=0$
by Claim~\ref{claim:drbwi2}. If $d_{r}(h_{i})\geq4$ and
$d_{b}(h_{i})\geq4$ for each $i\in [l]$, then $p_{1}=0$ by
Claim~\ref{claim:drbwi2}. So $p_{0}=l$. By Claim~\ref{claim:G2},
we have that $$|G|\leq\frac{l}{17}f(k, s)+(t-l)\leq f(k, s)<n,$$ a
contradiction. So, W.L.O.G., we can assume that
$d_{r}(h_{1})\geq4$ and $d_{b}(h_{1})\le 3$. By
Claim~\ref{claim:drbwi}, we know that $(d_{r}(h_{1}),
d_{b}(h_{1}), l, t)\in \{(7, 3, 11, 11), (8, 2, 11, 11), (8, 3,
11, 12), (8, 3, 12, 12)\}$. Let
$\widetilde{d_{r}}(h_{1})=|N_r(h_{1})\cap \{h_{1}, \cdots,
h_{l}\}|$ and $\widetilde{d_{b}}(h_{1})=|N_b(h_{1})\cap \{h_{1},
\cdots, h_{l}\}|$. Then $(\widetilde{d_{r}}(h_{1}),
\widetilde{d_{b}}(h_{1}), l)\in \{(7, 3, 11), (8, 2, 11), (8, 3,
12)\}$. Let $F$ be the subgraph of $R$ induced by $N_r(h_{1})\cap
\{h_{1}, \cdots, h_{l}\}$. So $|F|\ge 7$. Clearly, $F$ has no red
$K_3$. Otherwise we can find a red $K_4$ obtained by a red $K_3$
in $F$ and $h_1$, which contradicts with Fact~\ref{fact:red k4}.
\begin{claim}\label{claim:FIrIb}
$V(F)\cap I_{b}=\emptyset$, $V(F)\cap I_{r}=\emptyset$
and $h_1\notin I_r$.
\end{claim}
First we claim that the red degree of $h_i$ in $F$ is at most 3
for each $h_i\in V(F)$. Otherwise, suppose that $h_i$ has at least
four red neighbors in $F$. Since $F$ has no red $K_3$, we can find
a blue $K_4$ induced by the red neighbors of $h_{i}$ in $F$, which
contradicts with Fact~\ref{fact:red k4}. Then the blue degree
of $h_i$ in $F$ is at least 3 for each $h_i\in V(F)$. It follows
that every vertex $h_i$ of $F$ is contained in a blue triangle of
$F$.
Thus, by
Fact~\ref{fact:red k3}, for each $h_i\in V(F)$, we have that
$h_{i}\notin I_{b}$. Secondly, we claim that the blue degree of
$h_i$ in $F$ is at most 5 for each $h_i\in V(F)$. Otherwise, we
find a blue $K_3$ induced by the blue
neighbors of $h_{i}$ in $F$ since $R(K_{3}, K_{3})=6$ and $F$ has no red $K_3$. Then there
exists a blue $K_{4}$ in $R$, which contradicts with
Fact~\ref{fact:red k4}. It follows that the red degree of $h_i$ in
$F$ is at least 1 for each $h_i\in V(F)$. Hence $h_1$ and $h_i$
are contained in the same red triangle of $R$ for each vertex
$h_i$ of $F$. Thus, by Fact~\ref{fact:red k3}, $h_{1}\notin I_{r}$
and for each $h_{i}\in V(F)$, we have that $h_{i}\notin I_{r}$.
By Claim~\ref{claim:FIrIb}, $p_{0}\geq |F|$. Now we consider the case that $(\widetilde{d_{r}}(h_{1}),
\widetilde{d_{b}}(h_{1}), l)=(8, 2, 11)$. Then $p_{0}\geq 8$. By Fact~\ref{fact:VbVr},
$p_2\le 1$.
If $p_2=0$, then by Claim~\ref{claim:G2}, we get that
\begin{eqnarray*}
|G|\leq
\left[\frac{1}{17}p_{0}+\frac{5}{2\times17}(11-p_{0})\right]f(k,
s)+(t-l) \leq\frac{31}{34}~f(k, s)+1< f(k, s)
< n,
\end{eqnarray*} a contradiction. If $p_2=1$, we can assume that
$h_p\in I_r\cap I_b$. Then by Claim~\ref{claim:FIrIb}, $h_p\in
N_b(h_{1})$. By Fact~\ref{fact:red k4}, $h_1\notin I_b$. Then by
Claim~\ref{claim:FIrIb}, $h_1\notin I_r\cup I_b$. So
$p_{0}\geq |F|+1=9$. Then by Claim~\ref{claim:G2}, we get that
\begin{eqnarray*}
|G|\leq
\left[\frac{9}{17}+\frac{5}{2\times17}+\frac{5}{17}\right]f(k, s)+(t-l)
\leq\frac{33}{34}~f(k, s)+1< f(k, s)+1= n,
\end{eqnarray*} a contradiction.
Now we consider the remaining cases that
$(\widetilde{d_{r}}(h_{1}), \widetilde{d_{b}}(h_{1}), l)\in \{(7,
3, 11), (8, 3, 12)\}$. Then we can assume that $N_b(h_{1})\cap
\{h_{1}, \cdots, h_{l}\}=\{h_o, h_p, h_q\}$. First consider the
case that there is a blue edge spanned by vertices in $\{h_o, h_p,
h_q\}$, say $h_ph_q$. Then $h_{1}h_ph_q$ is a blue $K_{3}$. By
Fact~\ref{fact:red k3}, we have that $h_{1}, h_p, h_q\notin
I_{b}$. Then by Claim~\ref{claim:FIrIb}, $h_1\notin I_r\cup I_b$. So $p_{0}\geq |F|+1$. If $p_2=0$, then by
Claim~\ref{claim:G2}, we get that
\begin{eqnarray*}
|G|\leq
\left[\frac{1}{17}p_{0}+\frac{5}{2\times17}(l-p_{0})\right]f(k,
s)+(t-l) \leq\frac{33}{34}~f(k, s)+1< f(k, s)+1= n,
\end{eqnarray*} a contradiction. If $p_2=1$, then
$h_o\in I_r\cap I_b$ and $h_oh_p, h_oh_q$ are red. Then by
Fact~\ref{fact:red k4}, $h_p, h_q\notin I_r\cup I_b$. So $p_{0}=|F|+3$. Then by Claim~\ref{claim:G2}, we get that
\begin{eqnarray*}
|G|\leq \left[\frac{11}{17}+\frac{5}{17}\right]f(k, s)+t-l
< f(k, s)+1=n,
\end{eqnarray*} a contradiction.
Secondly, we consider the case that $h_oh_ph_q$ is a red $K_{3}$.
By Fact~\ref{fact:red k3}, $h_o, h_p, h_q\notin I_r$. Then
$p_2=0$. By Fact~\ref{fact:red k4}, for each vertex $h_i$ in $F$,
there is at least one blue edge in $E_R(h_i, \{h_o, h_p, h_q\})$.
This means that there are at least 7 blue edges between $V(F)$ and
$\{h_o, h_p, h_q\}$. By the pigeonhole principle, there is a
vertex in $\{h_o, h_p, h_q\}$, say $h_o$, such that
$|N_{b}(h_o)\cap V(F)|\ge 3$.
If the subgraph induced by $N_{b}(h_o)\cap V(F)$ contains no blue
edge, then $N_{b}(h_o)\cap V(F)$ along with $h_{1}$ induce a red
$K_{4}$ in $R$, which contradicts with Fact~\ref{fact:red k4}.
Then the subgraph induced by $N_{b}(h_o)\cap V(F)$ contains a blue
edge. So $h_o$ is contained in both a red $K_{3}$ and a blue
$K_{3}$. Thus $h_o\notin I_{b}\cup I_{r}$ by Fact~\ref{fact:red k3}. So $p_{0}\geq |F|+1$. By
Claim~\ref{claim:G2}, we have that
$$|G|\leq\left[\frac{1}{17}p_{0}+\frac{5}{2\times17}(l-p_{0})\right]f(k, s)+(t-l)\leq\frac{33}{34}~f(k, s)+1< f(k, s)+1= n,$$ a contradiction.
This completes the proof of Case~\ref{case:s>=2} and hence the proof of Theorem~\ref{Thm:K4+K3}.
\end{proof}
\end{document}
\begin{document}
\title{Dynamical invariants in non-Markovian quantum state diffusion equation}
\author{Da-Wei Luo}
\affiliation{Beijing Computational Science Research Center, Beijing 100084, China}
\affiliation{Department of Theoretical Physics and History of Science, The Basque Country University (UPV/EHU), PO Box 644, 48080 Bilbao, Spain}
\affiliation{Ikerbasque, Basque Foundation for Science, 48011 Bilbao, Spain}
\author{P. V. Pyshkin}
\affiliation{Beijing Computational Science Research Center, Beijing 100084, China}
\affiliation{Department of Theoretical Physics and History of Science, The Basque Country University (UPV/EHU), PO Box 644, 48080 Bilbao, Spain}
\affiliation{Ikerbasque, Basque Foundation for Science, 48011 Bilbao, Spain}
\author{Chi-Hang Lam}
\affiliation{Department of Applied Physics, Hong Kong Polytechnic University, Hung Hom, Hong Kong, China}
\author{Ting Yu}
\affiliation{Beijing Computational Science Research Center, Beijing 100084, China}
\affiliation{Center for Controlled Quantum Systems and Department of Physics and Engineering Physics, Stevens Institute of Technology, Hoboken, New Jersey 07030, USA}
\author{Hai-Qing Lin}
\affiliation{Beijing Computational Science Research Center, Beijing 100084, China}
\author{J. Q. You}
\affiliation{Beijing Computational Science Research Center, Beijing 100084, China}
\author{Lian-Ao Wu}
\email{[email protected]}
\affiliation{Department of Theoretical Physics and History of Science, The Basque Country University (UPV/EHU), PO Box 644, 48080 Bilbao, Spain}
\affiliation{Ikerbasque, Basque Foundation for Science, 48011 Bilbao, Spain}
\date{\today}
\begin{abstract}
We find dynamical invariants for open quantum systems described by the non-Markovian quantum state diffusion (QSD) equation. In stark contrast to closed systems, where the dynamical invariant can be identical to the system density operator, these dynamical invariants no longer share the equation of motion for the density operator. Moreover, the invariants obtained with a bi-orthonormal basis can be used to render an exact solution to the QSD equation and the corresponding non-Markovian dynamics without using master equations or numerical simulations. Significantly, we show that we can apply these dynamical invariants to reverse-engineer a Hamiltonian that is capable of driving the system to the target state, providing a novel way to design control strategies for open quantum systems.
\end{abstract}
\pacs{03.67.Pp, 03.65.Ge, 32.80.Qk, 33.80.Be}
\maketitle
\section{Introduction}
The theory of open quantum systems~\cite{Breuer2002} provides a realistic and complete description that takes into account the often uncontrollable and inevitable interaction between the system under consideration and its environment. This field has attracted considerable attention because environment-induced effects play a vital role in a wide variety of research topics such as quantum information~\cite{Nielsen2000}, quantum transport~\cite{Rebentrost2009} and quantum optics~\cite{Scully1997}. Indeed, in practical quantum information processing, the inevitable interactions between the system and the environment generally lead to a deterioration of quantum information, which is one of the biggest hurdles in building quantum devices. Conventionally, the Markov approximation has been used extensively because of its simplicity and its validity for systems where the system-bath coupling is weak and the memory effect of the bath is negligible. The Markov approximation entails that the open-system dynamics is forgetful, and it becomes invalid when the system-environment coupling is strong or when the environment is structured~\cite{Breuer2002}. Consequently, general non-Markovian environments have to be considered in explaining new experimental advances in quantum optics~\cite{exp_a1}, as well as in various quantum information tasks where environmental memory can be utilized to control entanglement dynamics~\cite{exp_a2}. Therefore, it is vital to have a non-Markovian description of the system's dynamics under the influence of the memory effects and the back-action of the environment without making any approximation. However, a precise description of non-Markovian open systems has long been a challenge, and many theoretical approaches have been developed to this end~\cite{Bellomo2007,Piilo2008,Tu2008,Diosi1998,Strunz1999}. Among them, a stochastic Schr\"odinger equation called the non-Markovian quantum state diffusion (QSD) equation~\cite{Diosi1998,Strunz1999}, which was derived from a microscopic Hamiltonian, has several advantages over other exact master equations and has proven to be a powerful tool in the study of the system dynamics. While originally derived for systems embedded in a bosonic bath, the QSD framework has been extended to fermionic baths as well~\cite{fermionic_Chen2013,Fermionic_Zhao2012}. Exact master equations have been derived for many interesting systems such as dissipative multi-level atoms~\cite{Jing2012}, multiple qubits~\cite{Jing2013} and quantum Brownian motion~\cite{Diosi1998,Strunz2004}, the latter also treated exactly via a path-integral approach~\cite{Hu1992}. Recently, a generic tool for deriving non-Markovian master equations has been developed using the QSD approach~\cite{Chen2014}, applicable to a generic open quantum system irrespective of the system-environment coupling strength and the environment frequency distribution. Quantum continuous measurement~\cite{Diosi2008,Wiseman2008,Wu04} and quantum control methods~\cite{Jing2013a} employing the QSD equation have also been studied.
In quantum mechanics, an invariant of a quantum system remains intact during evolution of the system. The Lewis-Riesenfeld dynamical invariant~\cite{Lewis1967,Lewis1969} which was first introduced to find the solutions of time-dependent Schr\"{o}dinger equations has been used lately to engineer quantum states~\cite{Jing2013b,Chen2011}, perform quantum computation tasks~\cite{Sarandy2011} as well as study shortcuts to adiabaticity~\cite{Ibanez2011}. However, it has been shown that for closed systems under hermitian Hamiltonians, the system density matrix itself (evolved by the propagator) can be a dynamic invariant, since they share the same linear equation of motion. The dynamical invariant has also been extended to non-hermitian Hamiltonians~\cite{Gao1992} and convolutionless master equations~\cite{Sarandy2007}. In this paper, we show that for open systems whose dynamics can be described by the QSD equation, the invariants are no longer equivalent to the reduced density operator. It is also possible to obtain an analytic solution of the QSD equation using the dynamical invariants under a bi-orthonormal basis, yielding new information on the analytical quantum trajectories. Using the QSD invariants, we can also reverse-engineer a Hamiltonian that is capable of driving the system to the target state. Unlike unitary evolutions, this control protocol allows the spectrum of the state to change, making it more appealing to experimental realizations.
\section{QSD equation}
We consider a generic quantum system embedded in a bosonic bath with the Hamiltonian~\cite{Diosi1998,Strunz1999} (setting $\hbar=1$)
\begin{equation}
H=H_\mathrm{sys}+\sum_k \left(g_k L b_k ^\dagger+g_k^* L^\dagger b_k \right)+\sum_k \omega_k b_k ^\dagger b_k,
\end{equation}
where $H_\mathrm{sys}$ is the Hamiltonian of the system, $L$ is the Lindblad operator, $b_k$ denotes the $k$th-mode annihilation operator of the bosonic bath with frequency $\omega_k$, and $g_k$ stands for the coupling strength. The bath state can be represented by a set of complex numbers $\{z_k\}$ which labels the Bargmann coherent state of each bath mode $k$. One remarkable feature of this open system is that the influence of the bath can be fully encoded in a bath correlation function $\alpha(t,s)=\sum_k |g_k|^2 e^{-i \omega_k (t-s)}$. If we define a function $z_t^*\equiv -i\sum_k g_k^* z_k^* e^{i \omega_k t}$ that characterizes the time-dependent states of the bath and interpret $z_k$ as a Gaussian random variable, then $z_t^*$ becomes a Gaussian random process with a zero mean $\mathcal{M}[z_t^*]=0$ and the correlation function $\alpha(t,s)=\mathcal{M}[ z_t z_s^*]$, where $\mathcal{M}[\ldots]$ stands for the ensemble average. For simplicity, we first consider the case of zero-temperature bath. In this case, the state $|\psi_{z^*}(t) \rangle=\langle z^*|\Psi_{\rm tot}(t)\rangle$, obtained by projecting the total wave function $|\Psi(t)\rangle$ onto the bath state $|z \rangle$, corresponds to a quantum trajectory of the system and obeys a linear, time-local QSD equation~\cite{Diosi1998}
\begin{equation}
\frac{\partial}{\partial t}|\psi_{z^*}(t) \rangle= \left[-iH_\mathrm{sys}+Lz^*-L ^\dagger \bar{O}(t,z^*)\right]|\psi_{z^*}(t) \rangle,\label{qsd}
\end{equation}
where $O$ is an operator ansatz defined by the functional derivative $\frac{\delta}{\delta z_s^*}|\psi_{z^*}(t) \rangle=O(t,s,z^*)|\psi_{z^*}(t) \rangle$, and $\bar{O}(t,z^*)=\int_0^t \alpha(t,s) O(t,s,z^*)ds$. The reduced density operator $\rho_s(t)\equiv\mathrm{Tr}_\mathrm{env}|\Psi_\mathrm{tot} \rangle\langle \Psi_\mathrm{tot}|$ can be obtained as $\rho_s(t)=\mathcal{M}[|\psi_{{z}^*}(t) \rangle \langle{\psi}_{{z}}(t)|]$ by the ensemble average of the quantum trajectories under all possible realizations of the noise function and then the corresponding non-Markovian master equations can in principle be derived. The main challenge in the application of the QSD is to derive the functional derivative $O$-operator. This $O$-operator can be exactly obtained for some simple models (see e.g.~\cite{Diosi1998}) or perturbatively derived for more general systems~\cite{Li2014e}. Note that the QSD equation under a specific noise realization $z_t^*$ can be formally interpreted as a Schr\"{o}dinger equation with the non-hermitian effective Hamiltonian
\begin{equation}
H_\mathrm{eff}=H_s+iLz^*-iL^\dagger \bar{O}(t,z^*). \label{heff_qsd_def}
\end{equation}
Below we use the dynamical invariants to analytically solve the QSD equation, which could give us an explicit expression for the reduced density matrix.
\section{Dynamical invariants in an open quantum system}
The Lewis-Riesenfeld dynamical invariant was first developed~\cite{Lewis1967} to study the magnetic-moment series for a charged particle moving nonrelativistically in an electromagnetic field and later generalized~\cite{Lewis1969} to solve the time-dependent Schr\"odinger equation. The invariant $I(t)$ was defined so that its expectation value under any density operator $\rho(t)$ is \emph{time-independent}, i.e., $\frac{\partial}{\partial t}\mathrm{Tr}\left[\rho(t) I(t)\right]\equiv \frac{\partial}{\partial t}\mathcal{I}\equiv0$.
It has been shown that the Lewis-Riesenfeld dynamical invariant is useful in dealing with time-dependent quantum problems, such as quantum computing in continuous time~\cite{Sarandy2011}. In fact, for closed systems whose dynamics is governed by a hermitian Hamiltonian, the Lewis-Riesenfeld dynamical invariant obeys $\frac{\partial}{\partial t}I(t)=-i[H,I(t)]$, i.e. the von Neumann equation and thus shares the same dynamical behavior as the density operator. As a result, with the knowledge of the dynamical invariant, one can know the dynamics of the system under consideration. It is shown~\cite{Jing2013b} that the dynamical invariant $I(t)$, the propagator $U(t)$ and the density operator $\rho(t)$ are mutually equivalent to each other without considering the Lewis-Riesenfeld phase. Indeed, if we let $I(t)=\rho(t)$, then for any unitary propagator $U(t)$, and density operator $\sigma(t)$, we have $\mathcal{I}=\mathrm{Tr}\left[\rho(0)\sigma(0)\right]$, which is time independent. The propagator $U(t)$ can also be written as $U(t)=\sum_n |\varphi_n(t)\rangle\langle\varphi_n(0)|$, where $|\varphi_n(t)\rangle$ is the instant eigenvector of the dynamical invariant. Thus, it readily follows that if a closed system is initially prepared in one of the eigenvectors of $I(0)$, then it will necessarily evolve to the instant eigenvector of $I(t)$ with the same index at a later time $t$. This property makes the dynamical invariant a valuable tool for both studying the state engineering~\cite{Jing2013b} and calculating the geometric phases.
In contrast, for an open quantum system whose dynamics is determined by the QSD equation, the problem becomes complicated. The reduced density operator under a particular realization of noise function $z$ can be shown to satisfy
\begin{equation}
\frac{\partial}{\partial t}P_z(t)=i \left[P_z(t) H_{\mathrm{eff}}^\dagger-H_{\mathrm{eff}}P_z(t)\right]\label{drho},
\end{equation}
where $P_z(t)=|\psi_{z^*}(t) \rangle\langle \psi_{z}(t)|$ and $H_\mathrm{eff}$ is the effective time-dependent non-hermitian Hamiltonian given by the QSD in Eq.~\eqref{qsd}. If one directly defines the dynamical invariant as in a hermitian system by imposing $\frac{\partial}{\partial t}\mathrm{Tr}\left[P_z(t) I(t)\right]\equiv 0$, it can be seen that the invariant satisfies $\frac{\partial}{\partial t}I(t)=i \left[I(t)H_{\mathrm{eff}}-H_{\mathrm{eff}}^\dagger I(t)\right]$,
which differs from Eq.~\eqref{drho} unless $H_\mathrm{eff}$ is hermitian. Thus, the dynamical invariant defined this way does not give the reduced density operator of the system under a given noise channel $z$.
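For completeness, we note that this follows from a one-line calculation: using Eq.~\eqref{drho} and the cyclic property of the trace,
\begin{equation}
\frac{\partial}{\partial t}\mathrm{Tr}\left[P_z(t) I(t)\right]=\mathrm{Tr}\left[P_z(t)\left(iH_{\mathrm{eff}}^\dagger I(t)-iI(t)H_{\mathrm{eff}}+\frac{\partial}{\partial t}I(t)\right)\right],
\end{equation}
so the imposed condition holds for every noise realization provided $I(t)$ obeys the equation of motion quoted above.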
On the other hand, various studies~\cite{Gao1992,Ibanez2011} have used a biorthonormal basis to study dynamical invariants for non-hermitian systems. In such a framework, a complete biorthonormal set of eigenvectors is introduced, so that the right and left eigenvectors of the Hamiltonian are given, respectively, by $H_{\mathrm{eff}}|\psi_\lambda(t)\rangle=\lambda(t)|\psi_\lambda(t)\rangle$ and $\langle\tilde{\psi}_\lambda(t)|H_{\mathrm{eff}}=\lambda(t)\langle\tilde{\psi}_\lambda(t)|$, where the orthonormality condition becomes $\langle\tilde{\psi}_\lambda|\psi_\mu \rangle=\delta_{\mu,\lambda}$ and the completeness relation is $\sum_\lambda |\psi_\lambda \rangle\langle\tilde{\psi}_\lambda|=\mathds{1}$. Note that it should be carefully checked whether such an eigendecomposition indeed exists for the system under consideration, because it need not exist for an arbitrary non-hermitian Hamiltonian~\cite{Wong1967}. For the criteria proposed in~\cite{Wong1967}, the system Hamiltonian plays the role of the self-adjoint part, and the remaining terms should generally be continuous and bounded in realistic physical scenarios. The time evolution is now governed by $i|\dot{\psi}(t)\rangle=H_\mathrm{eff}|\psi(t)\rangle$ and $i\frac{\partial}{\partial t}|\tilde{\psi}(t)\rangle=H_\mathrm{eff} ^\dagger|\tilde{\psi}(t)\rangle$. As a result, the definition $\frac{\partial}{\partial t}\mathrm{Tr}\left[\tilde{P}_z(t) I(t)\right]\equiv 0$, where $\tilde{P}_z(t)=|\psi_{z^*}(t) \rangle\langle \tilde{\psi}_{z}(t)|$, gives
\begin{equation}
\frac{\partial}{\partial t} I(t)=-i\left[H_{\mathrm{eff}},I(t)\right],\label{i_heff_def}
\end{equation}
which has the same form as in the hermitian case, albeit with the non-hermitian effective Hamiltonian given by the QSD equation~\eqref{qsd}. Remarkably, this newly defined invariant can be used to give an analytical solution to the QSD equation. Since the dynamical invariants of a non-hermitian Hamiltonian are no longer guaranteed to be hermitian, we should use the biorthonormal basis given by the instantaneous eigenvectors of $I(t)$, i.e., $I(t)|\varphi_\mu(t)\rangle=\mu|\varphi_\mu(t)\rangle$ and $\langle\tilde{\varphi}_\mu(t)|I(t)=\mu\langle\tilde{\varphi}_\mu(t)|$, with $\langle\tilde{\varphi}_\mu(t)|\varphi_\nu(t)\rangle=\delta_{\mu,\nu}$. We formally write the general solution to the QSD equation as $|\psi_{z^*}(t)\rangle=\sum_\mu c_\mu(t)|\varphi_\mu(t)\rangle$ and substitute it into the QSD equation. After some algebra (see the Appendix), we find that in the biorthonormal basis of the invariant, the QSD equation becomes an effectively uncoupled set of differential equations for the coefficients $c_\mu(t)$, whose solution is given by
\begin{equation}
c_\mu(t)=c_\mu(0)\exp \left[-\int_0^t d\tau \left(i \langle\widetilde{\varphi}_\mu|H_\mathrm{eff} |\varphi_\mu \rangle+\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\mu\rangle \right)\right].\label{psi_solu}
\end{equation}
This compact, explicit solution to the QSD equation is our central result, and it applies to any realization of the noise $z_t^*$. With $O$ determined, we can analytically predict the quantum trajectory for each realization of the noise $z_t$, which previously had to be computed numerically within the QSD method. The reduced density operator of the system can then be obtained by analytically taking the ensemble average over all noise realizations via Novikov's theorem~\cite{Chen2014}.
\begin{figure}
\caption{(Color online) The real and imaginary parts of the wave function versus time $t$ under one random realization of the noise function $z_t$ for the RWA spin-boson model, where we used $\gamma=\Gamma=\lambda=1$. The solid curves correspond to the numerical solutions; the solid and open circles and the triangles correspond to the analytical results obtained via the dynamical invariant of Eq.~\eqref{i_heff_def}.}
\label{fig_cf}
\end{figure}
\section{Quantum trajectories}
As an illustrative example, we apply the dynamical-invariant method to the quantum dynamics of a dissipative qubit under the rotating-wave approximation (RWA). This model is widely used to illustrate decoherence effects and is exactly solvable. The Hamiltonian of the system is $H_s=\sigma_z$ and the Lindblad operator is $L=\lambda\sigma_-$, where $\lambda$ is the system-bath coupling strength and the $\sigma$'s are Pauli matrices. The $\bar{O}$ operator for this model~\cite{Diosi1998} takes the form $\bar{O}(t,z^*)=F(t)\sigma_-$, where $F(t)$ is a function depending on both the system parameters and the bath spectral density. We can obtain the dynamical invariant for a given channel $z$ via Eq.~\eqref{i_heff_def} as
\begin{align}
I(t)&=\sigma_z+2\lambda\int_0^t z^*_u\exp \left[\int_u^t 2i+ \lambda F(s) ds\right] du \sigma_-\nonumber\\
&\equiv\sigma_z+g(t) \sigma_-.
\end{align}
The left and right eigenvectors can be readily obtained and we finally have
\begin{align}
|\psi_{z^*}(t)\rangle=&\psi_1(0)\exp \left[-\lambda\int_0^t F(\tau)d\tau-it\right]\begin{bmatrix}1\\\frac{g(t)}{2}\end{bmatrix} \nonumber\\
&+\psi_2(0)\exp(it)\begin{bmatrix}0\\1\end{bmatrix},
\end{align}
assuming an initial state $|\psi_{z^*}(0)\rangle=[\psi_1(0),\psi_2(0)]^T$, with $T$ denoting the transpose of a matrix. In particular, for the Ornstein-Uhlenbeck noise $\alpha(t,s)=\gamma\Gamma \exp(-\gamma|t-s|)/2$, $F(t)$ is determined by the Riccati-type equation $\dot{F}(t)=-\gamma F(t)+2iF(t)+\lambda F(t)^2+\lambda\gamma/2$.
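As an independent consistency check (not part of the original derivation), the invariant-based trajectory above can be compared against a direct numerical integration of the QSD equation~\eqref{qsd} for the same model. The minimal Python sketch below uses a single, arbitrarily chosen deterministic function in place of a sampled realization of $z_t^*$, first-order Euler stepping, and placeholder parameter values; it merely illustrates that, for a given $z_t^*$ and $F(t)$, the two constructions of the trajectory agree up to discretization error.
\begin{verbatim}
import numpy as np

lam, gam, dt, T = 1.0, 1.0, 1e-4, 5.0     # placeholder parameters
steps = int(T / dt)
z_star = lambda t: 0.3 * np.exp(1j * t)   # arbitrary deterministic stand-in for z_t^*

sm = np.array([[0, 0], [1, 0]], dtype=complex)             # sigma_-
sp, sz = sm.conj().T, np.diag([1.0, -1.0]).astype(complex)  # sigma_+, sigma_z

psi = np.array([1.0, 0.0], dtype=complex)   # direct Euler integration of the QSD equation
psi0 = psi.copy()
F = g = intF = 0.0 + 0.0j                   # F(t), g(t), and int_0^t F(s) ds; F(0)=0

for n in range(steps):
    t = n * dt
    z = z_star(t)
    dpsi = (-1j * sz + lam * z * sm - lam * F * sp @ sm) @ psi
    dF = -gam * F + 2j * F + lam * F**2 + lam * gam / 2   # Riccati equation quoted above
    dg = (2j + lam * F) * g + 2 * lam * z                 # ODE form of g(t)
    psi, F, g, intF = psi + dt * dpsi, F + dt * dF, g + dt * dg, intF + dt * F

# invariant-based (closed-form) trajectory evaluated at time T
psi_inv = (psi0[0] * np.exp(-lam * intF - 1j * T) * np.array([1.0, g / 2])
           + psi0[1] * np.exp(1j * T) * np.array([0.0, 1.0], dtype=complex))
print(np.max(np.abs(psi - psi_inv)))        # small, of order dt
\end{verbatim}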
Taking the ensemble average $\mathcal{M}[|\psi_{z^*}(t)\rangle\langle\psi_z(t)|]$ then gives the reduced density operator. This provides an analytic tool for dealing with the QSD equation: for a system with a known $O$ operator, we can directly obtain an explicit expression for the reduced density operator as a function of time, without using a master equation or resorting to numerical calculations. This can be very beneficial for high-dimensional systems, whose numerical treatment may be very time-consuming. Another, more complex example is the dissipative three-level atom with $H_s=\omega J_z=\omega \left(|0 \rangle\langle 0|-|2 \rangle\langle 2|\right)$ and $L=J_-=\sqrt{2}\left(|0 \rangle\langle 1|+|1 \rangle\langle 2|\right)$. The $\bar{O}$ operator for this model~\cite{Jing_3lvl} explicitly depends on the noise $z_t$ and is given by $\bar{O}=F(t)J_-+G(t)J_zJ_-+P_z(t) J_-^2$, where $F(t)$, $G(t)$ and $P_z(t)$ are time-dependent functions that can in principle be calculated once the correlation function $\alpha(t,s)$ is known. It is clear that we can assume an upper-triangular invariant of the form
\begin{equation}
I(t)=
\begin{pmatrix}
0&a(t)&b(t)\\
0&1&c(t)\\
0&0&2
\end{pmatrix}.
\end{equation}
Using the commutation relations of the ladder operators for the three-level system, we find from the definition Eq.~\eqref{i_heff_def} the dynamical invariant $I(t)$ for this model:
\begin{align}
&a(t)=\mathcal{R}\left[2(F(t)+G(t))-i \omega,\sqrt{2}z_t\right], \nonumber\\
&b(t)=\mathcal{R}\left[F(t)-i \omega,\sqrt{2} \left(2a(t)P_z(t)-\left(a(t)-c(t)\right)z_t\right) \right], \nonumber\\
&c(t)=\mathcal{R} \left[-2G(t)-i \omega,\sqrt{2}(z_t-2P_z(t))\right].
\end{align}
where $\mathcal{R}[g(t),h(t)]=\int_0^t \exp \left(\int_u^t g(s)ds\right)h(u)du$.
We then have
\begin{align}
|\psi_{z^*}(t)\rangle&=\psi_1(0)e^{-i \omega t}|0\rangle \nonumber\\
&+\psi_2(0)\exp\left[-2\int_0^t F(s)+G(s)ds\right]|\varphi_2(t) \rangle \nonumber\\
&+\psi_3(0)\exp\left[i \omega t-2\int_0^t F(s)ds\right]|\varphi_3(t)\rangle,
\end{align}
where
\begin{align}
|\varphi_2(t) \rangle&=a(t)|0 \rangle+|1 \rangle, \nonumber\\
|\varphi_3(t) \rangle&=\frac{b(t)+a(t)c(t)}{2}| 0 \rangle+c(t) |1 \rangle+|2 \rangle.
\end{align}
\begin{figure}
\caption{(Color online) The target fidelity and purity of three random initial states, marked by dashed curves.}
\label{fig_re}
\end{figure}
This analytically reveals the quantum trajectory of the dissipative three-level system.
\section{Reverse engineering}
Now we show how to use dynamical invariants to design a Hamiltonian that can drive an initial state to a target state by means of reverse engineering~\cite{Jing2013b}. Specifically, to design the needed Hamiltonian, we first construct an invariant $I(t)$ such that one of its time-dependent eigenvectors $|\varphi_1 \rangle$ follows the desired time-evolution path, according to Eq.~\eqref{psi_solu}. The remaining eigenvectors $\mathcal{R}=\{| \varphi_i\rangle\}$, $i=2,\ldots,N$, are left as undetermined parameters, which we use later to make the invariant compatible with the QSD equation. Then, we take the time derivative of this invariant to obtain its equation of motion and compare it with Eq.~\eqref{i_heff_def}, where $H_{\rm eff}$ should be formally compatible with Eq.~\eqref{heff_qsd_def}, as imposed by the QSD equation, i.e., $H_s$ should be hermitian, both $H_s$ and $L$ should be \emph{noise-independent}, and the $\bar{O}$ operator is determined by $H_s$ and $L$. This is achievable by choosing an appropriate basis set $\mathcal{R}$. We then obtain the desired system Hamiltonian $H_s$ and the corresponding $L$ operator.
As an illustrative example, we consider a two-level open system with the target state $|\psi_T \rangle=(|0 \rangle+|1 \rangle)/\sqrt{2}$. By letting this state be one of the eigenvector of the invariant, we first make a noise-dependent invariant of the form
\begin{equation}
I(t)=\begin{pmatrix}p(t,z^*_t)&-p(t,z^*_t)-1\\p(t,z^*_t)-1&-p(t,z^*_t)\end{pmatrix},\label{i_re}
\end{equation}
where $p(t,z^*_t)$ is a function determined by the system Hamiltonian $H_s$ and the Lindblad operator $L$ that we are designing. If the coefficient $c_\mu(t)$ in the general solution Eq.~\eqref{psi_solu} decays to zero for the other eigenvector of the invariant, the steady target state is reached. Unlike in a closed system, the specific form of the effective Hamiltonian of the QSD equation imposes constraints on $H_s$ and $L$, in that $H_s$ needs to be hermitian and noise-independent. With both $H_s$ and $L$ given, the $\bar{O}$ operator is then determined. Taking this into consideration and inserting Eq.~\eqref{i_re} into Eq.~\eqref{i_heff_def}, we find that $H_s=-\omega \sigma_x$ and $L=\lambda(\sigma_z-i \sigma_y)$. In Fig.~\ref{fig_re} we numerically plot the fidelity between the engineered state and the target state, as well as the purity of the engineered state, for three randomly chosen mixed initial states. It can be seen that the fidelity increases monotonically and becomes unity after some time, indicating that a steady target state is reached. In sharp contrast to a closed quantum system, which evolves unitarily, the spectrum of the state is free to change, and we can drive a \emph{mixed} state to a target \emph{pure} state by using the non-Markovian dynamics of an open quantum system.
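As a quick consistency check of the designed pair (our remark, not part of the original argument), note that the target state is simultaneously a dark state of the Lindblad operator and an eigenstate of the designed Hamiltonian,
\begin{equation}
L|\psi_T \rangle=\frac{\lambda}{\sqrt{2}}
\begin{pmatrix}1&-1\\1&-1\end{pmatrix}
\begin{pmatrix}1\\1\end{pmatrix}=0,
\qquad
H_s|\psi_T \rangle=-\omega|\psi_T \rangle,
\end{equation}
which is consistent with $|\psi_T \rangle$ persisting as a steady state once it has been reached.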
\section{Conclusion}
In conclusion, we have studied dynamical invariants in non-Markovian open systems whose temporal evolution is governed by the non-Markovian QSD equation. For systems that admit an exact operator representation of the functional derivative in the form of the $O$ operator, the dynamical invariant can be obtained analytically. Dynamical invariants of the QSD equation are discovered, and it is found that these non-hermitian dynamical invariants do not share the same equation of motion as the reduced density matrix, but their eigenvectors can be used to generate an analytical expression for the solution of the QSD equation. This enables us to obtain the temporal evolution of the open system without deriving and then solving non-Markovian master equations. Using reverse engineering along with the QSD invariants, we are able to design a Hamiltonian and a Lindblad operator that can drive an initial state to a target state via non-Markovian evolution.
\appendix*
\section{Derivation of the solution to the QSD equation}
Using Eq.~\eqref{i_heff_def}, we take the time derivative of
\begin{align}
I|\varphi_\lambda\rangle&=\lambda|\varphi_\lambda\rangle,\label{ieig}
\end{align}
and project it onto $\langle\widetilde{\varphi}_\mu|$,
\begin{align}
&\quad -i\lambda\langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda\rangle+i\mu\langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda\rangle+\mu\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\lambda\rangle \nonumber \\
&=\dot{\lambda}\langle\widetilde{\varphi}_\mu|\varphi_\lambda\rangle+\lambda\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\lambda\rangle.
\end{align}
Thus,
\begin{align}
\dot{\lambda}\delta_{\mu,\lambda}&=\left(\mu-\lambda\right)\left[\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\lambda\rangle+i\langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda\rangle\right],\\
\mu=\lambda&\Rightarrow \dot{\lambda}\equiv 0,\\
\mu\neq \lambda &\Rightarrow \langle\widetilde{\varphi}_\mu|\dot{\varphi}_\lambda\rangle=-i\langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda\rangle.\label{ieig_hsol}
\end{align}
We then expand the wave function in this basis:
\begin{align}
|\psi(t)\rangle=\sum_\lambda c_\lambda(t)|\varphi_\lambda(t)\rangle.
\end{align}
Inserting it into the QSD equation and projecting onto $\langle\widetilde{\varphi}_\mu|$, we have
\begin{align}
\sum_\lambda \dot{c_\lambda}\delta_{\mu,\lambda}+\sum_\lambda c_\lambda\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\lambda\rangle&=-i\sum_\lambda c_\lambda \langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda \rangle.
\end{align}
Therefore,
\begin{align}
\dot{c_\mu}&=\sum_\lambda \left[-i c_\lambda \langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda \rangle-c_\lambda\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\lambda\rangle\right],
\end{align}
using Eq.~\eqref{ieig_hsol},
\begin{align}
\dot{c_\mu}&=\sum_{\lambda\neq\mu} \left[-i c_\lambda \langle\widetilde{\varphi}_\mu|H_{\rm eff} |\varphi_\lambda \rangle+ic_\lambda\langle\widetilde{\varphi}_\mu|H_{\rm eff}|\varphi_\lambda\rangle\right] \nonumber\\
&\phantom{=} -i c_\mu \langle\widetilde{\varphi}_\mu|H_{\rm eff} |\varphi_\mu \rangle-c_\mu\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\mu\rangle \nonumber\\
&=-c_\mu \left[i \langle\widetilde{\varphi}_\mu|H_{\rm eff} |\varphi_\mu \rangle+\langle\widetilde{\varphi}_\mu|\dot{\varphi}_\mu\rangle\right]
\end{align}
Now the differential equations for coefficients $c_\mu(t)$ are decoupled and can be readily solved,
\begin{align}
c_\mu(t)=c_\mu(0)\exp \left(-\int_0^t d\tau \left[i \langle\widetilde{\varphi}_\mu|H_{\rm eff} |\varphi_\mu \rangle+\langle\widetilde{\varphi}_\mu|\frac{\partial}{\partial \tau}|\varphi_\mu\rangle \right]\right)
\end{align}
\begin{acknowledgments}
This work is supported by the Basque Government (Grant No.~IT472-10), the Spanish MICINN (Project No.~FIS2012-36673-C03-03), and the Basque Country University UFI (Project No.~11/55-01-2013). C.H.L. is supported by the Hong Kong GRF (Project No.~501213). J. Q. You is supported by the National Natural Science Foundation of China No.~91421102 and the National Basic Research Program of China No.~2014CB921401. T.Y. is supported by the NSF PHY-0925174 and DOD/AF/AFOSR No.~FA9550-12-1-0001. We would like to thank J. G. Muga for helpful discussions.
\end{acknowledgments}
\end{document}
\begin{document}
\title{Global solutions of $2$-$D$ cubic Dirac equation with non-compactly supported data}
\author{Qian Zhang}
\date{}
\maketitle
\begin{comment}
\begin{center}
{School of Mathematical Sciences, Laboratory of Mathematics and Complex Systems, MOE, Beijing Normal University, 100875, Beijing, P. R. China}
\end{center}
\end{comment}
\noindent {\bf{Abstract}}\ \ We are interested in the cubic Dirac equation in two space dimensions. We establish the small data global existence and sharp pointwise decay results for general cubic nonlinearities without additional structure. We also prove the scattering of the Dirac equation for certain classes of nonlinearities. In all the above results we do not require the initial data to have compact support.
\noindent {\bf{Keywords}}\ \ cubic Dirac equation $\cdot$ global-in-time solutions $\cdot$ sharp pointwise decay $\cdot$ ghost weight method
\noindent {\bf{Mathematics Subject Classifications (2010)}}\ \ 35L05 $\cdot$ 35L52 $\cdot$ 35L71
\section{Introduction}\label{s1}
Consider the nonlinear Dirac equation in two space dimensions
\begin{equation}\label{s1: 1.1}
i\gamma^\mu\partial_\mu\psi+m\psi=F(\psi)
\end{equation}
with initial data
\begin{equation}\label{s1: 1.2}
\psi(0,x)=\psi_0(x),
\end{equation}
where $i\gamma^\mu\partial_\mu=i\gamma^0\partial_t+i\gamma^1\partial_1+i\gamma^2\partial_2$ is the Dirac operator, $\partial_a=\partial_{x_a}$ for $a=1,2$, $\psi(t,x):\mathbb{R}^{1+2}\to\mathbb{C}^2$ is a spinor field with mass $m\ge 0$, and $\gamma^\mu$ are the Dirac matrices. Dirac matrices are defined by the identities
\begin{equation}\label{s1: 1.3}
\gamma^\mu\gamma^\nu+\gamma^\nu\gamma^\mu=-2g^{\mu\nu}I_2,\quad\quad (\gamma^\mu)^*=-g_{\mu\nu}\gamma^\nu,
\end{equation}
where $g=\mathrm{diag}(-1,1,1)$ denotes the Minkowski metric in $\mathbb{R}^{1+2}$, $\mu,\nu\in\{0,1,2\}$, $I_2$ is the $2\times 2$ identity matrix and $A^*=(\bar{A})^T$ is the Hermitian conjugate of the matrix $A$. We consider general cubic nonlinearities $F$ and do not require additional structures of $F$, i.e.,
\begin{equation}\label{s1: 1.4}
F(\psi)=(\psi^*H\psi)\psi,
\end{equation}
where $H\in\mathbb{C}^{2\times 2}$ is an arbitrary matrix and $\psi^*$ denotes the complex conjugate transpose of the vector $\psi$.
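For concreteness, one admissible choice, used here only as an illustration (none of the results below depend on a particular representation), is $\gamma^0=\sigma_3$, $\gamma^1=i\sigma_1$, $\gamma^2=i\sigma_2$, where $\sigma_1,\sigma_2,\sigma_3$ are the standard Pauli matrices; then $(\gamma^0)^2=I_2$, $(\gamma^a)^2=-I_2$, $(\gamma^0)^*=\gamma^0$, $(\gamma^a)^*=-\gamma^a$ for $a=1,2$, and $\gamma^\mu\gamma^\nu+\gamma^\nu\gamma^\mu=0$ for $\mu\neq\nu$, so that \eqref{s1: 1.3} holds.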
In the sequel, we use $C$ to denote a universal constant whose value may change from line to line. As usual, $A\lesssim B$ means that $A\le CB$ for some constant $C$. Given a vector or a scalar $w$ we use the Japanese bracket to denote $\langle w\rangle:=(1+|w|^2)^{1/2}$. Finally, we use $\Box=g^{\mu\nu}\partial_\mu\partial_\nu=-\partial_t^2+\partial_1^2+\partial_2^2$ to denote the Minkowski wave operator.
In quantum field theory the nonlinear Dirac equation is a model of self-interacting Dirac fermions and has been widely used to build relativistic models of extended particles. It was originally formulated in one space dimension known as the Thirring model \cite{T} and in three space dimension known as the Soler model \cite{So}, with cubic nonlinearities $F$ which can be written as
\begin{equation}\label{s1: TS}
F(\psi)=\left\{\begin{array}{l}
(\psi^*\gamma^0\gamma^\mu\psi)\gamma_{\!\mu}\psi,\\
(\psi^*\gamma^0\psi)\psi
\end{array}\right.
\end{equation}
respectively, where $\gamma_{\!\mu}=g_{\mu\nu}\gamma^\nu$.
In terms of the well-posedness of the Cauchy problem, the scale invariant regularity for the nonlinear Dirac equation in $\mathbb{R}^{1+n}$ is $s_c=\frac{n-1}{2}$ and therefore it is expected to be well posed for data $\psi_0\in H^s(\mathbb{R}^n)$ with $s\ge\frac{n-1}{2}$. In the low regularity setting, there are numerous results concerning local and global (in time) existence of solutions, see for example \cite{Ev, Tz, MNN, BH} in the case of three space dimensions. On the other hand, in the case of two space dimensions, Pecher \cite{P} proved the local well-posedness for data in $H^s(\mathbb{R}^2)$ in the almost critical case $s>\frac{1}{2}$. Bournaveas and
Candy \cite{BC} proved local well-posedness with initial data in the critical space $H^{\frac{1}{2}}(\mathbb{R}^2)$ and global well-posedness for the case $m=0$. Global well-posedness and scattering for the case $m>0$ with small initial data in $H^{\frac{1}{2}}(\mathbb{R}^2)$ was established by Bejenaru and Herr \cite{BH2}. We point out that these previous work mostly focused on nonlinearities as in \eqref{s1: TS}.
\begin{comment}
the Dirac equation with covariant homogeneous nonlinearities of degree $p\ge 3$ was studied in the almost critical Sobolev space $H^s, s>1$ in \cite{Ev}, where local well-posedness for $p\ge 3$ and global well-posedness for $p>3$ and small initial data were proved. The results in \cite{Ev, DF} were extended by Tzvetkov \cite{Tz}, who proved global existence of the solution in time for small smooth data in the case $|F(\psi)|\leftesssim|\psi|^p, p>2$. existence of global solutions for cubic nonlinearities \eqref{s1: TS} and small data in $H^s$ in the almost critical case $s>1$ for positive mass $m>0$ was proved in \cite{MNO}. This was improved to small $H^1$ data with some regularity assumption for the angular variable in \cite{MNN}, and was extended to the case of potentials in \cite{CD}. See also \cite{BH} for the recent result concerning global well-posedness and scattering for small data in the critical space $H^1$.
In the case of one space dimension, global existence for the Thirring model with data in $H^1$ was proved in \cite{De}, this was improved to less regular case $s>\frac{1}{2}$ in \cite{ST}. Candy \cite{Ca} improved this result and proved the global well-posedness for the Thirring model with data in the critical space $L^2$.
\end{comment}
We study global-in-time existence with pointwise decay of the solution to \eqref{s1: 1.1}-\eqref{s1: 1.2} in $\mathbb{R}^{1+2}$, with nonlinearities as in \eqref{s1: 1.4} and non-compactly supported initial data in a weighted Sobolev space of high regularity. We are interested in the massless case $m=0$ since the massive Dirac equation with cubic nonlinearities is easier to treat (see Remark \ref{s1: rem 2}). In a recent work \cite{DL}, the authors considered \eqref{s1: 1.1} with the nonlinearity $F(\psi)=(\psi^*\gamma^0\psi)\psi$ and compactly supported initial data, and established global existence and long-time dynamics including pointwise decay and scattering, using the hyperboloidal foliation of spacetime. Compared with \cite{DL}, the novelty of our results is that we remove the compactness assumption on the initial data and the structural condition on the nonlinearities. See also \cite{D21} for global existence results on two-dimensional coupled wave and Klein-Gordon equations with non-compactly supported initial data.
To conclude, we focus on the study of the following Cauchy problem in $\mathbb{R}^{1+2}$:
\begin{equation}\label{s1: 1.1s}
i\gamma^\mu\partial_\mu\psi=F(\psi)=(\psi^*H\psi)\psi,\quad\quad\psi(0,x)=\psi_0(x),
\end{equation}
for an arbitrary matrix $H\in\mathbb{C}^{2\times 2}$. From now on, we also denote the Dirac operator by
\begin{equation}\label{s1: Dirac}
\mathcal{D}:=i\gamma^\mu\partial_\mu.
\end{equation}
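For the reader's convenience we record the standard identity behind this reduction: by \eqref{s1: 1.3},
\begin{equation*}
\mathcal{D}^2=-\gamma^\mu\gamma^\nu\partial_\mu\partial_\nu=-\frac{1}{2}\left(\gamma^\mu\gamma^\nu+\gamma^\nu\gamma^\mu\right)\partial_\mu\partial_\nu=g^{\mu\nu}\partial_\mu\partial_\nu=\Box.
\end{equation*}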
\noindent${\mathbf{Major\ difficulties\ and\ key\ ideas.}}$ We apply Klainerman's vector field method \cite{K85,K86} to study the Dirac equation \eqref{s1: 1.1s}. Using the identity $\Box=\mathcal{D}^2$, we obtain
\begin{equation}\label{s1: boxd}
\Box \psi=\mathcal{D}^2\psi=\mathcal{D}F(\psi)=\mathcal{D}\left((\psi^*H\psi)\psi\right).
\end{equation}
We first note that the cubic nonlinearity in \eqref{s1: boxd} violates the standard null condition \cite{K86}. Since free linear waves in $\mathbb{R}^{1+2}$ decay at the rate $\langle t\rangle^{-\frac{1}{2}}$,
the identity \eqref{s1: boxd} implies that a linear massless Dirac field in $\mathbb{R}^{1+2}$ should have the same slow decay rate. Hence, the best decay rate we can expect for the $L^2$ norm of the nonlinearity in \eqref{s1: boxd} is
\begin{equation*}
\|\mathcal{D}\left((\psi^*H\psi)\psi\right)\|_{L^2_x}\lesssim \langle t\rangle^{-1},
\end{equation*}
which is the borderline nonintegrable rate. Hence the nonlinearities as in \eqref{s1: 1.4} under consideration may contribute to the long time behavior of the solution to \eqref{s1: 1.1s}. Let us recall the following wave equation in $\mathbb{R}^{1+3}$ with critical nonlinearity
\begin{equation*}
-\Box u=(\partial_tu)^2,\quad\quad (u,\partial_tu)|_{t=0}=(0,u_1)
\end{equation*}
for which $u_1$ is compactly supported. John \cite{J81} showed that nontrivial $C^3$ solutions to this equation blow up in finite time. On the other hand, under compactness assumption on the initial data and additional structure condition on the nonlinearity (i.e. $F(\psi)=(\psi^*\gamma^0\psi)\psi$), global existence and unified (in $m$) pointwise decay results for the Dirac equation \eqref{s1: 1.1} were established in \cite{DL}, where the authors use the hyperboloidal foliation method and make full use of the $\gamma^0$-structure of $F(\psi)$ to obtain better decay estimates of the solution. For non-compactly supported initial data and nonlinearities as in \eqref{s1: 1.4} without additional structures, our difficulties in using Klainerman's vector field method to study global existence for the problem \eqref{s1: 1.1s} include: $i)$ obtaining good decay estimate for the solution to close the bootstrap argument; $ii)$ dealing with non-compactly supported initial data (in which case the hyperboloidal foliation method cannot be used directly).
To conquer these difficulties, our key ideas include: $i)$ using the good commutative property of the scaling vector field $L_0$ with the Dirac operator. This means that we can use the full range of the (compatible) vector fields (denoted by $\hat{\Gamma}^I$ for any multi-indices $I$) and obtain the $\langle t-|x|\rangle$ decay of the solution $\psi$ by employing the classical Klainerman-Sobolev inequality; $ii)$ applying Alinhac's ghost weight energy method \cite{Al1} adapted to the Dirac equation: by a careful calculation when deriving the energy estimate, we obtain the $\gamma^0$-structure $F^*\gamma^0\psi$ (for nonlinearities $F$ as in \eqref{s1: 1.4}) even if $F$ does not necessarily have this structure itself. This idea of discovering the $\gamma^0$-structure is inspired by a recent work \cite{DLMY}, where the authors established global existence, sharp time decay and scattering results for the $2D$ Dirac-Klein-Gordon system with non-compactly supported initial data. By a delicate cancellation, we can further write
\begin{equation}\label{s1: Fgzpsi}
F^*\gamma^0\psi=[F]_{-}^*\gamma^0[\psi]_{+}+[F]_{+}^*\gamma^0[\psi]_{-},
\end{equation}
where $[\psi]_{\pm}=\left(I_2\pm\frac{x_a}{|x|}\gamma^0\gamma^a\right)\psi$ and similarly for $[F]_{\pm}$. The definition \eqref{s1: 1.4} then gives $[F]_{-}=(\psi^*H\psi)[\psi]_{-}$. That is, both terms in \eqref{s1: Fgzpsi} can be written roughly as $[\psi]_{-}\cdot|\psi|^3$. When acting the vector fields $\hat{\Gamma}^I$ on both sides of \eqref{s1: 1.1s} and applying the ghost energy estimate, we obtain the corresponding structure $(\hat{\Gamma}^IF)^*\gamma^0\hat{\Gamma}^I\psi$ which can be written roughly as
\begin{equation}\label{s1: gzFps}
\left([\hat{\Gamma}^I\psi]_{-}\cdot\psi\cdot\psi+[\psi]_{-}\cdot\hat{\Gamma}^I\psi\cdot\psi\right)\cdot \hat{\Gamma}^I\psi.
\end{equation}
For the estimate of the first term in \eqref{s1: gzFps}, we need to use the $\langle t-|x|\rangle$ decay of the solution $\psi$, which follows from the Klainerman-Sobolev inequality as stated above. Hence the key to closing the energy estimate is obtaining a good pointwise decay estimate of $[\psi]_{-}$ in \eqref{s1: gzFps}. For this, we adopt an idea due to Bournaveas \cite{Bo} and introduce a new function $\Psi$ which solves the wave equation
$$\Box\Psi=F,\quad\quad (\Psi,\partial_t\Psi)|_{t=0}=(0,-i\gamma^0\psi_0),$$
and find that we can roughly write $[\psi]_{-}$ as $G_a\Psi$, where $G_a$ denotes the good derivatives. By employing the $L^\infty$ estimate on the linear wave equation and using the $\langle t\rangle^{-1}$ decay of the good derivatives, we obtain a sufficient decay estimate of $[\psi]_{-}$.
\begin{comment}
In addition, by the relation $\psi=\mathcal{D}\Psi$ given by the equation of $\Psi$, we get an improved $\leftangle t-|x|\rightangle$ decay estimate of $\psi$, i.e. $|\psi|\leftesssim\leftangle t-|x|\rightangle^{-1}\leftangle t\rightangle^{-\frac{1}{2}+\delta}$, for any $\delta>0$ small.
\end{comment}
The main result is stated as follows.
\begin{thm}\label{s1: thm1}
Let $N\ge 3$ be an integer. Then there exists $\epsilon_0>0$ such that for all $0<\epsilon<\epsilon_0$ and all initial data $\psi_0$ satisfying the smallness condition
\begin{equation}\label{s1: psis}
\sum_{k\le N}\left(\|\langle |x|\rangle^{k+1}\nabla^k\psi_0\|_{L^1_x}+\|\langle |x|\rangle^{k+1}\nabla^k\psi_0\|_{L^2_x}\right)\le\epsilon,
\end{equation}
the Cauchy problem \eqref{s1: 1.1s} admits a global-in-time solution $\psi$, which satisfies the following pointwise decay estimate
\begin{equation*}
|\psi|\lesssim\epsilon\langle t+|x|\rangle^{-\frac{1}{2}}\langle t-|x|\rangle^{-\frac{1}{2}}.
\end{equation*}
\end{thm}
\begin{rem}\label{s1: rem 4}
Theorem \ref{s1: thm1} holds with a relaxed condition on the smallness of the initial data (choosing $N$ larger, for example $N\ge 5$), i.e.,
\begin{equation*}
\|\psi_0\|_{L^1_x}+\sum_{k\le N}\|\langle|x|\rangle^{k+1}\nabla^k\psi_0\|_{L^2_x}\le\epsilon.
\end{equation*}
See Appendix \ref{sB} for the proof.
\end{rem}
\begin{rem}\label{s1: rem3}
Theorem \ref{s1: thm1} also holds for nonlinearities $F=(\psi^*\gamma^0\psi)A\psi$, where $A\in\mathbb{C}^{2\times 2}$ is an arbitrary matrix. In this case, the expression of $F$ itself admits a $\gamma^0$-structure, and hence $F$ can be written roughly as $[\psi]_{-}|\psi|^2$ as stated in the paragraph above Theorem \ref{s1: thm1}.
\end{rem}
\begin{rem}\label{s1: rem 2}
For the massive Dirac equation
\begin{equation}\label{s1: 1.1m}
i\gamma^\mu\partial_\mu\psi+m\psi=F(\psi),\quad m>0
\end{equation}
with initial data \eqref{s1: 1.2} and nonlinearity $F$ as in \eqref{s1: 1.4}, assume without loss of generality that $m=1$. Acting the Dirac operator on both sides of \eqref{s1: 1.1m}, we find that $\psi$ solves the following Klein-Gordon equation
\begin{equation}\label{s1: 1.2m}
-\Box \psi+\psi=F(\psi)-i\gamma^\mu\partial_\mu F(\psi).
\end{equation}
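Indeed, applying $\mathcal{D}=i\gamma^\nu\partial_\nu$ to \eqref{s1: 1.1m} (with $m=1$) and using $\mathcal{D}^2=\Box$ together with $\mathcal{D}\psi=F(\psi)-\psi$ gives $\Box\psi+F(\psi)-\psi=i\gamma^\mu\partial_\mu F(\psi)$, which is exactly \eqref{s1: 1.2m}.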
Since the nonlinearity $F$ is cubic, using the decay estimates for the linear Klein-Gordon equation \cite{Ge} and a bootstrap argument, one can obtain global existence of the solution $\psi$ to \eqref{s1: 1.1m} with small, highly regular, non-compactly supported initial data, together with the sharp pointwise decay estimate
\begin{equation*}
|\psi|\lesssim\epsilon\langle t+|x|\rangle^{-1},
\end{equation*}
where $0<\epsilon\ll 1$ measures the size of the initial data.
\end{rem}
\begin{rem}\label{s1: rem1}
As mentioned above, for compactly supported initial data, the global existence and uniform (in the mass parameter $m\in[0,1]$) pointwise decay estimate for the Cauchy problem in $\mathbb{R}^{1+2}$
\begin{equation}\label{s1: 1.1mgz0}
i\gamma^\mu\partial_\mu\psi+m\psi=F(\psi)=(\psi^*\gamma^0\psi)\psi,\quad\quad\psi(t_0,x)=\psi_0(x),\quad t_0=2
\end{equation}
were established in \cite{DL}. Our method of treating more general nonlinearities as in \eqref{s1: 1.4} can be adapted to the hyperboloidal foliation case there, and then one can prove the global existence of the solution $\psi$ to the problem
\begin{equation}\label{s1: 1.1mH}
i\gamma^\mu\partial_\mu\psi+m\psi=F(\psi)=(\psi^*H\psi)\psi,\quad\quad\psi(t_0,x)=\psi_0(x)
\end{equation}
for an arbitrary matrix $H\in\mathbb{C}^{2\times 2}$ and small compactly supported data $\psi_0$, with the unified pointwise decay estimate
$$|\psi(t,x)|\lesssim\frac{\epsilon}{t^{\frac{1}{2}}(t-|x|)^{\frac{1}{2}}+mt}.$$
Namely, the unified (in $m\in[0,1]$) pointwise decay result in \cite[Theorem 1.1]{DL} can be generalized to cubic nonlinearities as in \eqref{s1: 1.4}. Indeed, the idea used in this paper (see also \cite[Lemma 2.6]{DLMY}) of discovering the $\gamma^0$-structure in deriving the ghost energy estimate can be used in proving the hyperboloidal energy estimate (\cite[Proposition 2.1]{DL}) for the Dirac equation (see also \cite[Lemma 2.2]{DLW} and \cite{DW}).
\end{rem}
Using the ghost weight energy estimate and the pointwise decay result given by Theorem \ref{s1: thm1}, together with the integral formula for the Dirac equation, we obtain the result below concerning the asymptotic behavior (in Sobolev spaces) of the global solution obtained in Theorem \ref{s1: thm1}. Precisely, the small global solution scatters as time tends to infinity, i.e.\ it tends to the solution of a linear Dirac equation in a Sobolev space of high regularity.
\begin{thm}\label{s1: thm2}
Let $N\ge 3, \epsilon_0>0$ be as in Theorem \ref{s1: thm1} and $\psi_0$ satisfy \eqref{s1: psis} with $0<\epsilon\le\epsilon_0$. Suppose $\psi$ is the global solution to \eqref{s1: 1.1s} with $F(\psi)=(\psi^*\gamma^0\psi)\psi$. Then the solution $\psi$ scatters linearly as $t\to+\infty$. More precisely, there exists some $\psi^+\in H^N(\mathbb{R}^2)$ such that
\begin{equation*}
\|\psi(t)-S(t)\psi^+\|_{H^N}\le C\langle t\rangle^{-\frac{1}{2}}\ln(2+t),\quad\forall t\ge 0
\end{equation*}
and
\begin{equation*}
\|\psi(t)-S(t)\psi^+\|_{H^{N-2}}\le C(t)\langle t\rangle^{-\frac{1}{2}}\ln(2+t)
\end{equation*}
for some $C(t)>0$ satisfying $\lim_{t\to+\infty}C(t)=0$, where $S(t):=e^{-t\gamma^0\gamma^a\partial_a}$ is the propagator for the linear Dirac equation (see Sect. \ref{s4} for the definition).
\end{thm}
\section{Preliminaries}\label{s2}
\subsection{Notation}\label{s2.1}
We work in the $(1+2)$-dimensional spacetime $\mathbb{R}^{1+2}$ with Minkowski metric $g=(-1,1,1)$, which is used to raise or lower indices. The space indices are denoted by Roman letters $a,b\in\{1,2\}$, and the spacetime indices are denoted by Greek letters $\mu,\nu,\alpha,\beta\in\{0,1,2\}$. The Einstein summation convention for repeated upper and lower indices is adopted throughout the paper. We denote a point in $\mathbb{R}^{1+2}$ by $(t,x)=(x_0,x_1,x_2)$ with $t=x_0,x=(x_1,x_2),x^a=x_a,a=1,2$, and its spatial radius is denoted by $r:=|x|=\sqrt{x_1^2+x_2^2}$. The following vector fields will be used frequently in the analysis:
\begin{itemize}
\item[(i)] Translations: $\partial_\alpha:=\partial_{x_\alpha}$, for $\alpha=0,1,2$.
\item[(ii)] Lorentz boosts: $L_a:=x_a\partial_t+t\partial_a$, for $a=1,2$.
\item[(iii)] Rotation: $\Omega_{12}:=x_1\partial_2-x_2\partial_1$.
\item[(iv)] Scaling: $L_0=t\partial_t+x^a\partial_a$.
\end{itemize}
We also use the modified Lorentz boosts and rotation, first introduced in \cite{Ba},
$$\hat{L}_a:=L_a-\frac{1}{2}\gamma^0\gamma^a,\quad\quad\hat{\Omega}_{12}:=\Omega_{12}-\frac{1}{2}\gamma^1\gamma^2,$$
which enjoy the following commutative property, i.e.
\begin{equation*}
[\hat{L}_a,\mathcal{D}]=[\hat{\Omega}_{12},\mathcal{D}]=0,
\end{equation*}
where the commutator $[A,B]$ is defined as
$$[A,B]:=AB-BA.$$
For the ordered sets
$$\{\Gamma_1,\Gamma_2,\cdots,\Gamma_7\}:=\{\partial_0, \partial_1, \partial_2, L_1, L_2, \Omega_{12}, L_0\}$$
and
$$\{\hat{\Gamma}_1,\hat{\Gamma}_2,\cdots,\hat{\Gamma}_7\}:=\{\partial_0, \partial_1, \partial_2, \hat{L}_1, \hat{L}_2, \hat{\Omega}_{12}, L_0\}$$
and any multi-index $I=(i_1,i_2,\dots,i_7)\in\mathbb{N}^7$ of length $|I|=\sum_{k=1}^7i_k$, we denote
$$\Gamma^I=\prod_{k=1}^7\Gamma_k^{i_k},\quad\mathrm{where}\quad\Gamma=(\Gamma_1,\Gamma_2,\dots,\Gamma_7)$$
and
$$\hat{\Gamma}^I=\prod_{k=1}^7\hat{\Gamma}_k^{i_k},\quad\mathrm{where}\quad\hat{\Gamma}=(\hat{\Gamma}_1,\hat{\Gamma}_2,\dots,\hat{\Gamma}_7).$$
We also introduce the good derivatives
\begin{equation*}
G_a=\frac{1}{r}(x_a\partial_t+r\partial_a),\quad\mathrm{for}\;a=1,2.
\end{equation*}
\subsection{Estimates on the vector fields and Sobolev inequalities}\label{s2.2}
We first recall the well-known relations
\begin{equation*}
[\Box, \Gamma_k]=0,\quad\mathrm{for}\ k=1,\dots,6,\quad\quad [\Box,L_0]=2\Box.
\end{equation*}
Also, by straightforward computation, we have
\begin{equation}\label{s2: Dhgz}
[\mathcal{D},\hat{\Gamma}_k]=0,\quad\mathrm{for}\ k=1,\dots,6,\quad\quad [\mathcal{D},L_0]=\mathcal{D}.
\end{equation}
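For the reader's convenience, we sketch the verification of the first identity in \eqref{s2: Dhgz} for $\hat{L}_a$ (the computation for $\hat{\Omega}_{12}$ is analogous). Since $[\partial_t,L_a]=\partial_a$, $[\partial_a,L_a]=\partial_t$ and $[\partial_b,L_a]=0$ for $b\neq a$, we have $[\mathcal{D},L_a]=i\gamma^0\partial_a+i\gamma^a\partial_t$. On the other hand, \eqref{s1: 1.3} gives $[\gamma^0,\gamma^0\gamma^a]=2\gamma^a$, $[\gamma^a,\gamma^0\gamma^a]=2\gamma^0$ (for fixed $a$, no summation) and $[\gamma^b,\gamma^0\gamma^a]=0$ for $b\neq a$, whence $[\mathcal{D},-\frac{1}{2}\gamma^0\gamma^a]=-i\gamma^a\partial_t-i\gamma^0\partial_a$. Adding the two contributions yields $[\mathcal{D},\hat{L}_a]=0$.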
\begin{lem}\label{s2: 2.2-4}
The following statements hold:
\begin{itemize}
\item[$i)$]
\begin{equation}\label{s2: Dhgz^I}
[\mathcal{D},\hat{\Gamma}^I]=\sum_{|I'|<|I|}c_{I'}\ \mathcal{D}\hat{\Gamma}^{I'}=\sum_{|J'|<|I|}c_{J'}\hat{\Gamma}^{J'}\mathcal{D}
\end{equation}
for some constants $c_{I'},c_{J'}$.
\item[$ii)$] Let $u=u(t,x):\mathbb{R}^{1+2}\to\mathbb{C}$ be a scalar field and $\Phi=\Phi(t,x):\mathbb{R}^{1+2}\to\mathbb{C}^2$ be a vector field. Then
\begin{equation}\label{s2: uPhi}
\hat{\Gamma}^I(u\Phi)=\sum_{I_1+I_2=I}(\Gamma^{I_1}u)(\hat{\Gamma}^{I_2}\Phi).
\end{equation}
\item[$iii)$]
\begin{equation}\label{s2: hgz^I}
\hat{\Gamma}^I\Phi=\Gamma^I\Phi+\sum_{|I'|<|I|}c_{I'}\Gamma^{I'}\Phi,\quad\quad\Gamma^J\Phi=\hat{\Gamma}^J\Phi+\sum_{|J'|<|J|}c_{J'}\hat{\Gamma}^{J'}\Phi
\end{equation}
for some constant matrices $c_{I'}, c_{J'}$.
\end{itemize}
\begin{proof}
$i)$ By \eqref{s2: Dhgz}, the equality \eqref{s2: Dhgz^I} holds for $|I|=1$. Assume by induction that \eqref{s2: Dhgz^I} holds for $|I|=l$ with $l\in\mathbb{N}$. For each fixed $|I|=l+1$, we can decompose $I$ into $I_1+I_2$ with $|I_1|=l$ and $|I_2|=1$. Hence,
\begin{eqnarray*}
[\mathcal{D},\hat{\Gamma}^{I_1+I_2}]&=&[\mathcal{D},\hat{\Gamma}^{I_1}]\hat{\Gamma}^{I_2}+\hat{\Gamma}^{I_1}[\mathcal{D},\hat{\Gamma}^{I_2}]\\
&=&\left(\sum_{|I'_1|<|I_1|}c_{I'_1}\mathcal{D}\hat{\Gamma}^{I'_1}\right)\hat{\Gamma}^{I_2}+c\hat{\Gamma}^{I_1}\mathcal{D}\\
&=&\sum_{|I'_1|<|I_1|}c_{I'_1}\mathcal{D}\hat{\Gamma}^{I'_1}\hat{\Gamma}^{I_2}+c \mathcal{D}\hat{\Gamma}^{I_1}+c\sum_{|J'_1|<|I_1|}c_{J'_1}\mathcal{D}\hat{\Gamma}^{J'_1}\\
&=&\sum_{|J'|<|I|}c_{J'}\mathcal{D}\hat{\Gamma}^{J'}
\end{eqnarray*}
for some constants $c_{I'_1},c,c_{J'_1},c_{J'}$. This gives the first equality in \eqref{s2: Dhgz^I}. The second equality can be proved similarly.
$ii)$ By straightforward computation,
\begin{equation*}
\hat{L}_a(u\Phi)=(L_au)\Phi+u(\hat{L}_a\Phi)
\end{equation*}
and similarly for $\hat{\Omega}_{12}$. Recall from the definitions in Sect. \ref{s2.1} that $\Gamma_k=\hat{\Gamma}_k$ for $k=1,2,3,7$. Hence we have
\begin{equation*}
\hat{\Gamma}_k(u\Phi)=(\Gamma_ku)\Phi+u(\hat{\Gamma}_k\Phi),\quad\mathrm{for}\;k=1,2,\dots,7.
\end{equation*}
Assume for some $l\in\mathbb{N}$ and any $|I|\le l$ we have
\begin{equation*}
\hat{\Gamma}^I(u\Phi)=\sum_{I_1+I_2=I}(\Gamma^{I_1}u)(\hat{\Gamma}^{I_2}\Phi).
\end{equation*}
For each fixed $|I|=l+1$, we write $I=I_1+I_2$ with $|I_1|=l$ and $|I_2|=1$. Then
\begin{eqnarray*}
\hat{\Gamma}^{I_2}\hat{\Gamma}^{I_1}(u\Phi)&=&\sum_{J_1+J_2=I_1}\hat{\Gamma}^{I_2}\left((\Gamma^{J_1}u)(\hat{\Gamma}^{J_2}\Phi)\right)\\
&=&\sum_{J_1+J_2=I_1}(\Gamma^{I_2+J_1}u)(\hat{\Gamma}^{J_2}\Phi)+(\Gamma^{J_1}u)(\hat{\Gamma}^{I_2+J_2}\Phi)\\
&=&\sum_{J_1+J_2=I}(\Gamma^{J_1}u)(\hat{\Gamma}^{J_2}\Phi).
\end{eqnarray*}
$iii)$ Note that \eqref{s2: hgz^I} holds for $|I|=1$. Assume that it holds for some $l\in\mathbb{N}$ and all $|I|\le l$. For each fixed $|I|=l+1$, we write $I=I_1+I_2$ as above, then
\begin{eqnarray*}
\hat{\Gamma}^{I_2}\hat{\Gamma}^{I_1}\Phi&=&\hat{\Gamma}^{I_2}\Gamma^{I_1}\Phi+\sum_{|I'_1|<|I_1|}c_{I'_1}\hat{\Gamma}^{I_2}\Gamma^{I'_1}\Phi\\
&=&\Gamma^{I_2}\Gamma^{I_1}\Phi+c\Gamma^{I_1}\Phi+\sum_{|I'_1|<|I_1|}c_{I'_1}\left(\Gamma^{I_2}\Gamma^{I'_1}\Phi+\tilde{c}_{I'_1}\Gamma^{I'_1}\Phi\right)\\
&=&\Gamma^I\Phi+\sum_{|I'|<|I|}c_{I'}\Gamma^{I'}\Phi
\end{eqnarray*}
for some constant matrices $c,c_{I'_1},\tilde{c}_{I'_1},c_{I'}$.
\end{proof}
\end{lem}
We next give the estimate below for the vector fields $\partial_\alpha$ and good derivatives $G_a$, which will be used in obtaining good pointwise decay result of the solution to \eqref{s1: 1.1s}.
\begin{lem}\label{s2: DMY2.1}
We have
\begin{equation*}
\langle t-r\rangle|\partial u|+\langle t+r\rangle|G_a u|\lesssim\sum_{|I|=1}|\Gamma^Iu|.
\end{equation*}
\begin{proof}
The estimate of $|\partial u|$ is well-known, see for example \cite{S}. For $|G_a u|$, we just use the equalities
\begin{equation*}
G_au=\frac{1}{r}\left(L_a u+(r-t)\partial_a u\right)=\frac{1}{t}\left(L_a u-\frac{x_a}{r}(r-t)\partial_t u\right),
\end{equation*}
which follow from a direct calculation.
\end{proof}
\end{lem}
Next we present the famous Klainerman-Sobolev inequality whose proof can be found in \cite{A,Ho,S}.
\begin{lem}(See \cite[Proposition 6.5.1]{Ho})\label{s2: K-S}
Let $u=u(t,x)$ be a sufficiently smooth function which decays sufficiently fast at space infinity for each fixed $t\ge 0$. Then for any $t\ge 0$ and $x\in\mathbb{R}^2$, we have
\begin{equation*}
\langle t+r\rangle^{\frac{1}{2}}\langle t-r\rangle^{\frac{1}{2}}|u(t,x)|\lesssim\sum_{|I|\le 2}\|\Gamma^Iu(t,\cdot)\|_{L^2_x}.
\end{equation*}
\end{lem}
\subsection{Reformulation of the solution to \eqref{s1: 1.1s}}\label{s2.3}
Below we reformulate the solution $\psi$ to the massless Dirac equation as the Dirac operator acting on the solution $\Psi$ to a linear wave equation. Then, by decomposing $\psi$ into two parts $[\psi]_{-}$ and $[\psi]_{+}$ and writing $[\psi]_{-}$ in terms of the good derivatives $G_a\Psi$, we obtain a good pointwise decay estimate for $[\psi]_{-}$. This is an important observation and will be used in closing the bootstrap estimate for the ghost weight energy (see Sect. \ref{ss3.1}).
For any $\mathbb{C}^2$-valued function $\Phi$, we define the Hermitian matrices
\begin{equation*}
T_{-}:=I_2-\frac{x_a}{r}\gamma^0\gamma^a,\quad\quad T_{+}:=I_2+\frac{x_a}{r}\gamma^0\gamma^a,
\end{equation*}
and let
\begin{equation}\label{s2.3: Phi-}
[\Phi]_{-}=T_{-}\Phi=\Phi-\frac{x_a}{r}\gamma^0\gamma^a\Phi,\quad\quad [\Phi]_{+}=T_{+}\Phi=\Phi+\frac{x_a}{r}\gamma^0\gamma^a\Phi.
\end{equation}
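We also note the following simple algebraic consequence of \eqref{s1: 1.3}, recorded here for the reader's convenience: since $\big(\frac{x_a}{r}\gamma^0\gamma^a\big)^2=I_2$, the matrices $T_{\pm}$ behave like (twice) complementary projections,
\begin{equation*}
T_{-}+T_{+}=2I_2,\qquad T_{-}T_{+}=T_{+}T_{-}=0,\qquad T_{\pm}^2=2T_{\pm}.
\end{equation*}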
\begin{lem}\label{s2: DL4.2}
The following statements hold:
\begin{itemize}
\item[$i)$] Let $\varphi,\Phi$ be two $\mathbb{C}^2$-valued functions, then
\begin{equation}\label{s2: Psi*Phi}
\varphi^{*}\gamma^0\Phi=\frac{[\varphi]^*_{-}\gamma^0[\Phi]_{+}+[\varphi]^*_{+}\gamma^0[\Phi]_{-}}{4}.
\end{equation}
\item[$ii)$] For any $\mathbb{C}^2$-valued function $\Phi$, let $\varphi:=\mathcal{D}\Phi$. Then
\begin{equation}\label{s2: Phi-}
[\varphi]_{-}=i\left(I_2-\frac{x_b}{r}\gamma^0\gamma^b\right)\gamma^\mu\partial_\mu\Phi=i\left(I_2-\frac{x_b}{r}\gamma^0\gamma^b\right)\gamma^aG_a\Phi.
\end{equation}
\end{itemize}
\begin{proof}
$i)$ We write
\begin{equation*}
\varphi=\frac{[\varphi]_{-}+[\varphi]_{+}}{2},\quad\quad \Phi=\frac{[\Phi]_{-}+[\Phi]_{+}}{2}.
\end{equation*}
By direct computation,
\begin{eqnarray}\label{s2: T-}
T_{-}\gamma^0T_{-}&=&\left(I_2-\frac{x_b}{r}\gamma^0\gamma^b\right)\gamma^0\left(I_2-\frac{x_a}{r}\gamma^0\gamma^a\right)\nonumber\\
&=&\gamma^0-\frac{x_b}{r}\gamma^0\gamma^b\gamma^0-\frac{x_a}{r}\gamma^0\gamma^0\gamma^a+\frac{x_ax_b}{r^2}\gamma^0\gamma^b\gamma^0\gamma^0\gamma^a\nonumber\\
&=&\gamma^0+\frac{x_a^2}{r^2}\gamma^0(\gamma^a)^2+\frac{x_1x_2}{r^2}\gamma^0\left(\gamma^1\gamma^2+\gamma^2\gamma^1\right)\nonumber\\
&=&0,
\end{eqnarray}
and similarly,
\begin{eqnarray}\label{s2: T+}
T_{+}\gamma^0T_{+}&=&\left(I_2+\frac{x_b}{r}\gamma^0\gamma^b\right)\gamma^0\left(I_2+\frac{x_a}{r}\gamma^0\gamma^a\right)\nonumber\\
&=&\gamma^0+\frac{x_b}{r}\gamma^0\gamma^b\gamma^0+\frac{x_a}{r}\gamma^0\gamma^0\gamma^a+\frac{x_ax_b}{r^2}\gamma^0\gamma^b\gamma^0\gamma^0\gamma^a\nonumber\\
&=&0.
\end{eqnarray}
It follows that
\begin{equation*}
[\varphi]_{-}^*\gamma^0[\Phi]_{-}=\varphi^*T_{-}\gamma^0T_{-}\Phi=0,\quad\quad [\varphi]_{+}^*\gamma^0[\Phi]_{+}=\varphi^*T_{+}\gamma^0T_{+}\Phi=0,
\end{equation*}
which implies $i)$.
$ii)$ Using the relation
\begin{equation*}
\partial_a=G_a-\frac{x_a}{r}\partial_t,
\end{equation*}
we can write
\begin{equation*}
\gamma^0\partial_t+\gamma^a\partial_a=\gamma^0\left(I_2-\frac{x_a}{r}\gamma^0\gamma^a\right)\partial_t+\gamma^aG_a.
\end{equation*}
By \eqref{s2: T-},
\begin{equation*}
\left(I_2-\frac{x_b}{r}\gamma^0\gamma^b\right)\gamma^0\left(I_2-\frac{x_a}{r}\gamma^0\gamma^a\right)=T_{-}\gamma^0T_{-}=0,
\end{equation*}
hence \eqref{s2: Phi-} holds.
\end{proof}
\end{lem}
Let $\psi$ be the solution to \eqref{s1: 1.1s}. We choose $\Psi$ which solves the problem
\begin{equation}\label{s3: DL5.3}
\Box\Psi=\mathcal{D}\psi=F(\psi),\quad\quad \left(\Psi,\partial_t\Psi\right)|_{t=0}=\left(0,-i\gamma^0\psi_0\right),
\end{equation}
where $F$ is as in \eqref{s1: 1.1s}. Then
\begin{equation}\label{s2: psi=dPsi}
\psi=\mathcal{D}\Psi=i\gamma^\mu\partial_\mu\Psi,
\end{equation}
since $\varphi:=\psi-\mathcal{D}\Psi$ verifies
$$\mathcal{D}\varphi=0,\quad\quad\varphi(0,x)=0.$$
Using \eqref{s2: Phi-} we can write
\begin{equation}\label{s2: psi-}
[\psi]_{-}=i\left(I_2-\frac{x_b}{r}\gamma^0\gamma^b\right)\gamma^aG_a\Psi.
\end{equation}
In addition, we have the following estimate on $|[\hat{\Gamma}^I\psi]_{-}|$.
\begin{lem}\label{s2: lem psi-}
Let $\psi$ be the solution to \eqref{s1: 1.1s} and $\Psi$ be chosen to satisfy \eqref{s3: DL5.3}. Then we have
\begin{equation*}
|[\hat{\Gamma}^I\psi]_{-}|\lesssim\langle t+r\rangle^{-1}\sum_{|I'|\le|I|+1}|\Gamma^{I'}\Psi|\lesssim\langle t-r\rangle^{-\frac{1}{2}}\langle t+r\rangle^{-\frac{3}{2}}\sum_{|I'|\le |I|+3}\|\Gamma^{I'}\Psi\|_{L^2_x}
\end{equation*}
and
\begin{equation*}
|\hat{\Gamma}^I\psi|\lesssim\langle t-r\rangle^{-1}\sum_{|I'|\le|I|+1}|\Gamma^{I'}\Psi|\lesssim\langle t-r\rangle^{-\frac{3}{2}}\langle t+r\rangle^{-\frac{1}{2}}\sum_{|I'|\le |I|+3}\|\Gamma^{I'}\Psi\|_{L^2_x}.
\end{equation*}
\begin{proof}
Acting the vector field $\hat{\Gamma}^I$ on both sides of \eqref{s2: psi=dPsi} and using \eqref{s2: Dhgz^I}, we obtain
\begin{equation*}
\hat{\Gamma}^I\psi=\mathcal{D}\hat{\Gamma}^I\Psi+\sum_{|I'|<|I|}c_{I'}\ \mathcal{D}\hat{\Gamma}^{I'}\Psi
\end{equation*}
for some constants $c_{I'}$. Hence \eqref{s2: Phi-} implies
\begin{equation}\label{s2: hgz^Ipsi-}
[\hat{\Gamma}^I\psi]_{-}=i\left(I_2-\frac{x_b}{r}\gamma^0\gamma^b\right)\left(\gamma^aG_a\hat{\Gamma}^I\Psi+\sum_{|I'|<|I|}c_{I'}\gamma^aG_a\hat{\Gamma}^{I'}\Psi\right).
\end{equation}
By Lemma \ref{s2: DMY2.1} and \eqref{s2: hgz^Ipsi-}, we have
\begin{equation*}
|[\hat{\Gamma}^I\psi]_{-}|\lesssim\sum_{|I'|\le|I|}|G_a\hat{\Gamma}^{I'}\Psi|\lesssim\langle t+r\rangle^{-1}\sum_{|I'|\le|I|,|J|=1}|\Gamma^J\hat{\Gamma}^{I'}\Psi|\lesssim\langle t+r\rangle^{-1}\sum_{|K|\le|I|+1}|\Gamma^K\Psi|
\end{equation*}
and
\begin{equation*}
|\hat{\Gamma}^I\psi|\lesssim\sum_{|I'|\le|I|}|\partial\hat{\Gamma}^{I'}\Psi|\lesssim\langle t-r\rangle^{-1}\sum_{|I'|\le|I|,|J|=1}|\Gamma^J\hat{\Gamma}^{I'}\Psi|\lesssim\langle t-r\rangle^{-1}\sum_{|K|\le|I|+1}|\Gamma^K\Psi|,
\end{equation*}
where we use \eqref{s2: hgz^I} in these inequalities. By Lemma \ref{s2: K-S},
\begin{equation*}
|\Gamma^K\Psi|\lesssim\langle t-r\rangle^{-\frac{1}{2}}\langle t+r\rangle^{-\frac{1}{2}}\sum_{|J|\le 2}\|\Gamma^J\Gamma^K\Psi\|_{L^2_x}\lesssim\langle t-r\rangle^{-\frac{1}{2}}\langle t+r\rangle^{-\frac{1}{2}}\sum_{|K'|\le |K|+2}\|\Gamma^{K'}\Psi\|_{L^2_x}.
\end{equation*}
The conclusion follows from the last three estimates.
\end{proof}
\end{lem}
Recall that $\Psi$ is the solution to \eqref{s3: DL5.3}. The lemma below gives an $L^\infty$ estimate for the $2D$ linear wave equation.
\begin{lem}(See \cite[Theorems 4.6.1, 4.6.2]{LZ})\label{s2: DMY2.78}
Let $u$ be the solution to the Cauchy problem in $\mathbb{R}^{1+2}$
\begin{equation*}
\left\{\begin{array}{rcl}
-\Box u(t,x)&=&f(t,x),\\
(u,\partial_tu)|_{t=0}&=&(u_0,u_1).
\end{array}\right.
\end{equation*}
Then we have
\begin{equation*}
\|u(t,x)\|_{L^\infty_x}\lesssim\langle t\rangle^{-\frac{1}{2}}\Bigg\{\|u_0\|_{W^{2,1}}+\|u_1\|_{W^{1,1}}+
\sum_{|I|\le 1}\int_0^t(1+\tau)^{-\frac{1}{2}}\|\Gamma^If(\tau,x)\|_{L^1_x}\mathrm{d}\tau\Bigg\}.
\end{equation*}
\end{lem}
\section{Proof of Theorem \ref{s1: thm1}}\label{s3}
\subsection{Ghost weight energy estimate}\label{ss3.1}
To treat general nonlinearities as in \eqref{s1: 1.4} without additional structure, we prove the ghost weight energy estimate below, which generates the $\gamma^0$ structure $F^*\gamma^0\psi$ on the right-hand side. This is a key advantage: we can decompose $F^*\gamma^0\psi$ into terms involving $[\psi]_{-}$ (using Lemma \ref{s2: DL4.2}) and then apply the decay estimates of Lemmas \ref{s2: lem psi-} and \ref{s2: DMY2.78} to close the ghost energy estimate in the bootstrap argument.
\begin{prop}\label{s3: Ghost}
Let $\psi$ be the solution to the Dirac equation \eqref{s1: 1.1s}. Then for any $t\ge 0$, we have the following ghost weight energy estimate:
\begin{equation*}
E^D_{gst}(t,\psi)\lesssim \|\psi(0)\|_{L^2_x}^2+\int_0^t\|F^*\gamma^0\psi\|_{L^1_x}\mathrm{d}\tau,
\end{equation*}
where
\begin{equation}\label{s3: G1''}
E^D_{gst}(t,\psi):=\|\psi(t)\|_{L^2_x}^2+\int_0^t\left\|\frac{[\psi]_-}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\mathrm{d}\tau.
\end{equation}
Here $[\psi]_-=\psi-\frac{x_a}{r}\gamma^0\gamma^a\psi$ is defined as in \eqref{s2.3: Phi-} and $\delta>0$ is a constant.
\begin{proof}
Let $q(t,x)=\tilde{q}(r-t)$, where
\begin{equation*}
\tilde{q}(s)=\int_{-\infty}^s\frac{1}{\langle\tau\rangle^{1+\delta}}\mathrm{d}\tau.
\end{equation*}
Multiplying both sides of \eqref{s1: 1.1} on the left by $-ie^q\psi^*\gamma^0$, we obtain
\begin{equation*}
e^q\psi^*\partial_t\psi+e^q\psi^*\gamma^0\gamma^a\partial_a\psi=-ie^q\psi^*\gamma^0F.
\end{equation*}
Taking the complex conjugate of the last equality, we find that
\begin{equation*}
e^q\partial_t\psi^*\psi+e^q\partial_a\psi^*\gamma^0\gamma^a\psi=ie^qF^*\gamma^0\psi.
\end{equation*}
Summing the above two equalities and using the Leibniz rule, we derive
\begin{equation}\label{s3: DL4.3}
\partial_t\left(e^q\psi^*\psi\right)+\partial_a\left(e^q\psi^*\gamma^0\gamma^a\psi\right)-(\partial_tq) e^q\psi^*\psi-(\partial_aq) e^q\psi^*\gamma^0\gamma^a\psi=G,
\end{equation}
where
\begin{equation*}
G:=ie^q\left(F^*\gamma^0\psi-\psi^*\gamma^0F\right).
\end{equation*}
Substituting the relations
\begin{equation*}
\partial_tq=-\tilde{q}'(r-t)=-\frac{1}{\langle r-t\rangle^{1+\delta}},\quad\quad\partial_aq=\tilde{q}'(r-t)\frac{x_a}{r}=\frac{1}{\langle r-t\rangle^{1+\delta}}\frac{x_a}{r}
\end{equation*}
into \eqref{s3: DL4.3}, we get
\begin{equation}\label{s3: DL4.3'}
\partial_t\left(e^q\psi^*\psi\right)+\partial_a\left(e^q\psi^*\gamma^0\gamma^a\psi\right)+\frac{e^q}{\langle r-t\rangle^{1+\delta}}\left(\psi^*\psi-\frac{x_a}{r}\psi^*\gamma^0\gamma^a\psi\right)=G.
\end{equation}
By straightforward computation,
\begin{equation*}
\left(\psi-\frac{x_a}{r}\gamma^0\gamma^a\psi\right)^*\left(\psi-\frac{x_a}{r}\gamma^0\gamma^a\psi\right)=2\left(\psi^*\psi-\frac{x_a}{r}\psi^*\gamma^0\gamma^a\psi\right).
\end{equation*}
Hence \eqref{s3: DL4.3'} yields
\begin{equation}\label{s3: DL4.3''}
\partial_t\left(e^q\psi^*\psi\right)+\partial_a\left(e^q\psi^*\gamma^0\gamma^a\psi\right)+\frac{e^q}{2\langle r-t\rangle^{1+\delta}}\left(\psi-\frac{x_a}{r}\gamma^0\gamma^a\psi\right)^*\left(\psi-\frac{x_a}{r}\gamma^0\gamma^a\psi\right)=G.
\end{equation}
Integrating \eqref{s3: DL4.3''} over $\mathbb{R}^2$, we obtain
\begin{equation}\label{s3: G1}
\partial_t\|e^{q/2}\psi\|_{L^2_x}^2+\frac{1}{2}\left\|e^{q/2}\frac{\psi-\frac{x_a}{r}\gamma^0\gamma^a\psi}{\langle r-t\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\le 2\|e^qF^*\gamma^0\psi\|_{L^1_x}.
\end{equation}
Integrating \eqref{s3: G1} over $[0,t]$, we further obtain
\begin{equation}\label{s3: G1'}
E^D_{gst}(t,\psi)\lesssim \|\psi(0)\|_{L^2_x}^2+\int_0^t\|F^*\gamma^0\psi\|_{L^1_x}\mathrm{d}\tau,
\end{equation}
where we use that $e^q\sim 1$.
\end{proof}
\end{prop}
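The identities for $\partial_tq$ and $\partial_aq$ used in the proof follow from the chain rule. As a quick symbolic sanity check (not part of the proof), the sympy sketch below verifies them for the concrete instance $\delta=1$, i.e.\ $\tilde q(s)=\arctan s$, so that $\tilde q'(s)=\langle s\rangle^{-2}$; the variable names are hypothetical.
\begin{verbatim}
import sympy as sp

t, x1, x2 = sp.symbols('t x1 x2', positive=True)
r = sp.sqrt(x1**2 + x2**2)

# concrete instance of the ghost weight with delta = 1:
# qtilde(s) = arctan(s), so qtilde'(s) = 1/(1+s^2) = <s>^{-2}
qtilde = lambda s: sp.atan(s)
qtilde_prime = lambda s: 1 / (1 + s**2)

q = qtilde(r - t)

# expected: d_t q = -qtilde'(r-t),  d_{x_a} q = (x_a/r) qtilde'(r-t)
print(sp.simplify(sp.diff(q, t) + qtilde_prime(r - t)))           # 0
print(sp.simplify(sp.diff(q, x1) - (x1/r)*qtilde_prime(r - t)))   # 0
print(sp.simplify(sp.diff(q, x2) - (x2/r)*qtilde_prime(r - t)))   # 0
\end{verbatim}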
\subsection{Proof of Theorem \ref{s1: thm1}}\label{ss3.2}
\noindent{$\mathbf{Bootstrap\ setting.}$} Let the assumptions in Theorem \ref{s1: thm1} hold. Fix $N\ge 3$ and $0<\delta\ll 1$. By the local well-posedness theory for the Dirac equation, there exist constants $C_0>0$ and $T>0$ (small) such that \eqref{s1: 1.1s} admits a solution on $[0,T)$ with
\begin{equation*}
\sum_{|I|\le N}[E^D_{gst}(0,\hat{\Gamma}^I\psi)]^{\frac{1}{2}}+\sum_{|I|\le N-2}\|\hat{\Gamma}^I\psi(0,x)\|_{L^\infty_x}\le C_0\epsilon.
\end{equation*}
In addition, let $\Psi$ be the solution to \eqref{s3: DL5.3}, i.e.,
\begin{equation}\label{s3: DL5.3'}
\Box\Psi=F(\psi)=(\psi^*H\psi)\psi,\quad\quad \left(\Psi,\partial_t\Psi\right)|_{t=0}=\left(0,-i\gamma^0\psi_0\right),
\end{equation}
where $H$ is as in \eqref{s1: 1.1s}. Then we also have
\begin{equation*}
\sum_{|I|\le N-1}\left(\|\Gamma^I\Psi(0)\|_{W^{2,1}}+\|\partial_t\Gamma^I\Psi(0)\|_{W^{1,1}}\right)\le C_0\epsilon.
\end{equation*}
Let $C_1\gg 1$ and $0<\epsilon_0\ll C_1^{-1}$ be chosen later. We assume the following bootstrap setting:
\begin{equation}\label{s3: bs}
\sum_{|I|\le N}[E^D_{gst}(t,\hat{\Gamma}^I\psi)]^{\frac{1}{2}}+\sum_{|I|\le N-2}\langle t\rangle^{\frac{3}{2}-\delta}|[\hat{\Gamma}^I\psi]_{-}|\le C_1\epsilon,
\end{equation}
where $\epsilon\le\epsilon_0$ measures the size of the initial data, and (see \eqref{s3: G1''})
\begin{equation}\label{s3: ghost}
E^D_{gst}(t,\hat{\Gamma}^I\psi):=\|\hat{\Gamma}^I\psi(t)\|_{L^2_x}^2+\int_0^t\left\|\frac{[\hat{\Gamma}^I\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\mathrm{d}\tau.
\end{equation}
Define
\begin{equation}\label{s3: T_*}
T_*=\sup\{t\in(0,\infty): \psi\ \mathrm{satisfies\ }\eqref{s3: bs}\ \mathrm{on}\ [0,t]\}.
\end{equation}
Theorem \ref{s1: thm1} follows from the result below.
\begin{prop}\label{s3: prop 1}
For all initial data $\psi_0$ satisfying the assumption in Theorem \ref{s1: thm1}, we have $T_*=\infty$.
\end{prop}
Below we give the proof of Proposition \ref{s3: prop 1}. We emphasize that the implied constants in $\lesssim$ do not depend on the constants $C_1$ and $\epsilon$ appearing in the bootstrap assumption \eqref{s3: bs}.
We first observe that, by the bootstrap setting \eqref{s3: bs} and Lemma \ref{s2: K-S}, the estimate
\begin{equation}\label{s3: gzpsiinf}
\langle t+r\rangle^{\frac{1}{2}}\langle t-r\rangle^{\frac{1}{2}}\sum_{|I'|\le N-2}|\Gamma^{I'}\psi(t,x)|\lesssim\sum_{|I|\le N}\|\Gamma^I\psi(t)\|_{L^2_x}\lesssim C_1\epsilon
\end{equation}
holds true, where we use \eqref{s2: hgz^I} in the last inequality. \\
\noindent$\textit{Proof\ of\ Proposition\ \ref{s3: prop 1}:}$ We divide the proof into two steps.\\
\noindent{$\mathbf{Step\ 1.}$} Refining the estimate of $E^D_{gst}(t,\hat{\Gamma}^I\psi)$. Applying the vector fields $\hat{\Gamma}^I$ to both sides of \eqref{s1: 1.1s} and using \eqref{s2: Dhgz^I}, we obtain
\begin{equation}\label{s3: Dhgzpsi}
\mathcal{D}\hat{\Gamma}^I\psi=\hat{\Gamma}^IF+\sum_{|I'|<|I|}c_{I'}\hat{\Gamma}^{I'}F
\end{equation}
for some constants $c_{I'}$. Applying Proposition \ref{s3: Ghost} to $\hat{\Gamma}^I\psi$ with $|I|\le N$ and using \eqref{s3: Dhgzpsi}, we obtain
\begin{equation*}
E^D_{gst}(t,\hat{\Gamma}^I\psi)\lesssim \|\hat{\Gamma}^I\psi(0)\|_{L^2_x}^2+\sum_{|I'|\le|I|}\int_0^t\|(\hat{\Gamma}^{I'}F)^*\gamma^0\hat{\Gamma}^I\psi\|_{L^1_x}\mathrm{d}\tau.
\end{equation*}
For each $|I'|\le|I|$, using Lemma \ref{s2: DL4.2}, we have
\begin{eqnarray}\label{s3: Fgz0psi}
|(\hat{\Gamma}^{I'}F)^*\gamma^0\hat{\Gamma}^I\psi|&\lesssim&\left|[\hat{\Gamma}^{I'}F]^*_{-}\gamma^0[\hat{\Gamma}^I\psi]_{+}+[\hat{\Gamma}^{I'}F]^*_{+}\gamma^0[\hat{\Gamma}^I\psi]_{-}\right|\nonumber\\
&\lesssim&\left|\langle r-\tau\rangle^{\frac{1+\delta}{2}}\hat{\Gamma}^{I'}F\right|\cdot\left|\frac{[\hat{\Gamma}^I\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right|+|[\hat{\Gamma}^{I'}F]_{-}|\cdot|\hat{\Gamma}^I\psi|.
\end{eqnarray}
By \eqref{s2: uPhi}, we have
\begin{equation}\label{s3: hgzF}
\hat{\Gamma}^{I'}F=\hat{\Gamma}^{I'}\left((\psi^*H\psi)\psi\right)=\sum_{I'_1+I'_2=I'}(\Gamma^{I'_1}(\psi^*H\psi))(\hat{\Gamma}^{I'_2}\psi)=\sum_{I'_1+I'_2+I'_3=I'}(\Gamma^{I'_1}\psi)^*H(\Gamma^{I'_2}\psi)(\hat{\Gamma}^{I'_3}\psi),
\end{equation}
which implies
\begin{equation}\label{s3: hgzF-}
[\hat{\Gamma}^{I'}F]_{-}=\sum_{I'_1+I'_2+I'_3=I'}(\Gamma^{I'_1}\psi)^*H(\Gamma^{I'_2}\psi)[\hat{\Gamma}^{I'_3}\psi]_{-}.
\end{equation}
Substituting \eqref{s3: hgzF} and \eqref{s3: hgzF-} into \eqref{s3: Fgz0psi}, using \eqref{s2: hgz^I}, and recalling that $|I|\le N$, we get
\begin{eqnarray}\label{s3: Fgz0psi'}
|(\hat{\Gamma}^{I'}F)^*\gamma^0\hat{\Gamma}^I\psi|&\lesssim&\sum_{|I_1|+|I_2|+|I_3|\le|I|}\langle r-\tau\rangle^{\frac{1+\delta}{2}}|\Gamma^{I_1}\psi|\cdot|\Gamma^{I_2}\psi|\cdot|\Gamma^{I_3}\psi|\cdot\left|\frac{[\hat{\Gamma}^I\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right|\nonumber\\
&+&\sum_{\substack{|I_1|+|I_2|+|I_3|\le|I|,\\|J|\le |I|}}|\Gamma^{I_1}\psi|\cdot|\Gamma^{I_2}\psi|\cdot|[\hat{\Gamma}^{I_3}\psi]_{-}|\cdot|\Gamma^J\psi|\nonumber\\
&\lesssim&\sum_{\substack{|I_1|,|I_2|\le N-2,\\|I_3|\le N}}\|\langle r-\tau\rangle^{\frac{1+\delta}{2}}\Gamma^{I_1}\psi\|_{L^\infty_x}\cdot\|\Gamma^{I_2}\psi\|_{L^\infty_x}\cdot|\Gamma^{I_3}\psi|\cdot\left|\frac{[\hat{\Gamma}^I\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right|\nonumber\\
&+&\sum_{\substack{|I_1|,|I_2|\le N-2,\\|I_3|,|J|\le N}}\|\langle r-\tau\rangle^{\frac{1+\delta}{2}}\Gamma^{I_1}\psi\|_{L^\infty_x}\cdot\|\Gamma^{I_2}\psi\|_{L^\infty_x}\cdot\left|\frac{[\hat{\Gamma}^{I_3}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right|\cdot|\Gamma^J\psi|\nonumber\\
&+&\sum_{\substack{|I_2|,|I_3|\le N-2,\\|I_1|,|J|\le N}}\|\Gamma^{I_2}\psi\|_{L^\infty_x}\cdot\|[\hat{\Gamma}^{I_3}\psi]_{-}\|_{L^\infty_x}\cdot|\Gamma^{I_1}\psi|\cdot|\Gamma^J\psi|\nonumber\\
&\lesssim&(C_1\epsilon)^2\sum_{|I_1|,|I_2|\le N}\left(\langle \tau\rangle^{-1+\frac{\delta}{2}}|\Gamma^{I_1}\psi|\cdot\left|\frac{[\hat{\Gamma}^{I_2}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right|+\langle \tau\rangle^{-2+\delta}|\Gamma^{I_1}\psi|\cdot|\Gamma^{I_2}\psi|\right),
\end{eqnarray}
where we use \eqref{s3: gzpsiinf}, the bootstrap setting \eqref{s3: bs}, and recall that $N\ge 3$. It follows that
\begin{eqnarray*}
&&\|(\hat{\Gamma}^{I'}F)^*\gamma^0\hat{\Gamma}^I\psi\|_{L^1_x}\\
&\lesssim&(C_1\epsilon)^2\sum_{|I_1|,|I_2|\le N}\left(\langle \tau\rangle^{-1+\frac{\delta}{2}}\|\Gamma^{I_1}\psi\|_{L^2_x}\cdot\left\|\frac{[\hat{\Gamma}^{I_2}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}+\langle \tau\rangle^{-2+\delta}\|\Gamma^{I_1}\psi\|_{L^2_x}\cdot\|\Gamma^{I_2}\psi\|_{L^2_x}\right)\\
&\lesssim&(C_1\epsilon)^3\sum_{|J|\le N}\left(\langle \tau\rangle^{-1+\frac{\delta}{2}}\left\|\frac{[\hat{\Gamma}^{J}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}+C_1\epsilon\langle \tau\rangle^{-2+\delta}\right),
\end{eqnarray*}
where we use \eqref{s2: hgz^I} and the bootstrap assumption \eqref{s3: bs}. Using \eqref{s3: bs} again, we obtain
\begin{eqnarray*}
&&\sum_{|I'|\le|I|}\int_0^t\|(\hat{\Gamma}^{I'}F)^*\gamma^0\hat{\Gamma}^I\psi\|_{L^1_x}\mathrm{d}\tau\\
&\lesssim&(C_1\epsilon)^3\sum_{|J|\le N}\left[\left(\int_0^t\langle \tau\rangle^{-2+\delta}\mathrm{d}\tau\right)^{\frac{1}{2}}\left(\int_0^t\left\|\frac{[\hat{\Gamma}^{J}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|^2_{L^2_x}\mathrm{d}\tau\right)^{\frac{1}{2}}+C_1\epsilon\int_0^t\langle \tau\rangle^{-2+\delta}\mathrm{d}\tau\right]\\
&\lesssim&(C_1\epsilon)^4.
\end{eqnarray*}
We conclude that
\begin{equation*}
\sum_{|I|\le N}[E^D_{gst}(t,\hat{\Gamma}^I\psi)]^{\frac{1}{2}}\le C_0\epsilon+C(C_1\epsilon)^2.
\end{equation*}
This strictly improves the bootstrap estimate of $E^D_{gst}(t,\hat{\Gamma}^I\psi)$ in \eqref{s3: bs} for $C_1$ sufficiently large and $\epsilon$ sufficiently small (choose $C_1\ge 8C_0$ and then $C(C_1\epsilon)\le 1/8$).
\noindent{$\mathbf{Step\ 2.}$} Refining the estimate of $[\hat{\Gamma}^I\psi]_{-}$. Let $\Psi$ be chosen to satisfy \eqref{s3: DL5.3'}. Applying the vector fields $\Gamma^I$, $|I|\le N-1$, to both sides of \eqref{s3: DL5.3'}, we derive
\begin{equation}\label{s3: bgzIPsi}
\Box \Gamma^I\Psi=\Gamma^IF+\sum_{|J'|<|I|}c_{J'}\Gamma^{J'}F
\end{equation}
for some constants $c_{J'}$. By Lemma \ref{s2: DMY2.78}, we have
\begin{eqnarray}\label{s3: gPsiB}
\|\Gamma^I\Psi(t)\|_{L^\infty_x}&\lesssim&\langle t\rangle^{-\frac{1}{2}}\left(\|\Gamma^I\Psi(0)\|_{W^{2,1}}+\|\partial_t\Gamma^I\Psi(0)\|_{W^{1,1}}+\sum_{|I'|\le|I|+1}\int_0^t\langle\tau\rangle^{-\frac{1}{2}}\|\Gamma^{I'}F(\tau)\|_{L^1_x}\mathrm{d}\tau\right).
\end{eqnarray}
Since
\begin{equation*}
\Gamma^{I'}F=\Gamma^{I'}\left((\psi^*H\psi)\psi\right)=\sum_{I'_1+I'_2=I'}(\Gamma^{I'_1}(\psi^*H\psi))(\Gamma^{I'_2}\psi)=\sum_{I'_1+I'_2+I'_3=I'}(\Gamma^{I'_1}\psi)^*H(\Gamma^{I'_2}\psi)(\Gamma^{I'_3}\psi),
\end{equation*}
we have
\begin{eqnarray*}
\|\Gamma^{I'}F(\tau)\|_{L^1_x}&\lesssim&\sum_{|I_1|+|I_2|+|I_3|\le N}\||\Gamma^{I_1}\psi|\cdot|\Gamma^{I_2}\psi|\cdot|\Gamma^{I_3}\psi|\|_{L^1_x}\\
&\lesssim&\sum_{\substack{|I_1|\le N-2,\\|I_2|,|I_3|\le N}}\|\Gamma^{I_1}\psi\|_{L^\infty_x}\|\Gamma^{I_2}\psi\|_{L^2_x}\|\Gamma^{I_3}\psi\|_{L^2_x}\lesssim (C_1\epsilon)^3\langle\tau\rangle^{-\frac{1}{2}},
\end{eqnarray*}
where we use \eqref{s3: gzpsiinf} and \eqref{s3: bs} in the last inequality. It follows that
\begin{equation*}
\|\Gamma^I\Psi(t)\|_{L^\infty_x}\le\left(C_0\epsilon+C(C_1\epsilon)^3\ln(2+t)\right)\langle t\rangle^{-\frac{1}{2}},\quad\mathrm{for\;any\;}|I|\le N-1.
\end{equation*}
By Lemma \ref{s2: lem psi-}, we have
\begin{equation}\label{s3.2: gpsi-}
\sum_{|I'|\le N-2}|[\hat{\Gamma}^{I'}\psi]_{-}|\lesssim\langle t\rangle^{-1}\sum_{|I|\le N-1}\|\Gamma^{I}\Psi\|_{L^\infty_x}
\le C\left(C_0\epsilon+C(C_1\epsilon)^3\right)\langle t\rangle^{-\frac{3}{2}}\ln(2+t).
\end{equation}
Note that we also obtain from Lemma \ref{s2: lem psi-} that
\begin{equation}\label{s3.2: gpsi}
\sum_{|I'|\le N-2}|\hat{\Gamma}^{I'}\psi|\lesssim\langle t-r\rangle^{-1}\sum_{|I|\le N-1}\|\Gamma^{I}\Psi\|_{L^\infty_x}
\lesssim\left(\epsilon+(C_1\epsilon)^3\right)\langle t-r\rangle^{-1}\langle t\rangle^{-\frac{1}{2}}\ln(2+t).
\end{equation}
In particular, the estimate \eqref{s3.2: gpsi-} strictly improves the bootstrap estimate of $\sum_{|I|\le N-2}|[\hat{\Gamma}^I\psi]_{-}|$ in \eqref{s3: bs} for $C_1$ sufficiently large and $\epsilon$ sufficiently small (choose $C_1\ge 8CC_0$ and then $C^2(C_1\epsilon)^2\le 1/8$).
In conclusion, for all initial data $\psi_0$ satisfying the assumption in Theorem \ref{s1: thm1}, we have shown that $T_*=\infty$, and hence the proof of Proposition \ref{s3: prop 1} is complete.
\section{Scattering for the Dirac field}\label{s4}
In this section we briefly discuss the scattering of the Dirac equation \eqref{s1: 1.1s}. We show that for the nonlinearity $F=(\psi^*\gamma^0\psi)\psi$, the Dirac field $\psi$ scatters linearly in Sobolev spaces of high regularity.
We need the following result from \cite[Theorem 4.4]{DL}, which provides a sufficient condition for the linear scattering of the Dirac equation.
\begin{lem}\label{s4: DL4.4}
Let $\psi$ be the global solution to the Dirac equation \eqref{s1: 1.1s}, where $\psi_0\in H^N(\mathbb{R}^2)$ for some $N\in\mathbb{N}$. Suppose that
\begin{equation}\label{s4: wd}
\int_0^{+\infty}\|F(\tau)\|_{H^N}\mathrm{d}\tau<\infty,
\end{equation}
then there exists some $\psi^+\in H^N(\mathbb{R}^2)$ satisfying
\begin{equation}\label{s4: psi-psi+}
\|\psi(t)-S(t)\psi^+\|_{H^N}\le C\int_t^{+\infty}\|F(\tau)\|_{H^N}\mathrm{d}\tau,\quad\mathrm{as}\ t\to+\infty,
\end{equation}
where $S(t):=e^{-t\gamma^0\gamma^a\partial_a}$ is the propagator for the linear Dirac equation, i.e., for any $\mathbb{C}^2$-valued function $f$,
\begin{equation*}
S(t)f=\mathcal{F}^{-1}e^{-it\gamma^0\gamma^a\xi_a}\mathcal{F}f(\xi).
\end{equation*}
Here $\mathcal{F}f$ is the Fourier transform of $f$ and $\mathcal{F}^{-1}$ is the inverse Fourier transform.
\begin{proof}
Using the Fourier transform, we can derive the following integral formula for \eqref{s1: 1.1s}:
\begin{equation}\label{s4: ifpsi}
\psi(t,x)=S(t)\psi_0-i\int_0^tS(t-\tau)\gamma^0F(\tau)\mathrm{d}\tau,
\end{equation}
where $S(t)$ verifies the following properties:
\begin{itemize}
\item[$i)$] $S(0)=I_2, S(t)S(\tau)=S(t+\tau)$;
\item[$ii)$] $\|\partial^I_xS(t)f\|_{L^2_x}=\|\partial^I_xf\|_{L^2_x}$, where $\partial^I_x:=\partial^{i_1}_{x_1}\partial^{i_2}_{x_2}$ for any multi-index $I=(i_1,i_2), i_1,i_2\in\mathbb{N}$, and hence $\|S(t)f\|_{H^N}=\|f\|_{H^N}$.
\end{itemize}
Let
\begin{equation*}
\psi^+:=\psi_0-i\int_0^{+\infty}S(-\tau)\gamma^0F(\tau)\mathrm{d}\tau=\psi_0-i\lim_{T\to+\infty}\int_0^{T}S(-\tau)\gamma^0F(\tau)\mathrm{d}\tau.
\end{equation*}
We claim that $\psi^+$ is well-defined in $H^N(\mathbb{R}^2)$. Indeed, by the properties of $S(t)$ as above and the assumption \eqref{s4: wd}, we have
\begin{eqnarray*}
\left\|\int_{T_1}^{T_2}S(-\tau)\gamma^0F(\tau)\mathrm{d}\tau\right\|_{H^N}&\le&\int_{T_1}^{T_2}\|S(-\tau)\gamma^0F(\tau)\|_{H^N}\mathrm{d}\tau\\
&\le&C\int_{T_1}^{T_2}\|F(\tau)\|_{H^N}\mathrm{d}\tau\to 0\quad\mathrm{as}\quad T_2>T_1\to+\infty.
\end{eqnarray*}
Hence the claim follows. The function $\varphi:=S(t)\psi^+$ solves the homogeneous Dirac equation $\mathcal{D}\varphi=0$ with $\varphi(0,\cdot)=\psi^+$. Furthermore, using the properties of $S(t)$ again, we obtain
\begin{eqnarray*}
\|\psi(t)-S(t)\psi^+\|_{H^N}&=&\left\|\lim_{T\to+\infty}\int_{t}^TS(t-\tau)\gamma^0F(\tau)\mathrm{d}\tau\right\|_{H^N}\\
&\le&\lim_{T\to+\infty}\int_{t}^T\|S(t-\tau)\gamma^0F(\tau)\|_{H^N}\mathrm{d}\tau\\
&\le&C\int_t^{+\infty}\|F(\tau)\|_{H^N}\mathrm{d}\tau.
\end{eqnarray*}
\end{proof}
\end{lem}
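Property $ii)$ can also be illustrated numerically. Assuming the representation $\gamma^0=\sigma_3$, $\gamma^a=i\sigma_a$ (any admissible representation works), the sketch below, with hypothetical grid sizes and names, applies the Fourier multiplier $e^{-it\gamma^0\gamma^a\xi_a}$ on a periodic grid and checks that the discrete $L^2$ norm of a random two-component spinor is preserved.
\begin{verbatim}
import numpy as np

# assumed representation: gamma^0 = sigma_3, gamma^a = i*sigma_a
s1 = np.array([[0, 1], [1, 0]], dtype=complex)
s2 = np.array([[0, -1j], [1j, 0]], dtype=complex)
s3 = np.array([[1, 0], [0, -1]], dtype=complex)
A1, A2 = s3 @ (1j * s1), s3 @ (1j * s2)      # gamma^0 gamma^a (Hermitian)

n, L, t = 64, 2 * np.pi, 0.7                 # grid size, box length, time
xi = 2 * np.pi * np.fft.fftfreq(n, d=L / n)  # angular Fourier frequencies
XI1, XI2 = np.meshgrid(xi, xi, indexing="ij")

rng = np.random.default_rng(1)
f = rng.standard_normal((2, n, n)) + 1j * rng.standard_normal((2, n, n))
fhat = np.fft.fft2(f, axes=(1, 2))

# multiplier exp(-i t M), M = xi_a gamma^0 gamma^a; since M^2 = |xi|^2 I_2,
# exp(-i t M) = cos(t|xi|) I_2 - i sin(t|xi|) M / |xi|
absxi = np.sqrt(XI1**2 + XI2**2)
c = np.cos(t * absxi)
s = np.sinc(t * absxi / np.pi) * t           # sin(t|xi|)/|xi|, regular at xi = 0
M1 = fhat[0] * (c - 1j * s * (XI1 * A1[0, 0] + XI2 * A2[0, 0])) \
     - 1j * s * (XI1 * A1[0, 1] + XI2 * A2[0, 1]) * fhat[1]
M2 = fhat[1] * (c - 1j * s * (XI1 * A1[1, 1] + XI2 * A2[1, 1])) \
     - 1j * s * (XI1 * A1[1, 0] + XI2 * A2[1, 0]) * fhat[0]
Sf = np.fft.ifft2(np.stack([M1, M2]), axes=(1, 2))

print(abs(np.linalg.norm(f) - np.linalg.norm(Sf)))  # ~ 1e-13: norm preserved
\end{verbatim}
Here the closed form $e^{-itM}=\cos(t|\xi|)I_2-i\sin(t|\xi|)M/|\xi|$ with $M=\xi_a\gamma^0\gamma^a$ follows from $M^2=|\xi|^2I_2$, which in turn follows from the anticommutation relations of the gamma matrices.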
\noindent$\textit{Proof\ of\ Theorem\ \ref{s1: thm2}.}$ Let $\psi$ be the global solution to \eqref{s1: 1.1s} obtained in Theorem \ref{s1: thm1} with $F(\psi)=(\psi^*\gamma^0\psi)\psi$. By Lemma \ref{s4: DL4.4}, we only need to verify that for any $|I|\le N$,
\begin{equation}\label{s4: intFN}
\int_t^{+\infty}\|\partial^IF(\tau)\|_{L^2_x}\mathrm{d}\tau\le C\langle t\rangle^{-\frac{1}{2}}\ln(2+t),
\end{equation}
and for any $|I|\le N-2$,
\begin{equation}\label{s4: intFN-2}
\int_t^{+\infty}\|\partial^IF(\tau)\|_{L^2_x}\mathrm{d}\tau\le C(t)\langle t\rangle^{-\frac{1}{2}}\ln(2+t),\quad\quad\mathrm{with}\quad\quad\lim_{t\to+\infty}C(t)=0.
\end{equation}
Note that the proof of Theorem \ref{s1: thm1} in Sect. \ref{ss3.2} gives (see \eqref{s3: gzpsiinf}, \eqref{s3.2: gpsi-} and \eqref{s3.2: gpsi})
\begin{eqnarray*}
&&\sup_{t\ge 0}\left\{\sum_{|I|\le N}[E^D_{gst}(t,\hat{\Gamma}^I\psi)]^{\frac{1}{2}}\right.\\
&+&\left.\sum_{|I|\le N-2}\left[\langle t+r\rangle^{\frac{1}{2}}\langle t-r\rangle^{\frac{1}{2}}|\Gamma^{I}\psi|+[\ln(2+t)]^{-1}\left(\langle t\rangle^{\frac{3}{2}}|[\hat{\Gamma}^{I}\psi]_{-}|+\langle t-r\rangle\langle t\rangle^{\frac{1}{2}}|\hat{\Gamma}^{I}\psi|\right)\right]\right\}\\
&\lesssim&C_1\epsilon.
\end{eqnarray*}
For any $|I|\le N$, using Lemma \ref{s2: DL4.2}, we have
\begin{eqnarray*}
|\partial^IF|&=&\left|\sum_{I_1+I_2+I_3=I}(\partial^{I_1}\psi)^*\gamma^0(\partial^{I_2}\psi)(\partial^{I_3}\psi)\right|\\
&\lesssim&\sum_{|I_1|+|I_2|+|I_3|\le|I|}\left|[\partial^{I_1}\psi]_{-}^*\gamma^0[\partial^{I_2}\psi]_{+}+[\partial^{I_1}\psi]_{+}^*\gamma^0[\partial^{I_2}\psi]_{-}\right|\cdot|\partial^{I_3}\psi|\\
&\lesssim&\sum_{|I_1|+|I_2|+|I_3|\le|I|}|[\partial^{I_1}\psi]_{-}|\cdot|\partial^{I_2}\psi|\cdot|\partial^{I_3}\psi|,
\end{eqnarray*}
which implies
\begin{eqnarray}\label{s4: scFtau}
\|\partial^IF(\tau)\|_{L^2_x}&\lesssim&\sum_{\substack{|I_2|,|I_3|\le N-2,\\|I_1|\le N}}\|\langle r-\tau\rangle^{\frac{1+\delta}{2}}\partial^{I_2}\psi\|_{L^\infty_x}\cdot\|\partial^{I_3}\psi\|_{L^\infty_x}\cdot\left\|\frac{[\partial^{I_1}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}\nonumber\\
&+&\sum_{\substack{|I_1|,|I_2|\le N-2,\\|I_3|\le N}}\|[\partial^{I_1}\psi]_{-}\|_{L^\infty_x}\cdot\|\partial^{I_2}\psi\|_{L^\infty_x}\cdot\|\partial^{I_3}\psi\|_{L^2_x}\\
&\lesssim&(C_1\epsilon)^2\langle\tau\rangle^{-1}\ln(2+\tau)\sum_{|I'|\le N}\left\|\frac{[\partial^{I'}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}+(C_1\epsilon)^3\langle\tau\rangle^{-2}\ln(2+\tau)\nonumber.
\end{eqnarray}
It follows that
\begin{equation*}n
\int_t^{+\infty}\|\partial^IF(\tau)\|_{L^2_x}\rightm{d}\tau&\leftesssim&(C_1\epsilon)^2\sum_{|I'|\lefte N}\left(\int_t^{+\infty}\leftangle\tau\rightangle^{-2}\leftn^2(2+\tau) \rightm{d}\tau\right)^{\frac{1}{2}}\left(\int_t^{+\infty}\left\|\frac{[\partial^{I'}\psi]_{-}}{\leftangle r-\tau\rightangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\rightm{d}\tau\right)^{\frac{1}{2}}\\
&+&(C_1\epsilon)^3\int_t^{+\infty}\leftangle\tau\rightangle^{-2}\leftn(2+\tau) \rightm{d}\tau\\
&\leftesssim&(C_1\epsilon)^3\leftangle t\rightangle^{-\frac{1}{2}}\leftn(2+t),
\end{equation*}n
which implies \eqref{s4: intFN}.
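For the reader's convenience, we record the elementary tail estimates behind the last step: for $t\ge 0$,
\begin{equation*}
\int_t^{+\infty}\frac{\ln^2(2+\tau)}{(2+\tau)^{2}}\mathrm{d}\tau
=\frac{\ln^2(2+t)+2\ln(2+t)+2}{2+t},
\qquad
\int_t^{+\infty}\frac{\ln(2+\tau)}{(2+\tau)^{2}}\mathrm{d}\tau
=\frac{\ln(2+t)+1}{2+t},
\end{equation*}
and $\langle\tau\rangle\sim 2+\tau$ for $\tau\ge 0$, so the first integral is $\lesssim\langle t\rangle^{-1}\ln^2(2+t)$ and the second is $\lesssim\langle t\rangle^{-1}\ln(2+t)$; combined with the uniform bound on the ghost energy recalled at the beginning of the proof, this gives the stated decay.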
It remains to prove \eqref{s4: intFN-2}. For $|I|\le N-2$, we can omit the second sum in \eqref{s4: scFtau} and derive
\begin{eqnarray*}
\int_t^{+\infty}\|\partial^IF(\tau)\|_{L^2_x}\mathrm{d}\tau&\lesssim&(C_1\epsilon)^2\sum_{|I'|\le N}\left(\int_t^{+\infty}\langle\tau\rangle^{-2}\ln^2(2+\tau)\mathrm{d}\tau\right)^{\frac{1}{2}}\left(\int_t^{+\infty}\left\|\frac{[\partial^{I'}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\mathrm{d}\tau\right)^{\frac{1}{2}}\\
&\lesssim& C(t)(C_1\epsilon)^2\langle t\rangle^{-\frac{1}{2}}\ln(2+t),
\end{eqnarray*}
where
\begin{equation*}
C(t):=\sum_{|I'|\le N}\left(\int_t^{+\infty}\left\|\frac{[\partial^{I'}\psi]_{-}}{\langle r-\tau\rangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\mathrm{d}\tau\right)^{\frac{1}{2}}\to 0\quad\mathrm{as}\ t\to+\infty.
\end{equation*}
This proves \eqref{s4: intFN-2} and completes the proof of Theorem \ref{s1: thm2}.
\begin{comment}
\section{The case $m>0$}
For the massive Dirac equation
\begin{equation*}gin{equation}\leftabel{s5: 1.1}
i\gamma^\mu\partial_\mu\psi+m\psi=F(\psi),\quad m>0,
\end{equation}
with prescribed initial data \eqref{s1: 1.2} and nonlinearities $F$ in \eqref{s1: 1.4}, assume without loss of generality that $m=1$. Acting the Dirac operator $i\gamma^\mu\partial_\mu$ on both sides of \eqref{s5: 1.1}, we obtain that $\psi$ solves the following Klein-Gordon equation
\begin{equation*}gin{equation}\leftabel{s5: 1.2}
-\Box \psi+\psi=F(\psi)-i\gamma^\mu\partial_\mu F(\psi)
\end{equation}
with the initial data
\begin{equation*}gin{equation}\leftabel{s5: 1.3}
(\psi,\partial_t\psi)|_{t=0}=(\psi_0,\psi_1),\quad\quad\psi_1:=-i\gamma^0F(\psi_0)-\gamma^0\gamma^a\partial_a\psi_0.
\end{equation}
For the order set
\begin{equation*}
\{\tilde{\Gamma}_k\}_{k=1}^6:=\{\partial_0,\partial_1,\partial_2,L_1,L_2,\Omega_{12}\}
\end{equation*}
and any multi-index $I=(i_1,i_2,\dots,i_6)\in\mathbb{N}^6$ of length $|I|=\sum_{k=1}^6i_k$, we denote
$$\tilde{\Gamma}^I=\prod_{k=1}^6\tilde{\Gamma}_k^{i_k},\quad\mathrm{where}\quad\tilde{\Gamma}=(\tilde{\Gamma}_1,\tilde{\Gamma}_2,\dots,\tilde{\Gamma}_6).$$
\subsection{Sobolev inequality and energy estimates}\leftabel{ss5.1}
\begin{equation*}gin{lem}(See \cite[Lemma 2.5]{DMY})\leftabel{s5: K-S'}
Let $u=u(t,x)$ be a sufficiently smooth function. Then we have
\begin{equation*}
|u(t,x)|\leftesssim\leftangle t\rightangle^{-\frac{1}{2}}\sum_{|I|\lefte 3}\|\tilde{\Gamma}^Iu(t,x)\|_{L^2_x}.
\end{equation*}
\end{lem}
\begin{equation*}gin{lem}(Standard energy estimate)\leftabel{s5: EK}
Let $u$ be the solution of the Cauchy problem
$$\left\{\begin{equation*}gin{array}{rcl}
(-\Box +1)u(t,x)&=&f(t,x),\\
(u,\partial_t u)|_{t=0}&=&(u_0,u_1),
\end{array}\right.$$
where $f(t,x)$ is a sufficiently smooth function. Then
\begin{equation*}
[E_1(t,u)]^{\frac{1}{2}}\leftesssim [E_1(0,u)]^{\frac{1}{2}}+\int_0^t\|f(\tau,x)\|_{L^2_x}\rightm{d}\tau,
\end{equation*}
where
\begin{equation*}
E_1(t,u)=\int_{\mathbb{R}^2}\left((\partial_t u)^2+(\partial_1 u)^2+(\partial_2 u)^2+u^2\right)(t,x)\rightm{d}x.
\end{equation*}
\end{lem}
\subsection{Estimates on $2D$ linear Klein-Gordon equation}\leftabel{ss5.2}
\begin{equation*}gin{lem}(See \cite[Corollary 2.13]{DMY})\leftabel{s5: DMY2.13}
Let $u$ be the solution of the Cauchy problem
$$\left\{\begin{equation*}gin{array}{rcl}
(-\Box +1)u(t,x)&=&f(t,x),\\
(u,\partial_t u)|_{t=0}&=&(u_0,u_1),
\end{array}\right.$$
where $f(t,x)$ is a sufficiently smooth function satisfying
\begin{equation*}
\sum_{|I|\lefte 4}\max_{0\lefte \tau\lefte t}\leftangle \tau\rightangle^{\delta_0}\|\leftangle\tau+|x|\rightangle\tilde{\Gamma}^If(\tau,x)\|_{L^2_x}\lefte C_f
\end{equation*}
for some $\delta_0>0$, then we have
\begin{equation*}
\leftangle t+r\rightangle|u(t,x)|\leftesssim\frac{C_f}{1-2^{-\delta_0}}+\sum_{|I|\lefte 5}\|\leftangle |x|\rightangle\leftn(2+|x|)\tilde{\Gamma}^Iu(0,x)\|_{L^2_x}.
\end{equation*}
\end{lem}
\subsection{Bootstrap assumptions and global existence}\leftabel{ss5.3}
{$\mathbf{Bootstrap\ setting.}$} Fix $N\ge 9$. The global existence result is based on the following bootstrap setting: for $C_1\gg 1$ and $0<\epsilon\leftl C_1^{-1}$ to be chosen later
\begin{equation*}gin{equation}\leftabel{s5: bs}
\sum_{|I|\lefte N}[E_1(t,\tilde{\Gamma}^I\psi)]^{\frac{1}{2}}+\sum_{|I|\lefte N-5}\leftangle t+r\rightangle|\tilde{\Gamma}^I\psi|\lefte C_1\epsilon,
\end{equation}
where $\epsilon$ measures the size of the initial data. Let $\psi_0$ satisfy \eqref{s5: bs} and $\psi$ be the corresponding solution of \eqref{s5: 1.2}-\eqref{s5: 1.3}. Define
\begin{equation*}gin{equation}\leftabel{s5: T_*}
T_*=\sup\{t\in[0,\infty): \psi\ \mathrm{satisfies\ }\eqref{s5: bs}\ \mathrm{on}\ [0,t]\}.
\end{equation}
We only need to show that $T_*=\infty$. Acting the vector fields $\tilde{\Gamma}^I, |I|\lefte N$ on both sides of \eqref{s5: 1.2}, we obtain
\begin{equation*}gin{equation}\leftabel{s5: bgz^Ipsi}
(-\Box+1)\tilde{\Gamma}^I\psi=\tilde{\Gamma}^IF=\tilde{\Gamma}^I\left((\psi^*H\psi)\psi\right).
\end{equation}
${\mathbf{Step\ 1.}}$ Refining the estimate of $E_1(t,\tilde{\Gamma}^I\psi)$ in \eqref{s5: bs}. Apply the standard energy estimate in Lemma \rightef{s5: EK} to $\tilde{\Gamma}^I\psi$ and using \eqref{s5: bgz^Ipsi}, we derive
\begin{equation*}
[E_1(t,\tilde{\Gamma}^I\psi)]^{\frac{1}{2}}\leftesssim [E_1(0,\tilde{\Gamma}^I\psi)]^{\frac{1}{2}}+\int_0^t\|\tilde{\Gamma}^IF(\tau,x)\|_{L^2_x}\rightm{d}\tau,
\end{equation*}
where
\begin{equation*}
E_1(t,\tilde{\Gamma}^I\psi)=\int_{\mathbb{R}^2}\left((\partial_t \tilde{\Gamma}^I\psi)^2+(\partial_1 \tilde{\Gamma}^I\psi)^2+(\partial_2 \tilde{\Gamma}^I\psi)^2+(\tilde{\Gamma}^I\psi)^2\right)(t,x)\rightm{d}x.
\end{equation*}
For any $|I|\lefte N$, using the bootstrap assumption and recalling that $N\ge 9$, we have
\begin{equation*}n
\|\tilde{\Gamma}^IF(\tau)\|_{L^2_x}&=&\left\|\sum_{I_1+I_2+I_3=I}(\tilde{\Gamma}^{I_1}\psi)^*H(\tilde{\Gamma}^{I_2}\psi)(\tilde{\Gamma}^{I_3}\psi)\right\|_{L^2_x}\\
&\leftesssim&\sum_{\substack{|I_1|,|I_2|\lefte N-5,\\|I_3|\lefte N}}\|\tilde{\Gamma}^{I_1}\psi\|_{L^\infty_x}\cdot\|\tilde{\Gamma}^{I_2}\psi\|_{L^\infty_x}\cdot\|\tilde{\Gamma}^{I_3}\psi\|_{L^2_x}\\
&\leftesssim&(C_1\epsilon)^3\leftangle\tau\rightangle^{-2},
\end{equation*}n
which implies that
\begin{equation*}
[E_1(t,\tilde{\Gamma}^I\psi)]^{\frac{1}{2}}\leftesssim \epsilon+(C_1\epsilon)^3.
\end{equation*}
This strictly improves the bootstrap estimate of $\sum_{|I|\lefte N}[E_1(t,\tilde{\Gamma}^I\psi)]^{\frac{1}{2}}$ in \eqref{s5: bs} for $C_1$ sufficiently large and $\epsilon$ sufficiently small.
$\mathbf{Step\ 2.}$ Refining the estimate of $|\tilde{\Gamma}^I\psi|$ in \eqref{s5: bs}. For any $|I|\lefte N-1$, similar to the previous step, we have
\begin{equation*}
\max_{0\lefte \tau\lefte t}\leftangle \tau\rightangle^{\frac{1}{2}}\|\leftangle\tau+|x|\rightangle\tilde{\Gamma}^If(\tau,x)\|_{L^2_x}\leftesssim (C_1\epsilon)^3\leftangle\tau\rightangle^{-\frac{1}{2}},
\end{equation*}
which implies that for any $|I'|\lefte N-5$
\begin{equation*}
\sum_{|J|\lefte 4}\max_{0\lefte \tau\lefte t}\leftangle \tau\rightangle^{\frac{1}{2}}\|\leftangle\tau+|x|\rightangle\tilde{\Gamma}^J\tilde{\Gamma}^{I'}f(\tau,x)\|_{L^2_x}\leftesssim (C_1\epsilon)^3.
\end{equation*}
By Lemma \rightef{s5: DMY2.13},
\begin{equation*}
\leftangle t+r\rightangle|\tilde{\Gamma}^{I'}\psi(t,x)|\leftesssim (C_1\epsilon)^3+\sum_{|J|\lefte 5}\|\leftangle |x|\rightangle\leftn(2+|x|)\tilde{\Gamma}^J\tilde{\Gamma}^{I'}\psi(0,x)\|_{L^2_x}\leftesssim \epsilon+(C_1\epsilon)^3.
\end{equation*}
This strictly improves the bootstrap estimate of $\sum_{|I|\lefte N-5}\leftangle t+r\rightangle|\tilde{\Gamma}^I\psi|$ in \eqref{s5: bs} for $C_1$ sufficiently large and $\epsilon$ sufficiently small.
In conclusion, for all initial data $\psi_0$ sufficiently small, we show that $T_*=\infty$ and hence obtain the global-in-time existence of the solution to \eqref{s5: 1.1}.
\end{comment}
\begin{appendices}
\begin{comment}
\section{}
In this section we prove the local existence for \eqref{s1: 1.1s}, for any initial data satisfying the assumption in Theorem \rightef{s1: thm1}. We denote $\hat{f}$ or $\mathcal{F}f$ by the Fourier transform of a (scalar or $\mathbb{C}^2$-valued) function $f$.
Multiplying on both sides of the first equation in \eqref{s1: 1.1s} by $-i\gamma^0$, we obtain
\begin{equation*}gin{equation}\leftabel{sA: 1.1s'}
\partial_t\psi+\gamma^0\gamma^a\partial_a\psi=-i\gamma^0F,\quad\quad\psi(0,x)=\psi_0(x).
\end{equation}
Taking the Fourier transform in \eqref{sA: 1.1s'} with respect to $x$, we have
\begin{equation*}
\partial_t\hat{\psi}(t,\xi)+i\xi_a\gamma^0\gamma^a\hat{\psi}(t,\xi)=-i\gamma^0\hat{F}(t,\xi),\quad\quad\hat{\psi}(0,\xi)=\hat{\psi_0}(\xi).
\end{equation*}
We solve the above first-order ODE in $t$ to arrive at the expression of the solution $\psi$ in Fourier space
\begin{equation*}gin{equation}\leftabel{sA: hatpsi}
\hat{\psi}(t,\xi)=e^{-it\xi_a\gamma^0\gamma^a}\hat{\psi_0}(\xi)-i\int_0^te^{-i(t-\tau)\xi_a\gamma^0\gamma^a}\gamma^0\hat{F}(\tau,\xi)d\tau,
\end{equation}
where we recall that, for any matrix $A$, $e^A$ is defined as
\begin{equation*}
e^A=\sum_{j=0}^\infty\frac{A^j}{j!}.
\end{equation*}
Taking the inverse Fourier transform in \eqref{sA: hatpsi}, we derive
\begin{equation*}gin{equation}\leftabel{sA: psi}
\psi(t,x)=S(t)\psi_0-i\int_0^tS(t-\tau)\gamma^0[F(\psi)](\tau)\rightm{d}\tau
\end{equation}
where $S(t)=e^{-t\gamma^0\gamma^a\partial_a}$ is the matrix group propagator, i.e., for any $\mathbb{C}^2$-valued function $f$,
\begin{equation*}
S(t)f=\mathcal{F}^{-1}e^{-it\xi_a\gamma^0\gamma^a}\mathcal{F}f(\xi).
\end{equation*}
Since $\gamma^0\gamma^a$ is a Hermitian matrix, then $B:=i\gamma^0\gamma^a\partial_a$ is a self-adjoint operator on the complex-valued Hilbert space $L^2\times L^2$, with $D(B)=H^1\times H^1$. It follows that $S(t)=e^{itB}$ forms a unitary group on $L^2\times L^2$, and we have the following properties of $S(t)$:
\begin{equation*}gin{itemize}
\item[$i)$] $S(0)=I_2, S(t)S(\tau)=S(t+\tau)$;
\item[$ii)$] $\|\partial^I_xS(t)f\|_{L^2_x}=\|\partial^I_xf\|_{L^2_x}$, where $\partial^I_x:=\partial^{i_1}_{x_1}\partial^{i_2}_{x_2}$ for any multi-index $I=(i_1,i_2), i_1,i_2\in\mathbb{N}$, and hence $\|S(t)f\|_{H^N}=\|f\|_{H^N}$ for any integer $N\ge 0$.
\end{itemize}
Fix $N\in\mathbb{N}$ with $N\ge 3$. Let $T>0$ (small) and $M>0$ be two constants to be chosen later. For any $t\in[0,T)$, we denote
\begin{equation*}
\|\psi\|_{E_N}:=\sup_{t\in[0,T),|I|\lefte N}\|\hat{\Gamma}^I\psi(t)\|_{L^2_x},\quad\quad X:=\left\{\psi(t,\cdot)\in H^N, t\in[0,T): \|\psi\|_{E_N}<\infty\right\},
\end{equation*}
and
\begin{equation*}
Y:=\left\{\psi\in X: \|\psi\|_{E_N}\lefte M,\psi(0,\cdot)=\psi_0\right\}.
\end{equation*}
We define $\mathcal{T}: Y\to X$ as
\begin{equation*}gin{equation}\leftabel{sA: Tpsi}
\mathcal{T}\psi:=S(t)\psi_0+(-i)\int_0^tS(t-\tau)\gamma^0[F(\psi)](\tau)\rightm{d}\tau=\psi_1+\psi_2.
\end{equation}
By straightforward computation, we find that $\psi_1$ is the solution to the homogeneous Dirac equation $\mathcal{D}\psi_1=0$, and $\psi_2$ is the solution to $\mathcal{D}\psi_2=F(\psi)$. Hence $\mathcal{T}\psi$ solves the equation
\begin{equation*}
\mathcal{D}(\mathcal{T}\psi)=F(\psi),\quad\quad(\mathcal{T}\psi)(0,\cdot)=\psi_0.
\end{equation*}
Acting the vector fields $\hat{\Gamma}^I, |I|\lefte N$ on these equations, we obtain
\begin{equation*}
\mathcal{D}\hat{\Gamma}^I(\mathcal{T}\psi)=\hat{\Gamma}^IF+\sum_{|I'|<|I|}c_{I'}\hat{\Gamma}^{I'}F
\end{equation*}
for some constants $c_{I'}$. The energy estimate for the Dirac equation gives
\begin{equation*}n
\sum_{|I|\lefte N}\|\hat{\Gamma}^I\mathcal{T}\psi\|_{L^2_x}&\lefte&\sum_{|I|\lefte N}\|(\hat{\Gamma}^I\mathcal{T}\psi)(0,\cdot)\|_{L^2_x}+C\sum_{|I'|\lefte N}\int_0^t\|\hat{\Gamma}^{I'}F\|_{L^2_x}\\
&\lefte&C\sum_{|I|\lefte N}\|(\Gamma^I\mathcal{T}\psi)(0,\cdot)\|_{L^2_x}+CT\sum_{\substack{|I_1|,|I_2|\lefte N-2,\\
|I_3|\lefte N}}\|\Gamma^{I_1}\psi\|_{L^\infty_x}\|\Gamma^{I_2}\psi\|_{L^\infty_x}\|\Gamma^{I_3}\psi\|_{L^2_x}\\
&\lefte&C\sum_{|I|\lefte N}\|(\Gamma^I\psi)(0,\cdot)\|_{L^2_x}+CT\left(\sum_{|I'|\lefte N}\|\Gamma^{I'}\psi\|_{L^2_x}\right)^3\\
&\lefte&C\sum_{|I|\lefte N}\|(\hat{\Gamma}^I\psi)(0,\cdot)\|_{L^2_x}+CTM^2\sum_{|I'|\lefte N}\|\hat{\Gamma}^{I'}\psi\|_{L^2_x},
\end{equation*}n
where we use the Sobolev imbedding theorem and the Klainerman-Sobolev equality. Choose $M:=4C\sum_{|I|\lefte N}\|(\hat{\Gamma}^I\psi)(0,\cdot)\|_{L^2_x}$ ($M$ is determined by $\psi_0$, smaller than the smallness condition in the global existence Theorem \rightef{s1: thm1}, not depend on $\psi$) and $T$ small such that $CTM^2\lefte\frac{1}{4}$, and we obtain that $\|\mathcal{T}\psi\|_{E_N}\lefte M$. Similarly, we can prove that
\begin{equation*}
\|\mathcal{T}\psi-\mathcal{T}\tilde{\psi}\|_{E_N}\lefte\frac{1}{2}\|\psi-\tilde{\psi}\|_{E_N},\quad\mathrm{for\;any\;}\psi,\tilde{\psi}\in Y.
\end{equation*}
Banach's fixed point theorem implies that there exists a unique fixed point $\psi\in Y$, i.e., $\psi$ is the solution to \eqref{s1: 1.1s} in $[0,T)$.
\section{}
In this section we show that the assumption on the initial data in Theorem \rightef{s1: thm1} can be relaxed.
\subsection{Estimates on $2D$ linear wave equations}
Let $u$ be the solution to
\begin{equation*}
\left\{\begin{equation*}gin{array}{rcl}
-\Box u(t,x)&=&f(t,x),\\
(u,\partial_tu)|_{t=0}&=&(u_0,u_1),
\end{array}\right.
\end{equation*}
where $f$ is sufficiently smooth. Denote
\begin{equation*}n
\mathcal{E}(t,u):&=&\|\partial u(t,x)\|_{L^2_x}^2,\quad\quad|\partial u|^2=\sum_{\alpha}(\partial_\alpha u)^2,\\
\mathcal{E}_{con}(t,u):&=&\|(|L_0 u+u|+|\Omega u|+\sum_{a}|L_a u|)(t,x)\|_{L^2_x}^2,\\
\mathcal{E}_{gst}(t,u):&=&\|\partial u(t,x)\|_{L^2_x}^2+\int_0^t\left\|\frac{\sum_a|G_au|}{\leftangle r-\tau\rightangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\rightm{d}\tau.
\end{equation*}n
\begin{equation*}gin{lem}(See \cite{A, LZ}.)
The following hold:
\begin{equation*}gin{itemize}
\item[$\cdot$] $\mathcal{E}(t,u)^{\frac{1}{2}}\leftesssim \mathcal{E}(0,u)^{\frac{1}{2}}+\int_0^t\|f(\tau,x)\|_{L^2_x}\rightm{d}\tau$;
\item[$\cdot$] $\mathcal{E}_{con}(t,u)^{\frac{1}{2}}\leftesssim \mathcal{E}_{con}(0,u)^{\frac{1}{2}}+\int_0^t\|\leftangle \tau+|x|\rightangle f(\tau,x)\|_{L^2_x}\rightm{d}\tau$;
\item[$\cdot$] $\mathcal{E}_{gst}(t,u)^{\frac{1}{2}}\leftesssim \mathcal{E}_{gst}(0,u)^{\frac{1}{2}}+\int_0^t\|f(\tau,x)\|_{L^2_x}\rightm{d}\tau$;
\item[$\cdot$] ($L^2$-estimates) $\|u(t,x)\|_{L^2_x}\leftesssim\|u_0\|_{L^2_x}+\leftn^{\frac{1}{2}}(2+t)\left[\|u_1\|_{L^1_x\cap L^2_x}+\int_0^t\|f(\tau,x)\|_{L^1_x\cap L^2_x}\rightm{d}\tau\right]$;
\item[$\cdot$] ($L^\infty$-estimates)
\begin{equation*}
\|u(t,x)\|_{L^\infty_x}\!\leftesssim\!\leftangle t\rightangle^{-\frac{1}{2}}\!\left[\|u_0\|_{W^{2,1}}\!+\!\|u_1\|_{W^{1,1}}\!+\!\sum_{|I|\lefte 1}\!\int_0^t\leftangle s\rightangle^{-\frac{1}{2}}\|\Gamma^If(\tau,x)\|_{L^1_x}\rightm{d}\tau\right].
\end{equation*}
\end{itemize}
\end{lem}
\subsection{Estimates on $2D$ linear Klein-Gordon equations}
Let $u$ be the solution to
\begin{equation*}
\left\{\begin{equation*}gin{array}{rcl}
(-\Box +1)v(t,x)&=&f(t,x),\\
(v,\partial_tv)|_{t=0}&=&(v_0,v_1),
\end{array}\right.
\end{equation*}
where $f$ is sufficiently smooth. Denote
\begin{equation*}n
\mathcal{E}_1(t,v):&=&\|(|\partial v|+|v|)(t,x)\|_{L^2_x}^2,\quad\quad|\partial u|^2=\sum_{\alpha}(\partial_\alpha u)^2,\\
\mathcal{E}_{gst,1}(t,v):&=&\|(|\partial v|+|v|)(t,x)\|_{L^2_x}^2+\int_0^t\left\|\frac{|v|+\sum_a|G_au|}{\leftangle r-\tau\rightangle^{\frac{1+\delta}{2}}}\right\|_{L^2_x}^2\rightm{d}\tau.
\end{equation*}n
\begin{equation*}gin{lem}(Energy estimates)
The following hold:
\begin{equation*}gin{itemize}
\item[$\cdot$] $\mathcal{E}_1(t,v)^{\frac{1}{2}}\leftesssim \mathcal{E}_1(0,v)^{\frac{1}{2}}+\int_0^t\|f(\tau,x)\|_{L^2_x}\rightm{d}\tau$;
\item[$\cdot$] $\mathcal{E}_{gst,1}(t,v)^{\frac{1}{2}}\leftesssim \mathcal{E}_{gst,1}(0,v)^{\frac{1}{2}}+\int_0^t\|f(\tau,x)\|_{L^2_x}\rightm{d}\tau$;
\end{itemize}
\end{lem}
Let $\{p_j\}_{j=0}^\infty$ be a Littlewood-Paley partition of unity, i.e.
$$\sum_{j=0}^\infty p_j=1,\quad\quad p_j\in C_0^\infty(\mathbb{R}),\quad\quad p_j\ge 0,\quad\mathrm{for}\quad j\ge 1,$$
and
$$\mathrm{supp} p_0\subset[0,2],\quad\quad\mathrm{supp} p_j\subset[2^{j-1},2^{j+1}]\quad\mathrm{for}\quad j\ge 1.$$
\begin{equation*}gin{lem}($L^\infty$ estimate, see \cite{Ge})
We have
\begin{equation*}n
\leftangle t+|x|\rightangle|v(t,x)|\leftesssim\sum_{j=0}^\infty\sum_{|I|\lefte 5}\|\leftangle|x|\rightangle p_j(|x|)\tilde{\Gamma}^Iv(0,x)\|_{L^2_x}+\sum_{j=0}^\infty\sum_{|I|\lefte 4}\max_{0\lefte s\lefte t} p_j(s)\|\leftangle s+|x|\rightangle\tilde{\Gamma}^If(s,x)\|_{L^2_x}\rightm{d}s.
\end{equation*}n
In particular, if
$$\sum_{|I|\lefte 4}\max_{0\lefte s\lefte t}\leftangle s\rightangle^{\delta_0}\|\leftangle s+|x|\rightangle\tilde{\Gamma}^If(s,x)\|_{L^2_x}\rightm{d}s\lefte C_f$$
for some $\delta_0>0$, then
$$\leftangle t+|x|\rightangle|v(t,x)|\leftesssim\frac{C_f}{1-2^{-\delta_0}}+\sum_{|I|\lefte 5}\|\leftangle|x|\rightangle\leftn(2+|x|)\tilde{\Gamma}^Iv(0,x)\|_{L^2_x}.$$
In the above $\tilde{\Gamma}\in\{\partial,L_a,\Omega\}$.
\end{lem}
\end{comment}
\section{}\label{sB}
In this section we show that the assumption on the initial data in Theorem \rightef{s1: thm1} can be relaxed.
Let $u$ be the solution to
\begin{equation}\label{sB: uh}
\left\{\begin{array}{rcl}
-\Box u(t,x)&=&f(t,x),\\
(u,\partial_tu)|_{t=0}&=&(u_0,u_1).
\end{array}\right.
\end{equation}
Denote
\begin{eqnarray*}
\mathcal{E}(t,u):&=&\|\partial u(t,x)\|_{L^2_x}^2,\quad\quad|\partial u|^2=\sum_{\alpha}(\partial_\alpha u)^2,\\
\mathcal{E}_{con}(t,u):&=&\sum_{a=1}^2\|(|L_0 u+u|+|\Omega_{12} u|+|L_a u|)(t,x)\|_{L^2_x}^2.
\end{eqnarray*}
\begin{lem}(See \cite{A, LZ}.)\label{sB: hw}
Let $u$ be the solution to \eqref{sB: uh}. Then the following estimates hold:
\begin{itemize}
\item[$i)$] (Standard energy estimate)
$$\mathcal{E}(t,u)^{\frac{1}{2}}\lesssim \mathcal{E}(0,u)^{\frac{1}{2}}+\int_0^t\|f(\tau,x)\|_{L^2_x}\mathrm{d}\tau;$$
\item[$ii)$] (Conformal energy estimate)
$$\mathcal{E}_{con}(t,u)^{\frac{1}{2}}\lesssim \mathcal{E}_{con}(0,u)^{\frac{1}{2}}+\int_0^t\|\langle \tau+|x|\rangle f(\tau,x)\|_{L^2_x}\mathrm{d}\tau;$$
\item[$iii)$] ($L^2$ estimate)
$$\|u(t,x)\|_{L^2_x}\lesssim\|u_0\|_{L^2_x}+\ln^{\frac{1}{2}}(2+t)\left[\|u_1\|_{L^1_x\cap L^2_x}+\int_0^t\|f(\tau,x)\|_{L^1_x\cap L^2_x}\mathrm{d}\tau\right].$$
\end{itemize}
\end{lem}
\begin{lem}\label{sB: gu}
Let $u$ be the solution to \eqref{sB: uh} with $f\equiv 0$. Then for any integer $N\ge 3$, we have
\begin{eqnarray}\label{sB: gammaIu}
\sum_{|I|\le N-2}\|\Gamma^Iu\|_{L^\infty_x}&\lesssim&\langle t\rangle^{-\frac{1}{2}}\Bigg\{\|u_0\|_{L^2_x}+\ln^{\frac{1}{2}}(2+t)\|u_1\|_{L^1_x\cap L^2_x}\nonumber\\
&+&\sum_{k\le N}\|\langle|x|\rangle^{k}\nabla^ku_0\|_{L^2_x}+\sum_{k\le N-1}\|\langle|x|\rangle^{k+1}\nabla^ku_1\|_{L^2_x}\Bigg\}.
\end{eqnarray}
\begin{proof}
By Lemma \ref{sB: hw}, we have
\begin{equation}\label{sB: uL2}
\|u(t,x)\|_{L^2_x}\lesssim\|u_0\|_{L^2_x}+\ln^{\frac{1}{2}}(2+t)\|u_1\|_{L^1_x\cap L^2_x}
\end{equation}
and
\begin{equation*}
\mathcal{E}(t,u)^{\frac{1}{2}}+\mathcal{E}_{con}(t,u)^{\frac{1}{2}}\lesssim \mathcal{E}(0,u)^{\frac{1}{2}}+\mathcal{E}_{con}(0,u)^{\frac{1}{2}},
\end{equation*}
which implies
\begin{eqnarray}\label{sB: gamma1}
\sum_{|I|\le 1}\|\Gamma^Iu\|_{L^2_x}&\lesssim&\|u\|_{L^2_x}+\sum_{|I|\le 1}\|\Gamma^Iu(0)\|_{L^2_x}\nonumber\\
&\lesssim&\|u\|_{L^2_x}+\sum_{k\le 1}\|\langle|x|\rangle^{k}\nabla^ku_0\|_{L^2_x}+\sum_{k=0}\|\langle|x|\rangle^{k+1}\nabla^ku_1\|_{L^2_x}.
\end{eqnarray}
Applying the vector fields $\Gamma_k$, $k=1,\cdots,7$, to both sides of \eqref{sB: uh} and using the standard and conformal energy estimates in Lemma \ref{sB: hw}, we have
\begin{equation*}
\mathcal{E}(t,\Gamma_ku)^{\frac{1}{2}}+\mathcal{E}_{con}(t,\Gamma_ku)^{\frac{1}{2}}\lesssim \mathcal{E}(0,\Gamma_ku)^{\frac{1}{2}}+\mathcal{E}_{con}(0,\Gamma_ku)^{\frac{1}{2}},
\end{equation*}
which combined with \eqref{sB: gamma1} gives
\begin{eqnarray*}
\sum_{|I|\le 2}\|\Gamma^Iu\|_{L^2_x}&\lesssim&\|u\|_{L^2_x}+\sum_{|I|\le 2}\|\Gamma^Iu(0)\|_{L^2_x}\\
&\lesssim&\|u\|_{L^2_x}+\sum_{k\le 2}\|\langle|x|\rangle^{k}\nabla^ku_0\|_{L^2_x}+\sum_{k\le 1}\|\langle|x|\rangle^{k+1}\nabla^ku_1\|_{L^2_x}.
\end{eqnarray*}
By induction, for any $N\in\mathbb{N}$ with $N\ge 1$, we have
\begin{eqnarray}\label{sB: gammaN}
\sum_{|I|\le N}\|\Gamma^Iu\|_{L^2_x}&\lesssim&\|u\|_{L^2_x}+\sum_{|I|\le N}\|\Gamma^Iu(0)\|_{L^2_x}\nonumber\\
&\lesssim&\|u\|_{L^2_x}+\sum_{k\le N}\|\langle|x|\rangle^{k}\nabla^ku_0\|_{L^2_x}+\sum_{k\le N-1}\|\langle|x|\rangle^{k+1}\nabla^ku_1\|_{L^2_x}\nonumber\\
&\lesssim&\|u_0\|_{L^2_x}+\ln^{\frac{1}{2}}(2+t)\|u_1\|_{L^1_x\cap L^2_x}+\sum_{k\le N}\|\langle|x|\rangle^{k}\nabla^ku_0\|_{L^2_x}+\sum_{k\le N-1}\|\langle|x|\rangle^{k+1}\nabla^ku_1\|_{L^2_x},
\end{eqnarray}
where we use \eqref{sB: uL2} in the last inequality. By Lemma \ref{s2: K-S}, for any $|I|\le N-2$, we have
\begin{eqnarray*}
\|\Gamma^Iu\|_{L^\infty_x}
&\lesssim&\langle t\rangle^{-\frac{1}{2}}\sum_{|I|\le N}\|\Gamma^Iu\|_{L^2_x}\\
&\lesssim&\langle t\rangle^{-\frac{1}{2}}\left\{\|u_0\|_{L^2_x}+\ln^{\frac{1}{2}}(2+t)\|u_1\|_{L^1_x\cap L^2_x}+\sum_{k\le N}\|\langle|x|\rangle^{k}\nabla^ku_0\|_{L^2_x}+\sum_{k\le N-1}\|\langle|x|\rangle^{k+1}\nabla^ku_1\|_{L^2_x}\right\},
\end{eqnarray*}
hence \eqref{sB: gammaIu} follows.
\end{proof}
\end{lem}
We recall \eqref{s3: DL5.3'} and the estimate \eqref{s3: gPsiB} in $\mathbf{Step\ 2}$ of the proof of Proposition \ref{s3: prop 1}. We decompose $\Psi$ into the homogeneous wave component $\Psi_1$ and the inhomogeneous wave component $\Psi_2$, and use \eqref{sB: gammaIu} for $\Gamma^I\Psi_1$ and Lemma \ref{s2: DMY2.78} for $\Gamma^I\Psi_2$. For any $|I|\le N-2$, we obtain
\begin{eqnarray}\label{sB: gPsi}
\|\Gamma^I\Psi(t)\|_{L^\infty_x}&\lesssim&\langle t\rangle^{-\frac{1}{2}}\Bigg\{\ln^{\frac{1}{2}}(2+t)\|\psi_0\|_{L^1_x\cap L^2_x}+\sum_{k\le N-1}\|\langle|x|\rangle^{k+1}\nabla^k\psi_0\|_{L^2_x}\nonumber\\
&+&\|\Gamma^I\Psi_2(0,\cdot)\|_{W^{2,1}}+\|\partial_t\Gamma^I\Psi_2(0,\cdot)\|_{W^{1,1}}+\sum_{|I'|\le|I|+1}\int_0^t\langle\tau\rangle^{-\frac{1}{2}}\|\Gamma^{I'}F(\tau)\|_{L^1_x}\mathrm{d}\tau\Bigg\}.
\end{eqnarray}
Then, following the proof of Proposition \ref{s3: prop 1} and choosing $N$ larger ($N\ge 5$), we obtain Theorem \ref{s1: thm1} under the relaxed smallness condition on the initial data
\begin{equation*}
\|\psi_0\|_{L^1_x}+\sum_{k\le N}\|\langle|x|\rangle^{k+1}\nabla^k\psi_0\|_{L^2_x}\le\epsilon.
\end{equation*}
\begin{rem}
In \eqref{sB: gPsi}, we note that the initial data $(\Gamma^I\Psi_2,\partial_t\Gamma^I\Psi_2)|_{t=0}\neq (0,0)$ in general. However, since the nonlinearity $F$ is cubic (see \eqref{s3: DL5.3'}), using the H\"older inequality, we can obtain
\begin{equation*}
\sum_{|I|\le N-2}\left(\|\Gamma^I\Psi_2(0,\cdot)\|_{W^{2,1}}+\|\partial_t\Gamma^I\Psi_2(0,\cdot)\|_{W^{1,1}}\right)\lesssim\sum_{k\le N}\|\langle|x|\rangle^{k+1}\nabla^k\psi_0\|_{L^2_x}.
\end{equation*}
\end{rem}
\begin{comment}
\section{}\leftabel{sC}
In this section we generalize the results in \cite{DL} to general nonlinearities $F$ as in \eqref{s1: 1.4}. Given a mass $m\ge 0$, we consider the Cauchy problem
\begin{equation*}gin{equation}\leftabel{sC: 1.1s}
\mathcal{D}\psi+m\psi=F(\psi)=(\psi^*H\psi)\psi,\quad\quad\psi(0,\cdot)=\psi_0,
\end{equation}
where $H\in\mathbb{C}^{2\times 2}$ is an arbitrary matrix.
\subsection{Preliminaries}
In the sequel, we consider functions supported in the interior of the light cone $\mathcal{K}:=\{(t,x): t\ge 2, r<t-1\}$, with vertex $(1,0,0)$. A hyperboloidal hypersurface $\mathcal{H}_s$ with hyperbolic time $s\ge s_0=2$ is defined as
\begin{equation*}
\mathcal{H}_s:=\{(t,x): t^2-r^2=s^2\}.
\end{equation*}
For any $(t,x)\in\mathcal{K}\cap\mathcal{H}_s$ with $s\ge 2$, we have
\begin{equation*}
s^2\lefte t^2=r^2+s^2\lefte(t-1)^2+s^2,
\end{equation*}
which implies
\begin{equation*}
2t\lefte 1+s^2\lefte 2s^2.
\end{equation*}
We also have
\begin{equation*}
s^2=(t-r)(t+r)\lefte(t-r)2t\lefte(t-r)2s^2,
\end{equation*}
which gives
\begin{equation*}
t-r\ge\frac{1}{2}.
\end{equation*}
Combining these estimates, we obtain
\begin{equation*}gin{equation}\leftabel{sC: tles^2}
r\lefte t,\quad\quad s\lefte t\lefte s^2,\quad\quad 1\leftesssim t-r.
\end{equation}
We also denote by $\mathcal{K}_{[s_0,s_1]}:=\bigcup_{s_0\lefte s\lefte s_1}\mathcal{K}\cap\mathcal{H}_s$ the subsets of $\mathcal{K}$ limited by two hyperboloids. For a (scalar or vector-valued) function $\phi$ defined in $\mathbb{R}^{1+2}$, we denote
\begin{equation*}
\|\phi\|_{L^p_f(\mathcal{H}_s)}^p=\int_{\mathcal{H}_s}|\phi|^p{\rightm{d}}x:=\int_{\mathbb{R}^2}|\phi(\sqrt{s^2+r^2},x)|^p{\rightm{d}}x,\quad 1\lefte p<\infty.
\end{equation*}
The semi-hyperboloidal frame was introduced in \cite{DLW,LM} as follows
\begin{equation*}
\underline{\partial}_0:=\partial_t,\quad\quad\underline{\partial}_a:=\frac{L_a}{t}=\frac{x_a}{t}\partial_t+\partial_a.
\end{equation*}
For any $I=(i_1,i_2,i_3)\in\mathbb{N}^3,| J=(j_1,j_2)\in\mathbb{N}^2$, we denote $\partial^I:=\partial_0^{i_1}\partial_1^{i_2}\partial_2^{i_3}$, $L^J:=L_1^{j_1}L_2^{j_2}$ and similarly $\hat{L}^J:=(\hat{L}_1^{j_1}\hat{L}_2^{j_2})$.
We also recall the hyperboloidal energy of the Dirac equation introduced in \cite{DLW}. For a $\mathbb{C}^2$-valued function $\psi$ on $\mathcal{H}_s$, we define
\begin{equation*}gin{equation}\leftabel{sC: (psi)_}
(\psi)_{-}:=\psi-\frac{x_a}{t}\gamma^0\gamma^a\psi,\quad\quad(\psi)_{+}:=\psi+\frac{x_a}{t}\gamma^0\gamma^a\psi
\end{equation}
and the energy functionals
\begin{equation*}n
\mathcal{E}^{\mathcal{H}}(s,\psi):&=&\int_{\mathcal{H}_s}\psi^*(\psi)_{-}{\rightm{d}}x=\int_{\mathcal{H}_s}\left(\psi^*\psi-\frac{x_a}{t}\psi^*\gamma^0\gamma^a\psi\right){\rightm{d}}x,\\
\mathcal{E}^+(s,\psi):&=&\int_{\mathcal{H}_s}(\psi)_{-}^*(\psi)_{-}{\rightm{d}}x=\int_{\mathcal{H}_s}\left(\psi-\frac{x_a}{t}\gamma^0\gamma^a\psi\right)^*\left(\psi-\frac{x_a}{t}\gamma^0\gamma^a\psi\right){\rightm{d}}x.
\end{equation*}n
We have the following estimates on the energy functionals defined as above.
\begin{equation*}gin{prop}(See \cite[Lemma 2.2, Proposition 2.3]{DLW})\leftabel{sC: EH}
Let $m\in\mathbb{R}$ and $\psi(t,x):\mathbb{R}^{1+2}\to\mathbb{C}^2$ be a sufficiently smooth function with support in $\mathcal{K}_{[s_0,s_1]}$. Then the following statements hold for all $s\in[s_0,s_1]$:
\begin{equation*}gin{itemize}
\item[$i)$] $$\mathcal{E}^{\mathcal{H}}(s,\psi)=\frac{1}{2}\mathcal{E}^+(s,\psi)+\frac{1}{2}\int_{\mathcal{H}_s}\frac{s^2}{t^2}\psi^*\psi{\rightm{d}}x.$$
\item[$ii)$] $$\left\|\frac{s}{t}\psi\right\|_{L^2_f(\mathcal{H}_s)}+\left\|(\psi)_{-}\right\|_{L^2_f(\mathcal{H}_s)}\lefte 2[\mathcal{E}^{\mathcal{H}}(s,\psi)]^{\frac{1}{2}}.$$
\item[$iii)$] $$\mathcal{E}^{\mathcal{H}}(s,\psi)\lefte\mathcal{E}^{\mathcal{H}}(s_0,\psi)+2\int_{s_0}^s\int_{\mathcal{H}_{\tau}}\frac{\tau}{t}|\psi^*\gamma^0(\mathcal{D}\psi+m\psi)|{\rightm{d}}x{\rightm{d}}\tau.$$
\end{itemize}
\end{prop}
The following commutator estimates were given in \cite{LM14,LM}.
\begin{lem}\label{sC: DL2.3}
Let $\Phi,\phi$ be sufficiently regular $\mathbb{C}^2$-valued (resp. scalar) functions supported in the region $\mathcal{K}$. Then, for any multi-indices $I,J$, there exists a generic constant $C=C(|I|,|J|)$ such that
\begin{eqnarray*}
&&|[\partial_\alpha,L_a]\Phi|+ |[\partial_\alpha,\hat{L}_a]\Phi|\le C|\partial\Phi|,\\
&&|[L_a,L_b]\Phi|+ |[\hat{L}_a,\hat{L}_b]\Phi|\le C\sum_c|L_c\Phi|,\\
&&|[\partial^IL^J,\partial_\alpha]\phi|\le C\sum_{|J'|<|J|}|\partial\partial^IL^{J'}\phi|,\\
&&|[\partial^IL^J,\underline{\partial}_a]\phi|\le C\sum_{\substack{|J'|<|J|,b\\|I'|\le|I|}}|\underline{\partial}_b\partial^{I'}L^{J'}\phi|+t^{-1}\sum_{\substack{|I'|\le|I|\\|J'|\le|J|}}|\partial^{I'}L^{J'}\phi|.
\end{eqnarray*}
\end{lem}
We next state the weighted Sobolev inequalities on hyperboloids \cite{LM14}.
\begin{prop}\label{sC: DL2.4}
Let $\phi$ be a sufficiently smooth function supported in the region $\mathcal{K}$. Then for all $s\ge 2$, we have
\begin{eqnarray*}
&&\sup_{\mathcal{H}_s}|t\phi(t,x)|\le C\sum_{|J|\le 2}\|L^J\phi\|_{L^2_f(\mathcal{H}_s)},\\
&&\sup_{\mathcal{H}_s}|s\phi(t,x)|\le C\sum_{|J|\le 2}\left\|\frac{s}{t}L^J\phi\right\|_{L^2_f(\mathcal{H}_s)}.
\end{eqnarray*}
\end{prop}
\begin{cor}\label{sC: DL2.6}
Let $\Phi=\Phi(t,x)$ be a sufficiently smooth $\mathbb{C}^2$-valued function supported in the region $\mathcal{K}$. Then for all $s\ge 2$, we have
\begin{eqnarray*}
&&\sup_{\mathcal{H}_s}|t\Phi(t,x)|\le C\sum_{|J|\le 2}\|\hat{L}^J\Phi\|_{L^2_f(\mathcal{H}_s)},\\
&&\sup_{\mathcal{H}_s}|s\Phi(t,x)|\le C\sum_{|J|\le 2}\left\|\frac{s}{t}\hat{L}^J\Phi\right\|_{L^2_f(\mathcal{H}_s)}.
\end{eqnarray*}
\end{cor}
For any $\mathbb{C}^2$-valued function $\Phi$, we define the Hermitian matrices
\begin{equation*}
A_{-}:=I_2-\frac{x_a}{t}\gamma^0\gamma^a,\quad\quad A_{+}:=I_2+\frac{x_a}{t}\gamma^0\gamma^a,
\end{equation*}
and let $(\Phi)_{\pm}$ be defined as in \eqref{sC: (psi)_}, i.e.,
\begin{equation*}
(\Phi)_{-}=A_{-}\Phi=\Phi-\frac{x_a}{t}\gamma^0\gamma^a\Phi,\quad\quad (\Phi)_{+}=A_{+}\Phi=\Phi+\frac{x_a}{t}\gamma^0\gamma^a\Phi.
\end{equation*}
\begin{lem}\label{sC: DL2.7}
Let $\Phi,\Psi$ be two $\mathbb{C}^2$-valued functions. Then
\begin{equation}\label{sC: Psi*Phi}
\Psi^{*}\gamma^0\Phi=\frac{1}{4}\left[(\Psi)^*_{-}\gamma^0(\Phi)_{+}+(\Psi)^*_{+}\gamma^0(\Phi)_{-}+2\frac{s^2}{t^2}\Psi^*\gamma^0\Phi\right].
\end{equation}
\end{lem}
\begin{proof}
We write
\begin{equation*}
\Psi=\frac{(\Psi)_{-}+(\Psi)_{+}}{2},\quad\quad \Phi=\frac{(\Phi)_{-}+(\Phi)_{+}}{2}.
\end{equation*}
By a direct computation,
\begin{eqnarray}\label{sC: A-}
A_{-}\gamma^0A_{-}&=&\left(I_2-\frac{x_b}{t}\gamma^0\gamma^b\right)\gamma^0\left(I_2-\frac{x_a}{t}\gamma^0\gamma^a\right)\nonumber\\
&=&\gamma^0-\frac{x_b}{t}\gamma^0\gamma^b\gamma^0-\frac{x_a}{t}\gamma^0\gamma^0\gamma^a+\frac{x_ax_b}{t^2}\gamma^0\gamma^b\gamma^0\gamma^0\gamma^a\nonumber\\
&=&\gamma^0+\frac{x_a^2}{t^2}\gamma^0(\gamma^a)^2+\frac{x_1x_2}{t^2}\gamma^0\left(\gamma^1\gamma^2+\gamma^2\gamma^1\right)\nonumber\\
&=&\frac{s^2}{t^2}\gamma^0,
\end{eqnarray}
and similarly,
\begin{eqnarray}\label{sC: A+}
A_{+}\gamma^0A_{+}&=&\left(I_2+\frac{x_b}{t}\gamma^0\gamma^b\right)\gamma^0\left(I_2+\frac{x_a}{t}\gamma^0\gamma^a\right)\nonumber\\
&=&\gamma^0+\frac{x_b}{t}\gamma^0\gamma^b\gamma^0+\frac{x_a}{t}\gamma^0\gamma^0\gamma^a+\frac{x_ax_b}{t^2}\gamma^0\gamma^b\gamma^0\gamma^0\gamma^a\nonumber\\
&=&\frac{s^2}{t^2}\gamma^0.
\end{eqnarray}
It follows that
\begin{equation*}
(\Psi)_{-}^*\gamma^0(\Phi)_{-}=\Psi^*A_{-}\gamma^0A_{-}\Phi=\frac{s^2}{t^2}\Psi^*\gamma^0\Phi,\quad\quad (\Psi)_{+}^*\gamma^0(\Phi)_{+}=\Psi^*A_{+}\gamma^0A_{+}\Phi=\frac{s^2}{t^2}\Psi^*\gamma^0\Phi.
\end{equation*}
Expanding $\Psi^{*}\gamma^0\Phi=\frac{1}{4}\big((\Psi)_{-}+(\Psi)_{+}\big)^*\gamma^0\big((\Phi)_{-}+(\Phi)_{+}\big)$ and replacing the two diagonal terms by the identities above yields \eqref{sC: Psi*Phi}.
\end{proof}
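The computation above uses only the Clifford relations $(\gamma^0)^2=I_2$, $(\gamma^a)^2=-I_2$ and the anticommutation of distinct gamma matrices. As a sanity check (added here purely for illustration and not part of the original argument), the identities $A_{\pm}\gamma^0A_{\pm}=\frac{s^2}{t^2}\gamma^0$ can be verified symbolically; the sketch below assumes the concrete representation $\gamma^0=\sigma_3$, $\gamma^1=\mathrm{i}\sigma_1$, $\gamma^2=\mathrm{i}\sigma_2$, a choice made only for this check.
\begin{verbatim}
# Symbolic check of A_- gamma^0 A_- = (s^2/t^2) gamma^0 (and likewise for A_+),
# using an assumed 2D representation gamma^0 = sigma_3, gamma^a = i sigma_a.
import sympy as sp

t = sp.symbols('t', positive=True)
x1, x2 = sp.symbols('x1 x2', real=True)
I2 = sp.eye(2)
g0 = sp.Matrix([[1, 0], [0, -1]])                 # sigma_3
g1 = sp.I * sp.Matrix([[0, 1], [1, 0]])           # i sigma_1
g2 = sp.I * sp.Matrix([[0, -sp.I], [sp.I, 0]])    # i sigma_2

A_minus = I2 - (x1/t)*g0*g1 - (x2/t)*g0*g2
A_plus  = I2 + (x1/t)*g0*g1 + (x2/t)*g0*g2
target  = ((t**2 - x1**2 - x2**2)/t**2) * g0      # (s^2/t^2) gamma^0

print(sp.simplify(A_minus*g0*A_minus - target))   # zero matrix
print(sp.simplify(A_plus*g0*A_plus - target))     # zero matrix
\end{verbatim}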
\subsection{Bootstrap assumptions}
Fix an integer $N\ge 3$. Suppose $\psi$ is the solution to \eqref{sC: 1.1s}. Let $C_1\gg 1$ and $0<\epsilon\ll C_1^{-1}$ be two constants to be chosen later and assume
\begin{equation}\label{sC: bs}
\sum_{|I|+|J|\le N}[\mathcal{E}^{\mathcal{H}}(s,\partial^I\hat{L}^J\psi)]^{\frac{1}{2}}\le C_1\epsilon,
\end{equation}
where $\epsilon$ measures the size of the initial data. We define
\begin{equation*}
s_1:=\sup\{s\in(s_0,\infty): \eqref{sC: bs}\ \mathrm{holds\ in\ }[s_0,s]\}.
\end{equation*}
We only need to prove that $s_1=+\infty$.
\begin{prop}(See \cite[Proposition 3.1]{DL})
Assume that \eqref{sC: bs} holds. Then for all $s\in[s_0,s_1)$, we have
\begin{equation}\label{sC: DL(3.3)}
\left\|\frac{s}{t}\partial^I\hat{L}^J\psi\right\|_{L^2_f(\mathcal{H}_s)}+\|(\partial^I\hat{L}^J\psi)_{-}\|_{L^2_f(\mathcal{H}_s)}\lesssim C_1\epsilon,\quad|I|+|J|\le N,
\end{equation}
\begin{equation}\label{sC: DL(3.4)}
\sup_{\mathcal{H}_s}\left(s|\partial^I\hat{L}^J\psi|+t|(\partial^I\hat{L}^J\psi)_{-}|\right)\lesssim C_1\epsilon,\quad|I|+|J|\le N-2.
\end{equation}
\end{prop}
Applying the vector fields $\partial^I\hat{L}^J$, $|I|+|J|\le N$, to both sides of \eqref{sC: 1.1s}, we obtain
\begin{equation*}
\mathcal{D}(\partial^I\hat{L}^J\psi)+m(\partial^I\hat{L}^J\psi)=\partial^I\hat{L}^JF(\psi)=\partial^I\hat{L}^J[(\psi^*H\psi)\psi].
\end{equation*}
By Proposition \ref{sC: EH}, we have
\begin{equation*}
\mathcal{E}^{\mathcal{H}}(s,\partial^I\hat{L}^J\psi)\le\mathcal{E}^{\mathcal{H}}(s_0,\partial^I\hat{L}^J\psi)+2\int_{s_0}^s\int_{\mathcal{H}_{\tau}}\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)^*\gamma^0(\partial^I\hat{L}^JF)|\,{\mathrm{d}}x\,{\mathrm{d}}\tau.
\end{equation*}
By Lemma \ref{sC: DL2.7}, we have
\begin{eqnarray*}
&&\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)^*\gamma^0(\partial^I\hat{L}^JF)|\\
&\lesssim&\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)^*_{-}\gamma^0(\partial^I\hat{L}^JF)_{+}|+\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)^*_{+}\gamma^0(\partial^I\hat{L}^JF)_{-}|+\frac{\tau^3}{t^3}|(\partial^I\hat{L}^J\psi)^*\gamma^0(\partial^I\hat{L}^JF)|\\
&\lesssim&\sum_{\substack{|I_1|+|I_2|\le|I|\\|J_1|+|J_2|\le|J|}}\left[\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)_{-}|\cdot|\partial^{I_1}L^{J_1}(\psi^*H\psi)|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|+\frac{\tau}{t}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}L^{J_1}(\psi^*H\psi)|\cdot|(\partial^{I_2}\hat{L}^{J_2}\psi)_{-}|\right]\\
&+&\sum_{\substack{|I_1|+|I_2|\le|I|\\|J_1|+|J_2|\le|J|}}\frac{\tau^3}{t^3}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}L^{J_1}(\psi^*H\psi)|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\\
&\lesssim&II_1+II_2+II_3.
\end{eqnarray*}
By \eqref{sC: DL(3.4)}, we have
\begin{eqnarray*}
II_1&\lesssim&\sum_{\substack{|I_1|+|I_2|+|I_3|\le|I|\\|J_1|+|J_2|+|J_3|\le|J|}}\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)_{-}|\cdot|\partial^{I_1}L^{J_1}\psi|\cdot|\partial^{I_2}L^{J_2}\psi|\cdot|\partial^{I_3}\hat{L}^{J_3}\psi|\\
&\lesssim&\sum_{\substack{|I_1|+|I_2|+|I_3|\le|I|\\|J_1|+|J_2|+|J_3|\le|J|}}\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)_{-}|\cdot|\partial^{I_1}\hat{L}^{J_1}\psi|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\cdot|\partial^{I_3}\hat{L}^{J_3}\psi|\\
&\lesssim&\sum_{\substack{|I_1|+|J_1|\le N-2\\|I_2|+|J_2|\le N-2\\
|I_3|+|J_3|\le N}}\frac{\tau}{t}|(\partial^I\hat{L}^J\psi)_{-}|\cdot|\partial^{I_1}\hat{L}^{J_1}\psi|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\cdot|\partial^{I_3}\hat{L}^{J_3}\psi|\\
&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\frac{1}{\tau t}|(\partial^I\hat{L}^J\psi)_{-}|\cdot|\partial^{I'}\hat{L}^{J'}\psi|,
\end{eqnarray*}
which implies
\begin{eqnarray*}
\|II_1\|_{L^1_f(\mathcal{H}_\tau)}&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\frac{1}{\tau^2}\left\|(\partial^I\hat{L}^J\psi)_{-}\right\|_{L^2_f(\mathcal{H}_\tau)}\cdot\left\|\frac{\tau}{t}\partial^{I'}\hat{L}^{J'}\psi\right\|_{L^2_f(\mathcal{H}_\tau)}\\
&\lesssim&(C_1\epsilon)^4\tau^{-2},
\end{eqnarray*}
where we use \eqref{sC: DL(3.3)}. Similarly,
\begin{eqnarray*}
II_2&\lesssim&\sum_{\substack{|I_1|+|I_2|+|I_3|\le|I|\\|J_1|+|J_2|+|J_3|\le|J|}}\frac{\tau}{t}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}L^{J_1}\psi|\cdot|\partial^{I_2}L^{J_2}\psi|\cdot|(\partial^{I_3}\hat{L}^{J_3}\psi)_{-}|\\
&\lesssim&\sum_{\substack{|I_1|+|J_1|\le N-2\\|I_2|+|J_2|\le N-2\\|I_3|+|J_3|\le N}}\frac{\tau}{t}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}\hat{L}^{J_1}\psi|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\cdot|(\partial^{I_3}\hat{L}^{J_3}\psi)_{-}|\\
&+&\sum_{\substack{|I_2|+|J_2|\le N-2\\|I_3|+|J_3|\le N-2\\
|I_1|+|J_1|\le N}}\frac{\tau}{t}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}\hat{L}^{J_1}\psi|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\cdot|(\partial^{I_3}\hat{L}^{J_3}\psi)_{-}|\\
&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\left[\frac{1}{\tau t}|\partial^I\hat{L}^J\psi|\cdot|(\partial^{I'}\hat{L}^{J'}\psi)_{-}|+\frac{1}{t^2}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I'}\hat{L}^{J'}\psi|\right],
\end{eqnarray*}
which implies
\begin{eqnarray*}
\|II_2\|_{L^1_f(\mathcal{H}_\tau)}&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\frac{1}{\tau^2}\left\|\frac{\tau}{t}\partial^{I}\hat{L}^{J}\psi\right\|_{L^2_f(\mathcal{H}_\tau)}\left[\|(\partial^{I'}\hat{L}^{J'}\psi)_{-}\|_{L^2_f(\mathcal{H}_\tau)}+\left\|\frac{\tau}{t}\partial^{I'}\hat{L}^{J'}\psi\right\|_{L^2_f(\mathcal{H}_\tau)}\right]\\
&\lesssim&(C_1\epsilon)^4\tau^{-2}.
\end{eqnarray*}
For $II_3$, we have
\begin{eqnarray*}
II_3&\lesssim&\sum_{\substack{|I_1|+|I_2|+|I_3|\le|I|\\|J_1|+|J_2|+|J_3|\le|J|}}\frac{\tau^3}{t^3}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}\hat{L}^{J_1}\psi|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\cdot|\partial^{I_3}\hat{L}^{J_3}\psi|\\
&\lesssim&\sum_{\substack{|I_1|+|J_1|\le N-2\\|I_2|+|J_2|\le N-2\\|I_3|+|J_3|\le N}}\frac{\tau^3}{t^3}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I_1}\hat{L}^{J_1}\psi|\cdot|\partial^{I_2}\hat{L}^{J_2}\psi|\cdot|\partial^{I_3}\hat{L}^{J_3}\psi|\\
&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\frac{\tau}{t^3}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I'}\hat{L}^{J'}\psi|\\
&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\frac{1}{t^2}|\partial^I\hat{L}^J\psi|\cdot|\partial^{I'}\hat{L}^{J'}\psi|,
\end{eqnarray*}
which yields
\begin{eqnarray*}
\|II_3\|_{L^1_f(\mathcal{H}_\tau)}&\lesssim&(C_1\epsilon)^2\sum_{|I'|+|J'|\le N}\frac{1}{\tau^2}\left\|\frac{\tau}{t}\partial^{I}\hat{L}^{J}\psi\right\|_{L^2_f(\mathcal{H}_\tau)}\cdot\left\|\frac{\tau}{t}\partial^{I'}\hat{L}^{J'}\psi\right\|_{L^2_f(\mathcal{H}_\tau)}\\
&\lesssim&(C_1\epsilon)^4\tau^{-2}.
\end{eqnarray*}
Combining these estimates, we obtain
\begin{equation*}
\sum_{|I|+|J|\le N}[\mathcal{E}^{\mathcal{H}}(s,\partial^I\hat{L}^J\psi)]^{\frac{1}{2}}\lesssim\epsilon+(C_1\epsilon)^2.
\end{equation*}
This strictly improves \eqref{sC: bs} if $C_1\gg 1$ and $0<\epsilon\ll C_1^{-1}$.
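To make the improvement quantitative (a routine continuity-argument step, spelled out here for clarity), let $C$ denote the implicit constant in the last estimate. Choosing $C_1\ge 4C$ and $0<\epsilon\le(4CC_1)^{-1}$ gives
\begin{equation*}
C\epsilon+C(C_1\epsilon)^2\le\frac{C_1\epsilon}{4}+\frac{C_1\epsilon}{4}=\frac{C_1\epsilon}{2}<C_1\epsilon,
\end{equation*}
so the bound \eqref{sC: bs} can never be saturated on $[s_0,s_1)$, and hence $s_1=+\infty$.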
We obtain that \eqref{sC: 1.1s} admits a global solution $\psi$ with small data $\psi_0$. In addition, the estimate \eqref{sC: DL(3.4)} holds for all $s\in[s_0,\infty)$. In particular, we deduce the following pointwise estimates
\begin{equation}\label{sC: DL(3.8)}
\sup_{\mathcal{H}_s}s|\psi(t,x)|\lesssim C_1\epsilon.
\end{equation}
For $(t,x)\in\mathcal{K}\cap\mathcal{H}_s$, we have $s\sim t^{\frac{1}{2}}(t-r)^{\frac{1}{2}}$, since $s^2=t^2-r^2=(t+r)(t-r)$ and $t\le t+r\le 2t$ in $\mathcal{K}$. It follows that
\begin{equation}\label{sC: DL(3.9)}
|\psi(t,x)|\lesssim\frac{C_1\epsilon}{t^{\frac{1}{2}}(t-r)^{\frac{1}{2}}}.
\end{equation}
We next derive a unified pointwise decay estimate for $m\in[0,1]$. Using the semi-hyperboloidal frame, we can rewrite \eqref{sC: 1.1s} as
\begin{equation*}
i\left(\gamma^0-\frac{x_a}{t}\gamma^a\right)\partial_t\psi+i\gamma^a\underline{\partial}_a\psi+m\psi=(\psi^*H\psi)\psi.
\end{equation*}
Since $\underline{\partial}_a=\frac{L_a}{t}$ and
$$\left(\gamma^0-\frac{x_a}{t}\gamma^a\right)\partial_t\psi=\gamma^0\left(I_2-\frac{x_a}{t}\gamma^0\gamma^a\right)\partial_t\psi=(\partial_t\psi)_{-},$$
the estimate \eqref{sC: DL(3.4)} gives
\begin{eqnarray*}
mt|\psi(t,x)|&\lesssim&t|(\partial_t\psi)_{-}|+|\hat{L}_a\psi|+|\psi|+s^2|\psi|^3\\
&\lesssim&C_1\epsilon+(C_1\epsilon)^3\lesssim C_1\epsilon.
\end{eqnarray*}
Combining with \eqref{sC: DL(3.9)}, we conclude that
\begin{equation}\label{sC: mpsi}
|\psi(t,x)|\lesssim\frac{C_1\epsilon}{t^{\frac{1}{2}}(t-r)^{\frac{1}{2}}+mt}.
\end{equation}
\end{comment}
\end{appendices}
\section*{Data Availability Statement}
Data sharing is not applicable to this article as no datasets were generated or analysed during the current study.
\begin{thebibliography}{99}
\bibitem{Al1} \label{Al1} Alinhac, S.:
\textit{The null condition for quasilinear wave equations in two space dimensions I,}
Invent. Math. $\mathbf{145}$ (3) (2001), 597-618.
\begin{comment}
\bibitem{Al2} \label{Al2} Alinhac, S.:
\textit{The null condition for quasilinear wave equations in two space dimensions II,}
Amer. J. Math. $\mathbf{123}$ (6) (2001), 1071-1101.
\end{comment}
\bibitem{A} \label{A} Alinhac, S.:
\textit{Hyperbolic Partial Differential Equations.}
Springer-Verlag, New York, 2009.
\bibitem{Ba} \label{Ba} Bachelot, A.:
\textit{Probl\`eme de Cauchy global pour des syst\`emes de Dirac-Klein-Gordon.}
Ann. Inst. H. Poincar\'e Phys. Th\'eor. $\mathbf{48}$ (4) (1988), 387-422.
\bibitem{BH} \label{BH} Bejenaru, I., Herr, S.:
\textit{The cubic Dirac equation: small initial data in $H^1(\mathbb{R}^3)$.}
Comm. Math. Phys. $\mathbf{335}$ (1) (2015), 43-82.
\bibitem{BH2} \label{BH2} Bejenaru, I., Herr, S.:
\textit{The cubic Dirac equation: small initial data in $H^{\frac{1}{2}}(\mathbb{R}^2)$.}
Comm. Math. Phys. $\mathbf{343}$ (2) (2016), 515-562.
\bibitem{Bo} Bournaveas, N.:
\textit{Low regularity solutions of the Dirac-Klein-Gordon equations in two space dimensions.}
Comm. Partial Differential Equations $\mathbf{26}$ (7-8) (2001), 1345-1366.
\bibitem{BC} Bournaveas, N., Candy, T.:
\textit{Global well-posedness for the massless cubic Dirac equation.}
Int. Math. Res. Not. IMRN 2016, no. 22, 6735-6828.
\begin{comment}
\bibitem{CD} \label{CD} Cacciafesta, F., D'Ancona, P.:
\textit{Endpoint estimates and global existence for the nonlinear Dirac equation with potential.}
J. Differential Equations $\mathbf{254}$ (5) (2013), 2233-2260.
\bibitem{Ca} \label{Ca} Candy, T.:
\textit{Global existence for an $L^2$ critical nonlinear Dirac equation in one dimension.}
Adv. Differential Equations $\mathbf{16}$ (7-8) (2011), 643-666.
\bibitem{Ch} \label{Ch} Christodoulou, D.:
\textit{Global solutions of nonlinear hyperbolic equations for small initial data,}
Commun. Pure Appl. Math. $\mathbf{39}$ (2) (1986), 267-282.
\bibitem{De} \label{De} Delgado, V.:
\textit{Global solutions of the Cauchy problem for the (classical) coupled Maxwell-Dirac and other nonlinear Dirac equations in one space dimension.}
Proc. Amer. Math. Soc. $\mathbf{69}$ (2) (1978), 289-296.
\bibitem{DF86} \label{DF86} Dias, J. P., Figueira, M.:
\textit{Time decay for the solutions of a nonlinear Dirac equation in one space dimension.}
Ricerche Mat. $\mathbf{35}$ (2) (1986), 309-316.
\bibitem{DF} Dias, J. P., Figueira, M.:
\textit{Global existence of solutions with small initial data in $H^s$ for the massive nonlinear Dirac equations in three space dimensions.}
Boll. Un. Mat. Ital. B (7) $\mathbf{1}$ (3) (1987), 861–874.
\end{comment}
\bibitem{D21} \label{D21} Dong, S.:
\textit{Global solution to the wave and Klein-Gordon system under null condition in dimension two.}
J. Funct. Anal. $\mathbf{281}$ (11) (2021), Paper No. 109232, 29 pp.
\bibitem{DL} \label{DL} Dong, S., Li, K.:
\textit{Global solution to the cubic Dirac equation in two space dimensions.}
arXiv:2111.04048
\bibitem{DLMY} \label{DLMY} Dong, S., Li, K., Ma, Y., Yuan, X.:
\textit{Global behavior of small data solutions for the $2D$ Dirac-Klein-Gordon equations.}
arXiv:2205.12000
\begin{comment}
\bibitem{DMY} \label{DMY} Dong, S., Ma, Y., Yuan, X.:
\textit{Asymptotic behavior of 2D wave-Klein-Gordon coupled system under null condition.}
arXiv:2202.08139
\end{comment}
\bibitem{DLW} \label{DLW} Dong, S., LeFloch, P.G., Wyatt, Z.:
\textit{Global evolution of the U(1) Higgs Boson: nonlinear stability and uniform energy bounds.}
Ann. Henri Poincar\'e $\mathbf{22}$ (3) (2021), 677-713.
\bibitem{DW} \label{DW} Dong, S., Wyatt, Z.:
\textit{Hidden structure and sharp asymptotics for the Dirac-Klein-Gordon system in two space dimensions.}
arXiv:2105.13780
\bibitem{Ev} \label{Ev} Escobedo, M., Vega, L.:
\textit{A semilinear Dirac equation in $H^s(\mathbb{R}^3)$ for $s>1$.}
SIAM J. Math. Anal. $\mathbf{28}$ (2) (1997), 338-362.
\begin{comment}
\bibitem{FL} \label{FL} Finkelstein, R., Lelevier, R., Ruderman, M.:
\textit{Non-linear spinor fields.}
Phys. Rev. (2) $\mathbf{83}$ (1951), 326-332.
\bibitem{FF} \label{FF} Finkelstein, R., Fronsdal, C., Kaus, P.:
\textit{Nonlinear spinor field.}
Phys. Rev. $\mathbf{103}$ (5) (1956), 1571-1579.
\end{comment}
\bibitem{Ge} \label{Ge} Georgiev, V.:
\textit{Decay estimates for the Klein-Gordon equation.}
Comm. Partial Differential Equations $\mathbf{17}$ (7-8) (1992), 1111-1139.
\begin{comment}
\bibitem{Ho88} \label{Ho88} H\"ormander, L.:
\textit{$L^1, L^\infty$ estimates for the wave operator.}
Analyse math\'ematique et applications, 211-234, Gauthier-Villars, Montrouge, 1988.
\end{comment}
\bibitem{Ho} \label{Ho} H\"ormander, L.:
\textit{Lectures on nonlinear hyperbolic differential equations.}
Math\'ematiques \& Applications (Berlin) [Mathematics \& Applications], vol. 26, Springer-Verlag, Berlin, 1997, viii+289 pp.
\begin{comment}
\bibitem{J79} \label{J79} John, F.:
\textit{Blow-up of solutions of nonlinear wave equations in three space dimensions,}
Manuscripta Math. $\mathbf{28}$ (1-3) (1979), 235-268.
\end{comment}
\bibitem{J81} \label{J81} John, F.:
\textit{Blow up of solutions for quasi-linear wave equations in three space dimensions,}
Commun. Pure Appl. Math. $\mathbf{34}$ (1) (1981), 29-51.
\bibitem{K85} \label{K85} Klainerman, S.:
\textit{Uniform decay estimates and the Lorentz invariance of the classical wave equation,}
Comm. Pure Appl. Math. $\mathbf{38}$ (3) (1985), 321-332.
\bibitem{K86} \label{K86} Klainerman, S.:
\textit{The null condition and global existence to nonlinear wave equations.}
Nonlinear systems of partial differential equations in applied mathematics, Part 1 (Santa Fe, N.M., 1984), 293-326, Lectures in Appl. Math., vol. 23, Amer. Math. Soc., Providence, RI, 1986.
\begin{comment}
\bibitem{K2} \label{K2} Klainerman, S.:
\textit{Global existence of small amplitude solutions to nonlinear Klein-Gordon equations in four spacetime dimensions.}
Commun. Pure Appl. Math. $\mathbf{38}$ (5) (1985), 631-641.
\bibitem{LM14} \label{LM14} LeFloch, P.G., Ma, Y.:
\textit{The hyperboloidal foliation method.}
Series in Applied and Computational Mathematics, World Scientific Press, Hackensack, NJ (2014).
\bibitem{LM} \label{LM} LeFloch, P.G., Ma, Y.:
\textit{The global nonlinear stability of Minkowski space for self-gravitating massive fields. The wave-Klein-Gordon model.}
Commun. Math. Phys. $\mathbf{346}$ (2016) (2), 603-665.
\end{comment}
\bibitem{LZ} \label{LZ} Li, T., Zhou, Y.:
\textit{Nonlinear wave equations.}
Vol. 2. Translated from the Chinese by Yachun Li. Series in Contemporary Mathematics, 2. Shanghai Science and Technical Publishers, Shanghai; Springer-Verlag, Berlin, 2017. xiv+391 pp.
\begin{comment}
\bibitem{Ma1} \label{Ma1} Ma, Y.:
\textit{Global solutions of quasilinear wave-Klein-Gordon system in two-space dimension: technical tools,}
J. Hyperbolic Differ. Equ. $\mathbf{14}$ (4) (2017), 591-625.
\bibitem{Ma2} \label{Ma2} Ma, Y.:
\textit{Global solutions of quasilinear wave-Klein-Gordon system in two-space dimension: completion of the proof,}
J. Hyperbolic Differ. Equ. $\mathbf{14}$ (4) (2017), 627-670.
\end{comment}
\bibitem{MNN} \label{MNN} Machihara, S., Nakamura, M., Nakanishi, K., Ozawa, T.:
\textit{Endpoint Strichartz estimates and global solutions for the nonlinear Dirac equation.}
J. Funct. Anal. $\mathbf{219}$ (1) (2005), 1-20.
\begin{comment}
\bibitem{MNO} \label{MNO} Machihara, S., Nakanishi, K., Ozawa, T.:
\textit{Small global solutions and the relativistic limit for the nonlinear Dirac equation.}
Rev. Math. Iberoamericana $\mathbf{19}$ (1) (2003), 179-194.
\bibitem{MNT} \label{MNT} Machihara, S., Nakanishi, K., Tsugawa, K.:
\textit{Well-posedness for nonlinear Dirac equations in one dimension.}
Kyoto J. Math. $\mathbf{50}$ (2) (2010), 403-451.
\bibitem{MO} \label{MO} Machihara, S., Omoso, T.:
\textit{The explicit solutions to the nonlinear Dirac equation and Dirac-Klein-Gordon equation.}
Ric. Mat. $\mathbf{56}$ (1) (2007), 19-30.
\end{comment}
\bibitem{P} \label{P} Pecher, H.:
\textit{Local well-posedness for the nonlinear Dirac equation in two space dimensions.}
Commun. Pure Appl. Anal. $\mathbf{13}$ (2), (2014), 673-685.
\begin{comment}
\bibitem{P2} \label{P2} Pecher, H.:
\textit{Corrigendum of "Local well-posedness for the nonlinear Dirac equation in two space dimensions".} Commun. Pure Appl. Anal., $\mathbf{14}$ (2), (2015), 737-742.
\bibitem{R} Ra\~nada, A.F.:
\textit{Classical nonlinear Dirac field models of extended particles.}
In Quantum Theory, Groups, Fields and Particles, D. Reidel, Amsterdam, 1982.
\end{comment}
\bibitem{S} \label{S} Sogge, C.D.:
\textit{Lectures on non-linear wave equations.}
Second edition. International Press, Boston, MA, 2008. x+205 pp.
\bibitem{So} \label{So} Soler, M.:
\textit{Classical, stable, nonlinear spinor field with positive rest energy.}
Phys. Rev. D $\mathbf{1}$ (1970), 2766-2769.
\begin{comment}
\bibitem{ST} \label{ST} Selberg, S., Tesfahun, A.:
\textit{Low regularity well-posedness for some nonlinear Dirac equations in one space dimension.}
Differential Integral Equations $\mathbf{23}$ (3-4) (2010), 265-278.
\bibitem{Tha} \label{Tha} Thaller, B.:
\textit{The Dirac Equation.}
Texts and Monographs in Physics. Berlin, Springer, 1992.
\end{comment}
\bibitem{T} \label{T} Thirring, W.E.:
\textit{A soluble relativistic field theory.}
Ann. Physics $\mathbf{3}$ (1) (1958), 91-112.
\bibitem{Tz} \label{Tz} Tzvetkov, N.:
\textit{Existence of global solutions to nonlinear massless Dirac system and wave equation with small data.}
Tsukuba J. Math. $\mathbf{22}$ (1) (1998), 193-211.
\begin{comment}
\bibitem{W} \label{W} Wakano, M.:
\textit{Intensely localized solutions of the classical Dirac-Maxwell field equations.}
Progr. Theoret. Phys. $\mathbf{35}$ (6) (1966), 1117-1141.
\end{comment}
\end{thebibliography}
Qian Zhang
School of Mathematical Sciences, Beijing Normal University
Laboratory of Mathematics and Complex Systems, Ministry of Education Beijing 100875, China
Email: [email protected]
\end{document}
\begin{document}
\title[When does a RAAG split over $\mathbb{Z}$?]{When does a right-angled
Artin group split over $\mathbb{Z}$?}
\author[M.~Clay]{Matt Clay}
\address{Department of Mathematical Sciences \\
University of Arkansas\\
Fayetteville, AR 72701}
\email{\href{mailto:[email protected]}{[email protected]}}
\begin{abstract}
We show that a right-angled Artin group, defined by a graph $\Gamma$
that has at least three vertices, does not split over an infinite
cyclic subgroup if and only if $\Gamma$ is biconnected. Further, we
compute JSJ--decompositions of 1--ended right-angled Artin groups
over infinite cyclic subgroups.
\end{abstract}
\date{\today}
\maketitle
\section{Introduction}\label{sec:intro}
Given a finite simplicial graph $\Gamma$, the right-angled Artin group
(RAAG) $A(\Gamma)$ is the group with generating set $\Gamma^0$, the
vertices of $\Gamma$, and with relations $[v,w] = 1$ whenever vertices
$v$ and $w$ span an edge in $\Gamma$. That is:
\begin{equation*}
A(\Gamma) = \langle \, \Gamma^0 \mid [v,w] = 1 \, \forall v,w \in \Gamma^0
\mbox{ that span an edge in } \Gamma \, \rangle
\end{equation*}
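For example, if $\Gamma$ has no edges then $A(\Gamma)$ is the free group on $\Gamma^0$, and if $\Gamma$ is a complete graph then $A(\Gamma)$ is free abelian of rank $|\Gamma^0|$. For the path with vertices $a,b,c$ and edges spanned by $\{a,b\}$ and $\{b,c\}$, the vertex $b$ commutes with both $a$ and $c$ and hence is central, and $A(\Gamma) \cong F_2 \times \mathbb{Z}$, where $F_2 = \langle a,c \rangle$ and the $\mathbb{Z}$ factor is generated by $b$.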
Right-angled Artin groups, simple to define, are at the focal point of
many recent developments in low-dimensional topology and geometric
group theory. This is in part due to the richness of their subgroups,
in part due to their interpretation as an interpolation between free
groups and free abelian groups and also in part due to the frequency
at which they arise as subgroups of geometrically defined groups.
Recent work of Agol, Wise and Haglund regarding the Virtual Haken
Conjecture shows a deep relationship between 3--manifold groups and
right-angled Artin
groups~\cite{ar:Agol13,ar:HW08,ar:HW12,un:Wise,bk:Wise12}.
One of the results of this paper computes JSJ--decompositions for
1--ended right-angled Artin groups. This decomposition is a special
type of graph of groups decomposition over infinite cyclic subgroups,
generalizing to the setting of finitely presented groups a tool from
the theory of 3--manifolds. So to begin, we are first concerned with
understanding when a right-angled Artin group splits over an infinite
cyclic subgroup. Recall, a group $G$ \emph{splits} over a subgroup
$Z$ if $G$ can be decomposed as an amalgamated free product $G =
A\ast_Z B$ with $A \neq Z \neq B$ or as an HNN-extension $G = A\ast_Z$.
Suppose $\Gamma$ is a finite simplicial graph. A subgraph $\Gamma_1
\subseteq \Gamma$ is \emph{induced} if two vertices of $\Gamma_1$ span an
edge in $\Gamma_1$ whenever they span an edge in $\Gamma$. If
$\Gamma_1 \subseteq \Gamma$ is an induced subgraph, then the natural map
induced by subgraph inclusion $A(\Gamma_1) \to A(\Gamma)$ is
injective. A vertex $v \in \Gamma^0$ is a \emph{cut vertex} if the
induced subgraph spanned by the vertices $\Gamma^0 - \{v\}$ has more
connected components than $\Gamma$. A graph $\Gamma$ is
\emph{biconnected} if for each vertex $v \in \Gamma^0$, the induced
subgraph spanned by the vertices $\Gamma^0 - \{v\}$ is connected. In
other words, $\Gamma$ is biconnected if $\Gamma$ is connected and does not contain a cut vertex.
Note, $K_2$, the complete graph on two vertices, is biconnected.
\begin{remark}\label{rem:cut vertex}
There is an obvious sufficient condition for a right-angled Artin
group to split over a subgroup isomorphic to $\mathbb{Z}$. (In what follows
we will abuse notation and simply say that the group splits over
$\mathbb{Z}$.) Namely, if a finite simplicial graph $\Gamma$ contains two
proper induced subgraphs $\Gamma_1,\Gamma_2 \subset \Gamma$ such that
$\Gamma_1 \cup \Gamma_2 = \Gamma$ and $\Gamma_1 \cap \Gamma_2 = v
\in \Gamma^0$, then $A(\Gamma)$ splits over $\mathbb{Z}$. Indeed, in this
case we have $A(\Gamma) = A(\Gamma_1) \ast_{A(v)} A(\Gamma_2)$.
If $\Gamma$ has at least three vertices, such subgraphs exist if and
only if $\Gamma$ is disconnected or has a cut vertex, i.e., $\Gamma$
is not biconnected.
\end{remark}
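For a concrete instance, take $\Gamma$ to be the path with vertices $a,b,c$: the vertex $b$ is a cut vertex, and with $\Gamma_1$ and $\Gamma_2$ the edges spanned by $\{a,b\}$ and $\{b,c\}$ we obtain $A(\Gamma) = A(\Gamma_1) \ast_{A(b)} A(\Gamma_2) \cong \mathbb{Z}^2 \ast_{\mathbb{Z}} \mathbb{Z}^2$.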
Our first theorem, proved in Section~\ref{sec:split}, states that this
condition is necessary as well.
\begin{alphatheorem}[$\mathbb{Z}$--splittings of RAAGs]\label{thmA}
Suppose $\Gamma$ is a finite simplicial graph that has at least three
vertices. Then $\Gamma$ is biconnected if and only if $A(\Gamma)$
does not split over $\mathbb{Z}$.
\end{alphatheorem}
If $\Gamma$ has one vertex, then $A(\Gamma) \cong \mathbb{Z}$, which does not
split over $\mathbb{Z}$. If $\Gamma$ has two vertices, then $A(\Gamma) \cong
F_2$ or $A(\Gamma) \cong \mathbb{Z}^2$, both of which do split over $\mathbb{Z}$ as
HNN-extensions.
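Explicit such splittings are easy to write down. The group $\mathbb{Z}^2 = \langle a, b \mid [a,b] = 1 \rangle$ is the HNN-extension of $\langle a \rangle \cong \mathbb{Z}$ over itself with stable letter $b$ acting as the identity. For $F_2 = \langle u, t \rangle$, the subgroup $A = \langle u, tut^{-1} \rangle$ is free of rank two and $F_2$ is the HNN-extension of $A$ over $\mathbb{Z} \cong \langle u \rangle$ with stable letter $t$ carrying $u$ to $tut^{-1}$.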
\begin{remark}
\label{rem:1-ended}
We recall for the reader the characterization of splittings of right-angled Artin groups over the trivial subgroup. Suppose $\Gamma$ is a finite simplicial graph with at least two vertices. Then $\Gamma$ is connected if and only if $A(\Gamma)$ is freely indecomposable, equivalently 1--ended. See for instance~\cite{ar:BM01}.
\end{remark}
In Section~\ref{sec:jsj}, for 1--ended right-angled Artin groups
$A(\Gamma)$ we describe a certain graph of groups decomposition,
$\mathcal{J}(\Gamma)$, with infinite cyclic edge groups. The base graph for
$\mathcal{J}(\Gamma)$ is defined by considering the biconnected components
of $\Gamma$, taking special care with the $K_2$ components that
contain a valence one vertex from the original graph $\Gamma$. Our
second theorem shows that this decomposition is a JSJ--decomposition.
\begin{alphatheorem}[JSJ--decompositions of RAAGs]\label{thmB}
Suppose $\Gamma$ is a connected finite simplicial graph that has at
least three vertices. Then $\mathcal{J}(\Gamma)$ is a
JSJ--decomposition for $A(\Gamma)$.
\end{alphatheorem}
\section{Splittings of RAAGs over $\mathbb{Z}$}\label{sec:split}
This section contains the proof of Theorem~\ref{thmA}. The outline is
as follows. First, we will exhibit a family of right-angled Artin
groups that do not split over $\mathbb{Z}$. Then we will show how if
$A(\Gamma)$ is sufficiently covered by subgroups that do not split
over $\mathbb{Z}$, then neither does $A(\Gamma)$. Finally, we will show how to
find enough subgroups to sufficiently cover $A(\Gamma)$ when $\Gamma$
has at least three vertices and is biconnected.
\subsection*{Property $\mathbf{F}(\mathcal{H})$} We begin by recalling some basic notions about group actions on trees, see \cite{bk:Serre03} for proofs. In what follows, all trees are simplicial and all actions are without inversions, that is $ge \neq \bar{e}$ for all $g \in G$ and edges $e$. When a group $G$ acts on a tree $T$, the \emph{length} of an element $g \in G$ is $|g| = \inf \{ d_{T}(x,gx) \mid x \in T \}$ and the
\emph{characteristic
subtree} is $T_g = \{ x \in T \mid d_{T}(x,gx) = |g| \}$. The characteristic subtree is always non-empty. If $|g| = 0$, then $g$ is said to be \emph{elliptic} and $T_{g}$ consists of the set of fixed points. Else, $|g| > 0$ and $g$ is said to be \emph{hyperbolic}, in which case $T_{g}$ is a linear subtree, called the \emph{axis} of $g$, and $g$ acts on $T_{g}$ as a translation by $|g|$.
The following property puts some
control over the subgroups that a given group can split over.
\begin{definition}\label{def:FH}
Suppose $\mathcal{H}$ is a collection of groups. We say a group $G$
\emph{has property} $\mathbf{F}(\mathcal{H})$ if whenever $G$ acts on a tree,
then either there is a global fixed point or $G$ has a subgroup
isomorphic to some group in $\mathcal{H}$ that fixes an edge.
If $\mathcal{H} = \{ H \}$ we will write $\mathbf{F}(H)$.
\end{definition}
\begin{remark}\label{rem:FH}
Bass--Serre theory~\cite{bk:Serre03} implies that if $G$ has
property $\mathbf{F}(\mathcal{H})$ and $G$ splits over a subgroup $Z$, then $Z$
has a subgroup isomorphic to some group in $\mathcal{H}$.
\end{remark}
For the sequel we consider the collection $\mathcal{H} = \{ F_2,\mathbb{Z}^2\}$,
where $F_2$ is the free group of rank 2. We can reformulate the
question posed in the title using the following proposition.
\begin{proposition}\label{prop:Fz2f2}
Suppose $\Gamma$ is a finite simplicial graph that has at least three
vertices. Then $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$ if and only
if $A(\Gamma)$ does not split over $\mathbb{Z}$.
\end{proposition}
\begin{proof}
Bass--Serre theory (Remark~\ref{rem:FH}) implies that if $A(\Gamma)$ has
property $\mathbf{F}(\mathcal{H})$ then $A(\Gamma)$ does not split over $\mathbb{Z}$.
Conversely, suppose that $A(\Gamma)$ does not split over $\mathbb{Z}$ and $A(\Gamma)$ acts
on a tree $T$ without a global fixed point. The stabilizer of any edge is non-trivial: otherwise $A(\Gamma)$ would split over the trivial group and hence be freely decomposable, and freely decomposable right-angled Artin groups whose defining graphs have at least three vertices split over $\mathbb{Z}$ (Remarks~\ref{rem:cut vertex} and~\ref{rem:1-ended}), contrary to our assumption.
We claim the stabilizer
of any edge contains two elements that do not generate a cyclic
group. As a subgroup generated by two elements in a right-angled
Artin group is either abelian or isomorphic to $F_2$
\cite{ar:Baudisch81}, this shows that $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$.
To prove the claim, let $Z$ denote the stabilizer of some edge of
$T$ and suppose $\langle g, h \rangle \cong \mathbb{Z}$ for all $g,h \in Z$. Thus
$Z$ is abelian. Since abelian subgroups of right-angled Artin groups
are finitely generated (as the Salvetti complex is a finite $K(A(\Gamma),1)$~\cite{col:CD95}) we have $Z \cong \mathbb{Z}$. But this contradicts
our assumption that $A(\Gamma)$ does not split over $\mathbb{Z}$.
\end{proof}
Thus we are reduced to proving that property $\mathbf{F}(\mathcal{H})$ is
equivalent to biconnectivity for right-angled Artin groups whose
defining graph has at least three vertices.
\subsection*{A family of right-angled Artin groups that do not split over $\mathbb{Z}$}
The following simple lemma of Culler--Vogtmann relates the
characteristic subtrees of commuting elements. As the proof is short,
we reproduce it here.
\begin{lemma}[{Culler--Vogtmann~\cite[Lemma~1.1]{ar:CV96}}]\label{lem:cv}
Suppose a group $G$ acts on a tree $T$ and let $g$ and $h$ be
commuting elements. Then the characteristic subtree of $g$ is
invariant under $h$. In particular, if $h$ is hyperbolic, then the
characteristic subtree of $g$ contains $T_h$.
\end{lemma}
\begin{proof}
As $h(T_g) = T_{hgh^{-1}}$ if $g$ and $h$ commute then $h(T_g) =
T_g$. If $h$ is hyperbolic, then every $h$--invariant subtree
contains $T_h$.
\end{proof}
\begin{corollary}
\label{cor:cv}
If $\mathbb{Z}^{2}$ acts on a tree without a global fixed point, then for any basis $\{g,h\}$, one of the elements must act hyperbolically.
\end{corollary}
\begin{proof}
Suppose that both $g$ and $h$ are elliptic.
As $hT_{h} = T_{h}$ and $hT_{g} = T_{g}$ by Lemma~\ref{lem:cv}, the unique segment connecting $T_{g}$ to $T_{h}$ is fixed by $h$ and hence contained in $T_{h}$. In other words $T_{g} \cap T_{h} \neq \emptyset$ and therefore there is a global fixed point.
\end{proof}
Recall that a \emph{Hamiltonian cycle} in a graph is an embedded cycle that visits each vertex exactly once.
\begin{lemma}
\label{lem:Hamiltonian}
If $\Gamma$ is a finite simplicial graph with at least three vertices that contains a Hamiltonian cycle, then $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$.
\end{lemma}
\begin{proof}
Enumerate the vertices of $\Gamma$ cyclically along the Hamiltonian cycle by
$v_1,\ldots,v_{n}$. Notice that $G_i = \langle v_i,v_{i+1} \rangle \cong \mathbb{Z}^2$ for all $1 \leq i \leq n$ where the indices are taken modulo $n$.
Suppose that $A(\Gamma)$ acts on a tree $T$ without a global fixed point. Further suppose that $G_i$ does not fix an edge, for all $1 \leq i \leq n$.
There are now two cases.
\noindent {\it Case I\textup{:} Each $G_i$ fixes a point.} The point fixed by $G_i$ is unique as $G_i$ does not fix an edge, denote it $p_i$. If the points $p_i$ are all the same, then there is a global fixed point, contrary to the hypothesis. Consider the subtree $S \subset T$ spanned by the $p_i$. Let $p$ be an extremal vertex of $S$. There is a non-empty proper subset $P \subset \{1,\ldots,n\}$ such that $p = p_i$ if and only if $i \in P$. Let
$i_1,j_0 \in P$ be such that the indices $i_0 = i_1 - 1 \mod n$ and $j_1 = j_0 + 1 \mod n$ do not lie in $P$. See Figure~\ref{fig:Cn-I}. It is possible that $i_1 = j_0$ or $i_0 = j_1$.
\begin{figure}
\caption{A portion of the subtree $S \subset T$ in Case I of
Lemma~\ref{lem:Hamiltonian}.}\label{fig:Cn-I}
\end{figure}
The element $v_{i_1} \in G_{i_0} \cap G_{i_1}$ stabilizes the non-degenerate segment $[p,p_{i_0}]$ and the element $v_{j_1} \in G_{j_0} \cap G_{j_1}$ stabilizes the non-degenerate segment $[p,p_{j_1}]$. As $p$ is extremal, these segments overlap and thus $\langle v_{i_1},v_{j_1} \rangle$ fixes an edge in $T$. This subgroup is isomorphic to either $F_2$ or $\mathbb{Z}^2$.
\noindent {\it Case II\textup{:} Some $G_i$ does not fix a point.} Without loss of generality, we can assume that $G_1$ does not fix a point and by Corollary~\ref{cor:cv} that $v_2$ acts hyperbolically. By Lemma~\ref{lem:cv}, $v_1$ leaves $T_{v_2}$ invariant and so there are integers $k_1,k_2$, where $k_1 \neq 0$, such that $v_1^{k_1}v_2^{k_2}$ fixes $T_{v_2}$. Likewise there are integers $\ell_2,\ell_3$, where $\ell_3 \neq 0$ such that $v_2^{\ell_2}v_3^{\ell_3}$ fixes $T_{v_2}$. Hence $\langle v_1^{k_1}v_2^{k_2},v_2^{\ell_2}v_3^{\ell_3} \rangle$ fixes $T_{v_2}$, in particular, this subgroup fixes an edge. This subgroup is isomorphic to either $F_{2}$ or $\mathbb{Z}^{2}$.
In either case, we have found a subgroup isomorphic to either $F_2$ or $\mathbb{Z}^2$ that fixes an edge. Hence $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$.
\end{proof}
\subsection*{Promoting property $\mathbf{F}(\mathcal{H})$}
We now show how to promote property $\mathbf{F}(\mathcal{H})$ to $A(\Gamma)$ if
enough subgroups have property $\mathbf{F}(\mathcal{H})$.
\begin{proposition}
\label{prop:decompose}
Suppose $\Gamma$ is a connected finite simplicial graph with at least three vertices and suppose that there is a collection $\mathcal{G}$ of induced subgraphs $\Delta \subset \Gamma$ such that:
\begin{enumerate}
\item for each $\Delta \in \mathcal{G}$, $A(\Delta)$ has property
$\mathbf{F}(\mathcal{H})$, and\label{enum:FH}
\item each two edge segment of $\Gamma$ is contained in some $\Delta
\in \mathcal{G}$.\label{enum:two edge}
\end{enumerate}
Then $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$.
\end{proposition}
\begin{proof}
Suppose $A(\Gamma)$ acts on a tree $T$ without a global fixed point.
If for some $\Delta \in \mathcal{G}$, the subgroup $A(\Delta)$ does not have a fixed point, then by \eqref{enum:FH}, $A(\Delta)$, and hence $A(\Gamma)$, contains a subgroup isomorphic to either $F_2$ or $\mathbb{Z}^2$ that fixes an edge. Therefore, we assume that each $A(\Delta)$ has a fixed point. In particular, each vertex of $\Gamma$ acts elliptically in $T$. Also, given three vertices $u, v, w \in \Gamma^{0}$, such that $u$ and $v$ span an edge as do $v$ and $w$, the subgroup $\langle u, v, w \rangle$ by \eqref{enum:two edge} is contained in some $A(\Delta)$ and hence has a fixed point. We further may assume the fixed point of such a subgroup $\langle u,v,w \rangle$ to be unique for else $\langle u, v \rangle \cong \mathbb{Z}^{2}$ fixes an edge.
As there is no global fixed point, there are vertices $v, v' \in \Gamma^{0}$ that do not share a fixed point. Consider a path from $v$ to $v'$ and enumerate the vertices along this path $v = v_{1},\ldots,v_{n} = v'$. If for some $1 < i < n-1$, the fixed point of $\langle v_{i-1},v_{i},v_{i+1} \rangle$ is different from that of $\langle v_{i}, v_{i+1}, v_{i+2} \rangle$, then $\langle v_{i},v_{i+1} \rangle \cong \mathbb{Z}^{2}$ fixes an edge as this subgroup stabilizes the non-degenerate segment between the fixed points. If the fixed points are all the same then $v$ and $v'$ have a common fixed point, contrary to our assumptions.
\end{proof}
\subsection*{Proof of Theorem~\ref{thmA}}
Theorem~\ref{thmA} follows from Proposition~\ref{prop:Fz2f2} and the
following proposition.
\begin{proposition}
\label{prop:biconnected}
Suppose $\Gamma$ is a finite simplicial graph that has at least three vertices. Then $\Gamma$ is biconnected if and only if $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$.
\end{proposition}
\begin{proof}
Suppose $\Gamma$ is biconnected. Consider the collection $\mathcal{G}$ of induced subgraphs $\Delta \subseteq \Gamma$ with at least three vertices that contain a Hamiltonian cycle. By Lemma~\ref{lem:Hamiltonian}, each $\Delta \in \mathcal{G}$ has property $\mathbf{F}(\mathcal{H})$.
Consider vertices $u,v,w \in \Gamma^0$ such that $u$ and $v$ span an edge $e$ and $v$ and $w$ span an edge $e'$. As $\Gamma$ is biconnected, there is an edge path from $u$ to $w$ that avoids $v$. Let $\rho$ be the shortest such path and let $\Delta$ be the induced subgraph of $\Gamma$ spanned by $v$ and vertices of $\rho$. The cycle $e \cup e' \cup \rho$ is a Hamiltonian cycle in $\Delta$ and hence $\Delta \in \mathcal{G}$. The two edge segment $e \cup e'$ is contained in $\Delta$ by construction.
Hence using the collection $\mathcal{G}$, Proposition~\ref{prop:decompose} implies that $A(\Gamma)$ has property $\mathbf{F}(\mathcal{H})$.
Conversely, if $\Gamma$ is not biconnected, then $A(\Gamma)$ splits over $\mathbb{Z}$ and hence does not have property $\mathbf{F}(\mathcal{H})$ (Remark~\ref{rem:cut vertex} and Proposition~\ref{prop:Fz2f2}).
\end{proof}
\section{JSJ--decompositions of 1--ended RAAGs}\label{sec:jsj}
We now turn our attention towards understanding all $\mathbb{Z}$--splittings
of a 1--ended right-angled Artin group. These are exactly the groups
$A(\Gamma)$ with $\Gamma$ connected and having at least two vertices (Remark~\ref{rem:1-ended}).
The technical tool used for understanding splittings over some class
of subgroups are \emph{JSJ--decompositions}. There are several
loosely equivalent formulations of the notion of a JSJ--decomposition
of a finitely presented group, originally defined in this setting and
whose existence was shown by Rips--Sela~\cite{ar:RS97}. Alternative
accounts and extensions were provided by
Dunwoody--Sageev~\cite{ar:DS99}, Fujiwara--Papasogalu~\cite{ar:FP06}
and Guirardel--Levitt~\cite{un:GL}.
We have chosen to use Guirardel and Levitt's formulation of a
JSJ--decomposition as it avoids many of the technical definitions
necessary for the other formulations---most of which have no real
significance in the current setting---and as it is particularly easy
to verify in the current setting.
In this section we describe a JSJ--decomposition for a 1--ended
right-angled Artin group (Theorem~\ref{thmB}). It is straightforward
to verify, given the arguments that follow, that the described graph
of groups decomposition is a JSJ--decomposition in the other
formulations as well.
\subsection*{JSJ--decompositions \`a la Guirardel and Levitt} The
defining property of a JSJ--decomposition is that it gives a
parametrization of all splittings of a finitely presented group $G$
over some special class of subgroups, here the subgroups considered
are infinite cyclic. The precise definition is as follows.
Suppose $\mathcal{A}$ is a class of subgroups of $G$ that is closed under
taking subgroups and that is invariant under conjugation. An
\emph{$\mathcal{A}$--tree} is a tree with an action of $G$ such that every
edge stabilizer is in $\mathcal{A}$. An $\mathcal{A}$--tree is \emph{universally
elliptic} if its edge stabilizers are elliptic, i.e., have a fixed
point, in every $\mathcal{A}$--tree.
\begin{definition}[{\cite[Definition~2]{un:GL}}]\label{def:jsj}
A \emph{JSJ--tree} of $G$ over $\mathcal{A}$ is a universally elliptic
$\mathcal{A}$--tree $T$ such that if $T'$ is a universally elliptic
$\mathcal{A}$--tree then there is a $G$--equivariant map $T \to T'$,
equivalently, every vertex stabilizer of $T$ is elliptic in every
universally elliptic $\mathcal{A}$--tree. The associated graph of group
decomposition is called a \emph{JSJ--decomposition}.
\end{definition}
We will now describe what will be shown to be the JSJ--decomposition
of a 1--ended right-angled Artin group.
Suppose $\Gamma$ is a connected finite simplicial graph with at least
three vertices. By $B_\Gamma$ we denote the \emph{block tree}, that
is, the bipartite tree with vertices either corresponding to cut
vertices of $\Gamma$ (black) or bicomponents of $\Gamma$, i.e.,
maximal biconnected induced subgraphs of $\Gamma$, (white) with an edge
between a black and a white vertex if the corresponding cut vertex
belongs to the bicomponent. See Figure~\ref{fig:jsj} for some
examples.
For a black vertex $x \in B_\Gamma^0$, denote by $v_x$ the
corresponding cut vertex of $\Gamma$. For a white vertex $x \in
B_\Gamma^0$, denote by $\Gamma_x$ the corresponding bicomponent of
$\Gamma$. A white vertex $x \in B_\Gamma^0$ is called \emph{toral} if
$\Gamma_x \cong K_2$, the complete graph on two vertices. A toral
vertex $x \in B_\Gamma$ that has valence one in $B_\Gamma$ is called
\emph{hanging}.
Associated to $\Gamma$ and $B_\Gamma$ is a graph of groups
decomposition of $A(\Gamma)$, denoted $\mathcal{J}_0(\Gamma)$. The base
graph of $\mathcal{J}_0(\Gamma)$ is obtained from $B_\Gamma$ by attaching a
one-edge loop to each hanging vertex. The vertex group of a black
vertex $x \in B_\Gamma^0$ is $G_x = A(v_x) \cong \mathbb{Z}$. The vertex
group of a non-hanging white vertex $x \in B_\Gamma$ is $G_x =
A(\Gamma_x)$. The vertex group of a hanging vertex $x \in B_\Gamma$
is $G_x = A(v)$ where $v \in \Gamma_x^0$ is the vertex that has
valence more than one in $\Gamma$. Notice, in this latter case $v$ is
a cut vertex of $\Gamma$. For an edge $e = [x,y] \subseteq B_\Gamma$
with $x$ black we set $G_e = A(v_x) \cong \mathbb{Z}$ with inclusion maps
given by subgraph inclusion. If $e$ is a one-edge loop adjacent to a
hanging vertex $x$, we set $G_e = G_x$ where the two inclusion maps
are isomorphisms and the stable letter corresponding to the loop is
$w$ where $w \in \Gamma_x^0$ is the vertex that has valence one in
$\Gamma$.
By collapsing an edge adjacent to each valence two black vertex we
obtain a graph of groups decomposition of $A(\Gamma)$, which
we denote $\mathcal{J}(\Gamma)$. It is not necessary for what follows, but we remark that the graph is groups $\mathcal{J}(\Gamma)$ is \emph{reduced \textup{(}in the sense of Bestvina--Feighn~\cite{ar:BF91}\textup{)}}, that is, for each vertex of valence less than three the edge groups are proper subgroups of the vertex group. This property is required for a JSJ--decomposition as defined by Rips--Sela. Observe that all edge groups of $\mathcal{J}(\Gamma)$ are of the form $A(v)$ for some vertex $v \in \Gamma^0$ and in particular maximal infinite cyclic subgroups. By $T_{\mathcal{J}(\Gamma)}$ we denote the associated Bass--Serre tree.
\begin{example}\label{ex:jsj}
Examples of $B_\Gamma$, $\mathcal{J}_0(\Gamma)$ and $\mathcal{J}(\Gamma)$ for
two different graphs are shown in Figure~\ref{fig:jsj}. We have
$A(\Gamma_1) \cong F_3 \times \mathbb{Z}$. The graph of groups
decomposition $\mathcal{J}_0(\Gamma_1)$ is already reduced so
$\mathcal{J}(\Gamma_1) = \mathcal{J}_0(\Gamma_1)$. In $\mathcal{J}(\Gamma_1)$ all of
the vertex and edge groups are infinite cyclic and all inclusion
maps are isomorphisms. Considering the other example,
$\mathcal{J}(\Gamma_2)$ corresponds to the graph of groups decomposition
$A(\Gamma_2) = \mathbb{Z}^3 \ast_\mathbb{Z} \mathbb{Z}^2 \ast_\mathbb{Z} \mathbb{Z}^3$ where the
inclusion maps have image a primitive vector and the images in
$\mathbb{Z}^2$ constitute a basis of $\mathbb{Z}^2$.
\begin{figure}
\caption{Examples of $B_\Gamma$, $\mathcal{J}_0(\Gamma)$ and $\mathcal{J}(\Gamma)$ for two different graphs.}\label{fig:jsj}
\end{figure}
\end{example}
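The criterion in Theorem~\ref{thmA} and the bicomponent structure underlying $\mathcal{J}(\Gamma)$ are easy to check by computer for a given defining graph. The following sketch (included only as an illustration; it is not used in any of the proofs) relies on the Python package \texttt{networkx}; the graph built below is a hypothetical example of the shape discussed in Example~\ref{ex:jsj}, namely two triangles joined by a single edge.
\begin{verbatim}
import networkx as nx

# Hypothetical defining graph: two triangles joined by a single edge.
Gamma = nx.Graph()
Gamma.add_edges_from([(1, 2), (2, 3), (3, 1),     # first triangle
                      (3, 4),                     # connecting edge
                      (4, 5), (5, 6), (6, 4)])    # second triangle

# Theorem A: A(Gamma) splits over Z iff Gamma (>= 3 vertices) is not biconnected.
print("biconnected:", nx.is_biconnected(Gamma))                    # False
print("cut vertices:", sorted(nx.articulation_points(Gamma)))      # [3, 4]
# The bicomponents carry the white vertex groups of J(Gamma).
print("bicomponents:", [sorted(c) for c in nx.biconnected_components(Gamma)])
\end{verbatim}
Here the defining graph is connected but not biconnected, so $A(\Gamma)$ splits over $\mathbb{Z}$, and the three bicomponents (a triangle, a $K_2$ and a triangle) carry the vertex groups $\mathbb{Z}^3$, $\mathbb{Z}^2$ and $\mathbb{Z}^3$ of $\mathcal{J}(\Gamma)$, as in Example~\ref{ex:jsj}.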
\subsection*{Proof of Theorem~\ref{thmB}} Theorem~\ref{thmB} follows
immediately from the following lemma.
\begin{lemma}\label{lem:Jvertex-elliptic}
Suppose $\Gamma$ is a connected finite simplicial graph that has at
least three vertices and let $\mathcal{A}$ be the collection of all cyclic
subgroups of $A(\Gamma)$. Every vertex stabilizer of
$T_{\mathcal{J}(\Gamma)}$ is elliptic in every $\mathcal{A}$--tree.
In particular, every edge stabilizer of $T_{\mathcal{J}(\Gamma)}$ is
elliptic in every $\mathcal{A}$--tree and so $T_{\mathcal{J}(\Gamma)}$ is
universally elliptic and every vertex stabilizer of
$T_{\mathcal{J}(\Gamma)}$ is elliptic in every universally elliptic
$\mathcal{A}$--tree.
\end{lemma}
\begin{proof}
Let $T$ be an $\mathcal{A}$--tree. As $A(\Gamma)$ is 1--ended, every edge
stabilizer of $T$ is infinite cyclic. As the vertex groups of a
black vertex is a subgroup of the vertex group of some white vertex,
we only need to consider white vertices. The vertex group of every
non-toral vertex of $\mathcal{J}(\Gamma)$ is elliptic by
Proposition~\ref{prop:biconnected}.
Let $x \in B_\Gamma$ be a non-hanging toral vertex. Denote the
vertices of $\Gamma_x \cong K_2$ by $v_1$ and $v_2$. Then there are
vertices $w_1, w_2 \in \Gamma^0$ such that $[v_i, w_j] = 1$ if and
only if $i = j$. In other words, the vertices $w_1,v_1,v_2,w_2$
span an induced subgraph of $\Gamma$ that is isomorphic to the path
graph with three edges.
If $v_1 \in G_x = A(\Gamma_x) \cong \mathbb{Z}^2$ acts hyperbolically, then
by Lemma~\ref{lem:cv} the characteristic subtree of both $w_1$ and
$v_2$ contains $T_{v_1}$, the axis of $v_1$. As in the proof of
Lemma~\ref{lem:Hamiltonian}, we find integers $k_0,k_1,\ell_0,\ell_1$ with
$k_1, \ell_1 \neq 0$ such that $\langle v_1^{k_0}w_1^{k_1},
v_1^{\ell_0}v_2^{\ell_1} \rangle \cong F_2$ fixes $T_{v_1}$ and hence
fixes an edge. As every edge stabilizer of $T$ is infinite cyclic,
this shows that $v_1$ must have a fixed point. By symmetry $v_2$
must also have a fixed point. Since $A(\Gamma_x) = \langle v_1,v_2 \rangle
\cong \mathbb{Z}^2$, by Corollary~\ref{cor:cv} this implies that $A(\Gamma_x)$ acts elliptically.
Finally, let $x \in B_\Gamma$ be a hanging vertex. Either $G_x$ is a subgroup of some non-hanging white vertex subgroup and so $G_x$ acts elliptically by the above argument, or $A(\Gamma) \cong F_n \times \mathbb{Z}$ for $n \geq 2$ where $G_x$ is the $\mathbb{Z}$ factor as is the case for $\Gamma_1$ in Example~\ref{ex:jsj}. In the latter case, as $G_{x}$ is central, by Lemma~\ref{lem:cv} if $G_{x}$ acts hyperbolically, then $F_{n} \times \mathbb{Z}$ acts on its axis. Therefore there is a homomorphism $F_n \times \mathbb{Z} \to \mathbb{Z}$ whose kernel fixes an edge. As every edge stabilizer of $T$ is infinite cyclic, $G_x$ must act elliptically.
\end{proof}
We record the following corollary of Lemma~\ref{lem:Jvertex-elliptic}.
\begin{corollary}
\label{cor:thmB}
Suppose $\Gamma$ is a connected finite simplicial graph that has at least three vertices. If $A(\Gamma)$ acts on a tree $T$ such that the stabilizer of every edge is infinite cyclic, then every $v \in \Gamma^{0}$ that has valence greater than one acts elliptically in $T$.
\end{corollary}
\begin{proof}
This follows from Lemma~\ref{lem:Jvertex-elliptic} as each such vertex is contained in some bicomponent $\Gamma_{x}$ for some non-hanging $x \in B_{\Gamma}$ and hence acts elliptically in $T_{\mathcal{J}(\Gamma)}$.
\end{proof}
\end{document}
\begin{document}
\title{Note on the group edge irregularity strength of graphs}
\author{Marcin Anholcer$^1$, Sylwia Cichacz$^{2,3}$\\
$^1$Pozna\'n University of Economics and Business\\
$^2$AGH University of Science and Technology Krak\'ow, Poland\\
$^3$ University of Primorska, Koper, Slovenia}
\maketitle
\begin{abstract}
We investigate the \textit{group edge irregularity strength} $es_g(G)$ of graphs, i.e., the smallest value of $s$ such that for every Abelian group $\mathcal{G}$ of order $s$ there exists a function $f:V(G)\rightarrow \mathcal{G}$ for which the sums of the labels of the end vertices are pairwise distinct over the edges. In this note we provide some upper bounds on $es_g(G)$ as well as on the edge irregularity strength $es(G)$ and the harmonious order ${\rm{har}}(G)$.
\end{abstract}
\section{Introduction}
In 1988 Chartrand et al. \cite{ref_ChaJacLehOelRuiSab1} proposed the problem of irregular labeling. This problem was motivated by the well
known fact that a simple graph of order at least 2 must contain a pair of vertices with the same degree. The situation changes if we consider multigraphs. Each multiple edge may be represented with some integer label and the (\textit{weighted}) degree of any vertex $x$ is then calculated as the sum of labels of all the edges incident to $x$. The maximum label $s$ is called the \textit{strength} of the labeling. The labeling itself is called \textit{irregular} if the weighted degrees of \textbf{all} vertices are distinct. The smallest value of $s$ that allows an irregular labeling is called the \textit{irregularity strength of} $G$ and denoted by $s(G)$. This problem was one of the major sources of inspiration in graph theory \cite{AhmAlMBac,ref_AigTri,ref_AmaTog,ref_AnhCic1,ref_BacJenMilRya,ref_FerGouKarPfe,ref_KalKarPfe1,ref_KarLucTho,ref_MajPrz,ref_Nie,ref_ThoWuZha,refXuLiGe}. For example the concept of $\mathcal{G}$-irregular labeling is a generalization of irregular labeling on Abelian groups. The \textit{group irregularity strength} of $G$, denoted $s_g(G)$, is the smallest integer $s$ such that for every Abelian group $\mathcal{G}$ of order $s$ there exists a $\mathcal{G}$-irregular labeling $f$ of $G$. The following theorem, determining the value of $s_g(G)$ for every connected graph $G$ of order $n\geq 3$, was proved by Anholcer, Cichacz and Milani\v{c} \cite{ref_AnhCic1}.
\begin{mytheorem}[\cite{ref_AnhCic1}]\label{AnhCic1}
Let $G$ be an arbitrary connected graph of order $n\geq 3$. Then
$$
s_g(G)=\begin{cases}
n+2,&\text{if } G\cong K_{1,3^{2q+1}-2} \text{ for some integer }q\geq 1,\\
n+1,&\text{if } n\equiv 2 \imod 4 \wedge G\not\cong K_{1,3^{2q+1}-2} \text{ for any integer }q\geq 1,\\
n,&\text{otherwise.}
\end{cases}
$$
\end{mytheorem}
The notion of \textit{edge irregularity strength} was defined by Ahmad, Al-Mushayt and Ba\v{c}a \cite{AhmAlMBac}.
The weight of an edge $xy$ in $G$ is the sum of the labels of its end vertices $x$ and $y$. A vertex $k$-labeling $\phi\colon V(G)\rightarrow \{1,2,\ldots,k\}$ is called edge irregular $k$-labeling of $G$ if every two different edges have different weights. The minimum $k$ for which $G$ has an edge irregular $k$-labeling is called the \textit{edge irregularity strength} of $G$ and denoted by $es(G)$. They established the exact value of the edge irregularity strength of paths, stars, double stars and Cartesian product of two paths. They also gave a lower bound for $es(G)$.
The total version of this concept, the \textit{edge irregular total labeling}, has also been investigated in the literature \cite{ref_BacJenMilRya,refXuLiGe}.
Graham and~Sloane \cite{ref_GraSlo} defined \textit{harmonious labeling} as a direct extension of additive bases and graceful labeling. We call a labeling $f \colon V (G) \rightarrow\mathbb{Z}_{|E(G)|}$ harmonious if it is an injection such that the sums $f (x) + f (y)$ modulo $|E(G)|$ are pairwise distinct over the edges $xy\in E(G)$. When
$G$ is a tree, exactly one label may be used on two vertices. They conjectured that every tree is harmonious (the conjecture is still unsolved) \cite{ref_GraSlo}. Beals et al. (see \cite{ref_BeaGalHeaJun}) considered the concept of harmoniousness with respect to arbitrary Abelian groups. \.Zak in \cite{ref_Zak} generalized the problem and introduced a new parameter, the \textit{harmonious order of $G$}, defined as the smallest number $t$ such that there exists an injection $f:V(G)\rightarrow \mathbb{Z}_t$ (or a surjection if $t<|V(G)|$) that produces distinct edge weights.
The problem of harmonious order is connected with a~problem of sets in Abelian groups with distinct sums of pairs.
A subset $S$ of an Abelian group $\Gamma$, where $|S|=k$, is an $S_2$-set of size $k$ if all the sums of two different elements of $S$ are distinct in $\Gamma$. Let $s(\Gamma)$ denote the cardinality of the largest $S_2$-set in $\Gamma$. Two central functions in the study
of $S_2$-sets are $v(k)$ and $v_{\gamma}(k)$, which give the order of the smallest Abelian and cyclic group $\Gamma$,
respectively, for which $s(\Gamma)\geq k$. Since cyclic groups are a special case of Abelian groups, clearly
$v(k)\leq v_{\gamma}(k)$, and any upper bound on $v_{\gamma}(k)$ is also an upper bound on $v(k)$ \cite{Haa}. Note that ${\rm{har}}(K_n)=v_{\gamma}(n)\leq n^2+O(n^{36/23})$ \cite{ref_Zak}.
Recently Montgomery, Pokrovskiy and Sudakov proved the following theorem.
\begin{mytheorem}[\cite{ref_Sudakov}]
Every tree $T$ of order $n$ has an injective $\Gamma$-harmonious labeling for any Abelian group $\Gamma$ of order $n + o(n)$.
\end{mytheorem}
In this paper we would like to introduce a new concept which gathers the ideas of $\mathcal{G}$-irregular labeling, edge irregularity strength and harmonious order, namely \textit{group edge irregularity strength}.
Assign an element $w(v)$ of an Abelian group $\mathcal{G}$ of order $s$ to every vertex $v \in V(G)$. For every edge $e=uv \in E(G)$ the \textit{weight} is defined as:
\begin{eqnarray*}
wd(uv)=w(u)+w(v).
\end{eqnarray*}
The labeling $w$ is $\mathcal{G}$-edge irregular if for $e\neq f$ we have $wd(e) \neq wd(f)$. The \textit{group edge irregularity strength} $es_g(G)$ is the smallest $s$ such that for every Abelian group $\mathcal{G}$ of order $s$ there exists a $\mathcal{G}$-edge irregular labeling of $G$.
\section{Bounds on $es_g(G)$}
Let us start with general lower bound on $es_g(G)$. Of course, the order of the group must be equal at least to the number of edges of $G$.
\begin{myobservation}\label{lemma_cycle_below0}
For each graph $G$ with $|E(G)|=m$, $es_g(G)\geq m$.
\end{myobservation}
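For example, the bound of Observation~\ref{lemma_cycle_below0} is attained for $C_4$: labelling the consecutive vertices of $C_4$ by $0,1,2,0$ in $\mathbb{Z}_4$ yields the edge weights $1,3,2,0$, while labelling them by $(0,0),(1,0),(0,1),(0,0)$ in $\mathbb{Z}_2\times\mathbb{Z}_2$ yields the weights $(1,0),(1,1),(0,1),(0,0)$. In both cases the weights are pairwise distinct, and these are the only Abelian groups of order $4$, so $es_g(C_4)=4$.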
The above bound can be improved e.g. for cycles.
\begin{myproposition}\label{lemma_cycle_below}
If $n \equiv 2 \imod 4$, then $es_g(C_n) \geq n+1$.
\end{myproposition}
\begin{proof}
Let $n=2(2k+1)$ and suppose, to the contrary, that $C_n$ admits a $\mathcal{G}$-edge irregular labeling for some Abelian group $\mathcal{G}$ of order $n$. Obviously $\mathcal{G}=\mathbb{Z}_2\times \mathcal{G}_1$ for some group $\mathcal{G}_1$ of order $2k+1$. Since the $n$ edge weights are pairwise distinct and $|\mathcal{G}|=n$, every element of $\mathcal{G}$ appears exactly once as an edge weight; in particular so do all $2k+1$ elements $(1,a)$ with $a\in\mathcal{G}_1$, and hence
$$
\sum_{e\in E(G)}{wd(e)}=(1,b_1)
$$
for some $b_1\in \mathcal{G}_1$. On the other hand,
$$
\sum_{e\in E(G)}{wd(e)}=2\sum_{v\in V(G)}{w(v)}=(0,b_2)
$$
for some $b_2\in\mathcal{G}_1$, a contradiction.
\end{proof}
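The parity obstruction above can also be confirmed by exhaustive search for small cycles. The following sketch (added only as an illustration) searches labelings over the cyclic group $\mathbb{Z}_s$; since $6$ is square-free, $\mathbb{Z}_6$ is the only Abelian group of order $6$, so the negative outcome for $C_6$ already gives $es_g(C_6)\geq 7$.
\begin{verbatim}
from itertools import product

def has_Zs_edge_irregular_labeling(n, s):
    # Try every vertex labeling of the cycle C_n with elements of Z_s and
    # test whether the n edge weights w(v_i) + w(v_{i+1}) (mod s) are distinct.
    for labels in product(range(s), repeat=n):
        weights = {(labels[i] + labels[(i + 1) % n]) % s for i in range(n)}
        if len(weights) == n:
            return True
    return False

print(has_Zs_edge_irregular_labeling(4, 4))  # True:  es_g(C_4) = 4
print(has_Zs_edge_irregular_labeling(6, 6))  # False: es_g(C_6) >= 7
\end{verbatim}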
Note that each $\mathcal{G}$-edge irregular labeling of $K_n$ has to be an injection, which implies that ${\rm{har}}(K_n)\leq es_g(K_n)$. So $es_g(K_n)={n \choose 2}$ only for $n\leq 3$ \cite{ref_GraSlo}. Recall that for a square-free integer $n$ there exists a unique Abelian group of order $n$, namely $\mathbb{Z}_n$. Therefore we obtain that $es_g(K_5)=11$, $es_g(K_6)=19$, $es_g(K_{12})=114$, $es_g(K_{14})=178$ and $es_g(K_{15})=183$ \cite{ref_Zak}. Although the exact values of ${\rm{har}}(K_n)$ are not known for $n\geq16$, the lower bound $n^2-3n\leq {\rm{har}}(K_n)$ holds for each $n \geq 3$ \cite{ref_Zak}, hence we easily obtain the following observation.
\begin{myobservation} If $n \geq 3$, then $n^2-3n \leq es_g(K_{n})$.
\end{myobservation}
We now give several upper bounds on $es_g(G)$.
In \cite{AhmAlMBac} Ahmad, Al-Mushayt and Ba\v{c}a obtained an exponential upper bound on $es(G)$ in terms of the Fibonacci numbers with seed values $F_1=1$, $F_2=2$. However, since $es(G)\leq {\rm har}(K_n)$, we obtain the following.
\begin{myproposition}
If $G$ is a graph of order $n$, then $es(G)\leq n^2+O(n^{36/23})$.
\end{myproposition}
Let $|E(G)|=m$. Note that in general we do not know whether $es_g(G) \leq {\rm{har}}(K_n)$; however, we are able to show the following.
\begin{mytheorem}\label{marcin}
For each graph $G$, $es(G) \leq es_g(G)\leq p(2es(G)) \leq p(2 {\rm{har}}(G))$, where $p(k)$ is the least prime greater than $k$.
\end{mytheorem}
\begin{proof}
The first inequality follows from the fact that if we can find a $\mathcal{G}$-edge irregular labeling for every Abelian group $\mathcal{G}$ of a given order $p$, then in particular we can do it for the cyclic group $\mathbb{Z}_p$; and a labeling that distinguishes the weights modulo $p$, read as a labeling with the integers $1,\ldots,p$ (where we use $p$ instead of $0$ as a label), also distinguishes the weights in $\mathbb{Z}$, i.e., it is an (ordinary) edge irregular labeling.
For every prime $p$ there is only one Abelian group $\mathcal{G}$ of order $p$, namely $\mathbb{Z}_p$. If the labeling uses only labels smaller than $p/2$, then every edge weight is smaller than $p$, so the addition inside the group agrees with the addition in $\mathbb{Z}$; applying this with $p=p(2es(G))$ and an edge irregular labeling with labels in $\{1,\ldots,es(G)\}$, the second inequality follows.
The last inequality is implied by the fact that $es(G)\leq {\rm{har}}(G)$ and the monotonicity of $p(\cdot)$.
\end{proof}
From the Bertrand–Chebyshev theorem \cite{ref_The} it follows that for any positive integer $i$ there exists a prime number between $i$ and $2i$, which easily leads to the following.
\begin{mycorollary}
Let $G$ be a graph of order $n$. Then $es_g(G)\leq 4n^2 +O(n^{36/23}).$
\end{mycorollary}
However, for larger $n$, better bounds are known. For example, Nagura \cite{ref_Nag} proved that there is a prime between $i$ and $1.2i$ provided that $i\geq 25$. This gives us the following upper bound.
\begin{mycorollary}
Let $G$ be a graph of order $n\geq 25$. Then $es_g(G)\leq 2.4n^2 +O(n^{36/23}).$
\end{mycorollary}
Recently Baker, Harman and Pintz \cite{ref_BakHarPin} proved that for sufficiently large $i$, there is a prime between $i$ and $i+i^{0.525}$. This allows to obtain the following result for large graphs.
\begin{mycorollary}\label{corMA}
Let $G$ be a graph of sufficiently large order $n$. Then $es_g(G)\leq 2n^2 +O(n^{36/23}).$
\end{mycorollary}
The latter result can be improved for some special classes of graphs. First, let us consider a graph $G$ of the following form. Assume that the vertices of $G$ can be divided into four sets $V_{11}$, $V_{12}$, $V_{21}$ and $V_{22}$ such that for every edge $xy$, we have that $x\in V_{ij}$ and $y\in V_{kl}$ implies $i=k$ or $j=l=1$. Moreover, assume that $|V_{11}|+|V_{12}|\leq \left\lceil n/2 \right\rceil$, $|V_{11}|+|V_{21}|\leq \left\lceil n/2 \right\rceil$ and $|V_{21}|+|V_{22}|\leq \left\lceil n/2 \right\rceil$. A special case is a graph in which $|V_{11}|+|V_{21}|=0$, i.e.\ a graph with two components having orders $\left\lceil n/2 \right\rceil$ and $\left\lfloor n/2 \right\rfloor$.
\begin{mycorollary}
Let $G$ be a graph defined as above, where $n$ is sufficiently large. Then $es_g(G)\leq 1.5n^2 +O(n^{36/23})$.
\end{mycorollary}
\begin{proof}
For any graph of sufficiently large order $\left\lceil n/2 \right\rceil$, there is a group of prime order $g=0.5n^2 +O(n^{36/23})$ that allows a group edge irregular labeling of this graph by Corollary~\ref{corMA}. Of course, $g$ is not divisible by $3$.
Let us start with modifying the graph $G$ by adding some edges so that the subgraphs with vertex sets $V_{11}\cup V_{12}$, $V_{11}\cup V_{21}$ and $V_{21}\cup V_{22}$ become complete graphs.
Let us take any group $\mathcal{G}$ of order $3g$. Of course such a group must be of the form $\mathbb{Z}_3\times \mathcal{G}^\prime$ for some group $\mathcal{G}^\prime$ of order $g$. We label the vertices of $G$ with elements $(g_1,g_2)$ of $\mathcal{G}$, where $g_1\in \mathbb{Z}_3$ and $g_2\in \mathcal{G}^\prime$, in the following way. First we choose $g_2$ for the vertices in $V_{11}\cup V_{21}$ in such a way that the edges are distinguished even if all $g_1$ are equal (it is possible, since any graph of order $\left\lceil n/2 \right\rceil$ can be labeled with a group of order $g$). Then we do the same with the vertices of $V_{11}\cup V_{12}$, by labeling the vertices of $V_{12}$ with the elements of $\mathcal{G}^\prime$ not used in $V_{11}$. Finally we label the vertices of $V_{22}$ with the elements not used in $V_{21}$. This distinguishes the edges inside $V_{11}\cup V_{12}$ and inside $V_{21}\cup V_{22}$, regardless of the values of $g_1$. Now we choose $g_1=0$ for the vertices in $V_{11}\cup V_{12}$ and $g_1=1$ for the vertices in $V_{21}\cup V_{22}$, which distinguishes the edges from different sets: the first coordinate of every edge weight inside $V_{11}\cup V_{12}$ is $0$, inside $V_{21}\cup V_{22}$ it equals $2$, and for each edge between $V_{11}$ and $V_{21}$ it equals $1$. This means that the labeling is a $\mathcal{G}$-edge irregular labeling of the modified graph, hence also of each of its subgraphs, in particular of $G$. Thus
$$es_g(G)\leq 3g\leq 1.5n^2 +O(n^{36/23}).$$
\end{proof}
A similar reasoning allows us to strengthen the result for graphs having more than two components of almost the same order. As we know, in any group of odd (in particular prime) order $p$, $x=y$ if and only if $2x=2y$. Thus if one uses a different value of $g_1$ in every component, it is enough to distinguish the edges inside every component by the elements $g_2$ of the subgroup $\mathcal{G}^\prime$ of $\mathcal{G}=\mathbb{Z}_p\times \mathcal{G}^\prime$ (of course $p$ must be prime and $|\mathcal{G}^\prime|$ not divisible by $p$ if we want this decomposition to be unique). This gives us the following result.
\begin{mycorollary}
Let $G$ be a graph of order $n$, consisting of $q\geq 2$ components with orders different by at most $1$, where $n$ is sufficiently large. Let $p$ be the smallest odd prime not less than $q$. Then
$$es_g(G)\leq \frac{2p}{q^2}n^2 +O(n^{36/23}).$$
\end{mycorollary}
Note that if also $q$ is sufficiently large, then we obtain $es_g(G)\leq 2n^2/q +O(n^{36/23}).$
\begin{myproposition}\label{forest}
For each forest $F$ with $m$ edges, $es_g(F)=m$. Moreover, any prescribed assignment of weights to the edges can be realized, for an arbitrary choice of the label of one vertex in each component.
\end{myproposition}
\begin{proof}
Process the edges of each component in an order in which every edge shares a vertex with the initially labeled vertex or with an already processed edge (e.g., by a breadth-first search from the initially labeled vertex). When an edge is processed, one of its endpoints already has a label $a$; if the edge is supposed to be weighted with $b$, it is enough to put $b-a$ on the other endpoint.
\end{proof}
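A minimal sketch (ours) of the greedy procedure from the proof, written for a single tree and the cyclic group $\mathbb{Z}_m$: given any prescribed bijection between the edges and the group elements and an arbitrary label for one vertex, the remaining labels are forced.
\begin{verbatim}
def label_tree(n, edges, target, root_label, m):
    """edges: list of pairs (u, v) forming a tree on vertices 0..n-1;
    target[i]: prescribed weight in Z_m for edges[i] (any bijection will do);
    returns vertex labels realizing these weights, starting from vertex 0."""
    adj = {v: [] for v in range(n)}
    for i, (u, v) in enumerate(edges):
        adj[u].append((v, i))
        adj[v].append((u, i))
    labels = {0: root_label % m}
    stack = [0]
    while stack:
        u = stack.pop()
        for v, i in adj[u]:
            if v not in labels:
                labels[v] = (target[i] - labels[u]) % m  # forces wd(uv) = target[i]
                stack.append(v)
    return labels

# Path on 4 vertices, prescribed weights 0, 1, 2 in Z_3, label 1 on vertex 0:
edges = [(0, 1), (1, 2), (2, 3)]
lab = label_tree(4, edges, [0, 1, 2], 1, 3)
print(lab, [(lab[u] + lab[v]) % 3 for u, v in edges])  # expected weights: [0, 1, 2]
\end{verbatim}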
The notion of coloring number of a graph was introduced by Erd\H{o}s and Hajnal in \cite{ErdHaj}. For a given graph $G$ by ${\rm col}(G)$ we denote its coloring number, that is the least integer $k$ such that each subgraph of $G$ has minimum degree less than $k$. Equivalently, it is the smallest $k$ for which we may linearly order all vertices of $G$ into a sequence $v_1,v_2,\ldots,v_n$ so that every vertex $v_i$ has at most $k-1$ neighbors preceding it in the sequence.
Hence $\chi(G)\leq {\rm col}(G)\leq \Delta(G)+1$.
Note that ${\rm col}(G)$ equals the degeneracy of $G$ plus $1$, and thus the result below may
be formulated
in terms of either of the two graph invariants.\\
\begin{mytheorem}\label{col_upper}
For every graph $G=(V,E)$, there exists a $\mathcal{G}$-edge irregular labeling for any Abelian group $\mathcal{G}$ of order $|\mathcal{G}|\geq ({\rm col}(G)-1)(|E|-1)+1$.
\end{mytheorem}
\begin{proof}
By Proposition \ref{forest} we can assume that $G$ is not a forest.
Fix any Abelian group $\mathcal{G}$ of order $|\mathcal{G}|\geq ({\rm col}(G)-1)(|E(G)|-1)+1$.
Let $v_1,v_2,\ldots,v_n$ be the ordering of $V(G)$ witnessing the value of ${\rm col}(G)$. We start with putting arbitrary color on $v_1$. Then we will color the remaining vertices of $G$ with elements of $\mathcal{G}$ in $n-1$ stages, each corresponding to a consecutive vertex from among $v_2,v_3,\ldots,v_n$.
Initially no vertex except $v_1$ is colored.
Then at each stage $i$, $i=2,3,\ldots,n$,
we color the vertex $v_i$.
We will choose a color avoiding sum conflicts with the already analyzed vertices, so that at all times the partial edge weighting has the desired property.
Namely, we choose a color $w(v_i)\in \mathcal{G}$ so that every weight $wd(v_iv_j)$ with $j<i$ and $v_iv_j\in E(G)$ is distinguished from all edge weights defined so far. Each of the at most ${\rm col}(G)-1$ back edges at $v_i$ has to avoid at most $|E(G)|-1$ previously defined weights, and each such constraint excludes at most one element of $\mathcal{G}$; hence at most $({\rm col}(G)-1)(|E(G)|-1)$ colors are forbidden for $v_i$ and a suitable color exists.
\end{proof}
We immediately obtain the following result.
\begin{mycorollary}\label{nullcol}
For each graph $G$ of order at least $4$, $es_g(G)\leq ({\rm col}(G)-1)(|E(G)|-1)+1$.
\end{mycorollary}
Taking into account that for every planar graph $G$ we have ${\rm col}(G)\leq6$, we obtain the following corollary.
\begin{mycorollary}\label{Planar}
For each planar graph $G$ of order at least $4$, $es_g(G)\leq 5|E(G)|-4$.
\end{mycorollary}
Note also that if we additionally want to have injection of colors on vertices, then within the proof of Theorem~\ref{col_upper} above, we obtain at most $n-1$ constraints while choosing a color for a given vertex. Consequently, by a straightforward adaptation
of the proof above, we obtain the following.
\begin{mycorollary}\label{har}
For each graph $G$ of order at least $4$, ${\rm{har}}(G)\leq |V(G)|+ ({\rm col}(G)-1)(|E(G)|-1)$.
\end{mycorollary}
The exact value of $es_g(C_n)$, where $C_n$ is a cycle of order $n$, is given by the following theorem.
\begin{mytheorem}
Let $C_n$ be an arbitrary cycle of order $n\geq 3$. Then
$$
es_g(C_n)=\begin{cases}
n+1&\text{when } n\equiv 2 \imod 4\\
n&\text{otherwise}
\end{cases}
$$
Moreover, a respective labeling exists for an arbitrary choice of the label of any vertex.
\end{mytheorem}
Remark: in fact, the labeling can be found for any group of order at least $es_g(C_n)$.
\begin{proof}
Labeling the vertices so as to distinguish the edge weights is in this case equivalent to labeling the edges so as to distinguish the weighted degrees of the vertices (we label the line graph; moreover, $m=n$). Thus the theorem is a simple corollary of Theorem \ref{AnhCic1}.
\end{proof}
\begin{mytheorem}\label{dwudzielne}
If $G=K_{m,n}$, then $es_g(G)=mn$.
\end{mytheorem}
\noindent\textbf{Proof.}
Let $\Gamma$ be an Abelian group of order $mn$. One of the consequences of the fundamental theorem of finite Abelian groups is that for any divisor $k$ of $|\Gamma|$ there exists a subgroup $H$ of $\Gamma$ of order $k$. Therefore there exists $\Gamma_0< \Gamma$ such that $|\Gamma_0|=n$.
Let $V_1$ and $V_2$ be the partition sets of $G$ such that $|V_1|=m$ and $|V_2|=n$. Put all elements of $\Gamma_0$ on the vertices of the set $V_2$, whereas on the vertices of $V_1$ put all coset representatives for $\Gamma/\Gamma_0$. Note that all edges incident with a vertex $v\in V_1$ obtain different weights, and these weights are elements of the coset $w(v)+\Gamma_0$. Hence, by the coset decomposition of $\Gamma$, all $mn$ edge weights are distinct and we are done.~\qed\\
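For a concrete check (ours) of the construction, take the particular choice $\Gamma=\mathbb{Z}_{mn}$ with $\Gamma_0=\langle m\rangle\cong\mathbb{Z}_n$: the sketch below labels $V_2$ with the subgroup, labels $V_1$ with the coset representatives $0,1,\ldots,m-1$, and verifies that all $mn$ edge weights are distinct.
\begin{verbatim}
m, n = 3, 4
s = m * n                        # Gamma = Z_{mn}; Gamma_0 = <m> has order n
V1 = list(range(m))              # coset representatives 0,...,m-1 on V_1
V2 = [m * j for j in range(n)]   # the subgroup {0, m, 2m, ...} on V_2
weights = {(a + b) % s for a in V1 for b in V2}
print(len(weights) == m * n)     # expected: True, all mn edge weights distinct
\end{verbatim}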
From the above theorem we obtain the following upper bound for bipartite graphs.
\begin{mycorollary}
If $G$ is a bipartite graph of order $n$, then $es_g(G)\leq \left\lceil \frac{n^2-1}{4}\right\rceil$.
\end{mycorollary}
\noindent\textbf{Proof.}
Let $G$ have partition sets $V_1$ and $V_2$ of orders $n_1$ and $n-n_1$, respectively. Obviously $G$ is a subgraph of $K_{n_1,n-n_1}$, and restricting a $\mathcal{G}$-edge irregular labeling of $K_{n_1,n-n_1}$ to $V(G)$ gives a $\mathcal{G}$-edge irregular labeling of $G$, so by Theorem~\ref{dwudzielne} we obtain $es_g(G)\leq es_g(K_{n_1,n-n_1})= n_1(n-n_1)\leq \left\lceil \frac{n^2-1}{4}\right\rceil$.~\qed\\
\section{Final remarks}
In this paper we introduced a new graph invariant, the group edge irregularity strength $es_g(G)$. We presented relations between this parameter and others, such as the harmonious order ${\rm{har}}(G)$. We also gave some lower and upper bounds for $es_g(G)$.
Based on them, we state the following conjecture.
\begin{myconjecture}
There exists a constant $c>0$ such that for every graph $G$ of size $m$, $es_g(G)\leq 2m+c$.
\end{myconjecture}
Let us consider a version of the problem for directed graphs. Assume that the weight of an arc $(u,v)$ with tail $u$ and head $v$ is now computed as
\begin{eqnarray*}
wd((u,v))=w(u)-w(v).
\end{eqnarray*}
For example, if one considers directed acyclic graphs (DAGs), the following result analogous to Theorem \ref{col_upper} easily follows from the fact that the vertices of such digraph may be ordered so that each of them is preceded by all its in-neighbors (or all the out-neighbors).
\begin{myproposition}
Let $D$ be a DAG with $m$ arcs, maximum indegree $\Delta^-$ and maximum outdegree $\Delta^+$. Then $es_g(D)\leq (m-1)\min\{\Delta^-,\Delta^+\}+1$.
\end{myproposition}
Observe that directed acyclic graphs are connected with an old problem of \textit{difference bases} from number theory \cite{ref_RedRen}; therefore the following problem would be interesting.
\begin{myproblem}
Determine $es_g(D)$ for an arbitrary digraph $D$.
\end{myproblem}
\nocite{*}
\end{document}
\begin{document}
\title{Counting dominating sets and related structures in graphs}
\author{Jonathan Cutler}
\address[Jonathan Cutler]{Department of Mathematical Sciences, Montclair State University, Montclair, NJ 07043 USA}
\email{[email protected]}
\author{A.~J.~Radcliffe}
\address[A.~J.~Radcliffe]{Department of Mathematics, University of Nebraska-Lincoln, Lincoln, NE 68588 USA}
\email{[email protected]}
\begin{abstract}
We consider some problems concerning the maximum number of (strong) dominating sets in a regular graph, and their weighted analogues. Our primary tool is Shearer's entropy lemma. These techniques extend to a reasonably broad class of graph parameters enumerating vertex colorings satisfying conditions on the multiset of colors appearing in (closed) neighborhoods. We also generalize further to enumeration problems for what we call existence homomorphisms. Here our results are substantially less complete, though we do solve some natural problems.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
Many interesting problems arise when one asks which graphs maximize some graph invariant over a fixed class of graphs. The history of these problems goes back to at least Mantel's theorem, where the class of graphs is that of triangle-free graphs on $n$ vertices and the graph invariant is simply the number of edges. Recently, there has been a lot of interest in maximizing the number of independent sets in a graph from some class. One of the earliest such results is due to Moon and Moser \cite{MM}.
\begin{theorem}[Moon, Moser]
If $G$ is a graph on $n\geq 2$ vertices, then the number of maximal independent sets in $G$ is at most
\[
\begin{cases}
3^{n/3} & \text{if $n\equiv 0 \pmod 3$},\\
4\cdot 3^{\lfloor n/3\rfloor-1} & \text{if $n\equiv 1 \pmod 3$},\\
2\cdot 3^{\lfloor n/3\rfloor} & \text{if $n\equiv 2 \pmod 3$}.
\end{cases}
\]
\end{theorem}
Another example of such a result is the following beautiful theorem due to Kahn \cite{K} and Zhao \cite{Z} concerning $i(G)$, the number of independent sets in a graph $G$.
\begin{theorem}[Kahn, Zhao]\label{thm:KZ}
If $G$ is an $r$-regular graph on $n$ vertices, then
\[
i(G)\leq i(K_{r,r})^{n/(2r)}=(2^{r+1}-1)^{n/(2r)}.
\]
\end{theorem}
Kahn proved Theorem~\ref{thm:KZ} for bipartite graphs using entropy methods and Zhao extended this result to all regular graphs by a clever use of the bipartite double cover. Motivated by a conjecture of Kahn about a weighted version of the bipartite case of Theorem~\ref{thm:KZ}, Galvin and Tetali \cite{GT} made the observation that independent sets are generalized by homomorphisms into a fixed image graph. Recall that a \emph{homomorphism from $G$ to $H$} is a function $\phi:V(G)\to V(H)$ such that $xy\in E(G)$ implies $\phi(x)\phi(y)\in E(H)$. Let $\Hom(G,H)$ be the set of homomorphisms from $G$ to $H$ and $\hom(G,H)=\abs{\Hom(G,H)}$. If we let $H_{\text{ind}}$ be the graph on two vertices with an edge between them and one of the vertices looped (see Figure~\ref{fig:hind}),
\begin{figure}
\caption{The graph $H_{\text{ind}}$}\label{fig:hind}
\end{figure}
then independent sets in $G$ correspond precisely to elements of $\Hom(G,H_{\text{ind}})$. (An independent set $I$ corresponds to a homomorphism to $H_{\text{ind}}$ in which the preimage of the unlooped vertex is $I$.) Galvin and Tetali \cite{GT} proved the following.
\begin{theorem}[Galvin, Tetali]
If $G$ is an $r$-regular bipartite graph on $n$ vertices and $H$ is any graph (which may have loops), then
\[
\hom(G,H)\leq \hom(K_{r,r},H)^{n/(2r)}.
\]
\end{theorem}
A variety of homomorphism counting problems correspond directly to statistical physics models. For instance, $\Hom(G,H_{\text{ind}})$ is the hard-core model. In this context, it is common to weight homomorphisms by giving an activity to each vertex of the image graph. To be precise, we have the following.
\begin{definition}
Given a (possibly looped) graph $H$ and a function $\lambda:V(H)\to (0,\infty)$, we define the \emph{weight} of a homomorphism $\phi\in \Hom(G,H)$ to be
\[
w^{\lambda}(\phi)=\prod_{v\in V(G)} \lambda(\phi(v)).
\]
The analogue of the number of homomorphisms is the total weight of all homomorphisms from $G$ to $H$. We write
\[
Z^{\lambda}(G,H)=\sum_{\phi\in \Hom(G,H)} w^{\lambda}(\phi).
\]
\end{definition}
Galvin and Tetali \cite{GT} also showed the following.
\begin{theorem}[Galvin, Tetali]
If $G$ is an $r$-regular bipartite graph on $n$ vertices, $H$ is any graph (which may have loops), and $\lambda:V(H)\to (0,\infty)$, then
\[
Z^{\lambda}(G,H)\leq (Z^{\lambda}(K_{r,r},H))^{n/(2r)}.
\]
\end{theorem}
We are primarily concerned in this paper with bounds on the number of dominating sets and strong dominating sets in graphs, for instance in $r$-regular graphs on a given number of vertices or in graphs with a fixed number of vertices and edges. Let us recall a few relevant definitions. If $G$ is a graph, we let $N(v)$ and $N[v]$ be the open and closed neighborhoods of a vertex $v$, respectively. Similarly, for $S\subseteq V(G)$, we let $N(S)=\bigcup_{v\in S} N(v)$ and $N[S]=\bigcup_{v\in S} N[v]$.
\begin{definition}
In a graph $G=(V,E)$, a set $S\subseteq V$ is a \emph{dominating set} if $N[S]=V$. We say $S$ is a \emph{strong dominating set} if $N(S)=V$.
\end{definition}
Fomin, Grandoni, Pyatkin, and Stepanov \cite{FGPS} were able to prove the following analogue of the Moon-Moser theorem, bounding the number of \emph{minimal} dominating sets in a graph.
\begin{theorem}[Fomin et al.]
If $G$ is a graph on $n$ vertices, then the number of minimal dominating sets in $G$ is at most $1.7159^n$.
\end{theorem}
One of the tools that we will use is entropy. Recall that if $X$ is a random variable, then the \emph{entropy of $X$} is the following property of the distribution of $X$:
\[
H(X)=\sum_{x\in \range(X)} \mathbb{P}(X=x)\log \frac{1}{\mathbb{P}(X=x)},
\]
where this logarithm and all other logarithms in this paper are base two. It is well-known that if $\abs{\range(X)}=n$, then $H(X)\leq \log n$ with equality if and only if $X$ is uniform on its range. If $X=(X_1,X_2,\ldots,X_n)$ is a random sequence and $A\subseteq [n]$, then we let $X_A=(X_i)_{i\in A}$ be the restriction of $X$ to $A$. Our main entropy tool will be Shearer's Lemma \cite{CGFS}.
\begin{theorem}[Shearer's Lemma]
If $X=(X_1,X_2,\ldots,X_n)$ is a random sequence and $\mathcal{A}$ is a multiset of subsets of $[n]$ such that every $i\in [n]$ is in at least $k$ elements of $\mathcal{A}$, then
\[
H(X)\leq \frac{1}{k} \sum_{A\in \mathcal{A}} H(X_A).
\]
\end{theorem}
In this paper, we use Shearer's Lemma to prove upper bounds on the number of dominating sets and strong dominating sets in regular graphs. We also show that these bounds extend to weighted dominating sets by generalizing the bounds to what we call legal closed neighborhood colorings. We prove similar results about weighted strong dominating sets. We derive some corollaries including results on the chromatic polynomial of the square of a regular graph and the number of rainbow colorings of neighborhood hypergraphs.
We also generalize strong dominating sets to graph functions called \emph{existence homomorphisms}. We prove bounds on the number of existence homomorphisms for some image graphs using elementary methods. We conclude with some open questions.
\section{Dominating sets and strong dominating sets}
\label{sec:domsets}
In this section, we give best possible bounds on the weighted number of dominating (and strong dominating) sets in an $r$-regular graph on $n$ vertices. We do this by introducing the idea of a \emph{neighborhood legal coloring} of the vertices of a graph. However, we will start by giving a quick proof of the unweighted version for dominating sets since it lays bare the essential techniques. We let $\ds(G)$ be the number of dominating sets in $G$.
\begin{theorem}
If $G$ is an $r$-regular graph on $n$ vertices, then
\[
\ds(G)\leq \ds(K_{r+1})^{n/(r+1)}.
\]
\end{theorem}
\begin{proof}
Let $S$ be a random set chosen uniformly from the dominating sets in $G$, and let $X=(X_v)_{v\in V(G)}$ be its characteristic vector. We will apply Shearer's Lemma with the multiset $\mathcal{A}$ being the collection $\setof{N[v]}{v\in V(G)}$. Note that each vertex in $G$ is in exactly $r+1$ sets in $\mathcal{A}$. Also, since $S$ is a dominating set, it cannot be the case that $X_{N[v]}$ is identically $0$ for any $v\in V(G)$. Thus, the random variable $X_{N[v]}$ takes at most $2^{r+1}-1$ values. We have
\begin{align*}
\log \ds(G)&=H(X)\\
&\leq \frac{1}{r+1} \sum_{v\in V(G)} H(X_{N[v]})\\
&\leq \frac{1}{r+1} \sum_{v\in V(G)} \log(2^{r+1}-1)\\
&= \frac{n}{r+1} \log(2^{r+1}-1)\\
&= \frac{n}{r+1} \log(\ds(K_{r+1})),
\end{align*}
as we claim.
\end{proof}
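As a quick numerical sanity check (ours, not needed for the proof), one can count dominating sets by brute force in a small regular graph and compare with the bound; note that $\ds(K_{r+1})=2^{r+1}-1$, since every nonempty subset of $V(K_{r+1})$ dominates.
\begin{verbatim}
from itertools import chain, combinations

def count_dominating_sets(n, edges):
    """Brute-force count of dominating sets (sets S with N[S] = V) on vertices 0..n-1."""
    closed = {v: {v} for v in range(n)}
    for u, v in edges:
        closed[u].add(v)
        closed[v].add(u)
    V = set(range(n))
    count = 0
    for S in chain.from_iterable(combinations(range(n), k) for k in range(n + 1)):
        dominated = set()
        for v in S:
            dominated |= closed[v]
        count += dominated == V
    return count

# C_6 is 2-regular on 6 vertices; the theorem gives ds(C_6) <= ds(K_3)^{6/3} = 7^2 = 49.
c6 = [(i, (i + 1) % 6) for i in range(6)]
print(count_dominating_sets(6, c6))  # expected: 39, which is at most 49
\end{verbatim}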
The core idea that makes this proof work is that we can tell whether $S$ is a dominating set simply by examining $X_{N[v]}$ for each $v\in V(G)$. We generalize this idea to vertex colorings of $G$ such that the restriction to each (closed) neighborhood falls into some class of legal colorings. The following definition makes this precise.
\begin{definition}
Let $K$ be a finite set of colors and let $\mathcal{L}$ be a collection of multisets of $K$. We say that $\phi:V(G)\to K$ is an \emph{$\mathcal{L}$-legal neighborhood coloring} of the graph $G$ if $\phi(N(v))\in \mathcal{L}$ for all $v\in V(G)$. Note that we are considering the multiset image of $N(v)$ under $\phi$. Similarly, we say that $\phi:V(G)\to K$ is an \emph{$\mathcal{L}$-legal closed neighborhood coloring} of the graph $G$ if $\phi(N[v])\in \mathcal{L}$ for all $v\in V(G)$. We let $\ell(G,\mathcal{L})$ be the number of $\mathcal{L}$-legal neighborhood colorings of $G$ and $\ell_c(G,\mathcal{L})$ the number of $\mathcal{L}$-legal closed neighborhood colorings of $G$. We refer to $\mathcal{L}$ as a \emph{coloring condition}.
\end{definition}
\begin{example}
If we set $K=\set{0,1}$ and let $\mathcal{L}$ be all multisets of $K$ containing at least one $1$, then an $\mathcal{L}$-legal neighborhood coloring of $G$ is precisely the characteristic function of a strong dominating set in $G$. Similarly, an $\mathcal{L}$-legal closed neighborhood coloring of $G$ is a dominating set in $G$.
\end{example}
In all of our results in this section, we will be considering regular graphs, and therefore only one size of multiset in $\mathcal{L}$ will be relevant.
It is straightforward to compute $\ell(K_{r,r},\mathcal{L})$ and $\ell_c(K_{r+1},\mathcal{L})$ for any coloring condition $\mathcal{L}$, but we need to introduce a bit of notation. Let us write
\[
N(r,\mathcal{L})=\#\setof{f:[r]\to K}{f([r])\in \mathcal{L}},
\]
the number of functions from a domain of size $r$ whose multiset image is legal according to $\mathcal{L}$. For $L\in \mathcal{L}$ of size $r$, we write $\binom{r}{n(L)}$ for the multinomial coefficient\footnote{Note that $n(L)$ is the multiset of repetition counts of elements of $L$.} counting the number of functions $f:[r]\to K$ such that $f([r])=L$. For instance,
\[
\binom{5}{n(\set{a,a,b,b,c})}=\binom{5}{2,2,1}=30.
\]
Thus,
\begin{equation*}
N(r,\mathcal{L})=\sum_{\substack{L\in \mathcal{L}\\\abs{L}=r}} \binom{r}{n(L)}.
\end{equation*}
An $\mathcal{L}$-legal neighborhood coloring of $K_{r,r}$ simply consists of a coloring that uses an element of $\mathcal{L}$ on each side. Thus, $\ell(K_{r,r},\mathcal{L})=N(r,\mathcal{L})^{2}$. Similarly, an $\mathcal{L}$-legal closed neighborhood coloring of $K_{r+1}$ is one whose image is in $\mathcal{L}$, and so $\ell_c(K_{r+1},\mathcal{L})=N(r+1,\mathcal{L})$.
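The quantity $N(r,\mathcal{L})$ can also be computed by direct enumeration of functions. The short sketch below (ours) does this for the coloring condition ``at least one $1$'' on $K=\{0,1\}$, which corresponds to dominating-type colorings.
\begin{verbatim}
from itertools import product
from collections import Counter

def N(r, K, legal):
    """Number of functions f:[r] -> K whose multiset image satisfies `legal`."""
    return sum(1 for f in product(K, repeat=r) if legal(Counter(f)))

dom = lambda multiset: multiset[1] >= 1   # at least one vertex colored 1
r = 3
print(N(r, [0, 1], dom))       # expected: 2^3 - 1 = 7
print(N(r, [0, 1], dom) ** 2)  # expected: 49, the number of legal neighborhood
                               # colorings of K_{3,3}
\end{verbatim}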
\begin{theorem}\label{thm:legalcount}
If $G$ is an $r$-regular graph on $n$ vertices and $\mathcal{L}$ is a coloring condition, then
\[
\ell(G,\mathcal{L})\leq N(r,\mathcal{L})^{n/r}=\ell(K_{r,r},\mathcal{L})^{n/(2r)},
\]
and
\[
\ell_c(G,\mathcal{L})\leq N(r+1,\mathcal{L})^{n/(r+1)}=\ell_c(K_{r+1},\mathcal{L})^{n/(r+1)}.
\]
\end{theorem}
\begin{proof}
For the first inequality, let $\phi$ be a random $\mathcal{L}$-legal neighborhood coloring of $G$ chosen uniformly from the set of all such colorings, and let $\mathcal{A}$ be the multiset $\setof{N(v)}{v\in V(G)}$. Applying Shearer's Lemma, we have
\begin{align*}
\log \ell(G,\mathcal{L})&=H(\phi)\\
&\leq \frac{1}{r} \sum_{v\in V(G)} H(\phi|_{N(v)})\\
&\leq \frac{1}{r} \sum_{v\in V(G)} \log N(r,\mathcal{L})\\
&= \frac{n}{r} \log N(r,\mathcal{L})\\
&=\frac{n}{2r} \log N(r,\mathcal{L})^2\\
&= \frac{n}{2r} \log(\ell(K_{r,r},\mathcal{L})),
\end{align*}
as we claim.
In the proof of the second inequality, the covering we consider is $\mathcal{A}=\setof{N[v]}{v\in V(G)}$. The rest of the proof is the same, \emph{mutatis mutandis}.
\end{proof}
It is standard in many enumeration problems of this type to consider a weighted version in which there are ``activations'' $\lambda(x)$ associated to each $x\in K$. We next make this framework precise and prove a weighted version of Theorem~\ref{thm:legalcount}.
\begin{definition}
Let $K$ be a finite set and $\lambda:K\to (0,\infty)$ be an \emph{activation function on $K$}. We define weighted versions of $N(r,\mathcal{L})$, $\ell$, and $\ell_c$. For a graph $G$ and $\phi:V(G)\to K$, let
\begin{align*}
w^{\lambda}(\phi)&=\prod_{v\in V(G)} \lambda(\phi(v)),\\
\ell^{\lambda}(G,\mathcal{L})&=\sum w^{\lambda}(\phi),
\shortintertext{where the sum is over all $\mathcal{L}$-legal neighborhood colorings $\phi$,}
\ell_c^{\lambda}(G,\mathcal{L})&=\sum w^{\lambda}(\phi),
\shortintertext{this time summed over all $\mathcal{L}$-legal closed neighborhood colorings $\phi$, and}
N^{\lambda}(r,\mathcal{L})&=\sum_{\substack{\phi:[r]\to K\\ \phi([r])\in \mathcal{L}}} w^{\lambda}(\phi)\\
&=\sum_{\substack{L\in \mathcal{L}\\\abs{L}=r}} \binom{r}{n(L)}\prod_{x\in L} \lambda(x).
\end{align*}
\end{definition}
\begin{theorem}\label{thm:legalwt}
If $G$ is an $r$-regular graph on $n$ vertices, $\mathcal{L}$ is a coloring condition, and $\lambda:K\to (0,\infty)$ is an activation function, then
\[
\ell^{\lambda}(G,\mathcal{L})\leq N^{\lambda}(r,\mathcal{L})^{n/r}=\ell^{\lambda}(K_{r,r},\mathcal{L})^{n/(2r)},
\]
and
\[
\ell_c^{\lambda}(G,\mathcal{L})\leq N^{\lambda}(r+1,\mathcal{L})^{n/(r+1)}=\ell_c^{\lambda}(K_{r+1},\mathcal{L})^{n/(r+1)}.
\]
\end{theorem}
\begin{proof}
We prove the result for rational weights; the general case follows by continuity. First observe that if we scale all weights by a positive factor $q$, then both sides of each inequality above scale by $q^{n}$. Thus, we may clear denominators and assume all weights $\lambda(x)$ are integers. We will introduce modified versions of $K$ and $\mathcal{L}$ as follows. Let
\[
K'=\setof{(x,i)}{x\in K, i\in [\lambda(x)]},
\]
and let $\mathcal{L}'$ be the set of multisets of $K'$ whose image under the projection onto the first coordinate is in $\mathcal{L}$. We have
\[
\ell^{\lambda}(G,\mathcal{L})=\ell(G,\mathcal{L}'),\qquad \ell_c^{\lambda}(G,\mathcal{L})=\ell_c(G,\mathcal{L}'),\qquad \text{and} \qquad N^{\lambda}(s,\mathcal{L})=N(s,\mathcal{L}'),
\]
for any integer $s$ and, in particular, for $s=r,r+1$. Applying Theorem~\ref{thm:legalcount}, the result follows.
\end{proof}
\subsection{Examples}
\label{sub:examples}
We now give a few examples that illustrate the utility of Theorem~\ref{thm:legalwt}. Recall that $\ds(G)$ is the number of dominating sets in $G$. We also write $\ds_k(G)$ for the number of dominating sets in $G$ of size $k$. Similarly, we define $\sds(G)$ and $\sds_k(G)$ to be, respectively, the number of strong dominating sets in $G$ and the number of strong dominating sets in $G$ of size $k$. We define the \emph{dominating set polynomial}, $D_G(\mu)$, and the \emph{strong dominating set polynomial}, $D_G^s(\mu)$, of a graph $G$ to be the generating functions enumerated by size, i.e.,
\begin{align*}
D_G(\mu)&=\sum_k \ds_k(G)\mu^{k},\\
D_G^s(\mu)&=\sum_k \sds_k(G)\mu^{k}.
\end{align*}
We have the following Corollary of Theorem~\ref{thm:legalwt}.
\begin{corollary}
If $G$ is an $r$-regular graph on $n$ vertices, and $\mu>0$, then
\begin{align*}
D_G(\mu)&\leq D_{K_{r+1}}(\mu)^{n/(r+1)},\\
D_G^s(\mu)&\leq D_{K_{r,r}}^s(\mu)^{n/(2r)}.
\end{align*}
\end{corollary}
\begin{proof}
Let $K=\set{0,1}$ and $\lambda:K\to (0,\infty)$ be defined by $\lambda(0)=1$ and $\lambda(1)=\mu$. Also, let $\mathcal{D}$ be the collection of all multisets from $K$ containing at least one $1$. Then $D_G(\mu)=\ell_c^{\lambda}(G,\mathcal{D})$ and $D_G^s(\mu)=\ell^{\lambda}(G,\mathcal{D})$. Thus, by Theorem~\ref{thm:legalwt}, we are done.
\end{proof}
A broad class of other applications comes from vertex colorings of hypergraphs associated to a graph $G$. Given a graph $G$, we define $\mathcal{N}(G)$ to be the hypergraph on vertex set $V(G)$ with edge set $\setof{N(v)}{v\in V(G)}$. Note that if $G$ is $r$-regular, then $\mathcal{N}(G)$ is $r$-uniform. Analogously, we define $\mathcal{N}_c(G)$ to be the hypergraph of the closed neighborhoods of $G$, so that if $G$ is $r$-regular, $\mathcal{N}_c(G)$ is $(r+1)$-uniform. Vertex colorings of $G$ are also vertex colorings of $\mathcal{N}(G)$ and $\mathcal{N}_c(G)$. Various conditions on these hypergraph colorings give rise to invariants of $G$ that are amenable to our techniques.
Recall that a vertex coloring of a hypergraph $\mathcal{H}$ is \emph{proper} if no edge is monochromatic, and \emph{rainbow} if no edge contains a repeated color. We let $\chi(\mathcal{H};q)$ be the number of proper $q$-colorings of $\mathcal{H}$ and $\invbreve{\chi}(\mathcal{H};q)$ be the number of rainbow colorings of $\mathcal{H}$.
\begin{theorem}\label{thm:prorain}
Let $G$ be an $r$-regular graph on $n$ vertices and let $q\geq 1$ be an integer. Then
\begin{align*}
\chi(\mathcal{N}(G);q)&\leq \chi(\mathcal{N}(K_{r,r});q)^{n/(2r)}=(q^r-q)^{n/r},\\
\chi(\mathcal{N}_c(G);q)&\leq \chi(\mathcal{N}_c(K_{r+1});q)^{n/(r+1)}=(q^{r+1}-q)^{n/(r+1)},\\
\invbreve{\chi}(\mathcal{N}(G);q)&\leq \invbreve{\chi}(\mathcal{N}(K_{r,r});q)^{n/(2r)}=(q(q-1)\cdots(q-r+1))^{n/r},\\
\invbreve{\chi}(\mathcal{N}_c(G);q)&\leq \invbreve{\chi}(\mathcal{N}_c(K_{r+1});q)^{n/(r+1)}=(q(q-1)\cdots(q-r))^{n/(r+1)}.
\end{align*}
\end{theorem}
\begin{proof}
Let $K=[q]$, let $\mathcal{P}$ be the collection of all multisets from $K$ that contain at least two different colors, and let $\mathcal{R}$ be the collection of all subsets of $K$, i.e., multisets all of whose elements are distinct. The number of proper $q$-colorings of $\mathcal{N}(G)$ (respectively, $\mathcal{N}_c(G)$) is exactly $\ell(G,\mathcal{P})$ (respectively, $\ell_c(G,\mathcal{P})$). Similarly, the number of rainbow $q$-colorings of $\mathcal{N}(G)$ (resp., $\mathcal{N}_c(G)$) is exactly $\ell(G,\mathcal{R})$ (resp., $\ell_c(G,\mathcal{R})$). Thus, the result follows from Theorem~\ref{thm:legalcount}.
\end{proof}
Note that a weighted version of Theorem~\ref{thm:prorain} follows equally immediately from Theorem~\ref{thm:legalwt}.
\section{Existence homomorphisms}
\label{sec:xhoms}
Some of our earlier results are rather naturally phrased in terms of what we call ``existence homomorphisms''.
\begin{definition}
Suppose that $G$ and $H$ are graphs with $H$ possibly having loops. We say that a map $\phi:V(G)\to V(H)$ is an \emph{existence homomorphism} if, for every $v\in V(G)$, there is a $w\in N(v)$ such that $\phi(v)\phi(w)\in E(H)$. We let $\XHom(G,H)$ be the set of all existence homomorphisms from $G$ to $H$ and set $\xhom(G,H)=\abs{\XHom(G,H)}$.
\end{definition}
\begin{example}
Let $H$ be $H_{\text{ind}}$ with $0$ the unlooped vertex and $1$ the looped one. If $G$ is a graph of minimum degree at least one, then elements of $\XHom(G,H)$ are precisely the characteristic functions of dominating sets of $G$. Every vertex that is mapped to $0$ must have some neighbor that is mapped to $1$. Since vertices mapped to $1$ have some neighbor in the graph, the existence homomorphism condition is satisfied at those vertices\footnote{Note that if $G$ does contain isolated vertices, then $\xhom(G,H_{\text{ind}})=0$. However, if $S$ is the set of isolated vertices of $G$, then $\ds(G)=\ds(G\setminus S)=\xhom(G\setminus S,H_{\text{ind}})$.}. Thus, $\xhom(G,H)=\ds(G)$.
\end{example}
\begin{example}
Existence homomorphisms into complete graphs correspond to proper closed neighborhood colorings. To be precise, if $\phi\in \XHom(G,K_q)$, then for each vertex $v$, there must be a neighbor $w$ with $\phi(v)\neq \phi(w)$. Hence, $\phi$ is a proper closed neighborhood coloring of $G$, i.e., a proper vertex coloring of $\mathcal{N}_c(G)$. Conversely, if $\phi$ is a proper closed neighborhood coloring of $G$, then for all vertices $v$, at least two colors appear on $N[v]$. In particular, it cannot be that all the colors on $N(v)$ are the same as $\phi(v)$. Thus, $\phi\in \XHom(G,K_q)$. Summarizing, we have $\xhom(G,K_q)=\chi(\mathcal{N}_c(G);q)$.
\end{example}
There seem to be some very interesting problems involving maximizing $\xhom(G,H)$ for fixed $H$. We discuss below two examples of such problems. In both cases, we consider $H=\Eq$, the graph on $q$ vertices with a loop on every vertex and no other edges. We can think of an existence homomorphism into $\Eq$ as a $q$-coloring in which each vertex is adjacent to another vertex of the same color. In other words, an ordered partition of $V(G)$ into $q$ parts in which every nonempty part induces a subgraph with minimum degree at least one.
The first problem we discuss is that of maximizing $\xhom(T,\Eq[2])$ where $T$ is a tree on $n$ vertices. For convenience, we let $\id(G)=\xhom(G,\Eq[2])$. We need, first, a simple lemma about $\id(P_n)$. Clearly, $\id(G)$ is even for every graph $G$ since switching the colors in any existence homomorphism into $\Eq[2]$ gives another such. In this section, it will be convenient for us to set $i_n=\frac{1}{2} \id(P_n)$. We will think of $i_n$ as being the number of $\phi\in \XHom(P_n,\Eq[2])$ having a specified color at the left-hand end. We denote the Fibonacci numbers as $F_n$, where $F_0=1$, $F_1=1$, and $F_n=F_{n-1}+F_{n-2}$ for $n\geq 2$.
\begin{lemma}
For any integer $n\geq 2$,
\[
i_n=F_{n-2}.
\]
\end{lemma}
\begin{proof}
It is simple to check that $i_2=i_3=1$. We label the vertices of $\Eq[2]$ as $a$ and $b$. If $n\geq 4$ and $\phi\in \XHom(P_n,\Eq[2])$ uses $a$ on the left-hand end, then the second vertex is forced to be colored $a$, and the third vertex is colored either $a$ or $b$. In the first case, deleting the left-hand vertex gives a legal coloring of $P_{n-1}$ with $a$ at its left-hand end; in the second case, the fourth vertex is forced to be colored $b$, and deleting the first two vertices gives a legal coloring of $P_{n-2}$ with $b$ at its left-hand end. There are exactly $i_{n-1}$ of the first type and $i_{n-2}$ of the second.
\end{proof}
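A brute-force check (ours) of the lemma for small $n$: we count $\{0,1\}$-colorings of $P_n$ in which every vertex has a neighbor of its own color and the left-hand end receives a fixed color.
\begin{verbatim}
from itertools import product

def i_path(n):
    """Colorings of P_n with color 0 at the left end in which every
    vertex has a neighbour of the same color."""
    count = 0
    for c in product((0, 1), repeat=n - 1):
        col = (0,) + c
        ok = all((v > 0 and col[v - 1] == col[v]) or
                 (v < n - 1 and col[v + 1] == col[v]) for v in range(n))
        count += ok
    return count

fib = [1, 1]
for _ in range(10):
    fib.append(fib[-1] + fib[-2])
print([i_path(n) for n in range(2, 10)])  # expected: [1, 1, 2, 3, 5, 8, 13, 21]
print(fib[:8])                            # F_0,...,F_7 with F_0 = F_1 = 1
\end{verbatim}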
\begin{proposition}
If $T$ is a tree on $n$ vertices, then
\[
\id(T)\leq \id(P_n),
\]
with equality if and only if $T=P_n$.
\end{proposition}
\begin{proof}
\begin{figure}
\caption{The basic operation transforming $T$ into $T'$}\label{fig:basicop}
\end{figure}
We will prove that if $T$ is a tree on $n$ vertices maximizing $\id$, then $T$ cannot contain a vertex of degree at least three having at least two pendant paths. This, then, by a simple argument, proves that $T=P_n$. Suppose that, as in Figure~\ref{fig:basicop}, $v$ is a vertex of degree at least three having pendant paths of lengths $k$ and $\ell$. We let $T'$ be the tree in which these paths are replaced by a single pendant path incident with $v$ of length $k+\ell$. We will prove that $\id(T')>\id(T)$. Let us write $S$ for $T$ with the pendant paths, but not $v$, removed (so that $v\in S$). Since $d_T(v)\geq 3$, we have $n(S)>1$. For convenience, we will count legal colorings of $T$ and $T'$ in which $v$ is colored $0$. Let us write $i_S=\frac{1}2 \id(S)$ for the number of legal colorings of $S$ in which $v$ is colored $0$. Also, we set $j_S$ to be the number of $\set{0,1}$-colorings of $S$ in which every vertex \emph{other than $v$} has a neighbor of the same color and $v$ is colored $0$. We analyze legal colorings of $T$ and $T'$ according to whether the paths contain a vertex colored $0$ adjacent to $v$. Thus,
\begin{align*}
\frac{1}2\id(T)&=j_S(i_{k+1}i_{\ell+1}+i_{k}i_{\ell+1}+i_{k+1}i_{\ell})+i_S i_k i_{\ell},\\
\frac{1}2\id(T')&=j_S i_{k+\ell+1}+i_S i_{k+\ell}.
\end{align*}
These follow from the fact that the number of legal colorings of a pendant path of length $m$ in which the vertex adjacent to $v$ is colored $0$ is $i_{m+1}$, whereas the number in which that vertex is colored $1$ is $i_m$. Now, using standard facts about the Fibonacci numbers, we have
\begin{align*}
i_{k+\ell+1}&=F_{k+\ell-1}=F_k F_{\ell-1}+F_{k-1} F_{\ell-2}=F_{k-1} F_{\ell-1} + F_{k-2} F_{\ell-1}+F_{k-1} F_{\ell-2}\\
&=i_{k+1}i_{\ell+1}+i_k i_{\ell+1}+i_{k+1}i_{\ell}, \shortintertext{and}
i_{k+\ell}&=F_{k+\ell-2}=F_{k-1}F_{\ell-1}+F_{k-2}F_{\ell-2}=i_{k+1}i_{\ell+1}+i_k i_{\ell}\\
&> i_k i_{\ell}.
\end{align*}
Thus, $\id(T')>\id(T)$ provided $\id(S)>0$, i.e., $n(S)>1$.
\end{proof}
Finally, we mention a simple result giving a bound for $\xhom(G,\Eq[2])$ for $2$-regular graphs. It would be very interesting to give a corresponding bound for $r$-regular graphs.
\begin{proposition}
If $G$ is a $2$-regular graph on $n$ vertices, then
\[
\xhom(G,\Eq[2])\leq \xhom(C_6,\Eq[2])^{n/6}.
\]
\end{proposition}
\begin{proof}
In \cite{AFK,AFKerr}, Agur, Fraenkel, and Klein prove that $c_n=\xhom(C_n,\Eq[2])$ satisfies the recurrence
\[
c_n=2c_{n-1}-c_{n-2}+c_{n-4},\qquad n\geq 7.
\]
From this, it is straightforward to determine that $c_n^{1/n}\to \phi$, the golden ratio, as $n\to \infty$. Checking small values, one finds that $c_n^{1/n}$ is maximized at $n=6$. Since $\xhom(\cdot,\Eq[2])$ is multiplicative on disjoint unions, this proves the result.
\end{proof}
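Both the recurrence of Agur, Fraenkel, and Klein and the maximization at $n=6$ are easy to confirm numerically for small $n$; a short sketch (ours):
\begin{verbatim}
from itertools import product

def xhom_cycle(n):
    """Count 2-colorings of C_n in which every vertex has a neighbour
    of the same color, i.e., xhom(C_n, E_2)."""
    return sum(1 for col in product((0, 1), repeat=n)
               if all(col[(v - 1) % n] == col[v] or col[(v + 1) % n] == col[v]
                      for v in range(n)))

c = {n: xhom_cycle(n) for n in range(3, 15)}
print(all(c[n] == 2 * c[n - 1] - c[n - 2] + c[n - 4]
          for n in range(7, 15)))                        # expected: True
print(max(range(3, 15), key=lambda n: c[n] ** (1 / n)))  # expected: 6
\end{verbatim}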
\end{document}
\begin{document}
\numberwithin{equation}{section}
\title{The $2$-adic Analysis of Stirling Numbers of the Second
Kind via Higher Order Bernoulli Numbers and Polynomials}
\author{Arnold Adelberg\\
Myra Steele Emeritus Professor of Mathematics\\
Department of Mathematics and Statistics\\
Grinnell College\\
Grinnell, IA 50112}
\maketitle
\begin{abstract}
Several new estimates for the $2$-adic valuations of Stirling numbers of the
second kind are proved. These estimates, together with criteria for when they
are sharp, lead to improvements in several known theorems and their
proofs, as well as to new theorems. The estimates and criteria all depend on
our previous analysis of powers of $2$ in the denominators of coefficients of
higher order Bernoulli polynomials. The corresponding estimates for Stirling
numbers of the first kind are also proved.
Some attention is given to asymptotic cases, which will be further explored in
subsequent publications.
Keywords: Stirling numbers of the second kind, $2$-adic analysis, higher order
Bernoulli numbers and polynomials, estimates and exact values, Newton polygons.
\textit{MSC[2010]}: 11B68, 11B73, 05A10, 11S05.
\end{abstract}
\section{Introduction}
This paper brings together and extends a collection of related results on the $2$-adic
analysis of Stirling numbers of the second kind. We hope that our approach, based on
our earlier results for higher order Bernoulli numbers and polynomials, provides a
coherent theoretical basis that others will find useful for further investigations.
The proofs we give for known results are shorter and simpler, often dramatically so.
The results themselves are typically sharper and broader. We also get some new
results, most of which involve new estimates that are stronger than those in the
literature.
The current paper is a continuation of [4] but is quite different in its goals and scope.
Whereas the previous paper considered all primes and Stirling numbers of both kinds,
for reasons of brevity and focus this paper will primarily consider only the even
prime and will concentrate on Stirling numbers of the second kind $S(n,k)$.
Lengyel [11] proved in 1994 that $\nu_2(S(2^h,k)) = \sigma_2(k) -1$, if $h$ is sufficiently
large and $k>0$, and conjectured that this formula holds whenever $1 \le k \le 2^h$,
where $\sigma_2(k) =$ sum of base $2$ digits. This was eventually proven in 2005 by
De Wannemacker [7]. Subsequently Lengyel [13] gave another proof and
adapted De Wannemacker's
proof to extend the theorem to $\nu_2(S(c2^h,k))=\sigma_2(k)-1$ if $c \ge 1$ and
$1 \le k \le 2^h$.
We found a much simpler proof of De Wannemacker's Theorem in [4], which we were
able to generalize to odd primes and to minimum zero cases (MZC), which are based
on the estimate
\begin{align}
\nu_2(S(n,k)) \ge \sigma_2(k) -\sigma_2(n)
\end{align}
which we call the minimum zero estimate. When it is sharp, we have the minimum
zero case (MZC).
In the current paper, we give several other useful estimates. One, which is based on
recursive properties of Stirling polynomials, is
\begin{align}
\nu_2(S(n,k)) \ge \sigma_2(k-1)-\sigma_2(n-1)
\end{align}
which we call the shifted minimum zero estimate. When this is sharp, we have the
shifted minimum zero case (SMZC).
Significantly better than these estimates are our new almost minimum zero estimate
\begin{align}
\nu_2(S(n,k)) \ge \sigma_2(k) -\sigma_2(n) + \#(\text{common 2-powers in~} n
\text{~and~} n-k)
\end{align}
and our new shifted almost minimum zero estimate
\begin{align}
\nu_2(S(n,k)) \ge \sigma_2(k-1)-\sigma_2(n-1) +\#(\text{common 2-powers in~} n-1
\text{~and~} n-k)
\end{align}
An almost minimum zero case (AMZC) is one where the estimate (1.3) is sharp, but which
is not a MZC, while a shifted almost minimum zero case (SAMZC) is one which is not
a SMZC and the estimate (1.4) is sharp. When the distinction between MZC and AMZC
is unimportant, we may use AMZC for the sharp almost minimum zero estimate. We
may also adopt the analogous convention for SAMZC.
Unlike the minimum zero and shifted minimum zero estimates, these estimates are never
vacuous (negative). This leads to very simple new necessary and sufficient conditions for
when a Stirling number $S(n,k)$ is odd. [Theorem 3.2]
Most of the significant analysis of this paper rests on the fact that, since $B_{n-k}^{(-k)}$
is a coefficient of $B_{n-k}^{(-k)}(x)$, the $2$-adic pole of $B_{n-k}^{(-k)}$, i.e., the
highest power of $2$ in its denominator, is less than or equal to the maximum pole of
$B_{n-k}^{(-k)}(x)$, which is the highest power of $2$ appearing in the denominators of
all the coefficients. We have a
simple formula for this maximum pole (cf. [1,2]), which is given in the Appendix.
The geometry of these cases is instructive: The MZC occurs when the Newton polygon
of $B_{n-k}^{(-k)}(x)$ is strictly decreasing; the SMZC occurs when the Newton
polygon of $B_{n-k}^{(-k+1)}(x)$ is strictly decreasing; the AMZC occurs when the
Newton polygon of $B_{n-k}^{(-k)}(x)$ is weakly decreasing, i.e., the last segment
of the Newton polygon is horizontal; the SAMZC holds when the pole of
$B_{n-k}^{(-k+1)}(1)$ is the maximum pole of $B_{n-k}^{(-k+1)}(x)$ , but this pole
also occurs in at least one coefficient other than the constant coefficient.
In our study of the literature, we have found that every significant estimate or exact value
of $\nu_2(S(n,k))$ we considered arises from one of our estimates or cases.
For example, the proofs in ([9], Theorems 1.1, 1.2, 1.3) are very lengthy and
highly technical, while ours are much shorter and more efficient.
Also, in [12] Lengyel gives many proofs of estimates for $\nu_2(S(c2^h,2^h+a))$,
which we handle easily by our methods with far less computation. He also gives
estimates for $\nu_2(S(c2^h+u,k))$ which are not as good as our almost
minimum zero estimates (unless $u$ is a power of $2$), and his proofs are more
involved than ours.
The organization of this paper is as follows: Section 2 states a number of elementary,
useful facts about base two arithmetic, gives some basic definitions, and states the
main theorems of this paper. Our statements include the estimate or case that leads to
our proof, since that provides insight into why the theorems are true.
Section 3 provides simple, effective criteria for the four cases,
establishes certain invariance properties for these estimates and cases,
and proves a couple of new theorems. Included in this section are new necessary and
sufficient conditions for the Stirling number $S(n,k)$ to be odd, which generalize our
conditions for the central Stirling numbers $S(2k,k)$ (cf. [4]). We also state,
for reference, the
estimates and cases for Stirling numbers of the first kind. Section 4 proves the
main non-asymptotic theorems. Section 5 proves an illustrative asymptotic theorem,
which is stated more simply, and with an exponentially better estimate for when
the limit is attained, than in the literature. Our proof in this section does not depend
on the estimates given in the Introduction but depends instead on a new estimate for the
partition dependent terms, which is given in the Appendix [Theorem 6.1].
Section 6 collects the material
on higher order Bernoulli numbers and polynomials needed for this paper.
\section{Base two preliminaries, definitions, and statement of main theorems}
Since we deal only with the prime two in this paper, we will omit the prime in our
notations; e.g., we will write $\nu$ instead of $\nu_2$ for the $2$-adic valuation and
$\sigma$ instead of $\sigma_2$ for the sum of the base two digits, which is the
same as the number of powers of two in the base two representation.
We extend our previous notion of pole to allow a $2$-adic unit to be considered as a pole
of order zero, so if $N \ge 0$ and $\nu(a) \ge -N$, then we say that $a$ has at most a
pole of order $N$.
Let $[n]=$ the set of $2$-powers in the base two expansion of $n$, so if $n= \sum a_i2^i$ with
all $a_i \in \{0,1\}$, then $[n]$ corresponds to the ones in the expansion, i.e., the ones
in the base two representation of $n$. The following facts are obvious but useful:
\begin{align}
&\#([n])=\sigma(n), \quad \min([n])=2^{\nu(n)}, \quad \max([n])=
2^{\lfloor \log_2(n)\rfloor}, \notag\\
&[n-1]=\left([n]-\{2^{\nu(n)}\}\right) \cup \{2^0,2^1,\ldots, 2^{\nu(n)-1}\}~
(\text{disjoint union}),
\notag\\
\text{so~}&\sigma(n-1)=\sigma(n)-1+\nu(n) ~(\text{cf.\ [4]}), \text{~and}\notag\\
&2^{\nu(n-m)} = \min\big(([n] \cup [m])-([n] \cap [m])\big)
\end{align}
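These facts can be checked mechanically; the helper functions below (ours, added only for illustration) implement $\nu$, $\sigma$, and $[n]$ and verify the listed identities for small arguments.
\begin{verbatim}
def nu(n):        # the 2-adic valuation of n
    return (n & -n).bit_length() - 1

def sigma(n):     # the number of ones in the base two expansion of n
    return bin(n).count("1")

def powers(n):    # the set [n] of 2-powers in the base two expansion of n
    return {1 << i for i in range(n.bit_length()) if n >> i & 1}

for n in range(2, 200):
    assert len(powers(n)) == sigma(n)
    assert min(powers(n)) == 2 ** nu(n)
    assert max(powers(n)) == 2 ** (n.bit_length() - 1)
    assert sigma(n - 1) == sigma(n) - 1 + nu(n)
    for m in range(1, n):
        assert 2 ** nu(n - m) == min(powers(n) ^ powers(m))  # symmetric difference
print("all identities verified")   # expected output
\end{verbatim}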
The basic facts that we need about binomial coefficients are as follows:
\begin{align}
\nu\binom{a+b}{a} = \sigma(a)+\sigma(b)-\sigma(a+b) = \#(\text{base two carries for~}
a+b)
\end{align}
If $a= \sum a_i2^i$ and $b=\sum b_i 2^i$, then there is a base two carry out of place $i$ if either
$a_i=b_i=1$, or $\{a_i,b_i\} = \{0,1\}$ and there is a carry into place $i$. A carry
where $a_i=b_i=1$ is called a forced carry, and other carries are said to be unforced. The
number of forced carries is $\#([a] \cap [b])$.
\begin{lemma} If $n \ge m$ then $\#([n] \cap [n-m]) \ge \sigma(n)-\sigma(m)$, i.e.,
$\#([n]-[n-m]) \le \sigma(m)$, using the set difference.
\end{lemma}
\begin{proof}
If $m=2^i$, then subtracting $m$ from $n$ removes from $[n]$ the smallest $2$-power which is
greater than or equal to $2^i$, and if this power $2^j$ is bigger than $2^i$, then the powers
$2^{j-1},\ldots,2^i$ are inserted. We subtract a general $m$ by subtracting its $2$-powers one at
a time, iterating this process; since each step removes at most one element of the original set
$[n]$, at most $\sigma(m)$ elements of $[n]$ are lost, which is the claim.
\end{proof}
\begin{lemma} If $\binom{b}{a}$ is odd, then $\#([b] \cap [b-a]) = \#([b-a])=
\sigma(b)-\sigma(a)$; if $b$ is odd and greater than $1$, then
$\#([b] \cap [b-3])=\sigma(b)-2$, while
if $b$ is even and greater than $2$, then $\#([b] \cap [b-3]) = \sigma(b)-1$.
\end{lemma}
\begin{proof}
The first assertion follows from (2.2). The other parts follow from $2^\alpha -2
= 2^{\alpha -1} + \cdots + 2$, first subtracting $2$ from $b$, then subtracting $1$.
\end{proof}
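Lemma 2.2 is also easy to confirm mechanically for small $b$; a short sketch (ours):
\begin{verbatim}
from math import comb

def sigma(n):
    return bin(n).count("1")

def powers(n):
    return {1 << i for i in range(n.bit_length()) if n >> i & 1}

for b in range(4, 300):
    for a in range(0, b + 1):
        if comb(b, a) % 2 == 1:
            assert len(powers(b) & powers(b - a)) == sigma(b) - sigma(a)
    if b % 2 == 1:
        assert len(powers(b) & powers(b - 3)) == sigma(b) - 2
    else:
        assert len(powers(b) & powers(b - 3)) == sigma(b) - 1
print("Lemma 2.2 verified for 4 <= b < 300")   # expected output
\end{verbatim}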
Next we list the main theorems, most of which come from [9,12]. We have edited them
to conform to our notations and conventions and include the relevant estimates
or cases.
\begin{theorem} (cf. [9],Theorem 1.2) Let $a,c,h \in \mathbb{N}$ with $c\ge 1$ being odd
and $1 \le a \le 2^h$. Then $S(c2^h,(c-1)2^h+a)$ is a MZC and
\[
\nu(S(c2^h,(c-1)2^h+a))= \sigma(a)-1
\]
\end{theorem}
~\\
Note. In ([9] Theorem 1.2), it is assumed that $h \ge 2$, which appears from our proof
to be an unnecessary assumption.
\begin{theorem} (cf. [9] Theorem 1.1) Let $h,a,b,c \in \mathbb{N}$ with $0<a<2^{h+1}$,
$b2^{h+1} +a \le c2^h$ and $c\ge 1$ being odd. Then the almost minimum zero estimate
is
\[
\nu(S(c2^h,b2^{h+1}+a)) \ge \sigma(a)-1
\]
\end{theorem}
\begin{theorem} (cf.\ [9] Theorem 1.4) Let $a,b,c,m,h \in \mathbb{Z}^+$ with $0<a<2^{h+1}$,
$b2^{h+1}+2^h < 2^m$, and $c \ge 1$ odd. Then if $a<2^{h+1} -1$, the almost
minimum zero estimate is $\nu(S(c2^m+b2^{h+1}+2^h,b2^{h+2}+a)) \ge \sigma(a)-1$,
which is not sharp, i.e., $\nu(S(c2^m+b2^{h+1}+2^h,b2^{h+2}+a)) \ge \sigma(a)$.
If $a=2^{h+1}-1$, we have a AMZC with the same estimate, i.e.,
$\nu(S(c2^m+b2^{h+1}+2^h,b2^{h+2}+a)) = \sigma(a)-1=h$.
\end{theorem}
\begin{theorem} (cf.\ [12] Theorem 6) Let $h,u,c \in \mathbb{N}$ with $u \le 2^{h}$. Then
if $u<2^h$, the shifted almost minimum zero estimate is
\[
\nu(S(c2^h+u,2^h)) \ge h-1-\nu(u)
\]
Furthermore, the cases $u$ even with $u \le 2^{h-1}$, or $u=1$, or $u=1+2^{h-1}$
are exactly the cases where the estimate is sharp, i.e., where
\[
\nu(S(c2^h+u,2^h)) = h-1-\nu(u)
\]
Finally if $u=2^h$, then $\nu(S(c2^{h}+u,2^h))=0$.
\end{theorem}
\begin{theorem} (cf. [12] Theorem 7) Let $h,k,u,c \in \mathbb{N}$ with $1 \le k \le 2^h$ and
$u \le 2^{\nu(k)}$. Then if $u<k$ the shifted almost minimum zero estimate is
\[
\nu(S(c2^h+u,k)) \ge \nu(k)+\sigma(k)-\nu(u)-2
\]
Furthermore we have the sharp estimate
\[
\nu(S(c2^h+u,k))=\nu(k)+\sigma(k)-\nu(u)-2
\]
if and only if $u=1$, or $1 \le u \le 2^{\nu(k)-1}$ and $u$ is even, or $u=1+2^{\nu(k)-1}$,
or $u=2^{\nu(k)}$. Finally if $u=k$, so that $\sigma(k)=1$, then $\nu(S(c2^{h}+u,k))=0$.
\end{theorem}
~\\
Notes. Theorem (2.4) is the special case of Theorem (2.5) for $k=2^h$. The estimates
given by Lengyel in [12] are considerably weaker than ours, since $\nu(u) < \lfloor \log_2(u)
\rfloor$ unless $u=2^m$. Also he gets exact values only for the $2$-powers instead
of for all the even numbers less than or equal to $2^{\nu(k)-1}$.
~\\
The next asymptotic result does not depend on the estimates or cases given in the
Introduction but depends instead on the new estimate given in the Appendix.
\begin{theorem} (cf. [12] Theorem 5)
If $\nu(k) < \nu(n)$ or if $\nu(k)=\nu(n)$ and $2^{\nu(n-k)} \in [k]$, then
\[
\lim_{h\rightarrow\infty} \nu(S(2^h n, 2^h k)) = \sigma(k)-\sigma(n) +
\nu\binom{n+n-k}{n}
\]
and this limit is attained if $2^{h-1+\nu(n-k)} \ge \nu\binom{n+n-k}{n}$. If $\nu(n)<\nu(k)$
or $\nu(n)=\nu(k)$ and $2^{\nu(n-k)} \in [n]$, then
\[
\lim_{h\rightarrow\infty} \nu(S(2^h n, 2^h k)) = \sigma(k-1)-\sigma(n-1) +
\nu\binom{n-1+n-k}{n-1}
\]
Furthermore,
if $\nu(n)<\nu(k)$, the limit is attained if $2^{h-1+\nu(n)} \ge \nu\binom{n-1+n-k}{n-1}$,
while if $\nu(n)=\nu(k)$ and $2^{\nu(n-k)} \in [n]$, the limit is attained if
$2^{h-1+\nu(n)} > \nu\binom{n-1+n-k}{n-1}$.
\end{theorem}
~\\
Remark. Our formulas for the limit are simpler than Lengyel's, and the estimates
for when the limits are attained are exponentially better.
\section{Basic properties of the estimates and cases, some examples and new results}
The key formula is
\begin{align}
S(n,k) = \binom{n}{k} B_{n-k}^{(-k)}
\end{align}
and since $B_{n-k}^{(-k)} = (k/n) B_{n-k}^{(-k+1)}(1)$, we get
\begin{align}
S(n,k)=\binom{n-1}{k-1}B_{n-k}^{(-k+1)}(1)
\end{align}
Hence, from our maximum pole formula, we have
\begin{align}
\nu(S(n,k)) = \nu\binom{n}{k} + \nu\left( B_{n-k}^{(-k)}\right) \ge \sigma(k)
-\sigma(n) + \#([n] \cap [n-k])
\end{align}
and
\begin{align}
\nu(S(n,k)) \ge \sigma(k-1)-\sigma(n-1) + \#([n-1] \cap [n-k])
\end{align}
Formula (3.3) is the almost minimum zero estimate, which is sharp
without the MZC iff the Newton
polygon of $B_{n-k}^{(-k)}(x)$ is weakly decreasing, i.e., its final segment is
horizontal. The geometry of sharpness for the shifted almost minimum zero case
is less clear, namely we may or may not have a horizontal final segment.
\begin{theorem}
The almost minimum zero and shifted almost minimum zero estimates are non-negative.
\end{theorem}
\begin{proof}
The almost minimum zero estimate is
\[
\nu(S(n,k)) \ge \sigma(k)-\sigma(n) + \#([n] \cap [n-k])
\]
which is non-negative by Lemma 2.1. The proof for the shifted estimate is identical, replacing
$(n,k)$ by $(n-1,k-1)$.
\end{proof}
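Both estimates, and their non-negativity, are easy to test numerically. The sketch below (ours; the helper functions are not part of the paper) checks the almost minimum zero estimate (3.3) and the shifted estimate (3.4) against the exact $2$-adic valuations of $S(n,k)$ for small parameters.
\begin{verbatim}
from math import comb, factorial

def stirling2(n, k):
    """Stirling numbers of the second kind via the explicit formula."""
    return sum((-1) ** (k - j) * comb(k, j) * j ** n
               for j in range(k + 1)) // factorial(k)

def nu(n):
    return (n & -n).bit_length() - 1

def sigma(n):
    return bin(n).count("1")

def powers(n):
    return {1 << i for i in range(n.bit_length()) if n >> i & 1}

ok = True
for n in range(2, 40):
    for k in range(1, n + 1):
        v = nu(stirling2(n, k))
        amz = sigma(k) - sigma(n) + len(powers(n) & powers(n - k))
        samz = sigma(k - 1) - sigma(n - 1) + len(powers(n - 1) & powers(n - k))
        ok = ok and amz >= 0 and samz >= 0 and v >= amz and v >= samz
print(ok)   # expected: True
\end{verbatim}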
Note that $\nu\left( B_{n-k}^{(-k+1)}(1)\right) = -\sigma(n-k)$ iff $\nu\left(
B_{n-k}^{(-k+1)}\right) = -\sigma(n-k)$ iff $B_{n-k}^{(-k+1)}(x)$ is a maximum
pole case iff $S(n-1,k-1)$ is a MZC. This gives an alternative proof of the Amdeberhan
conjecture [5] which was proved in [8]. Thus $\nu(S(n+1,k+1)) = \sigma(k)-\sigma(n)$
iff $S(n,k)$ is a MZC, in which case $\nu(S(n+1,k+1))=\nu(S(n,k))$.
\begin{theorem} (Odd Stirling numbers of the second kind) The following are
equivalent:
\begin{itemize}
\item[(a)] $S(n,k)$ is odd.
\item[(b)] $\#([n] \cap [n-k]) = \sigma(n)-\sigma(k)$ and $S(n,k)$ is a MZC
or AMZC.
\item[(c)] $\#([n-1] \cap [n-k]) = \sigma(n-1)-\sigma(k-1)$ and $S(n,k)$ is a SMZC
or SAMZC.
\end{itemize}
\end{theorem}
\begin{proof}
Since $\nu(S(n,k)) \ge \sigma(k)-\sigma(n) + \#([n] \cap [n-k]) \ge 0$, we have
$\nu(S(n,k))=0$ iff the almost minimum zero estimate is sharp and zero, i.e.,
$\#([n] \cap [n-k]) = \sigma(n) -\sigma(k)$ and the estimate is sharp.
The argument is similar for the shifted case.
\end{proof}
A different necessary and sufficient condition for $S(n,k)$ to be odd is proved in
([6], Theorem 2.1), which has no obvious relation to ours.
The preceding theorem is particularly helpful once we have established criteria for the
different cases.
\begin{theorem} (Criteria for the four cases)
\begin{itemize}
\item[(i)] $S(n,k)$ is a MZC iff $[n-k] \cap [n] = \emptyset$.
\item[(ii)] $S(n,k)$ is a SMZC iff $[n-k]\cap [n-1] = \emptyset$.
\item[(iii)] $S(n,k)$ is a AMZC iff $[n-k]\cap [n] \ne \emptyset$ and precisely one
of the following conditions holds:
\begin{itemize}
\item[(a)] $\nu\binom{n+n-k}{n} = \#([n] \cap [n-k])$, i.e., $n+n-k$ has no unforced
carries.
\item[(b)] $\nu\binom{n+n-k-1}{n} = \#([n]\cap [n-k])-1$, i.e., $\nu(n) =\nu(n-k)$
and $n+n-k-1$ has no unforced carries.
\item[(c)] $n-k$ is odd and $\nu\binom{n+n-k-2}{n} = \#([n] \cap [n-k])-1$, i.e.,
$n-k$ is odd, the least positive exponent in $[n]$ is the same as the least positive
exponent in $[n-k]$, and $n+n-k-2$ has no unforced carries.
\end{itemize}
\item[(iv)] $S(n,k)$ is a SAMZC iff $[n-k]\cap [n-1] \ne \emptyset$ and precisely
one of the following conditions holds:
\begin{itemize}
\item[(a)] $\nu\binom{n-1+n-k}{n-1} = \#([n-1]\cap [n-k])$, i.e., $n-1+n-k$
has no unforced carries.
\item[(b)] $n-k$ is odd and
$\nu\binom{n-1+n-k-2}{n-1} =\#([n-1]\cap [n-k])-1$, i.e., $n-k$ is odd,
the least positive exponent in $[n-1]$ is the least positive exponent in $[n-k]$, and
$n-1+n-k-2$ has no unforced carries.
\end{itemize}
\end{itemize}
\end{theorem}
~\\
Remark. Note that the shift is generally advisable only if $\nu(n) < \nu(k)$.
\begin{proof}
We omit the proof details, which follow from the material on maximum poles in the
Appendix, other than to note that in (iii), (a) comes from the partition where $u_1 =n-k$,
and (b) comes from the partition where $u_1 = n-k-1$, and (c) comes from the
partition where $u_1 = n-k-3$ and $u_3=1$. Similarly for (iv). Conditions $(b)$ and $(c)$
cannot both hold by Lemma 2.2, since if $2^0 \in [n] \cap [n-k]$ and $2^1 \in [n]-
[n-k-2]$ or $2^1 \in [n-k-2]-[n]$, then $n+n-k-2$ has an unforced carry in place $2^1$.
Lemma 2.2 also eliminates the a priori possibility that the partition where $u_1=
n-k-4$ and $u_3=1$, with $d=n-k-3$ being odd, gives the maximum pole. Thus the
partitions noted in (iii) or (iv) are the only ones that can give the maximum pole
of $B_{n-k}^{(-k)}(x)$ or $B_{n-k}^{(-k+1)}(x)$.
\end{proof}
\begin{corollary} (Hong-Amdeberhan [5,8])
$\nu(S(n,k)) = \sigma(k-1)-\sigma(n-1)$ iff $S(n,k)$ is a SMZC iff $S(n-1,k-1)$ is a
MZC iff $\binom{n-1+n-k}{n-1}$ is odd.
\end{corollary}
\begin{corollary} (Central Stirling numbers)
For the central Stirling number $S(2k,k)$ the almost minimum zero estimate is
\begin{itemize}
\item[(a)] $\nu(S(2k,k)) \ge \#(\text{pairs of consecutive ones in~} k)$, and
\item[(b)] $\nu(S(2k,k)) = 1$ iff $S(2k,k)$ is an AMZC iff $k = 3+8k'$ with $k'$ Fibbinary.
\end{itemize}
\end{corollary}
\begin{proof}
For (a), if $n=2k$, then $\sigma(n) = \sigma(k)$ and $\#(\text{pairs of consecutive
ones in~} k)
= \#([k] \cap [2k]) = \#([n] \cap [n-k])$, so the inequality is just the almost minimum
zero estimate. For (b), we have $\nu(S(2k,k)) =1$ iff $\#([n] \cap [n-k]) = 1$ iff $S(n,k)$ is an AMZC.
If $k=3+8k'$ where $k'$ is Fibbinary then $n-k=k$ is odd, so the least positive exponent in
$k$ is $1$, which is the least positive exponent in $n$. Also $n+n-k-2 = 6+16k' +1+
8k' = 2+4+8(2k' +k')+1$ has no carries, so (iii) part (c) applies. Finally, if there is a
different pair of consecutive ones in $k$, it is easy to see that none of the conditions in
(iii) apply, so $S(n,k)$ is not an AMZC.
\end{proof}
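As a concrete instance (added here as a check), take $k'=0$, so $k=3$ and
$S(2k,k)=S(6,3)=90$. The binary expansion $k=11_{2}$ contains exactly one pair of
consecutive ones, so (a) gives
\[
\nu(S(6,3))\ge 1,
\]
and (b) asserts equality; indeed $\nu(90)=1$.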
The parts of the next theorem can be found in the literature, e.g., in [9] and [12].
It is included here as an excellent example of our estimates and cases.
\begin{theorem}
Let $c \ge 3$ be odd, $n=c2^h$, and $1 \le k \le 2^{h+1}$. Then
\begin{itemize}
\item[(i)] If $k \le 2^h$ then $S(n,k)$ is an AMZC and $\nu(S(n,k)) = \sigma(k)-1$.
(Lengyel's extension of De Wannemacker's theorem.)
\item[(ii)] If $2^h < k < 2^{h+1}$ and $k=2^h+a$, then the almost minimum zero
estimate is $\nu(S(n,k)) = \nu(S(n,2^h+a)) \ge \sigma(a) = \sigma(k)-1$.
\item[(iii)] If $k=2^h+a$ with $0<a<2^h -1$, so $k <2^{h+1}-1$, then $S(n,k)$
is not an AMZC, so $\nu(S(n,k)) \ge \sigma(a)+1 = \sigma(k)$, while if $a=2^h-1$,
so that $k = 2^{h+1}-1$, then $S(n,k)$ is an AMZC, and $\nu(S(n,k)) = \sigma(a)=h$.
\item[(iv)] If $a=2^h$, i.e., $k=2^{h+1}$, then $S(n,k)$ is an AMZC and a SAMZC,
and $\nu(S(n,k))=0$.
\end{itemize}
\end{theorem}
\begin{proof}
For (i), we have $n-k=(c-1)2^h+2^h-k$, so $\#([n-k]\cap [n])=\sigma(c)-1$. For the
sum $n-k+n$, the carries are the same as for $c-1+c$, which are all unforced. Also
$\nu(n-k) \ne \nu(n)=h$, and the smallest positive exponent in $n-k$ is not equal to
the smallest positive exponent in $n$. Thus $S(n,k)$ is an AMZC with $\nu(S(n,k)) =
\sigma(k)-\sigma(n)+\sigma(c)-1=\sigma(k)-\sigma(c)+\sigma(c)-1=\sigma(k)-1$.
For (ii), if $k=2^h+a$ with $0< a<2^h$, then $\sigma(k)-\sigma(n)=1+\sigma(a)
-\sigma(c)$ and if $\alpha =\nu(c-1)$ and $T=c-1$, then $n-k=T2^h-a=(T-2^\alpha)2^h
+(2^{\alpha+h}-2^h) +(2^h -a) = (T-2^\alpha)2^h + 2^{\alpha+h-1} + \cdots+2^h +(2^h-a)$,
so $[n-k] \cap [n] = ([T2^h] - \{2^{\alpha +h}\}) \cup \{2^h\}$ and $\#([n-k] \cap [n])=
\sigma(c)-1$. Thus the almost minimum zero estimate for $S(n,k)$ is $1+\sigma(a)-\sigma(c)
+\sigma(c)-1 = \sigma(a)=\sigma(k)-1$.
For (iii), if $0<a<2^h-1$ then $n-k+n$ has an unforced carry for exponent $\alpha$,
and the other partitions in Theorem 3.3(iii) are also not valid, so $S(n,k)$ is not an AMZC,
and $\nu(S(n,k)) \ge \sigma(a)+1 = \sigma(k)$. If $a=2^h-1$, so $k=2^{h+1}-1$, it is
easy to verify that the first two partitions in Theorem 3.3(iii) still fail to meet the
conditions, but the third
partition, with $u_1=n-k-3$ and $u_3=1$, now works, so $S(n,k)$ is an AMZC, and
$\nu(S(n,k)) = \sigma(a)=\sigma(2^h-1)=h=\sigma(k)-1$.
For (iv), if $a=2^h$, i.e., $k=2^{h+1}$, we now have $n-k=(T-2^\alpha)2^h+
2^{\alpha+h-1} + \cdots+ 2^h$, so the partitions of type (a) and (c) in Theorem 3.3(iii)
now fail, but the partition of type (b) where $u_1=n-k-1$ works, so $S(n,k)$ is an AMZC
and $\nu(S(n,k)) = \sigma(k)-\sigma(n)+\#([n] \cap [n-k]) = 1-\sigma(c)+\sigma(c)-1=0$.
Finally, the shifted minimum zero estimate is $\nu(S(n,k)) \ge \sigma(k-1)-\sigma(n-1)
+\#([n-1] \cap [n-k]) = \sigma(k)-1+\nu(k)-(\sigma(n)-1+\nu(n)) + \sigma(c)-2=0$,
and now the partition of type (a) works and the partition of type (c) doesn't (since
$n-k$ is even), so $S(n,k)$ is a SAMZC.
\end{proof}
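The smallest instance, $c=3$ and $h=1$ (so $n=6$), makes a convenient added check:
\[
\nu(S(6,1))=\nu(1)=0,\quad
\nu(S(6,2))=\nu(31)=0,\quad
\nu(S(6,3))=\nu(90)=1,\quad
\nu(S(6,4))=\nu(65)=0,
\]
in agreement with (i) for $k\le 2^{h}=2$, with (iii) for $k=2^{h+1}-1=3$, and with (iv)
for $k=2^{h+1}=4$.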
The following theorem can be easily proved using the criteria for the cases, so we will not
give the proof. It does show that Lengyel's extension of De Wannemacker's Theorem
follows formally from De Wannemacker's Theorem.
\begin{theorem} (Invariance)
Suppose $\Delta>0$ and all $2$-powers in $\Delta$ are greater than all $2$-powers in $n$.
Then
\begin{itemize}
\item[(a)] for all four estimates, the estimate for $\nu(S(n,k))$ is the same as the
estimate for $\nu(S(n+\Delta,k))$ and also the same for $\nu(S(n+\Delta,k+\Delta))$.
\item[(b)] $S(n+\Delta,k)$ is an AMZC iff $S(n,k)$ is an AMZC or a MZC, and if any
of the cases hold, then $\nu(S(n+\Delta,k))=\nu(S(n,k))$.
\item[(c)] The same results hold if we replace cases by their shifts.
\item[(d)] If $\nu(\Delta) > \lfloor \log_2(n) \rfloor +1$, then $S(n+\Delta,k+\Delta)$
is an AMZC if $S(n,k)$ is a MZC or an AMZC. Similarly for the shifts. For all of these cases,
we have $\nu(S(n+\Delta,k+\Delta))=\nu(S(n,k))$.
\end{itemize}
\end{theorem}
~\\
Remark. The assumption in (d) gives a ``gap'' in the $2$-powers between $n$ and $n+\Delta$.
This is necessary to preserve the no-unforced-carries condition as we pass from $(n,k)$
to $(n+\Delta,k+\Delta)$.
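A small example of part (b), added here: take $n=6$, $k=2$ and $\Delta=8$, so the only
$2$-power in $\Delta$ exceeds every $2$-power in $n$. Then $S(6,2)=31$ is an AMZC, since
condition (a) of the criteria holds: $\nu\binom{10}{6}=\nu(210)=1=\#([6]\cap[4])$.
Accordingly
\[
\nu(S(14,2))=\nu(2^{13}-1)=0=\nu(S(6,2)).
\]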
For reference purposes, we include the basic material about Stirling numbers of the
first kind $s(n,k)$:
\begin{align}
s(n,k)=\binom{n-1}{k-1}B_{n-k}^{(n)}
\end{align}
Thus by the recursive formula (6.3), we get
\begin{align}
s(n,k)=\binom{n}{k}B_{n-k}^{(n+1)}(1)
\end{align}
From the Appendix, the maximum pole of $B_{n-k}^{(n)}(x)$ is
$\#([n-k]\cap [k-1])$, so we get the following four estimates:\\
Minimum zero estimate:
\begin{align}
\nu(s(n,k)) \ge \sigma(k-1)-\sigma(n-1)
\end{align}
Shifted minimum zero estimate:
\begin{align}
\nu(s(n,k)) \ge \sigma(k)-\sigma(n)
\end{align}
Almost minimum zero estimate:
\begin{align}
\nu(s(n,k)) \ge \sigma(k-1)-\sigma(n-1)+\#([n-k]-[k-1])
\end{align}
Shifted almost minimum zero estimate:
\begin{align}
\nu(s(n,k)) \ge \sigma(k)-\sigma(n) +\#([n-k]-[k])
\end{align}
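As a quick sanity check of these formulas (ours, not in the original), take $n=4$ and
$k=2$. Using the value $B_{2}^{(4)}=\tfrac{11}{3}$, which follows from the elementary
expansion $B_{2}^{(l)}=\tfrac{l(3l-1)}{12}$, the formula
$s(n,k)=\binom{n-1}{k-1}B_{n-k}^{(n)}$ gives $s(4,2)=\binom{3}{1}B_{2}^{(4)}=11$, and the
almost minimum zero estimate reads
\[
\nu(s(4,2))\ge \sigma(1)-\sigma(3)+\#([2]-[1])=1-2+1=0,
\]
which is sharp, since $s(4,2)=11$ is odd.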
\section{Proofs of Theorems 2.1--2.5}
\textit{Proof of Theorem 2.1}.
Let $n=c2^h$ and $k=(c-1)2^h+a$, with $1 \le a \le 2^h$. Then $n-k=2^h-a$, so $[n]\cap [n-k]
=\emptyset$, so $S(n,k)$ is a MZC and $\nu(S(n,k)) = \sigma(k)-\sigma(n)=\sigma(c-1)
+\sigma(a)-\sigma(n)=\sigma(a)-1$. (If $a=2^h$, the theorem is trivial.)
\begin{flushright}
$\Box$
\end{flushright}
\pagebreak
~\\
\textit{Proof of Theorem 2.2}.
Let $n=c2^h$ and $k=b2^{h+1} +a$. Then $\sigma(k)-\sigma(n)=\sigma(a)+\sigma(b)
-\sigma(c)$, so by the almost minimum zero estimate, it will suffice to show that
$\#([n] \cap [n-k]) \ge \sigma(c)-\sigma(b)-1$.
If $a \le 2^h$, then $n-k=(c-2b-1)2^h+2^h-a$,
so $[n] \cap [n-k]=[c2^h] \cap \left([(c-2b-1)2^h] \cup [2^h-a]\right) = [c2^h] \cap
[(c-2b-1)2^h] = [c] \cap [c-2b-1]$, so by Lemma 2.1, we have
$\#([n] \cap [n-k]) \ge \sigma(c)-\sigma(2b+1)
=\sigma(c)-\sigma(b)-1$, which completes the proof in this case.
On the other hand if $a>2^h$, then $n-k=(c-2b-2)2^h +2^{h+1}-a$ and $0\le 2^{h+1}-a
<2^h$, so $[n-k] \cap [n] = [(c-2b-2)2^h] \cap [c2^h] =[c] \cap [c-2b-2] =
([1] \cup [c-1]) \cap ([1] \cup [c-1-(2b+1)])$ so $\#([c] \cap [c-2b-2]) \ge 1+\sigma(c-1)
-\sigma(2b+1) = \sigma(c)-\sigma(b)-1$.
\begin{flushright}
$\Box$
\end{flushright}
~\\
\textit{Proof of Theorem 2.3}.
The proof of this theorem is similar to that of Theorem 3.4. Let $n=c2^m+b2^{h+1}+2^h$
and $k=b2^{h+2}+a$, where $0<a<2^{h+1}$. First assume $0<a \le 2^h$. Then
$n-k=(c-1)2^m +2^m-b2^{h+1}+2^h-a$, so $[n-k] \cap [n] = ([(c-1)2^m] \cap [c2^m])
\cup ([b2^{h+1}] \cap [2^m-b2^{h+1}])$, so $\#([n-k] \cap [n]) = \sigma(c)-1+1=\sigma(c)$.
Thus the almost minimum zero estimate is $\nu(S(n,k)) \ge \sigma(b)+\sigma(a)-(\sigma(c)
+\sigma(b)+1) +\sigma(c)=\sigma(a)-1$. But $n+n-k$ has an unforced carry
for exponent $m$ and $\nu(n-k)
\ne \nu(n)$ and the first positive exponent in $n-k$ is not equal to the first positive exponent
in $n$, so $S(n,k)$ is not an AMZC, by the criteria.
Next assume $2^h<a<2^{h+1}$. Then $n-k =(c-1)2^m +2^m-(2b+1)2^h+2^{h+1}-a$,
so again $\#([n-k] \cap [n]) = \sigma(c)$. If $a<2^{h+1}-1$, then once more the three
partitions don't satisfy the AMZC criterion. Finally if $a=2^{h+1}-1$, so that $n-k=
(c-1)2^m+2^m-(2b+1)2^h+1$, then the partitions when $u_1=n-k$ and when $u_1=n-k-1$
fail the criteria, but the partition when $u_1=n-k-3$ and $u_3=1$ does meet the
criteria. Hence $S(n,k)$ is an AMZC when $a=2^{h+1}-1$, and $\nu(S(n,k))=\sigma(a)-1=h$.
\begin{flushright}
$\Box$
\end{flushright}
Since Theorem 2.4 is a special case of the next one, we will not prove it.
~\\
\textit{Proof of Theorem 2.5}.
Let $n=c2^h+u$ and $1 \le k \le 2^h$ with $0 < u \le 2^{\nu(k)}$. Then $n-k=(c-1)2^h+u
+2^h-k$.
Without loss of generality, we can assume $c$ is odd. Then $n-1=c2^h+u-1$ so $\sigma(k-1)
-\sigma(n-1)=\sigma(k-1)-(\sigma(c)+\sigma(u-1))=
\sigma(k)-1+\nu(k)-\sigma(c)-\sigma(u)+1-\nu(u)=\sigma(k)+
\nu(k)-\nu(u)-\sigma(c)-\sigma(u)$. Also $[n-1] \cap [n-k] = [(c-1)2^h]\cup ([u] \cap [u-1])$,
so $\#([n-1] \cap [n-k]) = \sigma(c)-1+\sigma(u)-1$. Therefore, the shifted almost minimum zero
estimate if $u\ne k$ is
\begin{align}
\nu(S(n,k)) &\ge \sigma(k)+\nu(k)-\nu(u)-\sigma(c)-\sigma(u)+\sigma(c)+\sigma(u)-2 \notag\\
&= \nu(k)+\sigma(k)-\nu(u)-2 \notag
\end{align}
It remains to show that $S(n,k)$ is a SAMZC (sharp estimate)
iff $u=1$, or $u$ is a positive even integer which is less
than or equal to $2^{\nu(k)-1}$, or $u=1+2^{\nu(k)-1}$, or $u=2^{\nu(k)}<k$.
But $n-k+n-1=((c-1)2^h+(2^h-k)+u)+(c2^h+u-1)$, which has
no unforced carry as long as $u \le 2^{\nu(k)-1}$. Thus we have a SAMZC
(sharp estimate) iff the partition where
$u_1=n-k-3$ and $u_3=1$ fails the criterion. If $u$ is even then $n-k$ is even, so this
partition fails, while if $u$ is odd and $u \ne 1$, then the criterion is met, so again we don't have a
sharp estimate unless $u=1+2^{\nu(k)-1}$. For all other $u$, the criterion for a sharp estimate
fails. This proof illustrates the fact that precisely one of the
partitions must satisfy the criterion for a sharp estimate.
It is easy to see that if $u=k$ so that $\sigma(k)=1$, then $\nu(S(c2^h+u,k))=0$.
\begin{flushright}
$\Box$
\end{flushright}
\section{Proof of the asymptotic theorem 2.6}
~\\
\textit{Proof of Theorem 2.6}. We use the notations of the Appendix.
(i) First consider the case where $\nu(k) < \nu(n)$, so $\nu(n-k)=\nu(k)$ and $2^{\nu(k)}
\notin [n]$. Let $w(u) \le n-k$. Then if $d=n-k-2^{\nu(k)}$, the number of carries for
$d+n$ is the same as the number of carries for $n-k+n$, so if $n-k \ge d \ge n-k-2^{\nu(k)}$
then $\nu\binom{d+n}{n} \ge \nu\binom{n-k+n}{n}$. Since $n-k-\nu(u) \ge 0$, with
equality iff $u_1=n-k$, it follows that $\nu(t_u) \ge \nu\binom{n-k+n}{n}$, with equality
iff $u_1=n-k$. Thus the single partition $u_1=n-k$ has the least $2$-adic value among all
these terms in this case.
If on the other hand $d < n-k-2^{\nu(k)}$ then $n-k-d > 2^{\nu(k)}$. If $h$ is such
that $2^{\nu(k)+h-1} \ge \nu\binom{n-k+n}{n}$, replace $(n,k)$ by $(2^hn,2^hk)$.
Then by Corollary 6.1, we have $n-k-\nu(u) > \nu\binom{n-k+n}{n}$, so again
$\nu(t_u) > \nu\binom{n-k+n}{n}$.
Therefore the single partition $u_1=n-k$ gives the least value if
$2^{\nu(k)+h-1} \ge \nu\binom{n-k+n}{n}$, and $\nu(S(2^h n,2^h k)) =\sigma(k)
-\sigma(n)+\nu\binom{n+n-k}{n}$.
(ii) The case where $\nu(n)=\nu(k)$ and $2^{\nu(n-k)} \in [k]$ is similar, namely in this
case $2^{\nu(n-k)} \notin [n]$, so we get the same value for $\nu(S(2^h n,2^h k))$ if
$2^{\nu(n-k)+h-1} \ge \nu\binom{n-k+n}{n}$.
(iii) If $\nu(k)>\nu(n)$, then $\nu(n-k)=\nu(n)$, and we consider $\nu(S(n,k))=
\nu\binom{n-1}{k-1} +\nu\left(B_{n-k}^{(-k+1)}(1)\right)$. Since $2^{\nu(n-k)}=
\text{min}([n])$, we now have $2^{\nu(n-k)} \notin [n-1]$, so essentially the same
argument shows that the term when $u_1=n-k$ is the single dominant term if
$2^{\nu(n)+h-1} \ge \nu\binom{n-1+n-k}{n-1}$ and $\nu(S(2^h n,2^h k)) = \sigma(k-1)
-\sigma(n-1)+\nu\binom{n-1+n-k}{n-1}$.
(iv) The final case, when $\nu(n)=\nu(k)$ and $2^{\nu(n-k)} \in [n]$ is slightly more
delicate. In this case $\nu(n) < \nu(n-k)$, and if $d> n-k -2^{\nu(n)} =n-k-2^{\nu(n-k)}+
2^{\nu(n-k)}-2^{\nu(n)}$ then $d=n-k-2^{\nu(n-k)}
+2^{\nu(n)} +\delta$, where $0< \delta< 2^{\nu(n)}$. Then $[\delta] \cap [n-1]
\ne \emptyset$, so if
we consider $d+n-1$, we get an unforced carry in power $2^{\nu(n)}$, which in turn
leads to an unforced carry in power $2^{\nu(n-k)}$. Thus $\nu\binom{d+n-1}{n-1}
\ge \nu\binom{n-k+n-1}{n-1}$. Finally, if $2^{\nu(n)+h-1} > \nu\binom{n-1+n-k}{n-1}$
then the terms when $d \le n-k-2^{\nu(n)}$ have larger $2$-adic value, so again $\nu(S(2^h n,2^h k))=
\sigma(k-1)-\sigma(n-1)+\nu\binom{n-1+n-k}{n-1}$. The partition when $u_1=n-k$
is again dominant.
\begin{flushright}
$\Box$
\end{flushright}
\begin{corollary} (Central Stirling numbers)
$\lim_{h \rightarrow\infty} \nu(S(2^{h+1}k,2^hk)) = \nu\binom{3k}{k}$ and
$\nu(S(2^{h+1}k,2^hk)) = \nu\binom{3k}{k}$ if $2^{h-1+\nu(k)} \ge \nu\binom{3k}{k}$.
\end{corollary}
\begin{proof}
This follows immediately from the first case of the preceding theorem.
\end{proof}
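For example (an added check), with $k=1$ the hypothesis
$2^{h-1+\nu(k)}=2^{h-1}\ge\nu\binom{3}{1}=0$ holds for every $h\ge 1$, so the corollary gives
\[
\nu(S(2^{h+1},2^{h}))=\nu\binom{3}{1}=0;
\]
indeed $S(4,2)=7$ and $S(8,4)=1701$ are odd.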
~\\
Remark. Since $\binom{n+n-k}{n} = \frac{n+n-k}{n}\binom{n-1+n-k}{n-1}$, if
$\nu(k) < \nu(n)$ then $\nu\left(\frac{n+n-k}{n}\right) = \nu\left(\frac{k}{n}\right)
=\nu(k)-\nu(n)$ so $\sigma(k)-\sigma(n) +\nu\binom{n+n-k}{n} = \sigma(k-1)-
\sigma(n-1)+\nu\binom{n-1+n-k}{n-1}$. If $\nu(k)=\nu(n)$ then
$\nu\left(\frac{n+n-k}{n}\right)=0$ and $\sigma(k-1)-\sigma(n-1)=\sigma(k)-\sigma(n)$,
so again we get the same value for $\nu(S(2^h n, 2^h k))$. Thus
\[
\lim_{h\rightarrow\infty} \nu(S(2^h n,2^h k)) = \sigma(k-1)-\sigma(n-1)+
\nu\binom{n-1+n-k}{n-1}
\]
in all cases, which can easily be shown to agree with Lengyel's limit.
\section{Appendix --- Higher order Bernoulli numbers and polynomials}
The higher order Bernoulli polynomials $B_n^{(l)}(x)$ are defined by
\begin{align}
\left(\frac{t}{e^t-1}\right)^l e^{tx} = \sum_{n=0}^\infty B_n^{(l)}(x)t^n/n!
\end{align}
In this paper we assume the order $l \in \mathbb{Z}$. If $x=0$, we get the higher
order Bernoulli numbers $B_n^{(l)}$, and we get the Appell property $B_n^{(l)}(x) =
\sum_{i=0}^n \binom{n}{i} B_{n-i}^{(l)} x^i$. This polynomial is monic of degree $n$
with rational coefficients, i.e., it lies in $\mathbb{Q}[x]$.
These polynomials satisfy two recursions:
\begin{align}
\left(B_n^{(l)}(x)\right)' =nB_{n-1}^{(l)}(x) ~ \text{and} ~ \Delta(B_n^{(l)}(x))
= B_n^{(l)}(x+1)-B_n^{(l)}(x) =nB_{n-1}^{(l-1)}(x)
\end{align}
These recursions yield the recursive formula
\begin{align}
B_n^{(l)} = \frac{l}{l-n} B_n^{(l+1)}(1)
\end{align}
If $u=(u_1,u_2, \ldots)$ is a sequence of natural numbers eventually zero, we regard $u$
as a partition of the number $w=w(u)=\sum i u_i$, where $u_i$ is the multiplicity of
$i$ in the partition and $d=d(u)=\sum u_i$ is the number of summands.
There is an explicit representation of $B_n^{(l)}$ in terms of the partitions, namely
(cf. [1,2])
\begin{align}
B_n^{(l)} = (-1)^n n! \sum_{w \le n} t_u(l-n-1)
\end{align}
and also
\begin{align}
B_n^{(l)}(1) =(-1)^n n! \sum_{w=n} t_u(l-n)
\end{align}
where $t_u=t_u(s)=\binom{s}{d}\binom{d}{u}/\Lambda^u$, where $\binom{d}{u} =
\binom{d}{u_1 u_2 \ldots}$
is a multinomial coefficient, $\Lambda^u = 2^{u_1}3^{u_2} \cdots$, and
$\nu(u)=\nu(\Lambda^u)=\sum u_i\nu(i+1)$.
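To make the notation concrete, here is a small verification that is not part of the
original text. For $n=2$ and $l=1$ we have $s=l-n-1=-2$, and the partitions with
$w\le 2$ are $u=0$, $u_{1}=1$, $u_{1}=2$ and $u_{2}=1$, contributing
\[
1+\binom{-2}{1}\frac{1}{2}+\binom{-2}{2}\frac{1}{4}+\binom{-2}{1}\frac{1}{3}
=1-1+\frac{3}{4}-\frac{2}{3}=\frac{1}{12},
\]
so that $B_{2}^{(1)}=(-1)^{2}\,2!\cdot\frac{1}{12}=\frac{1}{6}$, the classical Bernoulli
number $B_{2}$.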
There is a companion sequence $\tau_u = \tau_u(s) = (n)_w t_u$, where
$s=l-n-1$, which is important
for the study of the $B_n^{(l)}(x)$. In particular, the maximum pole of $B_n^{(l)}(x)$
is the maximum pole of $\{\tau_u(l-n-1) : w \le n\}$. In [1] we showed that for $p=2$
the maximum pole of $B_n^{(l)}(x)$ is $\#\{2^i \in [n] : 2 \nmid \binom{l-n-1}{2^i}\}$.
We can use the same reduction method for $p=2$ as in the proof of ([1], Lemma 3.1)
to show that if $\tau_u$ has the maximum pole, then $u_i=0$ for all $i>1$, with the
possible exception $u_3=1$, i.e., $u$ is concentrated in places $1$ and $3$, with $u_3\le 1$:
If $i \ne 1,3$ and $u_i\ne 0$ or if $i=3$ and $u_i \ge 2$, delete $u_i$ and increase
$u_1$ by $u_i$. (We call this a transfer from place $i$ to place $1$.) This preserves $d$
and decreases $w$. It is easy to see that this also decreases $\nu(\tau_u)$, so is
impossible if $\tau_u$ has the maximum pole.
Since $n! t_u =(n-w)! \tau_u$, we see that $n! t_u$ has the maximum pole of $B_n^{(l)}(x)$
iff $\tau_u$ has the maximum pole and $w=n-1$ or $w=n$.
For our application to Stirling numbers of the second kind, we replace $n$ by $n-k$ and $l$
by $-k$. It follows that the maximum pole is $\#([n-k]-[n])$, and by our analysis (cf. [1]), the
first pole has order one, and occurs in codegree of the smallest element of $[n-k]-[n]$,
etc. That is how we get the Newton polygon of the higher order Bernoulli polynomial,
which is particularly simple, the poles coming in increasing order without gaps (cf. [3]).
Newton polygons are used in [10] in a different way.
Furthermore from our analysis of the possible maximum pole terms, we can show that
$B_{n-k}^{(-k)}$ has the maximum pole iff precisely one of the following terms gives
the maximum pole:
\begin{align}
\text{(i)~} &u_1=n-k, \text{~so~} w=n-k=d \text{~and~}\notag\\
&\qquad t_u=(-1)^{n-k} \binom{n+n-k}{n}/2^{n-k}\\
\text{(ii)~} &u_1 =n-k-1, \text{~so~} w=n-k-1=d \text{~and~}\notag\\
&\qquad t_u=(-1)^{n-k-1} \binom{n+n-k-1}{n}/2^{n-k-1} \\
\text{or (iii)~} &u_1=n-k-3 \text{~and~} u_3=1, \text{~so~} w=n-k \text{~and~} d=n-k-2
\notag\\
&\qquad \text{~~~and~} n-k \text{~is odd and greater than 1 and}\notag\\
&\qquad t_u=(-1)^{n-k} \binom{n+n-k-2}{n}(n-k-2)/2^{n-k-1}
\end{align}
~\\
Remark. These three partitions are the ones that determine the mod $4$ congruence for
$2^{n-k} B_{n-k}^{(-k)}/(n-k)!$. The a priori possible term with $u_1=n-k-4$ and
$u_3=1$ is eliminated in the proof of Theorem 3.3.
Finally, we give a new estimate that is very useful for our asymptotic analysis.
\begin{theorem}
Let $w \le n$. Then $n- \nu(u) \ge n-w +(w-d)/2$ and $n-\nu(u) =(w-d)/2$ iff $n=w$ and
$u$ is concentrated in places $1$ and $3$.
\end{theorem}
\begin{proof}
Since $n-\nu(u)= n-w+w-\nu(u)$,
it will suffice to prove that $w-\nu(u) \ge (w-d)/2$, with equality iff $u$ is concentrated
in places $1$ and $3$, i.e., we can assume $w=n$. But $w-\nu(u)-(w-d)/2 =
\sum u_i (i-\nu(i+1) -(i-1)/2) = \frac{1}{2} \sum u_i(i+1-2\nu(i+1)) = \frac{1}{2}
\sum u_i(j -2\nu(j))$, where $j=i+1$. But if $j>0$, it is easy to see that $j \ge 2\nu(j)$ with
equality iff $j=2$ or $j=4$.
\end{proof}
\begin{corollary}
If $w \le n$ then $n-\nu(u) \ge (n-d)/2$.
\end{corollary}
~\\
Note: For our applications we will often only have an estimate for $n-d$, so this is
typically how we will use Theorem 6.1.
~\\
\centerline{ACKNOWLEDGEMENTS}
~\\
The author would like to thank E. A. Herman for his invaluable help in preparing
this paper, and T. Lengyel for his generous advice and encouragement throughout its
development.
~\\
\centerline{REFERENCES}
~\\
1. A. Adelberg, On the degrees of irreducible factors of higher order Bernoulli
polynomials, \textit{Acta Arith.} \textbf{62} (1992), 329-342.\\
2. A. Adelberg, Congruences of $p$-adic integer order Bernoulli numbers,
\textit{J. Number Theory} \textbf{59} No. 2 (1996), 374-388.\\
3. A. Adelberg, Higher order Bernoulli polynomials and Newton polygons,
G. E. Bergum et al (eds.), \textit{Applications of Fibonacci Numbers} \textbf{7}
(1998), 1-8.\\
4. A. Adelberg, The $p$-adic analysis of Stirling numbers via higher order
Bernoulli numbers, \textit{Int. J. Number Theory} \textbf{14} (2018), No. 10,
2767-2779.\\
5. T. Amdeberhan, D. Manna and V. Moll, The $2$-adic valuation of Stirling
numbers, \textit{Experimental Math.} \textbf{17} (2008), 69-82.\\
6. O-Y. Chan and D. Manna, Divisibility properties of Stirling numbers of the
second kind, Proceedings of the Conference on Experimental Math., T.
Amdeberhan, L. A. Medina, and V. Moll eds., \textit{Experimental Math.}
(2009).\\
7. S. De Wannemacker, On $2$-adic orders of Stirling numbers of the second
kind, \textit{Integers Electronic Journal of Combinatorial Number Theory},
\textbf{5} (1) (2005), A21, 7 pp. (electronic).\\
8. S. Hong, J. Zhao, and W. Zhao, The $2$-adic valuations of Stirling numbers of
the second kind, \textit{Int. J. Number Theory} \textbf{8} (2012), 1057-1066.\\
9. S. Hong, J. Zhao, and W. Zhao, Divisibility by $2$ of Stirling numbers of the
second kind and their differences, \textit{J. Number Theory} \textbf{140}
(2014), 324-348.\\
10. T. Komatsu and P. T. Young, Exact $p$-adic valuations of Stirling numbers
of the first kind, \textit{J. Number Theory} \textbf{177} (2017), 20-27.\\
11. T. Lengyel, On the divisibility by $2$ of the Stirling numbers of the second
kind, \textit{Fibonacci Quart.} \textbf{32} (3) (1994), 194-201.\\
12. T. Lengyel, On the $2$-adic order of Stirling numbers of the second kind and
their differences, \textit{DMTCS Proc. AK} (2009), 561-572.\\
13. T. Lengyel, Alternative proofs on the $2$-adic order of Stirling numbers
of the second kind, \textit{Integers} \textbf{10} (2010), A38, 453-468.\\
\end{document}
|
\begin{document}
\title{Slices of matrices --- a scenario for spectral theory}
\author{Ricardo S. Leite}
\address{Departamento de Matem\'atica, PUC-Rio, R. Mq. de S. Vicente 225, 22453-900, Rio de
Janeiro Brazil}
\email{[email protected]}
\thanks{The authors were supported by CNPq, FINEP and FAPERJ.}
\author{Carlos Tomei}
\email{[email protected]}
\subjclass{Primary 58F07, 15A18; Secondary 15A23}
\date{February 5, 2002}
\keywords{Toda flows, $QR$ decompositions}
\begin{abstract}
Given a real, symmetric matrix $S$, we define the slice ${\mathcal{ F}_S}$ through $S$
as being the connected component containing $S$ of two orbits under
conjugation: the first by the orthogonal group, and the second by
the upper triangular group. We describe some classical constructions in
eigenvalue computations and integrable systems which keep slices invariant ---
their properties are clarified by the concept. We also parametrize the
closure of a slice in terms of a convex polytope.
\end{abstract}
\maketitle
\section*{The basic definition}
Let $S$ be a real $n \times n$ symmetric matrix with simple spectrum
$\sigma(S) = \{\lambda_1 > \ldots > \lambda_n\}.$ When is a matrix
simultaneously an orthogonal and an upper triangular conjugation of $S$?
More precisely, we consider the \emph{slice}
${\mathcal{ F}_S}$ through $S$, defined to be the intersection of the sets
$\{Q^T S Q$, for arbitrary real orthogonal matrices $Q$ with $\det Q = 1 \}$
and $\{R S R^{-1}, $
for arbitrary real upper triangular matrices $R$ with $\hbox{\rm diag}\, R > 0\}.$
How large is a slice? Clearly, if $Q^T S Q = R S R^{-1}$ then $ SQR = QRS$, and hence,
since $S$ has simple spectrum, $QR = f(S),$ for some (real) polynomial $f$.
The matrices $Q$ and $R$ are uniquely determined from $f(S)$ if
$f(S)$ is invertible: this is the standard \emph{QR-factorization} of a matrix (\cite{G}).
For convenience, we write $f(S) = QR = \GS{f(S)} \TS{f(S)}$.
Slices have been appearing in disguised form in the literature of
numerical analysis and integrable systems, and questions about the geometry of slices
have come up intermittently. By putting these three aspects side by side, we expect
to convince the reader that the concept is indeed a natural one.
\section*{Slices and the computation of eigenvalues}
Francis, in his fundamental work on the $QR$ algorithm
(\cite{Fr}) considered the following map between matrices. Take $S$ invertible symmetric,
factor $S = Q R$ and define the \emph{$QR$ step} $S' = R Q$.
It is clear that $S'$ is symmetric with the same spectrum as $S$, since $S' = Q^T S Q$.
But more is true: $S' =R S R^{-1}$. In particular, as Francis had already pointed out,
$S$ and $S'$ have the same bandwidth (the reader should compare this argument with
the usual one \cite{P}). Also, $S'$ belongs to the slice through $S$:
in the notation for the elements of ${\mathcal{ F}_S}$ presented in the introduction,
$S'$ is associated to the function $f(x) = x$.
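A $2\times 2$ example, included here for concreteness (it is not in the original), makes
the step explicit. Let
$$S=\left(\begin{array}{cc}2&1\\1&2\end{array}\right),\qquad
Q=\frac{1}{\sqrt5}\left(\begin{array}{cc}2&-1\\1&2\end{array}\right),\qquad
R=\frac{1}{\sqrt5}\left(\begin{array}{cc}5&4\\0&3\end{array}\right),$$
so that $S=QR$ with $\det Q=1$ and $\hbox{\rm diag}\, R>0$. The $QR$ step gives
$$S'=RQ=\frac{1}{5}\left(\begin{array}{cc}14&3\\3&6\end{array}\right),$$
again symmetric with trace $4$ and determinant $3$, hence $\sigma(S')=\{3,1\}=\sigma(S)$,
and the off-diagonal entry has already decreased from $1$ to $3/5$.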
Numerical analysts also know well that $S^{(k)} = \GST{ S^k} S \GS{ S^k}$ equals the matrix
obtained by applying $k$ times the $QR$ step starting from $S$: in
other words, $S^{(k)}$ is the $k$-th step of the $QR$ iteration starting from $S = S^{(0)}$. Again,
$S^{(k)}$ belongs to ${\mathcal{ F}_S}$, and is associated to $f(x) = x^k$.
Instead, by taking $f(x) = x^{1/k}$, the resulting
matrix $S^{(1/k)}$ is, in a precise sense, the $1/k$-th $QR$ step.
Taking the limit (\cite{T1})
$$ \lim_{k \to \infty} (S^{(1/k)} - S^{(0)})k=
\lim_{\epsilon\to 0} \frac{S^{(\epsilon)} - S}{\epsilon}$$
yields a vector field $\dot{S} = X(S)$,
whose solution $S(t)$ starting at $S(0) = S$ at time $k$ equals $S^{(k)}$!
The computation of this limit appears in a number of arguments
in the subject. The expression for $S^{(\epsilon)}$ is
$$S^{(\epsilon)} = \GST{S^{\epsilon}} S \GS{S^{\epsilon}}, \hbox{ where }
S^{\epsilon} = \GS{S^{\epsilon}} \TS{S^{\epsilon}}.$$
Evaluating the derivatives in $\epsilon$, we learn that
$$\frac{ d\ }{d\epsilon} S^{(\epsilon)} =
(\frac{d\ }{d\epsilon}\GST{S^{\epsilon}}) S\GS{S^{\epsilon}} +
\GST{S^{\epsilon}} S (\frac{d\ }{d\epsilon}\GS{S^{\epsilon}}),$$
$$(\log S) S^{\epsilon} =
(\frac{d\ }{d\epsilon}\GS{S^{\epsilon}}) \TS{S^{\epsilon}} +
\GS{S^{\epsilon}}(\frac{d\ }{d\epsilon}\TS{S^{\epsilon}}).$$
From the last equation,
$$ \GST{S^{\epsilon}} (\log S) S^{\epsilon}\TSI{S^{\epsilon}} =
\GST{S^{\epsilon}}(\frac{d\ }{d\epsilon}\GS{S^{\epsilon}}) +
(\frac{d\ }{d\epsilon}\TS{S^{\epsilon}})\TSI{S^{\epsilon}},$$
which yields
$$ \GST{S^{\epsilon}} (\log S) \GS{S^{\epsilon}} \TS{S^{\epsilon}} \TSI{S^{\epsilon}}
=\log S^{(\epsilon)} =
\GST{S^{\epsilon}}(\frac{d\ }{d\epsilon}\GS{S^{\epsilon}}) +
(\frac{d\ }{d\epsilon}\TS{S^{\epsilon}})\TSI{S^{\epsilon}}.$$
Now evaluate the derivatives at $\epsilon = 0$: we must have
$I = S^0 = I . I = \GS{S^0} \TS{S^0}$ and $S^{(0)} = S$. The equation above yields
$$ \log S = \frac{d\ }{d\epsilon}\GS{S^{\epsilon}}|_{\epsilon=0} +
\frac{d\ }{d\epsilon}\TS{S^{\epsilon}}|_{\epsilon=0}.$$
The two terms in the right hand side of the last equation are
special matrices: they are respectively skew symmetric and upper triangular.
Consider the (unique, linear)
decomposition of a matrix $M = \Pi_a M + \Pi_u M$ as a sum of a skew symmetric and an
upper triangular matrix. Then, from the expression for the derivative of $S^{(\epsilon)}$,
$\frac{d\ }{d\epsilon}{\GS{S^{\epsilon}}}|_{{\epsilon = 0}} = \Pi_a \log S$.
The vector field which interpolates
the $QR$ iteration then is $ X(S)= [ S , \Pi_a \log S]$.
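For the reader's convenience we record the explicit form of this decomposition (an easily
checked remark of ours): if $M_{-}$ denotes the strictly lower triangular part of $M$, then
$$ \Pi_a M = M_{-} - M_{-}^T, \qquad \Pi_u M = M - M_{-} + M_{-}^T,$$
and the decomposition is unique because the only matrix which is both skew symmetric and
upper triangular is zero.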
The iterations and flows defined above lie in ${\mathcal{ F}_S}$.
In particular, both preserve the eigenvalues of the initial condition, its symmetry and
its bandwidth.
For numerical analysts, the
asymptotic behavior of the $QR$ iteration, well known to Francis, is of capital importance.
Say, for example, that $J$ is an arbitrary Jacobi matrix
(i.e., a real, tridiagonal matrix whose entries
$J_{k,k+1} = J_{k+1,k},k=1,\ldots,n-1$ are strictly positive).
Starting with $J$, the $QR$ iteration converges to
a diagonal matrix $D$. Not only $D$ and $J$ have the same spectrum but $D$ must
lie in the closure of ${\mathcal{ F}_J}$, the slice through $J$. Steps of $QR$ type related to
different functional parameters $f$ give rise to iterations which, starting from $J$,
always converge to diagonal matrices, with diagonal entries consisting of the (distinct)
eigenvalues of $J$ in an arbitrary order. The closure of the slice ${\mathcal{ F}_J}$ clearly ought to
contain additional points. In a nutshell, they correspond to \emph{reducible} Jacobi matrices,
i.e., matrices for which some entry $J_{k,k+1} = J_{k+1,k}$ is zero. This will be
explained in the sequel.
\section*{Slices and the Toda flows}
We now describe briefly the Toda lattice, which is an integrable system for which slices
appear as natural phase spaces. Consider $n$ particles on the line with positions
$x_k$ and velocities $y_k$ evolving under the Hamiltonian
$$ H = \frac{1}{2} \sum_{k=1}^n y_k^2 + \sum_{k=1}^{n-1} e^{(x_k - x_{k+1})}.$$
This Hamiltonian was introduced as a model of wave propagation in one dimensional
crystals (\cite{To}).
It was Flaschka's remarkable discovery (\cite{F}) that this dynamical system is
equivalent to the matrix differential equation
$$ \dot{J} = [ J , \Pi_a J ],$$
where $J$ is the Jacobi matrix with nonzero entries
$$J_{k,k} = - y_k / 2 \ \hbox{ and }\ J_{k,k+1} = J_{k+1,k} = e^{(x_k - x_{k+1})/2}/2.$$
The fact that the differential equation is in the so called
\emph{Lax pair form} (\cite{L}) implies that
the evolution $J(t)$ is actually an orthogonal conjugation of the initial condition
$J(0)$. But again more is true: the evolution stays within the set of Jacobi matrices,
as we should expect, since there are no other physically significant variables in
the problem (velocities essentially are the diagonal entries, and distances between
particles with neighboring indices correspond to the off-diagonal entries).
Indeed, it is not hard to check that
$$J(t) = \GST{e^{t J(0)}} J(0) \GS{e^{t J(0)}}
= \TS{e^{t J(0)}} J(0) \TSI{e^{t J(0)}},$$ the celebrated solution of the Toda lattice
by factorization (\cite{S1}). To check this formula, proceed as in the computation of
the limit of the previous section. Thus, again, the orbit $J(t)$ lies within the
slice through $J(0)$.
Moser also computed the asymptotic behavior of the Toda lattice (\cite{M}): for $f(x) = x$,
the orbit $J(t)$ starting from a Jacobi matrix $J(0)$ converges to a diagonal matrix,
with eigenvalues disposed in decreasing order. This is in accordance with the
relationship between Toda and $QR$ and has a natural physical interpretation.
Diagonal entries are velocities: in the long run, particles move apart and tend to
undergo uniform motion, each with speed given by a different eigenvalue. Faster
particles move ahead, explaining the ordered outcome of the eigenvalues along the
diagonal found in applications of the $QR$ iteration. The
remarkable fact that asymptotic speeds both at $- \infty$ and $+ \infty$ are the same
will not be relevant to us: this is strong evidence of the integrability of the system ---
each orbit ``remembers'' this data. Other matrices belonging to the boundary of the
slice through $J(0)$ correspond to asymptotic behavior in which the system of particles
breaks into essentially disconnected components, the so called \emph{clustering}.
For a symmetric matrix $S$ and an arbitrary function $g$, the matrix equation
$ \dot{S} = [ S , \Pi_a g(S)]$ admits a similar
solution by factorization: conjugate the initial condition by $\GS{e^{t g(S(0))}}$.
It is this last fact which historically was responsible for relating
the $QR$ iteration and Toda flows (\cite{S2}, \cite{DNT}):
when $g(x) = \log (x)$, the resulting differential equation gives rise to the orbits
interpolating the $QR$ iteration. For applications of these differential equations
to numerical analysis, the reader may consult \cite{DLT}.
\section*{Geometric aspects of slices}
Detailed study of variables for which the Toda flow becomes especially simple led
Moser (\cite{M}) to a parametrization of Jacobi matrices. It turns out that Jacobi
matrices have simple spectrum and their eigenvectors always have nonzero first coordinates ---
in particular, they can be normalized so as to have (strictly) positive first coordinates.
Moser proved that the map taking Jacobi matrices to $n$-tuples of distinct real numbers
(the eigenvalues) and a point in the first octant of the unit sphere in $\RR^{n}$
(the first coordinates of the normalized eigenvectors) is a diffeomorphism. He then
showed that an appropriate choice of Toda flow (i.e., of functional parameter $g$)
gives rise to orbits joining any two Jacobi matrices with the same spectrum.
Thus, given a Jacobi matrix $J$,
${\mathcal{ F}_J}$ is the set of all Jacobi matrices with the same spectrum as $J$.
(We remind the reader that Jacobi matrices always have simple spectrum.) From
Moser's parametrization, \emph{Jacobi slices} are diffeomorphic to $\RR^{n-1}$.
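In the smallest case $n=2$ this can be checked by hand (an example added here): the Jacobi
matrices with spectrum $\{\lambda_1 > \lambda_2\}$ are exactly
$$J(a)=\left(\begin{array}{cc}a & b(a)\\ b(a) & \lambda_1+\lambda_2-a\end{array}\right),
\qquad b(a)=\sqrt{(\lambda_1-a)(a-\lambda_2)}, \qquad \lambda_2<a<\lambda_1,$$
an open interval, and the closure of the slice adds the two diagonal matrices
$\hbox{\rm diag}\,(\lambda_1,\lambda_2)$ and $\hbox{\rm diag}\,(\lambda_2,\lambda_1)$;
the permutohedron introduced below is in this case a segment.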
More generally, getting back to the definition of slices, we have seen that a matrix
$S' \in {\mathcal{ F}_S}$ is of the form $S' = Q^T S Q = R S R^{-1}$, where $f(S) = QR$
for some function $f$. Keeping track of the requirements on $Q$ and $R$, one may
obtain a simple coordinatization of a slice.
A matrix $S$ is \emph{irreducible} if $S$ has no invariant subspace
generated by a subset of the canonical vectors $e_1, \ldots, e_n$. Indeed, there
is a diffeomorphism between positive polynomials up to scalar multiplication and elements
of ${\mathcal{ F}_S}$, for an irreducible symmetric matrix (\cite{LT}).
Which matrices are in the boundary $\partial {\mathcal{ F}_J}$ of the slice through a
Jacobi matrix $J$? Numerical analysts knew that the diagonal
matrix $\lambda = (\lambda_1,\ldots,\lambda_n)$ and its permuted counterparts
$\lambda_\pi = (\lambda_{\pi(1)},\ldots,\lambda_{\pi(n)})$.
As we shall see, the simplest possible way of combining such
$n!$ points to form a reasonable boundary turns out to be the
topological description of $\partial {\mathcal{ F}_J}$.
Let ${\mathcal{ P}_{\Lambda}}$ be the \emph{permutohedron},
obtained by taking the convex closure of the points of the form $\lambda_{\pi} \in \RR^n$.
Notice that actually ${\mathcal{ P}_{\Lambda}}$ has an interior of dimension $n-1$, since the sum of the coordinates
of any point in ${\mathcal{ P}_{\Lambda}}$ equals the trace of $J$.
The permutohedron ${\mathcal{ P}_{\Lambda}}$ is homeomorphic to the closure
of ${\mathcal{ F}_J}$, as was proved in \cite{T2} with combinatorial arguments.
The figure below describes the situation for a $3 \times 3$ Jacobi matrix $J$ with
spectrum $\sigma(J) =\{4,2,1\}$. On the left is a topological representation of the
closure of the slice ${\mathcal{ F}_J}$. Interior points of the hexagon are points in ${\mathcal{ F}_J}$.
The boundary consists of two kinds of points: the six diagonal matrices, corresponding
to the vertices, and the matrices which form the edges, which have either entry
$(1,2)$ or $(2,3)$ equal to zero. On the right, the permutohedron associated to
$\sigma(J)$ is projected on the $(x,y)$ plane. Notice that a map taking vertices
to vertices, say, taking $\hbox{\rm diag}\,(1,2,4)$ to $(1,2,4)$ and $\hbox{\rm diag}\,(2,1,4)$ to $(2,1,4)$
may not behave so na\"\i vely on all vertices, for continuity reasons.
\begin{figure}
\caption{Slice and permutohedron for $\sigma(J)=\{4,2,1\}$}
\label{fig1}
\end{figure}
The permutohedron associated to a $4 \times 4$ Jacobi matrix is also drawn below.
Faces have a clear meaning: there must be four hexagons associated
to matrices for which entry $(1,2)$ equals zero (and hence contain a Jacobi
$3 \times 3$ block of fixed spectrum), six quadrilaterals corresponding to matrices
with entry $(2,3)$ equal to zero, and four more hexagons for matrices with
entry $(3,4)$ equal to zero. There are also $4!$ vertices.
\begin{figure}
\caption{The permutohedron for $\sigma(J)=\{4,3,2,1\}$}
\label{permu1}
\end{figure}
The permutohedron is familiar to spectral theorists, from the well known Schur-Horn
theorem (\cite{MO}). Let $S$ be a symmetric matrix with simple spectrum
$\sigma(S) = \{\lambda_1,\ldots,\lambda_n\}$,
and consider the set ${\mathcal{ O}_{S}} = \{ Q^T S Q, Q \hbox { orthogonal } \}$. Then the map
$S \mapsto \hbox{\rm diag}\, S$ is surjective from ${\mathcal{ O}_{S}}$ to ${\mathcal{ P}_{\Lambda}}$. Such a map is far from
injective. Slices are in a sense minimal sets in ${\mathcal{ O}_{S}}$ in bijection with permutohedra.
The presence of a convex polytope raised the possibility of relating slices to moment maps
of symplectic toroidal actions (\cite{A},\cite{GS}). Indeed, following this route, Bloch,
Flaschka and Ratiu obtained an \emph{explicit} diffeomorphism between the closure of
${\mathcal{ F}_J}$ and ${\mathcal{ P}_{\Lambda}}$, which we now describe. Let $S$ be a symmetric matrix with simple spectrum,
and consider the spectral decomposition
$S = Q^T \Lambda Q$, where, as usual, $Q$ is orthogonal and $\Lambda$ is diagonal.
Now, order the diagonal entries of $\Lambda$ in descending order:
$Q$ is then defined up to a choice of sign for each
column of $Q^T$ (since they are normalized eigenvectors of $S$). Thus,
$S = Q^T D \Lambda D Q$, for some diagonal matrix $D$ of signs.
The matrix $ D Q \Lambda Q^T D$ is dependent on $D$, but
its diagonal is not! The map $S \mapsto \hbox{\rm diag}\,( Q \Lambda Q^T)$ is the \emph{BFR map}.
Its restriction to the closure of a Jacobi slice ${\mathcal{ F}_J}$ is the required diffeomorphism to
${\mathcal{ P}_{\Lambda}}$ (\cite{BFR}).
The proof of the result above makes use of sophisticated machinery. However, once
we knew what had to be proved, a simpler argument appeared,
yielding a more general result (\cite{LT}). Let $S$ be an arbitrary real, symmetric,
irreducible matrix with simple spectrum. Call the diagonal matrices in the closure
of ${\mathcal{ F}_S}$ \emph{accessible}, and their images under the BFR map the \emph{accessible vertices}.
Now let ${\mathcal{ P}_{S}}$, the \emph{spectral polytope} of $S$, be the convex closure of the
accessible vertices of $S$.
\noindent\textbf{Theorem: } The BFR map is a diffeomorphism between the closure of the
slice ${\mathcal{ F}_S}$ and the spectral polytope ${\mathcal{ P}_{S}}$.
Generically, the spectral polytope of a symmetric matrix with simple spectrum is
indeed the permutohedron associated to the $n!$ diagonal arrangements of the eigenvalues.
But this is not the case in general. A self-contained description of the spectral
polytope of an irreducible matrix $S$ is as follows.
Consider the spectral decomposition,
$S = Q^T \Lambda Q$. We denote by $Q_{\{r_1,\ldots,r_k\},\{c_1,\ldots,c_k\}}$ the minor
of $Q$ consisting of the entries in the intersection of the rows with indices
$r_1,\ldots, r_k$ and the columns with indices $c_1,\ldots, c_k$. For a
permutation $\pi$ on $n$ symbols, let $\Pi$ denote
the permutation matrix with entries $\Pi_{i,j} = \delta_{i,\pi(j)}$.
\noindent\textbf{Theorem: } A diagonal matrix $\Lambda_\pi = \Pi^T \Lambda \Pi$ is an accessible
vertex of the slice ${\mathcal{ F}_S}$ if and only if the minors
$Q_{\{\pi(1)\},\{1\}}, Q_{\{\pi(1),\pi(2)\},\{1,2\}},\ldots,
Q_{\{\pi(1),\ldots,\pi(n)\},\{1,\ldots,n\}}$ have nonzero determinant.
As an example, let $S = Q^T \Lambda Q$ be a $3 \times 3$ matrix with eigenvalues
$\Lambda = \hbox{\rm diag}\,(4,2,1)$, and
so that the entry $Q_{1,1}$ equals zero, but no other minor of $Q$ has determinant
equal to zero. The slice ${\mathcal{ F}_S}$ and the spectral polytope have only four vertices:
diagonal matrices with entry $(1,1)$ equal to 4 do not belong to
$\bar{{\mathcal{ F}_S}}$, and the vertices of the spectral polytope are $(2,1,4), (2,4,1),
(1,2,4)$ and $(1,4,2)$ --- $\bar{{\mathcal{ F}_S}}$ is a quadrilateral.
\end{document}
|
\begin{document}
\pagecolor{white}
\title{Time Dependent Quantum Mechanics}
\author{Peter G. Morrison}
\email{[email protected]}
\affiliation{Morrison Industrial Company, Sydney, Australia}
\date{August 1st, 2012}
\begin{abstract}
\noindent We present a systematic method for dealing with time dependent
quantum dynamics, based on the quantum brachistochrone and matrix
mechanics. We derive the explicit time dependence of the Hamiltonian
operator for a number of constrained finite systems from this formalism.
Once this has been achieved we go on to calculate the wavevector as
a function of time, in order to demonstrate the use of matrix methods
with respect to several concrete examples. Interesting results are
derived for elliptic curves and periodic orbits on higher dimensional
non-commutative geometries.
\pacs{03.67.Lx, 02.30.Xx, 02.30.Yy, 03.65.Ca}
\end{abstract}
\keywords{time optimisation, quantum control, autonomous systems theory }
\maketitle
\section{Introduction}
Recent developments in quantum mechanical theory have focused on the
time-dependent dynamical problem, which proposes the use of Floquet
theory in order to derive the time evolution operators for the dependent
system. In our paper, we consider a new set of time-dependent quantum
mechanical operators which define the physics of finite systems. Standard
quantum mechanics postulates the existence of a Hamiltonian operator
$\widetilde{H}$ and an initial state $\left|\Psi(0)\right\rangle $
and seeks to extrapolate to find our final state $\left|\Psi(t)\right\rangle $,
via the von Neumann equation. This is an initial value problem; we
wish instead to formulate and solve a boundary value problem that is consistent
with quantum physics. This has already been calculated in the papers
of Carlini et al.\ \cite{Carlini}. The principal methodology is to
construct a time-optimal Hamiltonian matrix which moves us from state-to-state
on the projective manifold, in least time. These paths are geodesic
flows; they define the surface of the state space in the same way
that the lines of longitude on the sphere define the globe itself.
In addressing this question, we are naturally led to the analysis
of explicitly time-dependent quantum systems. These dynamical systems
are generally periodic in nature for the examples we have been able
to calculate; this periodicity allows us to use Floquet theory in
order to find solutions for the problems in question. In considering
these types of general dynamical problems we find a number of time
dependent states with constant energies; wave-vector analysis allows
us to consider the geometry of the state space for higher-dimensional
realms where it is no longer possible to explicitly graph the paths
the state takes over time. Experiments have been conducted into these
areas with the particular aim of creating these exotic qutrit states,
as in \cite{Bogdanov}, \cite{Ranabir} and as such it is important
that we be able to predict best methods of state control within this
regime. Relevant references to the geometry of qutrits and SU(3) operators
may be found in \cite{Caves}. Our method differs from standard quantum
mechanics in that we do not assume the form of the Hamiltonian operator;
we derive it using the quantum brachistochrone hypothesis, being that
the physical path is the one of least time.
\section{Quantum Fermat Principle}
This section reproduces the arguments of \cite{Carlini} in deriving
the quantum brachistochrone equation from the Quantum Fermat Principle,
being the hypothesis that the state vector travels along a path of
least time on the complex projective manifold. These results are not
new but are included in order to outline the fundamentals of this
methodology. Firstly, let us consider the energy variance on the state
space. We may write this as:
\begin{equation}
(\Delta E)^{2}=\left\langle \psi\right|(\hat{H}-\left\langle \hat{H}\right\rangle \hat{\mathbf{1}})^{2}\left|\psi\right\rangle
\end{equation}
We may define the metric of the projective state space:
\begin{equation}
ds=\Delta E\,dt
\end{equation}
The quantum state may be expanded in an orthonormal basis over a set
of probabilities:
\begin{equation}
\left|\psi\right\rangle =\sum_{j}\sqrt{p_{j}}\,e^{i\varphi_{j}}\left|j\right\rangle
\end{equation}
which then define the state space metric:
\begin{equation}
\dfrac{1}{4}ds^{2}=1-\left|\left\langle \psi+d\psi|\psi\right\rangle \right|^{2}=\left\langle d\psi\left|(\hat{\mathbf{1}}-\hat{P})\right|d\psi\right\rangle
\end{equation}
where $\hat{P}=\left|\psi\left\rangle \right\langle \psi\right|$,
the standard pure state projection operator. For the time-dependent
quantum control problem, the action principle must be recast in such
a way that we can apply it to find optimal transfers between states.
Our solution, where it exists, will be a time dependent Hamiltonian
that takes us time optimally from one state to another. We do not
rule out the existence of systems where there are a number of extremal
trajectories; however, our principal aim is to search for global extrema
where the time is a minimum regardless of our particular choice of
initial and final states. Writing out the expression for the time
optimal quantum control action we have:
\begin{equation}
S=\int_{t_{0}}^{t_{f}}(\mathcal{L}_{T}+\mathcal{L}_{S}+\mathcal{L}_{C})dt
\end{equation}
\begin{equation}
\mathcal{L}_{T}=\dfrac{\sqrt{\left\langle \dot{\psi}\left|(\hat{\mathbf{1}}-\hat{P})\right|\dot{\psi}\right\rangle }}{\Delta E}=\dfrac{1}{\Delta E}\dfrac{ds}{dt}
\end{equation}
\[
\mathcal{L}_{S}=i(\left\langle \phi\left|\right.\dot{\psi}\right\rangle -\left\langle \dot{\psi}\left|\right.\phi\right\rangle )-(\left\langle \phi\left|\hat{H}\right|\psi\right\rangle +\left\langle \psi\left|\hat{H}\right|\phi\right\rangle )
\]
\begin{equation}
\mathcal{L}_{C}=\sum_{j}\lambda_{j}f_{j}(\hat{H})
\end{equation}
These equations are gauge invariant; we therefore make a particular
choice of the gauge, being the reference frame in which the Hamiltonian
is traceless:
\begin{equation}
\hat{H}\mapsto\hat{H}-\dfrac{\hat{\mathbf{1}}_{N}}{N}Tr(\hat{H})=\tilde{H}
\end{equation}
\begin{equation}
Tr(\tilde{H})=0
\end{equation}
The first term is unity:
\begin{equation}
\mathcal{L}_{T}=\dfrac{\Delta E}{\Delta E}=1
\end{equation}
and therefore contributes a term $\int dt$ to the action, justifying
our claim of time optimality. This is the quantum analogue of Fermat's
principle of least time. Writing the Euler-Lagrange equations that
result from the variations of the action integral, we find the system
of equations:
\begin{equation}
i\left|\dot{\psi}\right\rangle =\tilde{H}\left|\psi\right\rangle
\end{equation}
\begin{equation}
-\dfrac{i}{2}\dfrac{d}{dt}\left(\dfrac{\tilde{H}-\left\langle \tilde{H}\right\rangle \hat{\mathbf{1}}}{\Delta E^{2}}\right)\left|\psi\right\rangle =i\left|\dot{\phi}\right\rangle -\tilde{H}\left|\phi\right\rangle
\end{equation}
\begin{equation}
\dfrac{(\{\tilde{H},\hat{P}\}-2\left\langle \tilde{H}\right\rangle \hat{P})}{2\Delta E^{2}}+\left|\phi\left\rangle \right\langle \psi\right|+\left|\psi\left\rangle \right\langle \phi\right|=\sum_{j}\lambda_{j}\dfrac{\partial f_{j}}{\partial\tilde{H}}
\end{equation}
\begin{equation}
f_{j}(\tilde{H})=0\,\forall j
\end{equation}
Defining our constraint operator:
\begin{equation}
\hat{A}=\sum_{j}\lambda_{j}\dfrac{\partial f_{j}}{\partial\tilde{H}}=\dfrac{(\{\tilde{H},\hat{P}\}-2\left\langle \tilde{H}\right\rangle \hat{P})}{2\Delta E^{2}}+\left|\phi\left\rangle \right\langle \psi\right|+\left|\psi\left\rangle \right\langle \phi\right|
\end{equation}
Taking traces and expectation values of the numerator of the first
term in the constraint operator:
\begin{equation}
Tr(\{\tilde{H},\hat{P}\}-2\left\langle \tilde{H}\right\rangle \hat{P})=0
\end{equation}
\begin{equation}
<(\{\tilde{H},\hat{P}\}-2\left\langle \tilde{H}\right\rangle \hat{P})>=0
\end{equation}
We derive the fundamental result:
\begin{equation}
<\hat{A}>=Tr(\hat{A})=\left\langle \psi\left|\right.\phi\right\rangle +\left\langle \phi\left|\right.\psi\right\rangle
\end{equation}
Computing the anticommutator of the A-operator and the projector we
obtain:
\begin{equation}
\left\{ \hat{A},\hat{P}\right\} =\hat{A}+\left\langle \hat{A}\right\rangle \hat{P}
\end{equation}
We now define an associated operator to specify the boundary condition
equations:
\begin{equation}
\hat{G}=\hat{A}-\left\langle \hat{A}\right\rangle \hat{P}
\end{equation}
\begin{equation}
\left\{ \hat{G}(t),\hat{P}(t)\right\} =\hat{G}(t)
\end{equation}
\begin{equation}
Tr(\hat{G})=2<\hat{G}>=0
\end{equation}
We now perform some simple algebraic manipulations amongst our original
Euler-Lagrange equations. Left-multiplying (11) with $\left\langle \phi\right|$
and the conjugate of (12) with $\left|\psi\right\rangle $ we find:
\begin{equation}
i\left\langle \phi\right.\left|\dot{\psi}\right\rangle =\left\langle \phi\right|\tilde{H}\left|\psi\right\rangle
\end{equation}
\begin{equation}
i\left\langle \dot{\phi}\right.\left|\psi\right\rangle =-\left\langle \phi\right|\tilde{H}\left|\psi\right\rangle -\dfrac{i}{2}\left\langle \psi\right|\dfrac{d}{dt}\left(\dfrac{\tilde{H}-\left\langle \tilde{H}\right\rangle \hat{\mathbf{1}}}{\Delta E^{2}}\right)\left|\psi\right\rangle
\end{equation}
Evaluating the time derivative of the expectation value of the Hamiltonian:
\begin{equation}
\dfrac{d}{dt}<\tilde{H}>=\left\langle \dot{\psi}\right|\tilde{H}\left|\psi\right\rangle +\left\langle \psi\right|\tilde{H}\left|\dot{\psi}\right\rangle +<\dfrac{d\tilde{H}}{dt}>
\end{equation}
\begin{equation}
\dfrac{d}{dt}<\tilde{H}>=<\dfrac{d\tilde{H}}{dt}>
\end{equation}
This relationship allows us to differentiate under the expectation
sign. We now calculate the time derivative of the overlap:
\begin{equation}
\dfrac{d}{dt}\left\langle \phi\left|\right.\psi\right\rangle =-\dfrac{1}{2}<\dfrac{d}{dt}(\dfrac{\tilde{H}-\left\langle \tilde{H}\right\rangle \hat{\mathbf{1}}}{\Delta E^{2}})>=0
\end{equation}
Therefore we obtain a constant of the motion given by:
\begin{equation}
\left\langle \phi\left|\right.\psi\right\rangle =const.=-\left\langle \psi\left|\right.\phi\right\rangle +<\hat{A}>
\end{equation}
A similar exercise from the variational equation for the Hamiltonian
yields:
\begin{equation}
\left|\phi\right\rangle =(\left\langle \psi\left|\right.\phi\right\rangle -\dfrac{(\tilde{H}-\left\langle \tilde{H}\right\rangle \hat{\mathbf{1}})}{2\Delta E^{2}}+\hat{G})\left|\psi\right\rangle
\end{equation}
Directly differentiating this expression with respect to time and
substituting the constant of the motion into the equation we derive
the fundamental law of motion:
\begin{equation}
(i\dfrac{d\hat{G}}{dt}+[\hat{G},\tilde{H}])\left|\psi\right\rangle =0
\end{equation}
Some trivial algebra using Hermitian conjugates and right multiplication
proves that the G-operator and projection operator follow the Heisenberg
equation of motion:
\begin{equation}
i\dfrac{d\hat{G}}{dt}=[\tilde{H},\hat{G}]
\end{equation}
\begin{equation}
i\dfrac{d\hat{P}}{dt}=[\tilde{H},\hat{P}]
\end{equation}
We then use these expressions to evaluate the time derivative of the
A-operator by direct differentiation:
\begin{equation}
i\dfrac{d\hat{A}}{dt}=[\tilde{H},\hat{A}]+i\hat{P}\dfrac{d}{dt}<\hat{A}>
\end{equation}
\begin{equation}
\dfrac{d}{dt}<\hat{A}>=0
\end{equation}
\begin{equation}
i\dfrac{d\hat{A}}{dt}=[\tilde{H},\hat{A}]
\end{equation}
It is thus proved that the G-operator, A-operator and projection operator
all obey the Heisenberg equation of motion. We shall refer to this
important relationship as the Quantum Brachistochrone Equation. This
expression forms the foundation of applied time optimal quantum state
control and we shall use it repeatedly throughout this paper. As part
of our quantum control methodology we will consider an energetic constraint
as a Lagrange multiplier in the action functional. This restriction
holds the total energy used in the process to be some finite value,
and is given by:
\begin{equation}
f_{0}(\tilde{H})=Tr(\dfrac{\tilde{H}^{2}}{2})-k=0
\end{equation}
The other constraints which restrict the degrees of freedom for the
Hamiltonian of the system will often be linear in $\tilde{H}$:
\begin{equation}
f_{j}(\tilde{H})=Tr(\tilde{H}\hat{g}_{j})
\end{equation}
where the $\hat{g}_{j}$'s are traceless Hermitian generators of the
unitary space. In this particular instance the Hamiltonian and A-operator
will obey the matrix equations:
\begin{equation}
\hat{A}=\lambda_{0}\tilde{H}+\tilde{F}
\end{equation}
\begin{equation}
Tr(\tilde{H}\tilde{F})=0
\end{equation}
\begin{equation}
\tilde{F}=\sum_{j}\lambda_{j}\hat{g}_{j}
\end{equation}
where the Hamiltonian and associated linear constraint are tracefree
Hermitian matrices. We take these results as fundamental for the time
optimal evolution of quantum states and may write the equations of
motion for our quantum dynamical system as:
\begin{singlespace}
\noindent \begin{flushleft}
\begin{equation}
i\frac{d}{dt}\left|\Psi(t)\right\rangle =\tilde{H}[t]\left|\Psi(t)\right\rangle
\end{equation}
\begin{equation}
i\frac{d}{dt}(\tilde{H}[t]+\tilde{F}[t])=\tilde{H}[t]\tilde{F}[t]-\tilde{F}[t]\tilde{H}[t]
\end{equation}
\begin{equation}
Tr(\tilde{H}[t]\tilde{F}[t])=0
\end{equation}
\begin{equation}
Tr(\tilde{H}^{2}[t]/2)=\mathrm{constant}
\end{equation}
\begin{equation}
\{\hat{G}(t),\hat{P}(t)\}=\hat{G}(t)
\end{equation}
\par\end{flushleft}
\end{singlespace}
\noindent where $\hat{G}=(\tilde{H}+\tilde{F})-\left\langle \tilde{H}+\tilde{F}\right\rangle _{\psi}$,
and the derivative indicates an explicit differentiation of the matrices
with respect to the time parameter. The constraint, boundary condition
and Hamiltonian are all Hermitian matrices, so they will evolve unitarily
via the Heisenberg equation. When we use co-ordinate transformations,
we must use partial derivatives, but the distinction is made clear.
Our formalism is explicitly time dependent, in that the matrices derived
will be (in general) functions of the time parameter. These are more
difficult dynamic problems to solve than the time invariant case,
as the matrices within the differential equations to be solved are
functions of time instead of constants.
\section{Isometry Groups Of Differential Operators And Matrices}
In this paper we will often use isometric transformations to move
between various equivalent physical pictures. Mainly we
confine ourselves to the interaction picture or the co-rotating reference
frame of Floquet; as such it is quite important that we have a concrete
method in place for understanding the meaning and application of isometry
and similarity transforms. Firstly, consider the coupled matrix differential
equations:
\begin{equation}
i\dfrac{d}{dt}\left|\chi(t)\right\rangle =(\hat{U}^{-1}\tilde{H}\hat{U})\left|\chi(t)\right\rangle
\end{equation}
\begin{equation}
i\dfrac{d\hat{U}}{dt}=(\sum_{k}\lambda_{k}(t)\hat{P}_{k})\hat{U}(t,0)
\end{equation}
We may then write the solution as:
\begin{equation}
\left|\chi(t)\right\rangle =\hat{Q}(t,0)\left|\chi(0)\right\rangle
\end{equation}
\begin{equation}
\hat{Q}(t,0)=\exp(-i\int_{0}^{t}\hat{U}^{-1}(s,0)\tilde{H}(s)\hat{U}(s,0)ds)
\end{equation}
\begin{equation}
\hat{U}(s,0)=\exp(-i\int_{0}^{s}\sum_{k}\lambda_{k}(s')\hat{P}_{k}ds')
\end{equation}
Another relevant picture we may use is the Floquet co-rotating reference
frame. In this case, we have dynamical equations of motion given by:
\begin{equation}
i\dfrac{d}{dt}\left|\varphi(t)\right\rangle =(\dfrac{\partial\hat{S}}{\partial t}+e^{-i\hat{S}}\tilde{H}(t)e^{+i\hat{S}})\left|\varphi(t)\right\rangle
\end{equation}
which has a solution that can be written in closed form as:
\begin{equation}
\left|\varphi(t)\right\rangle =\hat{K}(t,0)\left|\varphi(0)\right\rangle
\end{equation}
\begin{equation}
\hat{K}(t,0)=\exp(-i\int_{0}^{t}\hat{W}^{-1}(s,0)\tilde{H}(s)\hat{W}(s,0)ds)
\end{equation}
\begin{equation}
\hat{W}(s,0)=\exp(-i\int_{0}^{s}\dfrac{\partial\hat{S}}{\partial\tau}d\tau)
\end{equation}
\section{Time Optimal Control For SU(2)}
For our first example, we consider the simple SU(2) system. In this
case, as the state space is equivalent to a double cover of the Bloch
sphere, we expect to recover great circles as our solutions. Our fundamental
relations are:
\begin{equation}
Tr(\dfrac{\tilde{H}^{2}}{2})-k=0
\end{equation}
\begin{equation}
Tr(\tilde{H}\hat{\sigma}_{z})=0
\end{equation}
\begin{equation}
Tr(\tilde{H})=0
\end{equation}
We may write the expression for the Hamiltonian as the ansatz:
\begin{equation}
\tilde{H}=\left[\begin{array}{cc}
\alpha & \varepsilon\\
\varepsilon^{*} & -\alpha
\end{array}\right]
\end{equation}
Evaluating the trace of the Hamiltonian multiplied with the sigma-z
matrix we find directly that $\alpha=0$. Our time dependent Hamiltonian
operator is then:
\begin{equation}
\tilde{H}=\left[\begin{array}{cc}
0 & \varepsilon\\
\varepsilon^{*} & 0
\end{array}\right]
\end{equation}
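As a side remark (ours), note that $\tilde{H}^{2}=|\varepsilon|^{2}\hat{\mathbf{1}}$ for
this traceless ansatz, so the energetic constraint becomes
\[
Tr(\tilde{H}^{2}/2)=|\varepsilon(t)|^{2}=k,
\]
fixing the amplitude of the control field at $\sqrt{k}$; only its phase is free to evolve.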
Computing the quantum brachistochrone and calculating the right hand
side of the expression via ordinary matrix multiplication:
\begin{equation}
i\dfrac{d}{dt}(\tilde{H}+\tilde{F})=\tilde{H}\tilde{F}-\tilde{F}\tilde{H}
\end{equation}
we find the explicitly time dependent matrix which defines the optimal
control fields to be given by:
\begin{equation}
i\dfrac{d}{dt}\left[\begin{array}{cc}
\Omega & \varepsilon\\
\varepsilon^{*} & -\Omega
\end{array}\right]=2\Omega\left[\begin{array}{cc}
0 & -\varepsilon\\
\varepsilon^{*} & 0
\end{array}\right]
\end{equation}
By inspection, $\Omega=\textrm{const.}$ We may write the differential
equations for the complex control fields in the form:
\begin{equation}
i\dfrac{d}{dt}\left[\begin{array}{c}
\varepsilon(t)\\
\varepsilon^{*}(t)
\end{array}\right]=2\Omega\left[\begin{array}{cc}
-1 & 0\\
0 & 1
\end{array}\right]\left[\begin{array}{c}
\varepsilon(t)\\
\varepsilon^{*}(t)
\end{array}\right]
\end{equation}
\begin{equation}
\tilde{H}_{opt}(t)=\left[\begin{array}{cc}
0 & \varepsilon(0)e^{2i\Omega t}\\
\varepsilon^{*}(0)e^{-2i\Omega t} & 0
\end{array}\right]
\end{equation}
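As a quick consistency check (ours), substituting $\varepsilon(t)=\varepsilon(0)e^{2i\Omega t}$
back into the system above gives
\[
i\dfrac{d\varepsilon}{dt}=i(2i\Omega)\varepsilon(t)=-2\Omega\,\varepsilon(t),
\]
which is precisely the first component of the matrix equation, and
$|\varepsilon(t)|=|\varepsilon(0)|$, so the energetic constraint is preserved along the evolution.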
At this point the rotating wave approximation would usually be invoked,
but as the constraint is a constant of the motion, we may write:
\begin{equation}
(\tilde{H}_{opt}(t)+\Omega\hat{\sigma}_{z})\hat{U}(t,0)=\hat{U}(t,0)(\tilde{H}_{opt}(0)+\Omega\hat{\sigma}_{z})
\end{equation}
Using the Schrödinger equation, we find the dynamical equation for
the unitary:
\begin{equation}
i\dfrac{d\hat{U}}{dt}=\hat{U}(\tilde{H}_{opt}(0)+\Omega\hat{\sigma}_{z})-\Omega\hat{\sigma}_{z}\hat{U}
\end{equation}
This expression has an explicit solution:
\begin{equation}
\hat{U}(t,0)=exp(i\Omega\hat{\sigma}_{z}t)exp(-i(\tilde{H}_{opt}(0)+\Omega\hat{\sigma}_{z})t)
\end{equation}
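As a quick numerical sanity check (not part of the derivation), the
following Python/NumPy sketch verifies by finite differences that this
factorised propagator satisfies $i\,d\hat{U}/dt=\tilde{H}_{opt}(t)\hat{U}(t,0)$;
the values of $\Omega$ and $\varepsilon(0)$ are arbitrary illustrative
choices rather than quantities fixed by the text.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

# Illustrative (assumed) parameter values; Omega and eps0 are free here.
Omega, eps0 = 0.7, 0.3 + 0.4j
sz = np.array([[1, 0], [0, -1]], dtype=complex)
H0 = np.array([[0, eps0], [np.conj(eps0), 0]])        # H_opt(0)

def H_opt(t):
    # optimal Hamiltonian with rotating off-diagonal controls
    return np.array([[0, eps0*np.exp(2j*Omega*t)],
                     [np.conj(eps0)*np.exp(-2j*Omega*t), 0]])

def U(t):
    # factorised propagator quoted above
    return expm(1j*Omega*sz*t) @ expm(-1j*(H0 + Omega*sz)*t)

t, h = 1.3, 1e-6
dU = (U(t + h) - U(t - h)) / (2*h)                    # central difference
print(np.allclose(1j*dU, H_opt(t) @ U(t), atol=1e-6)) # expected: True
\end{verbatim}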
Some simple algebra using the identity $exp(i\theta\vec{n}\cdot\mathbf{\hat{\sigma}})=\cos\theta\hat{\mathbf{1}}+i\vec{n}\cdot\mathbf{\hat{\sigma}}\sin\theta$ (for a unit vector $\vec{n}$) gives
the expansion for our unitary over the generators of SU(2) as:
\[
\hat{U}(t,0)=\left[\cos(\Omega t)\cos(\Omega't)+\dfrac{\Omega}{\Omega'}\sin(\Omega t)\sin(\Omega't)\right]\hat{\mathbf{1}}
\]
\[
+\dfrac{(\varepsilon(0)-\varepsilon^{*}(0))}{2\Omega'}\sin(\Omega't)[\hat{\sigma}_{x}\sin(\Omega t)+\hat{\sigma}_{y}\cos(\Omega t)]
\]
\begin{equation}
+i\hat{\sigma}_{z}[\cos(\Omega't)\sin(\Omega t)-\dfrac{\Omega}{\Omega'}\cos(\Omega t)\sin(\Omega't)]
\end{equation}
where $\Omega'=\sqrt{k+\Omega^{2}}$. We require a unitary that satisfies
boundary conditions of the form:
\begin{equation}
\hat{U}(T,0)\hat{\sigma}_{x}\hat{U}^{\dagger}(T,0)=-\hat{\sigma}_{x}
\end{equation}
which gives us the quantisation condition $\Omega'T=m\pi/2$, $m\in\mathbb{N}$.
Choosing an initial state $\left|\Psi(0)\right\rangle =\dfrac{1}{\sqrt{2}}[1,1]^{T}$ and
a final state $\left|\Psi(T)\right\rangle =\dfrac{1}{\sqrt{2}}[1,-1]^{T}$,
we find from the boundary conditions $(5)$ the equivalent conditions
on the control fields:
\begin{equation}
\varepsilon(0)=-\varepsilon^{*}(0)
\end{equation}
\begin{equation}
\varepsilon(T)=-\varepsilon^{*}(T)
\end{equation}
We may rewrite this as:
\begin{equation}
\sin(2\Omega T)=0
\end{equation}
which gives a second quantisation condition:
\begin{equation}
\Omega T=\dfrac{n\pi}{2},n\in\mathbb{N}
\end{equation}
Combining this with our first quantisation condition, we find the
period of the wave-vector to be:
\begin{equation}
T^{2}=\dfrac{\pi^{2}}{4k}(n^{2}-m^{2})
\end{equation}
The left hand side of this expression is a positive number, and therefore
$n>m$. The minimum time of operation is then:
\begin{equation}
T_{min}=\dfrac{\pi}{2\sqrt{k}}
\end{equation}
which is of the form of the Heisenberg energy-time uncertainty principle.
We may write the Hamiltonian matrix as:
\begin{equation}
\tilde{H}_{opt}(t)=\nu_{0}(\sin2\Omega t\hat{\sigma}_{x}+\cos2\Omega t\hat{\sigma}_{y})
\end{equation}
where $\nu_{0}^{2}=k$. Hence the best we can do in terms of energy
and time is:
\begin{equation}
\nu_{0}T_{min}=\dfrac{\pi}{2}
\end{equation}
In summary, we have managed to derive the energy-time relation for
a revolving quantum state on SU(2); the arc that is described by our
unitary time evolution operator has the appropriate symmetry to be
considered a sinusoid convolved with a great circle.
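A minimal numerical illustration of this energy-time relation, under
the assumption (made here purely for illustration) that the limiting
protocol with $\Omega=0$, i.e.\ $\tilde{H}_{opt}=\nu_{0}\hat{\sigma}_{y}$
throughout, is admissible; the value of $k$ is arbitrary.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

k = 2.0                                   # assumed illustrative energy constraint
nu0 = np.sqrt(k)                          # nu_0^2 = k
Tmin = np.pi / (2.0*np.sqrt(k))           # claimed minimum time
sy = np.array([[0, -1j], [1j, 0]])
H = nu0 * sy                              # H_opt with Omega = 0

psi0 = np.array([1, 1]) / np.sqrt(2)      # initial state
target = np.array([1, -1]) / np.sqrt(2)   # final state
psiT = expm(-1j*H*Tmin) @ psi0

print(abs(np.vdot(target, psiT))**2)      # transfer fidelity, expected 1.0
print(nu0*Tmin, np.pi/2)                  # energy-time product equals pi/2
\end{verbatim}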
\section{Unitary Matrices For SU(2)}
For completeness, we list a number of useful unitary matrices that
appear in the quaternionic geodesic calculation. These are related
in various ways through phase angles and transformations; we shall
not go into this here as the principal part of the calculation is
new work on SU(3). These matrices are:
\begin{equation}
\hat{U}_{1}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cc}
1 & -1\\
1 & 1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{2}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cc}
e^{-i\theta} & -e^{-i\theta}\\
e^{i\theta} & e^{i\theta}
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{3}=i\hat{U}_{1}\hat{U}_{2}(\chi)=\left[\begin{array}{cc}
\sin\chi & -i\cos\chi\\
i\cos\chi & -\sin\chi
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{4}=\left[\begin{array}{cc}
0 & 1\\
1 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{5}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cc}
e^{-i\alpha} & -e^{-i\alpha}\\
1 & 1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{6}=exp(-i\vartheta\hat{\sigma}_{z})=e^{-i\vartheta}\left[\begin{array}{cc}
1 & 0\\
0 & e^{2i\vartheta}
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{j}\hat{U}_{j}^{\dagger}=\hat{U}_{j}^{\dagger}\hat{U}_{j}=\hat{\mathbf{1}}
\end{equation}
One may transform various Hamiltonians and density matrices to find
interesting relationships between different arrangements of constraints:
\begin{equation}
\hat{H}_{P}(\varsigma)=\hat{U}_{P}(\varsigma)\hat{H}(\varsigma)\hat{U}_{P}^{\dagger}(\varsigma)
\end{equation}
\begin{equation}
\dfrac{1}{2}\left[\begin{array}{cc}
1 & e^{-i\alpha}\\
1 & -e^{-i\alpha}
\end{array}\right]\left[\begin{array}{cc}
0 & e^{-i\alpha}\\
e^{+i\alpha} & 0
\end{array}\right]\left[\begin{array}{cc}
1 & 1\\
e^{i\alpha} & -e^{i\alpha}
\end{array}\right]=\left[\begin{array}{cc}
1 & 0\\
0 & -1
\end{array}\right]
\end{equation}
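These identities are straightforward to confirm numerically. The sketch
below checks unitarity of $\hat{U}_{1},\ldots,\hat{U}_{6}$, the product
relation $\hat{U}_{3}=i\hat{U}_{1}\hat{U}_{2}$ (with the two angles
identified), and the conjugation identity displayed above; all angles
are arbitrary illustrative values.
\begin{verbatim}
import numpy as np

theta, alpha, vartheta = 0.4, 1.1, 0.9   # arbitrary angles; chi = theta below
s2 = np.sqrt(2)

U1 = np.array([[1, -1], [1, 1]]) / s2
U2 = np.array([[np.exp(-1j*theta), -np.exp(-1j*theta)],
               [np.exp( 1j*theta),  np.exp( 1j*theta)]]) / s2
U3 = np.array([[np.sin(theta), -1j*np.cos(theta)],
               [1j*np.cos(theta), -np.sin(theta)]])
U4 = np.array([[0, 1], [1, 0]])
U5 = np.array([[np.exp(-1j*alpha), -np.exp(-1j*alpha)], [1, 1]]) / s2
U6 = np.diag([np.exp(-1j*vartheta), np.exp(1j*vartheta)])

for U in (U1, U2, U3, U4, U5, U6):                  # unitarity of every U_j
    assert np.allclose(U @ U.conj().T, np.eye(2))

assert np.allclose(1j * U1 @ U2, U3)                # U3 = i U1 U2(chi), chi = theta

# conjugation identity displayed above
A = np.array([[1, np.exp(-1j*alpha)], [1, -np.exp(-1j*alpha)]]) / 2
B = np.array([[0, np.exp(-1j*alpha)], [np.exp(1j*alpha), 0]])
C = np.array([[1, 1], [np.exp(1j*alpha), -np.exp(1j*alpha)]])
assert np.allclose(A @ B @ C, np.diag([1, -1]))
print("all identities verified")
\end{verbatim}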
\section{Bilinear Quantum Algebra On SU(2)}
\noindent \begin{flushleft}
Writing the quantum brachistochrone in vector form for a single qubit
we find:
\par\end{flushleft}
\begin{equation}
i\dfrac{d}{dt}(\left|h(t)\right\rangle +\left|f(t)\right\rangle )=\sum\left|j\right\rangle \left\langle h(t)\right|\hat{A}_{j}\left|f(t)\right\rangle
\end{equation}
\noindent where the $\left|j\right\rangle $'s are the standard
basis set, and the matrices $\hat{A}_{j}$ have the property that
$\left\langle h(t)\right|\hat{A}_{j}\left|f(t)\right\rangle +\left\langle f(t)\right|\hat{A}_{j}\left|h(t)\right\rangle =0$.
Let us calculate the general SU(2) case as an application of the formula.
In this situation we have system Hamiltonian and matrix constraints
given by:
\begin{equation}
\tilde{H}(t)=\sum_{i}h_{i}(t)\hat{\sigma}_{i}\quad\mathrm{and}\quad\tilde{F}(t)=\sum_{j}f_{j}(t)\hat{\sigma}_{j}
\end{equation}
where the Pauli matrices have matrix representation
\begin{equation}
\hat{\sigma}_{i}\in\left\{ \left[\begin{array}{cc}
1 & 0\\
0 & -1
\end{array}\right],\left[\begin{array}{cc}
0 & 1\\
1 & 0
\end{array}\right],\left[\begin{array}{cc}
0 & -i\\
i & 0
\end{array}\right]\right\}
\end{equation}
Our constraints state that the Hamiltonian and $\tilde{F}(t)$ are
orthogonal, in that their dot product is zero:
\begin{equation}
Tr(\tilde{H}(t)\tilde{F}(t))=\left\langle h(t)\right|\left.f(t)\right\rangle =\left\langle f(t)\right|\left.h(t)\right\rangle =0
\end{equation}
\noindent \begin{flushleft}
$i\dfrac{d}{dt}\left[\begin{array}{cc}
(h_{z}+f_{z}) & (\varepsilon_{h}+\varepsilon_{f})\\
(\varepsilon_{h}^{*}+\varepsilon_{f}^{*}) & -(h_{z}+f_{z})
\end{array}\right]$
\par\end{flushleft}
\begin{equation}
=\left[\begin{array}{cc}
(\varepsilon_{h}\varepsilon_{f}^{*}-\varepsilon_{f}\varepsilon_{h}^{*}) & +2(h_{z}\varepsilon_{f}-f_{z}\varepsilon_{h})\\
-2(h_{z}\varepsilon_{f}^{*}-f_{z}\varepsilon_{h}^{*}) & -(\varepsilon_{h}\varepsilon_{f}^{*}-\varepsilon_{f}\varepsilon_{h}^{*})
\end{array}\right]
\end{equation}
\noindent This may be written in the equivalent vector form as:
\begin{equation}
i\dfrac{d}{dt}\left(\left[\begin{array}{c}
\sqrt{2}h_{z}\\
\varepsilon_{h}\\
\varepsilon_{h}^{*}
\end{array}\right]+\left[\begin{array}{c}
\sqrt{2}f_{z}\\
\varepsilon_{f}\\
\varepsilon_{f}^{*}
\end{array}\right]\right)=\left[\begin{array}{c}
\sqrt{2}(\varepsilon_{h}\varepsilon_{f}^{*}-\varepsilon_{f}\varepsilon_{h}^{*})\\
2(h_{z}\varepsilon_{f}-f_{z}\varepsilon_{h})\\
-2(h_{z}\varepsilon_{f}^{*}-f_{z}\varepsilon_{h}^{*})
\end{array}\right]
\end{equation}
\noindent Our Hamiltonian and constraint vectors obey the relation
\begin{equation}
\left\langle f(t)\right|\hat{A}_{j}\left|h(t)\right\rangle +\left\langle h(t)\right|\hat{A}_{j}\left|f(t)\right\rangle =0
\end{equation}
\noindent and hence we finally obtain the control matrices
for the vector control system on SU(2) as:
\begin{equation}
\hat{A}_{1}=\sqrt{2}\left[\begin{array}{ccc}
0 & 0 & 0\\
0 & 1 & 0\\
0 & 0 & -1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{A}_{2}=\sqrt{2}\left[\begin{array}{ccc}
0 & -1 & 0\\
0 & 0 & 0\\
1 & 0 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{A}_{3}=\sqrt{2}\left[\begin{array}{ccc}
0 & 0 & 1\\
-1 & 0 & 0\\
0 & 0 & 0
\end{array}\right]
\end{equation}
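The antisymmetry property $\left\langle h\right|\hat{A}_{j}\left|f\right\rangle +\left\langle f\right|\hat{A}_{j}\left|h\right\rangle =0$
is easy to confirm numerically; the sketch below uses arbitrary illustrative
values for $h_{z},f_{z},\varepsilon_{h},\varepsilon_{f}$ together with
the adjoint-row convention described in the next paragraph.
\begin{verbatim}
import numpy as np

s2 = np.sqrt(2)
A1 = s2 * np.array([[0, 0, 0], [0, 1, 0], [0, 0, -1]], dtype=complex)
A2 = s2 * np.array([[0, -1, 0], [0, 0, 0], [1, 0, 0]], dtype=complex)
A3 = s2 * np.array([[0, 0, 1], [-1, 0, 0], [0, 0, 0]], dtype=complex)

# arbitrary illustrative field values
hz, fz = 0.6, -1.2
eh, ef = 0.3 + 0.7j, -0.5 + 0.2j

ket_h = np.array([s2*hz, eh, np.conj(eh)])   # |h> = (sqrt(2) h_z, eps_h, eps_h*)
ket_f = np.array([s2*fz, ef, np.conj(ef)])
bra_h = ket_h.conj()                         # <h| = (lambda, eps*, eps)
bra_f = ket_f.conj()

for A in (A1, A2, A3):
    val = bra_h @ A @ ket_f + bra_f @ A @ ket_h
    print(np.isclose(val, 0))                # expected: True for every A_j
\end{verbatim}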
\noindent \begin{flushleft}
By explicit calculation one can show that the identity $\left\langle h(t)\right|\hat{A}_{j}\left|f(t)\right\rangle +\left\langle f(t)\right|\hat{A}_{j}\left|h(t)\right\rangle =0$
holds for all $j$; adjoint states are defined on the space $\left[\begin{array}{ccc}
\mathbb{R}^{n-1}, & \mathbb{C}^{k*}, & \mathbb{C}^{k}\end{array}\right]$ and are obtained by complex conjugating
and transposing the column vector we started with. Let us now apply
this formulation to a well-known example, namely the time-dependent
Hamiltonian used by Carlini et al.\ \cite{Carlini}. The advantage of
writing the quantum brachistochrone in vector form is that it makes
the underlying physics more transparent. In this case the Hamiltonian
and constraint matrices are given by:
\par\end{flushleft}
\begin{equation}
\tilde{H}(t)=\left[\begin{array}{cc}
0 & \varepsilon_{h}(t)\\
\varepsilon_{h}^{*} & 0
\end{array}\right];\tilde{F}(t)=\left[\begin{array}{cc}
f_{z}(t) & 0\\
0 & -f_{z}(t)
\end{array}\right]
\end{equation}
\noindent \begin{flushleft}
The vectors defined by these matrices may be written as:
\par\end{flushleft}
\begin{equation}
\left|h(t)\right\rangle =\left[\begin{array}{c}
0\\
\varepsilon_{h}(t)\\
\varepsilon_{h}^{*}(t)
\end{array}\right];\left|f(t)\right\rangle =\left[\begin{array}{c}
\sqrt{2}f_{z}(t)\\
0\\
0
\end{array}\right]
\end{equation}
\noindent \begin{flushleft}
where we have used the factor of $\sqrt{2}$ to maintain the trace
product relation. These are multivectors in many respects, having
a real part and two complex components. The link between the matrix
form and the vector control equation is clear; we may use either technique
in order to calculate the time optimal Hamiltonian operator.
\par\end{flushleft}
\noindent \begin{flushleft}
In this situation we may write the quantum brachistochrone in vector
form as:
\par\end{flushleft}
\noindent $i\dfrac{d}{dt}\left[\begin{array}{c}
\sqrt{2}f_{z}(t)\\
\varepsilon_{h}(t)\\
\varepsilon_{h}^{*}(t)
\end{array}\right]=$
~
\begin{singlespace}
$\,\,\left[\begin{array}{c}
1\\
0\\
0
\end{array}\right]\left[\begin{array}{ccc}
\sqrt{2}f_{z}, & 0, & 0\end{array}\right]\sqrt{2}\left[\begin{array}{ccc}
0 & 0 & 0\\
0 & 1 & 0\\
0 & 0 & -1
\end{array}\right]\left[\begin{array}{c}
0\\
\varepsilon_{h}\\
\varepsilon_{h}^{*}
\end{array}\right]$
$+\left[\begin{array}{c}
0\\
1\\
0
\end{array}\right]\left[\begin{array}{ccc}
\sqrt{2}f_{z}, & 0, & 0\end{array}\right]\sqrt{2}\left[\begin{array}{ccc}
0 & -1 & 0\\
0 & 0 & 0\\
1 & 0 & 0
\end{array}\right]\left[\begin{array}{c}
0\\
\varepsilon_{h}\\
\varepsilon_{h}^{*}
\end{array}\right]$
$+\left[\begin{array}{c}
0\\
0\\
1
\end{array}\right]\left[\begin{array}{ccc}
\sqrt{2}f_{z}, & 0, & 0\end{array}\right]\sqrt{2}\left[\begin{array}{ccc}
0 & 0 & 1\\
-1 & 0 & 0\\
0 & 0 & 0
\end{array}\right]\left[\begin{array}{c}
0\\
\varepsilon_{h}\\
\varepsilon_{h}^{*}
\end{array}\right]$
\end{singlespace}
\begin{flushleft}
\begin{equation}
\therefore i\dfrac{d}{dt}\left[\begin{array}{c}
\sqrt{2}f_{z}(t)\\
\varepsilon_{h}(t)\\
\varepsilon_{h}^{*}(t)
\end{array}\right]=\left[\begin{array}{c}
0\\
-2f_{z}\varepsilon_{h}\\
+2f_{z}\varepsilon_{h}^{*}
\end{array}\right]
\end{equation}
\par\end{flushleft}
\noindent \begin{flushleft}
as required. This system may be rewritten in the compact form:
\par\end{flushleft}
\begin{equation}
\left\langle h\right|\hat{D}\left|h\right\rangle =\mathrm{const.}=\lambda^{2}+|\varepsilon|^{2}
\end{equation}
\begin{equation}
\left\langle f\right|\hat{E}\left|h\right\rangle =\left\langle h\right|\hat{E}\left|f\right\rangle =0=2\lambda\Gamma+\varepsilon^{*}\Pi+\varepsilon\Pi^{*}
\end{equation}
\begin{equation}
\left\langle f\right|\hat{B}_{j}\left|h\right\rangle =-\left\langle h\right|\hat{B}_{j}\left|f\right\rangle
\end{equation}
\begin{equation}
i\dfrac{d}{dt}(\left|h\right\rangle +\left|f\right\rangle )=\sum_{j}\left|j\right\rangle \left\langle h\right|\hat{B}_{j}\left|f\right\rangle
\end{equation}
\begin{eqnarray}
\hat{B}_{1}=\left[\begin{array}{ccc}
0 & 0 & 0\\
0 & -1 & 0\\
0 & 0 & 1
\end{array}\right] & & \hat{B}_{2}=2\left[\begin{array}{ccc}
0 & 1 & 0\\
0 & 0 & 0\\
-1 & 0 & 0
\end{array}\right]
\end{eqnarray}
\begin{equation}
\hat{B}_{3}=2\left[\begin{array}{ccc}
0 & 0 & -1\\
1 & 0 & 0\\
0 & 0 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{D}=\frac{1}{2}\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & \Upsilon & 0\\
0 & 0 & (1-\Upsilon)
\end{array}\right]
\end{equation}
\begin{eqnarray}
& \hat{E}=\frac{1}{2}\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 1/2 & 0\\
0 & 0 & 1/2
\end{array}\right]
\end{eqnarray}
\begin{equation}
\left|h\right\rangle =\left[\begin{array}{c}
\lambda\\
\varepsilon\\
\varepsilon^{*}
\end{array}\right],\left|f\right\rangle =\left[\begin{array}{c}
\Gamma\\
\Pi\\
\Pi^{*}
\end{array}\right]\in\left[\begin{array}{c}
\mathbb{R}\\
\mathbb{C^{\downharpoonleft}}
\end{array}\right]
\end{equation}
\begin{equation}
\left\langle h\right|=\left[\lambda,\varepsilon^{*},\varepsilon\right],\left\langle f\right|=\left[\Gamma,\Pi^{*},\Pi\right]\in\left[\mathbb{R},\mathbb{C^{\rightharpoondown}}\right]
\end{equation}
These types of mappings must preserve inner product relationships
between the Hamiltonian and constraint.
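As a consistency check on the worked example above, the following sketch
confirms that the triple-product expansion over the control matrices
reproduces the commutator $[\tilde{H},\tilde{F}]$ written in component
form; the numerical values of $f_{z}$ and $\varepsilon_{h}$ are arbitrary.
\begin{verbatim}
import numpy as np

s2 = np.sqrt(2)
fz, eh = 0.8, 0.4 - 0.9j                     # arbitrary illustrative values

H = np.array([[0, eh], [np.conj(eh), 0]])    # Carlini-type Hamiltonian
F = np.array([[fz, 0], [0, -fz]])            # diagonal constraint
C = H @ F - F @ H                            # right-hand side of the brachistochrone

A = [s2*np.array(M, dtype=complex) for M in
     ([[0, 0, 0], [0, 1, 0], [0, 0, -1]],
      [[0, -1, 0], [0, 0, 0], [1, 0, 0]],
      [[0, 0, 1], [-1, 0, 0], [0, 0, 0]])]
ket_h = np.array([0, eh, np.conj(eh)])       # |h(t)>
bra_f = np.array([s2*fz, 0, 0])              # <f(t)|

vec = np.array([bra_f @ Aj @ ket_h for Aj in A])   # expansion displayed above
target = np.array([0, C[0, 1], C[1, 0]])           # (sqrt(2) x diagonal, eps, eps*) parts
print(np.allclose(vec, target))                    # expected: True
\end{verbatim}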
\section{SO(3) Matrix Analysis}
Consider a Hamiltonian matrix of the form:
\begin{equation}
\tilde{H}[t]=\vec{n}(t)\centerdot\hat{S}=\left[\begin{array}{ccc}
n_{z}(t) & 0 & \varepsilon(t)\\
0 & 0 & 0\\
\varepsilon^{*}(t) & 0 & -n_{z}(t)
\end{array}\right]
\end{equation}
We form a constraint matrix:
\begin{equation}
\tilde{F}[t]=\left[\begin{array}{ccc}
u & K_{1} & 0\\
K_{1}^{*} & -2u & K_{2}\\
0 & K_{2}^{*} & u
\end{array}\right]
\end{equation}
The quantum brachistochrone equation reads as:
\begin{equation}
i\frac{d}{dt}(\tilde{H}(t)+\tilde{F}(t))=\tilde{H}(t)\tilde{F}(t)-\tilde{F}(t)\tilde{H}(t)
\end{equation}
Evaluating this for our matrix system, we find the set of differential
equations:
\begin{equation}
\dot{u}=\dot{n}_{z}=\dot{\varepsilon}=\dot{\varepsilon}^{*}=0;\tilde{H}[t]=\tilde{H}[0]
\end{equation}
Writing our wave-vector in standard form we find the relations:
\begin{equation}
c_{2}(t)=c_{2}(0)
\end{equation}
\begin{eqnarray}
i\frac{d}{dt}\left[\begin{array}{c}
c_{1}(t)\\
c_{3}(t)
\end{array}\right]=\left[\begin{array}{cc}
n_{z} & \varepsilon\\
\varepsilon^{*} & -n_{z}
\end{array}\right]\left[\begin{array}{c}
c_{1}(t)\\
c_{3}(t)
\end{array}\right]
\end{eqnarray}
\begin{equation}
Tr(\tilde{H}^{2}/2)=\mathrm{const.}=n_{z}^{2}+|\varepsilon|^{2}=R^{2}
\end{equation}
The propagator may be written in the compact form:
\begin{equation}
\hat{U}(t,0)=\hat{\mathbf{1}}-i\frac{\mathrm{sin}Rt}{R}\tilde{H}[0]+(\mathrm{cos}Rt-1)\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 0 & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
Choosing an initial state $\left|\psi(0)\right\rangle =\left[\begin{array}{ccc}
1 & 0 & 0\end{array}\right]^{T}$ we find the wave-vector:
\begin{equation}
\left|\psi(t)\right\rangle =\hat{U}(t,0)\left|\psi(0)\right\rangle =\left[\begin{array}{c}
\mathrm{cos}Rt-i\frac{n_{z}}{R}\mathrm{sin}Rt\\
0\\
-\frac{i}{R}\varepsilon^{*}\mathrm{sin}Rt
\end{array}\right]
\end{equation}
This is $2\pi/R$-periodic and satisfies $-\left|\psi(\frac{m\pi}{R})\right\rangle =\left|\psi(0)\right\rangle =\left|\psi(\frac{2m\pi}{R})\right\rangle $,
where $m$ is an odd integer.
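The closed-form propagator above is easy to check against direct matrix
exponentiation; the parameter values in the sketch below are arbitrary
illustrative choices.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

nz, eps = 0.9, 0.4 + 0.6j                     # arbitrary illustrative parameters
H = np.array([[nz, 0, eps], [0, 0, 0], [np.conj(eps), 0, -nz]])
R = np.sqrt(nz**2 + abs(eps)**2)              # Tr(H^2/2) = R^2

def U_closed(t):
    P = np.diag([1.0, 0.0, 1.0])
    return np.eye(3) - 1j*np.sin(R*t)/R * H + (np.cos(R*t) - 1)*P

t = 2.1
print(np.allclose(U_closed(t), expm(-1j*H*t)))    # expected: True

psi0 = np.array([1, 0, 0], dtype=complex)
psi = U_closed(np.pi/R) @ psi0
print(np.allclose(psi, -psi0))                    # |psi(m*pi/R)> = -|psi(0)>, m odd
\end{verbatim}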
\section{Time Dependent Elliptic Hamiltonian on SU(3)}
\noindent In this section we shall consider three examples of SU(3)
Hamiltonians. The physical motivation for this is to explicitly calculate
brachistochrones for some model problems which are more general than
the SU(2) models considered in \cite{Carlini}. Boscain et al.\ have
examined various situations on SU(3) with similar properties;
see for example \cite{Boscain}. Our first Hamiltonian matrix is given
by:
\noindent
\begin{equation}
\tilde{H}(t)=\left[\begin{array}{ccc}
0 & \alpha(t) & 0\\
\alpha(t) & 0 & -i\beta(t)\\
0 & i\beta(t) & 0
\end{array}\right]
\end{equation}
\begin{singlespace}
\noindent with an associated constraint matrix
\end{singlespace}
\begin{equation}
\tilde{F}(t)=\left[\begin{array}{ccc}
\omega_{1}(t) & -i\gamma(t) & \varepsilon(t)\\
i\gamma(t) & \omega_{2}(t) & \kappa(t)\\
\varepsilon^{*}(t) & \kappa(t) & \omega_{3}(t)
\end{array}\right];\sum_{j=1}^{3}\omega_{j}=0.
\end{equation}
\noindent The quantum brachistochrone equation in vector form then
reads as:
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
\omega_{1}(t)\\
\omega_{2}(t)\\
\omega_{3}(t)
\end{array}\right]=2\left[\begin{array}{c}
\gamma\alpha\\
-(\gamma\alpha+\beta\kappa)\\
\beta\kappa
\end{array}\right];
\end{equation}
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
\gamma(t)\\
\kappa(t)
\end{array}\right]=\left[\begin{array}{c}
-\beta v-(\omega_{1}-\omega_{3})\alpha\\
-\alpha v+(\omega_{2}-\omega_{3})\beta
\end{array}\right];
\end{equation}
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
\alpha(t)\\
\beta(t)
\end{array}\right]=-i\Omega\left[\begin{array}{cc}
0 & -i\\
i & 0
\end{array}\right]\left[\begin{array}{c}
\alpha\\
\beta
\end{array}\right];
\end{equation}
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
\Omega(t)\\
v(t)
\end{array}\right]=\left[\begin{array}{c}
0\\
\alpha\kappa+\beta\gamma
\end{array}\right];
\end{equation}
\noindent where the complex variable $\varepsilon(t)=\Omega(t)-iv(t)$.
This has a solution for the time-optimal Hamiltonian which is given
by the matrix:
\begin{equation}
\tilde{H}(t)=R\left[\begin{array}{ccc}
0 & \mathrm{cos}(\Omega t) & 0\\
\mathrm{cos}(\Omega t) & 0 & -i\mathrm{sin}(\Omega t)\\
0 & i\mathrm{sin}(\Omega t) & 0
\end{array}\right]
\end{equation}
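A small numerical check (with arbitrary values of $R$ and $\Omega$)
that the entries of this candidate solution satisfy the rotation equation
for $(\alpha,\beta)$ derived above and keep $Tr(\tilde{H}^{2}/2)$ constant:
\begin{verbatim}
import numpy as np

R, Omega = 1.5, 0.6                     # arbitrary illustrative constants

def H(t):
    c, s = np.cos(Omega*t), np.sin(Omega*t)
    return R*np.array([[0, c, 0], [c, 0, -1j*s], [0, 1j*s, 0]])

alpha = lambda t: R*np.cos(Omega*t)     # alpha(t), beta(t) read off from H(t)
beta  = lambda t: R*np.sin(Omega*t)

t, h = 0.8, 1e-6
lhs = np.array([(alpha(t+h) - alpha(t-h))/(2*h),
                (beta(t+h) - beta(t-h))/(2*h)])
rhs = -1j*Omega*np.array([[0, -1j], [1j, 0]]) @ np.array([alpha(t), beta(t)])
print(np.allclose(lhs, rhs, atol=1e-6))                 # rotation equation holds
print(np.isclose(np.trace(H(t) @ H(t)).real/2, R**2))   # Tr(H^2/2) = R^2
\end{verbatim}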
\noindent We now apply this Hamiltonian to a wave-vector, given as
a superposition of energy eigenstates which may be (in general) time
dependent. This has the expansion:
\noindent \begin{flushleft}
\begin{equation}
\left|\Psi(t)\right\rangle =\Delta_{1}(t)\left|+R(t)\right\rangle +\Delta_{2}(t)\left|0(t)\right\rangle +\Delta_{3}(t)\left|-R(t)\right\rangle
\end{equation}
\par\end{flushleft}
\begin{singlespace}
\noindent \begin{flushleft}
We obtain the formula for the wave-vector in the standard basis $\left|\Psi(t)\right\rangle =\left[\begin{array}{ccc}
c_{1}(t), & c_{2}(t), & c_{3}(t)\end{array}\right]^{T}$ as given by:
\par\end{flushleft}
\end{singlespace}
\begin{flushleft}
\begin{equation}
\begin{array}{cc}
c_{1}(t)= & -\dfrac{\mathrm{sin}(\Omega't)}{\Omega'}\left[i\dfrac{z_{2}R}{\Omega'}+\Omega\left[\dfrac{z_{1}}{\Omega'}\mathrm{cos}(\Omega't)-z_{3}\mathrm{sin}(\Omega't)\right]\right]\\
& +\mathrm{cos}(\Omega t)\left[z_{3}\mathrm{cos}(\Omega't)+z_{1}\dfrac{\mathrm{sin}(\Omega't)}{\Omega'}\right]
\end{array}
\end{equation}
\par\end{flushleft}
\begin{equation}
c_{2}(t)=\frac{1}{(\Omega')^{2}}\left[\Omega z_{2}+iR\left[z_{1}\mathrm{cos}(\Omega't)-z_{3}\mathrm{sin}(\Omega't)\right]\right]
\end{equation}
\begin{equation}
\begin{array}{cc}
c_{3}(t)= & \dfrac{\mathrm{cos}(\Omega t)}{\Omega'}\left[i\dfrac{z_{2}R}{\Omega'}+\Omega\left[z_{1}\dfrac{\mathrm{cos}(\Omega't)}{\Omega'}-z_{3}\mathrm{sin}(\Omega't)\right]\right]\\
& -i\mathrm{sin}(\Omega t)\left[z_{3}\mathrm{cos}(\Omega't)+\dfrac{z_{1}}{\Omega'}\mathrm{sin}(\Omega't)\right]
\end{array}
\end{equation}
where the parameters $z_{1},z_{2},z_{3}$ are given by the formulae:
\begin{equation}
\left[\begin{array}{c}
z_{1}\\
z_{2}\\
z_{3}
\end{array}\right]=\left[\begin{array}{ccc}
\Omega & -iR & 0\\
-iR & \Omega & 0\\
0 & 0 & 1
\end{array}\right]\left[\begin{array}{c}
\Delta_{2}(0)\\
\Delta_{-}(0)\\
\Delta_{+}(0)
\end{array}\right]
\end{equation}
\begin{flushleft}
\begin{equation}
\Delta_{\pm}=\frac{1}{\sqrt{2}}(\Delta_{1}\pm\Delta_{3})
\end{equation}
\begin{equation}
\Omega'=\sqrt{R^{2}+\Omega^{2}}
\end{equation}
which define our initial state via:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\left|\Psi(0)\right\rangle =\left[\begin{array}{c}
z_{3}\\
\dfrac{1}{\Omega'^{2}}(\Omega z_{2}+iRz_{1})\\
-i\dfrac{(\Omega z_{1}+iRz_{2})}{\Omega'^{2}}
\end{array}\right];
\end{equation}
In this case we were able to find a solution despite the explicit
time dependence of the system. This example is particularly useful
as it indicates a general method of solution for the quantum brachistochrone
equation which we may use on the semisimple subgroups.
\par\end{flushleft}
\section{Time Dependent Geodesic on SU(3)}
We now take a slightly more general Hamiltonian operator and constraint
than the previous example, being the matrices:
\begin{equation}
\tilde{H}(t)=\left[\begin{array}{ccc}
0 & \varepsilon_{1}(t) & 0\\
\varepsilon_{1}^{*}(t) & 0 & \varepsilon_{2}(t)\\
0 & \varepsilon_{2}^{*}(t) & 0
\end{array}\right];
\end{equation}
\begin{equation}
\tilde{F}(t)=\left[\begin{array}{ccc}
\omega_{1} & 0 & \kappa\\
0 & \omega_{2} & 0\\
\kappa^{*} & 0 & -(\omega_{1}+\omega_{2})
\end{array}\right]
\end{equation}
\noindent Using the quantum brachistochrone equation we obtain
the set of coupled differential equations:
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
\kappa(t)\\
\kappa^{*}(t)
\end{array}\right]=\left[\begin{array}{c}
0\\
0
\end{array}\right]
\end{equation}
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
\omega_{1}\\
\omega_{2}
\end{array}\right]=\left[\begin{array}{c}
0\\
0
\end{array}\right]
\end{equation}
\begin{equation}
i\frac{d}{dt}\left[\begin{array}{c}
\varepsilon_{1}(t)\\
\varepsilon_{1}^{*}(t)\\
\varepsilon_{2}(t)\\
\varepsilon_{2}^{*}(t)
\end{array}\right]=\left[\begin{array}{c}
-(\omega_{1}-\omega_{2})\varepsilon_{1}-\kappa\varepsilon_{2}^{*}\\
(\omega_{1}-\omega_{2})\varepsilon_{1}^{*}+\kappa^{*}\varepsilon_{2}\\
\kappa\varepsilon_{1}^{*}-(\omega_{1}+2\omega_{2})\varepsilon_{2}\\
-\kappa^{*}\varepsilon_{1}+(\omega_{1}+2\omega_{2})\varepsilon_{2}^{*}
\end{array}\right]
\end{equation}
\noindent from which we immediately observe that $\omega_{1},\omega_{2}$
and $\kappa$ are constants of the motion. We now assume boundary
conditions on the state, such that initially at time zero the wave-vector
is in the state $\left[\begin{array}{ccc}
1, & 0, & 0\end{array}\right]^{T}$ and it is in the state $\left[\begin{array}{ccc}
0, & 0, & 1\end{array}\right]^{T}$ after a time $t=T$. We then have boundary conditions on the projection
operator as given by the matrices:
\begin{equation}
\hat{P}(0)=\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 0 & 0\\
0 & 0 & 0
\end{array}\right],\,\hat{P}(T)=\left[\begin{array}{ccc}
0 & 0 & 0\\
0 & 0 & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
\noindent From \cite{Carlini}, we may write the boundary conditions
on the Hamiltonian as:
\begin{flushleft}
\begin{equation}
\hat{G}(t)=\left\{ \hat{G}(t),\hat{P}(t)\right\} ,0\leq t\leq T
\end{equation}
where
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{G}(t)=(\tilde{H}(t)+\tilde{F}(t))-\left\langle \psi(t)\right|(\tilde{H}(t)+\tilde{F}(t))\left|\psi(t)\right\rangle \hat{P}(t)
\end{equation}
After some elementary linear algebra, we find that the boundary conditions
are equivalent to a series of relations on the control fields:
\par\end{flushleft}
\begin{equation}
\omega_{1}=\omega_{2}=0
\end{equation}
\begin{equation}
\varepsilon_{2}(0)=\varepsilon_{2}^{*}(0)=0
\end{equation}
\begin{equation}
\varepsilon_{1}^{*}(T)=\varepsilon_{1}(T)=0
\end{equation}
As the diagonal elements of $\tilde{F}(t)$ are constant in time and,
by the boundary conditions, are initially zero, we conclude that they
vanish throughout the entire motion. This means that the
entire constraint matrix is constant for this situation, and we may
write the time evolution operator as:
\begin{equation}
\hat{U}(t,0)=\mathrm{exp}(+it\tilde{F})\mathrm{exp}(-it(\tilde{H}(0)+\tilde{F}))
\end{equation}
\noindent Evaluating these matrices via the eigenvalue determinant
and Laufer's formulae~\cite{Laufer}, we find an explicit formula for
the state as a function of time:
\begin{equation}
\left|\psi(t)\right\rangle =\left[\begin{array}{c}
\cos(t\Delta)\cos(\left|\kappa\right|t)+\frac{\left|\kappa\right|}{\Delta}\sin(\left|\kappa\right|t)\sin(t\Delta)\\
-\frac{i\varepsilon_{1}^{*}(0)}{\Delta}\sin(t\Delta)\\
i\kappa^{*}\left[\frac{1}{\left|\kappa\right|}\sin(\left|\kappa\right|t)\cos(t\Delta)-\frac{1}{\Delta}\sin(t\Delta)\cos(\left|\kappa\right|t)\right]
\end{array}\right]
\end{equation}
\noindent \begin{flushleft}
where $\Delta=\sqrt{\left|\kappa\right|^{2}+\left|\varepsilon_{1}(0)\right|^{2}}$.
\par\end{flushleft}
\noindent Calculating the projection operator, we obtain boundary
conditions on the state vector:
\begin{flushleft}
$\hat{P}(t)=\left|\psi(t)\right\rangle \left\langle \psi(t)\right|$
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
=\left[\begin{array}{ccc}
\left|c_{1}(t)\right|^{2} & c_{1}(t)c_{2}^{*}(t) & c_{1}(t)c_{3}^{*}(t)\\
c_{1}^{*}(t)c_{2}(t) & \left|c_{2}(t)\right|^{2} & c_{2}(t)c_{3}^{*}(t)\\
c_{1}^{*}(t)c_{3}(t) & c_{2}^{*}(t)c_{3}(t) & \left|c_{3}(t)\right|^{2}
\end{array}\right]
\end{equation}
\par\end{flushleft}
\noindent After a time $t=T$ the matrix elements of the projection
operator satisfy:
\noindent
\begin{equation}
\left|c_{1}(T)\right|^{2}=\left|c_{2}(T)\right|^{2}=0;\left|c_{3}(T)\right|^{2}=1
\end{equation}
\noindent and hence we may write the equivalent boundary conditions
as:
\begin{equation}
\left|c_{2}(T)\right|^{2}=0=\left|\frac{\varepsilon_{1}^{*}(0)}{\Delta}\sin(T\Delta)\right|^{2}
\end{equation}
\noindent thereby obtaining the condition of time quantisation:
\begin{equation}
\mathrm{sin}(T\Delta)=0\Leftrightarrow T=\dfrac{n\pi}{\sqrt{\left|\kappa\right|^{2}+\left|\varepsilon_{1}(0)\right|^{2}}}
\end{equation}
Considering the boundary conditions on the first component of the
wave-vector, we find immediately that $\mathrm{cos}(T\left|\kappa\right|)=0$,
which implies that:
\begin{equation}
T=\dfrac{(2n'+1)\pi}{2\left|\kappa\right|},\, n'\in\mathbb{N}
\end{equation}
\noindent For these to both be the minimum time, they must be equal.
From this we find the relationship:
\begin{equation}
\left|\kappa\right|^{2}(1-(\frac{(n'+1/2)}{n})^{2})=(\frac{(n'+1/2)}{n})^{2}\left|\varepsilon_{1}(0)\right|^{2}
\end{equation}
\noindent The quantity on the right hand side of the equality is the
square of a real number, and hence is positive. This means that the
function multiplying $\left|\kappa\right|^{2}$on the left hand side
must be positive, and hence we finally derive the minimum time for
this particular state transfer as:
\begin{equation}
\begin{array}{c}
n\geq n'+1/2\\
T_{min}=\dfrac{\pi}{2\left|\kappa\right|}=\dfrac{\sqrt{3}\pi}{2\left|\varepsilon_{1}(0)\right|}
\end{array}
\end{equation}
\noindent After some further algebra we obtain the time-optimal Hamiltonian
operator as given by the matrix:
\begin{equation}
\tilde{H}(t)=R\left[\begin{array}{ccc}
0 & \mathrm{cos}(kt) & 0\\
\mathrm{cos}(kt) & 0 & e^{-i\theta}\mathrm{sin}(kt)\\
0 & e^{+i\theta}\mathrm{sin}(kt) & 0
\end{array}\right]
\end{equation}
\noindent where $R=\left|\varepsilon_{1}(0)\right|$ and $k=\left|\kappa\right|$.
Note that as the constraint was calculated to be a constant matrix,
we were able to evaluate the time evolution operator without having
to calculate the full time-dependent case. All that was required was
the time dependence of the constraint, the initial condition of the
Hamiltonian operator, and the boundary conditions on the state. This
is a very useful technique, as it allows us to move to an alternative
reference frame in which, for some situations, the analysis is more
tractable. Our solution is consistent with the results
of \cite{Boscain}. Much of the work that has been conducted on SU(3)
Hamiltonians has been related to STIRAP \cite{Bergmann}, \cite{Eckert},
\cite{Greentree}. We do not require their assumption of adiabaticity,
nor the rotating wave approximation, to derive our results. Our method
may therefore be regarded as a competing approach: it achieves the
desired transition in the shortest possible time, rather than through
the slow, adiabatic process used in STIRAP.
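The minimum-time transfer can be confirmed numerically from the constant-constraint
factorisation of the propagator used above; the value of $\kappa$ below
is an arbitrary illustrative choice, with $|\varepsilon_{1}(0)|=\sqrt{3}\left|\kappa\right|$
as derived.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

kappa = 0.5*np.exp(0.3j)                     # arbitrary illustrative constraint
eps1 = np.sqrt(3)*abs(kappa)                 # |eps_1(0)| = sqrt(3)|kappa|

H0 = np.array([[0, eps1, 0], [np.conj(eps1), 0, 0], [0, 0, 0]])
F  = np.array([[0, 0, kappa], [0, 0, 0], [np.conj(kappa), 0, 0]])
Tmin = np.pi/(2*abs(kappa))                  # claimed minimum time

U = expm(1j*Tmin*F) @ expm(-1j*Tmin*(H0 + F))   # U(T,0) = e^{+iTF} e^{-iT(H(0)+F)}
psi0 = np.array([1, 0, 0], dtype=complex)
psiT = U @ psi0
print(abs(psiT[2])**2)                       # population of target state, expected 1.0

Delta = np.sqrt(abs(kappa)**2 + eps1**2)
print(abs(np.sin(Tmin*Delta)) < 1e-12)       # first quantisation condition
\end{verbatim}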
\section{Triangular Matrices}
\noindent We now move to the more technical part of this paper and
derive a number of results on matrix groups. The aim is to develop
representations for angular symmetry on this elliptic group and to
identify any general formulae that can be derived. We proceed from
simple examples and steadily increase in difficulty towards the main
problems. The space of complex matrices has a rich structure, and
the dynamics of its semigroups governs the behaviour of the time
dependent, periodic state. For this reason we adopt a one-parameter-at-a-time
approach and show how each individual semigroup works.
\noindent The simplest way to decompose a square matrix is into the
entries above the diagonal, the entries below the diagonal, and the
diagonal itself. Our space of time dependent Hamiltonian operators
has the standard Hermitian property, which brings us to the consideration
of triangular matrices, both upper and lower, before we attack the
main problem. We form a family of matrices $\hat{A}$:
\begin{equation}
\hat{A}(a,b,c)=\left[\begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]
\end{equation}
We now input some other co-ordinate to form the matrix $\hat{A}'$:
\begin{equation}
\hat{A}'=\hat{A}(a',b',c')=\left[\begin{array}{ccc}
1 & a' & b'\\
0 & 1 & c'\\
0 & 0 & 1
\end{array}\right]
\end{equation}
The structure of the group is given by the anti-commutator and commutator,
being the fundamental symmetric and antisymmetric operators:
\[
\left\{ \hat{A}',\hat{A}\right\} =\left[\begin{array}{ccc}
1 & 2(a+a') & (b+b')+a'c+c'a\\
0 & 1 & (b+b')+2cc'\\
0 & 0 & 1
\end{array}\right]
\]
\begin{flushleft}
\begin{equation}
=\hat{A}(a'',b'',c'')
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\left[\hat{A},\hat{A}'\right]=\hat{A}\hat{A}'-\hat{A}'\hat{A}=\left[\begin{array}{ccc}
0 & 0 & 1\\
0 & 0 & 1\\
0 & 0 & 0
\end{array}\right](b-b')
\end{equation}
As we can see, these matrices form a nilpotent semigroup:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\left[\begin{array}{ccc}
0 & 0 & 1\\
0 & 0 & 1\\
0 & 0 & 0
\end{array}\right]\left[\begin{array}{ccc}
0 & 0 & 1\\
0 & 0 & 1\\
0 & 0 & 0
\end{array}\right]=\left[\begin{array}{ccc}
0 & 0 & 0\\
0 & 0 & 0\\
0 & 0 & 0
\end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{A}^{2}(a,b,c)=\left[\begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]\left[\begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
=\left[\begin{array}{ccc}
1 & 2a & 2b+ca\\
0 & 1 & 2c\\
0 & 0 & 1
\end{array}\right]=\hat{A}(2a,2b+ca,2c)
\end{equation}
Useful results:
\begin{equation}
\hat{A}^{2}=\hat{A}+\hat{X}=\left[\begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]+\left[\begin{array}{ccc}
1 & a & b+ca\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right];[\hat{A},\hat{X}]=0
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]\left[\begin{array}{ccc}
1 & a & b+ca\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]=\left[\begin{array}{ccc}
1 & 2a & 2(b+ca)\\
0 & 1 & 2c\\
0 & 0 & 1
\end{array}\right]
\end{equation}
The exponential matrix allows us to find the time dependence of the
state, and hence solve the differential equations:
\begin{equation}
\exp(-i\hat{A}t)=1-i\hat{A}t+\frac{1}{2!}(-i\hat{A}t)^{2}+...+\frac{1}{n!}(-i\hat{A}t)^{n}+...
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\exp(-i\hat{A}t)=1-i\hat{A}t+\frac{1}{2!}(-it)^{2}(\hat{A}+\hat{X})
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
+\frac{1}{3!}(-it)^{3}\hat{A}(\hat{A}+\hat{X})+\frac{1}{4!}(-it)^{4}(\hat{A}+\hat{X})^{2}+...
\end{equation}
Using the determinant we find the relevant polynomial eigenvalue
equation:
\begin{equation}
\left\Vert \begin{array}{ccc}
1-\lambda & a & b\\
0 & 1-\lambda & c\\
0 & 0 & 1-\lambda
\end{array}\right\Vert =det(\hat{A}-\lambda\hat{1})=(1-\lambda)^{3}
\end{equation}
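These algebraic facts are easy to confirm symbolically; the following
SymPy sketch checks the squaring rule, the unit determinant, the nilpotency
$(\hat{A}-\hat{1})^{3}=0$, and the characteristic polynomial quoted above.
\begin{verbatim}
import sympy as sp

a, b, c, lam = sp.symbols('a b c lambda')

def A(a, b, c):
    return sp.Matrix([[1, a, b], [0, 1, c], [0, 0, 1]])

M = A(a, b, c)
diff = M*M - A(2*a, 2*b + c*a, 2*c)                   # squaring rule
assert all(sp.expand(e) == 0 for e in diff)
assert sp.expand(M.det()) == 1                        # unit determinant
assert (M - sp.eye(3))**3 == sp.zeros(3)              # nilpotency
charpoly = (M - lam*sp.eye(3)).det()                  # characteristic polynomial
assert sp.expand(charpoly - (1 - lam)**3) == 0
print("triangular-matrix identities verified")
\end{verbatim}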
We can find a matrix which under the commutator sends upper triangular
to upper triangular:
\begin{equation}
\left[\begin{array}{ccc}
\omega_{1} & 0 & 0\\
0 & \omega_{2} & 0\\
0 & 0 & \omega_{3}
\end{array}\right]=\hat{\Omega}
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
0 & a(\omega_{1}-\omega_{2}) & b(\omega_{1}-\omega_{3})\\
0 & 0 & c(\omega_{2}-\omega_{3})\\
0 & 0 & 0
\end{array}\right]=\hat{\Omega}\hat{A}-\hat{A}\hat{\Omega}
\end{equation}
We observe that the determinant is nonzero, which allows us to invert the system of equations:
\begin{equation}
det(\hat{A})=\left\Vert \begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right\Vert =1
\end{equation}
Writing the differential equations in equivalent matrix form:
\begin{equation}
\hat{A}\overrightarrow{x}=\left[\begin{array}{ccc}
1 & a & b\\
0 & 1 & c\\
0 & 0 & 1
\end{array}\right]\left[\begin{array}{c}
x_{1}\\
x_{2}\\
x_{3}
\end{array}\right]=\left[\begin{array}{c}
x_{1}+ax_{2}+bx_{3}\\
x_{2}+cx_{3}\\
x_{3}
\end{array}\right]
\end{equation}
\begin{equation}
\hat{A}\overrightarrow{x}=\frac{d\vec{x}}{dt}\leftrightarrow\left[\begin{array}{c}
x_{1}+ax_{2}+bx_{3}\\
x_{2}+cx_{3}\\
x_{3}
\end{array}\right]=\left[\begin{array}{c}
dx_{1}/dt\\
dx_{2}/dt\\
dx_{3}/dt
\end{array}\right]
\end{equation}
We solve the system:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\frac{d^{n}x_{1}}{dt^{n}}=x_{1}+nax_{2}+\left(nb+\binom{n}{2}ac\right)x_{3}
\end{equation}
\begin{equation}
x_{1}(t)=\left(\frac{ac}{2}t^{2}+(bc+ak)t+\varrho\right)e^{t}
\end{equation}
\begin{equation}
x_{2}(t)=(k+ct)e^{t}
\end{equation}
\begin{equation}
x_{3}(t)=ce^{t}
\end{equation}
The dimensionality of the matrix group we are after, namely the Hermitian
operators, can be computed by decomposing the matrices into triangular
arrays. The dimension of the space is given by:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
1+2(1)=2^{2}-1
\end{equation}
\begin{equation}
2+2(1+2)=3^{2}-1
\end{equation}
\begin{equation}
3+2(1+2+3)=4^{2}-1
\end{equation}
\begin{equation}
n^{2}=n+2\sum_{j=1}^{n-1}j
\end{equation}
Taking a lower triangular matrix and evaluating the commutator with
an upper triangular matrix, we find
\begin{equation}
\hat{B}=\left[\begin{array}{ccc}
1 & 0 & 0\\
x & 1 & 0\\
y & z & 1
\end{array}\right]
\end{equation}
\begin{equation}
[\hat{A},\hat{B}]=\left[\begin{array}{ccc}
-(ax+by) & -bz & 0\\
-cy & ax-cz & bx\\
0 & ay & by+cz
\end{array}\right]
\end{equation}
Let
\begin{eqnarray*}
x & = & a^{*},\, y=b^{*},\, z=c^{*}
\end{eqnarray*}
then we find:
\begin{equation}
[\hat{A},\hat{B}]=\left[\begin{array}{ccc}
-(\left|a\right|^{2}+\left|b\right|^{2}) & -bc^{*} & 0\\
-cb^{*} & \left|a\right|^{2}-\left|c\right|^{2} & ba^{*}\\
0 & b^{*}a & \left|b\right|^{2}+\left|c\right|^{2}
\end{array}\right]
\end{equation}
Other than the diagonal entries, this matrix is close to the required
symmetry of the elliptic matrix operators. The relationship between
triangular matrices and Hermitian matrices is quite useful for understanding
many of the problems we deal with: any Hermitian matrix may be written
as the sum of a strictly upper triangular matrix, its conjugate transpose,
and a real diagonal matrix. We now return to the particular elliptic
Hamiltonian matrix. The Cayley-Hamilton formula for this matrix reads
as:
\begin{equation}
\tilde{H}^{3}=(\alpha^{2}+\beta^{2})\tilde{H}
\end{equation}
and we can therefore write the propagator in the form:
\begin{equation}
exp(-i\theta\tilde{H})=1+g_{1}(\theta)\tilde{H}+g_{2}(\theta)\tilde{H}^{2}
\end{equation}
\begin{equation}
\left\Vert \begin{array}{ccc}
-\lambda & \alpha & 0\\
\alpha & -\lambda & -i\beta\\
0 & i\beta & -\lambda
\end{array}\right\Vert =-\lambda^{3}+\lambda(\alpha^{2}+\beta^{2})
\end{equation}
\begin{equation}
\left[\tilde{H},\frac{d\tilde{H}}{dt}\right]=i(\dot{\alpha}\beta-\alpha\dot{\beta})\left[\begin{array}{ccc}
0 & 0 & 1\\
0 & 0 & 0\\
1 & 0 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\frac{d\tilde{H}}{dt},\tilde{H}^{2}\right]=(\alpha^{2}+\beta^{2})\frac{d\tilde{H}}{dt}+(\alpha\dot{\alpha}+\beta\dot{\beta})\left[\begin{array}{ccc}
0 & +\alpha & 0\\
-\alpha & 0 & i\beta\\
0 & -i\beta & 0
\end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\alpha\dot{\alpha}+\beta\dot{\beta}=0\Rightarrow\left[\frac{d\tilde{H}}{dt},\tilde{H}^{2}\right]=C\frac{d\tilde{H}}{dt}
\end{equation}
\begin{equation}
\tilde{H}(T)=\tilde{H}(0)\Rightarrow\tilde{H}(T)=\hat{U}^{\dagger}(T,0)\tilde{H}(0)\hat{U}(T,0)
\end{equation}
\begin{eqnarray}
\left[\tilde{H}(T),\hat{U}(T,0)\right]= & \left[\tilde{H}(0),\hat{U}(T,0)\right]= & 0
\end{eqnarray}
A periodic system has $\tilde{H}(nT)=\tilde{H}(0)$ for $n\in\mathbb{N}$,
so we obtain:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\left[\tilde{H}(0),\hat{U}(nT,0)\right]=0
\end{equation}
\begin{equation}
\hat{U}(nT,0)=e^{-in\omega T}\hat{\mathbf{1}}
\end{equation}
Initially, the time evolution operator has value unity:
\begin{equation}
\hat{U}(0,0)=\hat{\mathbf{1}}
\end{equation}
\begin{equation}
i\frac{d\hat{U}}{dt}=\tilde{H}(t)\hat{U}(t,0)
\end{equation}
\begin{equation}
\tilde{H}(t)=R\left[\begin{array}{ccc}
0 & \mathrm{cos(}\omega t) & 0\\
\mathrm{cos(}\omega t) & 0 & -i\mathrm{sin(}\omega t)\\
0 & i\mathrm{sin(}\omega t) & 0
\end{array}\right]
\end{equation}
This operator is periodic:
\begin{equation}
\tilde{H}[\frac{\pi}{\omega}(2n+1)]=\tilde{H}[0]
\end{equation}
with initial value:
\begin{equation}
\tilde{H}[0]=\left[\begin{array}{ccc}
0 & R & 0\\
R & 0 & 0\\
0 & 0 & 0
\end{array}\right]
\end{equation}
Unequal-time commutation identities describe the action of the subgroup:
\begin{equation}
\left[\tilde{H}(t),\tilde{H}(s)\right]=i(\beta(t)\alpha(s)-\beta(s)\alpha(t))\left[\begin{array}{ccc}
0 & 0 & -i\\
0 & 0 & 0\\
i & 0 & 0
\end{array}\right]
\end{equation}
We may directly integrate the Hamiltonian matrix to obtain:
\par\end{flushleft}
\begin{flushleft}
$\int_{t}^{t+T}\tilde{H}[s]ds=$
\[
\frac{R}{\omega}\mathrm{sin}(\omega t)\left[\begin{array}{ccc}
0 & \mathrm{cos}(\omega T)-1 & 0\\
\mathrm{cos}(\omega T)-1 & 0 & -i\mathrm{sin}(\omega T)\\
0 & i\mathrm{sin}(\omega T) & 0
\end{array}\right]
\]
\begin{equation}
+\frac{R}{\omega}\mathrm{cos}(\omega t)\left[\begin{array}{ccc}
0 & \mathrm{sin}(\omega T) & 0\\
\mathrm{sin}(\omega T) & 0 & i(\mathrm{cos}(\omega T)-1)\\
0 & -i(\mathrm{cos}(\omega T)-1) & 0
\end{array}\right]
\end{equation}
To calculate the full time dependence of the wave-vector, we must
exponentiate the above equation, which seems daunting. However, there
are useful tricks that may be applied consistently to bring the time
evolution operator into a more useful and physically intuitive format.
\par\end{flushleft}
\section{Invariants Of SU(3)}
The invariants of a system, in a sense, define the system itself.
Several combinations of the components of the wave-vector arise commonly
during calculations on SU(3), so by considering dynamical perspectives
from this transported reference frame we are able to see the crux
of the physics.
\begin{equation}
\tilde{H}(t)=\left[\begin{array}{ccc}
0 & \alpha & 0\\
\alpha & 0 & -i\beta\\
0 & i\beta & 0
\end{array}\right]
\end{equation}
Redefining our system of wave-vector co-ordinates and constraints:
\begin{eqnarray*}
f_{1}=c_{1}c_{3}^{*}-c_{1}^{*}c_{3}; & f_{2}=c_{2}c_{3}^{*}-c_{2}^{*}c_{3}; & f_{3}=c_{1}c_{2}^{*}+c_{1}^{*}c_{2};
\end{eqnarray*}
\begin{flushleft}
\begin{eqnarray*}
f_{4}=c_{2}c_{3}^{*}+c_{2}^{*}c_{3}; & f_{5}=c_{1}c_{3}^{*}+c_{1}^{*}c_{3}; & f_{6}=c_{1}c_{2}^{*}-c_{1}^{*}c_{2};
\end{eqnarray*}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\left|c_{1}\right|^{2}+\left|c_{2}\right|^{2}+\left|c_{3}\right|^{2}=1;\alpha^{2}+\beta^{2}=\mathrm{const.}
\end{equation}
We immediately find the system of equations:
\begin{equation}
{\rm Re}(f_{1,2})=0
\end{equation}
\begin{equation}
{\rm Im}(f_{3})=0
\end{equation}
This means that $f_{1}$ and $f_{2}$ are constrained to the imaginary
axis and $f_{3}$ to the real axis. We also find in general that:
\begin{equation}
\frac{d}{dt}\left\langle \tilde{H}\right\rangle =\left\langle \frac{d\tilde{H}}{dt}\right\rangle
\end{equation}
Substituting our transformed variables into the original dynamical
equations we obtain the system:
\begin{eqnarray}
\frac{df_{1}}{dt}=\frac{\alpha}{i}f_{4}-\beta f_{3}; & & \frac{df_{2}}{dt}=\frac{\alpha}{i}f_{5}-2\beta\left|c_{2}\right|^{2};
\end{eqnarray}
\begin{eqnarray}
\frac{df_{3}}{dt}=-\beta f_{5}; & & \frac{df_{4}}{dt}=\frac{\alpha}{i}f_{1}-2\beta\left|c_{3}\right|^{2}
\end{eqnarray}
\begin{eqnarray}
\frac{df_{5}}{dt}=\frac{\alpha}{i}f_{2}-\beta f_{6};
\end{eqnarray}
\begin{equation}
\frac{df_{6}}{dt}=2\alpha(\left|c_{2}\right|^{2}-\left|c_{1}\right|^{2})-\beta f_{2};
\end{equation}
\begin{equation}
\alpha\frac{df_{3}}{dt}+i\beta\frac{df_{2}}{dt}=0
\end{equation}
Defining an observable $\hat{O}$ and our algebra of states:
\begin{equation}
\left\langle \hat{O}\right\rangle =\left\langle \psi\left|\hat{O}\right|\psi\right\rangle
\end{equation}
\begin{equation}
\left|\psi\right\rangle =\left[\begin{array}{c}
c_{1}(t)\\
c_{2}(t)\\
c_{3}(t)
\end{array}\right],\left\langle \psi\right|=\left[\begin{array}{ccc}
c_{1}^{*} & c_{2}^{*} & c_{3}^{*}\end{array}\right]=(\left|\psi\right\rangle {}^{*})^{T}
\end{equation}
we derive the useful relations:
\begin{equation}
\frac{d}{dt}\left\langle \hat{O}\right\rangle =\left\langle \frac{d\hat{O}}{dt}\right\rangle +i\left\langle \left[\tilde{H},\hat{O}\right]\right\rangle
\end{equation}
\begin{equation}
\frac{d}{dt}\left(\hat{O}^{2}\right)=\left\{ \hat{O},\frac{d\hat{O}}{dt}\right\}
\end{equation}
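Both relations are standard and can be checked numerically for the elliptic
Hamiltonian. The sketch below propagates a state with small midpoint
steps and compares the two sides of the first relation for a fixed
observable (so that $d\hat{O}/dt=0$); all numerical parameters are
illustrative assumptions.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

R, w = 1.0, 0.7                              # illustrative constants

def H(t):                                    # elliptic SU(3) Hamiltonian
    al, be = R*np.cos(w*t), R*np.sin(w*t)
    return np.array([[0, al, 0], [al, 0, -1j*be], [0, 1j*be, 0]])

O = np.array([[1, 0.2, 0], [0.2, 0, 0.5j], [0, -0.5j, -1]])  # fixed Hermitian observable

dt, steps = 1e-4, 4000
psis = [np.array([1, 0, 0], dtype=complex)]
for n in range(steps):
    psis.append(expm(-1j*H((n + 0.5)*dt)*dt) @ psis[-1])     # midpoint stepping

ev = [float((p.conj() @ O @ p).real) for p in psis]
n = steps // 2
lhs = (ev[n+1] - ev[n-1]) / (2*dt)                           # d<O>/dt
comm = H(n*dt) @ O - O @ H(n*dt)
rhs = float((1j * psis[n].conj() @ comm @ psis[n]).real)     # i<[H,O]>
print(abs(lhs - rhs) < 1e-4)                                 # expected: True
\end{verbatim}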
After some work, we obtain the fundamental dynamics:
\[
\left\langle \left\{ \tilde{H},\frac{d\tilde{H}}{dt}\right\} \right\rangle =i(\alpha\dot{\beta}+\dot{\alpha}\beta)f_{1}
\]
\begin{equation}
+2(\alpha\dot{\alpha}\left|c_{1}\right|^{2}+(\alpha\dot{\alpha}+\dot{\beta}\beta)\left|c_{2}\right|^{2}+\beta\dot{\beta}\left|c_{3}\right|^{2})
\end{equation}
\begin{equation}
\left[\tilde{H},\tilde{H}^{2}\right]=0
\end{equation}
\begin{equation}
\frac{d}{dt}\left\langle \tilde{H}^{2}\right\rangle =\left\langle \left\{ \tilde{H},\frac{d\tilde{H}}{dt}\right\} \right\rangle
\end{equation}
The average of the energy, energy variance and energy squared may
be given in terms of these functions for this system.
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\Delta E=\sqrt{\left\langle \dot{\psi}\left|(\mathbf{1}-\left|\psi\right\rangle \left\langle \psi\right|)\right|\dot{\psi}\right\rangle }
\end{equation}
\begin{equation}
\left\langle \tilde{H}\right\rangle =\alpha(c_{1}^{*}c_{2}+c_{1}c_{2}^{*})+i\beta(c_{2}c_{3}^{*}-c_{2}^{*}c_{3})
\end{equation}
\[
\left\langle \tilde{H}^{2}\right\rangle =\alpha^{2}(\left|c_{1}\right|^{2}+\left|c_{2}\right|^{2})+\beta^{2}(\left|c_{2}\right|^{2}+\left|c_{3}\right|^{2})
\]
\begin{equation}
+i\beta\alpha(c_{1}c_{3}^{*}-c_{1}^{*}c_{3})
\end{equation}
The total amount of probability is conserved:
\begin{equation}
\frac{d}{dt}\left\langle \psi(t)\right.\left|\psi(t)\right\rangle =0
\end{equation}
hence we find:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\beta^{2}\frac{d}{dt}\left|c_{1}\right|^{2}+\alpha^{2}\frac{d}{dt}\left|c_{3}\right|^{2}=i\beta\alpha\frac{d}{dt}(c_{1}c_{3}^{*}-c_{1}^{*}c_{3})
\end{equation}
\begin{equation}
\alpha\beta(\alpha c_{2}^{*}c_{3}+\beta c_{1}^{*}c_{2})=0
\end{equation}
\begin{eqnarray}
\alpha^{2}+\beta^{2}=R^{2} & \Rightarrow & \alpha\dot{\alpha}+\beta\dot{\beta}=0
\end{eqnarray}
\begin{eqnarray}
\frac{d}{dt}(\left|c_{1}\right|^{2}+\left|c_{2}\right|^{2}+\left|c_{3}\right|^{2})=0 & \Rightarrow & \frac{\alpha}{i}f_{3}+\beta f_{4}=0
\end{eqnarray}
To calculate the probability of any particular event, we use:
\begin{equation}
P(j|\psi)=\left|c_{j}(t)\right|^{2}=\left|\left\langle j|\psi\right\rangle \right|^{2}
\end{equation}
Obviously this is exhaustive and sums to unity.
\par\end{flushleft}
\section{Quantum Frenet--Serret Formula}
The Frenet--Serret formulae of classical differential geometry relate
the moving frame of a curve to two of its intrinsic properties, the
curvature and the torsion. This system of equations has a fundamental
similarity to our Hamiltonian matrix, and as such we consider a new
example which is equivalent in all respects to the Frenet--Serret
formulae. By applying our method to a problem with a known solution,
we provide a valuable check on our formalism. Firstly, assume the
dynamical system:
\begin{flushleft}
\begin{equation}
i\frac{d}{dt}\left[\begin{array}{c}
c_{1}\\
c_{2}\\
c_{3}
\end{array}\right]=\left[\begin{array}{ccc}
0 & -iK(t) & 0\\
iK(t) & 0 & -iT(t)\\
0 & iT(t) & 0
\end{array}\right]\left[\begin{array}{c}
c_{1}\\
c_{2}\\
c_{3}
\end{array}\right]
\end{equation}
Writing the explicitly parameter dependent Hamiltonian matrix and
constraint:
\begin{equation}
\tilde{H}(t)=\left[\begin{array}{ccc}
0 & -iK(t) & 0\\
iK(t) & 0 & -iT(t)\\
0 & iT(t) & 0
\end{array}\right];\tilde{F}=\left[\begin{array}{ccc}
\omega_{1} & \alpha & \chi\\
\alpha & \omega_{2} & \beta\\
\chi^{*} & \beta & \omega_{3}
\end{array}\right]
\end{equation}
\begin{equation}
K,T,\alpha,\beta,\omega_{j}:[0,\infty)\rightarrow\mathbb{R}
\end{equation}
\begin{equation}
\chi,\chi^{*}:[0,\infty)\rightarrow\mathbb{C}
\end{equation}
\begin{equation}
\omega_{1}+\omega_{2}+\omega_{3}=0
\end{equation}
Evaluating the quantum brachistochrone equation:
\begin{equation}
i\frac{d}{dt}\left(\tilde{H}+\tilde{F}\right)=\left[\tilde{H},\tilde{F}\right]
\end{equation}
we obtain the equivalent set of relations:
\begin{equation}
{\rm Im}(\chi)=\mathrm{const.}=\eta
\end{equation}
\begin{equation}
\frac{d}{dt}\left(\frac{\chi-\chi^{*}}{2i}\right)=0
\end{equation}
Hence we may write the dynamics of the curve in matrix form as:
\begin{equation}
\frac{d}{dt}\left[\begin{array}{c}
K(t)\\
T(t)
\end{array}\right]=\left[\begin{array}{cc}
0 & -\eta\\
\eta & 0
\end{array}\right]\left[\begin{array}{c}
K(t)\\
T(t)
\end{array}\right]
\end{equation}
This admits solutions:
\begin{equation}
T(t)=A\mathrm{sin}(\eta t)+B\mathrm{cos}(\eta t)
\end{equation}
\begin{equation}
K(t)=C\mathrm{sin}(\eta t)+N\mathrm{cos}(\eta t)
\end{equation}
There is no real difference between our method and the Frenet--Serret
formulation other than where (and whether) the complex unit is introduced:
\begin{equation}
\frac{d\left|\psi\right\rangle }{dt}=\hat{A}(t)\left|\psi\right\rangle
\end{equation}
\begin{equation}
\tilde{H}(t)=i\hat{A}(t)
\end{equation}
The solution Hamiltonian has periodicity and initial values given
by:
\begin{equation}
\tilde{H}(0)=\tilde{H}(\frac{2n\pi}{\eta})=i\left[\begin{array}{ccc}
0 & -N & 0\\
N & 0 & -B\\
0 & B & 0
\end{array}\right]
\end{equation}
\begin{equation}
\tilde{H}(\frac{n\pi}{\eta})=(-1)^{n}i\left[\begin{array}{ccc}
0 & -C & 0\\
C & 0 & -A\\
0 & A & 0
\end{array}\right]
\end{equation}
In terms of the unitary this means we can write:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{U}(T,0)\tilde{H}(0)\hat{U}^{\dagger}(T,0)=\tilde{H}(T)=\tilde{H}(0)
\end{equation}
Our eigenvalue equation reads as:
\begin{equation}
det(\tilde{H}-\lambda\hat{\mathbf{1}})=\left\Vert \begin{array}{ccc}
-\lambda & -iK & 0\\
iK & -\lambda & -iT\\
0 & iT & -\lambda
\end{array}\right\Vert =0
\end{equation}
Giving us the cubic polynomial:
\begin{equation}
-\lambda^{3}+\lambda(K^{2}+T^{2})=0
\end{equation}
Now, our initial Hamiltonian was constrained to finite energy:
\begin{equation}
Tr(\tilde{H}^{2}/2)=\mathrm{const.}
\end{equation}
Hence:
\begin{equation}
K^{2}+T^{2}=R^{2}
\end{equation}
The solution is a circle. This fact is well known. One may consider
various different scenarios with respect to the Hamiltonian-time symmetry:
\begin{equation}
\left\langle \psi(0)\left|\tilde{H}(t)\right|\psi(0)\right\rangle =\left\langle \psi(t)\left|\tilde{H}(0)\right|\psi(t)\right\rangle
\end{equation}
Computing this for our Hamiltonian matrix we find:
\begin{equation}
\left\langle \psi(t)\left|\tilde{H}(0)\right|\psi(t)\right\rangle =iR\left[e^{i\omega t}z-e^{-i\omega t}z^{*}\right]
\end{equation}
\begin{equation}
\frac{d}{dt}\left\{ \left\langle \psi(0)\left|\tilde{H}(t)\right|\psi(0)\right\rangle \right\} =\frac{1}{i}\left\langle \psi(t)\left|\left[\tilde{H}(0),\tilde{H}(t)\right]\right|\psi(t)\right\rangle
\end{equation}
If we rescale the time parameter, the Hamiltonian takes the simpler
form:
\begin{equation}
\tilde{H}(t)\Rightarrow\left[\begin{array}{ccc}
0 & -i\mathrm{cos}(t) & 0\\
i\mathrm{cos}(t) & 0 & -i\mathrm{sin}(t)\\
0 & i\mathrm{sin}(t) & 0
\end{array}\right],\quad t\Rightarrow Rt
\end{equation}
The dynamical equation of state is:
\begin{equation}
\tilde{H}\left|\psi\right\rangle =R(C_{+}\left|+\right\rangle -C_{-}\left|-\right\rangle )
\end{equation}
which has eigenvectors given by:
\begin{equation}
\left|+\right\rangle =\frac{1}{\sqrt{2}}\left[\begin{array}{c}
-i\mathrm{cos}(t)\\
1\\
i\mathrm{sin}(t)
\end{array}\right]
\end{equation}
\begin{equation}
\left|0\right\rangle =\left[\begin{array}{c}
i\mathrm{sin}(t)\\
0\\
i\mathrm{cos}(t)
\end{array}\right]
\end{equation}
\begin{equation}
\left|-\right\rangle =\frac{1}{\sqrt{2}}\left[\begin{array}{c}
i\mathrm{cos}(t)\\
1\\
-i\mathrm{sin}(t)
\end{array}\right]
\end{equation}
It is pleasing that this methodology delivers the standard answer
to such a well-known question. The total curvature is a constant;
the geometric figure is periodic.
\par\end{flushleft}
\section{Unitary Transformations of SU(3)}
We wish to find all the unitary transformations of SU(3), in order
to understand the geometry of states. In this case, our first step
is to examine the transformations that take the time dependent Hamiltonian
matrix from the previous worked example to a diagonal representation:
\begin{flushleft}
\begin{equation}
\tilde{H}_{FS}(t)=\hat{D}(t)\hat{L}\hat{D}^{\dagger}(t)
\end{equation}
\begin{equation}
\hat{L}=\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & -1 & 0\\
0 & 0 & 0
\end{array}\right]
\end{equation}
We find:
\begin{equation}
\hat{D}(t)=\left[\begin{array}{ccc}
-\dfrac{i}{\sqrt{2}}\mathrm{cos}(t) & \dfrac{i}{\sqrt{2}}\mathrm{cos}(t) & i\mathrm{sin}(t)\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{i}{\sqrt{2}}\mathrm{sin}(t) & -\dfrac{i}{\sqrt{2}}\mathrm{sin}(t) & i\mathrm{cos}(t)
\end{array}\right]
\end{equation}
This has the required unitary properties:
\begin{equation}
\hat{D}(t)\hat{D}^{\dagger}(t)=\hat{D^{\dagger}}(t)\hat{D}(t)=\hat{\mathbf{1}}
\end{equation}
with initial value given by:
\begin{equation}
\hat{D}(0)=\left[\begin{array}{ccc}
-\dfrac{i}{\sqrt{2}} & \dfrac{i}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & i
\end{array}\right]
\end{equation}
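A quick NumPy check (at an arbitrary time $t$) that $\hat{D}(t)$ above
is unitary and diagonalises the Frenet--Serret Hamiltonian:
\begin{verbatim}
import numpy as np

t = 0.9                                   # arbitrary time
c, s = np.cos(t), np.sin(t)
r2 = np.sqrt(2)

D = np.array([[-1j*c/r2,  1j*c/r2, 1j*s],
              [    1/r2,     1/r2,    0],
              [ 1j*s/r2, -1j*s/r2, 1j*c]])
L = np.diag([1.0, -1.0, 0.0])
H_FS = np.array([[0, -1j*c, 0], [1j*c, 0, -1j*s], [0, 1j*s, 0]])

print(np.allclose(D @ D.conj().T, np.eye(3)))     # unitarity
print(np.allclose(D @ L @ D.conj().T, H_FS))      # diagonalisation of H_FS(t)
\end{verbatim}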
We may write the time evolution of this operator in terms of an integral
equation:
\begin{equation}
\hat{D}(t)=exp(i\hat{L}t)\hat{D}(0)-i\int_{0}^{t}\tilde{H}(s)\hat{D}(s)ds
\end{equation}
Now, consider the eigenvectors of our elliptic Hamiltonian matrix:
\begin{equation}
\left[\begin{array}{ccc}
0 & \mathrm{cos}\phi & 0\\
\mathrm{cos}\phi & 0 & -i\mathrm{sin}\phi\\
0 & i\mathrm{sin}\phi & 0
\end{array}\right]\frac{1}{\sqrt{2}}\left[\begin{array}{c}
\mathrm{cos}\phi\\
1\\
i\mathrm{sin}\phi
\end{array}\right]=\frac{1}{\sqrt{2}}\left[\begin{array}{c}
\mathrm{cos}\phi\\
1\\
i\mathrm{sin}\phi
\end{array}\right]
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
0 & \mathrm{cos}\phi & 0\\
\mathrm{cos}\phi & 0 & -i\mathrm{sin}\phi\\
0 & i\mathrm{sin}\phi & 0
\end{array}\right]\left[\begin{array}{c}
-\mathrm{sin}\phi\\
0\\
i\mathrm{cos}\phi
\end{array}\right]=\left[\begin{array}{c}
0\\
0\\
0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
0 & \mathrm{cos}\phi & 0\\
\mathrm{cos}\phi & 0 & -i\mathrm{sin}\phi\\
0 & i\mathrm{sin}\phi & 0
\end{array}\right]\frac{1}{\sqrt{2}}\left[\begin{array}{c}
-\mathrm{cos}\phi\\
1\\
-i\mathrm{sin}\phi
\end{array}\right]=\frac{-1}{\sqrt{2}}\left[\begin{array}{c}
-\mathrm{cos}\phi\\
1\\
-i\mathrm{sin}\phi
\end{array}\right]
\end{equation}
Writing a matrix with these eigenvectors as columns, we obtain:
\begin{equation}
\hat{J}(\phi)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}}\mathrm{cos}\phi & -\dfrac{1}{\sqrt{2}}\mathrm{cos}\phi & -\mathrm{sin}\phi\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{i}{\sqrt{2}}\mathrm{sin}\phi & -\dfrac{i}{\sqrt{2}}\mathrm{sin}\phi & i\mathrm{cos}\phi
\end{array}\right]
\end{equation}
This matrix is unitary:
\begin{equation}
\hat{J}(\phi)\hat{J^{\dagger}}(\phi)=\hat{J^{\dagger}}(\phi)\hat{J}(\phi)=\hat{\mathbf{1}}
\end{equation}
\begin{equation}
\hat{J}(\phi)\hat{L}\hat{J^{\dagger}}(\phi)=\left[\begin{array}{ccc}
0 & \mathrm{cos}\phi & 0\\
\mathrm{cos}\phi & 0 & -i\mathrm{sin}\phi\\
0 & i\mathrm{sin}\phi & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{J}(0)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}} & -\dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & i
\end{array}\right]
\end{equation}
and has initial value given by a Hadamard gate with a $\frac{\pi}{2}$
rotation on the ancilla. Finally, using the completely complex elliptic
Hamiltonian
\begin{equation}
\tilde{H}=\left[\begin{array}{ccc}
0 & \epsilon_{1} & 0\\
\epsilon_{1}^{*} & 0 & \epsilon_{2}\\
0 & \epsilon_{2}^{*} & 0
\end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{flushleft}
Eigenvectors:
\begin{eqnarray}
\frac{1}{\sqrt{2}}\left[\begin{array}{c}
\mathrm{cos}\varphi\\
1\\
ie^{i\varrho}\mathrm{sin}\varphi
\end{array}\right] & \& & \left[\begin{array}{c}
ie^{-i\varrho}\mathrm{sin}\varphi\\
0\\
\mathrm{cos}\varphi
\end{array}\right]
\end{eqnarray}
\[
\&\frac{1}{\sqrt{2}}\left[\begin{array}{c}
-\mathrm{cos}\varphi\\
1\\
-ie^{i\varrho}\mathrm{sin}\varphi
\end{array}\right],\,\varrho=\mathrm{const.}\in[0,2\pi)
\]
\begin{equation}
\hat{Q}(\varphi)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}}\mathrm{cos}\varphi & -\dfrac{1}{\sqrt{2}}\mathrm{cos}\varphi & ie^{-i\varrho}\mathrm{sin}\varphi\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{i}{\sqrt{2}}e^{i\varrho}\mathrm{sin}\varphi & -\dfrac{i}{\sqrt{2}}e^{i\varrho}\mathrm{sin}\varphi & \mathrm{cos}\varphi
\end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{Q}(0)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}} & -\dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
The initial value of this operator is a Hadamard matrix with an ancilla
bit set to 1. It is unitary:
\begin{equation}
\hat{Q^{\dagger}}(\varphi)\hat{Q}(\varphi)=\hat{Q}(\varphi)\hat{Q^{\dagger}}(\varphi)=1
\end{equation}
\begin{equation}
\hat{Q}(\varphi)\hat{L}\hat{Q^{\dagger}}(\varphi)=\left[\begin{array}{ccc}
0 & \mathrm{cos\varphi} & 0\\
\mathrm{cos}\varphi & 0 & -ie^{-i\varrho}\mathrm{sin}\varphi\\
0 & ie^{i\varrho}\mathrm{sin}\varphi & 0
\end{array}\right]
\end{equation}
We may summarise these formulae on unitaries into the compact identities:
\begin{equation}
S(\hat{K}[t];t)=\int_{0}^{t}\hat{K}(s)\hat{L}\hat{K}^{\dagger}(s)ds
\end{equation}
\begin{equation}
\dfrac{\delta S}{\delta\Lambda}=0\Rightarrow\dfrac{\delta\hat{K}}{\delta\Lambda}\hat{L}\hat{K}^{\dagger}+\hat{K}\hat{L}\dfrac{\delta\hat{K^{\dagger}}}{\delta\Lambda}=0
\end{equation}
\begin{equation}
\left[\tilde{H},\hat{K}\right]\hat{L}\hat{K}^{\dagger}=\hat{K}\hat{L}\left[\tilde{H},\hat{K}^{\dagger}\right]
\end{equation}
\begin{equation}
\left\{ \tilde{H},\hat{K}\hat{L}\hat{K}^{\dagger}\right\} =\hat{K}\left\{ \tilde{H},\hat{L}\right\} \hat{K}^{\dagger}
\end{equation}
Our propagator for the full Hamiltonian may be written in the compact
form:
\[
\hat{U}(\theta,0)=exp(-i\int_{0}^{\theta}ds\tilde{H}(s))
\]
\[
=\left[\begin{array}{ccc}
1+(c-1)c^{2} & -isc & -i(c-1)sc\\
-isc & c & -s^{2}\\
i(c-1)sc & s^{2} & 1+(c-1)s^{2}
\end{array}\right]
\]
\begin{equation}
c=\mathrm{cos}\theta,\, s=\mathrm{sin}\theta
\end{equation}
\begin{equation}
\hat{U}^{\dagger}(\theta,0)\hat{U}(\theta,0)=\hat{U}(\theta,0)\hat{U^{\dagger}}(\theta,0)=\hat{\mathbf{1}}
\end{equation}
\begin{equation}
\hat{U}(\theta,0)=\hat{\mathbf{1}}-i\mathrm{sin}\theta\tilde{H}+(\mathrm{cos}\theta-1)\tilde{H}^{2}
\end{equation}
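This truncation of the exponential series follows from $\tilde{H}^{3}=\tilde{H}$
when $Tr(\tilde{H}^{2}/2)=1$; the sketch below checks it numerically
for a normalised elliptic Hamiltonian, using an independent mixing angle
$\phi$ and evolution angle $\theta$ as arbitrary illustrative values.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

phi, theta = 0.7, 1.9                    # arbitrary mixing and evolution angles
al, be = np.cos(phi), np.sin(phi)        # alpha^2 + beta^2 = 1
H = np.array([[0, al, 0], [al, 0, -1j*be], [0, 1j*be, 0]])

U_closed = np.eye(3) - 1j*np.sin(theta)*H + (np.cos(theta) - 1)*(H @ H)
print(np.allclose(U_closed, expm(-1j*theta*H)))                # expected: True
print(np.allclose(U_closed @ U_closed.conj().T, np.eye(3)))    # unitarity
\end{verbatim}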
One final useful unitary transformation is the equivalent NOT-gate,
which can be used to flip two state labels:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{N}\left[\begin{array}{ccc}
0 & \epsilon_{1} & 0\\
\epsilon_{1}^{*} & 0 & \epsilon_{2}\\
0 & \epsilon_{2}^{*} & 0
\end{array}\right]\hat{N}^{\dagger}=\left[\begin{array}{ccc}
0 & \epsilon_{2}^{*} & 0\\
\epsilon_{2} & 0 & \epsilon_{1}^{*}\\
0 & \epsilon_{1} & 0
\end{array}\right];\hat{N}=\left[\begin{array}{ccc}
0 & 0 & 1\\
0 & 1 & 0\\
1 & 0 & 0
\end{array}\right]
\end{equation}
Writing $\left|v_{j}\right\rangle $ for the $j$-th column of $\hat{U}$,
it is simple to show that $\left\langle v_{j}\right.\left|v_{k}\right\rangle =\delta_{jk}$.
Any rotation in this space is composed of these fundamental operators.
We now compute commutators of our fundamental transformations with
the matrix $\hat{L}$:
\par\end{flushleft}
\begin{flushleft}
$\left[\hat{L},\hat{U}(\theta,0)\right]=$
\begin{equation}
\left[\begin{array}{ccc}
0 & -2i\mathrm{sin}\theta\mathrm{cos}\theta & i(\mathrm{cos}\theta-1)\mathrm{sin}\theta\mathrm{cos}\theta\\
-2i\mathrm{sin}\theta\mathrm{cos}\theta & 0 & \mathrm{sin}^{2}\theta\\
-i\mathrm{sin}\theta\mathrm{cos}\theta(\mathrm{cos}\theta-1) & -\mathrm{sin}^{2}\theta & 0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\hat{L},\hat{J}(\phi)\right]=\left[\begin{array}{ccc}
0 & -\sqrt{2}\mathrm{cos}\phi & -\mathrm{sin}\phi\\
-\sqrt{2} & 0 & 0\\
-\frac{i}{\sqrt{2}}\mathrm{sin}\phi & -\frac{i}{\sqrt{2}}\mathrm{sin}\phi & 0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\hat{L},\hat{Q}(\varphi)\right]=\left[\begin{array}{ccc}
0 & -\sqrt{2}\mathrm{cos}\varphi & ie^{-i\varrho}\mathrm{sin}\varphi\\
-\sqrt{2} & 0 & 0\\
-\frac{i}{\sqrt{2}}e^{i\varrho}\mathrm{sin}\varphi & -\frac{i}{\sqrt{2}}e^{i\varrho}\mathrm{sin}\varphi & 0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\hat{L},\hat{D}(\chi)\right]=\left[\begin{array}{ccc}
0 & i\sqrt{2}\mathrm{cos}\chi & i\mathrm{sin}\chi\\
-\sqrt{2} & 0 & 0\\
-\frac{i}{\sqrt{2}}\mathrm{sin}\chi & -\frac{i}{\sqrt{2}}\mathrm{sin}\chi & 0
\end{array}\right]
\end{equation}
Hence our fundamental rotations form compact semi-groups with the
operator $\hat{L}$. One may define a sequence of polynomials via
the time evolution operator, viz.:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{U}^{-1}(\theta,0)=\left[\dfrac{P_{l,m}(\mathrm{cos}\theta)}{q(\mathrm{cos}\theta)}\right]
\end{equation}
We have eigenreflections:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{M}_{j}=\hat{\mathbf{1}}-\left|v_{j}\right\rangle \left\langle v_{j}\right|
\end{equation}
\begin{equation}
\hat{M}_{1}=\frac{1}{2}\left[\begin{array}{ccc}
1+\mathrm{sin}^{2}\xi & -\mathrm{cos}\xi & i\mathrm{sin}\xi\mathrm{cos}\xi\\
-\mathrm{cos}\xi & 1 & i\mathrm{sin}\xi\\
-i\mathrm{sin}\xi\mathrm{cos}\xi & -i\mathrm{sin}\xi & 1+\mathrm{cos^{2}}\xi
\end{array}\right]
\end{equation}
\begin{equation}
\hat{M}_{2}=\left[\begin{array}{ccc}
\mathrm{sin}^{2}\xi & 0 & -i\mathrm{sin}\xi\mathrm{cos}\xi\\
0 & 1 & 0\\
i\mathrm{sin}\xi\mathrm{cos}\xi & 0 & 1+\mathrm{cos^{2}}\xi
\end{array}\right]
\end{equation}
\begin{equation}
\hat{M}_{3}=\frac{1}{2}\left[\begin{array}{ccc}
1+\mathrm{sin}^{2}\xi & \mathrm{cos}\xi & i\mathrm{sin}\xi\mathrm{cos}\xi\\
\mathrm{cos}\xi & 1 & -i\mathrm{sin}\xi\\
-i\mathrm{sin}\xi\mathrm{cos}\xi & i\mathrm{sin}\xi & 1+\mathrm{cos^{2}}\xi
\end{array}\right]
\end{equation}
These eigenreflections obey the periodicity condition:
\begin{equation}
\hat{M}_{2}(-\xi)=\hat{M}_{1}(\xi)+\hat{M}_{3}(\xi)
\end{equation}
which is directly related to the equation of state:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\left|3\right\rangle _{(t+\frac{\pi}{2})}-\left|1\right\rangle _{(t+\frac{\pi}{2})}=\sqrt{2}\left|2\right\rangle _{(t)}
\end{equation}
\par\end{flushleft}
\section{Elliptic Polynomials}
\begin{flushleft}
Using the formula for the propagator, and making the substitution
$z=\mathrm{cos}\theta$ we obtain the associated z-polynomials:
\begin{equation}
P_{l,m}(z)=q(z)\left[\hat{U}^{-1}\right]_{l,m}
\end{equation}
We display several of these functions:
\begin{equation}
q(z)=4z^{7}-4z^{6}-8z^{5}+8z^{4}+4z^{3}-6z^{2}+1
\end{equation}
\begin{equation}
P_{1,1}(z)=2z^{4}-z^{3}-3z^{2}+1=p(z)
\end{equation}
\begin{equation}
P_{3,3}(z)=-2z^{4}+z^{3}+z^{2}-z=P(z)
\end{equation}
\begin{equation}
q(z)+P_{3,3}(z)=Q(z)
\end{equation}
\begin{equation}
Q(z)=4z^{7}-4z^{6}-8z^{5}+6z^{4}+5z^{3}-5z^{2}-z+1
\end{equation}
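The relation $Q(z)=q(z)+P_{3,3}(z)$ can be checked coefficient by coefficient; a minimal NumPy sketch using the coefficients exactly as listed above:
\begin{verbatim}
import numpy as np

# Coefficients in descending powers of z, padded to degree 7.
q   = np.array([4, -4, -8,  8,  4, -6,  0,  1])   # q(z)
P33 = np.array([0,  0,  0, -2,  1,  1, -1,  0])   # P_{3,3}(z)
Q   = np.array([4, -4, -8,  6,  5, -5, -1,  1])   # Q(z) as quoted

print(np.array_equal(q + P33, Q))   # True
\end{verbatim}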
We may apply a Laplace transform to these z-polynomials:
\begin{equation}
L[F(z)]=L_{F}[s]=\int_{0}^{\infty}F[z(\theta)]e^{-s\theta}d\theta
\end{equation}
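As a small symbolic illustration of this transform, applied here only to a single power of $z=\mathrm{cos}\theta$ (a test case chosen for this sketch, not one of the functionals below), SymPy gives the expected rational function of $s$:
\begin{verbatim}
import sympy as sp

theta, s = sp.symbols('theta s', positive=True)

# Laplace transform (over theta) of cos(theta)^2; the result is a
# rational function of s of the same general type as L_q, L_Q, L_p.
F = sp.laplace_transform(sp.cos(theta)**2, theta, s, noconds=True)
print(sp.simplify(F))   # (s**2 + 2)/(s*(s**2 + 4))
\end{verbatim}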
These transformations enable us to move between the different representations
in this space. They are analogous to the matrices used earlier to
transform to the eigenbasis. These transformed functionals are rational
polynomials with various interesting properties:
\begin{equation}
L_{q}[s]=\frac{b_{q}(s)}{r_{q}(s)};
\end{equation}
\begin{eqnarray}
L_{Q}[s]=\frac{b_{Q}(s)}{r_{Q}(s)}; & & L_{p}[s]=\frac{b_{p}(s)}{r_{p}(s)}
\end{eqnarray}
\begin{equation}
b_{q}(s)=s^{7}-125s^{5}+24s^{4}+192s^{3}-960s^{2}-2880s+20160
\end{equation}
\begin{equation}
r_{q}(s)=s^{8}
\end{equation}
\[
b_{Q}(s)=-2(s^{12}+120s^{10}+5016s^{8}+86527s^{6}
\]
\begin{equation}
+550413s^{4}+895923s^{2}+396900)
\end{equation}
\begin{equation}
r_{Q}(s)=s(s^{2}+1^{2})(s^{2}+2^{2})\times\cdots\times(s^{2}+7^{2})=s\prod_{k=1}^{7}(s^{2}+k^{2})
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
b_{p}(s)=-(s^{8}+29s^{6}+208s^{4}+306s^{2}-144)
\end{equation}
\begin{equation}
r_{p}(s)=r_{P}(s)=s\prod_{k=1}^{4}(s^{2}+k^{2})
\end{equation}
For a small circle centred on the origin:
\begin{eqnarray}
\ointop_{\Gamma}\frac{b_{q}(z)}{r_{q}(z)}dz=1 & & \ointop_{\Gamma}\frac{b_{p}(z)}{r_{p}(z)}dz=\frac{1}{4}
\end{eqnarray}
\begin{eqnarray}
\ointop_{\Gamma}\frac{b_{Q}(z)}{r_{Q}(z)}dz=-\frac{1}{2} & & \ointop_{\Gamma}\frac{b_{P}(z)}{r_{P}(z)}dz=-\frac{1}{4}
\end{eqnarray}
We may further Fourier transform our functions to find e.g.:
\begin{equation}
F_{W}(k)=\int_{0}^{\infty}e^{-iks}L_{W}(s)ds
\end{equation}
\begin{equation}
F_{P}(k)=\int_{0}^{\infty}e^{-iks}L_{P}(s)ds
\end{equation}
\begin{equation}
F_{P}(k)=\dfrac{i\pi}{4}\left(u[-k]H[k]-u[k]H[-k]\right)
\end{equation}
\begin{equation}
u[k]=e^{4k}-e^{3k}+2e^{2k}+e^{k}+1
\end{equation}
\begin{equation}
H[k]=\left\{ \begin{array}{c}
0;\,k<0\\
1;\,k>0
\end{array}\right.
\end{equation}
The other transformation we require is given by the Mellin integral:
\begin{equation}
M[\alpha]=\int_{0}^{\infty}z^{\alpha-1}f(z)dz
\end{equation}
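As a minimal symbolic example of this integral (applied here to the test function $e^{-z}$, chosen only for illustration), SymPy reproduces the Gamma-function factors that appear in the expressions below:
\begin{verbatim}
import sympy as sp

z, a = sp.symbols('z alpha', positive=True)

# Mellin transform M[alpha] = Integral(z**(alpha-1)*f(z), (z, 0, oo));
# for f(z) = exp(-z) this returns gamma(alpha) with its fundamental strip.
print(sp.mellin_transform(sp.exp(-z), z, a))
# (gamma(alpha), (0, oo), True)
\end{verbatim}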
One immediately obtains the p-adic sequences:
\begin{eqnarray*}
& M_{FLP}[\alpha]=M[F_{P}(z)], & M_{FLQ}[\alpha]=M[F_{Q}(z)]
\end{eqnarray*}
\[
M_{FLp}[\alpha]=M[F_{p}(z)],
\]
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
M_{FLP}[\alpha]=\dfrac{i\pi}{4}\Gamma(\alpha)\left\{ \frac{1}{4^{\alpha}}-\dfrac{1}{3^{\alpha}}+\dfrac{2}{2^{\alpha}}+1\right\} +\dfrac{i\pi\Xi(\alpha)}{4}
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
M_{FLp}[\alpha]=\dfrac{i\pi}{4}\Gamma(\alpha)\left\{ -\frac{1}{4^{\alpha}}+\dfrac{1}{3^{\alpha}}+\dfrac{2}{2^{\alpha}}+3\right\} -\dfrac{i\pi\Xi(\alpha)}{4}
\end{equation}
\begin{equation}
M[F_{Q}(z)]=\dfrac{i\pi}{16}\Gamma(\alpha)\left\{ -\frac{1}{7^{\alpha}}+\dfrac{2}{6^{\alpha}}+\dfrac{1}{5^{\alpha}}+\dfrac{22}{2^{\alpha}}+1\right\} +\dfrac{i\pi\Xi(\alpha)}{2}
\end{equation}
Mellin integrating the original Laplace transform, we find for $|\alpha|<1$:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
M_{LP}[\alpha]=\dfrac{\pi}{8}\dfrac{Z(\alpha)}{\mathrm{sin}\left(\dfrac{\pi(\alpha-1)}{2}\right)}
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
M_{LQ}[\alpha]=\dfrac{\pi}{96}\dfrac{Y(\alpha)}{\mathrm{sin}\left(\dfrac{\pi(\alpha-1)}{2}\right)}
\end{equation}
\begin{equation}
M_{Lp}[\alpha]=-\dfrac{\pi}{8}\dfrac{X(\alpha)}{\mathrm{sin}\left(\dfrac{\pi(\alpha-1)}{2}\right)}
\end{equation}
\begin{equation}
X(\alpha)=\left(-3+2^{\alpha}-3^{\alpha-1}+2^{2(\alpha-1)}\right)
\end{equation}
\begin{equation}
Y(\alpha)=(3+33\times2^{\alpha}-3^{\alpha}+3\times5^{(\alpha-1)}+6^{\alpha}-3\times7^{(\alpha-1)})
\end{equation}
\begin{equation}
Z(\alpha)=\left(1+2^{\alpha}-3^{\alpha-1}+2^{2(\alpha-1)}\right)
\end{equation}
The other rational function has a pole at the origin. In this description
of the space the Fourier, Laplace, Mellin and cosine transforms
play a complementary role to the unitary matrices used earlier. The
order of integration is compositional: we integrate fully over the
variables at each stage, and the order must not be interchanged
except in special circumstances. These functions, and their related
theta transforms, are part of a general group of elliptic symmetry
operators. One may plot them simply on a polar diagram.
\par\end{flushleft}
\section{Semigroup Transformations}
\begin{flushleft}
Consider the unitary matrices $\left\{ \hat{Q},\hat{D},\hat{J}\right\} $.
We may write these matrices as the outer product of unit eigenvectors
with column vectors:
\begin{equation}
\hat{Q}=\left[\begin{array}{ccc}
\vdots & \vdots & \vdots\\
q_{1} & q_{2} & q_{3}\\
\vdots & \vdots & \vdots
\end{array}\right]=\left[\begin{array}{ccc}
\left|q_{1}\right\rangle & \left|q_{2}\right\rangle & \left|q_{3}\right\rangle \end{array}\right]
\end{equation}
\begin{equation}
\hat{J}=\left[\begin{array}{ccc}
\vdots & \vdots & \vdots\\
j_{1} & j_{2} & j_{3}\\
\vdots & \vdots & \vdots
\end{array}\right]=\left[\begin{array}{ccc}
\left|j_{1}\right\rangle & \left|j_{2}\right\rangle & \left|j_{3}\right\rangle \end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\hat{D}=\left[\begin{array}{ccc}
\vdots & \vdots & \vdots\\
d_{1} & d_{2} & d_{3}\\
\vdots & \vdots & \vdots
\end{array}\right]=\left[\begin{array}{ccc}
\left|d_{1}\right\rangle & \left|d_{2}\right\rangle & \left|d_{3}\right\rangle \end{array}\right]
\end{equation}
Writing $\left|A_{l}(s)\right\rangle _{m}=\left[\hat{A}(s)\right]_{l,m}=\Psi_{l,m}^{A}(s)$,
we first consider the various transformations on the unitaries:
\begin{equation}
\hat{L}^{a_{i}}(\alpha)\left|a_{i}(\sigma)\right\rangle =\left|a_{i}(\sigma+\alpha)\right\rangle
\end{equation}
Finding the matrix operators:
\[
\left\{ \hat{L}^{D}\right\} =\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & \mathrm{sin}\sigma\\
0 & 1 & 0\\
-\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right]\&\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & -\mathrm{sin}\sigma\\
0 & 1 & 0\\
+\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right]
\]
\begin{eqnarray*}
\left\{ \hat{L}^{Q}\right\} =\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & ie^{-i\theta}\mathrm{sin}\sigma\\
0 & 1 & 0\\
ie^{i\theta}\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right],
\end{eqnarray*}
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\&\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & ie^{-i\theta}\mathrm{sin}\sigma\\
0 & 1 & 0\\
-ie^{i\theta}\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right]
\end{equation}
\[
\left\{ \hat{L}^{J}\right\} =\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & i\mathrm{sin}\sigma\\
0 & 1 & 0\\
i\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right]\&\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & -i\mathrm{sin}\sigma\\
0 & 1 & 0\\
-i\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right]
\]
\begin{equation}
\&\left[\begin{array}{ccc}
\mathrm{cos}\sigma & 0 & -i\mathrm{sin}\sigma\\
0 & 1 & 0\\
i\mathrm{sin}\sigma & 0 & \mathrm{cos}\sigma
\end{array}\right]
\end{equation}
\par\end{flushleft}
\section{Fermat Principle}
Take the action function given as the classical Fermat principle:
\begin{equation}
S(y_{\alpha},\dot{y}_{\alpha}|t')=\int_{0}^{t'}n(y_{\alpha})\sqrt{\dot{y}_{\alpha}^{2}}dt
\end{equation}
The Euler-Lagrange equations then read as:
\begin{equation}
\sqrt{\dot{y}_{\alpha}^{2}}\dfrac{\partial n}{\partial y_{\alpha}}-\dfrac{d}{ds}\left(\dfrac{\dot{y}_{\alpha}n(y_{\alpha})}{\sqrt{\dot{y}_{\alpha}^{2}}}\right)=0
\end{equation}
We consider the particular case of one co-ordinate:
\begin{equation}
n(y_{\alpha})=n(y)
\end{equation}
and transform the co-ordinate as $y=ia\sin\phi$. The differential
equation then reads as:
\begin{equation}
i\sqrt{a^{2}+y^{2}}\dfrac{\partial n}{\partial y}\dfrac{\partial\phi}{\partial t}-\dfrac{dn(y)}{ds}=0
\end{equation}
We may therefore write, after taking the appropriate limit, that:
\begin{equation}
\dfrac{\partial n}{\partial\phi}(i\sqrt{a^{2}+y^{2}}\dfrac{\partial\phi}{\partial t}\dfrac{\partial y}{\partial\phi}-1)=0
\end{equation}
In general, $\dfrac{\partial n}{\partial\phi}\neq0$, so we therefore
have the parametric identity:
\begin{equation}
\dfrac{\partial\phi}{\partial t}=\dfrac{-i}{\sqrt{a^{2}+y^{2}}}\dfrac{\partial\phi}{\partial y}
\end{equation}
Applying this twice, we find the elliptic differential equation:
\begin{equation}
\dfrac{\partial^{2}\phi}{\partial t^{2}}=-\dfrac{1}{\sqrt{a^{2}+y^{2}}}\dfrac{\partial}{\partial y}(\dfrac{1}{\sqrt{a^{2}+y^{2}}}\dfrac{\partial\phi}{\partial y})
\end{equation}
\begin{equation}
(a^{2}+y^{2})\dfrac{\partial^{2}\phi}{\partial t^{2}}=-\dfrac{\partial^{2}\phi}{\partial y^{2}}+\dfrac{y}{\sqrt{a^{2}+y^{2}}}\dfrac{\partial\phi}{\partial y}
\end{equation}
\section{Elliptic Wave Equations}
\begin{flushleft}
Writing $\left|A_{l}(s)\right\rangle _{m}=\left[\hat{A}(s)\right]_{l,m}=\Psi_{l,m}^{A}(s)$,
we now consider various operators on the respective wave-functions.
It is important to note that the variable may properly be held to
be a periodic complex variable, and the function extended meromorphically
to the complex space. We will work only on the section where the wave
is continuous, invertible and free of poles, so we may multiply and
divide by trigonometric functions. In this section, as we are dealing
with changes of variables, we must use partial differential operators
to distinguish them from the strict time derivative treated elsewhere
in the paper. Let us examine a particular co-ordinate transform, to
show how we may write down the wave equations. We have:
\par\end{flushleft}
\begin{equation}
\left[D(\chi)\right]_{3,1}=\dfrac{i}{\sqrt{2}}\mathrm{sin}(\chi)=\tau_{3,1}^{D}=\tau
\end{equation}
where we drop the subscripts for convenience. Directly differentiating:
\[
\dfrac{d\tau}{d\chi}=\dfrac{i}{\sqrt{2}}\mathrm{cos}\chi
\]
Squaring the derivative:
\[
(d\tau)^{2}=-(\frac{1}{2}+\tau^{2})d\chi^{2}
\]
We find after inverting the fraction and taking the square root:
\begin{equation}
\dfrac{d\chi}{d\tau}=\dfrac{\partial\chi}{\partial\tau}=\dfrac{-i}{\sqrt{\dfrac{1}{2}+\tau^{2}}}
\end{equation}
Partial differentiating a function twice with respect to our parameter:
\begin{equation}
\dfrac{\partial^{2}\Psi}{\partial\tau^{2}}=\left(\dfrac{\partial\chi}{\partial\tau}\right)^{2}\dfrac{\partial^{2}\Psi}{\partial\chi^{2}}+\dfrac{\partial\Psi}{\partial\chi}\dfrac{\partial}{\partial\tau}\left(\dfrac{\partial\chi}{\partial\tau}\right)
\end{equation}
Substitution of the appropriate values brings:
\begin{equation}
\dfrac{\partial^{2}\Psi}{\partial\tau^{2}}=\dfrac{1}{\frac{1}{2}+\tau^{2}}\left(-\dfrac{\partial^{2}\Psi}{\partial\chi^{2}}+\dfrac{i\tau}{\sqrt{\frac{1}{2}+\tau^{2}}}\dfrac{\partial\Psi}{\partial\chi}\right)
\end{equation}
\noindent \begin{flushleft}
We can then find the differential-integral equations for the wave-function.
In general it takes the form:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\Psi_{i,j}^{k}[\sigma]=\Psi_{i,j}^{k}[0]+\epsilon_{i,j}^{k}\int_{0}^{\sigma}d\nu\,\hat{V}(\nu)\Psi_{i,j}^{k}[\nu]
\end{equation}
\begin{equation}
\hat{V}(\nu)=F_{1}^{i,j,k}(\nu)\left(\mu_{i,j,k}F_{2}^{i,j,k}(\nu)+\beta_{i,j}^{k}\nabla_{\nu}^{2}\right)
\end{equation}
\begin{equation}
\epsilon_{i,j}^{k}\in\{1,-1,i,-i\};\beta_{i,j}^{k}\in\{1,-1\};\nabla_{u}^{2}=\dfrac{\partial^{2}}{\partial u^{2}}
\end{equation}
\begin{equation}
(F_{1}^{i,j,k},F_{2}^{i,j,k})\in\left\{ (\mathrm{cot}\nu,\mathrm{cos^{2}}\nu),(\mathrm{tan}\nu,\mathrm{sin^{2}}\nu)\right\}
\end{equation}
Our entire wavefunction is then:
\begin{equation}
\Psi_{i,j}^{k}[s,\tau_{i,j}^{k}]=\Gamma_{i,j}^{k}[s]\Psi_{i,j}^{k}[\tau_{i,j}^{k}]
\end{equation}
\begin{equation}
\dfrac{\partial^{2}\Gamma_{i,j}^{k}[s]}{\partial s^{2}}=\mu_{i,j,k}\Gamma_{i,j}^{k}[s]
\end{equation}
These formulae are generated by a group of partial differential equations
of type:
\begin{equation}
(z^{2}+a^{2})\dfrac{\partial^{2}\Psi}{\partial z^{2}}=-\dfrac{\partial^{2}\Psi}{\partial\chi^{2}}+\dfrac{iz}{\sqrt{z^{2}+a^{2}}}\dfrac{\partial\Psi}{\partial\chi}
\end{equation}
\begin{equation}
z=f(\chi),|z|<a,f(\chi+m\omega)=f(\chi)
\end{equation}
and we take the angular variable so as to avoid any discontinuities
in the relevant trigonometric ratios other than at the extremum, where
we are guaranteed analytic continuity by the hyperbolic sine-sine
relationship. They are on the perimeter of the wavefunction; we are
guaranteed continuity and unitarity by virtue of our matrix construction.
One can label the functions by their $F_{1}^{i,j,k},\epsilon_{i,j}^{k}$
and $\beta_{i,j}^{k}$ . If we consider these functions as proper
analytic functions in the complex plane, it is straightforward to
derive various differential systems, analogous to the Cauchy-Riemann
equations. For example, if $z=re^{i\theta}$, $f(z)=u(z)+iv(z)$,
we obtain the cylindrical polar set:
\begin{equation}
\dfrac{\partial u}{\partial r}=\dfrac{1}{r}\dfrac{\partial v}{\partial\theta}
\end{equation}
\begin{equation}
\dfrac{\partial v}{\partial r}=-\dfrac{1}{r}\dfrac{\partial u}{\partial\theta}
\end{equation}
\begin{equation}
\dfrac{\partial^{2}f}{\partial r^{2}}+\dfrac{1}{r}\dfrac{\partial f}{\partial r}+\dfrac{1}{r^{2}}\dfrac{\partial^{2}f}{\partial\theta^{2}}=0
\end{equation}
If $z=r\cos\theta$, $f(z)=u(z)+iv(z)$, we find the symmetrical equations:
\begin{equation}
\dfrac{\partial u}{\partial r}=-\cot\theta\dfrac{\partial v}{\partial\theta}
\end{equation}
\begin{equation}
\dfrac{\partial v}{\partial r}=-\cot\theta\dfrac{\partial u}{\partial\theta}
\end{equation}
If $z=ir\cos\theta$, $f(z)=u(z)+iv(z)$, we find polar elliptic equations:
\begin{equation}
r\dfrac{\partial u}{\partial r}=-\dfrac{z}{\sqrt{z^{2}+r^{2}}}\dfrac{\partial v}{\partial\theta}
\end{equation}
\begin{equation}
r\dfrac{\partial v}{\partial r}=\dfrac{z}{\sqrt{z^{2}+r^{2}}}\dfrac{\partial u}{\partial\theta}
\end{equation}
Differentiating the correct parameterisation, analytically continuing
it to the complex plane and using separation of variables allows us
to solve for the ground state of the system, which can be quite difficult.
The radial part of the differential equation is generally the circular
Bessel function:
\par\end{flushleft}
\begin{flushleft}
\begin{equation}
\dfrac{d^{2}R(r)}{dr^{2}}+\dfrac{1}{r}\dfrac{dR(r)}{dr}+\dfrac{\alpha}{r^{2}}R(r)=0
\end{equation}
The angular part of the particular solution is then:
\begin{equation}
\Psi(r,\theta)=R(r)Y(\theta)
\end{equation}
\begin{equation}
\dfrac{d^{2}Y(\theta)}{d\theta^{2}}+F_{1}(\theta)\dfrac{dY(\theta)}{d\theta}+\alpha F_{2}(\theta)Y(\theta)=0
\end{equation}
\par\end{flushleft}
\section{Integral Relations }
Expanding the Hamiltonian matrix in the spectral representation, we
find:
\begin{equation}
\tilde{H}=\sum_{n}E_{n}\left|\phi_{n}\right\rangle \left\langle \phi_{n}\right|
\end{equation}
In this particular situation, the eigenenergies are equally separated
from zero, we may therefore write:
\begin{equation}
\tilde{H}=E(\left|\phi_{+}\right\rangle \left\langle \phi_{+}\right|-\left|\phi_{-}\right\rangle \left\langle \phi_{-}\right|)
\end{equation}
An alternative way of writing this Hamiltonian is:
\begin{equation}
\tilde{H}=\sum_{i,j}\epsilon_{ij}(t)\hat{Q}_{ij}+\epsilon_{ij}^{*}(t)\hat{Q}_{ij}^{T}
\end{equation}
\begin{equation}
\hat{Q}_{ij}=\left|i\right\rangle \left\langle j\right|
\end{equation}
Using the dynamical equations of motion,
\begin{equation}
i\dfrac{d}{dt}\left[\begin{array}{c}
\left|\phi_{+}\right\rangle \\
\left|\phi_{-}\right\rangle
\end{array}\right]=E\left[\begin{array}{cc}
1 & 0\\
0 & -1
\end{array}\right]\left[\begin{array}{c}
\left|\phi_{+}\right\rangle \\
\left|\phi_{-}\right\rangle
\end{array}\right]
\end{equation}
Let us now expand the wave-vector in the eigenbasis:
\begin{equation}
i\dfrac{d}{dt}(\sum_{j}c_{j}(t)\left|j(t)\right\rangle )=\sum_{j}E_{j}\left|j(t)\right\rangle
\end{equation}
Dropping the sums for convenience and assuming Einstein summation
convention, we evaluate the differential equation:
\begin{equation}
ic_{j}(t)\dfrac{d}{dt}\left|j(t)\right\rangle =(E_{j}-i\dfrac{dc_{j}}{dt})\left|j(t)\right\rangle
\end{equation}
Consider a path integral on our state space:
\begin{equation}
\left[K_{jk}(T,0)\right]=\intop_{\check{x}_{j}(0)}^{\check{x}_{k}(T)}\exp(-i\int_{0}^{T}ds\hat{\mathcal{L}}[E_{j},c_{j}|s])D[\check{x}_{\alpha}(s)]
\end{equation}
We may write the Lagrangian density as the formula:
\begin{equation}
\hat{\mathcal{L}}=-E_{j}\dot{c}_{j}(s)\delta_{jk}+ic_{j}(s)\left\langle k(s)\right|\widetilde{H}(s)\left|j(s)\right\rangle
\end{equation}
Up to isomorphism, this fulfils an equivalent role to the time evolution
operator. As we have managed to already find a number of differential
relationships, the necessary question is to enquire as to whether
there are a series of equivalent integral identities that we may use
to evaluate these particular groups of elliptic differential equations.
Consider a simple toy problem of a wave-packet interacting with a
time dependent oscillating potential:
\begin{equation}
i\hbar\dfrac{\partial\Psi(x,t)}{\partial t}=-\dfrac{\hbar^{2}}{2m}\dfrac{\partial^{2}\Psi(x,t)}{\partial x^{2}}+V_{0}\cos\omega t.\Psi(x,t)
\end{equation}
Using natural units and rescaling the time, we may write this as a
neat operator equation:
\begin{equation}
i\dfrac{\partial\Psi(x,t)}{\partial t}=(\hat{H}_{0}+\hat{H}_{I}(t))\Psi(x,t)
\end{equation}
Now, if we cosine-transform the time co-ordinate, our equations of
motion transform:
\begin{equation}
z=\cos\omega t
\end{equation}
\begin{equation}
t=\dfrac{1}{\omega}\cos^{-1}(z)
\end{equation}
\begin{equation}
\hat{H}_{I}(t)\mapsto\hat{H}_{I}(z)=V_{0}z
\end{equation}
\begin{equation}
\hat{H}_{0}\mapsto\hat{H}_{0}
\end{equation}
\begin{equation}
\dfrac{\partial}{\partial t}\mapsto\dfrac{\partial z}{\partial t}\dfrac{\partial}{\partial z}=\omega\sqrt{1-z^{2}}\dfrac{\partial}{\partial z}
\end{equation}
Writing our original wave-equation in the transformed frame, we find:
\begin{equation}
i\omega\sqrt{1-z^{2}}\dfrac{\partial\Psi(x,z)}{\partial z}=-\dfrac{1}{2}\dfrac{\partial^{2}\Psi(x,z)}{\partial x^{2}}+V_{0}z\Psi(x,z)
\end{equation}
Using the method of separation of variables, we write an ansatz for
our wavefunction:
\begin{equation}
\Psi(x,z)=\phi(x)\beta(z)
\end{equation}
Finding the equivalent set of equations:
\begin{equation}
E\phi(x)=-\dfrac{1}{2}\dfrac{d^{2}\phi(x)}{dx^{2}}
\end{equation}
\begin{equation}
i\omega\dfrac{d\beta(z)}{dz}=\dfrac{(E-V_{0}z)}{\sqrt{1-z^{2}}}\beta(z)
\end{equation}
The first of these equations is readily solvable using complex exponentials,
however, the second presents us with some difficulties. Firstly, we
rescale the variables to place the differential equation into dimensionless
form, finding:
\begin{equation}
i\dfrac{d\beta(z)}{dz}=\dfrac{(1-\alpha z)}{\sqrt{1-z^{2}}}\beta(z)
\end{equation}
Directly differentiating:
\begin{equation}
(1-z^{2})\dfrac{d^{2}\beta}{dz^{2}}=(p(\alpha,z)-\dfrac{iq(\alpha,z)}{\sqrt{1-z^{2}}})\beta
\end{equation}
where the p's and q's are polynomial functions. This is one of our
basic elliptic differential equations. Note the useful integral formula:
\begin{equation}
\intop_{a}^{b}\sec z(1+\tan z)dz=\intop_{y_{a}}^{y_{b}}\dfrac{dy}{\sqrt{y^{2}-1}}+i(y_{b}-y_{a})
\end{equation}
where $y=\sec z$. To consider further transformations, consider first
one of the polynomials from the previous section:
\begin{equation}
b_{1}(z)=z^{8}+29z^{6}+208z^{4}+306z^{2}-144
\end{equation}
It is possible to completely specify this polynomial by its root set,
viz.:
\begin{equation}
b_{1}(z)=0,z\in\{\pm\alpha_{1},\pm i\beta_{1},\pm i\delta_{1},\pm i\gamma_{1}\}
\end{equation}
Moving sequentially through the other relevant polynomials, we find:
\begin{equation}
b_{2}(z)=z^{7}-125z^{5}+25z^{4}+192z^{3}-960z^{2}-2880z+20160
\end{equation}
\begin{equation}
b_{2}(z)=0,z\in\{\alpha_{2},\beta_{2},-\gamma_{2},z_{1},z_{1}^{*},z_{2},z_{2}^{*}\}
\end{equation}
This point set has the neat property of being a pentagram in the complex
plane with three non-symmetrical points on the real axis, one of which
is the primary vertex of the pentagon.
\[
b_{3}(z)=z^{6}+120z^{5}+5016z^{4}+86527z^{3}
\]
\begin{equation}
+550413z^{2}+896923z+396900
\end{equation}
This polynomial has its root set confined to the negative real axis:
\begin{equation}
b_{3}(z)=0,z\in\{-\alpha_{3},-\beta_{3},-\gamma_{3},-\delta_{3},-\epsilon_{3},-\zeta_{3}\}
\end{equation}
Finally, consider the polynomial $b_{4}(z)=b_{1}(\sqrt{z})$. As the
original polynomial is even in z, the new square-root transformed
polynomial is of the form:
\begin{equation}
b_{4}(z)=z^{4}+29z^{3}+208z^{2}+306z-144
\end{equation}
which has a root set:
\begin{equation}
b_{4}(z)=0,z\in\{\alpha_{4},-\beta_{4},-\gamma_{4},-\delta_{4}\}
\end{equation}
By examining the cosine and exponentially transformed polynomials,
$b_{j}(\cos\theta)$ \& $b_{j}(\exp i\theta)$, and real and imaginary
parts thereof, it is a simple exercise to develop a series of polar
representations for these polynomial functions. The complex analytic
behaviour is generated by these or similar sets of roulettes. They
are related to a number of interesting integral relations. We begin
with a simple example:
\begin{equation}
\int_{0}^{2\pi}\dfrac{b_{4}(u)du}{\sqrt{1-u^{2}}}=ia+Q(\pi)\sqrt{4\pi^{2}-1}
\end{equation}
The other polynomials have similar identities with respect to this
measure:
\begin{equation}
\int_{-1}^{+1}\dfrac{b_{1}(u)du}{\sqrt{1-u^{2}}}=\dfrac{12331\pi}{128}
\end{equation}
\begin{equation}
\int_{0}^{3\pi}b_{1}(u)\sqrt{1-u^{2}}du=b\pi+i\sqrt{9\pi^{2}-1}(c+(9\pi^{2}-1)P(\pi))
\end{equation}
\begin{equation}
P(\pi)=\sum_{n=0}^{3}C_{n}\pi^{2n+1}
\end{equation}
\begin{equation}
\int_{0}^{2\pi}b_{1}(u)\sqrt{1-u^{2}}du=k\pi+i\sqrt{4\pi^{2}-1}(l+(4\pi^{2}-1)Y(\pi))
\end{equation}
\begin{equation}
Y(\pi)=\sum_{n=0}^{3}D_{n}\pi^{2n+1}
\end{equation}
\begin{equation}
\{D_{n}\}\subseteq\{C_{n}\},D_{n}=0\mathrm{\, mod}C_{j}
\end{equation}
\begin{equation}
\int_{0}^{\pi/2}b_{1}(u)\sqrt{1-u^{2}}du=g\pi+i\sqrt{\pi^{2}-4}(f+(\pi^{2}-4)r(\pi))
\end{equation}
\begin{equation}
r(\pi)=\sum_{n=0}^{3}M_{n}\pi^{2n+1}
\end{equation}
\begin{equation}
\{M_{n}\}\subseteq\{C_{n}\}
\end{equation}
\begin{equation}
\int_{-l}^{+l}b_{1}(u)\sqrt{1-u^{2}}du=\sqrt{1-l^{2}}(\sum_{0}^{3}C_{n}l^{2n+1})-m\sin^{-1}(l)
\end{equation}
\begin{equation}
\int_{0}^{a}\dfrac{b_{1}(u)du}{\sqrt{1-u^{2}}}=ip(a)\sqrt{a^{2}-1}+\lambda a+i\sin^{-1}(a)
\end{equation}
Off the unit circle, these integrals lose analyticity unless we modify
the weighting function. One way to do this is by defining a co-distribution:
\begin{equation}
\int_{-1}^{+1}du(1-u^{2})^{\alpha/2}=\sqrt{\pi}\dfrac{\Gamma(\dfrac{\alpha}{2}+1)}{\Gamma(\dfrac{\alpha}{2}+\dfrac{3}{2})}
\end{equation}
We then form marginal distributions:
\begin{equation}
p_{k,W}(\alpha)=\dfrac{\int_{\Omega}p_{k}(s)W(s,\alpha)ds}{\int_{\Omega}W(s,\alpha)ds}
\end{equation}
\begin{equation}
b_{1,W}(\alpha)=-3\dfrac{\sum q_{n}\alpha^{n}}{\prod(\alpha+2(n+1)+1)}=-3\dfrac{p(\alpha)}{q(\alpha)}
\end{equation}
Using partial fractions to expand the denominator:
\[
\dfrac{1}{\prod(\alpha+2(n+1)+1)}=\dfrac{1}{48}(\dfrac{1}{\alpha+3}-\dfrac{1}{\alpha+9})
\]
\begin{equation}
+\dfrac{1}{16}(\dfrac{1}{\alpha+7}-\dfrac{1}{\alpha+5})
\end{equation}
\begin{equation}
p(\alpha)=48\alpha^{4}+1050\alpha^{3}+7538\alpha^{2}+17653\alpha-1214
\end{equation}
\begin{equation}
\lim_{\alpha\rightarrow\infty}b_{1,W}(\alpha)=-144
\end{equation}
This is not the only co-distribution; we may form an equivalent functional:
\begin{equation}
W'(\alpha,s)=(1-\alpha^{2})^{-s/2}=W(-s,\alpha)
\end{equation}
These integral relationships are then neatly expressible in terms
of hypergeometric functions:
\begin{equation}
\int d\alpha W(-s,\alpha)=\alpha\left\{ \begin{array}{cc}
\dfrac{1}{2} & \alpha^{2}\\
\dfrac{s}{2} & \dfrac{3}{2}
\end{array}\right\}
\end{equation}
\begin{equation}
\int d\alpha W(-s,\alpha)b_{1,W}(\alpha)=\sum_{n=1}^{5}\left\{ \begin{array}{cc}
1/2 & \alpha^{2}\\
\dfrac{s}{2} & \dfrac{n}{2}+1
\end{array}\right\} \alpha^{n}C_{n}
\end{equation}
The point symmetry group of $p(\alpha)$ is a kite; all the zeroes
of $q(\alpha)$ lie along the negative real axis.
\section{Chebyshev Polynomials}
Consider a unit triangle in the complex plane. Using De Moivre's theorem:
\begin{equation}
(e^{i\theta})^{m}=(\cos\theta+i\sin\theta)^{m}=\cos m\theta+i\sin m\theta
\end{equation}
We may write an alternative transformation, viz.:
\begin{equation}
z=x+iy=x+i\sqrt{1-x^{2}}
\end{equation}
The left-hand side of De-Moivre's theorem then reads as:
\begin{equation}
(x+i\sqrt{1-x^{2}})^{m}=\cos m\theta+i\sin m\theta
\end{equation}
We define the Chebyshev polynomial of the first kind:
\begin{equation}
T_{m}(\cos\theta)=\cos m\theta
\end{equation}
Using the double angle formula, it is a simple exercise to derive
the first recursion relation:
\begin{equation}
T_{m}(x)=xT_{m-1}(x)+(x^{2}-1)U_{m-1}(x)
\end{equation}
where we have defined the Chebyshev polynomial of the second kind:
\begin{equation}
U_{m}(\cos\theta)=\dfrac{\sin m\theta}{\sin\theta}
\end{equation}
with recursion relation:
\begin{equation}
U_{m}(x)=xU_{m-1}(x)+T_{m-1}
\end{equation}
Directly differentiating the recursion relations, we obtain the system:
\begin{equation}
\dfrac{dT_{m}(x)}{dx}=mU_{m}(x)
\end{equation}
\begin{equation}
(1-x^{2})\dfrac{dU_{m}}{dx}=xU_{m}-mT_{m}
\end{equation}
\begin{equation}
(1-x^{2})\dfrac{d^{2}T_{m}}{dx^{2}}=(mxU_{m}-m^{2}T_{m})
\end{equation}
\begin{equation}
(1-x^{2})\dfrac{d^{2}T_{m}}{dx^{2}}-x\dfrac{dT_{m}}{dx}+m^{2}T_{m}=0
\end{equation}
This differential equation is related to the elliptic wave equations
used in the previous sections. This comes about due to our particular
break-down of the wave-function in terms of roots of unity on the
unit circle.
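The two recursion relations can be checked numerically from the defining trigonometric identities; a minimal NumPy sketch (the order $m=5$ and the grid are arbitrary sample choices):
\begin{verbatim}
import numpy as np

x = np.cos(np.linspace(0.1, 3.0, 50))

# T_m(cos t) = cos(m t), U_m(cos t) = sin(m t)/sin(t), as defined above.
def T(m, x): return np.cos(m * np.arccos(x))
def U(m, x): return np.sin(m * np.arccos(x)) / np.sin(np.arccos(x))

m = 5
print(np.allclose(T(m, x), x*T(m-1, x) + (x**2 - 1)*U(m-1, x)))  # True
print(np.allclose(U(m, x), x*U(m-1, x) + T(m-1, x)))             # True
\end{verbatim}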
\section{Circular Co-ordinates}
Consider the standard cylindrical co-ordinate system in two dimensions,
i.e. a circular system:
\begin{equation}
\vec{x}=r\left(\begin{array}{c}
\cos\theta\\
\sin\theta
\end{array}\right)
\end{equation}
Directly differentiating to find the velocity vector:
\begin{equation}
\dfrac{d\vec{x}}{dt}=\dot{r}\hat{e}_{r}+r\dot{\theta}\hat{e}_{\theta}
\end{equation}
The polar unit vectors have representation:
\begin{equation}
\hat{e}_{r}(\theta)=\left(\begin{array}{c}
\cos\theta\\
\sin\theta
\end{array}\right),\,\hat{e}_{\theta}(\theta)=\left(\begin{array}{c}
-\sin\theta\\
\cos\theta
\end{array}\right)
\end{equation}
\begin{equation}
\hat{e}_{r}(\theta)\cdot\hat{e}_{\theta}(\theta)=0
\end{equation}
\begin{equation}
\hat{e}_{r,\theta}(\theta+\alpha)=\hat{L}(\alpha)\hat{e}_{r,\theta}(\theta)
\end{equation}
\begin{equation}
\hat{L}(\alpha)=\left[\begin{array}{cc}
\cos\alpha & -\sin\alpha\\
\sin\alpha & \cos\alpha
\end{array}\right]
\end{equation}
Writing out the arc-length for the system:
\begin{equation}
ds^{2}=dr^{2}+r^{2}d\theta^{2}
\end{equation}
we derive the gradient for the wave-function:
\begin{equation}
\nabla\Psi=\hat{e}_{r}\dfrac{\partial\Psi}{\partial r}+\dfrac{\hat{e}_{\theta}}{r}\dfrac{\partial\Psi}{\partial\theta}
\end{equation}
Deriving the Laplacian in circular co-ordinates:
\begin{equation}
\nabla^{2}\Psi=(\nabla\cdot\nabla)\Psi=\dfrac{\partial^{2}\Psi}{\partial r^{2}}+\dfrac{1}{r}\dfrac{\partial\Psi}{\partial r}+\dfrac{1}{r^{2}}\dfrac{\partial^{2}\Psi}{\partial\theta^{2}}
\end{equation}
The time-independent wave equation for a free particle on the circle
is given by
\begin{equation}
\hat{H}\Psi(r,\theta)=-\nabla^{2}\Psi(r,\theta)=\xi\Psi(r,\theta)
\end{equation}
Using the method of separation of variables, we obtain the equivalent
system:
\begin{equation}
\Psi(r,\theta)=R(r)Y(\theta)
\end{equation}
\begin{equation}
\dfrac{d^{2}Y(\theta)}{d\theta^{2}}=-k^{2}Y(\theta)
\end{equation}
\begin{equation}
\dfrac{d^{2}R(r)}{dr^{2}}+\dfrac{1}{r}\dfrac{dR(r)}{dr}+(\xi-\dfrac{k^{2}}{r^{2}})R(r)=0
\end{equation}
These equations have the well-known solutions of complex exponentials
and Bessel functions. The angular part obeys periodic boundary conditions,
and can be separated into an odd and even part, being the sine and
cosine:
\begin{equation}
Y(\theta)=Ae^{ik\theta}+Be^{-ik\theta}
\end{equation}
\begin{equation}
Y_{\pm}(\theta)=\pm Y_{\pm}(-\theta)
\end{equation}
\begin{equation}
Y(2\pi)=Y(0)
\end{equation}
finding a particular quantisation for the system as given by:
\begin{equation}
k=\pm n
\end{equation}
Rewriting the radial wave equation in natural units $\xi=1$, we find:
\begin{equation}
r^{2}\dfrac{d^{2}R_{n}}{dr^{2}}+r\dfrac{dR_{n}}{dr}+(r^{2}-n^{2})R_{n}=0
\end{equation}
Our solution is then the Bessel function:
\begin{equation}
R_{n}(r)=J_{n}(r)
\end{equation}
This has an integral expansion and inner product:
\begin{equation}
J_{n}(r)=\dfrac{1}{2\pi}\int_{-\pi}^{+\pi}e^{-i(n\phi-r\sin\phi)}d\phi
\end{equation}
\begin{equation}
\int_{-\pi}^{+\pi}J_{m}^{*}(\nu)J_{n}(\nu)d\nu=\dfrac{\sin(\pi(m-n))}{2\pi^{2}(m-n)}=\dfrac{\delta(m-n)\delta_{mn}}{2\pi^{2}}
\end{equation}
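The integral representation can be compared against a library Bessel function; a minimal SciPy sketch (the order and argument are arbitrary sample values; the imaginary part of the integrand drops out by symmetry):
\begin{verbatim}
import numpy as np
from scipy.special import jv
from scipy.integrate import quad

def J_int(n, r):
    # (1/2pi) Int_{-pi}^{pi} exp(-i(n*phi - r*sin(phi))) dphi;
    # only the real part survives the symmetric integration.
    re, _ = quad(lambda p: np.cos(n*p - r*np.sin(p)), -np.pi, np.pi)
    return re / (2 * np.pi)

n, r = 3, 2.7
print(J_int(n, r), jv(n, r))   # the two values agree
\end{verbatim}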
\section{Spin Waves In Crystals}
Consider a spin wave that moves in a one-dimensional crystal, for instance
along a line. This is the simplest of the continuous degrees of freedom
that can be analysed; we examine it purely to explain the context
of the Bessel function that arose in the previous section and to illustrate
the direct physics of the phenomena in quantum systems that this paper
is concerned with. We take the Hamiltonian:
\begin{equation}
\hat{H}\left|n\right\rangle =-A(\left|n+1\right\rangle +\left|n-1\right\rangle -2\left|n\right\rangle )
\end{equation}
Defining a matrix representation:
\begin{equation}
H_{m,n}=\left\langle n\right|\hat{H}\left|m\right\rangle
\end{equation}
\begin{equation}
i\dfrac{dC_{n}}{dt}=\sum H_{n,m}C_{m}
\end{equation}
We solve this differential equation with the ansatz:
\begin{equation}
C_{m}(t)=a_{m}e^{-iEt}
\end{equation}
\begin{equation}
Ea_{n}=A(2a_{n}-a_{n-1}-a_{n+1})
\end{equation}
\begin{equation}
\Delta E(p)=E-E_{0}=2\cos(p)
\end{equation}
This eigenvalue equation has a solution that can be written in integral
form as a Green's function:
\begin{equation}
K(\Delta q,t|0,0)=\dfrac{1}{2\pi}\int_{-\pi}^{+\pi}\exp(-i(p\Delta q-2t\cos p))\,dp
\end{equation}
\begin{equation}
K(\Delta q,t|\mathbf{0})=(-i)^{\Delta q}J_{\Delta q}(2t)
\end{equation}
This is the Bessel function, as required.
\section{Time Dependent Oscillator Revisited}
Take the wave equation in one space dimension, coupled to a time dependent
oscillator:
\begin{equation}
i\hbar\dfrac{\partial\Psi}{\partial t}=-\dfrac{\hbar^{2}}{2m_{e}}\dfrac{\partial^{2}\Psi}{\partial x^{2}}+V_{0}\cos\omega t.\Psi
\end{equation}
Using the cosine transform, and the method of separation of variables:
\begin{equation}
\Psi(x,t)=\phi(x)\beta(t)
\end{equation}
\begin{equation}
z=\cos\omega t\rightleftarrows t=\dfrac{1}{\omega}\cos^{-1}(z)
\end{equation}
we find the differential equation of state:
\begin{equation}
i\dfrac{d\beta}{\beta}=\dfrac{(1-\alpha z)}{\sqrt{1-z^{2}}}dz
\end{equation}
which has an explicit solution:
\begin{equation}
i(\ln\beta(z)-\ln\beta_{0})=-\cos^{-1}(z)+\alpha\sqrt{1-z^{2}}
\end{equation}
This is mid-way between a Bessel function and a Chebyshev polynomial.
Writing $\chi=\omega t$, $z=\cos\chi$, we may show a neat set of transformations:
\begin{equation}
\dfrac{\partial\Psi}{\partial\chi}=-\sqrt{1-z^{2}}\dfrac{\partial\Psi}{\partial z}
\end{equation}
\begin{equation}
\dfrac{\partial\Psi}{\partial z}=\dfrac{-1}{\sqrt{1-z^{2}}}\dfrac{\partial\Psi}{\partial\chi}=\dfrac{-1}{\sin\chi}\dfrac{\partial\Psi}{\partial\chi}
\end{equation}
Differentiating these expressions directly, we arrive at two equivalent
differential equations:
\begin{equation}
\dfrac{\partial^{2}\Psi}{\partial\chi^{2}}=\sqrt{1-z^{2}}\dfrac{\partial}{\partial z}(\sqrt{1-z^{2}}\dfrac{\partial\Psi}{\partial z})
\end{equation}
\begin{equation}
\dfrac{\partial^{2}\Psi}{\partial z^{2}}=\dfrac{1}{\sin\chi}\dfrac{\partial}{\partial\chi}(\dfrac{1}{\sin\chi}\dfrac{\partial\Psi}{\partial\chi})
\end{equation}
Expanding these, we find the pair of second order differential equations,
related by the transform pair:
\begin{equation}
\dfrac{\partial^{2}\Psi}{\partial\chi^{2}}=(1-z^{2})\dfrac{\partial^{2}\Psi}{\partial z^{2}}-z\dfrac{\partial\Psi}{\partial z}
\end{equation}
\begin{equation}
(1-z^{2})\dfrac{\partial^{2}\Psi}{\partial z^{2}}=\dfrac{\partial^{2}\Psi}{\partial\chi^{2}}-\cot\chi\dfrac{\partial\Psi}{\partial\chi}
\end{equation}
\begin{equation}
z\dfrac{\partial\Psi}{\partial z}=-\cot\chi\dfrac{\partial\Psi}{\partial\chi}
\end{equation}
Taking the equation of state and directly differentiating we find
a second order differential equation:
\begin{equation}
i(1-z^{2})\dfrac{d^{2}\beta}{dz^{2}}=\beta(-i(1-\alpha z)^{2}-\alpha\sqrt{1-z^{2}}+\dfrac{z(1-\alpha z)}{\sqrt{1-z^{2}}})
\end{equation}
This may be rewritten in the equivalent form:
\begin{equation}
(1-z^{2})\dfrac{d^{2}\beta}{dz^{2}}+(\dfrac{\alpha(1-z^{2})}{1-\alpha z}-z)\dfrac{d\beta}{dz}+(1-\alpha z)^{2}\beta=0
\end{equation}
\begin{equation}
\beta(z)=\beta_{0}\exp(i\cos^{-1}(z))\exp(-i\alpha\sqrt{1-z^{2}})
\end{equation}
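That this closed form satisfies the first-order equation of state $i\,d\beta/dz=(1-\alpha z)\beta/\sqrt{1-z^{2}}$ can be checked numerically; a minimal sketch (the value of $\alpha$ and the grid are arbitrary sample choices):
\begin{verbatim}
import numpy as np

alpha, beta0 = 0.4, 1.0

def beta(z):
    return beta0 * np.exp(1j*np.arccos(z)) * np.exp(-1j*alpha*np.sqrt(1 - z**2))

z = np.linspace(-0.9, 0.9, 201)
dz = z[1] - z[0]
lhs = 1j * np.gradient(beta(z), dz)
rhs = (1 - alpha*z) / np.sqrt(1 - z**2) * beta(z)
# residual away from the endpoints is finite-difference error only
print(np.max(np.abs(lhs - rhs)[2:-2]))
\end{verbatim}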
\section{Discrete Fourier Transform For SU(3)}
The discrete Fourier transform (DFT) on a qutrit has subtleties that
are not apparent in the even dimensional case, as there are several
transformations which have similar properties to the expected form.
As the structure of the entire unitary space can be broken down into
three sets of orthogonal rotations and two diagonal operators, defining
exactly what is meant by 'odd' and 'even' is not quite so simple.
This is due to the underlying group structure, as we may define a
third set of properties, namely neutral. We would expect to have three
subgroups which map in a similar way to the DFT on even dimensions,
and one total unitary; this is indeed the case. To show that this
is true, we first generate a cube root of unity:
\begin{equation}
z=\dfrac{1}{2}(-1+i\sqrt{3})
\end{equation}
with complex conjugate:
\begin{equation}
z^{*}=\dfrac{1}{2}(-1-i\sqrt{3})
\end{equation}
This has unit modulus and is a group under $\{*,^{2}\}$:
\begin{equation}
\left|z\right|^{2}=1
\end{equation}
\begin{equation}
z^{2}=z^{*},z=(z^{*})^{2}
\end{equation}
Defining our first DFT matrix:
\begin{equation}
\hat{R}=\dfrac{1}{\sqrt{3}}\left[\begin{array}{ccc}
1 & 1 & 1\\
1 & z & z^{*}\\
1 & z^{*} & z
\end{array}\right]\approxeq\dfrac{1}{\sqrt{3}}\left[\begin{array}{ccc}
1 & 1 & 1\\
1 & z & z^{2}\\
1 & z^{2} & z^{4}
\end{array}\right]
\end{equation}
This matrix is unitary:
\begin{equation}
\hat{R}\hat{R}^{\dagger}=\hat{R}^{\dagger}\hat{R}=\hat{\mathbf{1}}
\end{equation}
and is a fourth root of unity, $\hat{R}^{4}=\hat{1}$. One useful relationship
is:
\begin{equation}
\hat{R}^{T}\hat{R}=\hat{R}\hat{R}^{T}=\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 0 & 1\\
0 & 1 & 0
\end{array}\right]
\end{equation}
which is the equivalent C-NOT for qutrits. We may write our complex
unit as the cube root of unity:
\begin{equation}
z^{3}=1
\end{equation}
with equivalent Euler representation:
\begin{equation}
z=\exp(\dfrac{2\pi i}{3})
\end{equation}
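The stated properties of $\hat{R}$ (unitarity, $\hat{R}^{4}=\hat{1}$, and the permutation produced by $\hat{R}^{T}\hat{R}$) can be verified directly; a minimal NumPy sketch:
\begin{verbatim}
import numpy as np

z = np.exp(2j * np.pi / 3)                  # cube root of unity
R = np.array([[1, 1, 1],
              [1, z, z.conjugate()],
              [1, z.conjugate(), z]]) / np.sqrt(3)

I = np.eye(3)
print(np.allclose(R @ R.conj().T, I))                 # unitary
print(np.allclose(np.linalg.matrix_power(R, 4), I))   # fourth root of unity
print(np.round((R.T @ R).real))                       # qutrit "C-NOT" permutation
\end{verbatim}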
Let us now consider a particular decomposition of the Hamiltonian:
\begin{equation}
\hat{H}=\left[\begin{array}{ccc}
\omega_{1} & 0 & 0\\
0 & \omega_{2} & 0\\
0 & 0 & \omega_{3}
\end{array}\right]+\hat{Q}\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & -1 & 0\\
0 & 0 & 0
\end{array}\right]\hat{Q^{\dagger}}=\tilde{H}_{0}+\tilde{H}_{Q}
\end{equation}
We define an odd-valued split DFT via:
\begin{equation}
\hat{Q}=\left[\begin{array}{ccc}
1 & 1 & 1\\
1 & k & k^{3}\\
1 & k^{3} & k^{5}
\end{array}\right],\hat{Q^{\dagger}}=\left[\begin{array}{ccc}
1 & 1 & 1\\
1 & k^{-1} & k^{-3}\\
1 & k^{-3} & k^{-5}
\end{array}\right]
\end{equation}
with function values:
\begin{equation}
k=e^{i\theta},k^{-1}=k^{*}
\end{equation}
This takes our Hamiltonian to the operator:
\begin{equation}
\hat{H}=\left[\begin{array}{ccc}
\omega_{1} & 1-k^{-1} & 1-k^{-3}\\
1-k & \omega_{2} & 1-k^{-2}\\
1-k^{3} & 1-k^{2} & \omega_{3}
\end{array}\right]
\end{equation}
Writing out the formulae for the trigonometric polynomials:
\begin{equation}
k^{3}=\cos\theta(4\cos^{2}\theta-3)-i\sin\theta(4\sin^{2}\theta-3)
\end{equation}
\begin{equation}
k^{2}=2\cos^{2}\theta-1+2i\sin\theta\cos\theta
\end{equation}
\begin{equation}
k^{-n}=(k^{n})^{*}
\end{equation}
Note that this takes the Hamiltonian to the off-diagonal non-commutative
submanifold; consequently we expect to find two equivalent but different
split DFTs which take the Hamiltonian to the same type of submanifold.
This type of transformation will not be unitary or invertible; it
is more akin to a projection onto a co-ordinate axis, keeping a subset
of the co-ordinates and discarding the rest of the set. Writing out
the transformation law on the Hamiltonian
matrix:
\begin{equation}
\hat{H}_{W}=\hat{W}\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & -1 & 0\\
0 & 0 & 0
\end{array}\right]\hat{W^{\dagger}}
\end{equation}
We now substitute the even-valued split DFT into this relation:
\begin{equation}
\hat{W}=\left[\begin{array}{ccc}
1 & 1 & 1\\
1 & w & w^{2}\\
1 & w^{2} & w^{4}
\end{array}\right]
\end{equation}
and find the explicitly transformed matrix:
\begin{equation}
\hat{H}_{W}=\left[\begin{array}{ccc}
0 & 1-w^{-1} & 1-w^{-2}\\
1-w & 0 & 1-w^{-1}\\
1-w^{2} & 1-w & 0
\end{array}\right]
\end{equation}
\begin{equation}
w=e^{i\chi}
\end{equation}
This matrix has two independent component functions, thereby satisfying
the physical conditions of the SU(3)/SU(2) submanifold. We may write
this in the form:
\begin{equation}
\tilde{H}_{W}=\hat{A}+\hat{A}^{T}+\hat{X}\cos\chi+\hat{Y}\sin\chi+\hat{K}+\hat{K}^{\dagger}
\end{equation}
Consider the operators $\hat{X},\hat{Y}$:
\begin{equation}
\hat{X}=\left[\begin{array}{ccc}
0 & 1 & 0\\
1 & 0 & 1\\
0 & 1 & 0
\end{array}\right],\hat{Y}=\left[\begin{array}{ccc}
0 & -i & 0\\
i & 0 & -i\\
0 & i & 0
\end{array}\right]
\end{equation}
They form a non-commutative subgroup that is similar to SU(2):
\begin{equation}
[\hat{X},\hat{Y}]=2i\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 0 & 0\\
0 & 0 & -1
\end{array}\right]=2i\hat{Z}
\end{equation}
\begin{equation}
[\hat{X},\hat{Z}]=-i\hat{Y}
\end{equation}
\begin{equation}
[\hat{Y},\hat{Z}]=i\hat{X}
\end{equation}
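These commutation relations can be confirmed numerically; a minimal NumPy sketch:
\begin{verbatim}
import numpy as np

X = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=complex)
Y = np.array([[0, -1j, 0], [1j, 0, -1j], [0, 1j, 0]])
Z = np.diag([1, 0, -1]).astype(complex)

comm = lambda A, B: A @ B - B @ A

print(np.allclose(comm(X, Y), 2j * Z))    # [X, Y] =  2i Z
print(np.allclose(comm(X, Z), -1j * Y))   # [X, Z] = -i Y
print(np.allclose(comm(Y, Z), 1j * X))    # [Y, Z] =  i X
\end{verbatim}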
Up to a constant matrix, this Hamiltonian is equivalent to the SO(3)
subalgebra of SU(3). The constant matrix is the set of upper and lower
projectors, with representation:
\begin{equation}
\hat{A}=\hat{A}_{1}+\hat{A}_{2}+\hat{A}_{3}
\end{equation}
\begin{equation}
\hat{A}_{1}=\left|1\right\rangle \left\langle 2\right|,\hat{A}_{2}=\left|1\right\rangle \left\langle 3\right|,\hat{A}_{3}=\left|2\right\rangle \left\langle 3\right|
\end{equation}
\begin{equation}
\hat{A}_{j}^{\dagger}=\hat{A}_{j}^{T}
\end{equation}
This matrix is not unique; one may define another matrix, given by:
\begin{equation}
\hat{J}=\left[\begin{array}{ccc}
1 & 1 & 1\\
1 & j & j^{2}\\
1 & j^{2} & j^{3}
\end{array}\right]
\end{equation}
Transforming the Hamiltonian we find:
\begin{equation}
\hat{H}_{J}=\hat{J}\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & -1 & 0\\
0 & 0 & 0
\end{array}\right]\hat{J^{\dagger}}
\end{equation}
\begin{equation}
\hat{H}_{J}=\left[\begin{array}{ccc}
0 & 1-j^{-1} & 1-j^{-2}\\
1-j & 0 & 1-j^{-1}\\
1-j^{2} & 1-j & 0
\end{array}\right]
\end{equation}
which means our mapped Hamiltonian is an equivalent two-parameter
subgroup. Writing this as a congruence relation:
\begin{equation}
\hat{H}_{Q}\ncong\hat{H}_{J}\cong\hat{H}_{W}
\end{equation}
The functions involved have decompositions that may be readily calculated
with De Moivre's theorem:
\begin{equation}
1-j^{2}=1-e^{2i\theta}=1-\cos2\theta-i\sin2\theta
\end{equation}
\begin{equation}
1-\cos2\theta-i\sin2\theta=2(1-\cos^{2}\theta)-i2\sin\theta\cos\theta
\end{equation}
\begin{equation}
1-j^{2}=2(\sin^{2}\theta)-i2\sin\theta\cos\theta
\end{equation}
These functions have the necessary symmetry under conjugation to give
us a Hermitian matrix:
\begin{equation}
j^{-n}=(j^{n})^{*}
\end{equation}
\begin{equation}
1-j^{-l}=(1-j^{l})^{*}
\end{equation}
None of the split-transforms $\hat{J},\hat{Q}$ or $\hat{W}$ are unitary
or orthogonal; the only matrix which has the required property to
be a DFT in its own right is $\hat{R}$.
\section{Dihedral Group On The Unit Triangle}
The permutations one may apply to a triangle with uniquely labelled
vertices generate a dihedral group, defined by the matrices:
\begin{equation}
\hat{S}_{1}=\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 1 & 0\\
0 & 0 & 1
\end{array}\right],\hat{S}_{2}=\left[\begin{array}{ccc}
0 & 0 & 1\\
0 & 1 & 0\\
1 & 0 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{S}_{3}=\left[\begin{array}{ccc}
1 & 0 & 0\\
0 & 0 & 1\\
0 & 1 & 0
\end{array}\right],\hat{S}_{4}=\left[\begin{array}{ccc}
0 & 1 & 0\\
1 & 0 & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{S}_{5}=\left[\begin{array}{ccc}
0 & 0 & 1\\
1 & 0 & 0\\
0 & 1 & 0
\end{array}\right],\hat{S}_{6}=\left[\begin{array}{ccc}
0 & 1 & 0\\
0 & 0 & 1\\
1 & 0 & 0
\end{array}\right]
\end{equation}
This is the smallest non-commutative group. Note that these are a
group of rotoflections, as opposed to proper reflections or rotation
matrices. Using our Hamiltonian operators, we may define a closely
related series of operators using our initial value equations:
\begin{equation}
\hat{Q}(0)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}} & -\dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
This is one of our equivalent Hadamard gates, which constitutes a
pure rotation in 3-D space.
\begin{equation}
\hat{Q}^{2}(0)=\left[\begin{array}{ccc}
0 & -1 & 0\\
1 & 0 & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
We then have a reflection in a plane of symmetry, which is a signed
permutation matrix.
\begin{equation}
\hat{D}(0)=\left[\begin{array}{ccc}
-\dfrac{i}{\sqrt{2}} & \dfrac{i}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & i
\end{array}\right]
\end{equation}
Because we are working on higher dimensions, there is a greater number
of available operators, which we need in order to close the algebra
and conduct meaningful calculations. With qubits all that is required
is a phase gate, a NOT gate, a Hadamard matrix and a XOR gate.
\begin{equation}
\hat{D}^{2}(0)=\frac{1}{2}\left[\begin{array}{ccc}
i-1 & i+1 & 0\\
-i+1 & i+1 & 0\\
0 & 0 & -2
\end{array}\right]
\end{equation}
\begin{equation}
\hat{J}(0)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}} & -\dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & i
\end{array}\right]
\end{equation}
\begin{equation}
\hat{J}^{4}(0)=\left[\begin{array}{ccc}
-1 & 0 & 0\\
0 & -1 & 0\\
0 & 0 & 1
\end{array}\right]
\end{equation}
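The powers of the initial-value gates quoted above can be reproduced numerically; a minimal NumPy sketch:
\begin{verbatim}
import numpy as np

s = 1 / np.sqrt(2)
Q0 = np.array([[s, -s, 0], [s, s, 0], [0, 0, 1]], dtype=complex)
D0 = np.array([[-1j*s, 1j*s, 0], [s, s, 0], [0, 0, 1j]])
J0 = np.array([[s, -s, 0], [s, s, 0], [0, 0, 1j]])

print(np.round((Q0 @ Q0).real))                      # signed permutation
print(np.round(2 * D0 @ D0, 6))                      # 2 D^2(0), cf. the 1/2 prefactor above
print(np.round(np.linalg.matrix_power(J0, 4)).real)  # diag(-1, -1, 1)
\end{verbatim}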
The particular matrices we have been considering above play the role
of the phase gate, one of the possible equivalent C-NOT trit gates
and the Hadamard gate in time optimal qutrit quantum computation.
They are signed complex permutation matrices. The extremals of the
unitary generators are at the points $\{0,\dfrac{\pi}{2},\pi,\dfrac{3\pi}{2}\}$
which define a base set of qutrit computational gates:
\begin{equation}
\hat{Q}(0)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}} & -\dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & 1
\end{array}\right],\hat{Q}(\pi)=\left[\begin{array}{ccc}
-\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & -1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{Q}(\dfrac{\pi}{2})=\left[\begin{array}{ccc}
0 & 0 & ie^{-i\varrho}\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{ie^{i\varrho}}{\sqrt{2}} & \dfrac{-ie^{i\varrho}}{\sqrt{2}} & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{Q}(\dfrac{3\pi}{2})=\left[\begin{array}{ccc}
0 & 0 & -ie^{-i\varrho}\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
-\dfrac{ie^{i\varrho}}{\sqrt{2}} & \dfrac{ie^{i\varrho}}{\sqrt{2}} & 0
\end{array}\right]
\end{equation}
It is possible to classify these matrices by their eigenvalues and
determinants into various point symmetry groups.
\noindent In some sense the unitary matrices we are using play the
role of ``square roots'' of unitary matrices themselves, which
are in fact the dihedral group above. Being the complete set of positive
and negative roots of the group expression, we receive twelve unique
operators which may be readily used to generate a wide variety of
useful operations.
\begin{equation}
\hat{J}(0)=\left[\begin{array}{ccc}
\dfrac{1}{\sqrt{2}} & -\dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & i
\end{array}\right],\hat{J}(\pi)=\left[\begin{array}{ccc}
-\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & -i
\end{array}\right]
\end{equation}
\begin{equation}
\hat{J}(\dfrac{\pi}{2})=\left[\begin{array}{ccc}
0 & 0 & -1\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{i}{\sqrt{2}} & \dfrac{-i}{\sqrt{2}} & 0
\end{array}\right],\hat{J}(\dfrac{3\pi}{2})=\left[\begin{array}{ccc}
0 & 0 & 1\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
-\dfrac{i}{\sqrt{2}} & \dfrac{i}{\sqrt{2}} & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{D}(0)=\left[\begin{array}{ccc}
-\dfrac{i}{\sqrt{2}} & \dfrac{i}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & i
\end{array}\right],\hat{D}(\pi)=\left[\begin{array}{ccc}
\dfrac{i}{\sqrt{2}} & -\dfrac{i}{\sqrt{2}} & 0\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
0 & 0 & -i
\end{array}\right]
\end{equation}
\begin{equation}
\hat{D}(\dfrac{\pi}{2})=\left[\begin{array}{ccc}
0 & 0 & i\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
\dfrac{i}{\sqrt{2}} & \dfrac{-i}{\sqrt{2}} & 0
\end{array}\right],\hat{D}(\dfrac{3\pi}{2})=\left[\begin{array}{ccc}
0 & 0 & -i\\
\dfrac{1}{\sqrt{2}} & \dfrac{1}{\sqrt{2}} & 0\\
-\dfrac{i}{\sqrt{2}} & \dfrac{i}{\sqrt{2}} & 0
\end{array}\right]
\end{equation}
This set of unitary transformations is necessary and sufficient to
implement any quantum trinary computation. Some elementary operations
include:
\begin{equation}
\hat{D}(0)\hat{D}(\dfrac{\pi}{2})-\hat{D}(\dfrac{\pi}{2})\hat{D}(0)=\left[\begin{array}{ccc}
i & i & 0\\
i & -i & 0\\
0 & 0 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{D}(0)\hat{D}(\dfrac{\pi}{2})+\hat{D}(\dfrac{\pi}{2})\hat{D}(0)=\left[\begin{array}{ccc}
1 & -1 & 0\\
1 & 1 & 0\\
0 & 0 & 2
\end{array}\right]
\end{equation}
Other operators will be explored in future works. By classification
of the eigenvalues, these matrices may be defined as certain types
of point and continuous symmetry groups in the complex plane, which
are related to the roots of polynomial equations which we explored
earlier. This method of qutrit computation and modelling of quantum
states and control is efficient, in that it is time optimal; resource
bounded, in that there is an existing physical overhead on the total
energy imparted to the system, reversible, in that it is unitary,
and robust to error, as it is periodic. We could consider our SU(3)
system to be in all ways equivalent to the physical sum of a qubit
and an ancilla. In this sense, we are moving beyond qubit quantum
computation into new realms of possibility. With a qubit and an ancilla
it is possible to achieve much more than with a single qubit, or any
number of coupled qubits. Data correction requires ancillas, and the
nature of quantum states is that all indistinguishable alternatives
are to be summed over; this necessitates our examination of qutrits
both as quantum computational objects in themselves, and as possible
sources of error within a qubit calculation. This is the simplest
example of an odd-dimensional quantum state; in this particularly simple
case we are fortunately able to derive rich symmetries which are descriptive
of many of the full properties of groups which do not emerge in lower
dimensional cases.
\section{Subgroups of SU(3)}
Explicitly writing out the diagonal generators of SU(3), we find:
\begin{equation}
\frac{1}{\sqrt{3}}\left[\begin{array}{ccc}
\gamma & 0 & 0\\
0 & \gamma & 0\\
0 & 0 & -2\gamma
\end{array}\right],\left[\begin{array}{ccc}
\vartheta & 0 & 0\\
0 & -\vartheta & 0\\
0 & 0 & 0
\end{array}\right]\backsim\left[\begin{array}{ccc}
\omega_{1} & 0 & 0\\
0 & \omega_{2} & 0\\
0 & 0 & \omega_{3}
\end{array}\right]
\end{equation}
The diagonal subgroup has one component which commutes with every
other member of the group, the other only commutes with the rotation
subgroup. We may therefore use the diagonal component to define the
group structure, as it does not matter whether the commuting operator
is taken as part of the constraint or Hamiltonian. The matrix structure
of the possible subdivisions is then:
\begin{equation}
\left[\begin{array}{ccc}
\omega_{1} & 0 & 0\\
0 & \omega_{2} & 0\\
0 & 0 & \omega_{3}
\end{array}\right],\left[\begin{array}{ccc}
0 & \epsilon_{1} & \epsilon_{2}\\
\epsilon_{1}^{*} & 0 & \epsilon_{3}\\
\epsilon_{2}^{*} & \epsilon_{3}^{*} & 0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
\omega_{1} & 0 & \kappa\\
0 & \omega_{2} & 0\\
\kappa^{*} & 0 & \omega_{3}
\end{array}\right],\left[\begin{array}{ccc}
0 & \epsilon_{1} & 0\\
\epsilon_{1}^{*} & 0 & \epsilon_{2}\\
0 & \epsilon_{2}^{*} & 0
\end{array}\right]
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
0 & 0 & \kappa\\
0 & 0 & 0\\
\kappa^{*} & 0 & 0
\end{array}\right],\left[\begin{array}{ccc}
\omega_{1} & \epsilon_{1} & 0\\
\epsilon_{1}^{*} & \omega_{2} & \epsilon_{2}\\
0 & \epsilon_{2}^{*} & \omega_{3}
\end{array}\right]
\end{equation}
\begin{equation}
\left[\begin{array}{ccc}
\vartheta & \beta & 0\\
\beta^{*} & -\vartheta & 0\\
0 & 0 & 0
\end{array}\right],\left[\begin{array}{ccc}
\gamma & 0 & \epsilon_{1}\\
0 & \gamma & \epsilon_{2}\\
\epsilon_{1}^{*} & \epsilon_{2}^{*} & -2\gamma
\end{array}\right],
\end{equation}
For any choice of Hamiltonian operator and associated constraint,
we either find one or the other to be periodic (or constant). The
equations of dynamic state are equivalent up to isometry with a permutation
matrix.
\section{Geodesic On SU(4)}
Let us examine a system where we can only apply global control pulses,
without individual addressing, on two qubits. The Hamiltonian matrix
is the Heisenberg model:
\begin{equation}
\tilde{H}[t]=\lambda_{x}\hat{\sigma}_{x}\otimes\hat{\sigma}_{x}+\lambda_{y}\hat{\sigma}_{y}\otimes\hat{\sigma}_{y}+\lambda_{z}\hat{\sigma}_{z}\otimes\hat{\sigma}_{z}
\end{equation}
\noindent In matrix form:
\begin{equation}
\tilde{H}[t]=\left[\begin{array}{cccc}
\lambda_{z} & 0 & 0 & \lambda_{-}\\
0 & -\lambda_{z} & \lambda_{+} & 0\\
0 & \lambda_{+} & -\lambda_{z} & 0\\
\lambda_{-} & 0 & 0 & \lambda_{z}
\end{array}\right];\lambda_{\pm}=\lambda_{x}\pm\lambda_{y}
\end{equation}
The constraint may be expanded in the Hermitian basis:
\noindent
\begin{equation}
\tilde{F}[t]=(\vec{m}(t)\cdot\vec{\sigma})\otimes\hat{\mathbf{1}}+\hat{\mathbf{1}}\otimes(\vec{n}(t)\cdot\vec{\sigma})+\sum_{i\neq j}\Xi_{i,j}(t)\hat{\sigma}_{i}\otimes\hat{\sigma}_{j}
\end{equation}
\noindent Evaluating the quantum brachistochrone, we find the vector
relationship:
\begin{equation}
\dfrac{d}{dt}\left[\begin{array}{c}
\lambda_{x}\\
\lambda_{y}\\
\lambda_{z}
\end{array}\right]=0
\end{equation}
\noindent Our time optimal Hamiltonian is therefore a constant matrix,
and may be exponentiated directly. Our wave-vector after some time
is given by:
\begin{equation}
\left|\psi(t)\right\rangle =\exp(-i\tilde{H}t)\left|\psi(0)\right\rangle
\end{equation}
\noindent Evaluating the quantum brachistochrone equation, and choosing
the initial state $\left|\psi(0)\right\rangle =[1,0,0,0]^{T}$ we
immediately find $\lambda_{x}=-\lambda_{y}$. Our final state is:
\begin{equation}
\left|\psi(T)\right\rangle =\frac{1}{\sqrt{2}}(\left|00\right\rangle -i\left|11\right\rangle )
\end{equation}
\noindent from which we obtain $\lambda_{z}=0$. Our state evolves
according to:
\begin{equation}
\left|\psi(t)\right\rangle =\left[\begin{array}{c}
\mathrm{cos}(2\lambda_{x}t)\\
0\\
0\\
-i\mathrm{sin}(2\lambda_{x}t)
\end{array}\right]
\end{equation}
and therefore our energy-time relationship is $T=\dfrac{\pi}{\lambda_{x}}$.
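As a quick consistency check (added here for the reader; it is not part of the original derivation), substituting $\lambda_{y}=-\lambda_{x}$ and $\lambda_{z}=0$ into the Heisenberg Hamiltonian gives
\begin{equation*}
\tilde{H}=\lambda_{x}\left(\hat{\sigma}_{x}\otimes\hat{\sigma}_{x}-\hat{\sigma}_{y}\otimes\hat{\sigma}_{y}\right)=2\lambda_{x}\left(\left|00\right\rangle \left\langle 11\right|+\left|11\right\rangle \left\langle 00\right|\right),
\end{equation*}
so that
\begin{equation*}
e^{-i\tilde{H}t}\left|00\right\rangle =\cos(2\lambda_{x}t)\left|00\right\rangle -i\sin(2\lambda_{x}t)\left|11\right\rangle ,
\end{equation*}
which reproduces the column vector above.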
\section{Dirac Equation in Co-rotating Frame}
We take the more general ansatz for a Hamiltonian on SU(4):
\begin{equation}
\tilde{H}=\left[\begin{array}{cccc}
\alpha & 0 & p_{z} & \varepsilon\\
0 & \alpha & \varepsilon^{*} & -p_{z}\\
p_{z} & \varepsilon & -\alpha & 0\\
\varepsilon^{*} & -p_{z} & 0 & -\alpha
\end{array}\right]=\left[\begin{array}{cc}
\alpha\hat{1} & -i\vec{\beta}\cdot\vec{\sigma}\\
i\vec{\beta}\cdot\vec{\sigma} & -\alpha\hat{1}
\end{array}\right]
\end{equation}
Evaluating the characteristic determinant:
\begin{equation}
\det(\tilde{H}-\lambda\hat{1})=0
\end{equation}
we obtain the eigenvalue equation:
\begin{equation}
(-\lambda^{2}+\alpha^{2}+p_{z}^{2}+\left|\varepsilon\right|^{2})^{2}=0
\end{equation}
\begin{equation}
\lambda=\pm\sqrt{\alpha^{2}+p_{z}^{2}+\left|\varepsilon\right|^{2}}
\end{equation}
The isotropic constraint gives:
\begin{equation}
\mathrm{Tr}\!\left(\dfrac{\tilde{H}^{2}}{2}\right)=\mathrm{const.}=\alpha^{2}+p_{z}^{2}+\left|\varepsilon\right|^{2}
\end{equation}
We therefore rescale the time parameter such that $\alpha^{2}+p_{z}^{2}+\left|\varepsilon\right|^{2}=1$,
so our original Hamiltonian matrix has the form of a 4-dimensional
unit vector. In this form the Hamiltonian matrix is difficult to work
with when evaluating the quantum brachistochrone equation directly: the
solutions are not obvious, and the bilinearity of the differential
equations makes an exact solution hard to find. We therefore
use a unitary transformation, equivalent to the Hadamard gate
acting on the Pauli matrices, applied to the 4-spinor. The necessary transformation
is given by:
\begin{equation}
\hat{W}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cc}
\hat{1} & \hat{1}\\
\hat{1} & -\hat{1}
\end{array}\right]=\dfrac{1}{\sqrt{2}}(\hat{\sigma}_{x}+\hat{\sigma}_{z})\otimes\hat{1}
\end{equation}
\begin{equation}
\hat{W}\hat{W}^{\dagger}=\hat{W}^{\dagger}\hat{W}=\mathbf{1}
\end{equation}
\begin{equation}
\hat{W}=\hat{W}^{\dagger}
\end{equation}
Using the standard symmetry transformation, we obtain a new Hamiltonian
matrix:
\begin{equation}
\tilde{H}_{W}=\hat{W}\tilde{H}\hat{W}^{\dagger}=\left[\begin{array}{cc}
\hat{0} & \hat{b}\\
\hat{b}^{\dagger} & \hat{0}
\end{array}\right]
\end{equation}
\begin{equation}
\hat{b}=\alpha\hat{1}+i\vec{\beta}\cdot\vec{\sigma}
\end{equation}
\begin{equation}
\hat{b}^{\dagger}=\alpha\hat{1}-i\vec{\beta}\cdot\vec{\sigma}
\end{equation}
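For completeness, this block structure can be checked by direct block-matrix multiplication (a short verification added here, not in the original text):
\begin{equation*}
\hat{W}\tilde{H}\hat{W}^{\dagger}=\frac{1}{2}\left[\begin{array}{cc}
\hat{1} & \hat{1}\\
\hat{1} & -\hat{1}
\end{array}\right]\left[\begin{array}{cc}
\alpha\hat{1} & -i\vec{\beta}\cdot\vec{\sigma}\\
i\vec{\beta}\cdot\vec{\sigma} & -\alpha\hat{1}
\end{array}\right]\left[\begin{array}{cc}
\hat{1} & \hat{1}\\
\hat{1} & -\hat{1}
\end{array}\right]=\left[\begin{array}{cc}
\hat{0} & \alpha\hat{1}+i\vec{\beta}\cdot\vec{\sigma}\\
\alpha\hat{1}-i\vec{\beta}\cdot\vec{\sigma} & \hat{0}
\end{array}\right],
\end{equation*}
in agreement with the expressions for $\hat{b}$ and $\hat{b}^{\dagger}$ above.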
We then form matrices of eigenvectors:
\begin{equation}
\hat{P}=\left[\begin{array}{cc}
\hat{1} & \hat{1}\\
\hat{b} & -\hat{b}
\end{array}\right]
\end{equation}
\begin{equation}
\hat{P}^{\dagger}=\left[\begin{array}{cc}
\hat{1} & \hat{b}^{\dagger}\\
\hat{1} & -\hat{b}^{\dagger}
\end{array}\right]
\end{equation}
\begin{equation}
\hat{P}\hat{P}^{\dagger}=\hat{P}^{\dagger}\hat{P}=\mathbf{1}
\end{equation}
\begin{equation}
\tilde{H}_{WP}=\hat{P}\tilde{H}_{W}\hat{P}^{\dagger}=\left[\begin{array}{cc}
\hat{1} & 0\\
0 & -\hat{1}
\end{array}\right]=\hat{\sigma}_{z}\otimes\hat{1}
\end{equation}
The time evolution operator for the original system may be written
as:
\begin{equation}
\hat{U}(t,0)=\exp(-i\int_{0}^{t}\tilde{H}(s)ds)
\end{equation}
In the transformed reference frame this reads as:
\begin{equation}
\hat{U}(t,0)=\exp(-i\int_{0}^{t}\hat{Q}^{\dagger}(s)\tilde{H}_{WP}\hat{Q}(s)ds)
\end{equation}
The unitary operator which transforms the Hamiltonian is of the form
$\hat{Q}(s)=\hat{P}(s)\hat{W}$. This has the useful property:
\begin{equation}
[\tilde{H}_{WP},\hat{Q}(s)]=0
\end{equation}
which enables the direct evaluation of the time evolution operator:
\begin{equation}
\hat{U}(t,0)=\left[\begin{array}{cc}
\hat{1}e^{it} & 0\\
0 & \hat{1}e^{-it}
\end{array}\right]
\end{equation}
Using this unitary to transform the initial condition of the Hamiltonian
matrix, we find:
\begin{equation}
\tilde{H}(t)=\hat{U}(t,0)\tilde{H}(0)\hat{U}^{\dagger}(t,0)=\tilde{H}_{0}+\tilde{V}[t,\vec{\beta}(0)]
\end{equation}
\begin{equation}
\tilde{H}(t)=\left[\begin{array}{cc}
\alpha(0)\hat{1} & e^{-2it}\hat{A}^{\dagger}(0)\\
e^{2it}\hat{A}(0) & -\alpha(0)\hat{1}
\end{array}\right]
\end{equation}
\begin{equation}
\tilde{H}(t+T)=\tilde{H}(t)
\end{equation}
\begin{equation}
\det(\tilde{H}(t)-\lambda\hat{1})=\det(\tilde{H}(0)-\lambda\hat{1})=0
\end{equation}
The matrix constraint for the time optimal control problem can then
be written as:
\begin{equation}
\tilde{F}(t)=\left[\begin{array}{cc}
\begin{array}{cc}
0 & \xi_{1}\\
\xi_{1}^{*} & 0
\end{array} & e^{-2it}\hat{A}\\
e^{2it}\hat{A}^{\dagger} & \begin{array}{cc}
0 & \xi_{2}\\
\xi_{2}^{*} & 0
\end{array}
\end{array}\right]
\end{equation}
where $\hat{A}=\left[\begin{array}{cc}
a & b\\
c & d
\end{array}\right]$\,and all the variables involved (other than the obvious time dependence)
are constants. This Hamiltonian is the block-diagonal solution to
the Klein-Gordon equation, in that we may write:
\begin{equation}
\tilde{H}^{2}=(\alpha^{2}+p_{z}^{2}+\left|\varepsilon\right|^{2})\left[\begin{array}{cc}
\hat{1} & 0\\
0 & \hat{1}
\end{array}\right]
\end{equation}
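This identity can be verified directly (a check added here for convenience): writing the explicit $4\times4$ matrix above as $\tilde{H}=\alpha\,\hat{\sigma}_{z}\otimes\hat{1}+\hat{\sigma}_{x}\otimes\hat{M}$, where $\hat{M}=\left[\begin{array}{cc}p_{z} & \varepsilon\\ \varepsilon^{*} & -p_{z}\end{array}\right]$ denotes its Hermitian off-diagonal block, the anticommutation $\{\hat{\sigma}_{z},\hat{\sigma}_{x}\}=0$ together with $\hat{M}^{2}=(p_{z}^{2}+\left|\varepsilon\right|^{2})\hat{1}$ gives
\begin{equation*}
\tilde{H}^{2}=\alpha^{2}\,\hat{1}\otimes\hat{1}+\hat{1}\otimes\hat{M}^{2}=(\alpha^{2}+p_{z}^{2}+\left|\varepsilon\right|^{2})\,\hat{1}\otimes\hat{1}.
\end{equation*}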
Obviously this is invariant under the time dependent transformation
on the Hamiltonian. It is possible to map this solution unitarily
to a number of other physical systems which describe the motion of
electrons and positrons in different basis sets. For completeness,
these matrices are listed below.
\begin{equation}
\hat{U}_{3}=\left[\begin{array}{cccc}
0 & 0 & 1 & 0\\
0 & 0 & 0 & 1\\
1 & 0 & 0 & 0\\
0 & 1 & 0 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{4}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cccc}
1 & 0 & 1 & 0\\
i & 0 & -i & 0\\
0 & 1 & 0 & 1\\
0 & -i & 0 & i
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{5}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cccc}
1 & -i & 0 & 0\\
1 & i & 0 & 0\\
0 & 0 & 1 & -i\\
0 & 0 & 1 & i
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{6}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cccc}
1 & 0 & 1 & 0\\
0 & 1 & 0 & 1\\
1 & 0 & -1 & 0\\
0 & 1 & 0 & -1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{7}=\dfrac{1}{\sqrt{2}}\left[\begin{array}{cccc}
1 & 0 & 0 & 1\\
0 & 1 & 1 & 0\\
0 & 1 & -1 & 0\\
1 & 0 & 0 & -1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{8}=\dfrac{1}{2}\left[\begin{array}{cccc}
1 & 1 & -1 & 1\\
1 & 1 & 1 & -1\\
-1 & 1 & 1 & 1\\
1 & -1 & 1 & 1
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{8}=\dfrac{1}{2}\left[\begin{array}{cccc}
1 & 1 & 1 & 1\\
1 & -i & -1 & i\\
1 & -1 & 1 & -1\\
1 & i & -1 & -i
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{9}=\left[\begin{array}{cccc}
1 & 0 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & 1 & 0 & 0\\
0 & 0 & 1 & 0
\end{array}\right]
\end{equation}
\begin{equation}
\hat{U}_{10}=\left[\begin{array}{cccc}
1 & 0 & 0 & 0\\
0 & 1 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & 0 & 1 & 0
\end{array}\right]
\end{equation}
These transformation matrices mean that, once the specific problem
of the Dirac matrix has been solved, a large set of problems falls
into the same equivalence class, which saves a great deal of effort
in developing particular solutions. Note also that the Hamiltonian has
a certain periodicity, which can be written as $T_{min}\times\left\Vert E\right\Vert =\pi$;
this relationship can be extended unitarily, by isometry, to all the
other equivalent representations.
\section{Dimensional Arrays Of Arbitrary Size}
Take an arbitrary tracefree matrix in $SU(n)$, which we can express
as the array:
\begin{equation}
\tilde{X}=\left[\begin{array}{ccccc}
\omega_{1} & \alpha_{1} & \alpha_{2} & \cdots & \vdots\\
\alpha_{1}^{*} & \omega_{2} & \alpha_{3} & \cdots & \vdots\\
\alpha_{2}^{*} & \alpha_{3}^{*} & \ddots & \ldots & \alpha_{n-1}\\
\vdots & \cdots & \ddots & \ddots & \alpha_{n}\\
\vdots & \cdots & \alpha_{n-1}^{*} & \alpha_{n}^{*} & \omega_{n}
\end{array}\right],\sum_{j}\omega_{j}=0
\end{equation}
\noindent \begin{flushleft}
This can be mapped bijectively to the vector
\par\end{flushleft}
\noindent \begin{flushleft}
\begin{equation}
\overrightarrow{X}=\left[\begin{array}{ccccccccc}
(\omega_{1}, & \cdots, & \omega_{n}), & (\alpha_{1}, & \cdots, & \alpha_{k}), & (\alpha_{1}^{*}, & \cdots, & \alpha_{k}^{*})\end{array}\right]^{T}
\end{equation}
which exists in the space $\left[\begin{array}{ccc}
\mathbb{R}^{n-1}, & \mathbb{C}^{k}, & \mathbb{C}^{k*}\end{array}\right]^{T}$, where the dimension of the complex space is $k=\sum_{j=1}^{n-1}j=\frac{n(n-1)}{2}$.
One of the components of the real vector within the multivector is
dependent; we retain the dummy variable to aid in the analysis. In
any real calculation we immediately remove the redundant variable
once the differential equations are calculated. If we choose the particular
Hamiltonian matrix:
\begin{equation}
\widetilde{H}=\left[\begin{array}{cccccc}
\omega_{1} & 0 & \cdots & \cdots & 0 & \varepsilon_{k}\\
0 & \omega_{2} & \cdots & \cdots & \varepsilon_{k-1} & 0\\
\vdots & \vdots & . & . & \vdots & \vdots\\
\vdots & \vdots & . & . & \vdots & \vdots\\
0 & \varepsilon_{k-1}^{*} & \cdots & \cdots & \omega_{n-1} & 0\\
\varepsilon_{k}^{*} & 0 & \cdots & \cdots & 0 & \omega_{n}
\end{array}\right]
\end{equation}
\par\end{flushleft}
\begin{equation}
\tilde{H}=\sum_{n}\omega_{n}(t)\left|n\right\rangle \left\langle n\right|+\sum_{k+j=n}(\varepsilon_{j}(t)\left|k\right\rangle \left\langle j\right|+\mathrm{h.c.})
\end{equation}
Then the constraint is given by:
\begin{equation}
\tilde{F}=\sum_{i\neq j,i+j\neq n}(\alpha_{i,j}(t)\left|i\right\rangle \left\langle j\right|+\mathrm{h.c.})
\end{equation}
\begin{equation}
\widetilde{F}=\left[\begin{array}{cccccc}
0 & \beta_{1,2} & \cdots & \cdots & \beta_{1,n-1} & 0\\
\beta_{1,2}^{*} & 0 & \cdots & \cdots & 0 & \beta_{2,n}\\
\vdots & \vdots & . & . & \vdots & \vdots\\
\vdots & \vdots & . & . & \vdots & \vdots\\
\beta_{1,n-1}^{*} & 0 & \cdots & \cdots & 0 & \beta_{n-1,n}\\
0 & \beta_{2,n}^{*} & \cdots & \cdots & \beta_{n-1,n}^{*} & 0
\end{array}\right]
\end{equation}
We may write the quantum brachistochrone in the form:
\begin{equation}
i\frac{d}{dt}(\widetilde{H}[t]+\widetilde{F}[t])\in G_{F}
\end{equation}
Hence the Hamiltonian is constant:
\begin{equation}
\frac{d}{dt}\widetilde{H}[t]=0
\end{equation}
Note that, by induction, we have shown the existence of a geodesic
on SU(n): the base cases SU(2) and SU(2+1) have been treated explicitly,
and the construction above carries the result from SU(n) to SU(n+1).
Another family of infinite dimensional matrices is given by:
\begin{equation}
\widetilde{H}=\left[\begin{array}{cccccc}
0 & \varepsilon_{1} & 0 & \cdots & \cdots & 0\\
\varepsilon_{1}^{*} & 0 & \varepsilon_{2} & 0 & \cdots & \vdots\\
0 & \varepsilon_{2}^{*} & \ddots & \ddots & 0 & \vdots\\
\vdots & 0 & \ddots & \ddots & \varepsilon_{n-1} & 0\\
\vdots & \cdots & 0 & \varepsilon_{n-1}^{*} & 0 & \varepsilon_{n}\\
0 & \cdots & \cdots & 0 & \varepsilon_{n}^{*} & 0
\end{array}\right]
\end{equation}
with associated constraint matrix $\tilde{F}=$
\begin{equation}
\left[\begin{array}{ccccccc}
\omega_{1} & 0 & \beta_{1,3} & \beta_{1,4} & \cdots & \beta_{1,n-1} & \beta_{1,n}\\
0 & \omega_{2} & 0 & \beta_{2,4} & \cdots & \cdots & \beta_{2,n}\\
\beta_{1,3}^{*} & 0 & \omega_{3} & 0 & \cdots & \cdots & \vdots\\
\beta_{1,4}^{*} & \beta_{2,4}^{*} & 0 & \ddots & \ddots & \beta_{n-3,n-1} & \beta_{n-3,n}\\
\vdots & \vdots & \vdots & \ddots & \ddots & 0 & \beta_{n-2,n}\\
\beta_{1,n-1}^{*} & \vdots & \vdots & \beta_{n-3,n-1}^{*} & 0 & \omega_{n-1} & 0\\
\beta_{1,n}^{*} & \beta_{2,n}^{*} & \cdots & \beta_{n-3,n}^{*} & \beta_{n-2,n}^{*} & 0 & \omega_{n}
\end{array}\right]
\end{equation}
Again, using the quantum brachistochrone, we find that the Hamiltonian
matrix, which defines a continuous symmetry group, is constant. One
more infinite dimensional system has system Hamiltonian and constraint:
\begin{equation}
\tilde{H}[t]=\left[\begin{array}{cccccc}
\omega_{1} & 0 & 0 & \cdots & 0 & 0\\
0 & \omega_{2} & 0 & \ddots & 0 & 0\\
0 & 0 & \ddots & \ddots & \ddots & \vdots\\
\vdots & \ddots & \ddots & \ddots & 0 & 0\\
0 & 0 & \ddots & 0 & \omega_{n-1} & 0\\
0 & 0 & \cdots & 0 & 0 & \omega_{n}
\end{array}\right]
\end{equation}
\begin{equation}
\tilde{F}[t]=\left[\begin{array}{cccccc}
0 & \eta_{1,2} & \cdots & \cdots & \cdots & \eta_{1,n}\\
\eta_{1,2}^{*} & 0 & \ddots & \ddots & \ddots & \vdots\\
\vdots & \ddots & 0 & \ddots & \ddots & \vdots\\
\vdots & \ddots & \ddots & \ddots & \ddots & \vdots\\
\vdots & \ddots & \ddots & \ddots & 0 & \eta_{n-1,n}\\
\eta_{1,n}^{*} & \cdots & \cdots & \cdots & \eta_{n-1,n}^{*} & 0
\end{array}\right]
\end{equation}
\begin{equation}
\sum_{j}\omega_{j}(t)=\omega_{1}+\omega_{2}+\cdots+\omega_{j}+\cdots+\omega_{n-1}+\omega_{n}=0
\end{equation}
Obviously, by construction, the Hamiltonian commutes with the constraint,
and is therefore constant. The difficulty with these problems in arbitrary
dimensions is that there is no obvious way in which the Lagrange multipliers,
representing total upper bounds on the energy, generalise to a matrix
of infinite dimension. One would hope that the sums would go over
to integrals in the correct fashion, but this remains to be shown. Of
course it is possible to create these types of general Hamiltonian-constraint
systems with the geodesic property, but it would be desirable to have
expressions for field densities for matrices of infinite dimension,
which represent the continuous degrees of freedom.
\noindent \begin{flushleft}
In conclusion, we have shown within this paper how one might go about
calculating these more difficult examples on finite dimensional systems,
as well as demonstrating the links between the matrix and vector dynamic
calculus. This paper has demonstrated that the use of finite matrix
methods and algebraic techniques may yield dividends, even when the
situation is graphically complex. We have used the quantum brachistochrone
equation to derive a number of new results on SU(3); these methods
may be extended to higher-dimensional matrix groups in order to find
time-optimal flows of quantum states. The results derived are intriguing,
especially the links between the various branches of mathematics.
Our principal results may be applied within the laboratory to achieve
fast data transfer; as the method of calculation is not reliant on
adiabatic transfer or the rotating wave approximation it is theoretically
the most general and efficient scheme that may be applied.
\par\end{flushleft}
\end{document}
\begin{document}
\begin{center}
{\Large \bf Minimal Faithful Upper-Triangular Matrix
Representations for Solvable Lie Algebras}
\end{center}
\begin{center}
{\bf Manuel Ceballos$^\dagger$, Juan N\'u\~nez$^\dagger$ and \'Angel F. Tenorio$^\ddagger$\footnote{Corresponding author. Phone: +34-954349354. Fax: +34-954349339}}
\end{center}
\begin{center}
\small $^\dagger$Departamento de Geometr\'{\i}a y Topolog\'{\i}a.\\
Facultad de Matem\'aticas. Universidad de Sevilla.\\
Aptdo. 1160. 41080-Seville (Spain).\\
Email: \{mceballos,jnvaldes\}@us.es\\
\quad \\
$^\ddagger$Dpto. de Econom\'{\i}a, M\'etodos Cuantitativos e H.$^{\rm a}$ Econ\'omica.\\
Escuela Polit\'ecnica Superior. Universidad Pablo de Olavide.\\
Ctra. Utrera km. 1, 41013-Seville (Spain).\\
Email: [email protected]
\end{center}
\begin{quote}
\noindent {\bf Abstract.} A well-known result on Lie Theory states that every finite-dimensional complex solvable Lie algebra can be represented as a matrix Lie algebra, with upper-triangular square matrices as elements. However, this result does not specify the minimal order of the matrices involved in such representations. Hence, the main goal of this paper is to revisit and implement a method to compute both that minimal order and a matrix representative for a given solvable Lie algebra. As an application of this procedure, we compute representatives for each solvable Lie algebra with dimension less than $6$.
\end{quote}
\noindent {\bf Key words and phrases:} solvable Lie algebra,
faithful upper-triangular matrix representation, algorithm.
\noindent {\bf 2000 Mathematics Subject Classification:}
17\,B\,30, 17\,B\,05, 17--08, 68W30, 68W05.
\section{Introduction}
Representation Theory of Lie algebras is a fundamental tool for the classification of Lie algebras and groups, which has broad applications to the analysis of continuous symmetries in Mathematics and Physics. More concretely, in Mathematics, the classification of Lie groups reveals symmetries in differential equations. With respect to Physics, representation theory yields natural connections between representations of Lie algebras and the properties of elementary particles.
Ado's Theorem states that given a finite-dimensional complex Lie algebra $\mathfrak{g}$, there exists a matrix algebra isomorphic to $\mathfrak{g}$ (see \cite{Jac} for the classical proof and \cite{Ner} for a very short alternative). In this way, every finite-dimensional complex Lie algebra can be represented as a Lie subalgebra of the complex general linear algebra $\mathfrak{gl}(n;{\mathbb{C}})$, of complex $n \times n$ matrices, for some $n \in {\mathbb{N}}$.
This paper focuses on the Lie algebra $\mathfrak{h}_n$ of $n\times n$ upper-triangular matrices. It is well known that every finite-dimensional solvable Lie algebra is isomorphic to a subalgebra of $\mathfrak{h}_n$, for some $n\in {\mathbb{N}}$ (see \cite[Proposition 3.7.3]{Var98}). Therefore, the following interesting question arises for a given finite-dimensional solvable Lie algebra $\mathfrak{g}$: determining the minimal $n \in \mathbb N$ such that $\mathfrak h_n$ contains $\mathfrak g$ as a Lie subalgebra; i.e. obtaining the minimal faithful representation of $\mathfrak g$ by using $n\times n$ upper-triangular matrices.
Several authors have studied the minimal dimension $\mu(\mathfrak g)$ to represent a given Lie algebra $\mathfrak g$ (see Burde \cite{Bu98}
, for instance). However, most of them have considered faithful $\mathfrak g$-modules instead of the particular subclass consisting of the Lie algebras~$\mathfrak h_n$. Therefore, the value of $\mu(\mathfrak g)$ is less than or equal to the dimension to be computed in this paper. Regarding this matter, matrix representations were computed by Ghanam et al. \cite{G} for low-dimensional nilpotent Lie algebras, but without studying minimality; indeed, some of the representations given there are not minimal.
The interest in these faithful representations is motivated, among other issues, by problems from Geometry and Topology. For example, Milnor \cite{Mil77} and Auslander \cite{Aus64,Aus77} studied generalizations of crystallographic groups in relation with this minimal value for matrix representations. Another motivation is based on the following result: the Lie algebra $\mathfrak{g}$ of an $n$-dimensional Lie group $G$ admitting a left-invariant affine structure satisfies $\mu(\mathfrak{g}) \leq n+1$.
Several papers throughout the literature deal with matrix representations of these solvable Lie algebras. For example, Benjumea et al. \cite{B} introduced an algorithmic procedure which explicitly computed a representative of minimal faithful unitriangular matrix representations for a given nilpotent Lie algebra and its associated Lie group. Subsequently, the complete list of minimal faithful unitriangular matrix representations was given by Benjumea et al.~\cite{MathScan08} for nilpotent Lie algebras of dimension less than 6. Finally, the matrix representation of filiform Lie algebras of dimension less than $9$ was computed in \cite{CNT}. Additionally, N\'u\~nez and Tenorio \cite{NuTeMega07} continued with this research and gave the outlines of an algorithmic procedure to compute explicit representatives of the minimal faithful matrix representation for solvable Lie algebras by using Lie algebras $\mathfrak h_n$, giving some examples of application by hand. This procedure adapted that given in Benjumea et al. \cite{B}, but the algorithm was neither completely debugged nor implemented and run.
The main goal of the current paper is to advance the above-mentioned research by debugging and implementing the algorithm sketched in \cite{NuTeMega07} in order to automate the computation of minimal faithful matrix representations for a given solvable Lie algebra starting from its law. As an application, we have also computed representations for each solvable Lie algebra of dimension less than $6$ as well as for others of higher dimension. To do so, we have used the classifications given by Mubarakzyanov and Turkowski (see \cite{AND,Mub,Mub1,Turi}).
This paper is structured as follows: Section $2$ reviews some well-known results on Lie Theory to be applied later. Thereupon, Section $3$ revisits the algorithmic procedure sketched in \cite{NuTeMega07} to compute minimal faithful representations for solvable Lie algebras by using upper-triangular matrices, incorporating a formulation of the algorithm which can be handled computationally, together with its implementation in MAPLE 12. To shorten the paper length, the computational method is only explicitly applied to two algebras in Section $4$. Just afterwards, Section $5$ gives an explicit list with representatives of minimal faithful matrix representations for solvable Lie algebras of dimension less than $6$.
\section{Preliminaries} \label{Sec1}
For an overall review on Lie algebras, the reader can consult \cite{Var98}. In the present section, we only recall some definitions and results about Invariant Theory and Lie algebras to be applied later. Throughout this article, we only consider finite-dimensional Lie algebras over the complex number field $\mathbb C$.
Given a Lie algebra $\mathfrak g$, its {\it derived series} is defined as follows
\begin{equation}\mathcal{C}_1(\mathfrak g)= \mathfrak g, \ \mathcal{C}_2(\mathfrak g)=[\mathfrak g,\mathfrak g], \ \mathcal{C}_3(\mathfrak g)=[\mathcal{C}_2(\mathfrak g),\mathcal{C}_2(\mathfrak g)], \ \dots, \ \mathcal{C}_k(\mathfrak g)=[\mathcal{C}_{k-1}(\mathfrak g), \mathcal{C}_{k-1}(\mathfrak g)], \ \dots \end{equation}
Additionally, the Lie algebra $\mathfrak g$ is said to be {\it solvable} if there exists a natural integer $m$ such that $\mathcal{C}_m(\mathfrak g) \equiv 0$. The solv-index of $\mathfrak g$ is precisely the value of $m\in\mathbb N$ such that $\mathcal{C}_m(\mathfrak g) = 0$ and $\mathcal{C}_{m-1}(\mathfrak g) \neq 0$.
The relation between the derived series of a given Lie algebra $\mathfrak g$ and those of its Lie subalgebras is given as follows
\begin{prp}\label{PropCon}
If $\mathfrak h$ is a Lie subalgebra of a given Lie algebra $\mathfrak g$, then $\mathcal{C}_{k}(\mathfrak h)\subseteq
\mathcal{C}_{k}(\mathfrak g)$, for all $k \in \mathbb N$.
\end{prp}
Given $n\in {\mathbb{N}}$, the complex solvable Lie algebra $\mathfrak{h}_n$ consists of $n \times n$ upper-triangular matrices; i.e. its vectors are expressed as
\begin{equation}
h_{n}(x_{r,s})=\left(
\begin{array}{cccc}
x_{11} & x_{12} & \cdots & x_{1n} \\
0 & x_{22} & \cdots & x_{2n} \\
\vdots & \vdots & \ddots & \vdots \\
0 & \cdots & 0 & x_{nn}
\end{array} \right), \qquad \mathrm{with \ } x_{r,s} \in \mathbb C, \ \mathrm{for} \ 1\le r\le s\le n.
\end{equation}
Lie algebra $\mathfrak{h}_n$ has a basis $\mathcal B_n$ consisting of vectors $X_{i,j}=h_n(x_{r,s})$ with $1 \leq i \leq j \leq n$ and such
that
\begin{equation}
x_{r,s}=\left\{\begin{array}{ll} 1, & \mbox{if
}(r,s) = (i,j),\\ 0, & \mbox{if }(r,s)\neq (i,j).
\end{array} \right.
\end{equation}
The dimension of $\mathfrak{h}_n$ is $\frac{n(n+1)}{2}$ and the nonzero brackets with respect to basis $\mathcal B_n$ are
\begin{eqnarray}
\label{EcBrack1} [X_{i,j},X_{j,k}] = X_{i,k}, & \ \forall\ 1 \leq i < j < k \leq n;\\
\label{EcBrack2} {[}X_{i,i}, X_{i,j}] = X_{i,j}, & \ \forall\ 1 \leq i < j \leq n;\\
\label{EcBrack3} {[}X_{k,i}, X_{i,i}] = X_{k,i}, & \ \forall\ 1 \leq k < i \leq n.
\end{eqnarray}
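For instance (a small illustration of the notation above), for $n=2$ the basis is $\mathcal B_2=\{X_{1,1},X_{1,2},X_{2,2}\}$ and the only nonzero brackets are $[X_{1,1},X_{1,2}]=X_{1,2}$ and $[X_{1,2},X_{2,2}]=X_{1,2}$; hence $\dim\mathfrak{h}_2=3$ and $\mathcal{C}_2(\mathfrak{h}_2)$ is spanned by $X_{1,2}$.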
\section{Computing Minimal Matrix Representations}
\label{Algorithmic method}
This section continues the work started in \cite{NuTeMega07} and introduces an algorithmic method to compute minimal matrix representations of solvable Lie algebras in such a way that it can be handled by computer algebra. After explaining the algorithm step by step, it is implemented in Maple 12 and applied to several examples.
Given a Lie algebra $\mathfrak g$, a {\it representation} of $\mathfrak g$ in $\mathbb C^n$ is a homomorphism of Lie algebras $\phi:\mathfrak g \rightarrow \mathfrak{gl}(n;\mathbb C)$. Then $n\in\mathbb N$ is called the {\it dimension} of this representation. Ado's theorem states that every finite-dimensional Lie algebra over a field of characteristic zero (as in the case of $\mathbb C$) has a linear injective representation on a finite-dimensional vector space; that is, a {\it faithful representation}.
Usually, representations are defined as {\it $\mathfrak g$-modules}, consisting of homomorphisms of Lie algebras from $\mathfrak g$ to Lie algebra $\mathfrak{gl}(V)$ of endomorphisms over an arbitrary $n$-dimensional vector space $V$ (like in \cite{F-H91}).
Regarding minimal representations of Lie algebras, Burde \cite{Bu98} introduced the invariant $\mu(\mathfrak g)$ for an
arbitrary Lie algebra $\mathfrak g$
$$\mu(\mathfrak g)={\rm min}\{{\rm dim} (M) \ | \ M \mbox{ is a faithful } \mathfrak g\mbox{-module}\}.$$
In this section, faithful matrix representations of solvable Lie algebras are studied. Moreover, we are interested in minimal faithful matrix representations with a particular restriction: the representation is contained in $\mathfrak h_n$ for some $n\in\mathbb N$. In this way, given a solvable Lie algebra $\mathfrak g$, we want to compute the minimal value $n$ such that $\mathfrak h_n$ contains a Lie subalgebra isomorphic to $\mathfrak g$. This value is also an invariant of $\mathfrak g$ and its expression is given by
$$\bar{\mu}(\mathfrak g)={\rm min}\{n\in \mathbb N \ | \ \exists \mbox{ subalgebra of } \mathfrak h_n \, \mbox{ isomorphic to } \mathfrak g\}.$$
In general, invariants $\mu(\mathfrak g)$ and $\bar{\mu}(\mathfrak g)$ can be different from each other.
Next, we show the algorithmic method to compute minimal faithful matrix representations for those algebras by using Lie algebras $\mathfrak h_n$. The minimality must be understood in the following sense: There exists a faithful matrix representation of $\mathfrak g$ in $\mathfrak{h}_n$, but not in $\mathfrak{h}_{n-1}$.
To do so, we give a step-by-step explanation of the algorithm used to determine these minimal representations for a given solvable Lie
algebra $\mathfrak{g}$ of dimension $n$.
\begin{enumerate}
\item According to Proposition \ref{PropCon}, we compute the first natural integer $k$ such that the derived series of $\mathfrak{h}_k$ is compatible with that associated with~$\mathfrak g$.
\item We search for a Lie subalgebra of $\mathfrak h_k$ isomorphic to $\mathfrak g$, with $k$ as low as possible. To do so, the vectors in the basis $\{e_i\}_{i=1}^n$ of $\mathfrak g$ are expressed as the following linear combinations of the basis $\mathcal{B}_k$
\begin{equation}\label{comblineal} e_h = \sum_{1\le i \le j\le k} \lambda_{i,j}^h X_{i,j}, \quad {\rm for} \,\, 1 \leq h \leq n. \end{equation}
\item The bracket $[e_i,e_j]$ is computed for $1\le i<j\le n$. When imposing the law of $\mathfrak{g}$, a system of non-linear equations is obtained by comparing the expressions coordinate by coordinate with respect to the basis $\mathcal B_k$.
\item We solve the system of equations; a solution of the system provides us with one of the representations sought for the Lie algebra $\mathfrak{g}$, provided that the solution corresponds to a set of linearly independent vectors. When no solution is obtained, the Lie algebra $\mathfrak{g}$ cannot be represented as a Lie subalgebra of $\mathfrak{h}_k$. In this case, we go back to Step 2 and repeat each step with the Lie algebra $\mathfrak{h}_{k+1}$.
\end{enumerate}
The representation obtained for the Lie algebra $\mathfrak{g}$ is minimal because we start with the first value of $k$ allowed by Step 1 and increase $k$ by one unit whenever no representation can be obtained from $\mathfrak{h}_k$.
Obviously, the set of solutions of the system in the last step depends on the Lie algebra which the algorithm is applied to. As an example of application, we have minimally represented solvable Lie algebras with dimension less than $6$ in Section \ref{Representation}. The set of solutions in Step~4 has been computed with the command {\tt solve} in the symbolic computation package Maple $12$. This command works efficiently with polynomial equations, receives as inputs the list of equations and the list of variables, and returns as output the algebraic expression of the set of solutions.
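For instance, a toy call (unrelated to the Lie algebras considered here and included only to illustrate the input/output format of {\tt solve}) is
{\footnotesize
\begin{verbatim}
> solve({a*b - 1 = 0, a + b - 2 = 0}, {a, b});
                                 {a = 1, b = 1}
\end{verbatim}}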
Furthermore, in order to compute a particular solution of the previous system, we have searched for one having as many coefficients $\lambda_{i,j}^h$ equal to $0$ as possible. In this way, a coefficient is assumed to be equal to zero when it does not appear in the relations obtained from the equations. This will be a natural representative of the Lie algebra $\mathfrak g$.
\subsection{Implementation}
Next, we show the implementation of the different routines in order to apply the previous method. They have been written using the symbolic computation package MAPLE $12$, loading the libraries {\tt DifferentialGeometry, LieAlgebras} to activate commands related to Lie algebras.
First, the routine {\tt law$\_$h} is implemented to compute the law of the solvable Lie algebra $\mathfrak{h}_n$. This routine receives as input the value of $n$ and returns the list of brackets expressing the law of $\mathfrak{h}_n$ with respect to the basis $\{e_1,e_2, \ldots, e_{\frac{n (n+1)}{2}}\}$, which corresponds to $\{X_{1,1}, \ldots, X_{1,n}, X_{2,2}, \ldots, X_{2,n}, \ldots, X_{n,n}\}$. For the implementation, a list {\tt B} saves the basis of $\mathfrak{h}_n$ and {\tt S} keeps all the (non-zero) brackets involved in the law. To carry out the computations, three different loops are programmed to find and save the three different types of non-zero brackets in Eqs.~(\ref{EcBrack1})--(\ref{EcBrack3}). Finally, the law of the algebra is saved in the variable {\tt Ext1} to be loaded in a later routine.
{\footnotesize
\begin{verbatim}
> law_h:=proc(n)
> local B, S;
> B:=[]; S:=[];
> for i from 1 to n do (*Constructing the basis*)
> for j from i to n do
> B:=[op(B),X[i,j]];
> end do;
> end do;
> for i from 1 to n-1 do (*Finding brackets in Eq. (5)*)
> for j from i+1 to n do
> S:=[op(S),[X[i,i],X[i,j]]=X[i,j]];
> end do;
> end do;
> for i from 1 to n-1 do (*Finding brackets in Eq. (6)*)
> for j from i+1 to n do
> S:=[op(S),[X[i,j],X[j,j]]=X[i,j]];
> end do;
> end do;
> for i from 1 to n-2 do (*Finding brackets in Eq. (4)*)
> for j from i+1 to n-1 do
> for k from j+1 to n do
> S:=[op(S),[X[i,j],X[j,k]]=X[i,k]];
> end do;
> end do;
> end do;
> return LieAlgebraData(S,B,Ext1, "LieAlgebraData"); (*Defining the algebra*)
> end proc:
\end{verbatim}}
Next, the routine {\tt DerivedSeries$\_$h} receives as input the value of $n$ and computes a list with the dimension of each term in the derived series of $\mathfrak{h}_n$. Let us note that we have to distinguish two different cases, the first being when $n$ is small; otherwise, we only have to consider powers of $2$, as can be proved by a straightforward inductive reasoning.
{\footnotesize
\begin{verbatim}
> DerivedSeries_h:=proc(n)
> local L;
> L:=[n*(n+1)/2];
> if n<4 then
> for i from 1 to n do
> L:=[op(L),(n-i)*(n-i+1)/2];
> end do;
> else
> for i from 0 by 1 while 2^i<n do
> L:=[op(L),(n-2^i)*(n-2^i +1)/2];
> end do;
> end if;
> if member(0,L)=false then L:=[op(L),0]; end if;
> return L;
> end proc:
\end{verbatim}}
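As an illustration, the outputs below are what one expects from the routine above; they agree with the derived series of $\mathfrak{h}_2$ and $\mathfrak{h}_3$ computed explicitly in the examples of the next subsection.
{\footnotesize
\begin{verbatim}
> DerivedSeries_h(2);
                                   [3, 1, 0]
> DerivedSeries_h(3);
                                 [6, 3, 1, 0]
\end{verbatim}}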
Now, we define solvable Lie algebra $\mathfrak{g}$ according to the following notation
{\footnotesize
\begin{verbatim}
> L:= _DG([["LieAlgebra", g, [n]], [A]]);
> DGsetup(L);
\end{verbatim}
}
\noindent where $n$ is the dimension of $\mathfrak{g}$ (a value inserted by the user) and {\tt A} is a list containing information about the structure constants of the law of $\mathfrak{g}$. Elements in {\tt A} must be of the form {\tt [[i, j, k], cijk]} where {\tt cijk} is the structure constant $\lambda_{i,j}^k$ corresponding to the coefficient of $e_k$ in the bracket $[e_i,e_j]$. Once these data are loaded, we can operate over Lie algebra $\mathfrak{g}$.
The next routine is called {\tt DerivedSeries} and computes a list with the dimension of each ideal in the derived series of $\mathfrak{g}$.
{\footnotesize
\begin{verbatim}
g > DerivedSeries:=proc()
g > local k;
g > C[1]:=[seq(e||i, i=1..n)]; C[2]:=DerivedAlgebra(); (*Initiating derived series*)
g > if C[2]=[] then return "Abelian Lie algebra"; end if; (*Testing abelian Lie algebras*)
g > for i from 3 by 1 while C[i]<> [] do (*Constructing derived series*)
g > C[i]:=DerivedAlgebra(C[i-1]);
g > if C[i]=[] then return [seq(nops(C[j]),j=1..i)]; end if;
g > end do;
g > end proc:
\end{verbatim}
}
Next, the solv-index of $\mathfrak{g}$ is saved in the variable called {\tt index$\_$g} and all the ideals of the derived series are also defined. To do so, we execute the following sentences
{\footnotesize
\begin{verbatim}
g > assign(index_g,nops(DerivedSeries()));
g > C[1](g):=[seq(e||i, i=1..n)];
g > C[2](g):=DerivedAlgebra();
g > for i from 3 to index_g do
g > C[i](g):=DerivedAlgebra(C[i-1](g));
g > end do;
\end{verbatim}}
The routine {\tt DimRepresentation} computes the minimal dimension for a matrix representation of $\mathfrak{g}$ by using Lie algebras $\mathfrak{h}_n$. To implement this routine, we compare the dimension sequences of the derived series of both $\mathfrak{g}$ and $\mathfrak{h}_n$ by using the routines {\tt DerivedSeries$\_$h} and {\tt DerivedSeries}. The output is the minimal {\tt k}$\in\mathbb N$ such that the condition in Step 1 is satisfied.
{\footnotesize
\begin{verbatim}
g > DimRepresentation:=proc()
g > L:=DerivedSeries();
g > k:=0; d:=nops(L);
g > if d <= 4 then (*Initiating dimension of representation*)
g > n:=d-1; else n:=d;
g > end if;
g > while k=0 do
g > M:=DerivedSeries_h(n);
g > for i from 1 to nops(L) do
g > if L[i]<=M[i] then k:=k; (*Comparing dimension sequences of derived algebras*)
g > else k:=k+1;
g > end if;
g > end do;
g > if k>0 then k:=0; (*Checking compatibility between dimension sequences*)
g > else return n; (*Returning dimension of representation*)
g > end if;
g > n:=n+1;
g > end do;
g > end proc:
\end{verbatim}}
After completing Step 1 in the algorithm, we need to express all the vectors in the basis of $\mathfrak{g}$ as a linear combination of basis $\mathcal B_k$ of $\mathfrak{h}_k$, where $k$ is the output of the routine {\tt DimRepresentation}. Therefore, we start loading Lie algebra $\mathfrak{h}_k$ with the sentence
{\footnotesize
\begin{verbatim}
g > DGsetup(law_h(k),[x],[a])
\end{verbatim}}
In this sentence, {\tt [x]} is used to denote the basis vectors in $\mathcal B_k$ as $\{x_i\}_{i=1}^{\frac{k (k+1)}{2}}$ instead of $\{e_i\}_{i=1}^{\frac{k (k+1)}{2}}$, since we need different notations for the bases of both Lie algebras $\mathfrak{g}$ and $\mathfrak{h}_k$. The notation {\tt [a]} corresponds to the list of structure constants defining the law of the Lie algebra $\mathfrak h_k$, expressed as in (\ref{EcBrack1}), (\ref{EcBrack2}) and (\ref{EcBrack3}).
From this point, we can also work over the Lie algebra $\mathfrak{h}_k$, named {\tt L1} by the package. Next, as we did with Lie algebra $\mathfrak{g}$, we define all the ideals of the derived series of $\mathfrak{h}_k$ as follows
{\footnotesize
\begin{verbatim}
L1 > C[1](h):=[seq(x||i, i=1..k*(k+1)/2)];
L1 > C[2](h):=DerivedAlgebra();
L1 > for i from 3 to nops(DerivedSeries_h(k)) do C[i](h):=DerivedAlgebra(C[i-1](h)); end do;
\end{verbatim}}
Next, we express all the vectors $\{e_i\}_{i=1}^n$ from the basis of $\mathfrak{g}$ as a linear combination of basis $\mathcal B_k=\{x_i\}_{i=1}^{\frac{k (k+1)}{2}}$ of $\mathfrak{h}_k$. To do so, we first implement a subroutine called {\tt listposi}, which computes the position of an element within a list. Then, we also implement the routine {\tt expr}. The latter uses the derived series of both $\mathfrak{g}$ and $\mathfrak{h}_k$ and, by applying Proposition~\ref{PropCon}, returns as output two lists: the first contains all the expressions according to the second step of the method, that is, Equation (\ref{comblineal}); and in the second, the conditions over the coefficients so that non-zero vectors are considered.
{\footnotesize
\begin{verbatim}
L1 > listposi:=proc(a,L)
L1 > for i from 1 to nops(L) do
L1 > if a=L[i] then
L1 > return i
L1 > end if;
L1 > end do;
L1 > end proc:
\end{verbatim}}
Let us note that, in order to implement the routine {\tt expr}, we have defined several cases by comparing the derived series of $\mathfrak{g}$ and $\mathfrak{h}_k$. With this routine, we express the basis vectors of $\mathfrak{g}$ as linear combinations of the basis vectors of $\mathfrak{h}_k$.
{\footnotesize
\begin{verbatim}
L1 > expr:=proc()
L1 > L:=[];M:=[];
L1 > for i from 1 to index_g - 1 do
L1 > for j from 1 to nops(C[i](g)) do
L1 > if member(C[i](g)[j],C[i+1](g))=false then
L1 > if C[i+1](g)<>[] then
L1 > L:=[op(L),C[i](g)[j]=sum(a[j,k]*C[i](h)[k],k=1..nops(C[i](h)))];
L1 > M:=[op(M),sum(a[j,k]^2,k=1..nops(C[i](h)))<>0];
L1 > end if;
L1 > else member(C[i](g)[j],C[1](g),'p');
L1 > N:=[seq(listposi(C[i+1](h)[k],C[1](h)),k=1..nops(C[i+1](h)))];
L1 > L:=[op(L),C[i](g)[j]=sum(a[p,N[k]]*C[i+1](h)[k],k=1..nops(C[i+1](h)))];
L1 > M:=[op(M),sum(a[p,N[k]]^2,k=1..nops(C[i+1](h)))<>0];
L1 > end if;
L1 > end do;
L1 > end do;
L1 > return L,M;
L1 > end proc:
\end{verbatim}}
After expressing the basis of $\mathfrak{g}$ with respect to the basis $\mathcal B_k$ of $\mathfrak{h}_k$, we impose the law of both Lie algebras on the previous expressions. In this way, we implement the routine {\tt Listeq}, which returns two lists: the first one containing the elements that must be equal to zero; and the second one with the conditions that assure the linear independence of the basis.
{\footnotesize
\begin{verbatim}
L1 > Listeq:=proc()
L1 > R:=[];
L1 > for i from 1 to nops(expr()[1])-1 do
L1 > for j from i+1 to nops(expr()[1]) do
L1 > if BracketOfSubspaces([lhs(expr()[1][i])],[lhs(expr()[1][j])])=[] then
L1 > R:=[op(R),op(BracketOfSubspaces([rhs(expr()[1][i])],[rhs(expr()[1][j])]))];
L1 > else R:=[op(R),op(BracketOfSubspaces([lhs(expr()[1][i])],[lhs(expr()[1][j])]))-
op(BracketOfSubspaces([rhs(expr()[1][i])],[rhs(expr()[1][j])]))];
L1 > end if;
L1 > end do;
L1 > end do;
L1 > return [op(eval(R,expr()[1]))],[op(expr()[2])];
L1 > end proc:
\end{verbatim}}
Next, we define two variables, {\tt Listexp} and {\tt Listcond}, to save the two outputs of the routine {\tt Listeq} respectively. Finally, we implement the routine {\tt sys} to solve the system of equations resulting from the previous expressions. This routine must receive as input the lists {\tt Listexp} and {\tt Listcond}, returning as output the set of solutions which determine the coefficients of the representation of $\mathfrak{g}$ by using Lie algebra $\mathfrak{h}_k$.
{\footnotesize
\begin{verbatim}
L1 > sys:=proc(L,M)
L1 > local Q;
L1 > Q:=[];
L1 > for i from 1 to nops(L) do
L1 > Q:=[op(Q),seq(coeff(L[i],x||j),j=1..nops(C[1](h)))];
L1 > end do;
L1 > Q:=[op(Q),op(M)];
L1 > return solve(Q);
L1 > end proc:
\end{verbatim}}
If no solution is obtained, then it is not possible to represent $\mathfrak{g}$ as a Lie subalgebra of the Lie algebra $\mathfrak{h}_k$ and we must try with the next Lie algebra: $\mathfrak{h}_{k+1}$. Therefore, we would have to repeat the process from the execution of the sentence {\tt DGsetup(law$\_$h(k),[x],[a])}, but with {\tt k} replaced by {\tt k+1}, so that the Lie algebra $\mathfrak{h}_{k+1}$ is loaded.
\subsection{Examples of application}
Next, we show an example with the $3$-dimensional solvable Lie algebra with law $[e_1,e_3] = e_2$.
We must run all the routines implemented in the previous section. Here, we only reproduce the most important outputs and those sentences to be modified for this specific example. To define the solvable Lie algebra $\mathfrak{g}$, the following sentence is run
{\footnotesize
\begin{verbatim}
> L:= _DG([["LieAlgebra", g, [3]], [[[1, 3, 2], 1]]]);
> DGsetup(L);
\end{verbatim}}
We must set the value $n=3$ in {\tt DerivedSeries} and then execute the following sentences
{\footnotesize
\begin{verbatim}
> DerivedSeries();
[3, 1, 0]
> assign(index_g,nops(DerivedSeries()));
> index_g;
3
> C[1](g):=[seq(e||i, i=1..3)];
C[1](g):=[e1,e2,e3]
> C[2](g):=DerivedAlgebra();
C[2](g):=[e2]
> for i from 3 to index_g do C[i](g):=DerivedAlgebra(C[i-1](g));
C[3](g):=[]
\end{verbatim}}
At this point, loading the routine {\tt DimRepresentation}, we obtain
{\footnotesize
\begin{verbatim}
> DimRepresentation();
2
\end{verbatim}}
Therefore, we must use $k=2$ in Step 1 of the algorithm. We look for a representation of $\mathfrak{g}$ as a Lie subalgebra of $\mathfrak{h}_2$. Now we start loading Lie algebra $\mathfrak{h}_2$ and we define all the terms of its derived series as follows
{\footnotesize
\begin{verbatim}
g > DGsetup(law_h(2),[x],[a])
Lie algebra: L1
L1 > MultiplicationTable("LieBracket");
[[x1, x2] = x2, [x2, x3] = x2]
L1 > C[1](h):=[seq(x||i, i=1..2*3/2)];
C[1](h):=[x1,x2,x3]
L1 > C[2](h):=DerivedAlgebra();
C[2](h):=[x2]
L1 > for i from 3 to nops(DerivedSeries_h(2)) do C[i](h):=DerivedAlgebra(C[i-1](h));end do;
C[3](h):=[]
\end{verbatim}}
By executing the routine {\tt expr}, we obtain the following output
{\footnotesize
\begin{verbatim}
L1 > expr();
[e1=a[1,1]*x1+a[1,2]*x2+a[1,3]*x3,e2=a[2,2]*x2,e3=a[3,1]*x1+a[3,2]*x2+a[3,3]*x3],
[a[1,1]^2+a[1,2]^2+a[1,3]^2<>0,a[2,2]^2<>0,a[3,1]^2+a[3,2]^2+a[3,3]^2<>0]
\end{verbatim}}
The output of {\tt Listeq} is
{\footnotesize
\begin{verbatim}
L1 > Listeq();
[(-a[1,3]*a[2,2]+a[1,1]*a[2,2])*x2,a[2,2]*x2-(-a[1,3]*a[3,2]+a[1,2]*a[3,3]-
a[1,2]*a[3,1]+a[1,1]*a[3,2])*x2,(a[2,2]*a[3,3]-a[2,2]*a[3,1])*x2],
[a[1,1]^2+a[1,2]^2+a[1,3]^2<>0,a[2,2]^2<>0,a[3,1]^2+a[3,2]^2+a[3,3]^2<>0]
\end{verbatim}}
After defining the variables {\tt Listexp} and {\tt Listcond} from the previous output, the routine {\tt sys} is executed as follows
{\footnotesize
\begin{verbatim}
L1 > sys(Listexp,Listcond);
\end{verbatim}}
Since no answer is returned, there is no solution for the underlying system. Hence, Lie algebra $\mathfrak{g}$ cannot
be represented as a Lie subalgebra of $\mathfrak{h}_2$. Thus, the process must now be repeated from the execution of the sentence
{\tt DGsetup(law$\_$h(2),[x],[a])}, where $\mathfrak{h}_2$ is replaced with $\mathfrak{h}_3$.
{\footnotesize
\begin{verbatim}
g > DGsetup(law_h(3),[x],[a])
Lie algebra: L1
L1 > MultiplicationTable("LieBracket");
[[x1,x2]=x2,[x1,x3]=x3,[x2,x4]=x2,[x2,x5]=x3,[x3,x6]=x3,[x4,x5]=x5,[x5,x6]=x5]
L1 > C[1](h):=[seq(x||i, i=1..3*4/2)];
C[1](h):=[x1,x2,x3,x4,x5,x6]
L1 > C[2](h):=DerivedAlgebra();
C[2](h):=[x2,x3,x5]
L1 > for i from 3 to nops(DerivedSeries_h(3)) do
L1 > C[i](h):=DerivedAlgebra(C[i-1](h));
L1 > end do;
C[3](h):=[x3]
C[4](h):=[]
\end{verbatim}}
This time, the variables {\tt Listexp} and {\tt Listcond} are defined
from the output of {\tt Listeq} as follows
{\footnotesize
\begin{verbatim}
L1 > Listexp:=[(-a[1,4]*a[2,2]+a[1,1]*a[2,2])*x2+(-a[1,6]*a[2,3]-a[1,5]*a[2,2]+
a[1,2]*a[2,5]+a[1,1]*a[2,3])*x3+(-a[1,6]*a[2,5]+a[1,4]*a[2,5])*x5,
a[2,2]*x2+a[2,3]*x3+a[2,5]*x5-(-a[1,4]*a[3,2]+a[1,2]*a[3,4]-a[1,2]*a[3,1]+
a[1,1]*a[3,2])*x2-(-a[1,6]*a[3,3]-a[1,5]*a[3,2]+a[1,3]*a[3,6]-a[1,3]*a[3,1]+
a[1,2]*a[3,5]+a[1,1]*a[3,3])*x3-(-a[1,6]*a[3,5]+a[1,5]*a[3,6]-a[1,5]*a[3,4]+
a[1,4]*a[3,5])*x5,(a[2,2]*a[3,4]-a[2,2]*a[3,1])*x2+(-a[2,5]*a[3,2]+
a[2,3]*a[3,6]-a[2,3]*a[3,1]+a[2,2]*a[3,5])*x3+(a[2,5]*a[3,6]-a[2,5]*a[3,4])*x5]:
\end{verbatim}}
{\footnotesize
\begin{verbatim}
L1 > Listcond:=[a[1,1]^2+a[1,2]^2+a[1,3]^2+a[1,4]^2+a[1,5]^2+a[1,6]^2<>0,a[2,2]^2+
a[2,3]^2+a[2,5]^2<>0,a[3,1]^2+a[3,2]^2+a[3,3]^2+a[3,4]^2+a[3,5]^2+a[3,6]^2<>0];
\end{verbatim}}
Finally, the routine {\tt sys} is executed with {\tt Listexp} and {\tt Listcond} as parameters
{\footnotesize
\begin{verbatim}
L1 > sys(Listexp,Listcond);
{a[1,1]=a[1,6],a[1,2]=a[1,2],a[1,3]=a[1,3],a[1,4]=a[1,6],a[1,5]=a[1,5],a[1,6]=a[1,6],
a[2,2]=0,a[2,3]=-a[1,5]*a[3,2]+a[1,2]*a[3,5],a[2,5]=0,a[3,1]=a[3,1],a[3,2]=a[3,2],
a[3,3]=a[3,3],a[3,4]=a[3,1],a[3,5]=a[3,5],a[3,6]=a[3,1]},
{a[1,1]=a[1,1],a[1,2]=a[1,2],a[1,3]=a[1,3],a[1,4]=a[1,1],a[1,5]=a[1,5],a[1,6]=a[1,1],
a[2,2]=0,a[2,3]=-a[1,5]*a[3,2]+a[1,2]*a[3,5],a[2,5]=0,a[3,1]=a[3,1],a[3,2]=a[3,2],
a[3,3]=a[3,3],a[3,4]=a[3,1],a[3,5]=a[3,5],a[3,6]=a[3,1]}
\end{verbatim}}
As a particular solution of this system, we obtain the representative
$$e_1=-x_5, \quad e_2=x_3, \quad e_3=x_2$$
or, by considering the original notation for basis $\mathcal{B}_3$,
$$e_1 = -X_{2,3}, \quad e_2 = X_{1,3}, \quad e_3 = X_{1,2}$$
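Indeed, a direct check with the brackets (\ref{EcBrack1})--(\ref{EcBrack3}) shows that $[e_1,e_3]=[-X_{2,3},X_{1,2}]=[X_{1,2},X_{2,3}]=X_{1,3}=e_2$, while $[e_1,e_2]=[e_2,e_3]=0$, so these three linearly independent vectors reproduce the law of $\mathfrak{g}$ inside $\mathfrak{h}_3$.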
To conclude this section, we would like to point out that our algorithmic method is not only valid for low-dimensional solvable Lie algebras, but also provides minimal faithful representations for solvable Lie algebras of higher dimension. In this sense, we have computed some other examples corresponding to solvable Lie algebras whose dimension is greater than the highest dimension already classified, namely dimension $n\ge 7$. More concretely, Kobel~\cite{Kobel} gave the list of 7-dimensional solvable Lie algebras with codimension 1. From that list, we have considered the one with law
$$[e_2,e_7]=e_3, [e_3,e_7]=e_4, [e_4,e_7]=e_5, [e_5,e_7]=e_6, [e_6,e_7]=e_6$$
and our algorithm returns the following representation
$$e_1=X_{1,1}, \, e_2= X_{2,3},\, e_3=X_{2,4},\, e_4=X_{2,5},\, e_5=X_{2,6},$$
$$ e_6=X_{2,6}, \, e_7=X_{3,4}+X_{4,5}+X_{5,6}+X_{6,6}$$
We have also considered a second example consisting of the $8$-dimensional solvable Lie algebra with a 4-dimensional abelian ideal and law
$$[e_1,e_2]=e_3, [e_2,e_5]=e_6, [e_4,e_5]=e_8, [e_1,e_6]=e_7, [e_2,e_6]=e_8, [e_3,e_5]=e_7.$$
In this case, our algorithmic method gives this representation
$$e_1=X_{1,4}+X_{3,5}, \, e_2= X_{1,4}+X_{5,6},\, e_3=X_{3,6},\, e_4=-2X_{1,2},$$
$$e_5=X_{1,3}+X_{2,6}+X_{4,5}, \, e_6=X_{1,5}-X_{4,6}, \, e_7=-X_{1,6}, \, e_8=-2X_{1,6}.$$
\section{Solvable Lie algebras of dimension less than $6$}
\label{Representation}
This section is devoted to applying the algorithm implemented in Section \ref{Algorithmic method} in order to obtain a minimal faithful upper-triangular matrix representation for each solvable Lie algebra of dimension less than 6. In addition, we compute such representations for several important families of $n$-dimensional solvable Lie algebras. Tables \ref{class} to \ref{TableClas5DNDRSNNLA2} show the classification of solvable Lie algebras of dimension less than $6$ given in \cite{AND,Mub}, taking into account that we have only written the nonzero brackets in the law of each Lie algebra; whereas Tables \ref{repres} to \ref{TableRepres5DNDRSNNLA2} contain a representative for each algebra in the previous tables.
By virtue of these tables, we can state the following
\begin{prp}\label{ThrMain1}\quad
A minimal faithful representation by upper-triangular matrices for each solvable Lie algebra of dimension less than $6$, together with the dimension of such a minimal representation, is given in Tables \ref{repres} to \ref{TableRepres5DNDRSNNLA2}. Moreover, such representations can be obtained with a natural representative.
\end{prp}
Next, we show several results to determine a representative for minimal faithful upper-triangular matrix representations of three different families of solvable Lie algebras. Proposition \ref{solvablenon-nilpotent} deals with a family of solvable non-nilpotent Lie algebras. Then, Proposition \ref{heisenberg} computes a minimal representative for Heisenberg algebras. These Lie algebras constitute a special subclass of nilpotent Lie
algebras and are very interesting for their applications to both the theory of nilpotent Lie algebras itself and Theoretical Physics. Finally, Proposition \ref{modelfiliform} provides a minimal representation for model filiform Lie algebras. These algebras are the most structured Lie
algebras in the nilpotent class and were introduced by Vergne \cite{Vergne} in 1966.
These propositions can be proved by applying the algorithm considered in Section \ref{Algorithmic method} from a theoretical point of view and not by running the implementation.
\begin{prp}\label{solvablenon-nilpotent}
Let $\mathfrak{s}_n$ be an $n$-dimensional solvable Lie algebra
with basis $\{e_i\}_{i=1}^n$ and law $[e_i,e_n]=e_i$, for $1 \leq i < n$. Then, $\overline{\mu}(\mathfrak{s}_n) = n$. In fact, a
natural representative of $\mathfrak{s}_n$ is given by
$$\{e_{j}=X_{1,j+1}\}_ {j=1}^{j=n-1} \cup \{e_{n}=-X_{1,1}\}$$
\end{prp}
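For example (writing out the case $n=3$ as an illustration), the representative is $e_1=X_{1,2}$, $e_2=X_{1,3}$, $e_3=-X_{1,1}$; then $[e_1,e_3]=[X_{1,1},X_{1,2}]=X_{1,2}=e_1$ and $[e_2,e_3]=[X_{1,1},X_{1,3}]=X_{1,3}=e_2$, while $[e_1,e_2]=0$, which is the law of $\mathfrak{s}_3$ in Proposition \ref{solvablenon-nilpotent} and agrees, up to interchanging $e_1$ and $e_2$, with the representative of $\mathfrak{s}_3^3$ in Table \ref{repres}.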
\begin{prp}\label{heisenberg}
Let $\mathfrak{H}_{2n+1}$ be the $(2n+1)$-dimensional Heisenberg algebra with basis $\{e_i\}_{i=1}^{2n+1}$ and law $[e_{2i},e_{2i+1}]=e_1$, for $1 \leq i \leq n$. Then, $\overline{\mu}(\mathfrak{H}_{2n+1}) = n+2$. Moreover, a
natural representative of $\mathfrak{H}_{2n+1}$ is given by
$$\{e_{2j+1}=X_{j+1,n+2}\}_{j=0}^{n} \cup \{e_{2k}=X_{1,k+1}\}_{k=1}^{n}$$
\end{prp}
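For example (the case $n=1$ written out), the $3$-dimensional Heisenberg algebra $\mathfrak{H}_3$ with $[e_2,e_3]=e_1$ is represented inside $\mathfrak{h}_3$ by $e_1=X_{1,3}$, $e_2=X_{1,2}$, $e_3=X_{2,3}$, since $[X_{1,2},X_{2,3}]=X_{1,3}$ and all the remaining brackets vanish; this agrees, up to interchanging $e_1$ and $e_2$, with the representative given for $\mathfrak{s}_3^2$ in Table \ref{repres}, in accordance with the value $\overline{\mu}(\mathfrak{H}_3)=n+2=3$ stated in the proposition.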
\begin{prp}\label{modelfiliform}
Let $\mathfrak{f}_{n}$ be the $n$-dimensional filiform Lie algebra
with basis $\{e_i\}_{i=1}^{n}$ and law $[e_1,e_h]=e_{h-1}$, for $3 \leq h \leq n$. Then, $\overline{\mu}(\mathfrak{f}_{n}) = n$. Moreover, a
natural representative of $\mathfrak{f}_{n}$ is given by
$$\left\{e_1 =\sum_{i=1}^{n-2} X_{i,i+1}\right\}\cup \{e_j = X_{j-1,n}\}_{j=2}^{n}$$
\end{prp}
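To illustrate the pattern (the case $n=4$), the representative reads $e_1=X_{1,2}+X_{2,3}$, $e_2=X_{1,4}$, $e_3=X_{2,4}$ and $e_4=X_{3,4}$; indeed $[e_1,e_4]=[X_{2,3},X_{3,4}]=X_{2,4}=e_3$ and $[e_1,e_3]=[X_{1,2},X_{2,4}]=X_{1,4}=e_2$, while all the remaining brackets vanish, so the law of $\mathfrak{f}_4$ is recovered inside $\mathfrak{h}_4$.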
\begin{table}[htp]
\caption{Solvable Lie algebras of dimension less than $5$.}\label{class}
\small
\begin{center}
\begin{tabular}{|c|c|c|}
\hline Dim. & Lie algebra & (Non-zero) Lie brackets \\
\hline
1 & $\mathfrak{s}_1^1$ & --- \\
\hline
2 & $\mathfrak{s}_2^1$ & --- \\
& $\mathfrak{s}_2^2$ & $[e_1, e_2]\!\! =\! e_1$ \\
\hline
3 & $\mathfrak{s}_3^1$ & --- \\
& $\mathfrak{s}_3^2$ & $[e_1,e_3]\!\!=\!e_2$ \\
& $\mathfrak{s}_3^3$ & $[e_1,e_3]\!\! =\! e_1$, \ $[e_2,e_3]\!\! =\! e_2$ \\
& $\mathfrak{s}_3^4$ & $[e_1,e_3]\!\! =\! e_2$, \ $[e_2,e_3]\!\! =\! -e_1$ \\
& $\mathfrak{s}_3^5$ & $[e_1,e_3]\!\! =\! -e_1$, \ $[e_2,e_3]\!\!=\! -e_1\!-\!e_2$ \\
& $\mathfrak{s}_3^6$ & $[e_1,e_3]\!\! =\! -e_1$ \\
\hline
4 & $\mathfrak{s}_4^1$ & --- \\
& $\mathfrak{s}_4^2$ & $[e_1,e_3]\!\!=\!e_2$ , \ $[e_1,e_4]\!\! =\! e_3$\\
& $\mathfrak{s}_4^3$ & $[e_1,e_3]\!\! =\! e_3$, \ $[e_1,e_4]\!\! =\! e_4$, \ $[e_2,e_3]\!\! =\! e_4$ \\
& $\mathfrak{s}_4^4$ & $[e_1,e_3]\!\! =\! e_3$, \ $[e_1,e_4]\!\! =\! e_4$, \ $[e_2,e_3]\!\! =\! -e_4$, \ $[e_2,e_4]\!\! =\! e_3$ \\
& $\mathfrak{s}_4^5$ & $[e_1,e_3]\!\!=\!e_3$ , \ $[e_1,e_2]\!\! =\! e_4$ \\
& $\mathfrak{s}_4^6$ & $[e_4,e_1]\!\! =\! e_1$, \ $[e_4,e_2]\!\! =\!\alpha e_2$, \ $[e_4,e_3]\!\! =\!\beta e_3$ \\
& $\mathfrak{s}_4^7$ & $[e_3,e_1]\!\! =\!\alpha e_1$, \ $[e_3,e_2]\!\! =\! e_2$, \ $[e_3,e_4]\!\! =\!e_2+e_4$ \\
& $\mathfrak{s}_4^8$ & $[e_1,e_2]\!\! =\! e_2+e_3$, \ $[e_1,e_3]\!\! =\! e_3+e_4$, \ $[e_1,e_4]\!\! =\!e_4$ \\
& $\mathfrak{s}_4^9$ & $[e_1,e_2]\!\! =\! \beta e_2-e_3$, \ $[e_1,e_3]\!\! =\!e_2+ \beta e_3$, \ $[e_1,e_4]\!\! =\! \alpha e_4$ \\
& $\mathfrak{s}_4^{10}$ & $[e_1,e_2]\!\! =\! (\alpha-1) e_2$, \ $[e_1,e_3]\!\! =\!e_3$, \ $[e_1,e_4]\!\! =\!\alpha e_4$, \ $[e_2,e_3]\!\! =\! e_4$ \\
& $\mathfrak{s}_4^{11}$ & $[e_1,e_2]\!\! =\! e_2+e_3$, \ $[e_1,e_3]\!\! =\!e_3$, \ $[e_1,e_4]\!\! =\!2 e_4$, \ $[e_2,e_3]\!\! =\! e_4$\\
& $\mathfrak{s}_4^{12}$ & $[e_1,e_2]\!\! =\! \alpha e_2-e_3$, \ $[e_1,e_3]\!\! =\!e_2+\alpha e_3$, \ $[e_1,e_4]\!\! =\!2 \alpha e_4$, \ $[e_2,e_3]\!\! =\! e_4$ \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}
\caption{$5$-dimensional non-decomposable real solvable
Lie algebras}\label{TableClas5DNDRSNNLA}
\begin{center}\scriptsize
\begin{tabular}{|c|c|c|}
\hline Lie algebra & (Non-zero) Lie brackets & Parameters \\
\hline$\mathfrak{g}_{5,1}$ & $[e_1,e_3]=e_5,\, [e_2,e_4]=e_5$ & \\
\hline$\mathfrak{g}_{5,2}$ & $[e_1,e_2]=e_4,\, [e_1,e_3]=e_5$ & \\
\hline$\mathfrak{g}_{5,3}$ & $[e_1,e_2]=e_4,\, [e_1,e_4]=e_5,\, [e_2,e_3]=e_5$ & \\
\hline$\mathfrak{g}_{5,4}$ & $[e_1,e_2]=e_3,\, [e_1,e_3]=e_4,\, [e_2,e_3]=e_5$ & \\
\hline$\mathfrak{g}_{5,5}$ & $[e_1,e_2]=e_3,\, [e_1,e_3]=e_4,\, [e_1,e_4]=e_5$ & \\
\hline$\mathfrak{g}_{5,6}$ & $[e_1,e_2]=e_3,\, [e_1,e_3]=e_4,\, [e_1,e_4]=e_5,\, [e_2,e_3]=e_5$ & \\
\hline $\mathfrak{g}_{5,7}$ & \begin{tabular}{c}$[e_1,e_5]= e_1,
[e_2,e_5]=\alpha e_2,$ \\ $[e_3,e_5]=\beta e_3, [e_4,e_5] = \gamma
e_4$ \end{tabular} & \begin{tabular}{c} $-1 \leq \gamma \leq \beta \leq \alpha \leq 1$, \\ $\alpha \beta \gamma \neq 0$. \end{tabular} \\
\hline$\mathfrak{g}_{5,8}$ & $[e_2,e_5]= e_1, [e_3,e_5]=e_3, [e_4,e_5]=\gamma e_4,$ & $0 < | \gamma | \leq 1$ \\
\hline$\mathfrak{g}_{5,9}$ & \begin{tabular}{c}$[e_1,e_5]=e_1, [e_2,e_5]=e_1+e_3, [e_3,e_5]=\beta e_3, [e_4,e_5]=\gamma e_4$\end{tabular} & $0 \neq \gamma \leq \beta$ \\
\hline$\mathfrak{g}_{5,10}$ & $[e_2,e_5]=e_1, [e_3,e_5]=e_2, [e_4,e_5]=e_4$ & \\
\hline$\mathfrak{g}_{5,11}$ & \begin{tabular}{c}$[e_1,e_5]=e_1, [e_2,e_5]=e_1+e_2, [e_3,e_5]=e_2 + e_3, [e_4,e_5]=\gamma e_4$\end{tabular} & $\gamma \neq 0$ \\
\hline$\mathfrak{g}_{5,12}$ & \begin{tabular}{c}$[e_1,e_5]=e_1, [e_2,e_5]=e_1+e_2,$ \\ $[e_3,e_5]=e_2 + e_3, [e_4,e_5]=e_3 + e_4$\end{tabular} & \\
\hline$\mathfrak{g}_{5,13}$ & \begin{tabular}{c}$[e_1,e_5]=e_1, [e_2,e_5]=\gamma e_2$, \\ $[e_3,e_5]=p e_3 - s e_4, [e_4,e_5]=s e_3 + p e_4$\end{tabular} & $\gamma s \neq 0, |\gamma|\leq 1$ \\
\hline$\mathfrak{g}_{5,14}$ & \begin{tabular}{c}$[e_2,e_5]=e_1, [e_3,e_5]=p e_3 - e_4, [e_4,e_5]=e_3 + p e_4$ \end{tabular} & \\
\hline$\mathfrak{g}_{5,15}$ & \begin{tabular}{c}$[e_1,e_5]=e_1, [e_3,e_5]=\gamma e_3,$ \\ $ [e_2,e_5]=e_1+e_2, [e_4,e_5]=e_3 + \gamma e_4$\end{tabular} & $-1\leq \gamma \leq 1$ \\
\hline$\mathfrak{g}_{5,16}$ & \begin{tabular}{c}$[e_1,e_5]=e_1, [e_2,e_5]=e_1+e_2,$ \\ $[e_3,e_5]=p e_3 - s e_4, [e_4,e_5]=s e_3 + p e_4$\end{tabular} & $s \neq 0$ \\
\hline$\mathfrak{g}_{5,17}$ & \begin{tabular}{c}$[e_1,e_5]=p e_1 - e_2, [e_2,e_5]=e_1+ p e_2,$ \\ $[e_3,e_5]=q e_3 - s e_4, [e_4,e_5]=s e_3 + q e_4$\end{tabular} & $s \neq 0$ \\
\hline$\mathfrak{g}_{5,18}$ & \begin{tabular}{c}$[e_3,e_5]=e_1 + p e_3 - e_4, [e_2,e_5]=e_1+ p e_2$ \\ $[e_1,e_5]=p e_1 - e_2, [e_4,e_5]=e_2 + e_3 - p e_4$\end{tabular} & $p \geq 0$ \\
\hline$\mathfrak{g}_{5,19}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_5]=(1 + \alpha) e_1,$ \\ $[e_2,e_5]=e_2, [e_3,e_5]=\alpha e_3, [e_4,e_5] = \beta e_4$\end{tabular} & $\beta \neq 0$ \\
\hline$\mathfrak{g}_{5,20}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_5]=(1 + \alpha) e_2, [e_2,e_5]=e_2,$ \\ $[e_3,e_5]=\alpha e_3, [e_4,e_5] =e_1 + (1 + \alpha) e_4$\end{tabular} & \\
\hline$\mathfrak{g}_{5,21}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_5]=2 e_1, [e_4,e_5] = e_4,$ \\ $[e_2,e_5]=e_2 + e_3, [e_3,e_5]= e_3 + e_4$\end{tabular} & \\
\hline$\mathfrak{g}_{5,22}$ & $[e_2,e_3]=e_1, [e_2,e_5]=e_3, [e_4,e_5] =e_4$ & \\
\hline$\mathfrak{g}_{5,23}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_5]=2 e_1, [e_3,e_5]= e_3,$ \\ $[e_2,e_5]=e_2 + e_3, [e_4,e_5] =\beta e_4$\end{tabular} & $\beta \neq 0$ \\
\hline$\mathfrak{g}_{5,24}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_5]=2 e_1,[e_3,e_5]= e_3,$ \\ $[e_2,e_5]=e_2 + e_3, [e_4,e_5] =\epsilon e_1 + 2 e_4$\end{tabular} & $\epsilon = \pm 1$ \\
\hline$\mathfrak{g}_{5,25}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_5]=2p e_1, [e_4,e_5] =\beta e_4,$ \\ $[e_2,e_5]=pe_2 + e_3, [e_3,e_5]=- e_2 + pe_3$\end{tabular} & $\beta \neq 0$ \\
\hline$\mathfrak{g}_{5,26}$ & \begin{tabular}{c}$[e_2,e_5]=pe_2 + e_3, [e_1,e_5]=2p e_1, [e_2,e_3]=e_1,$ \\ $ [e_3,e_5]=-e_2 + pe_3, [e_4,e_5] =\epsilon e_1 + 2p e_4$\end{tabular} & $\epsilon = \pm 1$ \\
\hline$\mathfrak{g}_{5,27}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_3,e_5]= e_3 + e_4, [e_4,e_5] =e_1 + e_4, [e_1,e_5]=e_1$\end{tabular} & \\
\hline$\mathfrak{g}_{5,28}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_2,e_5]=\alpha e_2, [e_4,e_5] = e_4,$ \\ $[e_1,e_5]=(1 + \alpha) e_1, [e_3,e_5]= e_3 + e_4$ \end{tabular} & \\
\hline$\mathfrak{g}_{5,29}$ & \begin{tabular}{c} $[e_2,e_3]=e_1, [e_1,e_5]=e_1, [e_2,e_5]=e_2, [e_3,e_5]= e_4$ \end{tabular} & \\
\hline$\mathfrak{g}_{5,30}$ & \begin{tabular}{c}$[e_2,e_4]=e_1, [e_3,e_4]=e_2,$ \\ $[e_1,e_5]=(2 + h)e_1, [e_4,e_5]=e_4$, \\ $[e_2,e_5]=(1 + h)e_2, [e_3,e_5]=h e_3$\end{tabular} & \\
\hline$\mathfrak{g}_{5,31}$ & \begin{tabular}{c}$[e_2,e_4]=e_1, [e_3,e_4]=e_2, [e_1,e_5]=3e_1, [e_3,e_5]= e_3,$ \\ $[e_2,e_5]=2 e_2, [e_4,e_5]=e_3 + e_4$\end{tabular} & \\
\hline$\mathfrak{g}_{5,32}$ & \begin{tabular}{c}$[e_2,e_4]=e_1, [e_3,e_4]=e_2,[e_1,e_5]=e_1,$ \\$[e_2,e_5]=e_2,[e_3,e_5]=he_1 + e_3$\end{tabular} & \\
\hline$\mathfrak{g}_{5,33}$ & \begin{tabular}{c} $[e_1,e_4]=e_1, [e_3,e_4]=\beta e_3, [e_2,e_5]=e_2, [e_3,e_5]=\gamma e_3$ \end{tabular} & $\beta^2 + \gamma^2 \neq 0$ \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}
\caption{$5$-dimensional non-decomposable real solvable
Lie algebras (II)}\label{TableClas5DNDRSNNLA2}
\begin{center}\scriptsize
\begin{tabular}{|c|c|c|}
\hline Lie algebra & (Non-zero) Lie brackets & Parameters \\
\hline$\mathfrak{g}_{5,34}$ & \begin{tabular}{c}$[e_1,e_4]=e_1, [e_2,e_4]=e_2,$ \\ $[e_3,e_4]=e_3,[e_1,e_5]=e_1, [e_3,e_5]= e_2$\end{tabular} & \\
\hline$\mathfrak{g}_{5,35}$ & \begin{tabular}{c}$[e_1,e_4]=h e_1, [e_2,e_4]=e_2,[e_3,e_4]=e_3,$ \\ $[e_2,e_5]=-e_3,[e_1,e_5]=\alpha e_1, [e_3,e_5]= e_2$\end{tabular} & $h^2 + \alpha^2 \neq 0$ \\
\hline$\mathfrak{g}_{5,36}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_4]=e_1,$ \\ $[e_2,e_4]=e_2, [e_3,e_5]= e_3,[e_2,e_5]=-e_2$\end{tabular} & \\
\hline$\mathfrak{g}_{5,37}$ & \begin{tabular}{c}$[e_2,e_3]=e_1, [e_1,e_4]=2e_1,[e_2,e_4]=e_2,$ \\ $[e_3,e_4]=e_3, [e_2,e_5]=-e_3,[e_3,e_5]=e_2$\end{tabular} & \\
\hline$\mathfrak{g}_{5,38}$ & $[e_1,e_4]=e_1, [e_2,e_5]=e_2,[e_4,e_5] =e_3$ & \\
\hline$\mathfrak{g}_{5,39}$ & \begin{tabular}{c}$[e_1,e_4]=e_1, [e_2,e_4]=e_2,$ \\ $[e_1,e_5]=-e_2,[e_2,e_5]=e_1,[e_4,e_5]= e_3$\end{tabular} & \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[htp]
\caption{Representation of Solvable Lie algebras of dimension less than $5$.}\label{repres}
\small
\begin{center}
\begin{tabular}{|c|c|c|}
\hline Lie algebra & Representation & $\overline{\mu}$ \\
\hline $\mathfrak{s}_1^1$ & $e_1=X_{11}$ & $1$ \\
\hline $\mathfrak{s}_2^1$ & $e_1=X_{11}, e_2=X_{22}$ & $2$ \\
\hline $\mathfrak{s}_2^2$ & $e_1=X_{11}, e_2=X_{12}$ & $2$\\
\hline $\mathfrak{s}_3^1$ & $e_1=X_{11}, e_2=X_{22}, e_3=X_{33}$ & $3$\\
\hline $\mathfrak{s}_3^2$ & $e_1=X_{12}, e_2 = X_{13}, e_3=X_{23}$ & $3$\\
\hline $\mathfrak{s}_3^3$ & $e_1=X_{13}, e_2=X_{12}, e_3=-X_{11}$ & $3$\\
\hline $\mathfrak{s}_3^4$ & $e_1=X_{12}, e_2=i X_{12}+ X_{13}, e_3=iX_{22}+ X_{23}-iX_{33}$ & $3$\\
\hline $\mathfrak{s}_3^5$ & $e_1 = X_{13}, e_2 = X_{23}, e_3 = X_{12} - X_{33}$ & $3$\\
\hline $\mathfrak{s}_3^6$ & $e_1 = X_{12}$, $e_2 = X_{11}+X_{22}$, $e_3= -X_{22}$ & $2$\\
\hline $\mathfrak{s}_4^1$ & $e_1\!\!=\!X_{11}, e_2\!\!=\!X_{22}, e_3\!\!=\!X_{33}, e_4\!\!=\!X_{44}$ & $4$\\
\hline $\mathfrak{s}_4^2$ & $e_1\!\!=\!-(X_{23}+X_{34}), e_2\!\!=\!X_{14}, e_3\!\!=\!X_{13}, e_4\!\!=\!X_{12}$ & $4$\\
\hline $\mathfrak{s}_4^3$ & $e_1\!\!=\!X_{11}, e_2\!\!=\!-X_{23}, e_3\!\!=\!X_{12}, e_4\!\!=\!X_{13}$ & $3$\\
\hline $\mathfrak{s}_4^4$ & $e_1\!\!=\!X_{11}, e_2 \!\!=\! -\!\, i X_{22}\! +\! X_{23}\! +\! i X_{33}, e_3 \!\!=\! X_{12}, e_4 \!\!=\! -\!\, i X_{12}\!+\! X_{13}$ & $3$ \\
\hline $\mathfrak{s}_4^5$ & $e_1\!\!=\! X_{12}\! +\! X_{33}, e_2\!\!=\!X_{24}, e_3\!\!=\!X_{34}, e_4\!\!=\!X_{14}$ & $4$\\
\hline $\mathfrak{s}_4^6$ & $e_1\!\!=\!X_{14}, e_2\!\!=\!X_{24}, e_3\!\!=\!X_{34}, e_4\!\!=\!(\alpha\!-\!1)X_{22}\!+\!(\beta\!-\!1)X_{33}\!-\!X_{44}$ & $4$ \\
\hline $\mathfrak{s}_4^7$ & $e_1\!\!=\!X_{14}$, $e_2\!\!=\!X_{13}$, $e_3\!\!=\!X_{11}\!+\!X_{23}\!+\!(1\!-\!\alpha)X_{44}$, $e_4\!\!=\!-X_{12}$ & $4$\\
\hline $\mathfrak{s}_4^8$ & $e_1\!\!=\!X_{12}\!-\!X_{23}\!-\!X_{44}, e_2\!\!=\!X_{34}, e_3\!\!=\!-X_{24}, e_4\!\!=\!-X_{14}$ & $4$ \\
\hline $\mathfrak{s}_4^9$ & \begin{tabular}{c} $e_1\!\!=\!\alpha X_{11}\!\!+\!(\alpha\!-\!\beta\!\!-\!i)X_{22}\!\!+\!(\beta\!-\!i)X_{33},$ \\ $e_2\!\!=\!i(X_{12}\!\!-\!\!X_{34}), e_3\!\!=\!X_{12}\!\!+\!X_{34}, e_4\!\!=\!X_{14}$ \end{tabular} & $4$ \\
\hline $\mathfrak{s}_4^{10}$ & $e_1\!\!=\!\alpha X_{11}+X_{22}, e_2\!\!=\!X_{12}, e_3\!\!=\!X_{23}, e_4 \!\!=\! X_{13}$ & $3$ \\
\hline $\mathfrak{s}_4^{11}$ & {\small $e_1\!\!=\!X_{12}+X_{11}+X_{22}-X_{44}, e_2\!\!=\!X_{23}-X_{34}, e_3\!\!=\!X_{13}, e_4\!\!=\!X_{14}$} & $4$ \\
\hline $\mathfrak{s}_4^{12}$ & \begin{tabular}{c} $e_1\!\!=\!2 \alpha X_{11}+(\alpha-i)X_{22}$, $e_2\!\!=\!X_{23}+X_{12}$, \\ $e_3\!\!=\!i(X_{23}-X_{12})$, $e_4\!\!=\!2 i X_{13}$ \end{tabular} & $3$ \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}
\caption{Representation of $5$-dimensional non-decomposable real solvable
Lie algebras}\label{TableRepres5DNDRSNNLA}
\begin{center}\scriptsize
\begin{tabular}{|c|c|c|}
\hline Lie algebra & Representation & $\overline{\mu}$ \\
\hline$\mathfrak{g}_{5,1}$ & $e_1=X_{1,2}, e_2=X_{1,3}, e_3=X_{1,4}, e_4=X_{1,5}, e_5=X_{2,3}+X_{3,4}+X_{4,5}$ & $4$ \\
\hline$\mathfrak{g}_{5,2}$ & $e_1=X_{1,2}+X_{2,4}+X_{3,5}, e_2=X_{1,3}, e_3=X_{1,4}, e_4=X_{1,5}, e_5=X_{2,3}+X_{3,4}+X_{4,5}$ & $5$ \\
\hline$\mathfrak{g}_{5,3}$ & $e_1=X_{1,2}-X_{3,5}, e_2=X_{1,3}+X_{2,5}, e_3=X_{1,4}, e_4=X_{1,5}, e_5=X_{2,3}+X_{3,4}$ & $5$ \\
\hline$\mathfrak{g}_{5,4}$ & $e_1=X_{1,2}+X_{2,3}, e_2=X_{1,3}, e_3=X_{1,4}, e_4=X_{2,4}, e_5=X_{3,4}$ & $4$ \\
\hline$\mathfrak{g}_{5,5}$ & $e_1=X_{1,2}, e_2=X_{1,3}, e_3=X_{1,4}, e_4=X_{2,3}, e_5=X_{2,4}$ & $4$ \\
\hline$\mathfrak{g}_{5,6}$ & $e_1=X_{1,2}, e_2=X_{1,3}, e_3=X_{1,4}, e_4=X_{2,4}, e_5=X_{3,4}$ & $4$ \\
\hline $\mathfrak{g}_{5,7}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{1,2}, e_3=X_{1,5}, e_4=X_{3,5},$ \\
$e_5=\alpha X_{2,2}+(\beta-\gamma)X_{3,3}+X_{4,4}+\beta X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,8}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{1,2}+X_{1,4}, e_3=X_{1,5}, e_4=X_{1,3},$ \\
$e_5= X_{2,4}+ \gamma X_{3,3}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,9}$ & \begin{tabular}{c} $e_1=X_{2,4}, e_2=X_{1,3}+X_{2,4}, e_3=\beta X_{1,3}, e_4=X_{2,3},$ \\
$e_5= (\beta-\gamma)X_{2,2}+ \beta X_{3,3}+(1+ \beta - \gamma)X_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,10}$ & \begin{tabular}{c} $e_1=-X_{1,5}, e_2=-X_{1,4}, e_3=X_{1,2}-X_{1,3}, e_4=X_{1,3}+X_{1,4}+X_{1,5},$ \\
$e_5=X_{1,1}+X_{2,2}+X_{2,3}+2 X_{3,3}+X_{3,4}+X_{4,4}+X_{4,5}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,11}$ & \begin{tabular}{c} $e_1=X_{1,5}, e_2=\frac{1}{2}X_{1,4}+X_{2,5}, e_3=X_{1,5}+X_{2,4}, e_4=X_{3,5},$ \\
$e_5=-\frac{1}{2}X_{1,2}+(1-\gamma)X_{3,3}+X_{4,4}+X_{4,5}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,12}$ & \begin{tabular}{c} $e_1=X_{1,5}, e_2=X_{1,4}, e_3=X_{1,3}, e_4=X_{1,2},$ \\
$e_5=X_{2,2}+X_{2,3}+X_{3,3}+X_{3,4}+X_{4,4}+X_{4,5}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,13}$ & \begin{tabular}{c} $e_1=X_{2,4}, e_2=X_{3,4}, e_3=X_{1,4}, e_4=i X_{1,4}$ \\ $e_5=(p-1-s i)X_{2,2}+(p-\gamma-s i)X_{3,3}+(p-s i)X_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,14}$ & \begin{tabular}{c} $e_1=X_{2,4}, e_2=X_{2,3}, e_3=X_{1,4}, e_4=-iX_{1,4},$ \\
$e_5=iX_{2,2}+iX_{3,3}+X_{3,4}+iX_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,15}$ & $e_1=-X_{1,4}, e_2=X_{2,4}, e_3=X_{1,3}, e_4=-X_{2,3}, e_5=X_{1,2}+\gamma X_{3,3}+X_{4,4}$ & $4$ \\
\hline$\mathfrak{g}_{5,16}$ & \begin{tabular}{c} $e_1=X_{2,4}, e_2=X_{2,3}, e_3=X_{1,4}, e_4=-i X_{1,4}$ \\ $e_5=s i X_{2,2}+(1+s i)X_{3,3}+X_{3,4}+(1+s i)X_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,17}$ & \begin{tabular}{c} $e_1=X_{2,4}, e_2=i X_{2,4}, e_3=X_{1,4}, e_4=i X_{1,4}$ \\
$e_5=(-p+i q -s i) X_{2,2}+(q-s i)X_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,18}$ & $e_1=X_{2,4}, e_2=i X_{2,4}, e_3=-\frac{i(i-p)}{p} X_{2,4}, e_4=\frac{i}{p}X_{2,4}, e_5=(p-i)X_{4,4}$ & $4$\\
\hline$\mathfrak{g}_{5,19}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{1,2}, e_3=X_{2,4}, e_4=X_{3,4}$ \\ $e_5=X_{2,2}+(1+\alpha-\beta) X_{3,3}+ \sqrt{\beta(2-\beta)+2\alpha(\beta-\alpha-2)-3} X_{3,4}+(1+\alpha)X_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,20}$ & \begin{tabular}{c} $e_1=-X_{1,4}, e_2=-X_{1,3}, e_3=Z X_{2,4}+X_{3,4}, e_4=X_{2,4}$ \\ $e_5=-(1+\alpha)X_{1,1}+X_{2,2}-Z X_{1,3}-(1+\alpha)X_{2,2}+Z X_{2,3}+X_{2,4}-\alpha X_{3,3}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,21}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{2,3}+X_{3,4}, e_3=-X_{1,3}+X_{2,5}, e_4=-2 X_{1,5}$ \\ $e_5=X_{1,2}+X_{3,3}+X_{3,5}+2X_{4,4}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,22}$ & $e_1=X_{1,5}, e_2=X_{1,2}+X_{2,3}, e_3=X_{2,5}, e_4=X_{1,4}, e_5=X_{3,5}+X_{4,4}$ & $5$ \\
\hline$\mathfrak{g}_{5,23}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{2,3}+X_{3,4}, e_3=-X_{1,3}, e_4= X_{1,5}$ \\ $e_5=-X_{1,1}+X_{1,2}-X_{2,2}+X_{4,4}+(\beta-1)X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,24}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{2,3}+X_{3,4}, e_3=-X_{1,3}, e_4= X_{1,5}-\epsilon X_{2,4}$ \\ $e_5=-X_{1,1}+X_{1,2}-X_{2,2}+X_{4,4}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,25}$ & \begin{tabular}{c} $e_1=-2 iX_{1,4}, e_2=-i X_{1,3}+ i X_{3,4}, e_3=X_{1,3}+X_{3,4}, e_4=(\beta-2p)X_{1,2}+Z X_{1,4}$ \\ $e_5=-2pX_{1,1}+(\beta-2p)X_{2,2}+Z X_{2,4}+(i-p)X_{3,3}, Z=\sqrt{1+2pi+4p\beta-\beta^2-9p^2}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,26}$ & \begin{tabular}{c} $e_1=2 \omega X_{1,4}, e_2=\omega X_{1,2}-(1-p^2+pi)X_{1,4}-\omega X_{2,4}, e_3=X_{1,2}+(-p+i)X_{1,4}+X_{2,4},$ \\ $e_4=-2\epsilon \omega X_{3,4}, e_5=X_{1,3}+(p-\omega)X_{2,2}+X_{2,4}+ZX_{3,4}+2p X_{4,4},$\\
$Z=\sqrt{2p^3i+2pi-2p^4-5p^2-1}, \omega=-p^3+i(p^2+1)$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,27}$ & $e_1=-X_{1,4}, e_2=X_{1,3}, e_3=-X_{3,4}, e_4=X_{2,4}, e_5=X_{1,2}+X_{2,3}+X_{4,4}$ & $4$ \\
\hline$\mathfrak{g}_{5,28}$ & \begin{tabular}{c} $e_1=-X_{1,4}, e_2=(1+\alpha)iX_{1,4}+X_{2,4}, e_3=X_{1,2}, e_4=X_{1,3},$ \\ $e_5=(1+\alpha)iX_{1,2}+X_{2,2}+X_{2,3}+X_{3,3}+(1+\alpha)X_{4,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,29}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{1,3}, e_3=X_{1,1}+X_{2,2}+X_{3,3}+X_{3,4}+X_{4,4},$ \\ $e_4=-X_{2,4}, e_5=X_{2,2}+X_{2,3}+X_{3,3}+X_{4,4}$ \end{tabular} & $4$\\
\hline$\mathfrak{g}_{5,30}$ & \begin{tabular}{c} $e_1=-2X_{1,4}, e_2=-X_{1,3}+X_{1,4}+X_{2,4}, e_3=X_{2,3}-X_{4,4},$ \\ $e_4=X_{1,2}+X_{3,4}, e_5=X_{2,2}+(1+h)X_{3,3}+X_{3,4}+(2+h)X_{4,4}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,31}$ & \begin{tabular}{c} $e_1=\frac{3}{2}X_{1,5}, e_2=X_{1,4}-X_{2,5}, e_3=\frac{1}{3}X_{1,3}-X_{2,4},$ \\ $e_4=\frac{1}{2}X_{1,2}+X_{3,4}+X_{4,5}, e_5=X_{2,2}+X_{2,3}+X_{3,3}+2X_{4,4}+3 X_{5,5}$ \end{tabular} & $5$ \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}
\caption{Representation of $5$-dimensional non-decomposable real solvable
Lie algebras (II)}\label{TableRepres5DNDRSNNLA2}
\begin{center}\scriptsize
\begin{tabular}{|c|c|c|}
\hline Lie algebra & Representation & $\overline{\mu}$ \\
\hline$\mathfrak{g}_{5,32}$ & \begin{tabular}{c} $e_1=X_{1,4}, e_2=X_{1,3}, e_3=X_{1,2}, e_4=X_{2,2}+X_{3,4},$ \\ $e_5=X_{2,2}+hX_{2,4}+X_{3,3}+X_{4,4}$ \end{tabular} & $4$\\
\hline$\mathfrak{g}_{5,33}$ & \begin{tabular}{c} $e_1=X_{2,4}, e_2=X_{1,4}, e_3=\gamma X_{2,3}+X_{2,4},$ \\ $e_4=X_{1,1}+\beta X_{3,3} + \frac{\beta-1}{\gamma} X_{3,4} + X_{4,4}, e_5=-X_{1,1}+\gamma X_{3,3}+X_{3,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,34}$ & $e_1=X_{1,2}, e_2=-X_{1,4}, e_3=X_{3,4}, e_4=X_{2,2}+X_{4,4}, e_5=-X_{1,1}+X_{1,3}-X_{3,3}-X_{4,4}$ & $4$ \\
\hline$\mathfrak{g}_{5,35}$ & $e_1=X_{2,4}, e_2=X_{1,3}, e_3=-iX_{1,3}, e_4=X_{3,3}+ h X_{4,4}, e_5=-\alpha X_{2,2}+iX_{3,3}$ & $4$ \\
\hline$\mathfrak{g}_{5,36}$ & $e_1=-X_{1,4}, e_2=X_{2,4}, e_3=X_{1,2}, e_4=X_{4,4}, e_5= X_{2,2}+iX_{3,3}$ & $4$ \\
\hline$\mathfrak{g}_{5,37}$ & \begin{tabular}{c} $e_1=2 i X_{1,4}, e_2=X_{1,2}-iX_{1,4}+X_{2,4}, e_3=-iX_{1,2}-X_{1,4}+iX_{2,4},$ \\ $ e_4=X_{2,2}+iX_{2,4}+2X_{4,4}, e_5= iX_{2,2}+X_{2,4}$ \end{tabular} & $4$ \\
\hline$\mathfrak{g}_{5,38}$ & \begin{tabular}{c} $e_1=X_{1,5}, e_2=X_{1,4}, e_3=X_{1,3}, e_4=X_{1,2}+X_{5,5}$ \\
$e_5=X_{1,1}+X_{2,2}+X_{2,3}+X_{3,3}+2 X_{4,4}+X_{5,5}$ \end{tabular} & $5$ \\
\hline$\mathfrak{g}_{5,39}$ & $e_1=X_{2,4}, e_2=-i X_{2,4}, e_3=X_{1,4}, e_4=-X_{2,2}+X_{3,4}, e_5=i X_{1,1}-X_{1,3}+i X_{3,3} + iX_{4,4}$ & $4$ \\
\hline
\end{tabular}
\end{center}
\end{table}
\end{document}
\begin{document}
\title{Fiber-preserving imbeddings and diffeomorphisms}
\author{John Kalliongis and Darryl McCullough}
\date{{\footnotesize Department of Mathematics, Saint Louis
University, St.~Louis, MO 63103}
\\
{\footnotesize [email protected]}
\ \\
\ \\
{\footnotesize Department of Mathematics, University of Oklahoma,
Norman, OK 73019}
{\footnotesize [email protected]}
\ \\
\ \\
{\footnotesize \today
}}
\maketitle
\noindent {\footnotesize 1991 Mathematics Subject Classification\ \
Primary: 57R35\ \ Secondary: 57M99}
\section{Introduction}
\label{intro}
\marginwrite{intro}
Let $\hbox{\sl D\/}iff(M)$ be the group of diffeomorphisms of a smooth manifold
$M$, with the $C^\infty$-topology. For a smooth submanifold $N$ of
$M$, denote by $\hbox{\sl I\/}mb(N,M)$ the space of all smooth imbeddings $j$ of
$N$ into $M$ such that $j^{-1}(\partial M)\!=\! N\cap \partial M$. In
\cite{P}, R.~Palais proved a useful result relating diffeomorphisms
and imbeddings. In the case when $M$ is closed, it says that if
$W\subset V$ are submanifolds of $M$, then the mappings $\hbox{\sl D\/}iff(M)\to
\hbox{\sl I\/}mb(V,M)$ and $\hbox{\sl I\/}mb(V,M)\to \hbox{\sl I\/}mb(W,M)$ obtained by restricting
diffeomorphisms and imbeddings are locally trivial, and hence are
Serre fibrations. The same results, with variants for manifolds with
boundary and more complicated additional boundary structure, were
proven by J.~Cerf in~\cite{Cerf}. Among various applications of these
results, the Isotopy Extension Theorem follows by lifting a path in
$\hbox{\sl I\/}mb(V,M)$ starting at the inclusion map of $V$ to a path in
$\hbox{\sl D\/}iff(M)$ starting at $1_M$. Moreover, parameterized versions of
isotopy extension follow just as easily from the homotopy lifting
property for $\hbox{\sl D\/}iff(M)\to \hbox{\sl I\/}mb(V,M)$ (see corollary~\ref{isotopy
lifting}).
In the common situation of a fibering of manifolds, it is natural to
consider the spaces of imbeddings and diffeomorphisms that respect the
fibered structure. Consider a (smooth) fibering $p\colon E\to B$ of
compact manifolds, possibly with boundary. (Actually, most of our
results allow $E$ and $B$ to be noncompact, although the fiber and the
relevant submanifolds must be assumed to be compact. Also, we prove
versions with control relative to subsets of the boundary of $B$ and
their preimages in $E$. For clarity we omit such complications in
this introductory discussion.) A diffeomorphism of $E$ is called {\it
fiber-preserving} when it takes each fiber of $E$ to a fiber of $E$,
and {\it vertical} when it takes each fiber to itself. The space
$\hbox{\sl D\/}iff_f(E)$ of fiber-preserving diffeomorphisms of $E$ contains the
subspace $\hbox{\sl D\/}iff_v(E)$ of vertical diffeomorphisms. Any
fiber-preserving diffeomorphism $g$ of $E$ induces a diffeomorphism
$\ol{g}$ of $B$, and this defines a map from $\hbox{\sl D\/}iff_f(E)$ to
$\hbox{\sl D\/}iff(B)$ for which the preimage of the identity map is $\hbox{\sl D\/}iff_v(E)$.
In section~\ref{project} we prove
\noindent{\bf Projection Theorem} (Theorem \ref{project diffs}) {\em
$\hbox{\sl D\/}iff_f(E)\to\hbox{\sl D\/}iff(B)$ is locally trivial. }
\noindent This theorem is essentially due to W. Neumann and F. Raymond
(see the comments below). The homotopy lifting property for the
projection fibration translates directly into the following.
\noindent{\bf Parameterized Isotopy Extension Theorem} (Corollary
\ref{isotopy lifting}) {\em Suppose that $p\colon E\to B$ is a
fibering of compact manifolds, and suppose that for each $t$ in a
path-connected parameter space $P$, there is an isotopy $g_{t,s}$
such that $g_{t,0}$ lifts to a diffeomorphism $G_{t,0}$ of $E$. Assume
that sending $(t,s)\to g_{t,s}$ defines a continuous function from
$P\times [0,1]$ to $\hbox{\sl D\/}iff(B)$ and sending $t$ to $G_{t,0}$ defines a
continuous function from $P$ to $\hbox{\sl D\/}iff(E)$. Then the family $G_{t,0}$
extends to a continuous family on $P\times I$ such that for each
$(t,s)$, $G_{t,s}$ is a fiber-preserving diffeomorphism inducing
$g_{t,s}$ on~$B$.}
\noindent
A submanifold of $E$ is called {\it vertical} if it is a union of
fibers, and in this case it will be assumed to have the fibered
structure so that the inclusion map is fiber-preserving. An imbedding
of a fibered manifold $W$ into $E$ is called {\it fiber-preserving}
when the image of each fiber of $W$ is a fiber of $E$. The space of
all fiber-preserving imbeddings from $W$ to $E$ is denoted by
$\hbox{\sl I\/}mb_f(W,E)$. When $W\subseteq E$, $\hbox{\sl I\/}mb_f(W,E)$ contains the
subspace of {\it vertical} imbeddings $\hbox{\sl I\/}mb_v(W,E)$ which take each
fiber to itself. For fiber-preserving and vertical imbeddings of
vertical submanifolds, we have a more direct analogue of Palais'
results.
\noindent{\bf Restriction Theorem} (Corollary \ref{corollary3}) {\em
Let $V$ and $W$ be vertical submanifolds of $E$ with $W\subseteq V$,
each of which is either properly imbedded or codimension-zero. Then
the restrictions $\hbox{\sl I\/}mb_f(V,E)\to \hbox{\sl I\/}mb_f(W,E)$ and $\hbox{\sl I\/}mb_v(V,E)\to
\hbox{\sl I\/}mb_v(W,E)$ are locally trivial.}
\noindent As shown in theorem \ref{square}, the Projection and
Restriction Theorems can be combined into a single commutative square
in which all four maps are locally trivial:
$$\vbox{\halign{\hfil#\hfil\quad&#&\quad\hfil#\hfil\cr
$\hbox{\sl D\/}iff_f(E)$&$\longrightarrow$&$\hbox{\sl I\/}mb_f(W,E)$\cr
\noalign{}
$\mapdown{}$&&$\mapdown{}$\cr
\noalign{}
$\hbox{\sl D\/}iff(B)$&$\longrightarrow$&$\hbox{\sl I\/}mb(p(W),B)\rlap{\ .}$\cr}}$$
In 3-dimensional topology, a key role is played by manifolds
admitting a more general kind of fibered structure, called a Seifert
fibering. Some general references for Seifert-fibered 3-manifolds are
\cite{Hempel,Jaco1,JS,Orlik,OVZ,Scott,Seifert,Waldhausen1,Waldhausen2}.
In section~\ref{sfiber}, we prove the analogues of the results
discussed above for many Seifert fiberings $p\colon\Sigma\to{\cal O}$,
not necessarily 3-dimensional. Actually, we work in a somewhat more
general context, called {\it singular fiberings}, which resemble
Seifert fiberings but for which none of the usual structure of the
fiber as a homogeneous space is required.
In the late 1970's fibration results akin to our Projection Theorem
for the singular fibered case were proven by W.~Neumann and
F.~Raymond~\cite{N-R}. They were interested in the case when $\Sigma$
admits an action of the $k$-torus~$T^k$ and $\Sigma\to{\cal O}$ is the
quotient map to the orbit space of the action. They proved that the
space of (weakly) $T^k$-equivariant homeomorphisms of $\Sigma$ fibers
over the space of homeomorphisms of ${\cal O}$ that respect the orbit
types associated to the points of ${\cal O}$. A detailed proof of this
result when the dimension of $\Sigma$ is $k+2$ appears in the
dissertation of C.~Park~\cite{Park}. Park also proves analogous
results for spaces of weakly $G$-equivariant maps for principal
$G$-bundles and for Seifert fiberings of arbitrary
dimension~\cite{Park,Park1}. These results do not directly overlap
ours since we always consider the full group of fiber-preserving
diffeomorphisms without any restriction to $G$-equivariant maps
(indeed, no assumption of a $G$-action is even present).
Some technical applications of our results appear in \cite{M-R}. In
the present paper we give one main application. For a Seifert-fibered
manifold $\Sigma$, $\hbox{\sl D\/}iff(\Sigma)$ acts on the set of Seifert
fiberings, and the stabilizer of a given fibering is
$\hbox{\sl D\/}iff_f(\Sigma)$, thus the space of cosets
$\hbox{\sl D\/}iff(\Sigma)/\hbox{\sl D\/}iff_f(\Sigma)$ is the {\it space of Seifert
fiberings} of $\Sigma$. We prove in section~\ref{sfspace} that for a
Seifert-fibered Haken 3-manifold, each component of the space of
Seifert fiberings is weakly contractible (apart from a small list of
well-known exceptions, the space of Seifert fiberings is connected).
This result is originally due to Neumann and Raymond, since it is an
immediate consequence of the results in \cite{N-R} combined with
contemporaneous work of Hatcher~\cite{Hatcher}. We make the same use
of~\cite{Hatcher}.
Our results will be proven by adapting the method developed
in~\cite{P}. The main new idea needed for the fibered case is a
modification of the usual exponential map, called the {\it aligned}
exponential map $\exp_a$. This is defined and discussed in
section~\ref{exponent}. Section~\ref{metrics} contains some
preliminaries needed for carrying out Palais' approach for manifolds
with boundary. In section~\ref{palais}, we reprove the main result of
\cite{P} for manifolds which may have boundary. This
duplicates~\cite{Cerf} (in fact, the boundary control there is more
refined than ours), but is included to furnish lemmas as well as to
exhibit a prototype for the approach we use to deal with the bounded
case in our later settings. In section~\ref{orbifold}, we give the
analogues of the results of Palais and Cerf for smooth orbifolds,
which for us are quotients $\widetilde{\cal O}/H$ where
$\widetilde{\cal O}$ is a manifold and $H$ is a group acting smoothly
and properly discontinuously on $\widetilde{\cal O}$. Besides being of
independent interest, these analogues are needed for the case of
singular fiberings.
By a submanifold $N$ of $M$, we mean a smooth submanifold. When $M$
has boundary and $\dim(N)<\dim(M)$, we always require that $N$ be
properly imbedded in the sense that $N\cap \partial M=\partial N$. If
$N$ has codimension~0, we require that the frontier of $N$ be a
codimension-1 submanifold of $M$. In particular, it is understood that
the elements of $\hbox{\sl I\/}mb(N,M)$ carry $N$ to a submanifold satisfying
these conditions. The notation $\hbox{\sl D\/}iff(M\hbox{\ rel\ }\partial M)$ means the
space of diffeomorphisms which restrict to the identity map on each
point of $\partial M$, and for $X\subseteq M$, $\hbox{\sl I\/}mb(X,M\hbox{\ rel\ }\partial
M)$ means the imbeddings that equal the inclusion on $X\cap\partial
M$. For $K\subseteq M$, $\hbox{\sl D\/}iff^K(M)$ means the diffeomorphisms that
agree with the identity on $M-K$. We say that $K$ is a neighborhood of
the subset $X$ when $X$ is contained in the topological interior of
$K$. If $K$ is a neighborhood of a submanifold $N$, then $\hbox{\sl I\/}mb^K(N,M)$
means the elements $j$ in $\hbox{\sl I\/}mb(N,M)$ such that $K$ is a neighborhood
of~$j(N)$.
The second author thanks the MSRI for its support while the present
manuscript was in preparation. Both authors appreciate the continued
support of St.~Louis University for their collaborative work.
\section{Metrics which are products near the boundary}
\label{metrics}
\marginwrite{metrics}
When $M$ has a Riemannian metric, we denote by $d$ the associated
topological metric defined by putting $d(x,y)$ equal to the infimum of
the lengths of all piecewise differentiable paths from $x$ to $y$ when
$x$ and $y$ lie in the same component of $M$, and equal to~1 if $x$
and $y$ lie in different components.
Let $V$ be a (possibly empty) compact submanifold of $M$. Recall that
we always assume that $V$ is properly imbedded, if it has positive
codimension, or that the frontier of $V$ is a properly imbedded
codimension-1 submanifold, if $V$ has codimension~0. Fix a smooth
collar $\partial M\times [0,2]$ of $\partial M$ such that $V\cap
\partial M\times [0,2]$ is a union of $[0,2]$-fibers. Such a collar
can be obtained by constructing an inward-pointing vector field on a
neighborhood of $\partial M$ which is tangent to $V$, and using the
integral curves associated to the vector field to produce the collar.
On $\partial M\times [0,2)$, fix a Riemannian metric that is the
product of a metric on $\partial M$ and the usual metric on $[0,2)$.
Form a metric on $M$ from this metric and any metric defined on all of
$M$ using a partition of unity subordinate to the open cover
$\set{\partial M\times[0,2), M-\partial M\times I}$, where
$I\!=\![0,1]$. Such a metric is said to be a {\it product near $\partial
M$} such that $V$ {\it meets the collar $\partial M\times I$ in
$I$-fibers}. It has the following properties for $0\leq t\leq 1$:
\begin{enumerate}
\item[{\rm(i)}] If $x\in M-\partial M\times I$, then $d(x,\partial
M)>1$.
\item[{\rm(ii)}] If $x\!=\! (y,t)\in\partial M\times I$, then
$d(x,\partial M)\!=\! t$.
\item[{\rm(iii)}] If $V$ has positive codimension, then for any
tubular neighborhood of $V$ obtained by exponentiating a normal bundle
of $V$, the fiber at each point of $V\cap \partial M\times \set{t}$
lies in $\partial M\times\set{t}$. In particular, the fiber of each
point in $V\cap \partial M$ lies in~$\partial M$. If $V$ has
codimension $0$, then the corresponding statement holds for any
tubular neighborhood of the frontier of~$V$.
\end{enumerate}
A Riemannian metric is called {\it complete} if every Cauchy sequence
converges. For a complete Riemannian metric on $M$, a geodesic can be
extended indefinitely unless it reaches a point in the boundary of
$M$, where it may continue or it may fail to be extendible because it
``runs out of the manifold.''
One may obtain a complete metric on $M$ that is a product near
$\partial M$ such that $V$ meets the collar $\partial M\times I$ in
$I$-fibers as follows. Carry out the previous construction using a
metric on $\partial M\times[0,2)$ that is the product of a complete
metric on $\partial M$ and the standard metric on $[0,2)$. Define
$f\colon M-\partial M\to (0,\infty)$ by putting $f(x)$ equal to the
supremum of the values of $r$ such that $\exp$ is defined on all
vectors in $T_x(M)$ of length less than~$r$. Let $g\colon M-\partial
M\to (0,\infty)$ be a smooth map that is an $\epsilon$-approximation
to $1/f$, and let $\phi\colon M\to[0,1]$ be a smooth map which is
equal to~$0$ on $\partial M\times I$ and is~$1$ on $M-\partial
M\times[0,2)$. Give $M\times[0,\infty)$ the product metric, and define
a smooth imbedding $i\colon M\to M\times[0,\infty)$ by $i(x)\!=\!
(x,\phi(x)g(x))$ if $x\notin\partial M$ and $i(x)\!=\! (x,0)$ if $x\in
\partial M$. The restricted metric on $i(M)$ agrees with the original
metric on $\partial M\times I$ and is complete.
From now on, all metrics will be assumed to be complete.
\section{The Palais-Cerf restriction theorem}
\label{palais}\marginwrite{palais}
In this section we modify some results from \cite{P} to apply to the
bounded case. This duplicates~\cite{Cerf}, in fact our results are not
as general since we do not work in the setting of manifolds with
corners. On the other hand, our argument will provide lemmas needed
for the fibered cases, and is the prototype for the approach we use to
deal with the bounded case in our later settings.
Let $X$ be a $G$-space and $x_0\in X$. A {\it local cross-section} for
$X$ at $x_0$ is a map $\chi$ from a neighborhood $U$ of $x_0$ into $G$
such that $\chi(u)x_0\!=\! u$ for all $u\in U$. By replacing $\chi(u)$
by $\chi(u)\chi(x_0)^{-1}$, one may always assume that $\chi(x_0)\!=\!
1_G$, If $X$ admits a local cross-section at each point, it is said to
admit local cross-sections. From \cite{P} we have
\begin{proposition}{theoremA}{} Let $G$ be a topological group and $X$ a
$G$-space admitting local cross-sections. Then any equivariant map of
a $G$-space into $X$ is locally trivial.
\marginwrite{theoremA}
\end{proposition}
\noindent In fact, when $\pi\colon Y\to X$ is $G$-equivariant, the
local coordinates on $\pi^{-1}(U)$ are just given by sending the point
$(u,z)\in U\times \pi^{-1}(x_0)$ to $\chi(u)\cdot z$. Some additional
properties of the bundles obtained in proposition~\ref{theoremA} are
given in~\cite{P}.
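As a simple illustration of a local cross-section (a special case
chosen only for concreteness), let $G\!=\!{\bf R}^n$ act on
$X\!=\!{\bf R}^n$ by translations. For any $x_0$, the formula
$$\chi(u)(y)\!=\! y+u-x_0$$
defines a global cross-section at $x_0$: each $\chi(u)$ is a
translation, $\chi(u)x_0\!=\! u$, and $\chi(x_0)\!=\! 1_G$.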
The following technical lemma will simplify some of our applications
of proposition~\ref{theoremA}.
\begin{proposition}{inclusion}{} Let $V$ be a submanifold of $M$, let
$I(V,M)$ be a space of imbeddings of $V$ into $M$, and let $D(M)$ be a
group of diffeomorphisms of $M$. Suppose that for every $i\in I(V,M)$,
the space of imbeddings $I(i(V),M)$ has a local $D(M)$ cross-section
at the inclusion map of $i(V)$ into $M$. Then $I(V,M)$ has local
cross-sections.
\marginwrite{inclusion}
\end{proposition}
\begin{proof}{} Denote by $j_{i(V)}$ the inclusion map of $i(V)$ into
$M$. Let $i\in I(V,M)$ and define $Y\colon I(V,M)\to I(i(V),M)$ by
$Y(j)=ji^{-1}$. For a local cross-section $\chi\colon U\to D(M)$ at
$j_{i(V)}$, define $Y_1$ to be the restriction of $Y$ to $Y^{-1}(U)$,
a neighborhood of $i$ in $I(V,M)$. Then $\chi Y_1\colon Y^{-1}(U)\to
D(M)$ is a local cross-section for $I(V,M)$ at~$i$. For if $j\in
Y^{-1}(U)$ and $x\in V$, then $\chi (Y_1(j)) (i(x))= \chi
(Y_1(j))(j_{i(V)}(i(x))) = Y_1(j)(i(x))=j(x)$.
\end{proof}
The results in \cite{P} depend in large part on three lemmas, called
lemmas~b, c and~d there. Here, we adapt their statements and proofs to
the context of manifolds with boundary. First, for $L\subseteq M$ define
$\hbox{\rm Maps}^L(M,M)$ to be the space of smooth maps $f\colon M\to M$ such that
$f(\partial M)\subseteq \partial M$ and $f(x)\!=\! x$ for all $x\in
M-L$.
\begin{lemma}{J}{(Palais' lemma b)} Let $K$ be a compact subset of a
manifold $M$. Then there exists a neighborhood $J$ of $1_M$ in
$\hbox{\rm Maps}^K(M,M)$ which consists of diffeomorphisms.
\marginwrite{J}
\end{lemma}
\begin{proof}{} There exists a ($C^\infty$-) neighborhood $N$ of the
identity consisting of maps $f$ for which the differential $T_x(f)$ is
an isomorphism for all $x$. Since $f(\partial M)\subseteq\partial M$,
this implies that $f$ is a local diffeomorphism. Since $K$ is compact,
the preimage of any compact subset of $M$ under $f$ is compact.
Therefore $f$ is a covering map. If $M\neq K$, then this covering must
be 1-fold off of $f(K)$, hence must be a diffeomorphism, so assume
that $M$ is compact. Fix $\epsilon>0$ such that no closed
noncontractible loop in $M$ has length less than $4\epsilon$, and let
$J$ consist of the elements in $N$ such that $d(f(x),x)<\epsilon$ for
all $x$. Suppose for contradiction that $f\in J$ but $f(p)\!=\! f(q)$
for $p\neq q$. Then $d(p,q)<2\epsilon$, and if $\alpha$ is a geodesic
from $p$ to $q$ of length less than $2\epsilon$, then the diameter of
$f(\alpha)$ is less than $4\epsilon$. Therefore $f(\alpha)$ is a
contractible loop, a contradiction.
\end{proof}
For the next lemmas, we set some notation. The projection from the
tangent bundle $T(M)$ to $M$ is denoted by~$\pi$. For a submanifold
$V$ of $M$, let ${\cal X}(V,T(M))$ denote the sections $X$ from $V$ to
$T(M)\vert_V$ such that
\begin{enumerate}
\item[(1)] if $x\in V\cap\partial M$, then $X(x)$ is tangent to
$\partial M$, and
\item[(2)] $\exp(X(x))$ is defined for all $x\in V$.
\end{enumerate}
\noindent When the metric is a product near the boundary,
property~(1) implies that if $x\in V\cap \partial M$, then
$\exp(X(x))\in\partial M$.
A zero section will usually be denoted by $Z$. The vector fields
satisfying~(1) and~(2) (i.~e.~the case $V\!=\! M$) are denoted simply by
${\cal X}(T(M))$. When $L$ is a subset of $M$, denote by ${\cal
X}^L(T(M))$ the elements of ${\cal X}(T(M))$ which agree with $Z$
outside of~$L$. A subscript ``$<\delta$'' indicates the sections such
that each image vector has length less than the positive number
$\delta$, thus for example
$${\cal X}_{<1/2}(V,T(M))\!=\!
\set{X\in{\cal X}(V,T(M))\hbox{\hbox{$\;\vert\;$}} \norm{X(x)}<1/2\hbox{\ for all
$x\in V$}}\ .$$
\begin{lemma}{extension}{(Palais' lemma c)} Let $V$ be a compact
submanifold of the smooth manifold $M$ and $L$ a neighborhood of $V$
in $M$. Assume that the metric on $M$ is a product near $\partial M$
such that $V$ is vertical in $\partial M\times I$. Then there exists a
continuous map $k\colon {\cal X}_{<1/2}(V,T(M))\to {\cal X}^L(T(M))$
such that $k(X)(x)\!=\! X(x)$ for all $x$ in $V$ and all $X\in {\cal
X}_{<1/2}(V,T(M))$. Moreover, $k(Z)\!=\! Z$, and if $S\subseteq\partial
M$ is a closed neighborhood in $\partial M$ of $S\cap \partial V$, and
$X(x)\!=\! Z(x)$ for all $x\in S\cap \partial V$, then $k(X)(x)\!=\! Z(x)$
for all $x\in S$.
\marginwrite{extension}
\end{lemma}
\begin{proof}{} Suppose first that $V$ has positive codimension. Let
$\nu_\epsilon(V)$ denote the vectors of length less than $\epsilon$ in
the normal bundle of $V$. Fix $\epsilon<1/2$ and sufficiently small so
that $j\colon\nu_\epsilon(V)\to M$ defined by exponentiation is a
tubular neighborhood of $V$ contained in $L$, and such that the union
of the fibers at points in $S\cap\partial V$ is contained in $S$, and
the union of the fibers at points in $(\partial M-S)\cap\partial V$ is
contained in $\partial M-S$. By property (iii) of the metric, the
fiber of this neighborhood at each point of $V\cap \partial M\times
\set{t}$ lies in $\partial M\times\set{t}$. Since $V$ is compact, we
may choose $\epsilon$ sufficiently small so that $j(\omega)\in\partial
M\times I$ only when $\pi(\omega)\in\partial M\times I$.
Suppose $v\in T_x(M)$ and that $\exp(v)$ is defined. For all $u\in
T_x(M)$ define $P(u,v)$ to be the vector that results from parallel
translation of $u$ along the path that sends $t$ to~$\exp(tv)$, $0\leq
t\leq 1$. Note that $P(u,Z(x))\!=\! u$ for all $u$. Let $\alpha\colon
M\to [0,1]$ be a smooth function which is identically~1 on $V$ and
identically~0 on $M-j(\nu_{\epsilon/2}(V))$. Define $k\colon
{\cal X}_{<1/2}(V,T(M))\to{\cal X}^L(T(M))$ by
$$k(X)(x)\!=\!
\cases{
\alpha(x)P(X(\pi(j^{-1}(x))),j^{-1}(x))&for $x\in j(\nu_{\epsilon}(V))$\cr
Z(x)&for $x\in M-j(\nu_{\epsilon/2}(V))$\ .\cr}$$
\noindent For $x\in V$, $j^{-1}(x)\!=\! Z(x)$ and $\alpha(x)\!=\! 1$,
so $k(X)(x)\!=\! X(x)$. We must check that $k(X)\in{\cal X}^L(T(M))$.
Since the metric on $M$ is a product near $\partial M$,
$k(X)$ satisfies condition~(1) to be in ${\cal X}(T(M))$. To verify
that it satisfies condition~(2), fix $x$ such that $k(X)(x)\neq
Z(x)$. Suppose first that $x\!=\! (y,t)\in\partial M\times I$. Then
$\pi(j^{-1}(x))$ has the form $(y',t)$. If $t\geq 1/2$ then
$\norm{P(X(\pi(j^{-1}(x))),j^{-1}(x))}\!=\!\norm{X(\pi(j^{-1}(x)))}<1/2$,
so since $\alpha(x)\leq 1$, $\norm{k(X)(x)} \leq t\!=\! d(x,\partial M)$
and $\exp(k(X)(x))$ is defined. Suppose $t\leq 1/2$. Since the metric
is a product near $\partial M$, the component of $k(X)(x)$ in the
$I$-direction can be identified with the component of
$X(\pi(j^{-1}(x)))$ in the $I$-direction, so $\exp(k(X)(x))$ is
defined when $\exp(X(\pi(j^{-1}(x))))$ is. Finally, if $x\notin
\partial M\times I$ then also $\pi(j^{-1}(x))\notin \partial M\times
I$ so $d(\pi(j^{-1}(x)),\partial M)>1$. Since
$\norm{X(\pi(j^{-1}(x)))}<1/2$ and $\norm{j^{-1}(x)}<\epsilon<1/2$,
$\exp(k(X)(x))$ is a point that lies within distance~1
of~$\pi(j^{-1}(x))$. The fact that $k(Z)\!=\! Z$ is immediate from the
definition. Finally, if $X(x)\!=\! Z(x)$ for all $x\in S\cap\partial
V$, then since the metric is a product near the boundary it follows
that $k(X)(x)\!=\! Z(x)$ for all $x\in S$.
Now suppose that $V$ has codimension zero, so that its frontier $W$ is
a properly imbedded submanifold. Let $\nu^+_\epsilon(W)$ denote the
vectors of lengths less than $\epsilon$ in the normal bundle of $W$
that exponentiate into $\overline{M-V}$. Proceed as before, but define
$k$ by
$$k(X)(x)\!=\!
\cases{X(x)&for $x\in V$\cr
\alpha(x)P(X(\pi(j^{-1}(x))),j^{-1}(x))&for $x\in j(\nu^+_{\epsilon}(W))$\cr
Z(x)&for $x\in M-j(\nu^+_{\epsilon/2}(W))$\ .\cr}$$
\end{proof}
For our proof of lemma~d, we introduce some additional notation.
Assume that the metric on $M$ is selected to be a product near
$\partial M$. For $x\notin\partial M\times I$, let $R(x,\epsilon)$ be
the set of vectors in $T_x(M)$ of length less than $\epsilon$. If
$x\!=\! (y,t)\in\partial M\times I$, give $T_x(M)$ coordinates
$\omega_1,\ldots\,$,~$\omega_n$ so that
$\omega_1,\ldots\,$,~$\omega_{n-1}$ are in the $\partial M$-direction
(and hence exponentiate into $\partial M\times\set{t}$), and
$\omega_n$ is the coordinate in the $I$-direction. Then, define
$R(x,\epsilon)$ to be $\set{\omega\!=\! (\omega_1,\ldots, \omega_n)\in
T_x(M)\hbox{\hbox{$\;\vert\;$}}\norm{\omega}<\epsilon\hbox{\ and\ }\omega_n\geq -t}$. For
small $\epsilon$ the exponential map $\exp$ carries $R(x,\epsilon)$
diffeomorphically to an open neighborhood of $x$, even
when~$x\in\partial M$. For a properly imbedded submanifold $V$ of $M$
which meets $\partial M\times I$ in $I$-fibers, define
$N_\epsilon(V)\subset T(M)\vert_V$ by $N_\epsilon(V)\!=\! \cup_{x\in
V}R(x,\epsilon)$. When $V$ is compact, there exists a positive
$\epsilon$ such that for every $x\in V$, $\exp$ carries each
$N_\epsilon(V)\cap T_x(M)$ diffeomorphically to a neighborhood of
$x\in M$.
For spaces of imbeddings, a ``$<\delta$'' subscript indicates the
imbeddings that are $\delta$-close to the inclusion, thus for example
$$\hbox{\sl I\/}mb_{<\delta}(V,M)\!=\!\set{j\in\hbox{\sl I\/}mb(V,M)\hbox{\hbox{$\;\vert\;$}}
d(j(x),i_V(x))<\delta\hbox{\ for all $x\in V$}}\ .$$
\begin{lemma}{logarithm}{(Palais' lemma d)}
Assume that the metric on $M$ is a product near $\partial M$. Let $V$
be a compact submanifold of $M$ that meets $\partial M\times I$ in
$I$-fibers. For sufficiently small positive $\delta$, there exists a
continuous map $X\colon\hbox{\sl I\/}mb_{<\delta}(V,M)\to{\cal X}_{<1/2}(V,T(M))$
such that $\exp(X(j)(x))\!=\! j(x)$ for all $x\in V$ and $j\in
\hbox{\sl I\/}mb_{<\delta}(V,M)$. Moreover, if $j(x)\!=\! i_V(x)$ then $X(j)(x)\!=\!
Z(x)$.
\marginwrite{logarithm}
\end{lemma}
\begin{proof}{} Choose $\epsilon<1/2$ small enough so that for all $x\in
V$, $\exp$ gives a diffeomorphism from $N_\epsilon(V)\cap T_x(M)$ to a
neighborhood of $x$ in $M$. Choose $\delta$ small enough so that if
$j\in\hbox{\sl I\/}mb_{<\delta}(V,M)$ then $j(x)\in \exp(N_\epsilon(V)\cap
T_x(M))$. For $j\in\hbox{\sl I\/}mb_{<\delta}(V,M)$ define $X(j)(x)$ to be the
unique vector in $N_\epsilon(V)\cap T_x(M)$ such that $\exp(X(j)(x))$
equals~$j(x)$. We must verify that $X(j)\in{\cal X}_{<1/2}(V,T(M))$.
Property~(1) holds because the metric is a product near
$\partial M$, so for $x\in\partial M$ and short vectors $\omega\in T_x(M)$,
$\exp(\omega)\in\partial M$ if and only if $\omega$ is tangent
to~$\partial M$. Property~(2) and the final sentence of the lemma are
immediate.
\end{proof}
Before giving the main results of this section, we fix some notation
to simplify their statements. Suppose $V$ is a compact submanifold of
$M$, and $S\subseteq\partial M$ is a (possibly empty) closed subset
which is a neighborhood in $\partial M$ of $S\cap \partial V$. Note
that this implies that $S\cap\partial V$ is a union of components of
$V\cap\partial M$. In this situation, $\hbox{\sl I\/}mb(V,M\hbox{\ rel\ } S)$ will stand for
the space of imbeddings that equal the inclusion on $V\cap S$ and
carry $V\cap (\partial M-S)$ into $\partial M-S$. As usual,
$\hbox{\sl I\/}mb^L(V,M\hbox{\ rel\ } S)$ denotes the subspace consisting of all $j$ such
that $j(V)$ lies in the topological interior of~$L$.
The fundamental result is the analogue of theorem~B of \cite{P}. For
its proof we make one more definition. Define $F\colon{\cal
X}^L(T(M))\to \hbox{\rm Maps}^L(M,M)$ by $F(X)(x)\!=\!\exp(X(x))$. We recall that
condition~(1) of the definition of ${\cal X}(T(M))$ and the fact that
the metric is a product near the boundary guarantee that
$F(X)(\partial M)\subset \partial M$. Since $\exp$ is smooth, it
follows as in lemma~a of \cite{P} that $F$ is continuous.
\begin{theorem}{palaistheoremB}{} Let $V$ be a compact submanifold of
$M$, and let $S\subseteq\partial M$ be a closed neighborhood in
$\partial M$ of $S\cap \partial V$. Let $L$ be a neighborhood of $V$
in $M$. Then $\hbox{\sl I\/}mb^L(V,M\hbox{\ rel\ } S)$ admits local $\hbox{\sl D\/}iff^L(M\hbox{\ rel\ } S)$
cross-sections.
\marginwrite{palaistheoremB}
\end{theorem}
\begin{proof}{} By proposition~\ref{inclusion} it suffices to find a
local cross-section at the inclusion map $i_V$. Fix a compact
neighborhood $K$ of $V$ with $K\subseteq L$. Using
lemmas~\ref{logarithm} and~\ref{extension}, we obtain
$X\colon\hbox{\sl I\/}mb_{<\delta}(V,M)\to {\cal X}_{<1/2}(V,T(M))$ and $k\colon
{\cal X}_{<1/2}(V,T(M))\to {\cal X}^K(T(M))$. Let $J$ be a
neighborhood of $1_M$ in $\hbox{\rm Maps}^K(M,M)$ as in lemma~\ref{J}, and
define $U\!=\! (FkX)^{-1}(J)$. Then $\chi\!=\! FkX\colon U\to\hbox{\sl D\/}iff(M)$ is
the desired local $\hbox{\sl D\/}iff^L(M\hbox{\ rel\ } S)$ cross-section at~$i_V$.
\end{proof}
From proposition~\ref{theoremA} we have immediate corollaries.
\begin{corollary}{palaiscoro2}{} Let $V$ be a compact submanifold of
$M$. Let $S\subseteq\partial M$ be a closed neighborhood in $\partial
M$ of $S\cap \partial V$, and $L$ a neighborhood of $V$ in $M$. Then
the restriction $\hbox{\sl D\/}iff^L(M\hbox{\ rel\ } S)\to \hbox{\sl I\/}mb^L(V,M\hbox{\ rel\ } S)$ is locally
trivial.
\marginwrite{palaiscoro2}
\end{corollary}
\begin{corollary}{palaiscoro3}{} Let $V$ and $W$ be compact
submanifolds of $M$, with $W\subseteq V$. Let $S\subseteq\partial M$ be a
closed neighborhood in $\partial M$ of $S\cap \partial V$, and $L$ a
neighborhood of $V$ in $M$. Then the restriction $\hbox{\sl I\/}mb^L(V,M\hbox{\ rel\ } S)
\to\hbox{\sl I\/}mb^L(W,M\hbox{\ rel\ } S)$ is locally trivial.
\marginwrite{palaiscoro3}
\end{corollary}
\section{The vertical and aligned exponentials}
\label{exponent}\marginwrite{exponent}
Let $p\colon E\to B$ be a locally trivial smooth map of manifolds,
with compact fiber, and let $\pi\colon T(E)\to E$ denote the tangent
bundle of $E$. At each point $x\in E$, let $V_x(E)$ denote the {\it
vertical subspace} of $T_x(E)$ consisting of vectors tangent to the
fiber of~$p$. When $E$ has a Riemannian metric, the orthogonal
complement $H_x(E)$ of $V_x(E)$ in $T_x(E)$ is called the {\it
horizontal subspace.} We usually abbreviate $V_x(E)$ and $H_x(E)$ to
$V_x$ and $H_x$, and call their elements {\it vertical} and {\it
horizontal} respectively. Clearly $V_x$ is the kernel of $p_*\colon
T_x(E)\to T_{p(x)}(B)$, while $p_*\vert_{H_x}\colon H_x\to
T_{p(x)}(B)$ is an isomorphism. Each vector $\omega\in T_x(E)$ has an
orthogonal decomposition $\omega\!=\! \omega_v+\omega_h$.
Define the {\it horizontal boundary} $\partial_hE$ to be $\cup_{x\in
B}\partial(p^{-1}(x))$, and the {\it vertical boundary} $\partial_vE$
to be $p^{-1}(\partial B)$.
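For instance, if $E\!=\! B\times F$ is a product bundle with a product
metric (a special case used here only for illustration), then at a
point $x\!=\!(b,f)$ we have
$$V_x\!=\!\set{0}\times T_f(F)\ ,\qquad H_x\!=\! T_b(B)\times\set{0}\ ,$$
while $\partial_hE\!=\! B\times\partial F$ and
$\partial_vE\!=\!\partial B\times F$.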
A path $\alpha$ in $E$ is called {\it horizontal} if $\alpha'(t)\in
H_{\alpha(t)}$ for all $t$ in the domain of $\alpha$. Let
$\gamma\colon [a,b]\to B$ be a path such that $\gamma'(t)$ never
vanishes, and let $x\in E$ with $p(x)\!=\! \gamma(a)$. A horizontal path
$\widetilde{\gamma}\colon [a,b]\to E$ such that
$\widetilde{\gamma}(a)\!=\! x$ and $p\widetilde{\gamma}\!=\! \gamma$ is
called a {\it horizontal lift} of $\gamma$ starting at~$x$.
To ensure that horizontal lifts exist, we will need a special metric
on $E$. Using the local product structure, at each point $x$ in
$\partial_hE$ select a vector field defined on a neighborhood of
$x$ that
\begin{enumerate}
\item[{\rm(a)}] points into the fiber at points of $\partial_hE$, and
\item[{\rm(b)}] is tangent to the fibers wherever it is defined.
\end{enumerate}
\noindent Note that by (b), the vector field must be tangent to
$\partial_vE$ at points in $\partial_vE$. Since scalar multiples and
linear combinations of vectors satisfying these two conditions also
satisfy them, we may piece these local fields together using a
partition of unity to construct a vector field, nonvanishing on a
neighborhood of $\partial_hE$, that satisfies (a) and~(b). Using the
integral curves associated to this vector field we obtain a smooth
collar neighborhood $\partial_hE\times [0,2]$ of $\partial_hE$ such
that each $[0,2]$-fiber lies in a fiber of $p$. On $\partial_hE\times
[0,2)$, fix a Riemannian metric that is the product of a metric on
$\partial_hE$ and the usual metric on $[0,2)$. Form a metric on $E$
from this metric and any metric on all of $E$ using a partition of
unity subordinate to the open cover $\set{\partial_hE\times[0,2),
E-\partial_hE\times I}$. Such a metric is said to be a {\it product
near $\partial_hE$ such that the $I$-fibers of $\partial_hE\times I$
are vertical.} It has the following properties for $0\leq t\leq 1$:
\begin{enumerate}
\item[{\rm(i)}] If $x\in E-\partial_hE\times I$, then $d(x,\partial_hE)>1$.
\item[{\rm(ii)}] If $x\!=\!(y,t)\in\partial_hE\times\set{t}$, then
$d(x,\partial_hE)\!=\! t$.
\item[{\rm(iii)}] For $x\in\partial_hE\times\set{t}$, the horizontal
subspace $H_x$ is tangent to~$\partial_hE\times\set{t}$.
\end{enumerate}
\noindent To see property (iii), start with the fact that $H_x$ is
perpendicular to the fiber $p^{-1}(p(x))$. Since the $I$-fiber of
$\partial_hE\times I$ that contains $x$ lies in $p^{-1}(p(x))$, $H_x$
is orthogonal to that $I$-fiber as well. Since
$\partial_hE\times\set{t}$ meets the $I$-fiber orthogonally, with
codimension~1, $H_x$ is tangent to $\partial_hE\times\set{t}$.
Property (iii) implies that a horizontal lift starting in some
$\partial_hE\times \set{t}$ will continue in $\partial_hE\times
\set{t}$. Using the compactness of the fiber, the existence of horizontal
lifts will then be guaranteed.
\begin{lemma}{hlift}{}
Assume that the metric on $E$ is a product near $\partial_hE$ such
that the $I$-fibers of $\partial_hE\times I$ are vertical. Let
$\gamma\colon [a,b]\to B$ be a path such that $\gamma'(t)$ never
vanishes, and let $x\in E$ with $p(x)\!=\! \gamma(a)$. Then there exists
a unique horizontal lift of $\gamma$ starting at~$x$.
\marginwrite{hlift}
\end{lemma}
\begin{proof}{} For any horizontal lift $\widetilde{\gamma}(t)$, each
$\widetilde{\gamma}'(t)$ is uniquely determined, so the lift through a
given point in $E$ is unique if it exists. For each $\gamma(t)$, let
$F_{\gamma(t)}$ be the fiber over $\gamma(t)$. From the local theory
of ordinary differential equations, each point in $F_{\gamma(t)}$ that
does not lie in $\partial_hE$ has a neighborhood in
$p^{-1}(\gamma([a,b]))$ in which $\gamma$ has horizontal lifts. Since
the metric is a product near $\partial_hE$ such that the $I$-fibers
are vertical, the same is true for each point in $\partial_hE$. Since
the fiber is compact, for each $t$ there exists an $\epsilon$ such
that for every $x\in F_{\gamma(t)}$, the horizontal lift of $\gamma$
through $x$ exists for $s\in(t-\epsilon,t+\epsilon)$, and the result
follows using compactness of the interval~$[a,b]$.
\end{proof}
\noindent For the remainder of this section, assume that the
metric on $E$ is a product near $\partial_hE$ such that the $I$-fibers
of $\partial_hE\times I$ are vertical. Each fiber $F$ of $E$ inherits
a Riemannian metric from that of $E$, and has an exponential map
$\exp_F$ which (where defined) carries vectors tangent to $F$ to
points of $F$. Note that the path $\exp_F(t\omega)$ is not generally a
geodesic in $E$. The vertical exponential $\exp_v$ is defined by
$\exp_v(\omega)\!=\!\exp_F(\omega)$, where $\omega$ is a vertical vector
and $F$ is the fiber containing~$\pi(\omega)$.
Before defining the aligned exponential map $\exp_a$,
we will motivate its definition. A vector field $X\colon E\to T(E)$ is
called {\it aligned} if $p(x)\!=\! p(y)$ implies that $p_*(X(x))\!=\!
p_*(X(y))$. This happens precisely when there exist a vector field
$X_B$ on $B$ and a vertical vector field $X_V$ on $E$ so that for all
$x\in E$,
$$X(x)\!=\! (p_*\vert_{H_x})^{-1}(X_B(p(x)))+X_V(x)\ .$$
\noindent In particular, any vertical vector field is aligned. When
$X$ is aligned, the projected vector field $p_*X$ is well-defined.
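For instance, on a product bundle $E\!=\! B\times F$ with a product
metric (again only an illustrative special case), the aligned vector
fields are exactly those of the form
$$X(b,f)\!=\!\big(X_B(b),\,Y(b,f)\big)\ ,$$
where $X_B$ is a vector field on $B$ and $Y(b,f)\in T_f(F)$; here
$p_*X\!=\! X_B$, while a field whose $B$-component varies with $f$ is
not aligned.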
The key property of $\exp_a$ is that if $X$ is an aligned vector field
on $E$, and $\exp_a(X(x))$ is defined for all $x$, then the map of $E$
defined by sending $x$ to $\exp_a(X(x))$ will be fiber-preserving.
$\exp_a$ is defined as follows. Consider a tangent vector $\beta$ in
$B$ such that $\exp(\beta)$ is defined. A geodesic segment
$\gamma_\beta$ starting at $\pi(\beta)$ is defined by
$\gamma_\beta(t)\!=\! \exp(t\beta)$, $0\leq t\leq 1$. Define
$\exp_a(\omega)$ to be the endpoint of the unique horizontal lift of
$\gamma_{p_*(\omega)}$ starting at $\exp_v(\omega_v)$. Note that
$\exp_a(\omega)$ exists if and only if both $\exp_v(\omega_v)$ and
$\exp(p_*(\omega))$ exist. Clearly, when $\exp_a(\omega)$ is defined,
it lies in the fiber containing the endpoint of a lift of
$\gamma_{p_*(\omega)}$, and therefore $p(\exp_a(\omega))\!=\!
\exp(p_*(\omega))$. This immediately implies that if $X$ is an aligned
vector field on $E$ such that $\exp_a(X(x))$ is defined for all $x\in
E$, then the map defined by sending $x$ to $\exp_a(X(x))$ takes fibers
to fibers, and in particular if $X$ is vertical, it takes each fiber
to itself.
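In the illustrative product case $E\!=\! B\times F$ with a product
metric, writing $\omega\!=\!(\beta,\omega_v)\in T_b(B)\times T_f(F)$,
the horizontal lift of $\gamma_\beta$ starting at
$(b,\exp_F(\omega_v))$ is $t\mapsto(\exp(t\beta),\exp_F(\omega_v))$, so
$$\exp_a(\omega)\!=\!\big(\exp(\beta),\,\exp_F(\omega_v)\big)\ ,$$
which in this special case agrees with the ordinary exponential map of
$E$; in general the two need not agree.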
In section~\ref{restrict} we will need a further refinement of the
metric on $E$, namely that it also be a product near $\partial_vE$. To
achieve this, we proceed as follows. If $\partial_vE$ is empty, there
is nothing needed, and if $\partial_hE$ is empty, then we simply
choose a metric which is a product near the boundary as in
section~\ref{metrics}. Assuming that both are nonempty, put $Y\!=\!
\partial(\partial_vE)\!=\! \partial(\partial_hE)\!=\!
\partial_vE\cap \partial_hE$. Let $R_h$ be the complete metric on
$\partial_hE$ that was used to construct the metric $R$ on $E$
that is a product on a collar $\partial_hE\times[0,1]_1$, where the
subscript will distinguish this interval from another to be selected
later. Since the choice of $R_h$ was arbitrary, we may assume that
$R_h$ was a product near $Y$. That is, after reparametrizing, we may
assume that there is a collar $Y\times[0,2]_2$ of $Y$ in $\partial_hE$
such that $R_h$ is a product on all of $Y\times[0,2]_2$. Now
$Y\times[0,2]_1$ is a collar of $Y$ in $\partial_vE$, and
$Y\times[0,2]_1\times[0,2]_2$ is a partial collar of $\partial_vE$
defined on the subset $Y\times[0,2]_1$. Extend this to a collar
$\partial_vE\times[0,2]_2$. Let $R_v$ be the product of the
restricted metric $R\vert_{\partial_vE}$ and the standard metric on
$[0,2]_2$. Since $R_h$ was a product near $Y$, we have $R_v\!=\! R$ on
$Y\times[0,1]_1\times [0,2]_2$. Now form a new metric on $E$ by
piecing together $R_v$ and $R$ using a partition of unity subordinate
to the open cover $\set{\partial_vE\times[0,2)_2,
E-\partial_vE\times[0,1]_2}$. On points of
$Y\times[0,1]_1\times[0,2)_2$ the new metric is just a linear
combination of the form $tR+(1-t)R$, so agrees with $R$. The resulting
metric is both a product near $\partial_hE$ and a product
near~$\partial_vE$.
\section{Projection of fiber-preserving diffeomorphisms}
\label{project}\marginwrite{project}
Throughout this section and the next, it is understood that $p\colon
E\to B$ is a locally trivial smooth map as in section~\ref{exponent},
such that the metric on $B$ is a product near $\partial B$, and the
metric on $E$ is a product near $\partial_hE$ such that the $I$-fibers
of $\partial_hE\times I$ are vertical. When $W$ is a vertical
submanifold of $E$, it is automatic that $W$ meets the collar
$\partial_hE\times I$ in $I$-fibers, and by rechoosing the metric
on $B$ we may assume that $p(W)$ meets the collar $\partial B\times I$
in $I$-fibers. Define $\partial_hW\!=\! W\cap\partial_hE$ and
$\partial_vW\!=\! W\cap\partial_vE$.
Define ${\cal A}(W,T(E))$ to be the sections $X$ from $W$ to
$T(E)\vert_W$ such that
\begin{enumerate}
\item[(1)] $X$ is aligned, that is, if $p(w_1)\!=\! p(w_2)$ then
$p_*(X(w_1))\!=\! p_*(X(w_2))$,
\item[(2)] if $x\in \partial_hW$, then $X(x)$ is tangent to
$\partial_hE$, and if $x\in \partial_vW$, then $X(x)$ is tangent to
$\partial_vE$, and
\item[(3)] $\exp_a(X(x))$ is defined for all $x\in W$.
\end{enumerate}
\noindent
When $W\!=\! E$, the vector fields satisfying~(1), (2), and~(3) are
denoted by ${\cal A}(T(E))$. The embellishments ${\cal A}^L(T(E))$ and
${\cal A}_{<1/2}(W,T(E))$ have the same meanings as in
section~\ref{palais}. The elements of ${\cal A}(W,T(E))$ such that
$p_*X(x)\!=\! Z(p(x))$ for all $x\in W$ are denoted by ${\cal
V}(W,T(E))$, and similarly for ${\cal V}(T(E))$.
Define $F_a\colon{\cal A}^L(T(E))\to \hbox{\rm Maps}^L(E,E)$ by
$F_a(X)(x)\!=\!\exp_a(X(x))$. Since $\exp_a$ is smooth, it follows as in
lemma~a of \cite{P} that $F_a$ is continuous.
\begin{theorem}{theorem1}{} Let $K$ be a compact subset of $B$. Let
$S$ be a subset of $\partial B$, and let $T\!=\! p^{-1}(S)$. Then
$\hbox{\sl D\/}iff^K(B\hbox{\ rel\ } S)$ admits local $\hbox{\sl D\/}iff^{p^{-1}(K)}_f(E\hbox{\ rel\ } T)$
cross-sections.
\marginwrite{theorem1}
\end{theorem}
\begin{proof}{} Choose a compact subset $L$ of $B$ such that $K\subseteq
\hbox{\it int}(L)$. By lemma~\ref{logarithm}, there exist $\delta>0$
and a continuous map $X_1\colon \hbox{\sl I\/}mb_{<\delta}(L,B)\to {\cal
X}_{<1/2}(L,T(B))$ such that $\exp(X_1(j)(x))\!=\! j(x)$ for all $x\in
L$ and all $j\in\hbox{\sl I\/}mb_{<\delta}(L,B)$. Moreover, if $j(x)\!=\! x$, then
$X_1(j)(x)\!=\! Z(x)$.
Let $\rho\colon \hbox{\sl D\/}iff^K(B)\to \hbox{\sl I\/}mb(L,B)$ be restriction. Put $U_0\!=\!
\rho^{-1}(\hbox{\sl I\/}mb_{<\delta}(L,B))$, a neighborhood of $1_B$, and define
$X_0\colon U_0\to {\cal X}(T(B))$ by
$$X_0(f)(x)\!=\!\cases{X_1(f\vert_L)(x)&if $x\in L$\cr
Z(x)&if $x\notin K$\ .\cr}$$
\noindent This makes sense since if $x\in L-K$, then $f\vert_L(x)\!=\!
x$ so $X_1(f\vert_L)(x)\!=\! Z(x)$. We have $X_0(1_B)\!=\! Z$ and
$\exp(X_0(f)(x))\!=\! f(x)$ for every $f\in U_0$ and $x\in B$.
Let $h\in \hbox{\sl D\/}iff^K(B)$. For every $g\in U_0h$, $\exp(X_0(gh^{-1})(x))\!=\!
gh^{-1}(x)$. Define
$$\widetilde{\chi}(g)(x)=
\big(p_*\vert_{H_x}\big)^{-1}(X_0(gh^{-1})(p(x)))\ ,$$
\noindent so that $\widetilde{\chi}(g)$ is an aligned section of
T(E)$. We have that $\exp_a(\widetilde{\chi}(g)(x))$ exists since
$\exp(p_*\widetilde{\chi}(g)(x))\!=\! \exp(X_0(gh^{-1})(p(x)))\!=\!
gh^{-1}(p(x))$ exists and $\exp_v(\widetilde{\chi}(g)(x))\!=\! x$
exists. The other conditions are easily checked to verify that
$\widetilde{\chi}(g)\in{\cal A}^{p^{-1}(K)}(T(E))$.
Let $J$ be a neighborhood of $1_E\in\hbox{\rm Maps}^{p^{-1}(K)}(E,E)$ given by
lemma~\ref{J}, and put $U\!=\!\widetilde{\chi}^{-1}F_a^{-1}(J)$. Define
$\chi\colon U\to\hbox{\sl D\/}iff_f(E)$ by $\chi(g)\!=\! F_a\widetilde{\chi}(g)$.
The local cross-section condition holds, since given $b\in B$ we may
choose $x$ with $p(x)\!=\! h(b)$ and calculate that
$$\eqalign{\ol{\chi(g)}h(b)&= p(\chi(g)(x))\cr
&=p\big(\exp_a\big(\widetilde{\chi}(g)(x)\big)\big)\cr
&=\exp\big(X_0(gh^{-1})(h(b))\big)\cr
&=gh^{-1}(h(b))\ .\cr}$$
\noindent If $g,h\in\hbox{\sl D\/}iff^K(B\hbox{\ rel\ } S)$, then
$X_0(gh^{-1})(x)\!=\! Z(x)$ for all $x\in S$. It follows that
$\widetilde{\chi}(g)(x)\!=\! Z(x)$ for all $x\in T$, so
$\chi(g)\in\hbox{\sl D\/}iff^{p^{-1}(K)}_f(E\hbox{\ rel\ } T)$.
\end{proof}
From proposition~\ref{theoremA}, we have immediately
\begin{theorem}{project diffs}{} Let $K$ be a compact subset of $B$.
Let $S\subseteq\partial B$ and let $T\!=\! p^{-1}(S)$. Then
$\hbox{\sl D\/}iff^{p^{-1}(K)}_f(E\hbox{\ rel\ } T)\to \hbox{\sl D\/}iff^K(B\hbox{\ rel\ } S)$ is locally trivial.
\marginwrite{project diffs}
\end{theorem}
The homotopy lifting property of the fibration in
theorem~\ref{project diffs} yields immediately the following
corollary. As indicated in the introduction, each of the fibration
theorems we prove in this paper has a corresponding corollary
involving parameterized lifting or extension, but since the statements
are all analogous we give only the following one as a prototype.
\begin{corollary}{isotopy lifting}{(Parameterized Isotopy Extension
Theorem)} Let $K$ be a compact subset of $B$, let $S\subseteq\partial
B$, and let $T\!=\! p^{-1}(S)$. Suppose that for each $t$ in a
path-connected parameter space $P$ there is an isotopy $g_{t,s}$,
which is the identity on $S$ and outside of $K$, such that $g_{t,0}$
lifts to a diffeomorphism $G_{t,0}$ of $E$ which is the identity on
$T$. Assume that sending $(t,s)\to g_{t,s}$ defines a continuous
function from $P\times [0,1]$ to $\hbox{\sl D\/}iff(B\hbox{\ rel\ } S)$ and sending $t$ to
$G_{t,0}$ defines a continuous function from $P$ to $\hbox{\sl D\/}iff(E\hbox{\ rel\ }
T)$. Then the family $G_{t,0}$ extends to a continuous family on
$P\times I$ such that for each $(t,s)$, $G_{t,s}$ is a
fiber-preserving diffeomorphism inducing $g_{t,s}$ on~$B$.
\marginwrite{isotopy lifting}
\end{corollary}
\section{Restriction of fiber-preserving diffeomorphisms}
\label{restrict}
\marginwrite{restrict}
In this section we present the analogues of the main results of
\cite{P} in the fibered case. As in section~\ref{project}, we
assume that the metric on $B$ is a product near $\partial B$, and the
metric on $E$ is a product near $\partial_hE$ such that the $I$-fibers
of $\partial_hE\times I$ are vertical. We further assume that the
metric on $E$ is a product near $\partial_vE$; this is needed only in
the proof of lemma~\ref{lemmaC}.
It is first necessary to adapt lemmas~\ref{extension} and~\ref{logarithm}.
\begin{lemma}{lemmaC}{}
Let $W$ be a compact vertical submanifold of $E$. Let $T$ be a closed
fibered neighborhood in $\partial_vE$ of $T\cap\partial_vW$, and let
$L\subseteq E$ be a neighborhood of $W$. For sufficiently small
$\delta$ there exists a continuous map $k\colon{\cal
A}_{<\delta}(W,T(E))\to {\cal A}^L(T(E))$ such that $k(X)(x)\!=\! X(x)$
for all $x\in W$ and $X\in {\cal A}_{<\delta}(W,T(E))$. If $X(x)\!=\!
Z(x)$ for all $x\in T\cap\partial_vW$, then $k(X)(x)\!=\! Z(x)$ for all
$x\in T$. Furthermore, $k({\cal V}_{<\delta}(W,T(E)))\subset {\cal
V}^L(T(E))$.
\marginwrite{lemmaC}
\end{lemma}
\begin{proof}{} Assume first that $W$ has positive codimension. Since
the fiber of $p$ is compact, we may assume that $p(L)$ is a
neighborhood of $p(W)$ with $p^{-1}(p(L))\!=\! L$. Since $W$ is compact
we may choose $\delta<1/2$ such that if $X\in {\cal
A}_{<\delta}(W,T(E))$ then $\norm{p_*X(x)}<1/2$ for all $x\in p(W)$.
Let $k_B\colon {\cal X}_{<1/2}(p(W),T(B)) \to{\cal X}^{p(L)}(T(B))$ be
given by lemma~\ref{extension}, using $p(T)$ as the neighborhood $S$
in lemma~\ref{extension}. The vectors
$(p_*\vert_{H_x})^{-1}(k_B(p_*X)(p(x)))$ will give the horizontal part
of our extension $k$, but to obtain sufficient control of the vertical
part we will need to adapt the proof of lemma~\ref{extension}
using~$\exp_a$.
Let $\nu_\epsilon(W)$ be the $\epsilon$-normal bundle of $W$. Note
that its fibers are horizontal, since the tangent space of $W$
includes the bundle of vertical vectors of $T(E)$ at points of $W$.
For sufficiently small~$\epsilon$, $j_a\colon \nu_\epsilon(W)\to E$
can be defined by $j_a(\omega)\!=\!\exp_a(\omega)$ and carries
$\nu_\epsilon(W)$ diffeomorphically to a neighborhood of $W$ in $E$.
We choose $\epsilon$ small enough so that this neighborhood is
contained in $L$, and so that the image of the fibers at points of
$T\cap\partial_vW$ lies in $T$ and the image of the fibers at points
of $(\partial_vE-T)\cap\partial_vW$ lies in~$\partial_vE-T$.
If $x\in\partial_hE\times \set{t}$, $j_a$ carries the normal fiber at
$x$ into $\partial_hE\times\set{t}$. Since $W$ is compact, we may
choose $\epsilon$ small enough so that $j_a(\omega)\in
\partial_hE\times I$ only when $\pi(\omega)\in\partial_hE\times I$.
Suppose $v\in T_x(E)$ and that $\exp_a(v)$ is defined. For all $u\in
T_x(E)$ define $P_a(u,v)$ to be the vector that results from parallel
translation of $u$ along the path that sends $t$ to~$\exp_a(tv)$, $0\leq
t\leq 1$. Let $\alpha\colon E\to [0,1]$ be a smooth function which is
identically~1 on $W$ and identically~0 on
$E-j_a(\nu_{\epsilon/2}(W))$. Define $k_E\colon {\cal
A}_{<\delta}(W,T(E))\to {\cal V}^L(T(E))$ by
$$k_E(X)(x)\!=\!
\cases{
\alpha(x)P_a(X(\pi(j_a^{-1}(x))),j_a^{-1}(x))_v&for
$x\in j_a(\nu_{\epsilon}(W))$\cr
Z(x)&for $x\in E-j_a(\nu_{\epsilon/2}(W))$\ .\cr}$$
\noindent Note that if $X$ is vertical, then $k_E(X)(x)\!=\! X(x)$ for all
$x\in W$. For later use we make two observations about $k_E$. First,
if $X(x)\!=\! 0$ for all $x\in T\cap\partial_vW$, then $k_E(X)(x)\!=\! 0$
for all $x\in T$. This is because $j_a^{-1}(T)$ consists exactly of
the vectors normal to $W$ at the points of $T\cap\partial_vW$. Second,
if $x\!=\!(y,t)\in\partial_hE\times I$, and $x\in j_a(\nu_\epsilon(W))$,
then $\pi(j_a^{-1}(x))$ is of the form $(y',t)$, and either
$k_E(X)(x)\!=\! Z(x)$ or the component of $k_E(X)(x)$ in the
$I$-direction is of the form $\beta\omega_I$, where $0<\beta\leq1$ and
$\omega_I$ is the component of $X(\pi(j_a^{-1}(x)))$ in the
$I$-direction. This follows because the metric is a product on
$\partial_hE\times I$, so parallel translation preserves the component
in the $I$-direction. Consequently, since
$\exp_v(X(\pi(j_a^{-1}(x))))$ is defined, so is $\exp_v(k_E(X)(x))$.
For $X\in{\cal A}_{<\delta}(W,T(E))$, define $X_v(x)=X(x)_v$, and put
$$k(X)(x)=(p_*\vert_{H_x})^{-1}(k_B(p_*X)(p(x)))\;+\;k_E(X_v)(x)\ .$$
\noindent
We need to check that $k(X)$ lies in ${\cal A}^L(T(E))$. From its
definition, $k(X)$ is aligned and vanishes outside of $L$. Let
$x\in\partial_hE$ and suppose that $k(X)(x)\neq Z(x)$. Then
$\pi(j_a^{-1}(x))\in\partial_hE$ and since $X\in{\cal A}(W,T(E))$,
$X(\pi(j_a^{-1}(x)))$ is tangent to $\partial_hE$. Since the metric is
a product near $\partial_hE$, parallel translation preserves vectors
tangent to $\partial_hE$, and it follows that $k(X)(x)$ is tangent to
$\partial_hE$. Suppose that $x\in\partial_vE$ and $k(X)(x)\neq Z(x)$.
Since $k_B(p_*X)(p(x))$ is tangent to $\partial B$,
$\big(p_*\vert_{H_x}\big)^{-1}(k_B(p_*X)(p(x)))$ is tangent to
$\partial_vE$. The fact that the metric on $E$ is a product near
$\partial_vE$ implies that $\pi(j_a^{-1}(x))\in\partial_vW$, and
moreover, since $X(\pi(j_a^{-1}(x)))$ is tangent to $\partial_vE$,
that $P_a(X(\pi(j_a^{-1}(x))),j_a^{-1}(x))$ is also tangent to
$\partial_vE$. We conclude that $k(X)(x)$ is tangent to $\partial_vE$.
The fact that $\exp(k_B(p_*X)(p(x)))$ was defined, together with the
second observation after the definition of~$k_E$, implies that
$\exp_a(k(X)(x))$ is always defined.
Suppose that $X(x)\!=\! Z(x)$ for all $x\in T\cap\partial_vW$. Then
$p_*(X)(p(x))\!=\! Z(p(x))$ for all $x\in p(T)\cap\partial(p(W))$, so
$k_B(p_*X)(p(x))\!=\! Z(p(x))$ for all $x\in p(T)$. The first
observation after the definition of $k_E$ shows that $k_E(X)(x)\!=\!
Z(x)$ for all $x\in T$. Therefore $k(X)(x)\!=\! Z(x)$ for all~$x\in T$.
For the last statement, if $X\in{\cal V}(W,T(E))$, then
$p_*X(p(x))\!=\! Z(p(x))$ for all $x\in W$, so
$k_B(p_*X)\!=\! Z$ and the horizontal summand of $k(X)$ vanishes
identically. Therefore $k(X)\!=\! k_E(X_v)\in{\cal V}^L(T(E))$.
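A point left implicit above is the first property asserted in the
lemma, namely that $k(X)$ restricts to $X$ on $W$. Granting that
lemma~\ref{extension} gives $k_B(Y)(y)\!=\! Y(y)$ for $y\in p(W)$, as
the other extension maps do, this is a direct check: for $x\in W$ the
vector $j_a^{-1}(x)$ is the zero vector at $x$ and $\alpha(x)=1$, so
$k_E(X_v)(x)=X_v(x)$, and therefore
$$k(X)(x)=\big(p_*\vert_{H_x}\big)^{-1}(p_*(X(x)))+X(x)_v=X(x)\ ,$$
\noindent since $\big(p_*\vert_{H_x}\big)^{-1}(p_*(X(x)))$ is precisely the
horizontal part of~$X(x)$.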
The case when $W$ has codimension zero is similar. As in the proof of
lemma~\ref{extension}, use the subset $\nu_\epsilon^+(\hbox{\it Fr\/}(W))$
consisting of the vectors in the normal bundle of the frontier of $W$
whose aligned exponential lies in $\overline{E-W}$, and define
$$k_E(X)(x)\!=\!
\cases{X(x)&for $x\in W$\cr
\alpha(x)P_a(X(\pi(j_a^{-1}(x))),j_a^{-1}(x))_v&for
$x\in j_a(\nu^+_{\epsilon}(\hbox{\it Fr\/}(W)))$\cr
Z(x)&for $x\in E-j_a(\nu^+_{\epsilon/2}(\hbox{\it Fr\/}(W)))$\ .\cr}$$
\end{proof}
For the next lemma we will adapt the neighborhood $N_\epsilon(V)$ used
in the proof of lemma~\ref{logarithm} into the fibered context. For
$x\in E$, let $R_B(p(x),\epsilon)$ be the subset of $T_{p(x)}(B)$ as
defined before the statement of lemma~\ref{logarithm}. Denote
$p^{-1}(p(x))$ by $F$, and let $R_F(x,\epsilon)$ be the subset of
$T_x(F)$ defined before the statement of lemma~\ref{logarithm}. Regard
$T_x(F)$ as the vertical subset $V_x(E)$ of $T_x(E)$, and observe that
for sufficiently small $\epsilon$, the aligned exponential $\exp_a$
carries $R_F(x,\epsilon)$ to a neighborhood of $x$ in $F$, since on
$T_x(F)$, $\exp_a$ agrees with the exponential map of~$F$. Now define
$S(x,\epsilon)\!=\! R_F(x,\epsilon)\times
\Big(p_*\vert_{H_x}\Big)^{-1}(R_B(p(x),\epsilon))\subset V_x(E)\times
H_x(E)\!=\! T_x(E)$. For a vertical submanifold $W\subseteq E$, define
$N_\epsilon(W)\!=\! \cup_{x\in W}S(x,\epsilon)$. Provided that $W$ is
compact, we may choose a positive $\epsilon$ such that for each $x\in
W$, $\exp_a\colon N_\epsilon(W)\cap T_x(E)\to E$ is a diffeomorphism
onto a neighborhood of $x$ in~$E$.
\begin{lemma}{lemmaD}{}
Let $W$ be a compact vertical submanifold of $E$. For sufficiently
small $\delta$, there exists a continuous map $X\colon
(\hbox{\sl I\/}mb_f)_{<\delta}(W,E)\to{\cal A}(W,T(E))$ such that
$\exp_a(X(j)(x))\!=\! j(x)$ for all $x\in W$ and $j\in
(\hbox{\sl I\/}mb_f)_{<\delta}(W,E)$, and moreover if $j(x)\!=\! i_W(x)$ then
$X(j)(x)\!=\! Z(x)$. Furthermore, $X((\hbox{\sl I\/}mb_v)_{<\delta}(W,E))\subset
{\cal V}(W,T(E))$.
\marginwrite{lemmaD}
\end{lemma}
\begin{proof}{} Let $N_\epsilon(W)$ be as defined above,
with $\epsilon$ small enough to ensure the local diffeomorphism
condition. Choose $\delta$ small enough so that for
every $x\in W$ and every $j\in (\hbox{\sl I\/}mb_f)_{<\delta}(W,E)$,
$j(x)\in\exp_a(N_\epsilon(W)\cap T_x(E))$. Define $X(j)(x)$ to be the
unique vector in $N_\epsilon(W)\cap T_x(E)$ such that $\exp_a(X(j)(x))$
equals~$j(x)$.
\end{proof}
In the statements of our remaining results, the notation $\hbox{\sl I\/}mb(W,E\hbox{\ rel\ }
T)$ is as defined before the statement of
theorem~\ref{palaistheoremB}.
\begin{theorem}{theorem2}{}
Let $W$ be a compact vertical submanifold of $E$. Let $T$ be a closed
fibered neighborhood in $\partial_vE$ of $T\cap \partial_vW$, and let
$L$ be a neighborhood of $W$. Then
\begin{enumerate}
\item[{\rm (i)}] $\hbox{\sl I\/}mb_f^L(W,E\hbox{\ rel\ } T)$ admits local $\hbox{\sl D\/}iff_f^L(E\hbox{\ rel\ }
T)$ cross-sections, and
\item[{\rm (ii)}] $\hbox{\sl I\/}mb_v(W,E\hbox{\ rel\ } T)$ admits local $\hbox{\sl D\/}iff_v^L(E\hbox{\ rel\ }
T)$ cross-sections.
\end{enumerate}
\marginwrite{theorem2}
\end{theorem}
\begin{proof}{} By proposition~\ref{inclusion}, it suffices to find
local cross-sections at the inclusion $i_W$. Choose a compact
neighborhood $K$ of $W$ with $K\subseteq L$. Let $k\colon{\cal
A}_{<\delta}(W,T(E))\to{\cal A}^K(T(E))$ be obtained using
lemma~\ref{lemmaC}. Using lemma~\ref{lemmaD}, choose $\delta_1>0$ and
$X\colon (\hbox{\sl I\/}mb_f)_{<\delta_1}(W,E)\to {\cal A}(W,T(E))$. If $j\in
\hbox{\sl I\/}mb_f^L(W,E\hbox{\ rel\ } T)$, then $X(j)(x)\!=\! Z(x)$ for all $x\in T\cap\partial_vW$.
Since $X$ is continuous and $X(i_W)\!=\! Z$, we may choose a
neighborhood $U$ of $i_W$ in $\hbox{\sl I\/}mb_f^L(W,E\hbox{\ rel\ } T)$ so that
$X(U)\subset {\cal A}_{<\delta}(W,T(E))$. For $j\in U$ define
$\chi(j)\!=\! F_akX(j)$. From lemma~\ref{J}, we may make $U$ small
enough to ensure that $\chi(j)$ is a diffeomorphism, and then $\chi$
is the cross-section that proves~(i). For~(ii), suppose that $j\in
U\cap\hbox{\sl I\/}mb_v^L(W,E\hbox{\ rel\ } T)$. Since $k({\cal
V}_{<\delta}(W,T(E)))\subseteq{\cal V}^K(T(E))$, $\chi(j)$ lies in
$\hbox{\sl D\/}iff_v(E\hbox{\ rel\ } T)$, so the restriction of $\chi$ to
$U\cap\hbox{\sl I\/}mb_v^L(W,E\hbox{\ rel\ } T)$ is the necessary cross-section.
\end{proof}
Using proposition~\ref{theoremA}, we have the following immediate
corollaries.
\begin{corollary}{corollary2}{}
Let $W$ be a compact vertical submanifold of $E$. Let $T$ be a closed
fibered neighborhood in $\partial_vE$ of $T\cap \partial_vW$, and $L$
a neighborhood of $W$. Then the following restrictions are locally
trivial:
\begin{enumerate}
\item[{\rm(i)}] $\hbox{\sl D\/}iff_f^L(E\hbox{\ rel\ } T)\to\hbox{\sl I\/}mb_f^L(W,E\hbox{\ rel\ } T)$, and
\item[{\rm(ii)}] $\hbox{\sl D\/}iff_v^L(E\hbox{\ rel\ } T)\to \hbox{\sl I\/}mb_v(W,E\hbox{\ rel\ } T)$.
\end{enumerate}
\marginwrite{corollary2}
\end{corollary}
\begin{corollary}{corollary3}{} Let $V$ and $W$ be vertical
submanifolds of $E$. Let $T$ be a closed fibered neighborhood in
$\partial_vE$ of $T\cap \partial_vV$, and let $L$ be a neighborhood of
$V$. Then the following restrictions are locally trivial:
\begin{enumerate}
\item[{\rm(i)}] $\hbox{\sl I\/}mb_f^L(V,E\hbox{\ rel\ } T)\to\hbox{\sl I\/}mb_f^L(W,E\hbox{\ rel\ } T)$.
\item[{\rm(ii)}] $\hbox{\sl I\/}mb_v(V,E\hbox{\ rel\ } T)\to \hbox{\sl I\/}mb_v(W,E\hbox{\ rel\ } T)$.
\end{enumerate}
\marginwrite{corollary3}
\end{corollary}
The final result of this section includes some of our previous
results.
\begin{theorem}{square}{} Let $W$ be a compact vertical submanifold of
$E$. Let $K$ be a compact neighborhood of $p(W)$ in $B$. Let $T$ be a
closed fibered neighborhood in $\partial_vE$ of $T\cap
\partial_vW$, and put $S\!=\! p(T)$. Then all four maps in the following
square are locally trivial:
$$\vbox{\halign{\hfil#\hfil\quad&#&\quad\hfil#\hfil\cr
$\hbox{\sl D\/}iff_f^{p^{-1}(K)}(E\hbox{\ rel\ } T)$&$\longrightarrow$&
$\hbox{\sl I\/}mb_f^{p^{-1}(K)}(W,E\hbox{\ rel\ } T)$\cr
\noalign{
}
$\mapdown{}$&&$\mapdown{}$\cr
\noalign{
}
$\hbox{\sl D\/}iff^K(B\hbox{\ rel\ } S)$&$\longrightarrow$&
$\hbox{\sl I\/}mb^K(p(W),B\hbox{\ rel\ } S)$\rlap{\ .}\cr}}$$
\marginwrite{square}
\end{theorem}
\begin{proof}{} The top arrow is corollary~\ref{corollary2}(i), the
left vertical arrow is theorem~\ref{theorem1}, and the bottom arrow is
corollary~\ref{palaiscoro2}. For the right vertical arrow, we will
first show that $\hbox{\sl I\/}mb^K(p(W),B \hbox{\ rel\ }\allowbreak S)$ admits local
$\hbox{\sl D\/}iff_f^{p^{-1}(K)}(E\allowbreak \hbox{\ rel\ }\allowbreak T)$ cross-sections.
Let $i\in \hbox{\sl I\/}mb^K(p(W),B\hbox{\ rel\ } S)$. Using theorems~\ref{theorem2}
and~\ref{theorem1}, choose local cross-sections $\chi_1\colon U\to
\hbox{\sl D\/}iff^K(B\hbox{\ rel\ } S)$ at $i$ and $\chi_2\colon
V\to\hbox{\sl D\/}iff_f^{p^{-1}(K)}\allowbreak(E\hbox{\ rel\ } T)$ at $\chi_1(i)$. Let
$U_1\!=\!\chi_1^{-1}(V)$; then for $j\in U_1$ we have
$$\overline{\chi_2\chi_1(j)}i=\overline{\chi_2(\chi_1(j))}i=
\chi_1(j)i=j\ .$$
\noindent Since the right vertical arrow is
$\hbox{\sl D\/}iff_f^{p^{-1}(K)}(E\hbox{\ rel\ } T)$-equivariant, proposition \ref{theoremA}
implies it is locally trivial.
\end{proof}
\section{Palais' theorem for orbifolds}
\marginwrite{orbifold}
\label{orbifold}
In this section, we prove the main results from \cite{P} in the
context of orbifolds. Let ${\cal O}$ be a connected smooth orbifold
whose universal covering $\widetilde{\cal O}$ is a manifold. Denote by
$\tau\colon\widetilde{\cal O}\to{\cal O}$ the orbifold universal
covering, and let $H$ be its group of covering transformations. Since
${\cal O}$ is smooth, the elements of $H$ are diffeomorphisms.
Let $\hbox{\rm Maps}_H(\widetilde{\cal O},\widetilde{\cal O})$ be the space of
weakly $H$-equivariant maps, that is, the maps $f\colon
\widetilde{\cal O}\to \widetilde{\cal O}$ such that for some
automorphism $\alphalpha$ of $H$, $f(h(x))=\alphalpha(h)(f(x))$ for all $x\in
\widetilde{\cal O}$ and $h\in H$. Let $\hbox{\sl D\/}iff_H(\widetilde{\cal O})$ be
the weakly $H$-equivariant diffeomorphisms of $\widetilde{\cal O}$.
It is the normalizer of $H$ in $\hbox{\sl D\/}iff(\widetilde{\cal O})$.
An {\it orbifold diffeomorphism} of ${\cal O}$ is by definition an
orbifold homeomorphism of ${\cal O}$ whose lifts to $\widetilde{\cal
O}$ are diffeomorphisms. Thus the group $\hbox{\sl D\/}iff({\cal O})$ of orbifold
diffeomorphisms of ${\cal O}$ is the quotient of the group
$\hbox{\sl D\/}iff_H({\widetilde{\cal O}})$ by the normal subgroup~$H$.
Throughout this section, we let ${\cal W}$ be a compact suborbifold of
${\cal O}$. By definition, the preimage $\widetilde{\cal W}$ in
$\widetilde{\cal O}$ is a submanifold, and the space of orbifold
imbeddings $\hbox{\sl I\/}mb({\cal W},{\cal O})$ can be regarded as the quotient
of $\hbox{\sl I\/}mb_H(\widetilde{\cal W},\widetilde{\cal O})$ by the action of
$H$. For spaces of vectors, a subscript $H$ will indicate the
equivariant ones, thus for example ${\cal X}_H(\widetilde{\cal
W},T(\widetilde{\cal O}))$ means the $H$-equivariant sections of the
restriction of $T(\widetilde{\cal O})$ to $\widetilde{\cal W}$,
satisfying conditions~(1) and~(2) given in section~\ref{palais}.
Provided that $H$ acts as isometries on the $H$-invariant subset
$\widetilde{L}$ of $\widetilde{\cal O}$, the evaluation map $F$
carries ${\cal X}_H^{\widetilde{L}}(T(\widetilde{\cal O}))$ into
$\hbox{\rm Maps}_H^{\widetilde{L}}(\widetilde{\cal O},\widetilde{\cal O})$.
The next two lemmas provide equivariant functions and metrics.
\begin{lemma}{equivariant function}{} Let $H$ be a group acting
smoothly and properly discontinuously on a manifold $M$, possibly with
boundary, such that $M/H$ is compact. Let $A$ be an $H$-invariant
closed subset of $M$, and $U$ an $H$-invariant neighborhood of
$A$. Then there exists an $H$-equivariant smooth function
$\gamma\colon M\to [0,1]$ which is identically equal to~$1$ on $A$ and
whose support is contained in~$U$.
\marginwrite{equivariant function}
\end{lemma}
\begin{proof}{} Fix a compact subset $C$ of $M$ which maps surjectively
onto $M/H$ under the quotient map. Let $\phi\colon M\to[0,\infty)$ be
a smooth function such that $\phi(x)\geq 1$ for all $x\in C\cap A$ and
whose support is compact and contained in $U$. Define $\psi$ by
$\psi(x)\!=\! \sum_{h\in H}\phi(h(x))$. Now choose $\eta\colon\reals\to[0,1]$
such that $\eta(r)\!=\! 0$ for $r\leq 0$ and $\eta(r)\!=\! 1$ for $r\geq
1$, and put $\gamma\!=\! \eta\circ\psi$.
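For the record, the verification is a matter of reindexing the sum
over $H$: for $g\in H$,
$$\psi(g(x))=\sum_{h\in H}\phi(h(g(x)))=\sum_{h'\in H}\phi(h'(x))=\psi(x)\ ,$$
\noindent since $h\mapsto hg$ is a bijection of $H$, so $\psi$ and hence
$\gamma$ are $H$-equivariant. The sum is locally finite, hence $\psi$
is smooth, because $\phi$ has compact support and the action is
properly discontinuous; $\psi\geq 1$ on $A$ because every point of $A$
has a translate in $C\cap A$; and $\gamma$ vanishes outside $U$
because $U$ is $H$-invariant and contains the support of~$\phi$.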
\end{proof}
When ${\cal O}$ is compact, the following lemma provides a Riemannian
metric on $\widetilde{\cal O}$ for which the covering transformations
are isometries.
\begin{lemma}{covering isometries}{} Let $H$ be a group acting
smoothly and properly discontinuously on a manifold $M$, possibly with
boundary, such that $M/H$ is compact. Let $N$ be a properly imbedded
$H$-invariant submanifold, possibly empty. Then $M$ admits a complete
Riemannian metric, which is a product near $\partial M$ and such that
$N$ meets the collar $\partial M\times I$ in $I$-fibers, such that $H$
acts as isometries. Moreover, the action preserves the collar, and if
$(y,t)\in \partial M\times I$ and $h\in H$, then $h(y,t)\!=\!
(h\vert_{\partial M}(y),t)$.
\marginwrite{covering isometries}
\end{lemma}
\begin{proof}{} We first prove that equivariant Riemannian metrics
exist. Choose a compact subset $C$ of $M$ that maps surjectively onto
$M/H$ under the quotient map. Let $\phi\colon M\to [0,\infty)$ be a
compactly supported smooth function which is positive on $C$. Choose
a Riemannian metric $R$ on $M$ and denote by $R_x$ the inner product
which $R$ assigns to $T_x(M)$. Define a new metric $R'$ by
$$R'_x(v,w)=\sum_{h\in H}\phi(h(x))\,R_{h(x)}(h_*(v),h_*(w))\ .$$
\noindent Since $\phi$ is compactly supported, the sum is finite, and
since every orbit meets the support of $\phi$, $R'$ is positive
definite. To check equivariance, let $g\in H$. Then
$$\eqalign{R'_{g(x)}(g_*(v),g_*(w))&=
\sum_{h\in H}\phi(h(g(x)))\,
R_{h(g(x))}(h_*(g_*(v)),h_*(g_*(w)))\cr
&=\sum_{h\in H}\phi(hg(x))\,
R_{hg(x)}((hg)_*(v),(hg)_*(w))\cr
&=R'_x(v,w)\ .\cr}$$
\noindent We need to improve the metric near the boundary. First, note
that $C\cap\partial M$ maps surjectively onto the image of $\partial
M$. Choose an inward-pointing vector field $\tau'$ on a neighborhood
$U$ of $C\cap \partial M$, which is tangent to $N$. Choose a smooth
function $\phi\colon M\to [0,\infty)$ which is positive on $C\cap
\partial M$ and has support contained in $U$. The field $\phi\tau'$
defined on $U$ extends using the zero vector field on $M-U$ to a
vector field $\tau$ which is nonvanishing on $C\cap \partial M$. For
$x$ in the union of the $H$-translates of $U$, define
$\omega_x\!=\!\sum_{h\in H}\phi(h(x))\,h_*^{-1}(\tau_{h(x)})$. This is
defined, nonsingular, and equivariant on an equivariant neighborhood
of $\partial M$, and we use it to define a collar $\partial
M\times[0,2]$ equivariant in the sense that if $(y,t)\in\partial
M\times[0,2]$ then $h(y,t)\!=\! (h\vert_{\partial M}(y),t)$. Moreover,
$N$ meets this collar in $I$-fibers. On $\partial M\times[0,2]$,
choose an equivariant metric $R_1$ which is the product of an
equivariant metric on $\partial M$ and the standard metric on $[0,2]$,
and choose any equivariant metric $R_2$ defined on all of $M$. Using
lemma~\ref{equivariant function}, choose $H$-equivariant functions
$\phi_1$ and $\phi_2$ from $M$ to $[0,1]$ so that $\phi_1(x)\!=\! 1$ for
all $x\in \partial M\times [0,3/2]$ and the support of $\phi_1$ is
contained in $\partial M\times[0,2)$, and so that $\phi_2(x)\!=\! 1$ for
$x\in M-\partial M\times [0,3/2]$ and the support of $\phi_2$ is
contained in $M-\partial M\times[0,1]$. Then, $\phi_1R_1+\phi_2R_2$
is $H$-equivariant and is a product near $\partial M$, and $N$ is
vertical in $\partial M\times I$.
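The equivariance of the vector field $\omega$ used to produce the
collar is checked by the same reindexing as for $R'$ above: for $g\in
H$ and $x$ in the union of the $H$-translates of~$U$,
$$\eqalign{\omega_{g(x)}&=\sum_{h\in H}\phi(hg(x))\,h_*^{-1}(\tau_{hg(x)})
=\sum_{h'\in H}\phi(h'(x))\,(h'g^{-1})_*^{-1}(\tau_{h'(x)})\cr
&=g_*\sum_{h'\in H}\phi(h'(x))\,(h')_*^{-1}(\tau_{h'(x)})=g_*(\omega_x)\ .\cr}$$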
Since $M/H$ is compact and $H$ acts as isometries, the metric must be
complete. For let $C$ be a compact subset of $M$ that maps
surjectively onto $M/H$. We may enlarge $C$ to a compact
codimension-zero submanifold $C'$ such that every point of $M$ has a
translate which lies in $C'$ at distance at least a fixed~$\epsilon$
from the frontier of $C'$. Then, any Cauchy sequence in $M$ can be
translated, except for finitely many terms, into a Cauchy sequence
in $C'$. Since $C'$ is compact, this converges, so the original sequence
also converged.
\end{proof}
We need the equivariant analogue of lemma~\ref{J}. Its proof uses
the following general fact.
\begin{proposition}{finitely generated}{} Suppose that $H$ acts
properly discontinuously on a locally compact connected space $X$, and
that $X/H$ is compact. Then $H$ is finitely generated.
\marginwrite{finitely generated}
\end{proposition}
\begin{proof}{} Using local compactness, there exists a compact set $C$
which maps surjectively to $X/H$. Let $H_0$ be the subgroup generated
by the finitely many elements $h$ such that $h(C)\cap C$ is nonempty.
The union of the $H_0$-translates of $C$ and the union of the
translates of $C$ by elements of $H-H_0$ are both closed, since the
action is properly discontinuous, and they are disjoint, because
$h_0(C)\cap h(C)\neq\emptyset$ implies $h_0^{-1}h\in H_0$. The first
union is therefore a nonempty open and closed subset of the connected
space $X$, so it equals $X$. This implies that $H\!=\! H_0$.
\end{proof}
\begin{lemma}{orblemmaB}{}
Let $\widetilde{K}$ be an $H$-invariant subset of $\widetilde{\cal O}$
whose quotient in ${\cal O}$ is compact. Then there exists a
neighborhood $J$ of $1_{\widetilde{\cal O}}$ in
$\hbox{\rm Maps}_H^{\widetilde{K}}(\widetilde{\cal O}, \widetilde{\cal O})$ that
consists of diffeomorphisms.
\marginwrite{orblemmaB}
\end{lemma}
\begin{proof}{} Suppose first that ${\cal O}$ is compact. Then by
proposition~\ref{finitely generated}, $H$ is finitely generated. We
claim that if $f$ is a map that is close enough to $1_{\widetilde{\cal
O}}$, then $f$ commutes with the $H$-action. Choose an
$x\in\widetilde{\cal O}$ which is not fixed by any nontrivial element
of $H$. Define $\Phi\colon \hbox{\rm Maps}_H^{\widetilde{K}}(\widetilde{\cal
O},\widetilde{\cal O})\to \hbox{End}(H)$ by sending $f$ to $\phi_f$
where $f(h(x))\!=\!\phi_f(h)f(x)$. This is independent of the choice of
$x$, hence is a homomorphism. If $f$ is close enough to
$1_{\widetilde{\cal O}}$ on $\set{x,h_1(x),\ldots,h_n(x)}$, where
$\set{h_1,\ldots,h_n}$ generates $H$, then $\phi_f\!=\! 1_H$. This proves
the claim.
Next we show that for $f$ close enough to $1_{\widetilde{\cal O}}$,
$f^{-1}(S)$ is compact whenever $S$ is compact. From above, we may
assume that $f$ commutes with the $H$-action. Let $C$ be a compact
set in $\widetilde{\cal O}$ which maps surjectively to ${\cal O}$. If
$S$ is a set for which $f^{-1}(S)$ meets infinitely many translates of
$C$ then so does $S$, and $S$ could not be compact.
Consider $f$ close enough to $1_{\widetilde{\cal O}}$ to ensure the
previous conditions. By requiring $f$ sufficiently $C^\infty$-close to
$1_{\widetilde{\cal O}}$, $f_*$ is nonsingular at each point of $C$,
hence on all of $\widetilde{\cal O}$. It follows that $f$ is a local
diffeomorphism. Since also $f$ takes boundary to boundary and
preimages of compact sets are compact, $f$ is a covering map. Since
$\widetilde{\cal O}$ is simply-connected, $f$ is a diffeomorphism.
Now suppose that ${\cal O}$ is noncompact. Enlarge $\widetilde{K}/H$
to a compact codimen\-sion-zero suborbifold $L$, and let $\widetilde{L}$
denote its preimage in $\widetilde{\cal O}$. Let $\widetilde{L}'$
be a single component of $\widetilde{L}$ and $H'$ the stabilizer of
$\widetilde{L}'$ in $H$. Let $f\in\hbox{\rm Maps}_H^{\widetilde{K}}
(\widetilde{\cal O},\widetilde{\cal O})$. If $f$ is close enough to
$1_{\widetilde{\cal O}}$, then $f(\widetilde{L}')\!=\!\widetilde{L}'$
and by the previous argument we may assume that $f$ is a covering map
on $\widetilde{L}'$ (although since we don't know that
$\widetilde{L}'$ is simply connected, we cannot immediately conclude
that $f$ is a diffeomorphism). Since ${\cal O}$ is connected and
noncompact, $L$ has frontier in ${\cal O}$, hence $\widetilde{L}'$ has
frontier in $\widetilde{\cal O}$. Since $f$ is the identity on
$\widetilde{\cal O}-\widetilde{K}$, $f$ must be a diffeomorphism on
$\widetilde{L}'$, hence on all of $\widetilde{L}$, hence on all
of~$\widetilde{\cal O}$.
\end{proof}
We now prove the analogues of lemmas~\ref{extension}
and~\ref{logarithm} for vector fields on ${\cal O}$. Assume that
${\cal W}$ is a compact suborbifold of ${\cal O}$.
\begin{lemma}{orbextension}{} Let ${\cal W}$ be a compact suborbifold
of ${\cal O}$.
Let $L$ be a neighborhood of ${\cal W}$ in ${\cal O}$ and let $S$ be a
closed neighborhood in $\partial{\cal O}$ of $S\cap\partial{\cal W}$.
Denote the preimages in $\widetilde{\cal O}$ by $\widetilde{L}$ and
$\widetilde{S}$. Then there exists a continuous map $k\colon ({\cal
X}_H)_{<1/2}(\widetilde{\cal W},T(\widetilde{\cal O}))\to {\cal
X}_H^{\widetilde{L}}(T(\widetilde{\cal O}))$ such that $k(X)(x)\!=\!
X(x)$ for all $x$ in $\widetilde{\cal W}$ and $X\in({\cal
X}_H)_{<1/2}(\widetilde{\cal W},T(\widetilde{\cal O}))$. Moreover,
$k(Z)\!=\! Z$, and if $X(x)\!=\! Z(x)$ for all $x\in
\widetilde{S}\cap\partial\widetilde{\cal W}$, then $k(X)(x)\!=\! Z(x)$
for all $x\in\widetilde{S}$.
\marginwrite{orbextension}
\end{lemma}
\begin{proof}{} Assume first that ${\cal W}$ has positive codimension.
Replacing $L$ by a compact orbifold neighborhood of ${\cal W}$ and
using lemma~\ref{covering isometries}, we may assume that $H$ acts as
isometries on $\widetilde{\cal O}$, that the metric is a product near
$\partial\widetilde{\cal O}$, and that $\widetilde{\cal W}$ meets the
collar $\partial\widetilde{\cal O}\times I$ in $I$-fibers. Let
$\nu(\widetilde{\cal W})$ be the normal bundle, regarded as a
subbundle of the restriction of $T(\widetilde{\cal O})$ to
${\widetilde{\cal W}}$. For $\epsilon>0$, let
$\nu_\epsilon(\widetilde{\cal W})$ be the subspace of all vectors of
length less than $\epsilon$. Since ${\cal W}$ is compact and $H$ acts
as isometries on $\widetilde{L}$, $\exp$ imbeds
$\nu_\epsilon(\widetilde{\cal W})$ as a tubular neighborhood of
$\widetilde{\cal W}$ for sufficiently small $\epsilon$. By choosing
$\epsilon$ small enough, we may assume that
$\exp(\nu_\epsilon(\widetilde{\cal W}))\subset \widetilde{L}$, that
the fibers at points in $\widetilde{S}$ map into $\widetilde{S}$, and
that the fibers at points in $\partial\widetilde{\cal
O}-\widetilde{S}$ map into $\partial\widetilde{\cal O}-\widetilde{S}$.
Now use lemma~\ref{equivariant function} to choose an $H$-equivariant
smooth function $\alpha\colon\widetilde{\cal O}\to[0,1]$ which is
identically equal to~1 on $\widetilde{\cal W}$ and has support in
$\exp(\nu_{\epsilon/2}(\widetilde{\cal W}))$. The extension $k(X)$ can
now be defined exactly as in lemma~\ref{extension}. Note that since
$H$ acts as isometries, the parallel translation function $P$ is
$H$-equivariant, and the $H$-equivariance of $k(X)$ follows easily.
The case when ${\cal W}$ has codimension zero is similar, using
$\nu^+_\epsilon(\widetilde{\cal W})$ as in the proof of
lemma~\ref{extension}.
\end{proof}
\begin{lemma}{orblogarithm}{}
For all sufficiently small positive $\delta$, there exists a
continuous map $X\colon (\hbox{\sl I\/}mb_H)_{<\delta}(\widetilde{\cal
W},\widetilde{\cal O}) \to\allowbreak({\cal
X}_H)_{<1/2}(\widetilde{\cal W},T(\widetilde{\cal O}))$ such that
$\exp(X(j)(x))\!=\! j(x)$ for all $x\in \widetilde{\cal W}$ and $j\in
(\hbox{\sl I\/}mb_H)_{<\delta}(\widetilde{\cal W},\widetilde{\cal O})$, and
moreover if $j(x)\!=\! i_{\widetilde{\cal W}}(x)$ then $X(j)(x)\!=\!
Z(x)$.
\marginwrite{orblogarithm}
\end{lemma}
\begin{proof}{} Replacing ${\cal O}$ by a compact orbifold
neighborhood of ${\cal W}$ and using lemma~\ref{covering isometries},
we may assume that $H$ acts as isometries on $\widetilde{\cal O}$, that the
metric is a product near $\partial\widetilde{\cal O}$, and that
$\widetilde{\cal W}$ meets the collar $\partial\widetilde{\cal
O}\times I$ in $I$-fibers. Let $N_\epsilon(\widetilde{\cal W})$ be
defined exactly as in section~\ref{palais}. By compactness of ${\cal
W}$, there exists a positive $\epsilon$ such that for every
$x\in\widetilde{\cal W}$, $\exp\colon N_\epsilon(\widetilde{\cal
W})\cap T_x(\widetilde{\cal O})\to \widetilde{\cal O}$ is a
diffeomorphism to an open neighborhood of $x$ in~$\widetilde{\cal O}$,
contained in $\widetilde{L}$. The proof is then essentially the same
as the proof of lemma~\ref{logarithm}.
\end{proof}
The fundamental result is the analogue of theorem~B of \cite{P}.
\begin{theorem}{orbtheoremB}{} Let ${\cal W}$ be a compact suborbifold
of ${\cal O}$. Let $S$ be a closed neighborhood in $\partial{\cal O}$
of $S\cap\partial{\cal W}$, and let $L$ be a neighborhood of ${\cal
W}$ in ${\cal O}$. Then $\hbox{\sl I\/}mb^L({\cal W},{\cal O}\hbox{\ rel\ } S)$ admits local
$\hbox{\sl D\/}iff^L({\cal O}\hbox{\ rel\ } S)$ cross-sections.
\marginwrite{orbtheoremB}
\end{theorem}
\begin{proof}{} By proposition~\ref{inclusion}, it suffices to find a
local cross-section at the inclusion $i_{\cal W}$. Choose a compact
neighborhood $K$ of ${\cal W}$ with $K\subseteq L$. Using
lemmas~\ref{orblogarithm} and~\ref{orbextension}, there exist
continuous maps $X\colon
(\hbox{\sl I\/}mb_H)_{<\delta}(\widetilde{\cal W},\widetilde{\cal O}) \to({\cal
X}_H)_{<1/2}(\widetilde{\cal W}, T(\widetilde{\cal O}))$ and $k\colon
({\cal X}_H)_{<1/2}(\widetilde{\cal W},T(\widetilde{\cal O}))
\allowbreak\to {\cal X}_H^{\widetilde{K}}(T(\widetilde{\cal O}))$. Let
$J$ be a neighborhood as in lemma~\ref{orblemmaB}. On a sufficiently
small neighborhood $\widetilde{U}$ of $i_{\widetilde{\cal W}}$, the
composition $\widetilde{\chi}\!=\! FkX$ is defined and has image in $J$.
Let $U$ be the imbeddings of ${\cal W}$ in ${\cal O}$ which admit a
lift to $\widetilde{U}$. By choosing $\widetilde{U}$ small enough, we
may ensure that the lift of an element of $U$ is unique. Define $\chi$
to be $\widetilde{\chi}$ applied to the lift of an element of $U$ to
$\widetilde{U}$, followed by the projection of
$\hbox{\sl D\/}iff_H^{\widetilde{K}}(\widetilde{\cal O})$ to $\hbox{\sl D\/}iff^{K}({\cal
O})$.
For elements in $U\cap \hbox{\sl I\/}mb^K({\cal W},{\cal O}\hbox{\ rel\ } S)$, each lift to
$\widetilde{U}$ that is sufficiently close to $i_{\widetilde{\cal W}}$
must agree with $i_{\widetilde{\cal W}}$ on $\widetilde{S}$. So $U$
may be chosen small enough so that if $j\in U$ then
$\widetilde{j}\in\hbox{\sl I\/}mb(\widetilde{\cal W},\widetilde{\cal
O}\hbox{\ rel\ }\widetilde{S})$. Then $X(\widetilde{j})(x)\!=\! Z(x)$ for all
$x\in \widetilde{S}\cap\partial\widetilde{\cal W}$, so $k(X(\widetilde{j}))(x)\!=\! Z(x)$ for all
$x\in\widetilde{S}$. It follows that $\chi(j)\in\hbox{\sl D\/}iff({\cal O}\hbox{\ rel\ }
S)$.
\end{proof}
\begin{corollary}{orbcoro2}{} Let ${\cal W}$ be a compact suborbifold
of ${\cal O}$, which is either properly imbedded or codimension-zero.
Let $S$ be a closed neighborhood in $\partial{\cal O}$ of
$S\cap\partial{\cal W}$, and let $L$ be a neighborhood of ${\cal W}$
in ${\cal O}$. Then the restriction $\hbox{\sl D\/}iff^L({\cal O}\hbox{\ rel\ } S)\to
\hbox{\sl I\/}mb^L({\cal W},{\cal O}\hbox{\ rel\ } S)$ is locally trivial.
\marginwrite{orbcoro2}
\end{corollary}
\begin{corollary}{orbcoro3}{} Let ${\cal V}$ and ${\cal W}$ be
suborbifolds of ${\cal O}$, with ${\cal W}\subset
{\cal V}$. Assume that ${\cal W}$ is compact, and is either properly
imbedded or codimension-zero. Let $S$ be a closed neighborhood in
$\partial{\cal O}$ of $S\cap\partial{\cal W}$, and let $L$ be a
neighborhood of ${\cal W}$ in ${\cal O}$. Then the restriction
$\hbox{\sl I\/}mb^L({\cal V},{\cal O}\hbox{\ rel\ } S) \to \hbox{\sl I\/}mb^L({\cal W},{\cal
O}\hbox{\ rel\ } S)$ is locally trivial.
\marginwrite{orbcoro3}
\end{corollary}
\section{Singular fiberings}
\marginwrite{sfiber}
\label{sfiber}
We will say that a continuous surjection $p\colon \Sigma\to {\cal O}$
of compact connected orbifolds is a {\it singular fibering} if there
exists a commutative diagram
$$\vbox{\halign{\hfil$#$\hfil&\hfil$#$\hfil&\hfil$#$\hfil\cr
\widetilde{\Sigma}&\mapright{\widetilde{p}}&\widetilde{{\cal O}}\cr
\mapdown{\sigma}&&\mapdown{\tau}\cr
\Sigma&\mapright{p}&{\cal O}\cr}}$$
\noindent in which
\begin{enumerate}
\item[{\rm(i)}] $\widetilde{\Sigma}$ and $\widetilde{\cal O}$ are
manifolds, and $\sigma$ and $\tau$ are regular orbifold coverings with
groups of covering transformations $G$ and $H$ respectively,
\item[{\rm(ii)}] $\widetilde{p}$ is surjective and locally trivial,
and
\item[{\rm(iii)}] the fibers of $p$ and $\widetilde{p}$ are
path-connected.
\end{enumerate}
The class of singular fiberings includes many Seifert fiberings, for
example all compact 3-dimensional Seifert manifolds $\Sigma$ except
the lens spaces with one or two exceptional orbits (see for
example~\cite{Scott}). For some of those lens spaces, ${\cal O}$ fails
to have an orbifold covering by a manifold. On the other hand, it is a
much larger class than Seifert fiberings, because no structure as a
homogeneous space is required on the fiber.
For mappings there is a complete analogy with the fibered case, where
now $\hbox{\sl D\/}iff_f(\Sigma)$ is by definition the quotient of the group of
fiber-preserving $G$-equivariant diffeomorphisms
$(\hbox{\sl D\/}iff_G)_f(\widetilde{\Sigma})$ by its normal subgroup $G$, and so
on. A suborbifold $W$ of $\Sigma$ is called {\it vertical} if it is a
union of fibers. In this case the preimage $\widetilde{W}$ of $W$ in
$\widetilde{\Sigma}$ is a submanifold, and we can speak of
$\hbox{\sl I\/}mb_f(W,\Sigma)$ and $\hbox{\sl I\/}mb_v(W,\Sigma)$.
Following our usual notations, we put $\partial_v\Sigma\!=\!
p^{-1}(\partial{\cal O})$ and $\partial_vW\!=\!
W\cap\partial_v\Sigma$.
Since ${\cal O}$ is compact, lemma~\ref{covering isometries} shows
that a (complete) Riemannian metric on $\widetilde{\cal O}$ can be
chosen so that $H$ acts as isometries, and moreover so that the metric
on $\widetilde{\cal O}$ is a product near the boundary. Next we will
sketch how to obtain a $G$-equivariant metric which is a product near
$\partial_h\widetilde{\Sigma}$ and near
$\partial_v\widetilde{\Sigma}$. If $\partial_h\widetilde{\Sigma}$ is
empty, we simply apply lemma~\ref{covering isometries}. Assume that
$\partial_h\widetilde{\Sigma}$ is nonempty. Construct a
$G$-equivariant collar of $\partial_h\widetilde{\Sigma}$, and use it
to obtain a $G$-equivariant metric such that the $I$-fibers of
$\partial_h\widetilde{\Sigma}\times I$ are vertical. If
$\partial_v\widetilde{\Sigma}$ is also nonempty, put
$Y\!=\!\partial_h\widetilde{\Sigma}\cap\partial_v\widetilde{\Sigma}$. We
will follow the construction in the last paragraph of
section~\ref{exponent}. Denote the collar of
$\partial_h\widetilde{\Sigma}$ by
$\partial_h\widetilde{\Sigma}\times[0,2]_1$. Assume that the metric on
$\partial_h\widetilde{\Sigma}$ was a product on a collar
$Y\times[0,2]_2$ of $Y$ in $\partial_h\widetilde{\Sigma}$. Next,
construct a $G$-equivariant collar $\partial_v\widetilde{\Sigma}\times
[0,2]_2$ of $\partial_v\widetilde{\Sigma}$ whose $[0,2]_2$-fiber at
each point of $Y\times [0,2]_1$ agrees with the $[0,2]_2$-fiber of the
collar of $Y$ in $\partial_h\widetilde{\Sigma}\times\set{t}$. Then,
the product metric on $\partial_v\widetilde{\Sigma}\times[0,2]_2$
agrees with the product metric of
$\partial_h\widetilde{\Sigma}\times[0,2]_1$ where they overlap, and
the $G$-equivariant patching can be done to obtain a metric which is a
product near $\partial_v\widetilde{\Sigma}$ without losing the
property that it is a product near~$\partial_h\widetilde{\Sigma}$. We
will always assume that the metrics have been selected with these
properties. In particular, $G$ preserves the vertical and horizontal
parts of vectors.
Some basic observations about singular fiberings will be needed.
\begin{lemma}{lift}{} The action of $G$ preserves the fibers of
$\widetilde{p}$. Moreover:
\begin{enumerate}
\item[\rm(i)] If $g\in G$, then there exists an element $h\in H$ such
that $\widetilde{p}g\!=\! h\widetilde{p}$.
\item[\rm(ii)] If $h\in H$, then there exists an element
$g$ of $G$ such that $\widetilde{p}g\!=\! h\widetilde{p}$.
\item[\rm(iii)] If $x\in \Sigma$, then $\tau^{-1}p(x)\!=\!
\widetilde{p}\sigma^{-1}(x)$.
\end{enumerate}
\marginwrite{lift}
\end{lemma}
\begin{proof}{}
Suppose that $\widetilde{p}(x)\!=\!\widetilde{p}(y)$. For $g\in G$, we
have $\tau\widetilde{p}(g(x))\!=\! p\sigma(g(x))\!=\! p\sigma(x)\!=\!
\tau\widetilde{p}(x)\!=\! \tau\widetilde{p}(y)\!=\!
\tau\widetilde{p}(g(y))$. Since the fibers of $\widetilde{p}$ are
path-connected, and the fibers of $\tau$ are discrete, this implies
that $g(x)$ and $g(y)$ lie in the same fiber of $\widetilde{p}$. For
(i), let $g\in G$. Since $g$ preserves the fibers of $\widetilde{p}$,
it induces a map $h$ on $\widetilde{{\cal O}}$. Given
$x\in\widetilde{{\cal O}}$, choose $y\in\widetilde{\Sigma}$ with
$\widetilde{p}(y)\!=\! x$. Then $\tau h(x)\!=\! \tau
\widetilde{p}(g(y))\!=\! p\sigma(g(y))\!=\! p\sigma(y)\!=\! \tau
\widetilde{p}(y)\!=\! \tau(x)$ so $h\in H$.
To prove (ii), suppose $h$ is any element of $H$. Let
$\hbox{sing}({\cal O})$ denote the singular set of ${\cal O}$. Choose
$a\in\widetilde{{\cal O}}-\tau^{-1}(\hbox{sing}({\cal O}))$, choose
$s\in \widetilde{\Sigma}$ with $\widetilde{p}(s)\!=\! a$, and choose
$s''\in \widetilde{\Sigma}$ with $\widetilde{p}(s'')\!=\! h(a)$. Since
$p\sigma(s)\!=\! \tau\widetilde{p}(s)\!=\! \tau\widetilde{p}(s'')\!=\!
p\sigma(s'')$, $\sigma(s)$ and $\sigma(s'')$ must lie in the same
fiber of $p$. Since the fiber is path-connected, there exists a path
$\beta$ in that fiber from $\sigma(s'')$ to $\sigma(s)$. Let
$\widetilde{\beta}$ be its lift in $\widetilde{\Sigma}$ starting at
$s''$ and let $s'$ be the endpoint of this lift, so that
$\sigma(s')\!=\! \sigma(s)$. Note that $\widetilde{p}(s')\!=\!
$\widetilde{p}(s'')\!=\! h(a)$ since $\widetilde{\beta}$ lies in a fiber
of $\widetilde{p}$. Since $\sigma(s)\!=\! \sigma(s')$, there exists a
covering transformation $g\in G$ with $g(s)\!=\! s'$. To show that
$\widetilde{p}g\!=\! h\widetilde{p}$, it is enough to verify that they
agree on the dense set $\widetilde{p}^{-1}(\widetilde{{\cal
O}}-\tau^{-1}(\hbox{sing}({\cal O})))$. Let $t\in
\widetilde{p}^{-1}(\widetilde{{\cal O}}-\tau^{-1}(\hbox{sing}({\cal
O})))$ and choose a path $\gamma$ in
$\widetilde{p}^{-1}(\widetilde{{\cal O}}-\tau^{-1}(\hbox{sing}({\cal
O})))$ from $s$ to $t$. Since $g\in G$, we have $p\sigma\gamma\!=\!
p\sigma g\gamma$. Therefore $\tau\widetilde{p}\gamma\!=\!
\tau\widetilde{p} g\gamma$, and so $\widetilde{p}g\gamma$ is the
unique lift of $p\sigma\gamma$ starting at $\widetilde{p}g(s)\!=\!
h(a)$. But this lift equals $h\widetilde{p}\gamma$, so
$h\widetilde{p}(t)\!=\! \widetilde{p}g(t)$.
For (iii), fix $z_0\in \sigma^{-1}(x)$ and let $y_0\!=\!
\widetilde{p}(z_0)$. Suppose $y\in \widetilde{p}\sigma^{-1}(x)$.
Choose $z\in \sigma^{-1}(x)$ with $\widetilde{p}(z)\!=\! y$. Since
$\sigma$ is a regular covering, there exists $g\in G$ such that $g(z)\!=\!
z_0$. By~(i), $g$ induces $h$ on $\widetilde{\cal O}$, and $h(y)\!=\!
h\widetilde{p}(z)\!=\! \widetilde{p}g(z)\!=\! \widetilde{p}(z_0)\!=\! y_0$.
Therefore $\tau(y)\!=\! \tau(h(y))\!=\! \tau(y_0)\!=\!
\tau\widetilde{p}(z_0)\!=\! p\sigma(z_0)\!=\! p(x)$ so $y\in
\tau^{-1}(p(x))$. For the opposite inclusion, suppose that $y\in
\tau^{-1}p(x)$, so $\tau(y)\!=\! p(x)\!=\! \tau(y_0)$. Since $\tau$ is
regular, there exists $h\in H$ such that $h(y_0)\!=\! y$. Let $g$ be as
in (ii). Then $y\!=\! h(y_0)\!=\! h\widetilde{p}(z_0)\!=\!
\widetilde{p}g(z_0)$, and $\sigma(g(z_0))\!=\! \sigma(z_0)\!=\! x$ so
$y\in\widetilde{p}(\sigma^{-1}(x))$.
\end{proof}
One consequence of lemma~\ref{lift} is that (smooth nonsingular) paths
in $\widetilde{\cal O}$ have horizontal lifts in $\widetilde{\Sigma}$.
To see this, we first claim that the horizontal lifts of any vector
$\omega$ in $T(\widetilde{\cal O})$ have bounded lengths. Fix a compact
subset $C$ of $\widetilde{\Sigma}$ such that $\sigma(C)\!=\!\Sigma$.
Let $H_*\omega$ be the set of $H$-translates of $\omega$. Since $C$ is
compact and $H_*\omega$ is closed, the lengths of the horizontal lifts
of vectors in $H_*\omega$ to vectors in $T(\widetilde{\Sigma})\vert_C$
are bounded by some~$L$. If $\widetilde{\omega}$ is any lift of
$\omega$, there exists $g\in G$ such that $g_*\widetilde{\omega}\in
T(\widetilde{\Sigma})\vert_C$. By lemma~\ref{lift}(i), there exists
$h\in H$ such that $\widetilde{p}_*(g_*\widetilde{\omega})\!=\!
h_*\omega$. Since $g_*\widetilde{\omega}$ is a horizontal lift of
$h_*(\omega)$ and $g_*$ is an isometry, $\norm{\widetilde{\omega}}\!=\!
\norm{g_*\widetilde{\omega}}\leq L$, proving the claim. Since the
metric on $\widetilde{\Sigma}$ is complete, the claim shows that a
path in $\widetilde{\cal O}$ could only fail to lift if a partial lift
started in $\widetilde{\Sigma}-\partial_h\widetilde{\Sigma}$ and then
reached a point of $\partial_h\widetilde{\Sigma}$, impossible since
the metric is a product near~$\partial_h\widetilde{\Sigma}$.
Since horizontal lifts exist, the aligned exponential $\exp_a$ of
$\widetilde{\Sigma}$ is defined. Moreover, it is $G$-equivariant:
since $G$ consists of fiber-preserving isometries, $\exp_v$ is
$G$-equivariant, and since $G$ preserves horizontal parts of vectors,
it preserves horizontal lifts.
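In formulas, this equivariance says that for every $g\in G$ and every
$v\in T(\widetilde{\Sigma})$ for which $\exp_a(v)$ is defined,
$\exp_a(g_*(v))$ is also defined and
$$g(\exp_a(v))=\exp_a(g_*(v))\ ;$$
\noindent this is the form in which it is used to see that $F_a$ carries
$G$-equivariant aligned sections to $G$-equivariant maps.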
The notations ${\cal A}(\widetilde{W},T(\widetilde{\Sigma}))$ and
${\cal A}(T(\widetilde{\Sigma}))$ and the map $F_a\colon {\cal
A}(T(\widetilde{\Sigma}))\to
\hbox{\rm Maps}(\widetilde{\Sigma},\widetilde{\Sigma})$ are analogous to those
in section~\ref{project}.
\begin{theorem}{sftheorem1}{} Let $S$ be a closed subset of
$\partial{\cal O}$, and let $T\!=\! p^{-1}(S)$. Then
$\hbox{\sl D\/}iff({\cal O}\hbox{\ rel\ } S)$ admits local
$\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$ cross-sections.
\marginwrite{sftheorem1}
\end{theorem}
\begin{proof}{} Lemma~\ref{orblogarithm}, with
${\cal W}\!=\!{\cal O}$, provides $X\colon
\widetilde{U}_\delta
\to ({\cal X}_H)_{<1/2}(T(\widetilde{\cal O}))$, where
$\widetilde{U}_\delta\!=\!\set{f\in \hbox{\sl D\/}iff_H(\widetilde{\cal O})\hbox{$\;\vert\;$}
d(f(x),x)<\delta\hbox{\ for all $x\in \widetilde{\cal O}$}}$.
Let $h\in \hbox{\sl D\/}iff({\cal O})$ and let $\widetilde{h}\in
\hbox{\sl D\/}iff_H(\widetilde{\cal O})$ be a lift of $h$.
For every $\widetilde{g}\in \widetilde{U}_\delta \widetilde{h}$,
$\exp(X(\widetilde{g}\widetilde{h}^{-1})(x))\!=\!
\widetilde{g}\widetilde{h}^{-1}(x)$. Define
$\widetilde{\chi}\colon\widetilde{U}_\delta\widetilde{h}\to {\cal
A}_G(T(\widetilde{\Sigma}))$ by
$$\widetilde{\chi}(\widetilde{g})(x)=
\big(\widetilde{p}_*\vert_{H_x}\big)^{-1}(X(\widetilde{g}
\widetilde{h}^{-1})(\widetilde{p}(x)))\ .$$
\noindent The boundary tangency conditions are clearly satisfied, and
$\exp_a(\widetilde{\chi}(\widetilde{g})(x))$ exists since it is the
horizontal lift of a geodesic from $\widetilde{p}(x)$ to
$\widetilde{g}\widetilde{h}^{-1}(\widetilde{p}(x))$. To see that
$\widetilde{\chi}(\widetilde{g})$ is $G$-equivariant, suppose
$\gamma\in G$. By lemma~\ref{lift}(i), $\gamma$ induces $\lambda\in
H$. We have
$$\eqalign{\widetilde{\chi}(\widetilde{g})(\gamma(x))
&=\big(\widetilde{p}_*\vert_{H_{\gamma(x)}}\big)^{-1}(X(\widetilde{g}
\widetilde{h}^{-1})(\widetilde{p}(\gamma(x))))\cr
&=\big(\widetilde{p}_*\vert_{H_{\gamma(x)}}\big)^{-1}(X(\widetilde{g}
\widetilde{h}^{-1})(\lambda\widetilde{p}(x)))\cr
&=\big(\widetilde{p}_*\vert_{H_{\gamma(x)}}\big)^{-1}(\lambda_*X(\widetilde{g}
\widetilde{h}^{-1})(\widetilde{p}(x)))\cr
&=\gamma_*\big(\widetilde{p}_*\vert_{H_x}\big)^{-1}(X(\widetilde{g}
\widetilde{h}^{-1})(\widetilde{p}(x)))\cr
&=\gamma_*\widetilde{\chi}(\widetilde{g})(x)\ ,\cr}$$
\noindent the penultimate equality using the fact that $G$ preserves the
horizontal subspaces.
Let $\widetilde{U}\!=\! \widetilde{\chi}^{-1}F_a^{-1}(J)$, where $J$ is a
neighborhood of $1_{\widetilde{\Sigma}}$ as in lemma~\ref{orblemmaB}.
Let $U$ be a neighborhood of $h$ consisting of elements having a lift
in $\widetilde{U}$. Since $G$ is a discrete subgroup of
$\hbox{\sl D\/}iff_G(\widetilde{\Sigma})$, we may choose $\delta$ small enough to
ensure that these lifts are unique. Now we can define $\chi\colon U\to
\hbox{\sl D\/}iff_f(\Sigma)$ by putting $\chi(g)$ equal to the diffeomorphism
induced on $\Sigma$ by $F_a\widetilde{\chi}(\widetilde{g})$.
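With $F_a$ as in section~\ref{project}, so that
$F_a\widetilde{\chi}(\widetilde{g})(x)=\exp_a(\widetilde{\chi}(\widetilde{g})(x))$,
the local cross-section condition can be seen from the computation
$$\widetilde{p}\big(F_a\widetilde{\chi}(\widetilde{g})(x)\big)
=\exp\big(\widetilde{p}_*\widetilde{\chi}(\widetilde{g})(x)\big)
=\exp\big(X(\widetilde{g}\widetilde{h}^{-1})(\widetilde{p}(x))\big)
=\widetilde{g}\widetilde{h}^{-1}(\widetilde{p}(x))\ ,$$
\noindent which shows that $F_a\widetilde{\chi}(\widetilde{g})$ covers
$\widetilde{g}\widetilde{h}^{-1}$, and hence that $\chi(g)$ induces
$gh^{-1}$ on~${\cal O}$.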
\end{proof}
From proposition~\ref{theoremA}, we have immediately
\begin{theorem}{sfproject diffs}{} Let $S$ be a closed subset of
$\partial{\cal O}$, and let $T\!=\! p^{-1}(S)$. Then
$\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)\to \hbox{\sl D\/}iff({\cal O}\hbox{\ rel\ } S)$ is
locally trivial.
\marginwrite{sfproject diffs}
\end{theorem}
We now extend lemmas~\ref{lemmaC} and~\ref{lemmaD} to the singular
fibered case.
\begin{lemma}{sflemmaC}{}
Let $W$ be a vertical suborbifold of $\Sigma$. Let $T$ be a closed
fibered neighborhood in $\partial_v\Sigma$ of $T\cap \partial_vW$.
Then for all sufficiently small $\delta$, there exists a continuous
map $k\colon({\cal
A}_G)_{<\delta}(\widetilde{W},T(\widetilde{\Sigma}))\to {\cal
A}_G(T(\widetilde{\Sigma}))$ such that $k(X)(x)\!=\! X(x)$ for all $x\in
\widetilde{W}$ and $X\in({\cal
A}_G)_{<\delta}(\widetilde{W},T(\widetilde{\Sigma}))$. If $X(x)\!=\!
Z(x)$ for all $x\in \widetilde{T}\cap\partial_v\widetilde{W}$, then
$k(X)(x)\!=\! Z(x)$ for all $x\in \widetilde{T}$. Furthermore, $k(({\cal
V}_G)_{<\delta}(\widetilde{W}, T(\widetilde{\Sigma})))\subset{\cal
V}_G(T(\widetilde{\Sigma}))$.
\marginwrite{sflemmaC}
\end{lemma}
\begin{proof}{} As with lemma~\ref{extension}, the positive
codimension and codimen\-sion-zero cases are similar, so we only discuss
the former. Let ${\cal W}$ be the image of $W$ in ${\cal O}$, and
denote $\tau^{-1}{\cal W}$ by $\widetilde{\cal W}$. By
lemma~\ref{lift}(iii), $\widetilde{\cal W}\!=\!
\widetilde{p}(\widetilde{W})$, and by lemma~\ref{lift}(ii), it is
$H$-invariant. Since it is a submanifold of $\widetilde{\cal O}$, it
follows that ${\cal W}$ is a suborbifold of ${\cal O}$. A section
$X\in{\cal A}_G(\widetilde{W},T(\widetilde{\Sigma}))$ induces a
well-defined section $\widetilde{p}_*X\in {\cal X}(\widetilde{\cal
W},T(\widetilde{\cal O}))$. By lemma~\ref{lift}(ii),
$\widetilde{p}_*X$ is $H$-equivariant.
We claim that there exists a positive $\delta$ so that if $X\in ({\cal
A}_G)_{<\delta}(\widetilde{W},T(\widetilde{\Sigma}))$ then
$\widetilde{p}_*X\in ({\cal X}_H)_{<1/2}(\widetilde{\cal
W},T(\widetilde{\cal O}))$. For if not, there would be a sequence $x_i$
in $\widetilde{W}$ such that $\norm{X(x_i)}\to 0$ but
$\norm{\widetilde{p}_*X(\widetilde{p}(x_i))}\geq 1/2$. Since $W$ is
compact, there exists a compact subset $C\subset
\widetilde{W}$ such that $\sigma(C)\!=\! W$. There exist
elements $g_i\in G$ so that $g_i(x_i)\in C$, and if $h_i\in H$ are
obtained using lemma~\ref{lift}(i) then
$\norm{X(g_i(x_i))}\!=\!\norm{X(x_i)}$ while
$\norm{\widetilde{p}_*X(\widetilde{p}g_i(x_i))}\!=\!
\norm{\widetilde{p}_*X(h_i\widetilde{p}(x_i))}\!=\!
\norm{\widetilde{p}_*X(\widetilde{p}(x_i))}\allowbreak\geq 1/2$. So
we may assume that the $x_i$ lie in $C$, hence that they converge to
$x\in C$. Then, $\norm{X(x)}\!=\! 0$ but
$\norm{\widetilde{p}_*X(\widetilde{p}(x))}\geq 1/2$, a contradiction.
We now follow the proof of lemma~\ref{lemmaC}. Let
$k_{\widetilde{\cal O}}\colon ({\cal X}_H)_{<1/2}(\widetilde{\cal
W},T(\widetilde{\cal O}))\to {\cal X}_H(T(\widetilde{\cal O}))$ be
obtained using lemma~\ref{orbextension}. Let
$\nu_\epsilon(\widetilde{W})$ be the $\epsilon$-normal bundle of
$\widetilde{W}$. Since $W$ is compact, for sufficiently
small~$\epsilon$, $j_a\colon \nu_\epsilon(\widetilde{W})\to
\widetilde{\Sigma}$ can be defined by
$j_a(\omega)\!=\!\exp_a(\omega)$ and carries
$\nu_\epsilon(\widetilde{W})$ diffeomorphically to a neighborhood of
$\widetilde{W}$ in $\widetilde{\Sigma}$. Since $W$ is compact, we may
choose $\epsilon$ small enough so that $j_a(\omega)\in
\partial_h\widetilde{\Sigma}\times I$ only
when $\pi(\omega)\in\partial_h\widetilde{\Sigma}\times I$.
Since $G$ acts as isometries and preserves horizontal lifts, the
aligned parallel translation $P_a$ is $G$-equivariant. Using
lemma~\ref{equivariant function} there exists a smooth $G$-equivariant
function $\alpha\colon \widetilde{\Sigma}\to [0,1]$ which is
identically~1 on $\widetilde{W}$ and identically~0 on
$\widetilde{\Sigma}-j_a(\nu_{\epsilon/2}(\widetilde{W}))$. Define
$k_{\widetilde{\Sigma}}\colon ({\cal
A}_G)_{<\delta}(\widetilde{W},T(\widetilde{\Sigma}))\to {\cal
V}_G(T(\widetilde{\Sigma}))$ by
$$k_{\widetilde{\Sigma}}(X)(x)\!=\!
\cases{
\alpha(x)P_a(X(\pi(j_a^{-1}(x))),j_a^{-1}(x))_v&for
$x\in j_a(\nu_{\epsilon}(\widetilde{W}))$\cr
Z(x)&for $x\in \widetilde{\Sigma}-j_a(\nu_{\epsilon/2}(\widetilde{W}))$\cr}$$
\noindent and $k$ by
$$k(X)(x)=(\widetilde{p}_*\vert_{H_x})^{-1}
(k_{\widetilde{\cal O}}(\widetilde{p}_*X)(\widetilde{p}(x)))
\;+\;k_{\widetilde{\Sigma}}(X_v)(x)\ .$$
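As in lemma~\ref{lemmaC}, one checks that $k(X)$ has the required
properties; for instance, since $k_{\widetilde{\Sigma}}(X_v)$ is
vertical,
$$\widetilde{p}_*k(X)(x)=k_{\widetilde{\cal O}}(\widetilde{p}_*X)(\widetilde{p}(x))\ ,$$
\noindent and the $G$-equivariance of $k(X)$ follows from the
$H$-equivariance of $k_{\widetilde{\cal O}}(\widetilde{p}_*X)$ together
with the $G$-equivariance of $\alpha$, $j_a$, and~$P_a$.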
\end{proof}
\begin{lemma}{sflemmaD}{} Let $W$ be a vertical suborbifold of
$\Sigma$. For small $\delta>0$, there exists a continuous map
$$X\colon ((\hbox{\sl I\/}mb_G)_f)_{<\delta}(\widetilde{W},
\widetilde{\Sigma})
\to{\cal A}_G(\widetilde{W},
T(\widetilde{\Sigma}))$$
such that $\exp_a(X(j)(x))\!=\! j(x)$ for all
$x\in \widetilde{W}$ and $j\in ((\hbox{\sl I\/}mb_G)_f)_{<\delta}(\widetilde{W},
\widetilde{\Sigma})$.
Moreover, $X(((\hbox{\sl I\/}mb_G)_v)_{<\delta}(\widetilde{W},
\widetilde{\Sigma}))\subseteq{\cal
V}_G(\widetilde{W},T(\widetilde{\Sigma}))$, and if $j(x)\!=\!
i_{\widetilde{W}}(x)$ then $X(j)(x)\!=\! Z(x)$.
\marginwrite{sflemmaD}
\end{lemma}
\begin{proof}{} Let $N_\epsilon(\widetilde{W})$ be as defined before
the proof of lemma~\ref{lemmaD}. Since $W$ is compact, we can choose
$\epsilon$ small enough to ensure the local diffeomorphism
condition. Choose $\delta$ small enough so that
$j(x)\in\exp_a(N_\epsilon(\widetilde{W})\cap T_x(\widetilde{\Sigma}))$
for every $x\in \widetilde{W}$ and $j\in
((\hbox{\sl I\/}mb_G)_f)_{<\delta}(\widetilde{W}, \widetilde{\Sigma})$. Define
$X(j)(x)$ to be the unique vector in $N_\epsilon(\widetilde{W})\cap
T_x(\widetilde{\Sigma})$ such that $\exp_a(X(j)(x))$ equals~$j(x)$.
\end{proof}
\begin{theorem}{sftheorem2}{}
Let $W$ be a vertical suborbifold of $\Sigma$. Let $T$ be a closed
fibered neighborhood in $\partial_v\Sigma$ of $T\cap \partial_vW$.
Then
\begin{enumerate}
\item[{\rm (i)}] $\hbox{\sl I\/}mb_f(W,\Sigma\hbox{\ rel\ } T)$ admits local
$\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$ cross-sections, and
\item[{\rm (ii)}] $\hbox{\sl I\/}mb_v(W,\Sigma\hbox{\ rel\ } T)$ admits local
$\hbox{\sl D\/}iff_v(\Sigma\hbox{\ rel\ } T)$ cross-sections.
\end{enumerate}
\marginwrite{sftheorem2}
\end{theorem}
\begin{proof}{} By proposition~\ref{inclusion}, it suffices to
construct local cross-sections at the inclusion $i_W$. Obtain
$k\colon({\cal A}_G)_{<\delta}(\widetilde{W},T(\widetilde{\Sigma}))\to
{\cal A}_G(T(\widetilde{\Sigma}))$, and $X\colon
((\hbox{\sl I\/}mb_G)_f)_{<\delta_1}(\widetilde{W},\widetilde{\Sigma}) \to {\cal
A}_G(\widetilde{W},T(\widetilde{\Sigma}))$ using lemmas~\ref{sflemmaC}
and~\ref{sflemmaD}. Fix a neighborhood $\widetilde{U}$ of
$i_{\widetilde{W}}$ small enough so that $X(\widetilde{U})\subseteq
({\cal A}_G)_{<\delta}(\widetilde{W}, T(\widetilde{\Sigma}))$. Let
$U$ be a neighborhood of $i_W$ small enough so that each element $j$ of
$U$ has a unique lift $\widetilde{j}$ into $\widetilde{U}$, and so
that if $j$ agrees with $i_W$ on $\partial_vW$ then $\widetilde{j}$
agrees with $i_{\widetilde{W}}$ on $\partial_v\widetilde{W}$. For
$j\in U$, define $\chi(j)$ to be the element of $\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ }
T)$ induced by $F_akX(\widetilde{j})$.
\end{proof}
As in section~\ref{restrict}, we have the following immediate
corollaries.
\begin{corollary}{sfcorollary2}{} Let $W$ be a vertical suborbifold of
$\Sigma$. Let $T$ be a fibered neighborhood in $\partial_v\Sigma$ of
$T\cap\partial_vW$. Then the following restrictions are locally
trivial:
\begin{enumerate}
\item[{\rm(i)}] $\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)\to \hbox{\sl I\/}mb_f(W,\Sigma\hbox{\ rel\ } T)$, and
\item[{\rm(ii)}] $\hbox{\sl D\/}iff_v(\Sigma\hbox{\ rel\ } T)\to
\hbox{\sl I\/}mb_v(W,\Sigma\hbox{\ rel\ } T)$.
\end{enumerate}
\marginwrite{sfcorollary2}
\end{corollary}
\begin{corollary}{sfcorollary3}{} Let $V$ and $W$ be vertical
suborbifolds of $\Sigma$, with $W\subseteq V$. Let $T$ be a closed
fibered neighborhood in $\partial_v\Sigma$ of $T\cap\partial_vW$. Then
the following restrictions are locally trivial:
\begin{enumerate}
\item[{\rm(i)}] $\hbox{\sl I\/}mb_f(V,\Sigma\hbox{\ rel\ } T)\to
\hbox{\sl I\/}mb_f(W,\Sigma\hbox{\ rel\ } T)$, and
\item[{\rm(ii)}] $\hbox{\sl I\/}mb_v(V,\Sigma\hbox{\ rel\ } T)\to
\hbox{\sl I\/}mb_v(W,\Sigma\hbox{\ rel\ } T)$.
\end{enumerate}
\marginwrite{sfcorollary3}
\end{corollary}
\begin{theorem}{sfsquare}{} Let $W$ be a vertical suborbifold of
$\Sigma$. Let $T$ be a closed fibered neighborhood in
$\partial_v\Sigma$ of $T\cap\partial_vW$, and let $S\!=\! p(T)$. Then
all four maps in the following square are locally trivial:
$$\vbox{\halign{\hfil#\hfil\quad&#&\quad\hfil#\hfil\cr
$\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$&$\longrightarrow$&
$\hbox{\sl I\/}mb_f(W,\Sigma\hbox{\ rel\ } T)$\cr
\noalign{
}
$\mapdown{}$&&$\mapdown{}$\cr
\noalign{
}
$\hbox{\sl D\/}iff({\cal O}\hbox{\ rel\ } S)$&$\longrightarrow$&
$\hbox{\sl I\/}mb(p(W),{\cal O}\hbox{\ rel\ } S)$\rlap{\ .}\cr}}$$
\marginwrite{sfsquare}
\end{theorem}
\section{Restricting to the boundary or the basepoint}
\label{basept}
\marginwrite{basept}
Our restriction theorems deal with the case when the suborbifold is
properly imbedded. By a simple doubling trick, we can also extend to
restriction to suborbifolds of the boundary.
\begin{proposition}{restrict to boundary}{} Let $\Sigma\to{\cal O}$ be
a singular fibering. Let $S$ be a suborbifold of $\partial{\cal O}$,
and let $T\!=\! p^{-1}(S)$. Then
\begin{enumerate}
\item[{\rm(a)}] $\hbox{\sl I\/}mb(S,\partial{\cal O})$ admits local
$\hbox{\sl D\/}iff({\cal O})$ cross-sections.
\item[{\rm(b)}] $\hbox{\sl I\/}mb_f(T,\partial_v{\Sigma})$ admits local
$\hbox{\sl D\/}iff_f(\Sigma)$ cross-sections.
\end{enumerate}
\marginwrite{restrict to boundary}
\end{proposition}
\begin{proof}{} For (a), we first show that $\hbox{\sl D\/}iff(\partial{\cal O})$
admits local $\hbox{\sl D\/}iff({\cal O})$ cross-sections. Let $\Delta$ be the
double of ${\cal O}$ along $\partial{\cal O}$, and regard ${\cal O}$ as
a suborbifold of $\Delta$ by identifying it with one of the two copies
of ${\cal O}$ in $\Delta$. By theorem~\ref{orbtheoremB},
$\hbox{\sl I\/}mb(\partial{\cal O}, \Delta)$ admits local $\hbox{\sl D\/}iff(\Delta)$
cross-sections. We may regard $\hbox{\sl D\/}iff(\partial{\cal O})$ as a subspace
of $\hbox{\sl I\/}mb(\partial{\cal O},\Delta)$. Suppose $\chi\colon
U\to\hbox{\sl D\/}iff(\Delta)$ is a local cross-section at a point in
$\hbox{\sl I\/}mb(\partial{\cal O},\Delta)$ that lies in $\hbox{\sl D\/}iff(\partial{\cal
O})$. By composing with the diffeomorphism of $\Delta$ that
interchanges the two copies of ${\cal O}$, and reducing the size of
$U$ if necessary, we may assume that $\chi$ carries the elements of
$U$ that preserve $\partial{\cal O}$ to diffeomorphisms that preserve
${\cal O}$. Then a local $\hbox{\sl D\/}iff({\cal O})$ cross-section on
$U\cap\hbox{\sl D\/}iff(\partial{\cal O})$ is defined by sending $g$ to
$\chi(g)\vert_{\cal O}$.
By proposition~\ref{inclusion}, for (a) it suffices to produce local
cross-sections at the inclusion $i_S$. By theorem~\ref{orbtheoremB},
there is a local $\hbox{\sl D\/}iff(\partial{\cal O})$ cross-section $\chi_1$ for
$\hbox{\sl I\/}mb(S,\partial {\cal O})$ at $i_S$. Let $\chi_2$ be a local
$\hbox{\sl D\/}iff({\cal O})$ cross-section for $\hbox{\sl D\/}iff(\partial {\cal O})$ at
$\chi_1(i_S)$. On a neighborhood $U$ of $i_S$ in
$\hbox{\sl I\/}mb(S,\partial{\cal O})$ small enough so that $\chi_2\chi_1$ is
defined, the composition is the desired $\hbox{\sl D\/}iff({\cal O})$
cross-section. For if $j\in U$, then
$\chi_2(\chi_1(j))(i_S)(x)=\chi_1(j)(i_S)(x)=\chi_1(j)(x)=j(x)$.
The proof of (b) is similar. Double $\Sigma$ along $\partial_v\Sigma$
and apply theorem~\ref{sftheorem2}, to produce local $\hbox{\sl D\/}iff_f(\Sigma)$
cross-sections for $\hbox{\sl D\/}iff_f(\partial_v\Sigma)$. Apply it again to
produce local $\hbox{\sl D\/}iff_f(\partial_v\Sigma)$ cross-sections for
$\hbox{\sl I\/}mb_f(T,\partial_v\Sigma)$. Their composition, where defined, is a
local $\hbox{\sl D\/}iff_f(\Sigma)$ cross-section for
$\hbox{\sl I\/}mb_f(T,\partial_v\Sigma)$.
\end{proof}
An immediate consequence is
\begin{corollary}{special1}{} Let $\Sigma\to{\cal O}$ be a singular
fibering. Let $S$ be a suborbifold of $\partial{\cal O}$, and let
$T=p^{-1}(S)$. Then $\hbox{\sl D\/}iff({\cal O})\to \hbox{\sl I\/}mb(S,\partial{\cal O})$ and
$\hbox{\sl D\/}iff_f(\Sigma)\to \hbox{\sl I\/}mb_f(T,\partial_v\Sigma)$ are locally trivial.
In particular, $\hbox{\sl D\/}iff({\cal O})\to\hbox{\sl D\/}iff(\partial{\cal O})$ and
$\hbox{\sl D\/}iff_f(\Sigma)\to\hbox{\sl D\/}iff_f(\partial_v\Sigma)$ are locally trivial.
\marginwrite{special1}
\end{corollary}
Here are two other consequences which are applied in \cite{M-R}.
\begin{corollary}{special2}{} Let ${\cal W}$ be a suborbifold of ${\cal
O}$. Then $\hbox{\sl I\/}mb({\cal W},{\cal O})\to\hbox{\sl I\/}mb({\cal W}\cap \partial{\cal
O},\partial{\cal O})$ is locally trivial.
\marginwrite{special2}
\end{corollary}
\begin{proof}{} By theorem~\ref{orbtheoremB}, $\hbox{\sl I\/}mb({\cal W}\cap
\partial{\cal O},\partial{\cal O})$ admits local $\hbox{\sl D\/}iff(\partial{\cal
O})$ cross-sections, and by proposition~\ref{restrict to boundary},
$\hbox{\sl D\/}iff(\partial{\cal O})$ admits local $\hbox{\sl D\/}iff({\cal O})$
cross-sections. Composing them gives local $\hbox{\sl D\/}iff({\cal O})$
cross-sections for $\hbox{\sl I\/}mb({\cal W}\cap\partial{\cal O},\partial{\cal
O})$.
\end{proof}
\begin{corollary}{special3}{} Let $W$ be a vertical suborbifold of
$\Sigma$. Then $\hbox{\sl I\/}mb_f(W,\Sigma)\to \hbox{\sl I\/}mb_f(W\cap
\partial_v\Sigma,\partial_v\Sigma)$ is locally trivial.
\marginwrite{special3}
\end{corollary}
\begin{proof}{} Theorem~\ref{sftheorem2}, applied to
$\partial_v\Sigma$, and proposition~\ref{restrict to boundary} show
that $\hbox{\sl I\/}mb_f(W\cap\partial_v\Sigma,\partial_v\Sigma)$ admits local
$\hbox{\sl D\/}iff_f(\Sigma)$ cross-sections.
\end{proof}
Many applications of the fibration $\hbox{\sl D\/}iff(M)\to\hbox{\sl I\/}mb(V,M)$ concern the
case when the submanifold is a single point. Since in the fibered case
a single point is not usually a vertical submanifold, this case is not
directly covered by our previous theorems. The next proposition allows
nonvertical suborbifolds that are contained in a single fiber, so
applies when the submanifold is a single point. To set notation, let
$p\colon \Sigma\to {\cal O}$ be a singular fibering. Let $P$ be a
suborbifold of $\Sigma$ which is contained in a single fiber~$F$. Let
$T$ be a fibered closed subset of $\partial_v\Sigma$. By
$\hbox{\sl I\/}mb_t(P,\Sigma\hbox{\ rel\ } T)$ we denote the orbifold imbeddings whose image
is contained in a single fiber of~$\Sigma$, which restrict to the
identity on $P\cap T$, and which map $P\cap(\partial \Sigma-T)$ into
$\partial\Sigma-T$.
\begin{proposition}{restrict to basepoint}{} Let $T$ be a fibered
closed subset of $\partial_v\Sigma$, which is a neighborhood in
$\partial_v\Sigma$ of $P\cap T$. Then $\hbox{\sl I\/}mb_t(P,\Sigma\hbox{\ rel\ } T)$ admits
local $\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$ cross sections.
\marginwrite{restrict to basepoint}
\end{proposition}
\begin{proof}{}
Notice that $p(P)$ is a point and is a properly imbedded suborbifold
of ${\cal O}$, with orbifold structure determined by the local group
at $p(P)$. Each imbedding $i\in\hbox{\sl I\/}mb_t(P,\Sigma)$ induces an orbifold
imbedding $pi\colon p(P)\to{\cal O}$. Let $S\!=\! p(T)$.
By proposition~\ref{inclusion}, it suffices to produce a local
cross-section at the inclusion $i_P$. By theorem~\ref{orbtheoremB},
$\hbox{\sl I\/}mb(p(P),{\cal O}\hbox{\ rel\ } S)$ has local $\hbox{\sl D\/}iff({\cal O}\hbox{\ rel\ } S)$
cross-sections, and by proposition~\ref{sftheorem1}, $\hbox{\sl D\/}iff({\cal
O}\hbox{\ rel\ } S)$ has local $\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$ cross-sections. A
suitable composition of these gives a local $\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$
cross-section $\chi_1$ for $\hbox{\sl I\/}mb(p(P),{\cal O}\hbox{\ rel\ } S)$ at $pi_P$. As
remarked in section~\ref{palais}, we may assume that $\chi_1(pi_P)$ is
the identity diffeomorphism of $\Sigma$. By corollary~\ref{orbcoro2},
there exists a local $\hbox{\sl D\/}iff(F\hbox{\ rel\ } T\cap F)$ cross-section $\chi_2$ for
$\hbox{\sl I\/}mb(P,F\hbox{\ rel\ } T\cap F)$ at $i_P$, and we may assume that $\chi_2(i_P)$
is the identity diffeomorphism of $F$. Let $\chi_3$ be a local
$\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$ cross-section for $\hbox{\sl I\/}mb_f(F,\Sigma\hbox{\ rel\ } T)$ at
$i_F$ given by corollary~\ref{sfcorollary2}. Regarding $\hbox{\sl D\/}iff(F\hbox{\ rel\ }
F\cap T)$ as a subspace of $\hbox{\sl I\/}mb_f(F,\Sigma\hbox{\ rel\ } T)$, we may assume
that the composition $\chi_3\chi_2$ is defined. On a sufficiently
small neighborhood of $i_P$ in $\hbox{\sl I\/}mb_t(P,\Sigma\hbox{\ rel\ } T)$ define
$\chi(j)\in\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ } T)$ by
$$\chi(j)=\chi_1(p(j))\,(\chi_3\chi_2)(\chi_1(p(j))^{-1}\circ j)\ .$$
\noindent Then for $x\in P$ we have
$$\eqalign{\chi(j)i_P(x)
&=\chi_1(p(j))\,(\chi_3\chi_2)(\chi_1(p(j))^{-1}\circ j)(x)\cr
&=\chi_1(p(j))\,\chi_1(p(j))^{-1} j (x)\cr
&=j(x)\cr}$$
\end{proof}
\noindent This yields immediately
\begin{corollary}{restrict imbeddings to S}{} Let $W$ be a vertical
suborbifold of $\Sigma$ containing $P$. Then $\hbox{\sl D\/}iff_f(\Sigma\hbox{\ rel\ }
T)\to\hbox{\sl I\/}mb_t(P,\Sigma\hbox{\ rel\ } T)$ and $\hbox{\sl I\/}mb_f(W,\Sigma\hbox{\ rel\ } T)\to
\hbox{\sl I\/}mb_t(P,\Sigma\hbox{\ rel\ } T)$ are locally trivial.
\end{corollary}
\section{The space of Seifert fiberings of a Haken 3-manifold}
\label{sfspace}
\marginwrite{sfspace}
Let $p\colon\Sigma\to {\cal O}$ be a Seifert fibering of a Haken
manifold $\Sigma$. As noted in section~\ref{sfiber}, $p$ is a singular
fibering. Denote by $\hbox{\sl d\/}iff_f(\Sigma)$ the connected component of the
identity in $\hbox{\sl D\/}iff_f(\Sigma)$, and similarly for other spaces of
diffeomorphisms and imbeddings. The main result of this section is the
following.
\begin{theorem}{space of fp homeos}{} Suppose that $\Sigma$ is a
Haken 3-manifold. Then the inclusion $\hbox{\sl d\/}iff_f(\Sigma)\to
\hbox{\sl d\/}iff(\Sigma)$ is a weak homotopy equivalence.
\marginwrite{space of fp homeos}
\end{theorem}
Before proving theorem~\ref{space of fp homeos}, we give an
application. Each element of $\hbox{\sl D\/}iff(\Sigma)$ carries the given
fibering to an isomorphic fibering, and $\hbox{\sl D\/}iff_f(\Sigma)$ is precisely
the stabilizer of the given fibering under this action. Therefore it
is reasonable to define the {\it space of Seifert fiberings}
isomorphic to the given fibering to be the space of cosets
$\hbox{\sl D\/}iff(\Sigma)/\hbox{\sl D\/}iff_f(\Sigma)$. Since $\hbox{\sl D\/}iff_f(\Sigma)$ is a closed
subgroup, the quotient $\hbox{\sl D\/}iff(\Sigma)\to
\hbox{\sl D\/}iff(\Sigma)/\hbox{\sl D\/}iff_f(\Sigma)$ is a principal fibering with fiber
$\hbox{\sl D\/}iff_f(\Sigma)$. As an immediate corollary to theorem~\ref{space of
fp homeos}, we will obtain:
\begin{theorem}{space of sf's}{} Suppose that $\Sigma$ is a
Haken 3-manifold. Then each path component of the space of Seifert
fiberings of $\Sigma$ is weakly contractible.
\marginwrite{space of sf's}
\end{theorem}
\begin{proof}{} As sketched on p.~85 of \cite{Waldhausen}, two
fiber-preserving diffeomorphisms of $\Sigma$ that are isotopic are
isotopic through fiber-preserving diffeomorphisms. This implies that
$\pi_0(\hbox{\sl D\/}iff_f(\Sigma))\to\pi_0(\hbox{\sl D\/}iff(\Sigma))$ is injective. By
theorem~\ref{space of fp homeos},
$\pi_q(\hbox{\sl D\/}iff_f(\Sigma))\to\pi_q(\hbox{\sl D\/}iff(\Sigma))$ is an isomorphism for
all $q\geq 1$. The theorem now follows from the homotopy exact
sequence for the fibration $\hbox{\sl D\/}iff(\Sigma)\to
\hbox{\sl D\/}iff(\Sigma)/\hbox{\sl D\/}iff_f(\Sigma)$.
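In more detail, for each $q\geq 1$ the relevant segment of that sequence reads
$$\pi_q(\hbox{\sl D\/}iff_f(\Sigma))\longrightarrow
\pi_q(\hbox{\sl D\/}iff(\Sigma))\longrightarrow
\pi_q(\hbox{\sl D\/}iff(\Sigma)/\hbox{\sl D\/}iff_f(\Sigma))\longrightarrow
\pi_{q-1}(\hbox{\sl D\/}iff_f(\Sigma))\longrightarrow
\pi_{q-1}(\hbox{\sl D\/}iff(\Sigma))\ ,$$
\noindent in which, by the preceding observations, the first map is surjective and the
last map is injective, so the middle group is trivial; since left translation by
elements of $\hbox{\sl D\/}iff(\Sigma)$ carries any path component of the coset space to
the component of the identity coset, this gives the claim for every path
component.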
\end{proof}
For compact Seifert fibered 3-manifolds, apart from a small list of
well-known exceptions, every diffeomorphism is isotopic to a
fiber-preserving diffeomorphism. So the following immediate corollary
applies to most cases.
\begin{corollary}{space of sf's coro}{} Suppose that $\Sigma$ is a
Haken 3-manifold such that every diffeomorphism is isotopic to
a fiber-preserving diffeomorphism. Then the space of Seifert fiberings
of $\Sigma$ is weakly contractible.
\marginwrite{space of sf's coro}
\end{corollary}
\noindent The proof of theorem~\ref{space of fp homeos} will use the
following lemma.
\begin{lemma}{rel fiber}{} Let $\Sigma$ be a Haken
Seifert fibered 3-manifold, and let $C$ be a fiber of $\Sigma$. Then
each component of $\hbox{\sl D\/}iff_v(\Sigma\hbox{\ rel\ } C)$ is contractible.
\marginwrite{rel fiber}
\end{lemma}
\begin{proof}{} Since $\Sigma$ is Haken, the base orbifold
of $\Sigma-C$ has nonpositive Euler characteristic and is not
closed. It follows (see~\cite{Scott}) that $\Sigma-C$ admits an
$\hyperbolic^2\times\reals$ geometry. Thus there is an action of $\pi_1(\Sigma-C)$
on $\hyperbolic^2\times\reals$ such that every element preserves the $\reals$-fibers
and acts as an isometry on the $\hyperbolic^2$ factor. Let $B$ be the orbit
space of~$\Sigma-C$.
It suffices to show that $\hbox{\sl d\/}iff_v(\Sigma\hbox{\ rel\ } C)$ is contractible. Let
$N$ be a fibered solid torus neighborhood of $C$ in $\Sigma$. It is
not difficult to see that $\hbox{\sl d\/}iff_v(\Sigma\hbox{\ rel\ } C)$ deformation retracts
to $\hbox{\sl d\/}iff_v(\Sigma\hbox{\ rel\ } N)$, which can be identified with
$\hbox{\sl d\/}iff_v(\Sigma-C\hbox{\ rel\ } N-C)$, so it suffices to show that the latter is
contractible. For $f\in\hbox{\sl d\/}iff_v(\Sigma-C\hbox{\ rel\ } N-C)$, let $F$ be a lift
of $f$ to $\hyperbolic^2\times\reals$ that has the form $F(x,s)\!=\! (x,s+F_2(x,s))$,
where $F_2(x,s)\in\reals$. Since $f$ is vertically isotopic to the
identity relative to $N-C$, we may moreover choose $F$ so that
$F_2(x,s)\!=\! 0$ if $(x,s)$ projects to $N-C$. To see this, we choose
the lift $F$ to fix a point in the preimage $W$ of $N-C$. Since $f$ is
homotopic to the identity relative to $N-C$, $F$ is equivariantly
homotopic to a covering translation relative to $W$. That covering
translation fixes the point in $W$, and therefore must be the
identity. Thus $F$ fixes $W$ and commutes with every covering
translation.
Define $K_t$ by $K_t(x,s)\!=\! (x,s+(1-t)F_2(x,s))$. Since $K_0\!=\! F$
and $K_1$ is the identity, and each $K_t$ is the identity on the
preimage of $N-C$, this will define a contraction of
$\hbox{\sl D\/}iff_v(\Sigma-C\hbox{\ rel\ } N-C)$ once we have shown that each $K_t$ is
equivariant. Let $\gamma\in\pi_1(\Sigma-C)$. From~\cite{Scott},
$\hbox{\sl I\/}som(\hyperbolic^2\times\reals)=\hbox{\sl I\/}som(\hyperbolic^2)\times\hbox{\sl I\/}som(\reals)$, so we can write
$\gamma(x,s)=(\gamma_1(x),\epsilon_\gamma s+\gamma_2)$, where
$\epsilon_\gamma\!=\! \pm 1$ and $\gamma_2\in\reals$. Since $F\gamma\!=\!
\gamma F$, a straightforward calculation shows that
$$F_2(\gamma_1(x),\epsilon_\gamma s+\gamma_2)\!=\! \epsilon_\gamma
F_2(x,s)\ .$$
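\noindent Indeed, spelling out the calculation, $F\gamma(x,s)=F(\gamma_1(x),\epsilon_\gamma
s+\gamma_2)=(\gamma_1(x),\epsilon_\gamma s+\gamma_2+F_2(\gamma_1(x),\epsilon_\gamma
s+\gamma_2))$, while $\gamma F(x,s)=\gamma(x,s+F_2(x,s))=(\gamma_1(x),\epsilon_\gamma
s+\epsilon_\gamma F_2(x,s)+\gamma_2)$; comparing the second coordinates of these two
expressions gives the displayed relation.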
\noindent Now we calculate
$$\eqalign{K_t\gamma(x,s)&= K_t(\gamma_1(x),\epsilon_\gamma
s+\gamma_2)\cr
&= (\gamma_1(x),\epsilon_\gamma s +\gamma_2+(1-t)
F_2(\gamma_1(x),\epsilon_\gamma s+\gamma_2))\cr
&= (\gamma_1(x),\epsilon_\gamma s +\gamma_2+(1-t)
\epsilon_\gamma F_2(x,s))\cr
&= (\gamma_1(x),\epsilon_\gamma (s+(1-t)F_2(x,s)) +\gamma_2)\cr
&= \gamma(x,s+(1-t)F_2(x,s))\cr
&= \gamma K_t(x,s)\cr}$$
\noindent showing that $K_t$ is equivariant.
\end{proof}
\begin{proof}{\ref{space of fp homeos}} We first examine
$\hbox{\sl d\/}iff_v (\Sigma)$. Choose a regular fiber $C$ and consider the
restriction $\hbox{\sl d\/}iff_v (\Sigma)\to \hbox{\it imb\/}_v(C,\Sigma)\cong\hbox{\sl d\/}iff(C)\cong
\hbox{\sl d\/}iff(S^1)\simeq \hbox{SO}(2)$. By corollary~\ref{sfcorollary2}(ii), this is
a fibration. By lemma~\ref{rel fiber}, each component of the fiber
$\hbox{\sl D\/}iff_v(\Sigma\hbox{\ rel\ } C)\cap\hbox{\sl d\/}iff_v(\Sigma)$ is contractible. It follows by the
exact sequence for this fibration that
$\pi_q(\hbox{\sl d\/}iff_v(\Sigma))\cong\pi_q(\hbox{SO}(2))\!=\! 0$ for $q\geq 2$, and for $q\!=\!
1$ we have an exact sequence $$0\longrightarrow
\pi_1(\hbox{\sl d\/}iff_v(\Sigma))\longrightarrow
\pi_1(\hbox{\sl d\/}iff(C))\longrightarrow
\pi_0(\hbox{\sl D\/}iff(\Sigma\hbox{\ rel\ } C)\cap \hbox{\sl d\/}iff_v(\Sigma))\longrightarrow
0\ .$$
We will first show that exactly one of the following holds.
\begin{enumerate}
\item[a)] $C$ is central and $\pi_1(\hbox{\sl d\/}iff_v(\Sigma))\cong\integers$ generated by the
vertical $S^1$-action.
\item[b)] $C$ is not central and $\pi_1(\hbox{\sl d\/}iff_v(\Sigma))$ is trivial.
\end{enumerate}
\noindent
Suppose first that the fiber $C$ is central in $\pi_1(\Sigma)$. Then
there is a vertical $S^1$-action on $\Sigma$ which moves the basepoint
(in $C$) once around $C$. This maps onto the generator of
$\pi_1(\hbox{\sl d\/}iff(C))$, so $\pi_1(\hbox{\sl d\/}iff_v(\Sigma))\to
\pi_1(\hbox{\sl d\/}iff(C))$ is an isomorphism. Therefore
$\pi_1(\hbox{\sl d\/}iff_v(\Sigma))$ is infinite cyclic, with generator
represented by the vertical $S^1$-action.
If the fiber is not central, then $\pi_1(\hbox{\sl d\/}iff(C))\to
\pi_0(\hbox{\sl D\/}iff(\Sigma\hbox{\ rel\ } C)\cap \hbox{\sl d\/}iff_v(\Sigma))$ carries the
generator to a diffeomorphism of $\Sigma$ which induces an inner
automorphism of infinite order on $\pi_1(\Sigma,x_0)$, where $x_0$ is
a basepoint in $C$. Since elements of $\hbox{\sl D\/}iff(\Sigma\hbox{\ rel\ } C)$ fix the
basepoint, this diffeomorphism (and its powers) are not in
$\hbox{\sl d\/}iff(\Sigma\hbox{\ rel\ } C)$. Therefore $\pi_1(\hbox{\sl d\/}iff(C))\to
\pi_0(\hbox{\sl D\/}iff(\Sigma\hbox{\ rel\ } C)\cap \hbox{\sl d\/}iff_v(\Sigma))$ is injective, so
$\pi_1(\hbox{\sl d\/}iff_v(\Sigma))$ is trivial.
Now consider the fibration of theorem~\ref{sfproject diffs}:
$$\hbox{\sl D\/}iff_v(\Sigma)\cap \hbox{\sl d\/}iff_f(\Sigma)
\longrightarrow \hbox{\sl d\/}iff_f(\Sigma)\longrightarrow
\hbox{\sl d\/}iff({\cal O})\ .\leqno{(*)}$$
Observe that $\hbox{\sl d\/}iff({\cal O})$ is homotopy equivalent to the identity
component of the space of diffeomorphisms of the 2-manifold ${\cal
O}-{\cal E}$, where ${\cal E}$ is the exceptional set. Since $\Sigma$
is Haken, this $2$-manifold is either a torus, annulus, disc with one
puncture, M\"obius band, or Klein bottle, or a surface of negative Euler
characteristic. Therefore $\hbox{\sl d\/}iff({\cal O})$ is contractible unless
$\chi({\cal O})=0$, in which case its higher homotopy groups are all
trivial, and its fundamental group is isomorphic to the center of
$\pi_1({\cal O})$. In the latter cases, the elements of
$\pi_1(\hbox{\sl d\/}iff({\cal O}))$ are classified by their traces at a basepoint of ${\cal O}-{\cal
E}$. From the exact sequence for the fibration $(*)$, it follows that
$\pi_q(\hbox{\sl d\/}iff_f(\Sigma))\!=\! 0$ for~$q\geq 2$.
To complete the proof, we recall the result of Hatcher \cite{Hatcher}:
for $M$ Haken, $\pi_q(\hbox{\sl d\/}iff(M))$ is $0$ for $q\geq 2$
and is isomorphic to the center of $\pi_1(M)$ for $q\!=\! 1$, and the
elements of $\pi_1(\hbox{\sl d\/}iff(M))$ are classified by their traces at the
basepoint. We already have $\pi_q(\hbox{\sl d\/}iff_f(\Sigma))\!=\! 0$ for
$q\geq 2$, so it remains to show that $\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\to
\pi_1(\hbox{\sl d\/}iff(\Sigma))$ is an isomorphism.
\noindent {\sl Case I:} $\pi_1({\cal O})$ is centerless.
In this case $\hbox{\sl d\/}iff({\cal O})$ is contractible, and either $C$
generates the center or $\pi_1(\Sigma)$ is centerless. The exact
sequence associated to the fibration $(*)$ shows that
$\pi_1(\hbox{\sl d\/}iff_v(\Sigma))\to\pi_1(\hbox{\sl d\/}iff_f(\Sigma))$ is an
isomorphism. Suppose $C$ generates the center. Since
$\pi_1(\hbox{\sl d\/}iff_v(\Sigma))$ is infinite cyclic generated by the vertical
$S^1$-action, Hatcher's theorem shows that the composition
$$\pi_1(\hbox{\sl d\/}iff_v(\Sigma)) \to
\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\to
\pi_1(\hbox{\sl d\/}iff(\Sigma))$$
\noindent is an isomorphism. Therefore
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\to \pi_1(\hbox{\sl d\/}iff(\Sigma))$ is an
isomorphism. If $\pi_1(\Sigma)$ is
centerless, then $\pi_1(\hbox{\sl d\/}iff(\Sigma))\!=\! 0$,
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\cong\pi_1(\hbox{\sl d\/}iff_v(\Sigma))\!=\! 0$, and again
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\to \pi_1(\hbox{\sl d\/}iff(\Sigma))$ is an
isomorphism.
\noindent {\sl Case II:} $\pi_1({\cal O})$ has center.
Assume first that ${\cal O}$ is a torus.
If $\Sigma$ is the $3$-torus, then by considering the exact sequence
for the fibration $(*)$, one can check directly that the homomorphism
$\partial\colon\pi_1(\hbox{\sl d\/}iff({\cal
O}))\rightarrow\pi_0(\hbox{\sl D\/}iff_v(\Sigma)\cap\hbox{\sl d\/}iff_f(\Sigma))$ is the zero
map. We obtain the exact sequence
$$0\longrightarrow \integers
\longrightarrow \pi_1(\hbox{\sl d\/}iff_f(\Sigma))\longrightarrow
\integers\times\integers\longrightarrow0\ .$$
\noindent
Since $\hbox{\sl d\/}iff_f(\Sigma)$ is a topological group,
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))$ is abelian and hence isomorphic to
$\integers\times\integers\times\integers$. The traces of the generating
elements generate the center of $\pi_1(\Sigma)$, which shows that
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\to\pi_1(\hbox{\sl d\/}iff(\Sigma))$ is an
isomorphism.
Suppose that $\Sigma$ is not a 3-torus. Then $\Sigma={\cal O}\times
I/(x,0)\simeq (\phi(x),1)$ for a homeomorphism $\phi\colon\cal O\rightarrow
\cal O$, $\pi_1(\Sigma)=\langle a,b,t \hbox{\hbox{$\;\vert\;$}} tat^{-1}\!=\! a,
[a,b]\!=\!1,tbt^{-1}\!=\! a^nb\rangle$ for some integer $n$, and the fiber
$a$ generates the center of~$\pi_1(\Sigma)$.
Let $b_0$ and $t_0$ be the images of the generators $b$ and $t$,
respectively, in $\pi_1({\cal O})$. Now $\pi_1(\hbox{\sl d\/}iff({\cal O}))\cong\integers\times\integers$,
generated by elements whose traces represent the elements $b_0$ and $t_0$. By
lifting these isotopies we see that $\partial\colon\pi_1(\hbox{\sl d\/}iff({\cal
O}))\rightarrow\pi_0(\hbox{\sl D\/}iff_v(\Sigma)\cap\hbox{\sl d\/}iff_f(\Sigma))$ is injective. Therefore
$\pi_1(\hbox{\sl d\/}iff_v(\Sigma))$ is isomorphic to $\pi_1(\hbox{\sl d\/}iff_f(\Sigma))$, and the
result follows as in case~I.
Assume now that ${\cal O}$ is a Klein bottle. As in the torus case
we may view $\Sigma={\cal O}\times I/(x,0)\simeq (\phi(x),1)$,
$\pi_1(\Sigma)=\langle a,b,t \hbox{\hbox{$\;\vert\;$}} tat^{-1}\!=\! a^{-1},
[a,b]=1,tbt^{-1}=a^{-n}b^{-1} \rangle$ for some integer $n$, with
fiber $a$, and $\pi_1({\cal O})=\langle b_0,t_0 \hbox{\hbox{$\;\vert\;$}}
t_0b_0t_0^{-1}\!=\! b_0^{-1}\rangle$. Now $\pi_1(\hbox{\sl d\/}iff({\cal O}))$ is
generated by an isotopy whose trace represents the generator of the
center of $\pi_1({\cal O})$, the element $t_0^2$. Observe that
$\pi_1(\Sigma)$ has center if and only if $n\!=\! 0$. If $n=0$, then it
follows that $\partial\colon\pi_1(\hbox{\sl d\/}iff({\cal O}))\rightarrow
\pi_0(\hbox{\sl D\/}iff_v(\Sigma)\cap\hbox{\sl d\/}iff_f(\Sigma))$ is the zero
map. Hence $\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\rightarrow\pi_1(\hbox{\sl d\/}iff({\cal O}))$ is
an isomorphism and the generator of $\pi_1(\hbox{\sl d\/}iff_f(\Sigma))$ is
represented by an isotopy whose trace represents the element $t^2$. By
Hatcher's result,
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\rightarrow\pi_1(\hbox{\sl d\/}iff(\Sigma))$ is an
isomorphism. If $n\neq 0$, then $\partial\colon\pi_1(\hbox{\sl d\/}iff({\cal
O}))\rightarrow\pi_0(\hbox{\sl D\/}iff_v(\Sigma)\cap\hbox{\sl d\/}iff_f(\Sigma))$ is
injective. Since $\pi_1(\Sigma)$ is centerless,
$\pi_1(\hbox{\sl D\/}iff_v(\Sigma)\cap\hbox{\sl d\/}iff_f(\Sigma))\!=\!0$. This implies that
$\pi_1(\hbox{\sl d\/}iff_f(\Sigma))\!=\!0$, and again Hatcher's result applies.
The cases where ${\cal O}$ is an annulus, disc with one puncture, or a
M\"obius band are similar to those of the torus and Klein bottle.
\end{proof}
\begin{thebibliography}{99}
{\footnotesize
\bibitem{Cerf} J. Cerf, Topologie de certains espaces de plongements,
{\em Bull. Soc. Math. Fr.} 89 (1961), 227--380.
\bibitem{Hatcher} A. Hatcher, Homeomorphisms of sufficiently
large $P^2$-irreducible 3-manifolds, {\em Topology} 15 (1976),
343--347.
\bibitem{Hempel} J. Hempel, {\em 3-manifolds}, Princeton University Press,
1976.
\bibitem{Jaco1} W. Jaco, {\em Lectures on Three-manifold Topology,}
CBMS Regional Conference Series No.~43 (1977).
\bibitem{JS} W. Jaco and P. Shalen, Seifert fibered spaces in
3-manifolds, {\em Mem. Amer. Math. Soc.} 220 (1979).
\bibitem{N-R} W. Neumann and F. Raymond, Automorphisms of Seifert
manifolds, 1979 preprint.
\bibitem{Orlik} P. Orlik, {\em Seifert Manifolds,} Springer-Verlag
Lecture Notes in Mathematics 291 (1972).
\bibitem{OVZ} P. Orlik, E. Vogt, and H. Zieschang, Zur Topologie
gefaserter dreidimensionaler Mannigfaltigkeiten, {\em Topology}
6 (1967), 49--64.
\bibitem{M-R} D. McCullough and J. H. Rubinstein, The generalized
Smale Conjecture for 3-manifolds with genus 2 one-sided Heegaard
splittings, preprint.
\bibitem{P} R. Palais, Local triviality of the restriction map for
imbeddings, {\em Comment. Math. Helv.} 34 (1960), 305--312.
\bibitem{Park} Chan-young Park, Homotopy groups of automorphism groups
of some Seifert fiber spaces, dissertation at the University of
Michigan, 1989.
\bibitem{Park1} Chan-young Park, On the weak automorphism group of a
principal bundle, product case, {\em Kyungpook Math. J.} 31 (1991),
25--34.
\bibitem{Scott} P. Scott, The geometries of $3$-manifolds, {\em
Bull. London Math. Soc.} 15 (1983), 401--487.
\bibitem{Seifert} H. Seifert, Topologie dreidimensionaler gefaserter
R\"aume, {\em Acta Math.} 60 (1933), 147--238.
\bibitem{Waldhausen1} F. Waldhausen, Eine Klasse von 3-dimensionalen
Mannigfaltigkeiten I, II, {\em Invent. Math.} 3 (1967), 308--333,
{\bf 4} (1967), 87--117.
\bibitem{Waldhausen2} F. Waldhausen, Gruppen mit Zentrum und
$3$-dimensionale Mannigfaltigkeiten, {\em Topology} 6 (1967),
505--517.
\bibitem{Waldhausen} F. Waldhausen, On irreducible $3$-manifolds
which are sufficiently large, {\em Ann. of Math.} 87 (1968),
56--88.
}
\end{thebibliography}
\end{document}
\begin{document}
\setcitestyle{numbers}
\subjclass[2020]{46E30, 47G10}
\keywords{weighted Hardy operators, rearrangement\hyp{}invariant spaces, optimal spaces, supremum operators, iterated operators}
\thanks{This research was supported by the project OPVVV CAAS CZ.02.1.01/0.0/0.0/16\_019/0000778 and by the grant SVV-2020-260583.}
\begin{abstract}
The behavior of certain weighted Hardy-type operators on rearrangement\hyp{}invariant function spaces is thoroughly studied with emphasis being put on the optimality of the obtained results. First, the optimal rearrangement\hyp{}invariant function spaces\textemdash that is, the best possible function spaces within the class of rearrangement\hyp{}invariant function spaces\textemdash guaranteeing the boundedness of the operators from/to a given rearrangement\hyp{}invariant function space are described. Second, the optimal rearrangement\hyp{}invariant function norms being sometimes complicated, the question of whether and how they can be simplified to more manageable expressions, arguably more useful in practice, is addressed. Last, iterated weighted Hardy-type operators are also studied.
Besides aiming to provide a comprehensive treatment of the optimal behavior of the operators on rearrangement\hyp{}invariant function spaces in one place, the paper is motivated by its applicability in various fields of mathematical analysis, such as harmonic analysis, extrapolation theory or the theory of Sobolev-type spaces.
\end{abstract}
\maketitle
\section{Introduction}
When we face a complicated problem, it is only natural to seek a way to simplify it. Problems (not only) in mathematical analysis often amount to establishing boundedness of operators between certain function spaces. When the function spaces in question are endowed with norms invariant with respect to certain rearrangements/transformations of functions, an often successful way to simplify such problems is to make use of the rearrangements. We shall now make this vague idea more definite.
Arguably the most straightforwardly, the idea can be illustrated by the following well-known example, which traces back to the 1930s. Consider the question of establishing the boundedness of the Hardy--Littlewood maximal operator $M$, defined for measurable functions $f$ on $\R^n$ as
\begin{equation*}
Mf(x) = \sup_{Q \ni x}\frac1{|Q|}\int_Q |f(y)|\d{y},\ x\in\R^n,
\end{equation*}
in which the supremum extends over all cubes in $\R^n$ whose edges are parallel to the coordinate axes, from one function space to another. The famous inequality
\begin{equation}\label{intro:M_upper}
(Mf)^*(t)\lesssim \frac1{t}\int_0^t f^*(s)\d{s} \quad \text{for every $t\in(0,\infty)$}
\end{equation}
by F.~Riesz (\cite{R:32}, $n=1$) and N.~Wiener (\cite{W:39}, $n\in\mathbb{N}$), in which $^*$ denotes the nonincreasing rearrangement and the multiplicative constant depends only on $n$, combined with the classical Hardy--Littlewood inequality (\cite[p.~278]{HLP:52}, see also \eqref{ch1:ri:HLg=chiE} below), tells us that, when the function spaces in question are such that equimeasurable functions (i.e., functions whose distribution functions coincide) have the same norm, it is sufficient to establish the boundedness of a considerably simpler operator, acting on functions of a single variable; namely of the operator $R$ defined for measurable functions $g$ on $(0,\infty)$ as
\begin{equation*}
Rg(t) = \frac1{t}\int_0^t|g(s)|\d{s},\ t\in(0,\infty).
\end{equation*}
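For instance, on Lebesgue spaces this reduction recovers a classical fact: by Hardy's inequality, $\|Rg\|_{L^p(0,\infty)}\leq\frac{p}{p-1}\|g\|_{L^p(0,\infty)}$ for every $p\in(1,\infty)$, and so \eqref{intro:M_upper} immediately yields the boundedness of $M$ on $L^p(\R^n)$ for every $p\in(1,\infty]$.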
Moreover, since the reverse inequality to \eqref{intro:M_upper} also holds, as was proved by C.~Herz (\cite{H:68}, $n=1$) and by C.~Bennett and R.~Sharpley (\cite{BS:79}, $n\in\mathbb{N}$), the boundedness of $M$ between such function spaces is actually equivalently reduced to the boundedness of the Hardy-type operator $R$. In particular, this reduction is valid for rearrangement\hyp{}invariant function spaces (see \myref{Section}{sec:prel} for precise definitions), which constitute a broad class of function spaces, containing Lebesgue spaces, Orlicz spaces or Lorentz(--Zygmund) spaces, to name a few.
Another important operator of harmonic analysis, hardly needing an introduction, is the Hilbert transform (e.g., \cite[Chapter~3, Definition~4.1]{BS}). An inequality for the nonincreasing rearrangement of the Hilbert transform (more generally, of certain singular integral operators with odd kernels) is known (\cite[Theorem~16.12]{BR:80}, \cite[Lemma~2.1]{S:80}, cf.~\cite[p.~55]{C:97}), and, since the inequality is in a suitable sense sharp (cf.~\cite[p.~29]{EOP:96}), it is easy to show that the boundedness of the Hilbert transform on rearrangement\hyp{}invariant function spaces is equivalent to the boundedness of a sum of two Hardy-type operators acting on functions of a single variable\textemdash namely $R+H$, where $H$ is defined for measurable functions $g$ on $(0,\infty)$ as
\begin{equation*}
Hg(t)=\int_t^\infty|g(s)|\frac1{s}\d{s},\ t\in(0,\infty).
\end{equation*}
Sharp inequalities (in a suitable sense) for the nonincreasing rearrangements of other important operators of harmonic analysis are also known; they involve weighted Hardy-type operators of the form
\begin{align}
(0,\infty)\ni t\mapsto v(t)\int_0^t|g(s)|\d{s}, \label{intro:Rv}
\intertext{and}
(0,\infty)\ni t\mapsto \int_0^t|g(s)|v(s)\d{s}, \label{intro:Hv}
\end{align}
in which $v$ is a fixed positive nonincreasing function on $(0,\infty)$ and $g$ is the measurable function on $(0,\infty)$ on which they act. Operators admitting such estimates include certain convolution operators (\cite{On:63,EOP:96}), of which the Riesz potential is a prototypical example, as well as the fractional maximal operator and its variants (\cite[Theorem~1.1]{CKOP:00}, \cite[Theorem~3.1]{EO:02}). The interested reader can find more information on the boundedness of some classical operators of harmonic analysis on rearrangement\hyp{}invariant function spaces in \cite{EMMP:20}.
Much as sharp inequalities for the nonincreasing rearrangements of operators are desired, the number of operators for which such sharp inequalities are known is limited. Nevertheless, what is often at our disposal is at least an upper bound on the nonincreasing rearrangement of a given operator $T$. Obviously, the better we control the upper bound, the better we control the operator $T$. Weighted Hardy-type operators such as those defined by \eqref{intro:Rv} and \eqref{intro:Hv} (or their more complicated forms, which are to be introduced soon) for suitable functions $v$ often serve as such upper bounds. It is worth noting that such upper bounds for various maximal operators may actually involve a Hardy-type operator inside a supremum (see \cite{L:05} and references therein), but the supremum usually does not cause any trouble (see \cite[Lemma~4.10]{EMMP:20}, cf.~\cite[Theorem~3.9]{KP:06}). For example, consider the fractional maximal operator $M_\gamma$ defined for measurable functions $f$ on $\R^n$, $\gamma\in(0,n)$, as
\begin{equation*}
M_\gamma f(x) = \sup_{Q \ni x}\frac1{|Q|^{1-\frac{\gamma}{n}}}\int_Q |f(y)|\d{y},\ x\in\R^n,
\end{equation*}
in which the supremum extends over all cubes in $\R^n$ whose edges are parallel to the coordinate axes. The following pointwise inequality, which is also in a suitable sense sharp, is valid (\cite[Example~1]{L:05}, \cite[Theorem~1.1]{CKOP:00}):
\begin{equation*}
(M_\gamma f)^*(t) \lesssim \sup_{t< s < \infty}s^{\frac{\gamma}{n} - 1}\int_0^s f^*(\tau) \d{\tau} \quad \text{for every $t\in(0,\infty)$},
\end{equation*}
in which the multiplicative constant depends only on $\gamma$ and $n$. Now, while the operators
\begin{equation*}
f\mapsto \sup_{(\cdot)< s < \infty}s^{\frac{\gamma}{n} - 1}\int_0^s f^*(\tau) \d{\tau} \quad \text{and} \quad f\mapsto (\cdot)^{\frac{\gamma}{n} - 1}\int_0^{(\cdot)} f^*(s) \d{s}
\end{equation*}
are not pointwise equivalent, it turns out that their norms in rearrangement\hyp{}invariant function spaces are equivalent; it follows from \cite[Lemma~4.10]{EMMP:20} (combined with the Hardy--Littlewood inequality) that the supremum operator is bounded from a rearrangement\hyp{}invariant function space to a rearrangement\hyp{}invariant function space if and only if the integral Hardy-type operator defined by \eqref{intro:Rv} with $v(t)=t^{\gamma/n - 1}$ is.
Reductions of complicated questions (often involving functions of several variables) to simpler ones of whether certain Hardy-type operators (acting on functions of a single variable) are bounded, are sometimes also achieved with the right use of interpolation or by making use of some intrinsic properties of the problem in question (such as symmetrization principles and isoperimetric inequalities, which are of great use in studying Sobolev spaces). Such approaches have been notably successful in connection with various embeddings of Sobolev-type spaces built upon rearrangement\hyp{}invariant function spaces into rearrangement\hyp{}invariant function spaces. For a wide variety of such embeddings, either complete characterizations (\cite{KP:06, CPS:20, CPS:15, CP:16, CP:09, CKP:08, ACPL:18, M:21, BC:21}) or at least sufficient and/or necessary conditions (\cite{CiaMaz:16, CiaMaz:20, CPS:20, M:21b}) for their validity by means of boundedness of Hardy-type operators have been obtained. For example, consider the Sobolev-type embedding
\begin{equation}\label{intr:Sob_emb}
W^m X(\Omega) \hookrightarrow Y(\overline{\Omega},\nu),
\end{equation}
in which $W^mX(\Omega)$ is the $m$th order Sobolev space built upon a rearrangement\hyp{}invariant function space $X$ over a bounded Lipschitz domain $\Omega$ in $\R^n$, $m<n$, $m\in\mathbb{N}$, and $Y$ is a rearrangement\hyp{}invariant function space over $\overline{\Omega}$ endowed with a $d$-upper Ahlfors measure $\nu$, that is, a finite Borel measure $\nu$ on $\overline{\Omega}$ satisfying
\begin{equation*}
\sup_{x\in\R^n, r>0}\frac{\nu(B_r(x) \cap \overline{\Omega})}{r^d}<\infty
\end{equation*}
with $d\in(0, n]$, in which $B_r(x)$ is the open ball centered at $x$ with radius $r$. When $d\in[n-m,n]$, it was shown in \cite[Theorem~4.1]{CPS:20} that the embedding \eqref{intr:Sob_emb} is valid (in the sense of traces) if the Hardy-type operator defined for measurable functions $g$ on $(0,1)$ as
\begin{equation*}
(0,1)\ni t\mapsto \int_{t^\frac{n}{d}}^1 |g(s)| s^{-1+\frac{m}{n}}\d{s}
\end{equation*}
is bounded from $X(0,1)$ to $Y(0,1)$ (the representation spaces of $X$ and $Y$ over the interval $(0,1)$). Moreover, if there is a point $x_0\in\overline{\Omega}$ for which the exponent $d$ is sharp, that is,
\begin{equation*}
\inf_{r\in(0,1)}\frac{\nu(B_r(x_0) \cap \overline{\Omega})}{r^d}>0,
\end{equation*}
then the boundedness of the Hardy-type operator is also necessary for the validity of \eqref{intr:Sob_emb} (\cite[Theorem~4.3]{CPS:20}). Finally, when $d\in(0,n-m)$, it follows from \cite[Theorem~5.1]{CPS:20} that the embedding \eqref{intr:Sob_emb} is valid if two weighted Hardy-type operators are bounded from $X(0,1)$ to $Y(0,1)$\textemdash namely the same one as in the case $d\in[n-m,n]$ and the one defined for measurable functions $g$ on $(0,1)$ as
\begin{equation*}
(0,1)\ni t\mapsto t^{-\frac{m}{n-d}}\int_0^{t^\frac{n}{d}} |g(s)| s^{-1+\frac{m}{n-d}}\d{s}.
\end{equation*}
We note for the interested reader that, if this is the case, then the rearrangement\hyp{}invariant function space $Y$ in \eqref{intr:Sob_emb} can actually be replaced by a rearrangement\hyp{}invariant function space that is in some cases smaller than $Y$ (see \cite{T:preprint}).
In this paper, we introduce the Hardy-type operators $R_{u,v,\nu}$ and $H_{u,v,\nu}$, of which the Hardy-type operators mentioned in the preceding paragraphs are special instances, defined for measurable functions $g$ on $(0,L)$, $L\in(0,\infty]$, as
\begin{align}
R_{u,v,\nu}g(t)&=v(t)\int_0^{\nu(t)}|g(s)|u(s)\d{s},\ t\in(0,L), \label{opRdef}
\intertext{and}
H_{u,v,\nu}g(t)&=u(t)\int_{\nu(t)}^L|g(s)|v(s)\d{s},\ t\in(0,L), \label{opHdef}
\end{align}
where $u, v$ are nonnegative nonincreasing functions on $(0,L)$ and $\nu$ is an increasing bijection of the interval $(0,L)$ onto itself, and thoroughly study their behavior on rearrangement\hyp{}invariant function spaces, putting emphasis on the optimality of our results. First, after fixing some notation and recalling some preliminary results in \myref{Section}{sec:prel}, we characterize the optimal domain and the optimal target rearrangement\hyp{}invariant function space for the operators when the rearrangement\hyp{}invariant function space on the other side is fixed in \myref{Section}{sec:optimal_norms}. More precisely, given one of the operators and a rearrangement\hyp{}invariant function space $X$, we describe the largest and the smallest rearrangement\hyp{}invariant function space $Y$ (in other words, the weakest and the strongest rearrangement\hyp{}invariant function norm $\|\cdot\|_Y$) such that the operator is bounded from $Y$ to $X$ and from $X$ to $Y$, respectively. As a simple corollary, we also obtain a description of the optimal rearrangement\hyp{}invariant function spaces for a sum of the two operators (each with possibly different functions $u,v,\nu$), though the description is less explicit than it could be if we studied directly the sum. Next, in \myref{Section}{sec:simplification}, we take a close look at how to simplify the description of the optimal rearrangement\hyp{}invariant function norms for the Hardy-type operators and whether it is possible, for the simpler and more manageable description we have at our disposal, the more useful it is. It turns out that this problem is more complex than it may appear at first glance, as is often the case in mathematics. Last, in \myref{Section}{sec:iteration}, we investigate the optimal behavior of iterated Hardy-type operators\textemdash namely $R_{u_1,v_1,\nu_1}\circ R_{u_2,v_2,\nu_2}$ and $H_{u_1,v_1,\nu_1}\circ H_{u_2,v_2,\nu_2}$\textemdash on rearrangement\hyp{}invariant function spaces, explaining why it is of interest to study such operators at the beginning of the section.
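Let us note, for orientation, how the operators encountered earlier in this introduction fit into this framework. Taking $L=\infty$, $\nu$ the identity map of $(0,\infty)$, $u\equiv1$ and $v(t)=\frac1t$ turns $R_{u,v,\nu}$ into the operator $R$ and $H_{u,v,\nu}$ into the operator $H$; the operator \eqref{intro:Rv} is $R_{u,v,\nu}$ with $u\equiv1$ and $\nu$ the identity, while \eqref{intro:Hv} is $R_{u,v,\nu}$ with the weight from \eqref{intro:Hv} playing the role of $u$, with $v\equiv1$ and $\nu$ the identity; finally, the two operators appearing in the discussion of \eqref{intr:Sob_emb} are $H_{u,v,\nu}$ with $L=1$, $u\equiv1$, $v(s)=s^{-1+\frac{m}{n}}$, $\nu(t)=t^{\frac{n}{d}}$, and $R_{u,v,\nu}$ with $L=1$, $u(s)=s^{-1+\frac{m}{n-d}}$, $v(t)=t^{-\frac{m}{n-d}}$, $\nu(t)=t^{\frac{n}{d}}$, respectively.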
In considerably less general settings, the questions mentioned in the preceding paragraph were already studied in some of the papers cited in the fifth paragraph and in \cite{EMMP:20} (cf.~\cite{ST:16, OS:07}); however, not only are those results limited to some particular choices of the functions $u$, $v$ and $\nu$ (namely, $u\equiv1$ and $v,\nu$ being power functions for the most part, but see \cite{GL:99, HS:98}), but they are also scattered and often hidden somewhere between the lines with varying degrees of generality. The aim of this paper is thoroughly address the questions in a coherent unified way so that the results obtained here will not only encompass their already-known particular cases but also provide a general theory suitable for various future applications. For example, thanks to our results, we get under control the optimal behavior of upper bounds, which are even sharp in some cases, for nonincreasing rearrangements of not only various less-standard (nonfractional and fractional) maximal operators (\cite{EO:02, L:05}) but also operators with certain behavior of their operator norms, which play a role in the a.e.~convergence of the partial spherical Fourier integrals or in the solvability of the Dirichlet problem for the Laplacian on planar domains (\cite{CaPr:09, CO-C:18, CMM:12} and references therein). Another possible application is related to traces of Sobolev functions. Although, as was already mentioned in the fifth paragraph, embeddings of Sobolev-type spaces into rearrangement\hyp{}invariant function spaces with respect to $d$-upper Ahlfors measures were thoroughly studied in \cite{CPS:20}, there are $d$-dimensional sets (say, in the sense of the Hausdorff dimension) $\Omega_d\subseteq\R^n$, $d\in(0,n]$, that are ``unrecognizable'' by $d$-upper Ahlfors measures $\nu$ (i.e., it may happen that $\nu(\Omega_d)=0$ for every $d$-upper Ahlfors measure $\nu$). For instance, this is (almost surely) the case when $\Omega_d$ is a Brownian path in $\R^n$, $n\geq2$, which has (almost surely) the Hausdorff dimension $2$ but is unrecognizable by $2$-upper Ahlfors measures; other measure functions (see~\cite{C:67}) than power functions have to be considered to rectify the situation (\cite{ET:61, CT:62}). Inevitably, if one is to generalize the results of \cite{CPS:20} to cover such exceptional sets, one will need to deal with general enough Hardy-type operators, whose optimal behavior on rearrangement\hyp{}invariant function spaces is extensively studied here.
General as the results proved in this paper are, we do usually impose some mild restrictions on the functions $u$, $v$, $\nu$ so that we can obtain interesting, strong results; however, the imposed assumptions on the functions are actually not too restrictive for the most part and often exclude only cases being in a way pathological. The assumptions also often reflect the very forms of the Hardy-type operators considered here. In particular, the operators do not involve kernels. Although Hardy-type operators with kernels are undoubtedly of great importance, too, and to investigate thoroughly their behavior on rearrangement\hyp{}invariant function spaces would be of interest (e.g., \cite{CPS:15, AAB-MC:preprint}), it goes beyond the scope of this paper.
Finally, what is missing in this paper are particular examples of optimal function norms and related things. Given the generality of our setting, to provide an exhaustive list of examples would inevitably involve carrying out a large number of, to some extent straightforward but lengthy and technical, computations, which would make this paper unreasonably long and convoluted. The interested reader is referred to \cite{CP:16, EMMP:20, M:21} for some particular examples and hints on how to carry out the needed computations.
\section{Preliminaries}\label{sec:prel}
\begin{conventions}\hphantom{}
\begin{itemize}
\item Throughout the entire paper $L\in(0,\infty]$.
\item We adhere to the convention that $\frac1{\infty}=0\cdot\infty=0$.
\item We write $P\lesssim Q$, where $P,Q$ are nonnegative quantities, when there is a positive constant $c$ independent of all appropriate quantities appearing in the expressions $P$ and $Q$ such that $P\leq c\cdot Q$. If not stated explicitly, what ``the appropriate quantities appearing in the expressions $P$ and $Q$'' are should be obvious from the context. At the few places where it is not obvious, we will explicitly specify what the appropriate quantities are. We also write $P\gtrsim Q$ with the obvious meaning, and $P\approx Q$ when $P\lesssim Q$ and $P\gtrsim Q$ simultaneously.
\item When $A\subseteq(0,L)$ is a (Lebesgue) measurable set, $|A|$ stands for its Lebesgue measure.
\item When $u$ is a nonnegative measurable function defined on $(0,L)$, we denote by $U$ the function defined as $U(t)=\int_0^t u(s)\d{s}$, $t\in(0,L]$. We say that $u$ is \emph{nondegenerate} if there is $t_0\in(0 ,L)$ such that $0< U(t_0) < \infty$.
\end{itemize}
\end{conventions}
We set
\begin{align*}
\mathfrak{M}(0, L)&= \{f\colon \text{$f$ is a measurable function on $(0,L)$ with values in $[-\infty,\infty]$}\},\\
\mathfrak{M}_0(0,L)&= \{f \in \mathfrak{M}(0, L)\colon f \ \text{is finite}\ \text{a.e.~on $(0,L)$}\},\\
\intertext{and}
\mathfrak{M}^+(0,L)&= \{f \in \mathfrak{M}(0,L)\colon f \geq 0\ \text{a.e.~on $(0,L)$}\}.
\end{align*}
The \emph{nonincreasing rearrangement} $f^*\colon(0,\infty) \to [0, \infty]$ of a function $f\in \mathfrak{M}(0,L)$ is defined as
\begin{equation*}
f^*(t)=\inf\{\lambda\in(0,\infty)\colon |\{s\in (0,L)\colon|f(s)|>\lambda\}|\leq t\},\ t\in(0,\infty).
\end{equation*}
Note that $f^*(t)=0$ for every $t\in[L,\infty)$. We say that functions $f,g\in\mathfrak{M}(0,L)$ are \emph{equimeasurable}, and we write $f\sim g$, if $|\{s\in(0,L)\colon|f(s)|>\lambda\}|=|\{s\in (0,L)\colon|g(s)|>\lambda\}|$ for every $\lambda\in(0,\infty)$. We always have that $f\sim f^*$. The relation $\sim$ is transitive.
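For illustration, if, say, $L\geq3$ and $f=\chi_{(0,1)}+2\chi_{(2,3)}$, then $|\{s\in(0,L)\colon|f(s)|>\lambda\}|$ equals $2$ for $\lambda\in[0,1)$, equals $1$ for $\lambda\in[1,2)$ and vanishes for $\lambda\geq2$, whence $f^*=2\chi_{(0,1)}+\chi_{(1,2)}$ a.e.~on $(0,\infty)$.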
The \emph{maximal nonincreasing rearrangement} $f^{**} \colon (0,\infty) \to [0, \infty]$ of a function $f\in \mathfrak{M}(0, L)$ is
defined as
\begin{equation*}
f^{**}(t)=\frac1t\int_0^ t f^{*}(s)\d{s},\ t\in(0,\infty).
\end{equation*}
The mapping $f \mapsto f^*$ is monotone in the sense that, for every $f,g\in\mathfrak{M}(0,L)$,
\begin{equation*}
|f| \leq |g| \quad \text{a.e.~on $(0,L)$} \quad \Longrightarrow \quad f^* \leq g^* \quad \text{on $(0, \infty)$};
\end{equation*}
consequently, the same implication remains true if ${}^*$ is replaced by ${}^{**}$. We have that
\begin{equation}\label{ch1:ri:twostarsdominateonestar}
f^*\leq f^{**}\quad\text{for every $f\in\mathfrak{M}(0,L)$}.
\end{equation}
The operation $f\mapsto f^*$ is neither subadditive nor multiplicative. Although $f\mapsto f^*$ is not subadditive, the following pointwise inequality is valid for every $f,g\in\mathfrak{M}_0(0,L)$ (\cite[Chapter~2, Proposition~1.7, (1.16)]{BS}):
\begin{equation}\label{ch1:ri:halfsubadditivityofonestar}
(f + g)^*(t) \leq f^*\Big(\frac{t}{2}\Big) + g^*\Big(\frac{t}{2}\Big) \quad \text{for every $t\in(0,L)$}.
\end{equation}
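The failure of subadditivity itself can be seen, for instance (assuming $L\geq2$), from $f=\chi_{(0,1)}$ and $g=\chi_{(1,2)}$: then $(f+g)^*=\chi_{(0,2)}$ while $f^*+g^*=2\chi_{(0,1)}$, so $(f+g)^*(t)>f^*(t)+g^*(t)$ for every $t\in(1,2)$.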
Furthermore, the lack of subadditivity of the operation of taking the nonincreasing rearrangement is, up to some extent, compensated by the following fact (\cite[Chapter~2,~(3.10)]{BS}):
\begin{equation}\label{ch1:ri:subadditivityofdoublestar}
\int_0^t(f+g)^*(s)\d{s}\leq\int_0^tf^*(s)\d{s}+\int_0^tg^*(s)\d{s}
\end{equation}
for every $t\in(0,\infty)$, $f,g\in\mathfrak{M}_0(0,L)$. In other words, the operation $f\mapsto f^{**}$ is subadditive.
There are a large number of inequalities concerning rearrangements (e.g., \cite[Chapter~II, Section~2]{KPS:82}, \cite{HLP:52}). We state two of them, which shall prove particularly useful for us. The \emph{Hardy--Littlewood inequality} (\cite[Chapter~2, Theorem~2.2]{BS}) tells us that, for every $f,g\in\mathfrak{M}(0,L)$,
\begin{equation}\label{ch1:ri:HL}
\int_0^L |f(t)||g(t)|\d{t}\leq\int_0^Lf^*(t)g^*(t)\d{t}.
\end{equation}
In particular, by taking $g=\chi_E$ in \eqref{ch1:ri:HL}, one obtains that
\begin{equation}\label{ch1:ri:HLg=chiE}
\int_E |f(t)|\d{t}\leq\int_0^{|E|}f^*(t)\d{t}
\end{equation}
for every measurable $E\subseteq (0,L)$. The \emph{Hardy lemma} (\cite[Chapter~2, Proposition~3.6]{BS}) states that, for every $f,g\in\mathfrak{M}^+(0,\infty)$ and every nonincreasing $h\in\mathfrak{M}^+(0,\infty)$,
\begin{equation}\label{ch1:ri:hardy-lemma}
\begin{aligned}
&\text{if}\quad\int_0^tf(s)\d{s}\leq \int_0^tg(s)\d{s}\quad\text{for all $t\in(0,\infty)$,}\\
&\text{then}\quad\int_0^\infty f(t)h(t)\d{t}\leq \int_0^\infty g(t)h(t)\d{t}.
\end{aligned}
\end{equation}
A functional $\|\cdot\|_{X(0,L)}\colon\mathfrak{M}^+(0,L)\to[0,\infty]$ is called a \emph{rearrangement\hyp{}invariant function norm} (on $(0,L)$) if, for all $f$, $g$ and $\{f_k\}_{k=1}^\infty$ in $\mathfrak{M}^+(0,L)$, and every $\lambda\in[0,\infty)$:
\begin{itemize}
\item[(P1)] $\|f\|_{X(0,L)}=0$ if and only if $f=0$ a.e.~on $(0,L)$; $\|\lambda f\|_{X(0,L)}= \lambda \|f\|_{X(0,L)}$; $\|f+g\|_{X(0,L)}\leq \|f\|_{X(0,L)} + \|g\|_{X(0,L)}$;
\item[(P2)] $\|f\|_{X(0,L)}\leq\|g\|_{X(0,L)}$ if $ f\leq g$ a.e.~on $(0,L)$;
\item[(P3)] $\|f_k\|_{X(0,L)} \nearrow \|f\|_{X(0,L)}$ if $f_k \nearrow f$ a.e.~on $(0,L)$;
\item[(P4)] $\|\chi_E\|_{X(0,L)}<\infty$ for every measurable $E\subseteq(0, L)$ of finite measure;
\item[(P5)] for every measurable $E\subseteq(0, L)$ of finite measure, there is a positive, finite constant $C_{E,X}$, possibly depending on $E$ and $\|\cdot\|_{X(0,L)}$ but not on $f$, such that $\int_E f(t)\d{t} \leq C_{E,X} \|f\|_{X(0,L)}$;
\item[(P6)] $\|f\|_{X(0,L)} = \|g\|_{X(0,L)}$ whenever $f\sim g$.
\end{itemize}
The \textit{Hardy--Littlewood--P\'olya principle} (\cite[Chapter~2, Theorem~4.6]{BS}) asserts that, for every $f,g\in\mathfrak{M}(0,L)$ and every rearrangement\hyp{}invariant function norm $\|\cdot\|_{X(0,L)}$,
\begin{equation}\label{ch1:ri:HLP}
\text{if $\int_0^tf^*(s)\d{s}\leq \int_0^tg^*(s)\d{s}$ for all $t\in(0,L)$, then $\|f\|_{X(0,L)}\leq \|g\|_{X(0,L)}$}.
\end{equation}
With every rearrangement\hyp{}invariant function norm $\|\cdot\|_{X(0,L)}$, we associate another functional $\|\cdot\|_{X'(0,L)}$ defined as
\begin{equation}\label{ch1:ri:normX'}
\|f\|_{X'(0,L)}= \sup_{\substack{g\in{\mathfrak{M}^+(0,L)}\\\|g\|_{X(0,L)}\leq1}}\int_0^Lf(t)g(t)\d{t},\ f\in\mathfrak{M}^+(0,L).
\end{equation}
The functional $\|\cdot\|_{X'(0,L)}$ is also a rearrangement\hyp{}invariant function norm (\cite[Chapter~2, Proposition~4.2]{BS}), and it is called the \emph{associate function norm} of $\|\cdot\|_{X(0,L)}$. Furthermore, we always have that (\cite[Chapter~1, Theorem~2.7]{BS})
\begin{equation}\label{ch1:ri:normX''}
\|f\|_{X(0,L)}= \sup_{\substack{g\in{\mathfrak{M}^+(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lf(t)g(t)\d{t} \quad\text{for every $f\in\mathfrak{M}^+(0,L)$},
\end{equation}
that is,
\begin{equation}\label{ch1:ri:X''=X}
\|\cdot \|_{(X')'(0,L)} = \|\cdot \|_{X(0,L)}.
\end{equation}
Consequently, statements like ``Let $\|\cdot\|_{X(0,L)}$ be \emph{the} rearrangement\hyp{}invariant function norm whose associate function norm is \dots'' are well justified. The supremum in \eqref{ch1:ri:normX''} does not change when the functions involved are replaced with their nonincreasing rearrangements (\cite[Chapter~2, Proposition~4.2]{BS}), that is,
\begin{equation}\label{ch1:ri:normX''down}
\|f\|_{X(0,L)}= \sup_{\substack{g\in{\mathfrak{M}^+(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lf^*(t)g^*(t)\d{t} \quad\text{for every $f\in\mathfrak{M}^+(0,L)$}.
\end{equation}
Given a rearrangement\hyp{}invariant function norm $\|\cdot\|_{X(0,L)}$, we extend it from $\mathfrak{M}^+(0,L)$ to $\mathfrak{M}(0,L)$ by $\|f\|_{X(0,L)}=\|\,|f|\,\|_{X(0,L)}$. The extended functional $\|\cdot\|_{X(0,L)}$ restricted to the linear set $X(0,L)$ defined as
\begin{equation*}
X(0, L)=\{f\in\mathfrak{M}(0,L)\colon \|f\|_{X(0,L)}<\infty\}
\end{equation*}
is a norm (provided that we identify any two functions from $\mathfrak{M}(0,L)$ coinciding a.e.~on $(0,L)$, as usual). In fact, $X(0,L)$ endowed with the norm $\|\cdot\|_{X(0,L)}$ is a Banach space (\cite[Chapter~1, Theorem~1.6]{BS}). We say that $X(0,L)$ is a \emph{rearrangement\hyp{}invariant function space}. Note that $f\in\mathfrak{M}(0,L)$ belongs to $X(0,L)$ if and only if $\|f\|_{X(0,L)}<\infty$. We always have that
\begin{equation}\label{ch1:ri:XembeddedinM0}
S(0,L)\subseteq X(0,L)\subseteq\mathfrak{M}_0(0,L),
\end{equation}
where $S(0,L)$ denotes the set of all simple functions on $(0,L)$ (by a simple function, we mean a (finite) linear combination of characteristic functions of measurable sets having finite measure). Moreover, the second inclusion is continuous if the linear set $\mathfrak{M}_0(0,L)$ is endowed with the (metrizable) topology of convergence in measure on sets of finite measure (\cite[Chapter~1, Theorem~1.4]{BS}).
The class of rearrangement\hyp{}invariant function spaces contains a large number of customary function spaces, such as Lebesgue spaces $L^p$ ($p\in[1,\infty]$), Lorentz spaces $L^{p,q}$ (e.g., \cite[pp.~216--220]{BS}), Orlicz spaces (e.g., \cite{RR:91}) or Lorentz--Zygmund spaces (e.g., \cite{BR:80, OP:99}), to name but a few. Here we provide definitions of only those rearrangement\hyp{}invariant function norms that we shall explicitly need. For $p\in[1,\infty]$, we define the Lebesgue function norm $\|\cdot\|_{L^p(0,L)}$ as
\begin{equation*}
\|f\|_{L^p(0,L)} = \begin{cases}
\Big(\int_0^L f(t)^p \d{t}\Big)^{\frac1p} \quad &\text{if $p\in[1,\infty)$},\\
\esssup_{t\in(0,L)} f(t) \quad &\text{if $p=\infty$},
\end{cases}
\end{equation*}
$f\in\mathfrak{M}^+(0,L)$. Given a measurable function $v\colon(0,L)\to(0,\infty)$ such that $V(t)<\infty$ for every $t\in(0,L)$, where $V(t)=\int_0^t v(s)\d{s}$, we define the functional $\|\cdot\|_{\Lambda^1_v(0,L)}$ as
\begin{equation*}
\|f\|_{\Lambda^1_v(0,L)} = \int_0^L f^*(s) v(s) \d{s},\ f\in\mathfrak{M}^+(0,L).
\end{equation*}
The functional is equivalent to a rearrangement\hyp{}invariant function norm if and only if (\cite[Theorem~2.3]{CGS:96}, see also \cite[Proposition~1]{S:93} with regard to local embedding of $\Lambda^1_v(0,L)$ in $L^1(0,L)$)
\begin{equation}\label{ch1:ri:when_is_Lambda1_ri_norm}
\frac{V(t)}{t}\lesssim\frac{V(s)}{s}\quad\text{for every $0<s< t<L$}.
\end{equation}
By the fact that it is equivalent to a rearrangement\hyp{}invariant function norm, we mean that there is a rearrangement\hyp{}invariant function norm $\varrho$ on $(0,L)$ such that $\|f\|_{\Lambda^1_v(0,L)} \approx \varrho(f)$ for every $f\in\mathfrak{M}^+(0,L)$; hence we can treat $\Lambda^1_v(0,L)$ as a rearrangement\hyp{}invariant function space whenever \eqref{ch1:ri:when_is_Lambda1_ri_norm} is satisfied. Let $\psi\colon (0,L) \to (0,\infty)$ be a \emph{quasiconcave function}, that is, a nondecreasing function such that the function $(0,L)\ni t\mapsto \frac{\psi(t)}{t}$ is nonincreasing. The functional $\|\cdot\|_{M_\psi(0,L)}$ defined as
\begin{equation*}
\|f\|_{M_\psi(0,L)} = \sup_{t\in(0,L)} \psi(t) f^{**}(t),\ f\in\mathfrak{M}^+(0,L),
\end{equation*}
is a rearrangement\hyp{}invariant function norm (\cite[Proposition~7.10.2]{PKJF:13}).
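Two standard instances, recalled for illustration: if $v(s)=s^{\frac1p-1}$, $s\in(0,L)$, for some $p\in[1,\infty)$, then $V(t)=pt^{\frac1p}$, condition \eqref{ch1:ri:when_is_Lambda1_ri_norm} is satisfied, and $\Lambda^1_v(0,L)$ is (up to the normalization of its norm) the Lorentz space $L^{p,1}(0,L)$; if $\psi(t)=t^{\frac1p}$ for some $p\in(1,\infty)$, then $M_\psi(0,L)$ coincides, up to equivalent norms, with the weak Lebesgue space $L^{p,\infty}(0,L)$.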
The rearrangement\hyp{}invariant function space $X'(0,L)$ built upon the associate function norm $\|\cdot\|_{X'(0,L)}$ of a rearrangement\hyp{}invariant function norm $\|\cdot\|_{X(0,L)}$ is called the \emph{associate function space} of $X(0,L)$. Thanks to \eqref{ch1:ri:X''=X}, we have that $(X')'(0,L)=X(0,L)$. Furthermore, one has that
\begin{equation}\label{ch1:ri:holder}
\int_0^L |f(t)||g(t)|\d{t}\leq \|f\|_{X(0,L)}\|g\|_{X'(0,L)}\quad\text{for every $f,g\in\mathfrak{M}(0,L)$}.
\end{equation}
Inequality \eqref{ch1:ri:holder} is a H\"older-type inequality, and we shall refer to it as the H\"older inequality.
Let $X(0,L)$ and $Y(0,L)$ be rearrangement\hyp{}invariant function spaces. We say that $X(0,L)$ is \emph{embedded in} $Y(0,L)$, and we write $X(0,L)\hookrightarrow Y(0,L)$, if there is a positive constant $C$ such that $\|f\|_{Y(0,L)}\leq C\|f\|_{X(0,L)}$ for every $f\in\mathfrak{M}(0,L)$. If $X(0,L)\hookrightarrow Y(0,L)$ and $Y(0,L)\hookrightarrow X(0,L)$ simultaneously, we write that $X(0,L)=Y(0,L)$. We have that (\cite[Chapter~1, Theorem~1.8]{BS})
\begin{equation}\label{ch1:ri:inclusion_is_always_continuous}
X(0,L)\hookrightarrow Y(0,L)\quad\text{if and only if}\quad X(0,L)\subseteq Y(0,L).
\end{equation}
Furthermore,
\begin{equation}\label{ch1:ri:XtoYiffY'toX'}
X(0,L)\hookrightarrow Y(0,L)\quad\text{if and only if}\quad Y'(0,L)\hookrightarrow X'(0,L)
\end{equation}
with the same embedding constants.
If $\|\cdot\|_{X(0,L)}$ and $\|\cdot\|_{Y(0,L)}$ are rearrangement\hyp{}invariant function norms, then so are $\|\cdot\|_{X(0,L) \cap Y(0,L)}$ and $\|\cdot\|_{(X + Y)(0,L)}$ defined as
\begin{align*}
\|f\|_{X(0,L) \cap Y(0,L)} &= \max\{\|f\|_{X(0,L)}, \|f\|_{Y(0,L)}\},\ f\in\mathfrak{M}^+(0,L),
\intertext{and}
\|f\|_{(X + Y)(0,L)} &= \inf_{f=g+h}(\|g\|_{X(0,L)} + \|h\|_{Y(0,L)}),\ f\in\mathfrak{M}^+(0,L),
\end{align*}
where the infimum extends over all possible decompositions $f=g+h$, $g,h\in\mathfrak{M}^+(0,L)$. Furthermore, we have that (\cite[Theorem~3.1]{L:78}, also \cite[Lemma~1.12]{CNS:03})
\begin{equation}\label{ch1:ri:dual_sum_and_inter}
(X(0,L) \cap Y(0,L))' = (X' + Y')(0,L) \quad \text{and} \quad (X + Y)'(0,L) = X'(0,L) \cap Y'(0,L)
\end{equation}
with equality of norms. The \emph{$\K$-functional} between $X(0,L)$ and $Y(0,L)$ is, for every $f\in (X + Y)(0,L)$ and $t\in(0,\infty)$, defined as
\begin{equation*}
\K(f, t; X, Y) = \inf_{f=g+h}(\|g\|_{X(0,L)} + t\|h\|_{Y(0,L)}),
\end{equation*}
where the infimum extends over all possible decompositions $f=g+h$ with $g\in X(0,L)$ and $h\in Y(0,L)$. For every $f\in (X + Y)(0,L)\setminus\{0\}$, $\K(f, \cdot; X,Y)$ is a positive increasing concave function on $(0,\infty)$ (\cite[Chapter~5, Proposition~1.2]{BS}).
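A classical example, recalled for illustration: for the couple $(L^1(0,L),L^\infty(0,L))$ one has
\begin{equation*}
\K(f,t;L^1,L^\infty)=\int_0^tf^*(s)\d{s}\quad\text{for every $f\in(L^1+L^\infty)(0,L)$ and $t\in(0,\infty)$}
\end{equation*}
(see, e.g., \cite{BS}).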
Let $X_0(0,L)$ and $X_1(0,L)$ be rearrangement\hyp{}invariant function spaces. We say that a rearrangement\hyp{}invariant function space $X(0,L)$ is an \emph{intermediate space} between $X_0(0,L)$ and $X_1(0,L)$ if $X_0(0,L) \cap X_1(0,L)\hookrightarrow X(0,L) \hookrightarrow (X_0 + X_1)(0,L)$. A linear operator $T$ defined on $(X_0 + X_1)(0,L)$ having values in $(X_0 + X_1)(0,L)$ is said to be \emph{admissible} for the couple $(X_0(0,L), X_1(0,L))$ if $T$ is bounded on both $X_0(0,L)$ and $X_1(0, L)$. An intermediate space $X(0,L)$ between $X_0(0,L)$ and $X_1(0,L)$ is an \emph{interpolation space} with respect to the couple $(X_0(0,L), X_1(0,L))$ if every admissible operator for the couple is bounded on $X(0,L)$. By \cite[Theorem~3]{C:66}, $X(0,L)$ is always an interpolation space with respect to the couple $(L^1(0,L), L^\infty(0,L))$.
We always have that (\cite[Chapter~2, Theorem~6.6]{BS})
\begin{equation*}
L^1(0,L)\cap L^\infty(0,L)\hookrightarrow X(0,L)\hookrightarrow L^1(0,L) + L^\infty(0,L).
\end{equation*}
In particular,
\begin{equation}\label{ch1:ri:smallestandlargestrispacefinitemeasure}
L^\infty(0,L)\hookrightarrow X(0,L)\hookrightarrow L^1(0,L)
\end{equation}
provided that $L<\infty$.
The \emph{dilation operator} is bounded on every rearrangement\hyp{}invariant function space $X(0,L)$. More precisely, we have that (\cite[Chapter~3, Proposition~5.11]{BS})
\begin{equation}\label{ch1:ri:dilation}
\|D_af\|_{X(0,L)}\leq\max\{1,a\}\|f\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}(0,L)$},
\end{equation}
where $D_af$ is defined on $(0,L)$ as
\begin{equation*}
D_af(t)=\begin{cases}
f(\frac{t}{a}), \quad &\text{if $L=\infty$},\\
f(\frac{t}{a})\chi_{(0, aL)}(t), \quad &\text{if $L<\infty$}.
\end{cases}
\end{equation*}
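For illustration, when $L=\infty$ we have $(D_af)^*(t)=f^*(\frac{t}{a})$ for every $t\in(0,\infty)$, and so $\|D_af\|_{L^p(0,\infty)}=a^{\frac1p}\|f\|_{L^p(0,\infty)}$ for every $p\in[1,\infty)$, in accordance with \eqref{ch1:ri:dilation}.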
When $\|\cdot\|_{X(0,L)}$ is a rearrangement\hyp{}invariant function norm, we define its \emph{fundamental function} $\varphi_{X(0,L)}$ as
\begin{equation*}
\varphi_{X(0,L)}(t)=\|\chi_E\|_{X(0,L)},\ t\in[0, L),
\end{equation*}
where $E$ is any measurable subset of $(0,L)$ such that $|E|=t$. The fundamental function is well defined thanks to property (P6) of rearrangement\hyp{}invariant function norms and is a quasiconcave function. The fundamental functions of $\|\cdot\|_{X(0,L)}$ and $\|\cdot\|_{X'(0,L)}$ satisfy (\cite[Chapter~2, Theorem~5.2]{BS}) that
\begin{equation*}
\varphi_{X(0,L)}(t)\varphi_{X'(0,L)}(t)=t\quad\text{for every $t\in[0,L)$}.
\end{equation*}
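For instance, if $p\in(1,\infty)$ and $\frac1p+\frac1{p'}=1$, then $\varphi_{L^p(0,L)}(t)=t^{\frac1p}$ and, since $(L^p)'(0,L)=L^{p'}(0,L)$, the identity above reduces to $t^{\frac1p}t^{\frac1{p'}}=t$.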
\section{Hardy-type operators on r.i.~spaces}
We start with an easy but useful observation concerning the Hardy-type operators defined by \eqref{opRdef} and \eqref{opHdef}. Let $u,v\colon(0,L)\to(0,\infty)$ be measurable functions. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. The operators $R_{u,v,\nu}$ and $H_{u,v,\nu^{-1}}$, where $\nu^{-1}$ is the inverse function to $\nu$, are in a sense dual to each other. More precisely, by using the Fubini theorem, one can easily verify that
\begin{equation}\label{RaHdual}
\int_0^Lf(t)R_{u,v,\nu}g(t)\d{t}=\int_0^Lg(t)H_{u,v,\nu^{-1}}f(t)\d{t}\quad\text{for every $f,g\in\mathfrak{M}^+(0,L)$}.
\end{equation}
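Spelling out the computation: since $\nu$ is an increasing bijection of $(0,L)$ onto itself, $s<\nu(t)$ if and only if $\nu^{-1}(s)<t$, and hence, by the Fubini theorem,
\begin{equation*}
\int_0^Lf(t)v(t)\int_0^{\nu(t)}g(s)u(s)\d{s}\d{t}=\int_0^Lg(s)u(s)\int_{\nu^{-1}(s)}^Lf(t)v(t)\d{t}\d{s}
\end{equation*}
for every $f,g\in\mathfrak{M}^+(0,L)$, which, as $f$ and $g$ are nonnegative a.e., is precisely \eqref{RaHdual}.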
The validity of \eqref{RaHdual} has an unsurprising, well-known consequence, which we state here for future reference (see also \myref{Corollary}{cor:copsonrestrvsunrestr}).
\begin{proposition}\label{prop:RXtoYbddiffHY'toX'}
Let $\|\cdot\|_{X(0,L)}$, $\|\cdot\|_{Y(0,L)}$ be rearrangement\hyp{}invariant function norms. Let $u,v\colon(0,L)\to(0,\infty)$ be measurable. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. We have that
\begin{equation}\label{prop:RXtoYbddiffHY'toX':normRXtoZ=normHZ'toX'}
\sup_{\|f\|_{X(0,L)}\leq1}\|R_{u,v,\nu}f\|_{Y(0,L)}=\sup_{\|g\|_{Y'(0,L)}\leq1}\|H_{u,v,\nu^{-1}}g\|_{X'(0,L)}.
\end{equation}
In particular,
\begin{align}\label{prop:RXtoYbddiffHY'toX':RXtoZiffHZ'toX'}
\begin{split}
R_{u,v,\nu}\colon X(0,L)\to Y(0,L)\quad&\text{is bounded if and only if}\\
H_{u,v,\nu^{-1}}\colon Y'(0,L)\to X'(0,L)\quad&\text{is bounded}.
\end{split}
\end{align}
\end{proposition}
\begin{proof}
We have that
\begin{align*}
\sup_{\|f\|_{X(0,L)}\leq1}\|R_{u,v,\nu}f\|_{Y(0,L)}&=\sup_{\|f\|_{X(0,L)}\leq1}\sup_{\|g\|_{Y'(0,L)}\leq1}\int_0^LR_{u,v,\nu}f(t)|g(t)|\d{t}\\
&=\sup_{\|f\|_{X(0,L)}\leq1}\sup_{\|g\|_{Y'(0,L)}\leq1}\int_0^L|f(t)|H_{u,v,\nu^{-1}}g(t)\d{t}\\
&=\sup_{\|g\|_{Y'(0,L)}\leq1}\|H_{u,v,\nu^{-1}}g\|_{X'(0,L)}
\end{align*}
thanks to \eqref{ch1:ri:normX''}, \eqref{RaHdual} and \eqref{ch1:ri:normX'}.
\end{proof}
\subsection{Optimal r.i.~function norms}\label{sec:optimal_norms}
In this section, we shall investigate optimal mapping properties of the operators $H_{u,v,\nu}$, $R_{u,v,\nu}$. Let $T$ be one of them. We say that a rearrangement\hyp{}invariant function space $Y(0,L)$ is \emph{the optimal target space} for the operator $T$ and a rearrangement\hyp{}invariant function space $X(0,L)$ if $T\colon X(0,L)\to Y(0,L)$ is bounded and $Y(0,L)\hookrightarrow Z(0,L)$ whenever $Z(0,L)$ is a rearrangement\hyp{}invariant function space such that $T\colon X(0,L)\to Z(0,L)$ is bounded (in other words, $\|\cdot\|_{Y(0,L)}$ is the strongest target rearrangement\hyp{}invariant function norm for $T$ and $\|\cdot\|_{X(0,L)}$). We say that a rearrangement\hyp{}invariant function space $X(0,L)$ is \emph{the optimal domain space} for the operator $T$ and a rearrangement\hyp{}invariant function space $Y(0,L)$ if $T\colon X(0,L)\to Y(0,L)$ is bounded and $Z(0,L)\hookrightarrow X(0,L)$ whenever $Z(0,L)$ is a rearrangement\hyp{}invariant function space such that $T\colon Z(0,L)\to Y(0,L)$ is bounded (in other words, $\|\cdot\|_{X(0,L)}$ is the weakest domain rearrangement\hyp{}invariant function norm for $T$ and $\|\cdot\|_{Y(0,L)}$).
We start by characterizing when the functional $\mathfrak{M}pl(0,L)\ni f\mapsto \|R_{u,v,\nu}(f^*)\|_{X(0,L)}$ is a rearrangement\hyp{}invariant function norm. It turns out that it also enables us to characterize optimal domain and target spaces for $R_{u,v,\nu}$ and $H_{u,v,\nu}$, respectively.
\begin{proposition}\label{prop:norminducedbyR}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $u\colon(0,L)\to(0,\infty)$ be a nondegenerate nonincreasing function. If $L<\infty$, assume that $u(L^-)>0$. Let $v\colon(0,L)\to(0,\infty)$ be measurable. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Set
\begin{equation*}
\varrho(f)=\Bigg\|v(t)\int_0^{\nu(t)}f^*(s)u(s)\d{s}\Bigg\|_{X(0,L)},\ f\in\mathfrak{M}pl(0,L),
\end{equation*}
and
\begin{equation}\label{prop:Roptimaldomain:xidef}
\xi(t)=\begin{cases}
v(t)U(\nu(t)),\ t\in(0,L),\quad&\text{if $L<\infty$,}\\
v(t)U(\nu(t))\chi_{(0,1)}(t)+v(t)\chi_{(1,\infty)}(t),\ t\in(0, \infty),\quad&\text{if $L=\infty$.}
\end{cases}
\end{equation}
The functional $\varrho$ is a rearrangement\hyp{}invariant function norm if and only if $\xi\in X(0,L)$.
If $\xi\in X(0,L)$, then the rearrangement\hyp{}invariant function space induced by $\varrho$ is the optimal domain space for the operator $R_{u,v,\nu}$ and $X(0,L)$. If $\xi\not\in X(0,L)$, then there is no rearrangement\hyp{}invariant function space $Z(0,L)$ such that $R_{u,v,\nu}\colon Z(0,L)\to X(0,L)$ is bounded.
\end{proposition}
\begin{proof}
We shall show that $\varrho$ is a rearrangement\hyp{}invariant function norm provided that $\xi\in X(0,L)$. Before we do that, note that, since $u$ is positive and nonincreasing, its nondegeneracy implies that $0 < U(t) < \infty$ for every $t\in(0,L]\cap\mathbb{R}$.
\emph{Property \emph{(P1)}.} The positive homogeneity and positive definiteness of $\varrho$ can be readily verified. As for the subadditivity of $\varrho$, it follows from \eqref{ch1:ri:subadditivityofdoublestar} combined with Hardy's lemma \eqref{ch1:ri:hardy-lemma} that
\begin{equation*}
\int_0^L(f+g)^*(s)u(s)\chi_{(0,\nu(t))}(s)\d{s}\leq\int_0^Lf^*(s)u(s)\chi_{(0,\nu(t))}(s)\d{s}+\int_0^Lg^*(s)u(s)\chi_{(0,\nu(t))}(s)\d{s}
\end{equation*}
for every $f,g\in\mathfrak{M}pl(0,L)$ and $t\in(0,L)$ thanks to the fact that $u$ is nonincreasing. Since $\|\cdot\|_{X(0,L)}$ is subadditive, it follows that
\begin{equation*}
\varrho(f+g)\leq\varrho(f)+\varrho(g)\quad\text{for every $f,g\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
\emph{Properties \emph{(P2)} and \emph{(P3)}.} Since $\|\cdot\|_{X(0,L)}$ has these properties, it can be readily verified that $\varrho$, too, has them.
\emph{Property \emph{(P4)}.} First, assume that $L<\infty$. Clearly, $\varrho(\chi_{(0,L)})<\infty$ if and only if $v(t)U(\nu(t))\in X(0,L)$. Since $\varrho$ has property (P2), $\varrho$ has property (P4) if and only if $v(t)U(\nu(t))\in X(0,L)$. Second, assume that $L=\infty$. Let $E\subseteq(0,\infty)$ be a set of finite positive measure. Clearly, $\varrho(\chi_E)<\infty$ if and only if $v(t)U(\nu(t))\chi_{(0,|E|)}(t)+v(t)\chi_{(|E|,\infty)}(t)\in X(0,\infty)$. If $|E|\leq1$, then
\begin{align*}
&\|v(t)U(\nu(t))\chi_{(0,|E|)}(t)+v(t)\chi_{(|E|,\infty)}(t)\|_{X(0,\infty)}\\
&\leq\|v(t)U(\nu(t))\chi_{(0,1)}(t)\|_{X(0,\infty)}+\|v(t)\chi_{(|E|,1)}(t)\|_{X(0,\infty)}+\|v(t)\chi_{(1,\infty)}(t)\|_{X(0,\infty)}\\
&\leq\|v(t)U(\nu(t))\chi_{(0,1)}(t)\|_{X(0,\infty)}+\frac1{U(\nu(|E|))}\|U(\nu(t))v(t)\chi_{(|E|,1)}(t)\|_{X(0,\infty)}\\
&\quad+\|v(t)\chi_{(1,\infty)}(t)\|_{X(0,\infty)}\\
&\leq\Big(1+\frac1{U(\nu(|E|))}\Big)\|v(t)U(\nu(t))\chi_{(0,1)}(t)\|_{X(0,\infty)}+\|v(t)\chi_{(1,\infty)}(t)\|_{X(0,\infty)}.
\end{align*}
If $|E|\geq1$, we can obtain, in a similar way, that
\begin{align*}
&\|v(t)U(\nu(t))\chi_{(0,|E|)}(t)+v(t)\chi_{(|E|,\infty)}(t)\|_{X(0,\infty)}\\
&\leq\|v(t)U(\nu(t))\chi_{(0,1)}(t)\|_{X(0,\infty)}+(1+U(\nu(|E|)))\|v(t)\chi_{(1,\infty)}(t)\|_{X(0,\infty)}.
\end{align*}
Either way, we have that $\varrho(\chi_E)<\infty$ if and only if
\begin{equation*}
v(t)U(\nu(t))\chi_{(0,1)}(t)+v(t)\chi_{(1,\infty)}(t)\in X(0,\infty).
\end{equation*}
\emph{Property \emph{(P5)}.}
Let $E\subseteq(0,L)$ be a set of finite positive measure. Let $f\in\mathfrak{M}pl(0,L)$. Note that the function $(0,L)\ni t\mapsto\frac1{U(\nu(t))}\int_0^{\nu(t)}f^*(s)u(s)\d{s}$ is nonincreasing because it is the integral mean of a nonnegative nonincreasing function over the interval $(0,\nu(t))$ with respect to the measure $u(s)\d{s}$. Thanks to that and the monotonicity of $u$, we obtain that
\begin{align*}
\Big\|v(t)\int_0^{\nu(t)}f^*(s)u(s)\d{s}\Big\|_{X(0,L)}&\geq\Big\|v(t)\chi_{(0,\nu^{-1}(|E|))}(t)\int_0^{\nu(t)}f^*(s)u(s)\d{s}\Big\|_{X(0,L)}\\
&\geq\Big\|v(t)U(\nu(t))\chi_{(0,\nu^{-1}(|E|))}(t)\Big\|_{X(0,L)}\frac1{U(|E|)}\int_0^{|E|}f^*(s)u(s)\d{s}\\
&\geq\Big\|v(t)U(\nu(t))\chi_{(0,\nu^{-1}(|E|))}(t)\Big\|_{X(0,L)}\frac{u(|E|^-)}{U(|E|)}\int_0^{|E|}f^*(s)\d{s}\\
&\geq\Big\|v(t)U(\nu(t))\chi_{(0,\nu^{-1}(|E|))}(t)\Big\|_{X(0,L)}\frac{u(|E|^-)}{U(|E|)}\int_Ef(s)\d{s},
\end{align*}
where we used \eqref{ch1:ri:HLg=chiE} in the last inequality.
\emph{Property \emph{(P6)}.} Since $f^*=g^*$ when $f,g\in\mathfrak{M}pl(0,L)$ are equimeasurable, this is obvious.
Note that the necessity of $\xi\in X(0,L)$ for $\varrho$ to be a rearrangement\hyp{}invariant function norm was already proved in the paragraph devoted to property (P4).
Assume now that $\xi\in X(0,L)$ and denote by $Y(0,L)$ the rearrangement\hyp{}invariant function space induced by $\varrho$. Thanks to the Hardy--Littlewood inequality \eqref{ch1:ri:HL} and the monotonicity of $u$, we have that
\begin{equation*}
\|R_{u,v,\nu}f\|_{X(0,L)}\leq\|R_{u,v,\nu}(f^*)\|_{X(0,L)}=\|f\|_{Y(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
Hence $R_{u,v,\nu}\colon Y(0,L)\to X(0,L)$ is bounded. Next, if $Z(0,L)$ is a rearrangement\hyp{}invariant function space such that $R_{u,v,\nu}\colon Z(0,L)\to X(0,L)$ is bounded, then we have that
\begin{equation*}
\|f\|_{Y(0,L)}=\|R_{u,v,\nu}(f^*)\|_{X(0,L)}\lesssim\|f^*\|_{Z(0,L)}=\|f\|_{Z(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$},
\end{equation*}
and so $Z(0,L)\hookrightarrow Y(0,L)$. Finally, note that, if $R_{u,v,\nu}\colon Z(0,L)\to X(0,L)$ is bounded, then
\begin{equation*}
\|\xi\|_{X(0,L)}\approx\|R_{u,v,\nu}(\chi_{(0,a)})\|_{X(0,L)}\lesssim\|\chi_{(0,a)}\|_{Z(0,L)}<\infty,
\end{equation*}
where
\begin{equation}\label{prop:Roptimaldomain:L_or_1}
a=\begin{cases}
L\quad&\text{if $L<\infty$},\\
1\quad&\text{if $L=\infty$};
\end{cases}
\end{equation}
hence $\xi\in X(0,L)$.
\end{proof}
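As a minimal illustration of \myref{Proposition}{prop:norminducedbyR} (the particular parameters are chosen here purely as an example), let $L<\infty$, $u\equiv1$, $v(t)=\frac1{t}$ and $\nu=\id$, so that $U(t)=\int_0^tu(s)\d{s}=t$ and $R_{u,v,\nu}f(t)=\frac1{t}\int_0^tf(s)\d{s}$. Then $\xi(t)=v(t)U(\nu(t))=1$ for every $t\in(0,L)$, whence $\xi\in X(0,L)$ for every rearrangement\hyp{}invariant function space $X(0,L)$ thanks to \eqref{ch1:ri:smallestandlargestrispacefinitemeasure}. Consequently,
\begin{equation*}
\varrho(f)=\Big\|\frac1{t}\int_0^tf^*(s)\d{s}\Big\|_{X(0,L)},\ f\in\mathfrak{M}pl(0,L),
\end{equation*}
is a rearrangement\hyp{}invariant function norm, and the space it induces is the optimal domain space for the averaging operator $f\mapsto\frac1{t}\int_0^tf(s)\d{s}$ and $X(0,L)$.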
\begin{remark}\label{rem:optimal_for_R_iff_for_H}
Thanks to \eqref{prop:RXtoYbddiffHY'toX':RXtoZiffHZ'toX'} and \eqref{ch1:ri:X''=X}, $Y(0,L)$ is the optimal target space for the operator $H_{u,v,\nu}$ and $X(0,L)$ if and only if $Y'(0,L)$ is the optimal domain space for the operator $R_{u,v,\nu^{-1}}$ and $X'(0,L)$. Therefore, \myref{Proposition}{prop:norminducedbyR} actually also characterizes optimal target spaces for the operator $H_{u,v,\nu}$.
\end{remark}
\begin{proposition}\label{prop:Hoptimalrange}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Let $u\colon(0,L)\to(0,\infty)$ be a nondegenerate nonincreasing function. If $L<\infty$, assume that $u(L^-)>0$. Let $v\colon(0,L)\to(0,\infty)$ be measurable. Assume that $\xi\in X'(0,L)$, where $\xi$ is defined by \eqref{prop:Roptimaldomain:xidef} with $\nu$ replaced by $\nu^{-1}$. Let $\|\cdot\|_{Y(0,L)}$ be the rearrangement\hyp{}invariant function norm whose associate function norm $\|\cdot\|_{Y'(0,L)}$ is defined as
\begin{equation*}
\|f\|_{Y'(0,L)}=\Bigg\|v(t)\int_0^{\nu^{-1}(t)}f^*(s)u(s)\d{s}\Bigg\|_{X'(0,L)},\ f\in\mathfrak{M}pl(0,L).
\end{equation*}
The rearrangement\hyp{}invariant function space $Y(0,L)$ is the optimal target space for the operator $H_{u,v,\nu}$ and $X(0,L)$. Moreover, if $\xi\not\in X'(0,L)$, then there is no rearrangement\hyp{}invariant function space $Z(0,L)$ such that $H_{u,v,\nu}\colon X(0,L)\to Z(0,L)$ is bounded.
\end{proposition}
We now turn our attention to optimal domain spaces for $H_{u,v,\nu}$; here the situation becomes significantly more complicated. Notably, unlike for $R_{u,v,\nu}$, the integration is carried out over intervals away from $0$, which often causes great difficulties. In particular, the functional $\mathfrak{M}pl(0,L)\ni f\mapsto \|H_{u,v,\nu}(f^*)\|_{X(0,L)}$ is hardly ever subadditive. Instead, in general, we need to consider a more complicated functional (see, however, \myref{Proposition}{prop:norminducedbyHwhenRnonincreasing}). We will also need to impose a mild condition on $\nu$. We write $\nu\in\Dinf{0}$, $\nu\in\Dinf{\infty}$, $\nu\in\Dsup{0}$ and $\nu\in\Dsup{\infty}$ if there is $\theta>1$ such that $\liminf_{t\to0^+}\frac{\nu(\theta t)}{\nu(t)}>1$, $\liminf_{t\to\infty}\frac{\nu(\theta t)}{\nu(t)}>1$, $\limsup_{t\to0^+}\frac{\nu(\theta t)}{\nu(t)}<\infty$ and $\limsup_{t\to\infty}\frac{\nu(\theta t)}{\nu(t)}<\infty$, respectively. When we need to emphasize the exact value of $\theta$, we will write $\nu\in\Dinf[\theta]{0}$ and so forth.
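For instance (the choice is ours, purely to illustrate the definitions), if $L=\infty$ and $\nu(t)=t^{\alpha}$ for some $\alpha>0$, then
\begin{equation*}
\frac{\nu(\theta t)}{\nu(t)}=\theta^{\alpha}\quad\text{for every $t\in(0,\infty)$ and every $\theta>1$},
\end{equation*}
and so $\nu$ belongs to each of the four classes just introduced, with any $\theta>1$.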
\begin{proposition}\label{prop:norminducedbyH}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $u,v\colon(0,L)\to(0,\infty)$ be nonincreasing. If $L<\infty$, assume that $v(L^-)>0$. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. If $L=\infty$, assume that $\nu\in\Dinf{\infty}$. Set
\begin{equation}\label{prop:norminducedbyH:normdef}
\varrho(f)=\sup_{h\sim f}\Big\|u(t)\int_{\nu(t)}^L h(s)v(s)\d{s}\Big\|_{X(0,L)},\ f\in\mathfrak{M}pl(0,L),
\end{equation}
where the supremum extends over all $h\in\mathfrak{M}pl(0,L)$ equimeasurable with $f$. The functional $\varrho$ is a rearrangement\hyp{}invariant function norm if and only if
\begin{equation}\label{prop:norminducedbyH:assum}
\begin{cases}
u(t)\int_{\nu(t)}^L v(s) \d{s}\in X(0,L)\quad&\text{if $L<\infty$},\\
\begin{gathered}[b]u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\in X(0,\infty)\ \text{and}\\ \limsup_{\tau\to\infty}v(\tau)\|u\chi_{(0,\nu^{-1}(\tau))}\|_{X(0,\infty)}<\infty\end{gathered}\quad&\text{if $L=\infty$}.
\end{cases}
\end{equation}
If \eqref{prop:norminducedbyH:assum} is satisfied, then the rearrangement\hyp{}invariant function space induced by $\varrho$ is the optimal domain space for the operator $H_{u,v,\nu}$ and $X(0,L)$. If \eqref{prop:norminducedbyH:assum} is not satisfied, then there is no rearrangement\hyp{}invariant function space $Z(0,L)$ such that $H_{u,v,\nu}\colon Z(0,L)\to X(0,L)$ is bounded.
\end{proposition}
\begin{proof}
We shall show that $\varrho$ is a rearrangement\hyp{}invariant function norm provided that \eqref{prop:norminducedbyH:assum} is satisfied.
\emph{Property \emph{(P2)}.} Let $f,g\in\mathfrak{M}pl(0,L)$ be such that $f\leq g$ a.e. Consequently, $f^*\leq g^*$. Suppose that $\varrho(f) > \varrho(g)$. This implies that there is $\widetilde{f}\in\mathfrak{M}pl(0,L)$, $\widetilde{f}\sim f$, such that
\begin{equation}\label{prop:norminducedbyH:eq1}
\sup_{h\sim g}\Big\|u(t)\int_{\nu(t)}^L h(s)v(s)\d{s}\Big\|_{X(0,L)}<\Big\|u(t)\int_{\nu(t)}^L \widetilde{f}(s)v(s)\d{s}\Big\|_{X(0,L)}.
\end{equation}
When $L=\infty$, we may assume that $\lim_{t\to\infty}(\widetilde{f})^*(t)=\lim_{t\to\infty}f^*(t)=0$, for we would otherwise approximate $\widetilde f$ by functions $f_n=\widetilde{f}\chi_{(0,n)}$, $n\in\mathbb{N}$ (the monotone convergence theorem and property (P3) of $\|\cdot\|_{X(0,L)}$ would guarantee that the inequality above holds with $\widetilde{f}$ replaced by $f_n$ for $n$ large enough). Thanks to \cite[Chapter~2, Corollary~7.6]{BS} (also~\citep[Proposition~3]{R:70}), there is a measure-preserving transformation (in the sense of \cite[Chapter~2, Definition~7.1]{BS}) $\sigma\colon(0,L)\to(0,L)$ such that $\widetilde{f}=f^*\circ\sigma$. Since $\sigma$ is measure preserving, we have that $(g^*\circ\sigma)\sim g^*\sim g$ (\cite[Chapter~2, Proposition~7.2]{BS}). Consequently,
\begin{equation}\label{prop:norminducedbyH:eq2}
\begin{aligned}
\sup_{h\sim g}\Big\|u(t)\int_{\nu(t)}^L h(s)v(s)\d{s}\Big\|_{X(0,L)}&\geq\Big\|u(t)\int_{\nu(t)}^L g^*(\sigma(s))v(s)\d{s}\Big\|_{X(0,L)}\\
&\geq\Big\|u(t)\int_{\nu(t)}^L f^*(\sigma(s))v(s)\d{s}\Big\|_{X(0,L)}\\
&=\Big\|u(t)\int_{\nu(t)}^L \widetilde{f}(s)v(s)\d{s}\Big\|_{X(0,L)}.
\end{aligned}
\end{equation}
By combining \eqref{prop:norminducedbyH:eq1} and \eqref{prop:norminducedbyH:eq2}, we reach a contradiction. Hence $\varrho(f)\leq\varrho(g)$.
\emph{Property \emph{(P3)}.} Let $f, f_k\in\mathfrak{M}pl(0,L)$, $k\in \mathbb{N}$, be such that $f_k \nearrow f$ a.e. Thanks to property (P2) of $\varrho$, the limit $\lim_{k\to\infty}\varrho(f_k)$ exists and we clearly have that $\lim_{k\to\infty}\varrho(f_k)\leq\varrho(f)$. The fact that $\lim_{k\to\infty}\varrho(f_k)=\varrho(f)$ can be proved by contradiction in a similar way to the proof of (P2).
\emph{Property \emph{(P1)}.} The positive homogeneity and positive definiteness of $\varrho$ can be readily verified. As for the subadditivity of $\varrho$, let $f,g\in\mathfrak{M}pl(0,L)$ be simple functions. Let $h\in\mathfrak{M}pl(0,L)$ be such that $h\sim f+g$. Being equimeasurable with $f+g$, $h$ is a simple function having the same range as $f+g$. Furthermore, it is easy to see that $h$ can be decomposed as $h=h_1+h_2$, where $h_1,h_2\in\mathfrak{M}pl(0,L)$ are simple functions such that $h_1\sim f$ and $h_2\sim g$. Using the subadditivity of $\|\cdot\|_{X(0,L)}$, we obtain that
\begin{align*}
\Big\|u(t)\int_{\nu(t)}^L h(s)v(s)\d{s}\Big\|_{X(0,L)}&\leq\Big\|u(t)\int_{\nu(t)}^L h_1(s)v(s)\d{s}\Big\|_{X(0,L)}+\Big\|u(t)\int_{\nu(t)}^L h_2(s)v(s)\d{s}\Big\|_{X(0,L)}\\
&\leq\varrho(f)+\varrho(g).
\end{align*}
Hence $\varrho(f+g)\leq\varrho(f)+\varrho(g)$. When $f,g\in\mathfrak{M}pl(0,L)$ are general functions, we approximate each of them by a nondecreasing sequence of nonnegative, simple functions and use property (P3) of $\varrho$ to get $\varrho(f+g)\leq\varrho(f)+\varrho(g)$.
\emph{Property \emph{(P4)}.} Assume that $L<\infty$. Since $\varrho$ has property (P2), $\varrho$ has property (P4) if and only if $\varrho(\chi_{(0,L)})<\infty$. If $h\in\mathfrak{M}pl(0,L)$ is equimeasurable with $\chi_{(0,L)}$, then $h=1$ a.e.~on $(0,L)$; therefore,
\begin{equation*}
\varrho(\chi_{(0,L)})=\Big\|u(t)\int_{\nu(t)}^L v(s)\d{s}\Big\|_{X(0,L)}.
\end{equation*}
Hence $\varrho$ has property (P4) if and only if $u(t)\int_{\nu(t)}^L v(s)\d{s}\in X(0,L)$. Assume now that $L=\infty$. Fix $\theta>1$ such that $\nu\in\Dinf[\theta]{\infty}$. Let $E\subseteq(0,\infty)$ be of finite measure. Set $b=\max\mathbb{B}ig\{1,\nu(1),\frac{\theta|E|}{M-1}\mathbb{B}ig\}$, where $M=\inf_{t\in[1,\infty)}\frac{\nu(\theta t)}{\nu(t)}$. Note that $M>1$. Let $h\in\mathfrak{M}pl(0,\infty)$ be equimeasurable with $\chi_E$. It is easy to see that $h=\chi_F$ for some measurable $F\subseteq(0,\infty)$ such that $|F|=|E|$. Thanks to the (outer) regularity of the Lebesgue measure, there is an open set $G\supseteq F$ such that $|G|\leq \theta|F|$. Being an open set on the real line, $G\cap(b,\infty)$ can be expressed as $G\cap(b,\infty)=\bigcup_{k}(a_k,b_k)$, where $\{(a_k,b_k)\}_k$ is a countable system of mutually disjoint open intervals. We plainly have that $F\subseteq(0,b]\cup\big(G\cap(b,\infty)\big)$ and $a_k>b$. Furthermore, we have that $b_k-a_k\leq \theta|F|\leq (M-1)b < (M-1)a_k$, whence
\begin{equation}\label{prop:norminducedbyH:eq3}
\nu^{-1}(b_k)-\nu^{-1}(a_k)<(\theta-1)\nu^{-1}(a_k).
\end{equation}
We have that
\begin{align}
\Big\|u(t)\int_{\nu(t)}^\infty \chi_F(s)v(s)\d{s}\Big\|_{X(0,\infty)}&\leq\Big\|u(t)\int_{\nu(t)}^\infty \big(\chi_{(0,b]}(s)+\sum_k\chi_{(a_k,b_k)}(s)\big)v(s)\d{s}\Big\|_{X(0,\infty)}\notag\\
\begin{split}\label{prop:norminducedbyH:eq4}
&\leq\Big\|u(t)\chi_{(0,\nu^{-1}(b))}(t)\int_{\nu(t)}^bv(s)\d{s}\Big\|_{X(0,\infty)}\\
&\quad+\sum_k\Big\|u(t)\chi_{(0,\nu^{-1}(a_k))}(t)\int_{a_k}^{b_k}v(s)\d{s}\Big\|_{X(0,\infty)}\\
&\quad+\sum_k\Big\|u(t)\chi_{(\nu^{-1}(a_k),\nu^{-1}(b_k))}(t)\int_{\nu(t)}^{b_k}v(s)\d{s}\Big\|_{X(0,\infty)}.
\end{split}
\end{align}
Note that the assumption
\begin{equation}\label{prop:norminducedbyH:infinitecase:assump1}
\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}<\infty
\end{equation}
together with the monotonicity of $u$ and $v$ implies that
\begin{equation}\label{prop:norminducedbyH:eq11}
\|u\chi_{(0,a)}\|_{X(0,\infty)}<\infty\quad\text{for every $a\in(0,\infty)$}.
\end{equation}
Indeed, since $u$ is nonincreasing, it is sufficient to show that $\|u\chi_{(0,\nu^{-1}(\frac1{2}))}\|_{X(0,\infty)}<\infty$, which follows from
\begin{align*}
\infty>\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}&\geq\Big\|u(t)\chi_{(0,\nu^{-1}(\frac1{2}))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}\\
&\geq \frac{v(1)}{2}\Big\|u\chi_{(0,\nu^{-1}(\frac1{2}))}\Big\|_{X(0,\infty)}.
\end{align*}
Furthermore, note that \eqref{prop:norminducedbyH:eq11} guarantees that
\begin{equation}\label{prop:norminducedbyH:infinitecase:assump2}
\limsup_{\tau\to\infty}v(\tau)\|u\chi_{(0,\nu^{-1}(\tau))}\|_{X(0,\infty)}<\infty
\end{equation}
if and only if
\begin{equation}\label{prop:norminducedbyH:eq12}
\sup_{\tau\in[1,\infty)}v(\tau)\|u\chi_{(0,\nu^{-1}(\tau))}\|_{X(0,\infty)}<\infty.
\end{equation}
Now, as for the first term on the right-hand side of \eqref{prop:norminducedbyH:eq4}, we have that
\begin{align}\label{prop:norminducedbyH:eq7}
\begin{split}
\Big\|u(t)\chi_{(0,\nu^{-1}(b))}(t)\int_{\nu(t)}^bv(s)\d{s}\Big\|_{X(0,\infty)}&\leq\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}\\
&\quad+\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_1^bv(s)\d{s}\Big\|_{X(0,\infty)}\\
&\quad+\Big\|u(t)\chi_{(\nu^{-1}(1),\nu^{-1}(b))}(t)\int_{\nu(t)}^bv(s)\d{s}\Big\|_{X(0,\infty)}\\
&\leq A<\infty,
\end{split}
\end{align}
where
\begin{align*}
A&=\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}+v(1)(b-1)\|u\chi_{(0,\nu^{-1}(1))}\|_{X(0,\infty)}\\
&\quad+v(1)(b-1)\|u\chi_{(0,\nu^{-1}(b)-\nu^{-1}(1))}\|_{X(0,\infty)}.
\end{align*}
As for the second term on the right-hand side of \eqref{prop:norminducedbyH:eq4}, we have that
\begin{equation}\label{prop:norminducedbyH:eq5}
\begin{aligned}
\Big\|u(t)\chi_{(0,\nu^{-1}(a_k))}(t)\int_{a_k}^{b_k}v(s)\d{s}\Big\|_{X(0,\infty)}&\leq v(a_k)(b_k-a_k)\|u\chi_{(0,\nu^{-1}(a_k))}\|_{X(0,\infty)}\\
&\leq B(b_k-a_k),
\end{aligned}
\end{equation}
where $B$ is the supremum in \eqref{prop:norminducedbyH:eq12}, which is independent of $k$. Next,
\begin{align}
\Big\|u(t)\chi_{(\nu^{-1}(a_k),\nu^{-1}(b_k))}(t)\int_{\nu(t)}^{b_k}v(s)\d{s}\Big\|_{X(0,\infty)}&\leq\int_{a_k}^{b_k}v(s)\d{s}\|u\chi_{(\nu^{-1}(a_k),\nu^{-1}(b_k))}\|_{X(0,\infty)}\notag\\
\begin{split}\label{prop:norminducedbyH:eq6}
&\leq v(a_k)(b_k-a_k)\|u\chi_{(\nu^{-1}(a_k),\nu^{-1}(b_k))}\|_{X(0,\infty)}\\
&\leq v(a_k)(b_k-a_k)\|u\chi_{(0,(\theta-1)\nu^{-1}(a_k))}\|_{X(0,\infty)}\\
&\leq \ceil{\theta-1} v(a_k)(b_k-a_k)\|u\chi_{(0,\nu^{-1}(a_k))}\|_{X(0,\infty)}\\
&\leq \ceil{\theta-1} B(b_k-a_k),
\end{split}
\end{align}
where we used the monotonicity of $u$ and $v$ in the second inequality, \eqref{prop:norminducedbyH:eq3} in the third one, and the monotonicity of $u$ in the fourth one. By combining \eqref{prop:norminducedbyH:eq4} with \eqref{prop:norminducedbyH:eq7}, \eqref{prop:norminducedbyH:eq5} and \eqref{prop:norminducedbyH:eq6}, we obtain that
\begin{equation*}
\Big\|u(t)\int_{\nu(t)}^\infty h(s)v(s)\d{s}\Big\|_{X(0,\infty)}\leq A+\ceil{\theta}B\sum_k(b_k-a_k)\leq A+\ceil{\theta}\theta B|E|<\infty.
\end{equation*}
Hence $\varrho(\chi_E)<\infty$ provided that \eqref{prop:norminducedbyH:infinitecase:assump1} and \eqref{prop:norminducedbyH:infinitecase:assump2} are satisfied. The necessity of \eqref{prop:norminducedbyH:infinitecase:assump1} is obvious because we have that
\begin{equation*}
\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}\leq\varrho(\chi_{(0,1)}).
\end{equation*}
As for the necessity of \eqref{prop:norminducedbyH:infinitecase:assump2}, suppose that $\limsup_{\tau\to\infty}v(\tau)\big\|u\chi_{(0,\nu^{-1}(\tau))}\big\|_{X(0,\infty)}=\infty$. It follows that there is a sequence $\tau_k\nearrow\infty$, $k\to\infty$, such that
\begin{equation*}
\lim_{k\to\infty}v(\tau_k)\|u\chi_{(0,\nu^{-1}(\tau_k))}\|_{X(0,\infty)}=\infty.
\end{equation*}
Since $\inf_{t\in[1,\infty)}\frac{\nu(\theta t)}{\nu(t)}>1$, we can find an $\varepsilon>0$ such that $\frac{\nu(\theta t)}{\nu(t)}\geq 1+\varepsilon$ for every $t\in[1,\infty)$. Moreover, we may clearly assume that $\tau_k\geq \nu(1)+1$ and $\frac{\tau_k}{\tau_k-1}\leq 1+\varepsilon$; hence
\begin{equation}\label{prop:norminducedbyH:eq10}
\nu^{-1}(\tau_k)-\nu^{-1}(\tau_k-1)\leq(\theta-1)\nu^{-1}(\tau_k-1)
\end{equation}
inasmuch as $\frac{\nu(\theta\nu^{-1}(\tau_k-1))}{\nu(\nu^{-1}(\tau_k-1))}\geq1+\varepsilon$. Using \eqref{prop:norminducedbyH:eq10} and the fact that $u$ is nonincreasing, we obtain that
\begin{align*}
\|u\chi_{(0, \nu^{-1}(\tau_k))}\|_{X(0,\infty)}&\leq\|u\chi_{(0, \nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}+\|u\chi_{(\nu^{-1}(\tau_k-1),\nu^{-1}(\tau_k))}\|_{X(0,\infty)}\\
&\leq\|u\chi_{(0, \nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}+\|u\chi_{(0,\nu^{-1}(\tau_k)-\nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}\\
&\leq\|u\chi_{(0, \nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}+\|u\chi_{(0, (\theta - 1)\nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}\\
&\leq\ceil{\theta}\|u\chi_{(0, \nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}.
\end{align*}
Therefore,
\begin{align*}
\varrho(\chi_{(0,1)})&\geq\Big\|u(t)\chi_{(0, \nu^{-1}(\tau_k-1))}(t)\int_{\nu(t)}^\infty\chi_{(\tau_k-1,\tau_k)}(s)v(s)\d{s}\Big\|_{X(0,\infty)}\\
&\geq v(\tau_k)\|u\chi_{(0, \nu^{-1}(\tau_k-1))}\|_{X(0,\infty)}\geq\frac1{\ceil{\theta}}v(\tau_k)\|u\chi_{(0, \nu^{-1}(\tau_k))}\|_{X(0,\infty)},
\end{align*}
which tends to $\infty$ as $k\to\infty$. Hence $\varrho(\chi_{(0,1)})=\infty$, and so $\varrho$ does not have property (P4).
\emph{Property \emph{(P5)}.} Assume that $L<\infty$. Note that \eqref{prop:norminducedbyH:assum} together with $v(L^-)>0$ implies that $\|u\|_{X(0,L)}<\infty$. Let $f\in\mathfrak{M}pl(0,L)$. Since $f^*$ is nonincreasing, we have that $\int_0^Lf^*(s)\d{s}\leq2\int_0^\frac{L}{2}f^*(s)\d{s}$. Since the function $(0,L)\ni t\mapsto f^*(L-t)$ is equimeasurable with $f$, we have that
\begin{align*}
\varrho(f)&\geq\Big\|u(t)\int_{\nu(t)}^L f^*(L-s)v(s)\d{s}\Big\|_{X(0,L)}\\
&\geq v(L^-)\Big\|u(t)\chi_{(0,\nu^{-1}(\frac{L}{2}))}(t)\int_{\nu(t)}^L f^*(L-s)\d{s}\Big\|_{X(0,L)}\\
&=v(L^-)\Big\|u(t)\chi_{(0,\nu^{-1}(\frac{L}{2}))}(t)\int_0^{L-\nu(t)} f^*(s)\d{s}\Big\|_{X(0,L)}\\
&\geq v(L^-)\|u\chi_{(0,\nu^{-1}(\frac{L}{2}))}\|_{X(0,L)}\int_0^\frac{L}{2} f^*(s)\d{s}\\
&\geq\frac{v(L^-)}{2}\|u\chi_{(0,\nu^{-1}(\frac{L}{2}))}\|_{X(0,L)}\int_0^L f^*(s)\d{s}\\
&\geq\frac{v(L^-)}{2}\|u\chi_{(0,\nu^{-1}(\frac{L}{2}))}\|_{X(0,L)}\int_0^L f(s)\d{s},
\end{align*}
where we used \eqref{ch1:ri:HLg=chiE} in the last inequality. Since $\frac{v(L^-)}{2}\|u\chi_{(0,\nu^{-1}(\frac{L}{2}))}\|_{X(0,L)}\in(0,\infty)$ does not depend on $f$, property (P5) follows. Assume now that $L=\infty$. Recall that \eqref{prop:norminducedbyH:eq11} is satisfied provided that \eqref{prop:norminducedbyH:infinitecase:assump1} is satisfied. Let $f\in\mathfrak{M}pl(0,\infty)$ and $E\subseteq(0,\infty)$ be of finite measure. The function $(0,\infty)\ni t\mapsto f^*(t-|E|)\chi_{(|E|,\infty)}(t)$ is equimeasurable with $f$. By arguing similarly to the case $L<\infty$, we obtain that
\begin{equation*}
\varrho(f)\geq v(2|E|)\|u\chi_{(0,\nu^{-1}(|E|))}\|_{X(0,\infty)}\int_Ef(s)\d{s},
\end{equation*}
whence property (P5) follows.
\emph{Property \emph{(P6)}.} Since the relation $\sim$ is transitive, it plainly follows that $\varrho$ has property (P6).
Note that the necessity of \eqref{prop:norminducedbyH:assum} for $\varrho$ to be a rearrangement\hyp{}invariant function norm was already proved in the paragraph devoted to property (P4).
Assume now that \eqref{prop:norminducedbyH:assum} is satisfied and denote the rearrangement\hyp{}invariant function space induced by $\varrho$ by $Y(0,L)$. We plainly have that
\begin{equation*}
\|H_{u,v,\nu}f\|_{X(0,L)}\leq\|f\|_{Y(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$},
\end{equation*}
and so $H_{u,v,\nu}\colon Y(0,L)\to X(0,L)$ is bounded. Next, let $Z(0,L)$ be a rearrangement\hyp{}invariant function space such that $H_{u,v,\nu}\colon Z(0,L)\to X(0,L)$ is bounded. For every $f\in\mathfrak{M}pl(0,L)$ and each $h\in\mathfrak{M}pl(0,L)$ equimeasurable with $f$, we have that
\begin{equation*}
\|H_{u,v,\nu}h\|_{X(0,L)}\lesssim\|h\|_{Z(0,L)}=\|f\|_{Z(0,L)}.
\end{equation*}
Therefore,
\begin{equation*}
\|f\|_{Y(0,L)}\lesssim\|f\|_{Z(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
Hence $Z(0,L)\hookrightarrow Y(0,L)$. Finally, we claim that \eqref{prop:norminducedbyH:assum} needs to be satisfied if there is any rearrangement\hyp{}invariant function space $Z(0,L)$ such that $H_{u,v,\nu}\colon Z(0,L)\to X(0,L)$ is bounded. If $L<\infty$, we plainly have that
\begin{equation*}
\Big\|u(t)\int_{\nu(t)}^L v(s)\d{s}\Big\|_{X(0,L)}=\|H_{u,v,\nu}\chi_{(0,L)}\|_{X(0,L)}\lesssim\|\chi_{(0,L)}\|_{Z(0,L)}<\infty.
\end{equation*}
If $L=\infty$, we can argue as in the paragraph devoted to property (P4) to show that, if \eqref{prop:norminducedbyH:assum} is not satisfied, then
\begin{equation*}
\sup_{h\sim\chi_{(0,1)}}\|H_{u,v,\nu}h\|_{X(0,\infty)}=\infty,
\end{equation*}
whence, thanks to the boundedness of $H_{u,v,\nu}\colon Z(0,\infty)\to X(0,\infty)$,
\begin{equation*}
\infty=\sup_{h\sim\chi_{(0,1)}}\|H_{u,v,\nu}h\|_{X(0,\infty)}\lesssim\|\chi_{(0,1)}\|_{Z(0,\infty)}<\infty,
\end{equation*}
which would be a contradiction.
\end{proof}
\begin{remarks}\hphantom{}
\begin{itemize}
\item The assumption $\nu\in\Dinf{\infty}$ is not overly restrictive. For example, it is satisfied whenever $\nu$ is equivalent to $t\mapsto t^\alpha b(t)$ near $\infty$ for some $\alpha>0$ and a slowly-varying function $b$ (cf.~\cite[Proposition~2.2]{GOT:05}). On the other hand, $\nu(t)=\log^\alpha(t)$ near $\infty$, where $\alpha>0$, is a typical example of a function not satisfying the assumption. The same remark (with the obvious modifications) is true for the assumption $\nu\in\Dinf{0}$, which will appear in \myref{Proposition}{prop:Hiteration}.
\item When $u\equiv1$, \eqref{prop:norminducedbyH:infinitecase:assump2} is equivalent to
\begin{equation*}
\limsup_{t\to\infty}v(\nu(t))\varphi_{X(0,\infty)}(t)<\infty.
\end{equation*}
\item The functional \eqref{prop:norminducedbyH:normdef} is quite complicated; however, we shall see in \myref{Section}{sec:simplification} that it can often be significantly simplified.
\end{itemize}
\end{remarks}
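Concerning the second of the remarks above: when $u\equiv1$, we have $\|u\chi_{(0,\nu^{-1}(\tau))}\|_{X(0,\infty)}=\varphi_{X(0,\infty)}(\nu^{-1}(\tau))$ for every $\tau\in(0,\infty)$, and so the substitution $\tau=\nu(t)$ turns \eqref{prop:norminducedbyH:infinitecase:assump2} into
\begin{equation*}
\limsup_{t\to\infty}v(\nu(t))\varphi_{X(0,\infty)}(t)<\infty,
\end{equation*}
because $\nu$ is an increasing bijection of $(0,\infty)$ onto itself and therefore $\tau\to\infty$ if and only if $t\to\infty$.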
Since, owing to \eqref{prop:RXtoYbddiffHY'toX':RXtoZiffHZ'toX'} and \eqref{ch1:ri:X''=X}, $Y(0,L)$ is the optimal domain space for the operator $H_{u,v,\nu}$ and $X(0,L)$ if and only if $Y'(0,L)$ is the optimal target space for the operator $R_{u,v,\nu^{-1}}$ and $X'(0,L)$, \myref{Proposition}{prop:norminducedbyH} actually also characterizes optimal target spaces for the operator $R_{u,v,\nu}$.
\begin{proposition}\label{prop:Roptimalrange}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. If $L=\infty$, assume that $\nu^{-1}\in\Dinf{\infty}$. Let $u,v\colon(0,L)\to(0,\infty)$ be nonincreasing. If $L<\infty$, assume that $v(L^-)>0$. Assume that
\begin{equation}\label{prop:Roptimalrange:assum}
\begin{cases}
u(t)\int_{\nu^{-1}(t)}^L v(s)\d{s}\in X'(0,L)\quad&\text{if $L<\infty$},\\
\begin{gathered}[b]u(t)\chi_{(0,\nu(1))}(t)\int_{\nu^{-1}(t)}^1v(s)\d{s}\in X'(0,\infty)\ \text{and}\\ \limsup_{\tau\to\infty}v(\tau)\|u\chi_{(0,\nu(\tau))}\|_{X'(0,\infty)}<\infty\end{gathered}\quad&\text{if $L=\infty$}.
\end{cases}
\end{equation}
Let $\|\cdot\|_{Y(0,L)}$ be the rearrangement\hyp{}invariant function norm whose associate function norm $\|\cdot\|_{Y'(0,L)}$ is defined as
\begin{equation}\label{prop:Roptimalrange:normdef}
\|f\|_{Y'(0,L)}=\sup_{h\sim f}\Big\|u(t)\int_{\nu^{-1}(t)}^L h(s)v(s)\d{s}\Big\|_{X'(0,L)},\ f\in\mathfrak{M}pl(0,L),
\end{equation}
where the supremum extends over all $h\in\mathfrak{M}pl(0,L)$ equimeasurable with $f$. The rearrangement\hyp{}invariant function space $Y(0,L)$ is the optimal target space for the operator $R_{u,v,\nu}$ and $X(0,L)$. Moreover, if \eqref{prop:Roptimalrange:assum} is not satisfied, then there is no rearrangement\hyp{}invariant function space $Z(0,L)$ such that $R_{u,v,\nu}\colon X(0,L)\to Z(0,L)$ is bounded.
\end{proposition}
Although the functional $\mathfrak{M}pl(0,L)\ni f\mapsto \|H_{u,v,\nu}(f^*)\|_{X(0,L)}$ is usually not a rearrangement\hyp{}invariant function norm, a fact that, in turn, complicates the description of the optimal domain and optimal target spaces for the operators $H_{u,v,\nu}$ and $R_{u,v,\nu^{-1}}$, respectively, it is a rearrangement\hyp{}invariant function norm when $u$, $v$ and $\nu$ are related to each other in such a way that the function $R_{u,v,\nu^{-1}}(g^*)$ is nonincreasing for every $g\in\mathfrak{M}pl(0,L)$. This fact is the content of the following proposition, in which we omit its obvious consequence for optimal spaces.
\begin{proposition}\label{prop:norminducedbyHwhenRnonincreasing}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $u\colon(0,L)\to(0,\infty)$ be a nondegenerate nonincreasing function. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Let $v\colon(0,L)\to(0,\infty)$ be defined by
\begin{equation*}
\frac1{v(t)}=\int_0^{\nu^{-1}(t)} u(s)\d{s} \quad \text{for every $t\in(0,L)$}.
\end{equation*}
Set
\begin{equation*}
\varrho(f)=\Big\|u(t)\int_{\nu(t)}^L f^*(s)v(s)\d{s}\Big\|_{X(0,L)},\ f\in\mathfrak{M}pl(0,L).
\end{equation*}
The functional $\varrho$ is a rearrangement\hyp{}invariant function norm if and only if
\begin{equation}\label{prop:norminducedbyHwhenRnonincreasing:assum}
\Big\|u(t)\chi_{(0,\nu^{-1}(a))}(t)\int_{\nu(t)}^a\frac1{U(\nu^{-1}(s))}\d{s}\Big\|_{X(0,L)} < \infty,
\end{equation}
where $a$ is defined by \eqref{prop:Roptimaldomain:L_or_1}.
\end{proposition}
\begin{proof}
We only sketch the proof (see also \myref{Proposition}{prop:norminducedbyT} and \myref{Remark}{rem:norminducedbyT:varphi_nonincreasing}(i)), which is significantly easier than that of \myref{Proposition}{prop:norminducedbyH}. The functional $\varrho$ plainly possesses properties (P2), (P3) and (P6). It is easy to see that $\varrho$ has property (P4) if and only if \eqref{prop:norminducedbyHwhenRnonincreasing:assum} is satisfied (to this end, note that \eqref{prop:norminducedbyHwhenRnonincreasing:assum} implies \eqref{prop:norminducedbyH:eq11}). As for property (P1), only the subadditivity needs a comment. The key observation is that $(0,L)\ni t\mapsto R_{u,v,\nu^{-1}}(h^*)(t)$ is nonincreasing for every $h\in\mathfrak{M}pl(0,L)$ inasmuch as it is the integral mean of a nonnegative nonincreasing function over the interval $(0,\nu^{-1}(t))$ with respect to the measure $u(s)\d{s}$. Hence, thanks to \eqref{ch1:ri:normX''down}, \eqref{RaHdual} and \eqref{ch1:ri:subadditivityofdoublestar} combined with the Hardy lemma \eqref{ch1:ri:hardy-lemma}, we have that
\begin{align*}
\varrho(f+g)&=\|H_{u,v,\nu}((f+g)^*)\|_{X(0,L)}=\sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L H_{u,v,\nu}((f+g)^*)(t)h^*(t)\d{t} \\
&= \sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L (f+g)^*(t)R_{u,v,\nu^{-1}}(h^*)(t)\d{t} \\
&\leq \sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L f^*(t)R_{u,v,\nu^{-1}}(h^*)(t)\d{t} \\
& \quad + \sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L g^*(t)R_{u,v,\nu^{-1}}(h^*)(t)\d{t} \\
&= \sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L H_{u,v,\nu}(f^*)(t)h^*(t)\d{t} \\
& \quad + \sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L H_{u,v,\nu}(g^*)(t)h^*(t)\d{t} \\
&= \varrho(f) + \varrho(g)
\end{align*}
for every $f,g\in\mathfrak{M}pl(0,L)$. Finally, as for the validity of property (P5), owing to \eqref{ch1:ri:normX''down}, \eqref{RaHdual}, the monotonicity of $R_{u,v,\nu^{-1}}(g^*)$ and the Hardy--Littlewood inequality \eqref{ch1:ri:HL}, we have that
\begin{align*}
\varrho(f)&\geq \varrho(f\chi_E)=\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^L (f\chi_E)^*(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t} \\
&= \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^{|E|} (f\chi_E)^*(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t} \\
&\geq \int_0^{|E|} (f\chi_E)^*(t)\d{t} \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}R_{u,v,\nu^{-1}}(g^*)(|E|) \\
&\geq R_{u,v,\nu^{-1}}\Big(\frac{\chi_{(0,|E|)}}{\|\chi_{(0,|E|)}\|_{X'(0,L)}}\Big)(|E|) \int_0^{|E|} (f\chi_E)^*(t)\d{t}\\
&\geq\frac{v(|E|)}{\varphi_{X'(0,L)}(|E|)}U(\nu^{-1}(|E|)) \int_E f(t)\d{t}
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$ and $E\subseteq(0,L)$ having finite measure.
\end{proof}
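A prototypical example covered by \myref{Proposition}{prop:norminducedbyHwhenRnonincreasing} (the particular choice of parameters is made here only to fix ideas) is $u\equiv1$, $\nu=\id$ and $v(t)=\frac1{t}$, for which
\begin{equation*}
R_{u,v,\nu^{-1}}(g^*)(t)=\frac1{t}\int_0^tg^*(s)\d{s}\quad\text{and}\quad\varrho(f)=\Big\|\int_t^Lf^*(s)\frac{\d{s}}{s}\Big\|_{X(0,L)};
\end{equation*}
the former function is nonincreasing, being the integral mean of a nonincreasing function, and the latter is the functional generated by the classical Copson operator.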
\begin{remark}
Note that $(R_{u_1, v_1, \nu_1} + H_{u_2, v_2, \nu_2})\colon X(0,L) \to Y(0,L)$ is bounded if and only if both $R_{u_1, v_1, \nu_1}$ and $H_{u_2, v_2, \nu_2}$ are bounded from $X(0,L)$ to $Y(0,L)$. Furthermore, it is easy to see that $(Y_1 + Y_2)(0,L)$ is the optimal target space for $R_{u_1, v_1, \nu_1} + H_{u_2, v_2, \nu_2}$ and $X(0,L)$, where $Y_1(0,L)$ and $Y_2(0,L)$ are the optimal target spaces for $R_{u_1, v_1, \nu_1}$ and $H_{u_2, v_2, \nu_2}$, respectively, and $X_1(0,L) \cap X_2(0,L)$ is the optimal domain space for $R_{u_1, v_1, \nu_1} + H_{u_2, v_2, \nu_2}$ and $Y(0,L)$, where $X_1(0,L)$ and $X_2(0,L)$ are the optimal domain spaces for $R_{u_1, v_1, \nu_1}$ and $H_{u_2, v_2, \nu_2}$.
\end{remark}
We conclude this subsection with an important result ensuring that, to verify the boundedness of $H_{u, v, \nu}$ between a pair of rearrangement\hyp{}invariant function spaces, it is sufficient to verify it on nonincreasing functions. While it is an easy consequence of Hardy--Littlewood inequality \eqref{ch1:ri:HL} that this is the case for the operator $R_{u, v, \nu}$, provided that $u$ is nonincreasing, the validity of such a result for the operator $H_{u, v, \nu}$ is far from obvious because this time the integration is not carried out over a right-neighborhood of $0$. Such a result was first obtained by Cianchi, Pick and Slav\'\i kov\'a in \cite[Theorem~9.5]{CPS:15} for $u\equiv1$, $\nu=\id$ and $L<\infty$. Later, Pe\v sa generalized their result to cover also the case $L=\infty$ in \cite[Theorem~3.10]{Pe:20}. In \cite{CM:20}, we needed such a result for $\nu(t)=t^\alpha$, $\alpha>0$, and $u\not\equiv1$, and, while we felt certain that their proofs would carry over to the needed setting, we still had to carefully check them because there is plenty of fine analysis involved. The following proposition extends the result to the generality considered in this paper. It turns out that their proofs can easily be adapted for our setting. Our proof is actually in a way simpler because they considered operators with kernels.
\begin{proposition}\label{prop:copsonrestrvsunrestr}
Let $\|\cdot\|_{X(0,L)}$ and $\|\cdot\|_{Y(0,L)}$ be rearrangement\hyp{}invariant function norms. Let $u,v\colon(0,L)\to(0, \infty)$ be nonincreasing. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Assume that $\nu^{-1}\in\Dsup[\theta]{0}$ for some $\theta>1$. If $L=\infty$, assume that $\nu^{-1}\in\Dsup[\theta]{\infty}$. The following two statements are equivalent.
\begin{enumerate}[(i)]
\item There is a positive constant $C$ such that
\begin{equation}\label{prop:copsonrestrvsunrestr:unrestr}
\Big\|u(t)\int_{\nu(t)}^L|f(s)|v(s)\d{s}\Big\|_{Y(0,L)}\leq C\|f\|_{X(0,L)}
\end{equation}
for every $f\in\mathfrak{M}(0,L)$.
\item There is a positive constant $C$ such that
\begin{equation}\label{prop:copsonrestrvsunrestr:restr}
\Big\|u(t)\int_{\nu(t)}^Lf^*(s)v(s)\d{s}\Big\|_{Y(0,L)}\leq C\|f\|_{X(0,L)}
\end{equation}
for every $f\in\mathfrak{M}(0,L)$.
\end{enumerate}
Moreover, if \eqref{prop:copsonrestrvsunrestr:restr} holds with a constant $C$, then \eqref{prop:copsonrestrvsunrestr:unrestr} holds with the constant $C\frac{\theta}{\theta-1}\sup_{t\in(0,L)}\frac{\nu^{-1}(t)}{\nu^{-1}(\frac{t}{\theta})}$.
\end{proposition}
\begin{proof}
Since (i) plainly implies (ii), we only need to prove that (ii) implies (i). Since the quantities in \eqref{prop:copsonrestrvsunrestr:unrestr} and \eqref{prop:copsonrestrvsunrestr:restr} do not change when the function $v$ is redefined on a countable set, we may assume that $v$ is left continuous. Note that $H_{u,v,\nu}f$ is nonincreasing for every $f\in\mathfrak{M}(0,L)$. Hence, thanks to \eqref{ch1:ri:normX''down} and \eqref{RaHdual}, in order to prove that (ii) implies (i), we need to show that
\begin{equation}\label{prop:copsonrestrvsunrestr:eq8}
\sup_{\substack{f\in{\mathfrak{M}pl(0,L)}\\\|f\|_{X(0,L)}\leq1}}\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{Y'(0,L)}\leq1}}\int_0^Lf(s)R_{u,v,\nu^{-1}}(g^*)(s)\d{s}\lesssim\sup_{\substack{f\in{\mathfrak{M}pl(0,L)}\\\|f\|_{X(0,L)}\leq1}}\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{Y'(0,L)}\leq1}}\int_0^Lf^*(s)R_{u,v,\nu^{-1}}(g^*)(s)\d{s}.
\end{equation}
We define the operator $G$ as
\begin{equation*}
Gg(t)=\sup_{\tau\in[t,L)}R_{u,v,\nu^{-1}}(g^*)(\tau),\ t\in(0,L),
\end{equation*}
for every $g\in\mathfrak{M}pl(0,L)$. Note that $Gg$ is nonincreasing for every $g\in\mathfrak{M}pl(0,L)$. Fix $g\in\mathfrak{M}pl(0,L)$ such that $|\{t\in(0,L)\colon g(t)>0\}|<\infty$, and set
\begin{equation*}
E=\big\{t\in(0,L)\colon R_{u,v,\nu^{-1}}(g^*)(t)<Gg(t)\big\}.
\end{equation*}
It can be shown that there is a countable system $\{(a_k,b_k)\}_{k\in\mathcal{I}}$ of mutually disjoint, bounded intervals in $(0,L)$ such that
\begin{align}
E&=\bigcup_{k\in\mathcal{I}}(a_k,b_k);\label{prop:copsonrestrvsunrestr:eq4}\\
Gg(t)&=R_{u,v,\nu^{-1}}(g^*)(t)\quad\text{if $t\in(0,L)\setminus E$;}\label{prop:copsonrestrvsunrestr:eq5}\\
Gg(t)&=R_{u,v,\nu^{-1}}(g^*)(b_k)\quad\text{if $t\in(a_k,b_k)$ for $k\in\mathcal{I}$}.\label{prop:copsonrestrvsunrestr:eq1}
\end{align}
This was proved in \cite[Proposition~9.3]{CPS:15} for $L<\infty$ and in \cite[Lemma~3.9]{Pe:20} for $L=\infty$. Their proofs are for $u\equiv1$ and $\nu=\id$, but the fact that $g^*u$ is nonincreasing and $R_{u,v,\nu^{-1}}(g^*)$ is upper semicontinuous remains valid in our situation, and so it can be readily seen that their proofs carry over verbatim to our setting.
Note that $M=\sup_{t\in(0,L)}\frac{\nu^{-1}(t)}{\nu^{-1}(\frac{t}{\theta})}<\infty$. Set $\sigma=\frac{\theta}{\theta-1}\in(1,\infty)$. Since $v$ and $g^*u$ are nonincreasing, we have that, for every $k\in\mathcal{I}$,
\begin{align}
(b_k - a_k)R_{u,v,\nu^{-1}}(g^*)(b_k)&=\sigma\int_{\frac{a_k+(\sigma-1)b_k}{\sigma}}^{b_k}R_{u,v,\nu^{-1}}(g^*)(b_k)\d{t}\notag\\
\begin{split}\label{prop:copsonrestrvsunrestr:eq6}
&=\sigma\int_{\frac{a_k+(\sigma-1)b_k}{\sigma}}^{b_k}\frac{v(b_k)}{\nu^{-1}(b_k)}\nu^{-1}(b_k)\int_0^{\nu^{-1}(b_k)}g^*(s)u(s)\d{s}\,\d{t}\\
&\leq\sigma\int_{\frac{a_k+(\sigma-1)b_k}{\sigma}}^{b_k}\frac{v(t)}{\nu^{-1}(t)}\nu^{-1}(b_k)\int_0^{\nu^{-1}(t)}g^*(s)u(s)\d{s}\,\d{t}\\
&\leq\sigma\frac{\nu^{-1}(b_k)}{\nu^{-1}(\frac{a_k+(\sigma-1)b_k}{\sigma})}\int_{\frac{a_k+(\sigma-1)b_k}{\sigma}}^{b_k}R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\\
&\leq\sigma\frac{\nu^{-1}(b_k)}{\nu^{-1}(\frac{b_k}{\theta})}\int_{\frac{a_k+(\sigma-1)b_k}{\sigma}}^{b_k}R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\\
&\leq\sigma M\int_{\frac{a_k+(\sigma-1)b_k}{\sigma}}^{b_k}R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\\
&\leq\sigma M\int_{a_k}^{b_k}R_{u,v,\nu^{-1}}(g^*)(t)\d{t},
\end{split}
\end{align}
where we used the fact that $v$ and $(g^*u)^{**}$ are nonincreasing in the first inequality.
Consider the averaging operator $A$ defined as
\begin{equation*}
Af=f^*\chi_{(0,L)\setminus E}+\sum_{k\in\mathcal I}\Big(\frac1{b_k-a_k}\int_{a_k}^{b_k}f^*(s)\d{s}\Big)\chi_{(a_k, b_k)},\ f\in\mathfrak{M}pl(0,L).
\end{equation*}
Note that $Af$ is a nonincreasing function for every $f\in\mathfrak{M}pl(0,L)$. Furthermore, it is known (\cite[Chapter~2, Theorem~4.8]{BS}) that
\begin{equation}\label{prop:copsonrestrvsunrestr:eq2}
\|Af\|_{X(0,L)}\leq\|f\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
We have, for every $f\in\mathfrak{M}pl(0,L)$, that
\begin{align}\label{prop:copsonrestrvsunrestr:eq3}
\int_0^Lf(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}&\leq\int_0^Lf(t)Gg(t)\d{t}\leq\int_0^Lf^*(t)Gg(t)\d{t} \notag\\
\begin{split}
&=\int_{(0,L)\setminus E}f^*(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}+\sum_{k\in\mathcal{I}}\int_{a_k}^{b_k}f^*(t)R_{u,v,\nu^{-1}}(g^*)(b_k)\d{t}\\
&\leq\int_0^Lf^*(t)\chi_{(0,L)\setminus E}(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\\
&\quad+ \sigma M\sum_{k\in\mathcal{I}}\Big(\frac1{b_k-a_k}\int_{a_k}^{b_k}f^*(t)\d{t}\Big)\Big(\int_{a_k}^{b_k}R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\Big)\\
&\leq \sigma M\int_0^LAf(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t},
\end{split}
\end{align}
owing to the Hardy--Littlewood inequality \eqref{ch1:ri:HL}, \eqref{prop:copsonrestrvsunrestr:eq4}, \eqref{prop:copsonrestrvsunrestr:eq5}, \eqref{prop:copsonrestrvsunrestr:eq1}, and \eqref{prop:copsonrestrvsunrestr:eq6}.
If $L=\infty$ and $g\in\mathfrak{M}pl(0,\infty)$ is positive on a set of infinite measure, we consider $g\chi_{(0,n)}\nearrow g$, $n\to\infty$, and obtain \eqref{prop:copsonrestrvsunrestr:eq3} even for such functions $g$ thanks to the monotone convergence theorem; hence we have proved that
\begin{equation}\label{prop:copsonrestrvsunrestr:eq7}
\int_0^Lf(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\leq \sigma M\int_0^LAf(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\quad\text{for every $f,g\in\mathfrak{M}pl(0,L)$}.
\end{equation}
By combining \eqref{prop:copsonrestrvsunrestr:eq2} and \eqref{prop:copsonrestrvsunrestr:eq7}, we obtain that
\begin{equation*}
\int_0^Lf(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}\leq \sigma M\sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X(0,L)}\leq1}}\int_0^Lh^*(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$, $\|f\|_{X(0,L)}\leq1$, and $g\in\mathfrak{M}pl(0,L)$. Note that here we used the fact that $Af$ is nonincreasing for every $f\in\mathfrak{M}pl(0,L)$. By taking the supremum over all $f,g\in\mathfrak{M}pl(0,L)$ from the closed unit balls of $X(0,L)$ and $Y'(0,L)$, respectively, we obtain \eqref{prop:copsonrestrvsunrestr:eq8} with the multiplicative constant equal to $\sigma M$.
\end{proof}
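For instance, when $\nu=\id$ (a special case singled out here only for illustration), the assumptions imposed on $\nu$ hold with every $\theta>1$ and $\sup_{t\in(0,L)}\frac{\nu^{-1}(t)}{\nu^{-1}(\frac{t}{\theta})}=\theta$, so \eqref{prop:copsonrestrvsunrestr:restr} with a constant $C$ implies \eqref{prop:copsonrestrvsunrestr:unrestr} with the constant $C\frac{\theta^2}{\theta-1}$; minimizing over $\theta>1$, that is, taking $\theta=2$, yields the constant $4C$.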
\myref{Proposition}{prop:copsonrestrvsunrestr} together with \myref{Proposition}{prop:RXtoYbddiffHY'toX'} has the following important corollary, in which the first equality is just a consequence of the Hardy--Littlewood inequality \eqref{ch1:ri:HL} combined with the obvious inequality
\begin{equation*}
\sup_{\|f\|_{X(0,L)}\leq1}\|R_{u,v,\nu}(f^*)\|_{Y(0,L)}\leq\sup_{\|f\|_{X(0,L)}\leq1}\|R_{u,v,\nu}f\|_{Y(0,L)}.
\end{equation*}
\begin{corollary}\label{cor:copsonrestrvsunrestr}
Let $\|\cdot\|_{X(0,L)}$ and $\|\cdot\|_{Y(0,L)}$ be rearrangement\hyp{}invariant function norms. Let $u,v\colon(0,L)\to(0, \infty)$ be nonincreasing. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Assume that $\nu\in\Dsup{0}$. If $L=\infty$, assume that $\nu\in\Dsup{\infty}$. We have that
\begin{equation*}
\begin{aligned}
\sup_{\|f\|_{X(0,L)}\leq1}\|R_{u,v,\nu}(f^*)\|_{Y(0,L)}&=\sup_{\|f\|_{X(0,L)}\leq1}\|R_{u,v,\nu}f\|_{Y(0,L)}\\
&=\sup_{\|g\|_{Y'(0,L)}\leq1}\|H_{u,v,\nu^{-1}}g\|_{X'(0,L)}\\
&\approx\sup_{\|g\|_{Y'(0,L)}\leq1}\|H_{u,v,\nu^{-1}}(g^*)\|_{X'(0,L)}.
\end{aligned}
\end{equation*}
\end{corollary}
\subsection{Simplification of optimal r.i.~norms}\label{sec:simplification}
In this subsection, we will study deeper properties of the important but complicated functional \eqref{prop:norminducedbyH:normdef}. In particular, we shall see that the functional is actually often equivalent to a significantly more manageable functional (cf.~\cite[Theorem~4.2]{EMMP:20}). To this end, we need to introduce a supremum operator. For a fixed function $\varphi\colon(0,L)\to(0,\infty)$, we define the operator $T_\varphi$ as
\begin{equation}\label{opTdef}
T_\varphi f(t)=\frac1{\varphi(t)}\esssup_{s\in[t,L)}\varphi(s)f^*(s),\ t\in(0,L),\ f\in\mathfrak{M}(0,L).
\end{equation}
Note that $T_\varphi f(t)=\frac1{\varphi(t)}\sup_{s\in[t,L)}\varphi(s)f^*(s)$ for every $t\in(0, L)$ provided that $\varphi$ is nondecreasing and/or right-continuous. If $\varphi$ is nonincreasing, we have that $T_\varphi f(t)= \frac{f^*(t)}{\varphi(t)}\varphi(t^+)$ for every $t\in(0,L)$, and so $T_\varphi f=f^*$ up to at most countably many points.
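As a simple illustration of the definition (with functions chosen by us solely for this purpose), let $L=\infty$, $\varphi(t)=t^{\beta}$ for some $\beta>0$, and $f^*(t)=t^{-\gamma}$ for some $\gamma>0$. Then
\begin{equation*}
T_\varphi f(t)=t^{-\beta}\sup_{s\in[t,\infty)}s^{\beta-\gamma}=
\begin{cases}
f^*(t)\quad&\text{if $\beta\leq\gamma$},\\
\infty\quad&\text{if $\beta>\gamma$},
\end{cases}
\end{equation*}
so $T_\varphi$ may act as the identity on the level of nonincreasing rearrangements, but it may also be considerably larger.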
\begin{proposition}\label{prop:norminducedbyT}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. If $L=\infty$, assume that $\nu\in\Dinf{\infty}$. Let $u\colon(0,L)\to(0,\infty)$ be nonincreasing. Let $v\colon(0,L)\to(0,\infty)$ be defined by
\begin{equation*}
\frac1{v(t)}=\int_0^{\nu^{-1}(t)}\xi(s)\d{s} \quad \text{for every $t\in(0,L)$},
\end{equation*}
where $\xi\colon(0,L)\to(0,\infty)$ is a measurable function. If $L<\infty$, assume that $v(L^-)>0$. Assume that
\begin{equation*}
\Big\|u(t)\chi_{(0,\nu^{-1}(a))}(t)\int_{\nu(t)}^a v(s)\d{s}\Big\|_{X(0,L)}<\infty,
\end{equation*}
where $a$ is defined by \eqref{prop:Roptimaldomain:L_or_1}, and that the operator $T_\varphi$ defined by \eqref{opTdef} with $\varphi=\frac{u}{\xi}$ is bounded on $X'(0,L)$. Let $\varrho$ be the functional defined by \eqref{prop:norminducedbyH:normdef} and set
\begin{equation*}
\widetilde{\varrho}(f)=\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lf^*(t)v(t)\int_0^{\nu^{-1}(t)}T_\varphi g(s)u(s)\d{s}\,\d{t},\ f\in\mathfrak{M}pl(0,L).
\end{equation*}
The functionals $\varrho$ and $\widetilde{\varrho}$ are rearrangement\hyp{}invariant function norms. Furthermore, we have that
\begin{align}\label{prop:norminducedbyT:equivalencewithHf*}
\begin{split}
\|H_{u,v,\nu}(f^*)\|_{X(0,L)}&\leq\sup_{h\sim f}\Big\|u(t)\int_{\nu(t)}^L h(s)v(s)\d{s}\Big\|_{X(0,L)}\leq\widetilde{\varrho}(f)\\
&\leq\|T_\varphi\|_{X'(0,L)}\|H_{u,v,\nu}(f^*)\|_{X(0,L)}
\end{split}
\end{align}
for every $f\in\mathfrak{M}pl(0,L)$, where $\|T_\varphi\|_{X'(0,L)}$ stands for the operator norm of $T_\varphi$ on $X'(0,L)$. In particular, the rearrangement\hyp{}invariant function norms $\varrho$ and $\widetilde{\varrho}$ are equivalent.
\end{proposition}
\begin{proof}
Since $f\sim f^*$ for every $f\in\mathfrak{M}pl(0,L)$, the first inequality in \eqref{prop:norminducedbyT:equivalencewithHf*} plainly holds. As for the second inequality, note that the function $(0,L)\ni t\mapsto R_{u,v,\nu^{-1}}(T_\varphi g)(t)$ is nonincreasing for every $g\in\mathfrak{M}pl(0,L)$ because it is the integral mean of the nonincreasing function $(0,L)\ni s\mapsto \esssup_{\tau\in[s,L)}\varphi(\tau)g^*(\tau)$ over the interval $(0,\nu^{-1}(t))$ with respect to the measure $\xi(s)\d{s}$. Consequently, for every $f\in\mathfrak{M}pl(0,L)$ and every $h\in\mathfrak{M}pl(0,L)$ equimeasurable with $f$, we have that
\begin{align}
\|H_{u,v,\nu}h\|_{X(0,L)}&=\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lh(t)R_{u,v,\nu^{-1}}(g^*)(t)\d{t} \notag\\
\begin{split}\label{prop:norminducedbyT:eq1}
&\leq\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lh(t)R_{u,v,\nu^{-1}}(T_\varphi g)(t)\d{t}\\
&\leq\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lh^*(t)R_{u,v,\nu^{-1}}(T_\varphi g)(t)\d{t}\\
&=\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lf^*(t)R_{u,v,\nu^{-1}}(T_\varphi g)(t)\d{t}\\
&=\widetilde{\varrho}(f),
\end{split}
\end{align}
where we used \eqref{ch1:ri:normX''down} (note that the function $H_{u,v,\nu}h$ is nonincreasing for every $h\in\mathfrak{M}pl(0,L)$) together with \eqref{RaHdual} in the first equality, the pointwise estimate $g^*(t)\leq T_\varphi g(t)$ for a.e.~$t\in(0,L)$ in the first inequality, the Hardy--Littlewood inequality \eqref{ch1:ri:HL} in the second inequality, and the equimeasurability of $f$ and $h$ in the penultimate equality. Hence the second inequality in \eqref{prop:norminducedbyT:equivalencewithHf*} follows from \eqref{prop:norminducedbyT:eq1}. As for the third inequality in \eqref{prop:norminducedbyT:equivalencewithHf*}, we have that
\begin{align*}
\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^Lf^*(t)R_{u,v,\nu^{-1}}(T_\varphi g)(t)\d{t}&=\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^LT_\varphi g(t)H_{u,v,\nu}(f^*)(t)\d{t}\\
&\leq\|H_{u,v,\nu}(f^*)\|_{X(0,L)}\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\|T_\varphi g\|_{X'(0,L)}\\
&=\|T_\varphi\|_{X'(0,L)}\|H_{u,v,\nu}(f^*)\|_{X(0,L)}
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$ thanks to \eqref{RaHdual} and the H\"older inequality \eqref{ch1:ri:holder}.
Second, we shall prove that the functional $\varrho$, defined by \eqref{prop:norminducedbyH:normdef}, is a rearrangement\hyp{}invariant function norm. If $L<\infty$, this follows immediately from \myref{Proposition}{prop:norminducedbyH}. If $L=\infty$, owing to \myref{Proposition}{prop:norminducedbyH} again, we only need to verify that \eqref{prop:norminducedbyH:infinitecase:assump2} is satisfied. To this end, it follows from the proof of property (P4) of $\varrho$ that, if \eqref{prop:norminducedbyH:infinitecase:assump2} did not hold, then we would have
\begin{equation*}
\sup_{h\sim \chi_{(0,1)}}\Big\|u(t)\int_{\nu(t)}^\infty h(s)v(s)\d{s}\Big\|_{X(0,\infty)}=\infty.
\end{equation*}
However, thanks to \eqref{prop:norminducedbyT:equivalencewithHf*}, we have that
\begin{align*}
\sup_{h\sim \chi_{(0,1)}}\Big\|u(t)\int_{\nu(t)}^\infty h(s)v(s)\d{s}\Big\|_{X(0,\infty)}&\approx\|H_{u,v,\nu}\chi_{(0,1)}\|_{X(0,\infty)}\\
&=\Big\|u(t)\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}<\infty.
\end{align*}
Therefore, \eqref{prop:norminducedbyH:infinitecase:assump2} is satisfied.
Finally, now that we know that the functionals $\varrho$ and $\widetilde{\varrho}$ are equivalent and the former is a rearrangement\hyp{}invariant function norm, it readily follows that $\widetilde{\varrho}$, too, is a rearrangement\hyp{}invariant function norm once we observe that $\widetilde{\varrho}$ is subadditive. The subadditivity follows from
\begin{align*}
\widetilde{\varrho}(f+g)&=\sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^L(f+g)^*(t)R_{u,v,\nu^{-1}}(T_\varphi h)(t)\d{t}\\
&\leq\sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^Lf^*(t)R_{u,v,\nu^{-1}}(T_\varphi h)(t)\d{t}\\
&\quad+\sup_{\substack{h\in{\mathfrak{M}pl(0,L)}\\\|h\|_{X'(0,L)}\leq1}}\int_0^Lg^*(t)R_{u,v,\nu^{-1}}(T_\varphi h)(t)\d{t}\\
&=\widetilde{\varrho}(f)+\widetilde{\varrho}(g)\quad\text{for every $f,g\in\mathfrak{M}pl(0,L)$},
\end{align*}
where we used \eqref{ch1:ri:subadditivityofdoublestar} together with the Hardy lemma \eqref{ch1:ri:hardy-lemma} (recall that the function $R_{u,v,\nu^{-1}}(T_\varphi h)$ is nonincreasing for every $h\in\mathfrak{M}pl(0,L)$).
\end{proof}
\begin{remarks}\label{rem:norminducedbyT:varphi_nonincreasing}\hphantom{}
\begin{enumerate}[(i)]
\item If $\varphi=\frac{u}{\xi}$ is (equivalent to) a nonincreasing function, $T_\varphi f(t)$ is (equivalent to) $f^*(t)$ for a.e.~$t\in(0,L)$; hence $T_\varphi$ is bounded on every rearrangement\hyp{}invariant function space in this case. Furthermore, when $\varphi=\frac{u}{\xi}$ is nonincreasing, the norm of $T_\varphi$ on every rearrangement\hyp{}invariant function space is equal to $1$; therefore all the inequalities in \eqref{prop:norminducedbyT:equivalencewithHf*} are actually equalities (cf.~\myref{Proposition}{prop:norminducedbyHwhenRnonincreasing}) in this case.
\item The boundedness of $T_\varphi$ on a large number of rearrangement\hyp{}invariant function spaces is characterized by \cite[Theorem~3.2]{GOP:06}.
\end{enumerate}
\end{remarks}
By combining \myref{Proposition}{prop:norminducedbyT} and \myref{Proposition}{prop:Roptimalrange}, we obtain the following proposition, which tells us that the optimal target space for the operator $R_{u,v,\nu}$ and a rearrangement\hyp{}invariant function space $X(0,L)$ has a much more manageable description than that given by \myref{Proposition}{prop:Roptimalrange} provided that the supremum operator $T_\varphi$ defined by \eqref{opTdef} with an appropriate function $\varphi$ is bounded on $X(0,L)$.
\begin{proposition}\label{prop:RoptimalrangeTbound}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. If $L=\infty$, assume that $\nu^{-1}\in\Dinf{\infty}$. Let $u\colon(0,L)\to(0,\infty)$ be nonincreasing. Let $v\colon(0,L)\to(0,\infty)$ be defined by
\begin{equation*}
\frac1{v(t)}=\int_0^{\nu(t)}\xi(s)\d{s} \quad \text{for every $t\in(0,L)$},
\end{equation*}
where $\xi\colon(0,L)\to(0,\infty)$ is a measurable function. If $L<\infty$, assume that $v(L^-)>0$. Furthermore, assume that
\begin{equation*}
\Big\|u(t)\chi_{(0,\nu(a))}(t)\int_{\nu^{-1}(t)}^a v(s)\d{s}\Big\|_{X'(0,L)} < \infty,
\end{equation*}
where $a$ is defined by \eqref{prop:Roptimaldomain:L_or_1}. Finally, assume that the operator $T_\varphi$ is bounded on $X(0,L)$, where $\varphi=\frac{u}{\xi}$. Let $\|\cdot\|_{Y(0,L)}$ be the rearrangement\hyp{}invariant function norm whose associate function norm $\|\cdot\|_{Y'(0,L)}$ is defined as
\begin{equation*}
\|f\|_{Y'(0,L)}=\sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X(0,L)}\leq1}}\int_0^Lf^*(t)v(t)\int_0^{\nu(t)}T_\varphi g(s)u(s)\d{s}\,\d{t},\ f\in\mathfrak{M}pl(0,L).
\end{equation*}
The rearrangement\hyp{}invariant function space $Y(0,L)$ is the optimal target space for the operator $R_{u,v,\nu}$ and $X(0,L)$. Moreover,
\begin{equation*}
\|H_{u,v,\nu^{-1}}(f^*)\|_{X'(0,L)}\leq \|f\|_{Y'(0,L)}\leq\|T_\varphi\|_{X(0,L)}\|H_{u,v,\nu^{-1}}(f^*)\|_{X'(0,L)}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$, where $\|T_\varphi\|_{X(0,L)}$ stands for the operator norm of $T_\varphi$ on $X(0,L)$.
\end{proposition}
\begin{remark}
Owing to \myref{Remark}{rem:optimal_for_R_iff_for_H}, \myref{Proposition}{prop:RoptimalrangeTbound} can also be used to get a simpler description of optimal domain spaces for the operator $H_{u,v,\nu}$.
\end{remark}
We already know that a sufficient condition for simplification of the complicated function norm \eqref{prop:Roptimalrange:normdef} is boundedness of a certain supremum operator. We shall soon see that the connection between the supremum operator and the question of whether the supremum in the function norm can be `simplified' is actually much tighter than it may look at first glance. Furthermore, not only is the boundedness of the supremum operator often also necessary for simplifying \eqref{prop:Roptimalrange:normdef}, but it also goes hand in hand with the notion of being an optimal function space and a certain interpolation property of the rearrangement\hyp{}invariant function space on which the supremum operator acts.
As the following theorem shows, there is a connection between a rearrangement\hyp{}invariant function space $X(0,L)$ being an interpolation space with respect to a certain pair of endpoint spaces and the boundedness of $T_\varphi$ on the associate space of $X(0,L)$ (cf.~\citep[Theorem~3.12]{KP:06}). We say that a measurable a.e.~positive function on $(0,L)$ satisfies \emph{the averaging condition \eqref{averaging_condition}} (cf.~\cite[Lemma~2.3]{S:79}) if
\begin{equation}\label{averaging_condition}
\esssup_{t\in(0, L)} \frac1{t w(t)} \int_0^t w(s) \d{s} < \infty,
\end{equation}
in which $w$ temporarily denotes the function in question. The value of the essential supremum will be referred to as \emph{the averaging constant} of the function.
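To keep a simple example in mind (included only for orientation), every power weight $w(t)=t^{\beta-1}$, $t\in(0,L)$, with $\beta>0$ satisfies the averaging condition~\eqref{averaging_condition}, since
\begin{equation*}
\frac1{t w(t)} \int_0^t w(s) \d{s}=\frac1{t\,t^{\beta-1}}\cdot\frac{t^{\beta}}{\beta}=\frac1{\beta}\quad\text{for every $t\in(0,L)$},
\end{equation*}
and its averaging constant equals $\frac1{\beta}$; this elementary computation underlies several of the power-weight examples appearing below.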
\begin{theorem}\label{thm:Tbounded_iff_Xinterpolation}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\varphi\colon (0,L) \to (0, \infty)$ be a measurable function that is equivalent to a continuous nondecreasing function. Set $\xi = \frac1{\varphi}$. Assume that $\xi$ satisfies the averaging condition~\eqref{averaging_condition}. Set $\psi(t)=\frac{t}{\int_0^t \xi(s) \d{s}}$, $t\in(0,L)$. Consider the following three statements.
\begin{enumerate}[(i)]
\item The operator $T_\varphi$, defined by \eqref{opTdef}, is bounded on $X'(0,L)$.
\item $X(0,L) \in \Int\big(\Lambda^1_\xi(0,L), L^\infty(0,L)\big)$.
\item $X'(0,L) \in \Int\big(L^1(0,L), M_\psi(0,L)\big)$.
\end{enumerate}
If $L<\infty$, then the three statements are equivalent to each other. If $L=\infty$, then (i) implies (ii), and (iii) implies (i).
\end{theorem}
\begin{proof}
We start off by noting that we may without loss of generality assume that $\varphi$ is continuous and nondecreasing. Furthermore, $(\Lambda^1_\xi)'(0,L)=M_\psi(0,L)$ (\cite[Theorem~10.4.1]{PKJF:13}) and
\begin{equation}\label{thm:Tbounded_iff_Xinterpolation:eq6}
\psi\approx\varphi \quad \text {on $(0,L)$}
\end{equation}
thanks to the fact that $\xi$ satisfies the averaging condition~\eqref{averaging_condition} and is (equivalent to) a nonincreasing function. We shall show that (i) implies (ii), whether $L$ is finite or infinite. First, we observe that $X(0,L)$ is an intermediate space between $\Lambda^1_\xi(0,L)$ and $L^\infty(0,L)$. Set $\Xi_L=\int_0^L \xi(s)\d{s}\in(0,\infty]$ (note that $\Xi_L<\infty$ if $L<\infty$). Let $\Xi^{-1}\colon (0, \Xi_L) \to (0,L)$ be the increasing bijection that is inverse to the function $(0,L) \ni t \mapsto \int_0^t \xi(s)\d{s}$. By \cite[Lemma~6.8]{S:72}, we have that
\begin{equation}\label{thm:Tbounded_iff_Xinterpolation:eq3}
\K(f,t; \Lambda^1_\xi, L^\infty) \approx \int_0^{\Xi^{-1}(t)} f^*(s) \xi(s) \d{s} \quad \text{for every $f\in\mathfrak{M}pl(0,L)$ and $t\in(0,\Xi_L)$}.
\end{equation}
Let $a$ be defined by \eqref{prop:Roptimaldomain:L_or_1}. The embedding $X(0,L) \hookrightarrow (\Lambda^1_\xi + L^\infty)(0,L)$ follows from
\begin{align*}
\| f \|_{(\Lambda^1_\xi + L^\infty)(0,L)} &= \K(f,1; \Lambda^1_\xi, L^\infty) \leq \max\Big\{1,\frac1{\int_0^a \xi(s)\d{s}}\Big\} \K(f,\int_0^a \xi(s)\d{s}; \Lambda^1_\xi, L^\infty) \\
&\approx \int_0^a f^*(t) \xi(t) \d{t} \lesssim \frac1{\psi(a)} \int_0^a f^*(t) T_\varphi \chi_{(0,a)}(t) \d{t} \\
&\lesssim \|f\|_{X(0,L)}\|T_\varphi \chi_{(0,a)}\|_{X'(0,L)} \lesssim \|f\|_{X(0,L)},
\end{align*}
where we used H\"older's inequality \eqref{ch1:ri:holder} in the last but one inequality and the boundedness of $T_\varphi$ on $X'(0,L)$ in the last one. We now turn our attention to the embedding $\Lambda^1_\xi(0,L)\cap L^\infty(0,L) \hookrightarrow X(0,L)$. If $L<\infty$, the embedding is plainly true owing to \eqref{ch1:ri:smallestandlargestrispacefinitemeasure}. If $L=\infty$, it is sufficient to observe that, for every $f\in X'(0,\infty)$, $f^*=g+h$ for some functions $g\in L^1(0,\infty)$ and $h\in M_\psi(0,\infty)$ thanks to \eqref{ch1:ri:XtoYiffY'toX'}, the fact that $(\Lambda^1_\xi(0,\infty)\cap L^\infty(0,\infty))' = L^1(0,\infty) + M_\psi(0,\infty)$ by \eqref{ch1:ri:dual_sum_and_inter}, and \eqref{ch1:ri:inclusion_is_always_continuous}. Set $g=f^*\chi_{(0,1)}$ and $h=f^*\chi_{(1,\infty)}$. Clearly, $g\in L^1(0,\infty)$ thanks to property (P5) of $\|\cdot\|_{X'(0,L)}$. Furthermore
\begin{align*}
\|h\|_{M_\psi(0,\infty)} &\approx \sup_{t\in(0,\infty)}\psi(t) (f^*\chi_{(1,\infty)})^*(t) \lesssim \sup_{t\in(0,\infty)}\psi(t+1) f^*(t+1) = \frac{\psi(1)}{\psi(1)}\sup_{t\in [1,\infty)}\psi(t) f^*(t) \\
&= \psi(1) T_\psi f(1) \approx T_\varphi f(1) < \infty,
\end{align*}
where we used the fact that $\xi$ satisfies the averaging condition~\eqref{averaging_condition} in the first equivalence (cf.~\cite[Lemma~2.1]{MO:19}) and \eqref{thm:Tbounded_iff_Xinterpolation:eq6} in the last one. Note that $T_\varphi f(1)$ is finite owing to \eqref{ch1:ri:XembeddedinM0} inasmuch as $T_\varphi f\in X'(0,\infty)$ and it is a nonincreasing function. Next, now that we know that $X(0,L)$ is an intermediate space between $\Lambda^1_\xi(0,L)$ and $L^\infty(0,L)$, in order to prove that (i) implies (ii), it remains to show that every admissible operator $S$ for the couple $\big(\Lambda^1_\xi(0,L), L^\infty(0,L)\big)$ is bounded on $X(0,L)$. Let $S$ be such an operator. Since $S$ is linear and bounded on both $\Lambda^1_\xi(0,L)$ and $L^\infty(0,L)$, it follows that (see~\cite[Chapter~5, Theorem~1.11]{BS})
\begin{equation}\label{thm:Tbounded_iff_Xinterpolation:eq4}
\K(Sf,t; \Lambda^1_\xi, L^\infty) \lesssim \K(f,t; \Lambda^1_\xi, L^\infty) \quad \text{for every $f\in(\Lambda^1_\xi + L^\infty)(0,L)$ and $t\in(0,L)$}.
\end{equation}
By combining \eqref{thm:Tbounded_iff_Xinterpolation:eq3} and \eqref{thm:Tbounded_iff_Xinterpolation:eq4}, we obtain that
\begin{equation}\label{thm:Tbounded_iff_Xinterpolation:eq5}
\int_0^t (Sf)^*(s) \xi(s) \d{s} \lesssim \int_0^t f^*(s) \xi(s) \d{s} \quad \text{for every $f\in(\Lambda^1_\xi + L^\infty)(0,L)$ and $t\in(0,L)$}.
\end{equation}
Since the function $(0,L)\ni t \mapsto \sup_{t\leq s < L}\varphi(s)g^*(s)$ is nonincreasing for every $g\in\mathfrak{M}pl(0,L)$, the Hardy lemma \eqref{ch1:ri:hardy-lemma} together with \eqref{thm:Tbounded_iff_Xinterpolation:eq5} implies that
\begin{equation*}
\int_0^L (Sf)^*(t) T_\varphi g(t) \d{t} \lesssim \int_0^L f^*(t) T_\varphi g(t) \d{t} \quad \text{for every $f\in(\Lambda^1_\xi + L^\infty)(0,L)$ and $g\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
Therefore
\begin{align*}
\|Sf\|_{X(0,L)} &= \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^{L}(Sf)^*(t)g^*(t)\d{t} \leq \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^{L}(Sf)^*(t)T_\varphi g(t)\d{t} \\
&\lesssim \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\int_0^{L}f^*(t)T_\varphi g(t)\d{t} \leq \|f\|_{X(0,L)} \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\|g\|_{X'(0,L)}\leq1}}\|T_\varphi g\|_{X'(0,L)} \\
&\lesssim \|f\|_{X(0,L)}
\end{align*}
for every $f\in X(0,L)$, where we used \eqref{ch1:ri:normX''down} in the equality, H\"older's inequality \eqref{ch1:ri:holder} in the last but one inequality and the boundedness of $T_\varphi$ on $X'(0,L)$ in the last one. Hence $S$ is bounded on $X(0,L)$.
We shall now prove that (iii) implies (i), whether $L$ is finite or infinite. Since $\xi$ is nonincreasing and satisfies the averaging condition~\eqref{averaging_condition}, it follows from \citep[Theorem~3.2]{GOP:06} (cf.~\cite[Lemma~3.1]{MO:19}) that $T_\varphi$ is bounded on $L^1(0,L)$. Furthermore, $T_\varphi$ is also bounded on $M_\psi(0,L)$, for
\begin{align*}
\|T_\varphi f\|_{M_\psi(0,L)} &= \sup_{t\in(0,L)} (T_\varphi f)^{**}(t)\psi(t) = \sup_{t\in(0,L)} \frac1{\int_0^t \xi(s) \d{s}}\int_0^t \xi(s) \sup_{s\leq \tau < L} \varphi(\tau)f^*(\tau) \d{s} \\
&\leq \sup_{t\in(0,L)}\varphi(t)f^*(t) \lesssim \|f\|_{M_\psi(0,L)},
\end{align*}
where we used \eqref{thm:Tbounded_iff_Xinterpolation:eq6} and \eqref{ch1:ri:twostarsdominateonestar} in the last inequality. Fix $f\in (L^1 + M_\psi)(0,L)$. We claim that
\begin{equation}\label{thm:Tbounded_iff_Xinterpolation:eq1}
\K(T_\varphi f, t; L^1, M_\psi)\lesssim \K(f, t; L^1, M_\psi) \quad \text{for every $t\in(0,\infty)$}
\end{equation}
with a multiplicative constant independent of $f$. Let $f=g+h$ with $g\in L^1(0, L)$ and $h\in M_\psi(0, L)$ be a decomposition of $f$. Note that the fact that $\xi$ is nonincreasing and satisfies the averaging condition~\eqref{averaging_condition} implies that
\begin{equation*}
\varphi(s)\lesssim \varphi\Big(\frac{s}{2}\Big) \quad \text{for every $s\in(0,L)$}.
\end{equation*}
Thanks to this and \eqref{ch1:ri:halfsubadditivityofonestar}, we have that
\begin{equation}\label{thm:Tbounded_iff_Xinterpolation:eq2}
\begin{aligned}
T_\varphi f(s)&\leq \frac1{\varphi(s)}\Big(\sup_{s\leq\tau < L}\varphi(\tau) g^*\Big(\frac{\tau}{2}\Big) + \sup_{s\leq\tau < L}\varphi(\tau) h^*\Big(\frac{\tau}{2}\Big)\Big) \\
&\lesssim T_\varphi g\Big(\frac{s}{2}\Big) + T_\varphi h\Big(\frac{s}{2}\Big)
\end{aligned}
\end{equation}
for every $s\in(0, L)$. By combining \eqref{thm:Tbounded_iff_Xinterpolation:eq2} and the boundedness of the dilation operator $D_2$ (see~\eqref{ch1:ri:dilation}) with the fact that $T_\varphi$ is bounded on both $L^1(0,L)$ and $M_\psi(0,L)$, we obtain that (cf.~\cite[p.~497]{BK:91})
\begin{align*}
\K(T_\varphi f, t; L^1, M_\psi) &\lesssim \K\Big(T_\varphi g\Big(\frac{\cdot}{2}\Big), t; L^1, M_\psi\Big) + \K\Big(T_\varphi h\Big(\frac{\cdot}{2}\Big), t; L^1, M_\psi\Big) \\
&\lesssim \|T_\varphi g\|_{L^1(0, L)} + t\|T_\varphi h\|_{M_\psi(0, L)} \lesssim \|g\|_{L^1(0, L)} + t\|h\|_{M_\psi(0, L)}
\end{align*}
for every $t\in(0,\infty)$, in which the multiplicative constants are independent of $f,g,h$ and $t$. Hence \eqref{thm:Tbounded_iff_Xinterpolation:eq1} is true. Now, since we have \eqref{thm:Tbounded_iff_Xinterpolation:eq1} at our disposal, there is a linear operator $S$ bounded on both $L^1(0,L)$ and $M_\psi(0,L)$ with norms that can be bounded from above by a constant independent of $f$ such that $Sf=T_\varphi f$ by virtue of \cite[Theorem~2]{CN:85}. Owing to (iii), $S$ is also bounded on $X'(0,L)$; moreover, its norm on $X'(0,L)$ can be bounded from above by a constant independent of $f$ (\cite[Chapter~3, Proposition~1.11]{BS}). Therefore
\begin{equation*}
\|T_\varphi f\|_{X'(0,L)} = \|Sf\|_{X'(0,L)} \lesssim \|f\|_{X'(0,L)},
\end{equation*}
in which the multiplicative constant is independent of $f$; hence $T_\varphi$ is bounded on $X'(0,L)$.
Finally, if $L<\infty$, then (ii) is equivalent to (iii); hence the three statements are equivalent to each other in this case. Indeed, since $(\Lambda^1_\xi+L^\infty)(0,L)=\Lambda^1_\xi(0,L)$ and $(L^1 + M_\psi)(0,L)=L^1(0,L)$ owing to \eqref{ch1:ri:smallestandlargestrispacefinitemeasure} and both $\Lambda^1_\xi(0,L)$ and $L^1(0,L)$ have absolutely continuous norm (in the sense of \cite[Chapter~1, Definition~3.1]{BS}), the equivalence of (ii) and (iii) follows from \cite[Corollary~3.6]{M:89}.
\end{proof}
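Before moving on, let us record a simple instance of \myref{Theorem}{thm:Tbounded_iff_Xinterpolation}, included only for orientation; the identifications below are standard and are meant up to equivalence of norms. Take $\varphi(t)=t^{\frac12}$, $t\in(0,L)$. Then $\xi(t)=t^{-\frac12}$ satisfies the averaging condition~\eqref{averaging_condition} with averaging constant $2$, and $\psi(t)=\frac{t}{\int_0^t s^{-\frac12}\d{s}}=\frac{t^{\frac12}}{2}$. Consequently, $\Lambda^1_\xi(0,L)$ and $M_\psi(0,L)$ coincide, up to equivalence of norms, with the Lorentz spaces $L^{2,1}(0,L)$ and $L^{2,\infty}(0,L)$, and for $L<\infty$ the theorem asserts that $T_\varphi$ is bounded on $X'(0,L)$ if and only if $X(0,L)\in\Int\big(L^{2,1}(0,L), L^\infty(0,L)\big)$, if and only if $X'(0,L)\in\Int\big(L^1(0,L), L^{2,\infty}(0,L)\big)$.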
A great deal of our effort has been devoted to describing optimal rearrangement\hyp{}invariant function spaces. A natural, somewhat related question is whether every rearrangement\hyp{}invariant function space can be an optimal space. Suppose that $Z(0,L)$ is the optimal domain space for $H_{u, v, \nu}$ and $X(0,L)$, and denote by $W(0,L)$ the optimal target space for $H_{u, v, \nu}$ and $Z(0,L)$. Owing to the optimality of $W(0,L)$, we immediately see that $W(0,L)\hookrightarrow X(0,L)$. What is not obvious, however, is whether the opposite embedding is, or at least can be, true as well. This leads us to the following theorem.
\begin{theorem}\label{thm:char_of_optimal_spaces}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. If $L=\infty$, assume that $\nu\in\Dinf{\infty}$. Let $u,v\colon(0,L)\to(0,\infty)$ be nonincreasing functions. Assume that $u$ is nondegenerate. If $L<\infty$, assume that $u(L^-)>0$ and $v(L^-)>0$. Let $\varrho$ be the functional defined by \eqref{prop:norminducedbyH:normdef}. The following three statements are equivalent.
\begin{enumerate}[(i)]
\item The space $X(0,L)$ is the optimal target space for the operator $H_{u, v, \nu}$ and some rearrangement\hyp{}invariant function space.
\item The space $X'(0,L)$ is the optimal domain space for the operator $R_{u, v, \nu^{-1}}$ and some rearrangement\hyp{}invariant function space.
\item We have that
\begin{equation}\label{thm:char_of_optimal_spaces:normonX'}
\|f\|_{X'(0,L)}\approx \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\varrho(g)\leq1}}\int_0^Lg(t)R_{u, v, \nu^{-1}}(f^*)(t)\d{t} \quad \text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
\end{enumerate}
If, in addition,
\begin{equation*}
\frac1{v(t)}=\int_0^{\nu^{-1}(t)}\xi(s)\d{s} \quad \text{for every $t\in(0,L)$},
\end{equation*}
where $\xi\colon(0,L)\to(0,\infty)$ is a measurable function satisfying
\begin{equation}\label{thm:char_of_optimal_spaces:integral_cond_on_xi}
\frac{u(t)}{U(t)}\int_0^t \xi(s) \d{s} \lesssim \xi(t) \quad \text{for a.e.~$t\in(0,L)$},
\end{equation}
and the function $\varphi\circ\nu^{-1}$, where $\varphi=\frac{u}{\xi}$, is equivalent to a quasiconcave function, then each of the equivalent statements above implies that
\begin{enumerate}
\item[(iv)] the operator $T_\varphi$, defined by \eqref{opTdef}, is bounded on $X'(0,L)$.
\end{enumerate}
\end{theorem}
\begin{proof}
We start off by observing that each of the three equivalent statements implies that the functional $\varrho$ is actually a rearrangement\hyp{}invariant function norm. Statements (i) and (ii) imply it thanks to \myref{Proposition}{prop:norminducedbyH} and \myref{Proposition}{prop:Roptimalrange}, respectively. If we assume (iii), then, in particular, the set $\{g\in\mathfrak{M}pl(0,L)\colon\varrho(g)\leq1\}$ needs to contain a function $g\in\mathfrak{M}pl(0,L)$ not equal to $0$ a.e. It follows from \myref{Proposition}{prop:norminducedbyH} and its proof that $\varrho(g)=\infty$ for every $g\in\mathfrak{M}pl(0,L)$ not equal to $0$ a.e.~provided that $\varrho$ fails to be a rearrangement\hyp{}invariant function norm. Hence $\varrho$ is a rearrangement\hyp{}invariant function norm if (iii) is assumed. Therefore, in all of the cases, we are entitled to denote the corresponding rearrangement\hyp{}invariant function space over $(0,L)$ by $Z(0,L)$. Moreover, note that \eqref{thm:char_of_optimal_spaces:normonX'} actually reads as
\begin{equation}\label{thm:char_of_optimal_spaces:normonX'notationofnorms}
\|f\|_{X'(0,L)}\approx\|R_{u, v, \nu^{-1}}(f^*)\|_{Z'(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
Second, statements (i) and (ii) are clearly equivalent to each other owing to \myref{Remark}{rem:optimal_for_R_iff_for_H}.
Next, the proof of the fact that (ii) implies (iii) is based on the following important observation. If $X'(0,L)$ is the optimal domain space for the operator $R_{u, v, \nu^{-1}}$ and a rearrangement\hyp{}invariant function space $Y(0,L)$, then, in particular, $R_{u, v, \nu^{-1}}\colon X'(0,L)\to Y(0,L)$ is bounded. Consequently, by virtue of \myref{Proposition}{prop:Roptimalrange}, the rearrangement\hyp{}invariant function space whose associate function norm is $\varrho$ is the optimal target space for the operator $R_{u, v, \nu^{-1}}$ and $X'(0,L)$. By \eqref{ch1:ri:X''=X}, this optimal target space is actually the space $Z'(0,L)$. Owing to \myref{Proposition}{prop:norminducedbyR}, the optimal domain space for the operator $R_{u, v, \nu^{-1}}$ and $Z'(0,L)$ exists, and we denote it by $W(0,L)$. Moreover,
\begin{equation}\label{thm:char_of_optimal_spaces:eq3}
\|f\|_{W(0,L)}\approx\|R_{u, v, \nu^{-1}}(f^*)\|_{Z'(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
The crucial observation is that we have, in fact, that $X'(0,L)=W(0,L)$. The embedding $X'(0,L)\hookrightarrow W(0,L)$ is valid because $R_{u, v, \nu^{-1}}\colon X'(0,L)\to Z'(0,L)$ is bounded and $W(0,L)$ is the optimal domain space for the operator $R_{u, v, \nu^{-1}}$ and $Z'(0,L)$. The validity of the opposite embedding is slightly more complicated. Since $R_{u, v, \nu^{-1}}\colon X'(0,L)\to Y(0,L)$ is bounded and $Z'(0,L)$ is the optimal target space for the operator $R_{u, v, \nu^{-1}}$ and $X'(0,L)$, we have that $Z'(0,L)\hookrightarrow Y(0,L)$. Consequently, since $R_{u, v, \nu^{-1}}\colon W(0,L)\to Z'(0,L)$ is bounded, so is $R_{u, v, \nu^{-1}}\colon W(0,L)\to Y(0,L)$. Using the fact that $X'(0,L)$ is the optimal domain space for the operator $R_{u, v, \nu^{-1}}$ and $Y(0,L)$, we obtain that $W(0,L)\hookrightarrow X'(0,L)$. Now that we know that $X'(0,L)=W(0,L)$, \eqref{thm:char_of_optimal_spaces:normonX'notationofnorms} follows from \eqref{thm:char_of_optimal_spaces:eq3}.
Next, note that (iii) implies (ii). Indeed, \eqref{thm:char_of_optimal_spaces:normonX'notationofnorms} coupled with \myref{Proposition}{prop:norminducedbyR} tells us that $X'(0,L)$ is the optimal domain space for the operator $R_{u, v, \nu^{-1}}$ and $Z'(0,L)$.
Finally, it only remains to prove that (iii) implies (iv) under the additional assumptions. By combining \eqref{thm:char_of_optimal_spaces:normonX'notationofnorms} with the fact that $T_\varphi f$ is equivalent to a nonincreasing function for every $f\in\mathfrak{M}pl(0,L)$ (the multiplicative constants in this equivalence are independent of $f$), we have that
\begin{equation}\label{thm:char_of_optimal_spaces:eq7}
\begin{aligned}
\|T_\varphi f\|_{X'(0,L)}&\approx\|R_{u, v, \nu^{-1}}((T_\varphi f)^*)\|_{Z'(0,L)}\approx\|R_{u, v, \nu^{-1}}(T_\varphi f)\|_{Z'(0,L)}\\
&=\Big\|v(t)\int_0^{\nu^{-1}(t)} \xi(s) \esssup_{\tau\in[s,L)}\varphi(\tau)f^*(\tau)\d{s}\Big\|_{Z'(0,L)}\\
&\leq\Big\|v(t)\int_0^{\nu^{-1}(t)} \xi(s) \esssup_{\tau\in[s,\nu^{-1}(t))}\varphi(\tau)f^*(\tau)\d{s}\Big\|_{Z'(0,L)}\\
&\quad+\Big\|v(t) \Big(\esssup_{\tau\in[\nu^{-1}(t),L)}\varphi(\tau)f^*(\tau) \Big) \int_0^{\nu^{-1}(t)}\xi(s)\d{s}\Big\|_{Z'(0,L)}\\
&=\Big\|v(t)\int_0^{\nu^{-1}(t)} \xi(s) \esssup_{\tau\in[s,\nu^{-1}(t))}\varphi(\tau)f^*(\tau)\d{s}\Big\|_{Z'(0,L)}\\
&\quad+\Big\|\esssup_{\tau\in[\nu^{-1}(t),L)}\varphi(\tau)f^*(\tau)\Big\|_{Z'(0,L)}.
\end{aligned}
\end{equation}
Since $\varphi$ is equivalent to a continuous nondecreasing function and $\xi$ satisfies \eqref{thm:char_of_optimal_spaces:integral_cond_on_xi}, it follows from \cite[Theorem~3.2]{GOP:06} that
\begin{equation*}
\int_0^{\nu^{-1}(t)} \xi(s) \esssup_{\tau\in[s,\nu^{-1}(t))}\varphi(\tau)f^*(\tau)\d{s}\lesssim\int_0^{\nu^{-1}(t)}f^*(s) u(s) \d{s}\quad\text{for every $t\in(0,L)$};
\end{equation*}
hence
\begin{equation}\label{thm:char_of_optimal_spaces:eq4}
\Big\|v(t)\int_0^{\nu^{-1}(t)} \xi(s) \esssup_{\tau\in[s,\nu^{-1}(t))}\varphi(\tau)f^*(\tau)\d{s}\Big\|_{Z'(0,L)}\lesssim\|R_{u, v, \nu^{-1}}(f^*)\|_{Z'(0,L)}.
\end{equation}
Since the function $\varphi\circ\nu^{-1}$ is equivalent to a quasiconcave function, it follows from \cite[Lemma~4.10]{EMMP:20} that
\begin{equation*}
\Big\|\esssup_{\tau\in[\nu^{-1}(t),L)}\varphi(\tau)f^*(\tau)\Big\|_{Z'(0,L)}\lesssim\|\varphi(\nu^{-1}(t))f^*(\nu^{-1}(t))\|_{Z'(0,L)}.
\end{equation*}
We note that, although \cite[Lemma~4.10]{EMMP:20} deals only with the case $L=\infty$, its proof translates verbatim to the case of $L\in(0,\infty)$. Furthermore, we have that
\begin{equation}\label{thm:char_of_optimal_spaces:eq6}
\begin{aligned}
\|\varphi(\nu^{-1}(t))f^*(\nu^{-1}(t))\|_{Z'(0,L)}&\lesssim\Bigg\|\frac{U(\nu^{-1}(t))}{\int_0^{\nu^{-1}(t)}\xi(s)\d{s}}f^*(\nu^{-1}(t))\Bigg\|_{Z'(0,L)}\\
&\leq\|R_{u, v, \nu^{-1}}(f^*)\|_{Z'(0,L)},
\end{aligned}
\end{equation}
where we used the fact that $\xi$ satisfies \eqref{thm:char_of_optimal_spaces:integral_cond_on_xi} in the first inequality and the monotonicity of $f^*$ in the second one. By combining \eqref{thm:char_of_optimal_spaces:eq7} with \eqref{thm:char_of_optimal_spaces:eq4} and \eqref{thm:char_of_optimal_spaces:eq6} and using \eqref{thm:char_of_optimal_spaces:normonX'notationofnorms}, we obtain that
\begin{equation*}
\|T_\varphi f\|_{X'(0,L)}\lesssim\|R_{u, v, \nu^{-1}}(f^*)\|_{Z'(0,L)}\approx\|f\|_{X'(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$};
\end{equation*}
hence $T_\varphi$ is bounded on $X'(0,L)$.
\end{proof}
\begin{remarks}\
\begin{enumerate}[(i)]
\item If $X'(0,L)$ is the optimal domain space for $R_{u, v, \nu^{-1}}$ and some rearrangement\hyp{}invariant function space $Y(0,L)$, then $X'(0,L)$ is actually the optimal domain space for $R_{u, v, \nu^{-1}}$ and its own optimal target space. This follows from the following. Thanks to \myref{Proposition}{prop:Roptimalrange} and \myref{Proposition}{prop:norminducedbyR}, we are entitled to denote by $Z(0,L)$ the optimal target space for $R_{u, v, \nu^{-1}}$ and $X'(0,L)$ and by $W(0,L)$ the optimal domain space for $R_{u, v, \nu^{-1}}$ and $Z(0,L)$. We need to show that $X'(0,L)=W(0,L)$. On the one hand, since $R_{u, v, \nu^{-1}}\colon X'(0,L)\to Z(0,L)$ is bounded and $W(0,L)$ is the optimal domain space for $R_{u, v, \nu^{-1}}$ and $Z(0,L)$, we have that $X'(0,L)\hookrightarrow W(0,L)$. On the other hand, since $R_{u, v, \nu^{-1}}\colon X'(0,L)\to Y(0,L)$ is bounded and $Z(0,L)$ is the optimal target space for $R_{u, v, \nu^{-1}}$ and $X'(0,L)$, we have that $Z(0,L)\hookrightarrow Y(0,L)$; consequently, $R_{u, v, \nu^{-1}}\colon W(0,L)\to Y(0,L)$ is bounded. Finally, since $X'(0,L)$ is the optimal domain space for $R_{u, v, \nu^{-1}}$ and $Y(0,L)$, we obtain that $W(0,L)\hookrightarrow X'(0,L)$. Furthermore, by combining this observation with \myref{Remark}{rem:optimal_for_R_iff_for_H}, we also obtain that, if $X(0,L)$ is the optimal target space for $H_{u, v, \nu}$ and some rearrangement\hyp{}invariant function space $Y(0,L)$, then $X(0,L)$ is actually the optimal target space for $H_{u, v, \nu}$ and its own optimal domain space.
\item If $\xi$ satisfies the averaging condition~\eqref{averaging_condition}, then \eqref{thm:char_of_optimal_spaces:integral_cond_on_xi} is satisfied for every nonincreasing function $u$ inasmuch as $tu(t)\leq U(t)$ for every $t\in(0,L)$.
\item When $u(t)=t^{-1+\alpha}$, $v(t)=t^{-1+\beta}$, and $\nu(t) = t^\gamma$, $t\in(0,L)$, the additional assumptions of \myref{Theorem}{thm:char_of_optimal_spaces} are satisfied if $\alpha\in(0,1]$, $\beta\in[0,1)$, $\gamma>0$ and $1 \leq \frac{\alpha}{\gamma} + \beta \leq 2$; a short verification is sketched after these remarks.
\end{enumerate}
\end{remarks}
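For the reader's convenience, here is a sketch of the verification behind (iii); the particular choice of $\xi$ below is one admissible choice, and we write $U(t)=\int_0^t u(s)\d{s}$. Since $\frac1{v(t)}=t^{1-\beta}$ and $\nu^{-1}(t)=t^{\frac1{\gamma}}$, one may take $\xi(s)=\gamma(1-\beta)s^{\gamma(1-\beta)-1}$, which is a positive function precisely because $\beta<1$. Then $U(t)=\frac{t^{\alpha}}{\alpha}$ and
\begin{equation*}
\frac{u(t)}{U(t)}\int_0^t \xi(s) \d{s}=\frac{\alpha}{t}\,t^{\gamma(1-\beta)}=\frac{\alpha}{\gamma(1-\beta)}\,\xi(t)\quad\text{for every $t\in(0,L)$},
\end{equation*}
so \eqref{thm:char_of_optimal_spaces:integral_cond_on_xi} is satisfied. Furthermore,
\begin{equation*}
\varphi(\nu^{-1}(t))=\frac{u(\nu^{-1}(t))}{\xi(\nu^{-1}(t))}=\frac1{\gamma(1-\beta)}\,t^{\frac{\alpha}{\gamma}+\beta-1}\quad\text{for every $t\in(0,L)$},
\end{equation*}
which is quasiconcave (it is nondecreasing and $t\mapsto\frac{\varphi(\nu^{-1}(t))}{t}$ is nonincreasing) precisely when $1\leq\frac{\alpha}{\gamma}+\beta\leq2$; the restriction $\alpha\in(0,1]$ ensures in particular that $u$ is nonincreasing, and the remaining assumptions of the theorem are readily checked for these power functions.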
The remainder of this subsection is devoted to the particular but important case $u\equiv1$, in which we will be able to establish an even stronger connection between the various notions that we have met. First, we need to equip ourselves with the following auxiliary result, which generalizes \cite[Lemma~4.9]{EMMP:20} and whose immediate corollary for $u\equiv1$ (namely \myref{Corollary}{cor:Honsimple:u==1}) is of independent interest.
\begin{proposition}\label{prop:Honsimple}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Assume that $\nu^{-1}\in\Dsup{0}$. If $L=\infty$, assume that $\nu^{-1}\in\Dsup{\infty}$. Let $u,v\colon(0,L)\to(0,\infty)$ be nonincreasing. Assume that $v$ satisfies the averaging condition \eqref{averaging_condition}, and denote its averaging constant by $C$.
Set $f=\sum_{i=1}^Nc_i\chi_{(0,a_i)}$, where $c_i\in(0,\infty)$, $i=1,\dots, N$, and $0<a_1<\cdots<a_N < L$. We have that
\begin{equation}\label{prop:Honsimple:equivalence}
\Big\|u(t)\int_{\nu(t)}^Lf(s)v(s)\d{s}\Big\|_{X(0,L)}\approx\Big\|u(t)\sum_{i=1}^Na_ic_iv(a_i)\chi_{(0,\nu^{-1}(a_i))}(t)\Big\|_{X(0,L)},
\end{equation}
in which the multiplicative constants depend only on $\nu$ and $C$.
\end{proposition}
\begin{proof}
First, observe that $\inf_{t\in(0,L)}\frac{\nu^{-1}(\frac{t}{\theta})}{\nu^{-1}(t)}\in(0,1)$, where $\theta>1$ is such that $\nu^{-1}\in\Dsup[\theta]{0}$ and, if $L=\infty$, also $\nu^{-1}\in\Dsup[\theta]{\infty}$. We denote the infimum by $M$.
Second, we have that
\begin{align}
\Big\|u(t)\int_{\nu(t)}^Lf(s)v(s)\d{s}\Big\|_{X(0,L)}&=\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(a_i))}(t)\int_{\nu(t)}^{a_i}v(s)\d{s}\Big\|_{X(0,L)}\notag\\
\begin{split}\label{prop:Honsimple:eq1}
&\geq\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(\frac{a_i}{\theta}))}(t)v(a_i)(a_i-\nu(t))\Big\|_{X(0,L)}\\
&\geq\frac{\theta-1}{\theta}\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(\frac{a_i}{\theta}))}(t)v(a_i)a_i\Big\|_{X(0,L)}\\
&\geq\frac{\theta-1}{\theta}\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,M\nu^{-1}(a_i))}(t)v(a_i)a_i\Big\|_{X(0,L)}\\
&\geq M\frac{\theta-1}{\theta}\Big\|u(Mt)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(a_i))}(t)v(a_i)a_i\Big\|_{X(0,L)}\\
&\geq M\frac{\theta-1}{\theta}\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(a_i))}(t)v(a_i)a_i\Big\|_{X(0,L)}
\end{split}
\end{align}
thanks to the fact that $u$ and $v$ are nonincreasing and \eqref{ch1:ri:dilation} (the boundedness of the dilation operator $D_{\frac1{M}}$).
Last, using the fact that $v$ satisfies the averaging condition~\eqref{averaging_condition}, we obtain that
\begin{equation}\label{prop:Honsimple:eq2}
\begin{aligned}
\Big\|u(t)\int_{\nu(t)}^Lf(s)v(s)\d{s}\Big\|_{X(0,L)}&=\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(a_i))}(t)\int_{\nu(t)}^{a_i}v(s)\d{s}\Big\|_{X(0,L)}\\
&\leq C\Big\|u(t)\sum_{i=1}^Nc_i\chi_{(0,\nu^{-1}(a_i))}(t)a_iv(a_i)\Big\|_{X(0,L)}.
\end{aligned}
\end{equation}
By combining \eqref{prop:Honsimple:eq2} and \eqref{prop:Honsimple:eq1}, we obtain \eqref{prop:Honsimple:equivalence}.
\end{proof}
Since every nonnegative, nonincreasing function on $(0,L)$ is the pointwise limit of a nondecreasing sequence of nonnegative, nonincreasing simple functions, \myref{Proposition}{prop:Honsimple} with $u\equiv1$ has the following important corollary.
\begin{corollary}\label{cor:Honsimple:u==1}
Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Assume that $\nu^{-1}\in\Dsup{0}$. If $L=\infty$, assume that $\nu^{-1}\in\Dsup{\infty}$. Let $v\colon(0,L)\to(0, \infty)$ be a nonincreasing function satisfying the averaging condition~\eqref{averaging_condition}. Let $f\in\mathfrak{M}pl(0,L)$. There is a nondecreasing sequence $\{f_k\}_{k=1}^\infty$ of nonnegative, nonincreasing simple functions on $(0,L)$ such that, for every rearrangement\hyp{}invariant function norm $\|\cdot\|_{X(0,L)}$,
\begin{equation*}
\lim_{k\to\infty}\|H_{1, v, \nu}(f_k)\|_{X(0,L)}\approx\|f^*\|_{X(0,L)}=\|f\|_{X(0,L)},
\end{equation*}
in which the multiplicative constants depend only on $\nu$ and the averaging constant of $v$.
\end{corollary}
\begin{remark}
The assumption $\nu^{-1}\in\Dsup{0}$ is not overly restrictive. For example, it is satisfied whenever $\nu$ is equivalent to $t\mapsto t^\alpha \ell_1(t)^{\beta_1}\cdots\ell_k(t)^{\beta_k}$ near $0$ for any $\alpha>0$, $k\in\mathbb{N}_0$ and $\beta_j\in\mathbb{R}$, $j=1, 2, \dots, k$, where the functions $\ell_j$ are $j$-times iterated logarithmic functions defined as
\begin{equation*}
\ell_j(t)=
\begin{cases}
1 + |\log t| \quad &\text{if $j=1$},\\
1 + \log\ell_{j-1}(t) \quad &\text{if $j>1$},
\end{cases}
\end{equation*}
for $t\in(0,L)$. In this case, $\nu^{-1}$ is equivalent to $t\mapsto t^{\frac1{\alpha}} \ell_1(t)^{-\frac{\beta_1}{\alpha}}\cdots\ell_k(t)^{-\frac{\beta_k}{\alpha}}$ near $0$ (cf.~\citep[Appendix~5]{BGT:89}). On the other hand, $\nu(t)=\log^\alpha(\frac1{t})$ near $0$, where $\alpha<0$, is a typical example of a function not satisfying the assumption. The same remark (with the obvious modifications) is true for the assumption $\nu^{-1}\in\Dsup{\infty}$.
\end{remark}
While \myref{Proposition}{prop:norminducedbyT} provides a sufficient condition for simplification of \eqref{prop:norminducedbyH:normdef}, the following proposition provides a necessary one.
\begin{proposition}\label{prop:normH_withu=1_simplified_implies_X_optimal_space}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Assume that $\nu^{-1}\in\Dsup{0}$. If $L=\infty$, assume that $\nu^{-1}\in\Dsup{\infty}$ and $\nu\in\Dinf{\infty}$. Let $v\colon (0,L) \to (0,\infty)$ be a nonincreasing function satisfying the averaging condition~\eqref{averaging_condition}. If there is a positive constant $C$ such that
\begin{equation}\label{prop:normH_withu=1_simplified_implies_X_optimal_space:simplifiednorm}
\sup_{h\sim f}\|H_{1, v, \nu}h\|_{X(0,L)}\leq C\|H_{1, v, \nu}(f^*)\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$},
\end{equation}
then the three equivalent statements from \myref{Theorem}{thm:char_of_optimal_spaces} with $u\equiv1$ are satisfied.
\end{proposition}
\begin{proof}
Let $a$ be defined by \eqref{prop:Roptimaldomain:L_or_1}. Since $v$ is integrable over $(0,a)$, for it satisfies the averaging condition~\eqref{averaging_condition}, we have that
\begin{equation}\label{prop:normH_withu=1_simplified_implies_X_optimal_space:eq3}
\Big\|\chi_{(0,\nu^{-1}(a))}(t)\int_{\nu(t)}^av(s)\d{s}\Big\|_{X(0,L)}\leq\int_0^av(s)\d{s}\|\chi_{(0,\nu^{-1}(a))}\|_{X(0,L)}<\infty.
\end{equation}
Furthermore, if $L=\infty$, then $\limsup_{\tau\to\infty}v(\tau)\|\chi_{(0,\nu^{-1}(\tau))}\|_{X(0,\infty)}<\infty$. Indeed, suppose that $\limsup_{\tau\to\infty}v(\tau)\|\chi_{(0,\nu^{-1}(\tau))}\|_{X(0,\infty)}=\infty$. It follows from the proof of \myref{Proposition}{prop:norminducedbyH} that
\begin{equation}\label{prop:normH_withu=1_simplified_implies_X_optimal_space:eq4}
\sup_{h\sim \chi_{(0,1)}}\|H_{1, v, \nu}h\|_{X(0,\infty)} = \infty.
\end{equation}
However, since
\begin{equation*}
\sup_{h\sim \chi_{(0,1)}}\|H_{1, v, \nu}h\|_{X(0,\infty)} \approx \|H_{1, v, \nu}\chi_{(0,1)}\|_{X(0,\infty)} = \Big\|\chi_{(0,\nu^{-1}(1))}(t)\int_{\nu(t)}^1v(s)\d{s}\Big\|_{X(0,\infty)}<\infty
\end{equation*}
thanks to \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:simplifiednorm} and \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:eq3}, \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:eq4} is not possible. Hence, \myref{Proposition}{prop:norminducedbyH} guarantees that the optimal domain space for $H_{1, v, \nu}$ and $X(0,L)$ exists; moreover, if we denote it by $Z(0,L)$, then \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:simplifiednorm} implies that
\begin{equation}\label{prop:normH_withu=1_simplified_implies_X_optimal_space:Znorm}
\|f\|_{Z(0,L)} \approx \| H_{1, v, \nu}(f^*) \|_{X(0,L)} \quad \text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
Now, we finally turn our attention to proving that \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:simplifiednorm} implies statement (iii) from \myref{Theorem}{thm:char_of_optimal_spaces}. Let $Y(0,L)$ be the optimal target space for the operator $H_{1, v, \nu}$ and $Z(0,L)$. Its existence is guaranteed by \myref{Proposition}{prop:Hoptimalrange}, and we have that
\begin{equation}\label{prop:normH_withu=1_simplified_implies_X_optimal_space:eq1}
\|f\|_{Y'(0,L)}=\|R_{1, v, \nu^{-1}}(f^*)\|_{Z'(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
Using the optimality of $Y(0,L)$ combined with the fact that $H_{1, v, \nu}\colon Z(0,L) \to X(0,L)$ is bounded (and so $Y(0,L)\hookrightarrow X(0,L)$), and \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:Znorm}, we obtain that
\begin{equation*}
\|H_{1, v, \nu}(f^*)\|_{X(0,L)} \lesssim \|H_{1, v, \nu}(f^*)\|_{Y(0,L)} \lesssim \|f\|_{Z(0,L)} \approx \|H_{1, v, \nu}(f^*)\|_{X(0,L)}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$; hence
\begin{equation*}
\|H_{1, v, \nu}(f^*)\|_{X(0,L)} \approx \|H_{1, v, \nu}(f^*)\|_{Y(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
In particular, we have that
\begin{equation}\label{prop:normH_withu=1_simplified_implies_X_optimal_space:eq2}
\|H_{1, v, \nu}h\|_{X(0,L)}\approx\|H_{1, v, \nu}h\|_{Y(0,L)}
\end{equation}
for every nonincreasing simple function $h\in\mathfrak{M}pl(0,L)$. By combining \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:eq2} with \myref{Corollary}{cor:Honsimple:u==1}, we obtain that
\begin{equation*}
\|f^*\|_{X(0,L)}\approx\|f^*\|_{Y(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
Owing to the rearrangement invariance of both function norms, it follows that $X(0,L)=Y(0,L)$. Hence \eqref{thm:char_of_optimal_spaces:normonX'} with $u\equiv1$ follows from \eqref{prop:normH_withu=1_simplified_implies_X_optimal_space:eq1} combined with \eqref{ch1:ri:normX'}.
\end{proof}
We obtain the final result of this subsection by combining \myref{Theorem}{thm:char_of_optimal_spaces}, \myref{Proposition}{prop:normH_withu=1_simplified_implies_X_optimal_space}, \myref{Proposition}{prop:norminducedbyT} and \myref{Theorem}{thm:Tbounded_iff_Xinterpolation}.
\begin{theorem}\label{thm:charTbounded}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu\colon(0,L)\to(0,L)$ be an increasing bijection. Assume that $\nu^{-1}\in\Dsup{0}$. If $L=\infty$, assume that $\nu^{-1}\in\Dsup{\infty}$ and $\nu\in\Dinf{\infty}$. Let $v\colon(0,L)\to(0,\infty)$ be defined by
\begin{equation*}
\frac1{v(t)}=\int_0^{\nu^{-1}(t)}\xi(s)\d{s},\ t\in(0,L),
\end{equation*}
where $\xi\colon(0,L)\to(0,\infty)$ is a measurable function satisfying the averaging condition~\eqref{averaging_condition}. Assume that $v$, too, satisfies the averaging condition~\eqref{averaging_condition}. Set $\varphi=\frac1{\xi}$. Assume that the function $\varphi\circ\nu^{-1}$ is equivalent to a quasiconcave function. Let $\varrho$ be the functional defined by \eqref{prop:norminducedbyH:normdef} with $u\equiv1$. The following five statements are equivalent.
\begin{enumerate}[(i)]
\item The operator $T_\varphi$, defined by \eqref{opTdef}, is bounded on $X'(0,L)$.
\item There is a positive constant $C$ such that
\begin{equation*}
\sup_{h\sim f}\|H_{1, v, \nu}h\|_{X(0,L)}\leq C\|H_{1, v, \nu}(f^*)\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
\item The space $X(0,L)$ is the optimal target space for the operator $H_{1, v, \nu}$ and some rearrangement\hyp{}invariant function space.
\item The space $X'(0,L)$ is the optimal domain space for the operator $R_{1, v, \nu^{-1}}$ and some rearrangement\hyp{}invariant function space.
\item We have that
\begin{equation*}
\|f\|_{X'(0,L)} \approx \sup_{\substack{g\in{\mathfrak{M}pl(0,L)}\\\varrho(g)\leq1}}\int_0^Lg(t)R_{1, v, \nu^{-1}}(f^*)(t)\d{t} \quad \text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
\end{enumerate}
If $L<\infty$, these five statements are also equivalent to
\begin{enumerate}[(i)]
\item[(vi)] $X(0,L) \in \Int\big(\Lambda^1_\xi(0,1), L^\infty(0,1)\big)$.
\end{enumerate}
\end{theorem}
\begin{remarks}\hphantom{}
\begin{itemize}
\item The assumption that $v$ satisfies the averaging condition~\eqref{averaging_condition} is natural because it forbids weights $v$ for which the question of whether $X(0,L)$ (or $X'(0,L)$) is the optimal target (or domain) space for $H_{1, v, \nu}$ (or $R_{1, v, \nu^{-1}}$) and some rearrangement\hyp{}invariant function space cannot be decided by the boundedness of the corresponding supremum operator $T_\varphi$. This can be illustrated by a very simple example. Consider $\nu=\id$ and $\xi\equiv1$. Since $T_\varphi f=f^*$, $T_\varphi$ is bounded on any $X'(0,L)$; however, $R_{1, v,\nu^{-1}}f(t)=\frac1{t}\int_0^t |f(s)|\d{s}$ clearly need not be bounded from $X'(0,L)$ to $(L^1+L^\infty)(0,L)$, which is the largest rearrangement\hyp{}invariant function space. To this end, consider, for example, $X(0,L)=L^\infty(0,\infty)$ (cf.~\cite[Proposition~4.1]{ST:16}).
\item When $v(t)=t^{-1+\beta}$ and $\nu(t) = t^\gamma$, $t\in(0,L)$, the assumptions of \myref{Theorem}{thm:charTbounded} are satisfied if $\beta\in(0,1)$, $\gamma>0$ and $1 \leq \frac1{\gamma} + \beta \leq 2$.
\end{itemize}
\end{remarks}
\subsection{Iteration of optimal r.i.~norms}\label{sec:iteration}
This subsection is devoted to so-called \emph{sharp iteration principles} for the operators $R_{u, v, \nu}$ and $H_{u, v, \nu}$. To illustrate their meaning and importance, suppose that $Y_1(0,L)$ is the optimal target space for $H_{u_1,v_1,\nu_1}$ and a rearrangement\hyp{}invariant function space $X(0,L)$. Let us now go one step further and suppose that $Y_2(0,L)$ is the optimal target space for $H_{u_2,v_2,\nu_2}$ and $Y_1(0,L)$. In the light of \myref{Proposition}{prop:Hoptimalrange}, the associate function norm of $\|\cdot\|_{Y_2(0,L)}$ is equal to $\|f\|_{Y_2'(0,L)}=\|R_{u_1,v_1,\nu_1^{-1}}((R_{u_2,v_2,\nu_2^{-1}}(f^*))^*)\|_{X'(0,L)}$. We immediately see that there is an inevitable difficulty that we face if we wish to understand the iterated norm. This difficulty is caused by the fact that the function $R_{u_2,v_2,\nu_2^{-1}}(f^*)$ is hardly ever (equivalent to) a nonincreasing function (unless $u_2$, $v_2$ and $\nu_2$ are related to each other in a very specific way; see \myref{Proposition}{prop:norminducedbyHwhenRnonincreasing}), and so we cannot just readily ``delete'' the outer star. Nevertheless, with some substantial effort, we shall be able to equivalently express the iterated norm as a noniterated one under suitable assumptions. The suitable assumptions are such that the iteration does not lead to the presence of kernels, which would go beyond the scope of this paper (see~\cite[Section~8]{CPS:15} in that regard). It should be noted that such iteration is not artificial. For example, it is an essential tool for establishing sharp iteration principles for various Sobolev embeddings, which, roughly speaking, ensure that the optimal rearrangement\hyp{}invariant target space in a Sobolev embedding of $(k+l)$th order is the same as that obtained by composing the optimal Sobolev embedding of order $k$ with the optimal Sobolev embedding of order $l$ (see~\cite{CP:16, CPS:20, M:21} and references therein). Another possible application is the description of optimal rearrangement\hyp{}invariant function norms for compositions of some operators of harmonic analysis (see~\cite{EMMP:20} and references therein for optimal behavior of some classical operators on rearrangement\hyp{}invariant function spaces). Finally, the motivation behind studying function norms induced by $H_{u_1, v_1, \nu_1}\circ H_{u_2, v_2, \nu_2}$ is similar.
The following proposition is the first step towards the sharp iteration principle for $R_{u, v, \nu}$.
\begin{proposition}\label{prop:Riterationoptimaldomainlowerbound}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu_1,\nu_2\colon(0,L)\to(0,L)$ be increasing bijections. Assume that $\nu_2\in\Dsup{0}$. If $L=\infty$, assume that $\nu_2\in\Dsup{\infty}$. Set $\nu=\nu_2\circ\nu_1$. Let $u_1, u_2\colon(0,L)\to(0,\infty)$ be nonincreasing. Let $v_1\colon(0,L)\to(0,\infty)$ be measurable. Let $v_2\colon(0,L)\to(0,\infty)$ be a nonincreasing function satisfying the averaging condition~\eqref{averaging_condition}. We have that
\begin{equation*}
\Big\|u_1(\nu_1(t))v_1(t)\nu_1(t)v_2(\nu_1(t))\int_0^{\nu(t)}f^*(s) u_2(s)\d{s}\Big\|_{X(0,L)}\lesssim\|R_{u_1, v_1, \nu_1}((R_{u_2, v_2, \nu_2}(f^*))^*)\|_{X(0,L)}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$, in which the multiplicative constant depends only on $\nu_2$ and the averaging constant of $v_2$.
\end{proposition}
\begin{proof}
Note that $\inf_{t\in(0,L)}\frac{\nu_2(\frac{t}{\theta})}{\nu_2(t)}>0$, where $\theta>1$ is such that $\nu_2\in\Dsup[\theta]{0}$ and, if $L=\infty$, also $\nu_2\in\Dsup[\theta]{\infty}$. Consequently, there is $N\in\mathbb{N}$ such that $\nu_2(t)\leq N\nu_2(\frac{t}{\theta})$ for every $t\in(0,L)$. Hence, for every $f\in\mathfrak{M}pl(0,L)$, we have that
\begin{equation}\label{prop:Riterationoptimaldomainlowerbound:eq1}
\int_0^{\nu_2(t)}f^*(s)u_2(s)\d{s}\leq N\int_0^{\nu_2(\frac{t}{\theta})}f^*(s)u_2(s)\d{s} \quad \text{for every $t\in(0,L)$}
\end{equation}
owing to the fact that $f^* u_2$ is nonincreasing. Thanks to the monotonicity of $u_1$ and $v_2$, the fact that $v_2$ satisfies the averaging condition~\eqref{averaging_condition} and the inequality \eqref{prop:Riterationoptimaldomainlowerbound:eq1}, we have that
\begin{align*}
&\Big\|v_1(t)u_1(\nu_1(t))\nu_1(t)v_2(\nu_1(t))\int_0^{\nu(t)}f^*(s)u_2(s)\d{s}\Big\|_{X(0,L)}\\
&\lesssim\Big\|v_1(t)u_1(\nu_1(t))\int_0^{\nu_1(t)}v_2(s)\d{s}\,\int_0^{\nu(t)}f^*(s)u_2(s)\d{s}\Big\|_{X(0,L)}\\
&\approx\Big\|v_1(t)u_1(\nu_1(t))\int_{\frac{\nu_1(t)}{\theta}}^{\nu_1(t)}v_2(s)\d{s}\,\int_0^{\nu(t)}f^*(s)u_2(s)\d{s}\Big\|_{X(0,L)}\\
&\leq\Big\|v_1(t)\int_{\frac{\nu_1(t)}{\theta}}^{\nu_1(t)}v_2(s)u_1(s)\d{s}\,\int_0^{\nu(t)}f^*(s)u_2(s)\d{s}\Big\|_{X(0,L)}\\
&\lesssim\Big\|v_1(t)\int_{\frac{\nu_1(t)}{\theta}}^{\nu_1(t)}v_2(s)u_1(s)\d{s}\,\int_0^{\nu_2(\frac{\nu_1(t)}{\theta})}f^*(s)u_2(s)\d{s}\Big\|_{X(0,L)}\\
&\leq\Big\|v_1(t)\int_{\frac{\nu_1(t)}{\theta}}^{\nu_1(t)}\Big(v_2(s)\int_0^{\nu_2(s)}f^*(\tau)u_2(\tau)\d{\tau}\Big)u_1(s)\d{s}\Big\|_{X(0,L)}\\
&\leq\Big\|v_1(t)\int_0^{\nu_1(t)}\Big(v_2(s)\int_0^{\nu_2(s)}f^*(\tau)u_2(\tau)\d{\tau}\Big)u_1(s)\d{s}\Big\|_{X(0,L)}\\
&\leq\Big\|R_{u_1,v_1,\nu_1}((R_{u_2,v_2,\nu_2}(f^*))^*)\Big\|_{X(0,L)}
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$, where we used the Hardy--Littlewood inequality \eqref{ch1:ri:HL} in the last inequality.
\end{proof}
We are now in a position to establish the sharp iteration principle for $R_{u,v,\nu}$.
\begin{theorem}\label{thm:Riterationoptimaldomainupperbound}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu_1,\nu_2\colon(0,L)\to(0,L)$ be increasing bijections. Assume that $\nu_2\in\Dsup{0}$. If $L=\infty$, assume that $\nu_2\in\Dsup{\infty}$. Let $u_1, u_2\colon(0,L)\to(0,\infty)$ be nonincreasing. Let $v_1\colon(0,L)\to(0,\infty)$ be a continuous function. Let $v_2\colon(0,L)\to(0,\infty)$ be defined by
\begin{equation*}
\frac1{v_2(t)}=\int_0^{\nu_2(t)}\xi(s)\d{s},\ t\in(0,L),
\end{equation*}
where $\xi\colon(0,L)\to(0,\infty)$ is a measurable function. Assume that the function $u_1 v_2$ satisfies the averaging condition~\eqref{averaging_condition}. Set $\nu=\nu_2\circ\nu_1$,
\begin{align*}
v(t)&=\nu_1(t)u_1(\nu_1(t))v_1(t)v_2(\nu_1(t)),\ t\in(0,L),\\
\intertext{and}
\eta(t)&=\frac1{U_2(t)v(\nu^{-1}(t))},\ t\in(0,L).
\end{align*}
Assume that $\eta$ and $\frac{\eta}{\xi}$ are equivalent to nonincreasing functions. Furthermore, assume that there are positive constants $C_1$ and $C_2$ such that
\begin{align}
\int_0^t \eta(s) u_2(s) \d{s} &\leq C_1 U_2(t) \eta(t) \quad \text{for a.e.~$t\in(0,L)$} \label{thm:Riterationoptimaldomainupperbound:integralassumption_on_eta_times_u2} \\
\intertext{and}
\frac1{t}\int_0^t U_2(\nu(s)) v(s) \d{s} &\geq C_2 U_2(\nu(t)) v(t) \quad \text{for a.e.~$t\in(0,L)$}. \label{thm:Riterationoptimaldomainupperbound:integralassumptionon1overeta}
\end{align}
We have that
\begin{equation*}
\|R_{u_1, v_1, \nu_1}((R_{u_2, v_2, \nu_2}(f^*))^*)\|_{X(0,L)}\approx\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$, in which the multiplicative constants depend only on $\nu_1$, $\nu_2$, $C_1$, $C_2$, the averaging constant of $u_1 v_2$ and the multiplicative constants in the equivalences of $\eta$ and $\frac{\eta}{\xi}$ to nonincreasing functions.
\end{theorem}
\begin{proof}
First, since the fact that $v_2u_1$ satisfies the averaging condition~\eqref{averaging_condition} together with the monotonicity of $u_1$ implies that $v_2$, too, satisfies the averaging condition~\eqref{averaging_condition} (with the same multiplicative constant), we have that
\begin{equation*}
\|R_{u_1, v_1, \nu_1}((R_{u_2, v_2, \nu_2}(f^*))^*)\|_{X(0,L)}\gtrsim\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}
\end{equation*}
thanks to \myref{Proposition}{prop:Riterationoptimaldomainlowerbound}; consequently we only need to prove the opposite inequality.
We may assume that $u_2$ is nondegenerate and $\psi\in X(0,L)$, where $\psi$ is defined as $\psi(t)=v(t)U_2(\nu(t))\chi_{(0,L)}(t)+v(t)\chi_{(L,\infty)}(t)$, $t\in(0,L)$, for, if it is not the case, then $\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}=\infty$ for every $f\in\mathfrak{M}pl(0,L)$ that is not equivalent to $0$ a.e. \myref{Proposition}{prop:norminducedbyR} with $u=u_2$ guarantees that there is a rearrangement\hyp{}invariant function space $Z(0,L)$ such that
\begin{equation*}
\|f\|_{Z(0,L)}=\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
Furthermore, by \eqref{prop:RXtoYbddiffHY'toX':normRXtoZ=normHZ'toX'} and the Hardy--Littlewood inequality \eqref{ch1:ri:HL}, we have that
\begin{equation}\label{thm:Riterationoptimaldomainupperbound:eq1}
\sup_{\|g\|_{X'(0,L)}\leq1}\|H_{u_2, v, \nu^{-1}}g\|_{Z'(0,L)}=1.
\end{equation}
Note that, for every $f\in\mathfrak{M}pl(0,L)$, the function
\begin{equation*}
(0,L)\ni t\mapsto v_2(t)\int_0^{\nu_2(t)}\xi(s) u_2(s) \sup_{\tau\in[s,L)}\frac1{\xi(\tau)}f^*(\tau)\d{s}
\end{equation*}
is nonincreasing because it is the integral mean of the nonincreasing function $(0,L)\ni s \mapsto u_2(s) \sup_{\tau\in[s,L)}\frac1{\xi(\tau)}f^*(\tau)$ over the interval $(0,\nu_2(t))$ with respect to the measure $\xi(s)\d{s}$. By \eqref{ch1:ri:normX''} and \eqref{RaHdual}, we have that
\begin{align*}
&\|R_{u_1, v_1, \nu_1}((R_{u_2, v_2, \nu_2}(f^*))^*)\|_{X(0,L)}=\sup_{\|g\|_{X'(0,L)}\leq1}\int_0^L(R_{u_2, v_2, \nu_2}(f^*))^*(t)H_{u_1, v_1, \nu_1^{-1}}g(t)\d{t}\\
&=\sup_{\|g\|_{X'(0,L)}\leq1}\int_0^L\Big[v_2(s)\int_0^{\nu_2(s)} u_2(\tau) f^*(\tau)\d{\tau}\Big]^*(t)H_{u_1, v_1, \nu_1^{-1}}g(t)\d{t}\\
&\leq\sup_{\|g\|_{X'(0,L)}\leq1}\int_0^L\Big[v_2(s)\int_0^{\nu_2(s)} \xi(\tau) u_2(\tau) \sup_{x\in[\tau,L)}\frac1{\xi(x)}f^*(x)\d{\tau}\Big]^*(t)H_{u_1, v_1, \nu_1^{-1}}g(t)\d{t}\\
&=\sup_{\|g\|_{X'(0,L)}\leq1}\int_0^Lv_2(t)\int_0^{\nu_2(t)} \xi(s) u_2(s) \sup_{\tau\in[s,L)}\frac1{\xi(\tau)}f^*(\tau)\d{s}\,H_{u_1, v_1, \nu_1^{-1}}g(t)\d{t}\\
&=\sup_{\|g\|_{X'(0,L)}\leq1}\int_0^L\Big(\xi(s)\sup_{\tau\in[s,L)}\frac1{\xi(\tau)}f^*(\tau)\Big)\Big(u_2(s)\int_{\nu_2^{-1}(s)}^Lv_2(t) u_1(t) \int_{\nu_1^{-1}(t)}^Lg(x)v_1(x)\d{x}\d{t}\Big)\d{s}\\
&\leq\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)}\sup_{\|g\|_{X'(0,L)}\leq1}\Big\|u_2(t)\int_{\nu_2^{-1}(t)}^Lv_2(s) u_1(s)\int_{\nu_1^{-1}(s)}^Lg(\tau)v_1(\tau)\d{\tau}\d{s}\Big\|_{Z'(0,L)}\\
&=\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)}\sup_{\|g\|_{X'(0,L)}\leq1}\Big\|u_2(t)\int_{\nu^{-1}(t)}^Lg(\tau)v_1(\tau)\int_{\nu_2^{-1}(t)}^{\nu_1(\tau)}v_2(s)u_1(s)\d{s}\d{\tau}\Big\|_{Z'(0,L)}\\
&\lesssim\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)}\sup_{\|g\|_{X'(0,L)}\leq1}\Big\|u_2(t)\int_{\nu^{-1}(t)}^Lg(\tau)v_1(\tau)\nu_1(\tau)u_1(\nu_1(\tau))v_2(\nu_1(\tau))\d{\tau}\Big\|_{Z'(0,L)}\\
&=\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)}\sup_{\|g\|_{X'(0,L)}\leq1}\|H_{u_2, v, \nu^{-1}}g\|_{Z'(0,L)}\\
&=\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)},
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$, where we used Fubini's theorem in the fourth and fifth equalities, the H\"older inequality \eqref{ch1:ri:holder} in the second inequality, the fact that $u_1 v_2$ satisfies the averaging condition~\eqref{averaging_condition} in the last inequality, and \eqref{thm:Riterationoptimaldomainupperbound:eq1} in the last equality. Therefore, the proof will be finished once we show that
\begin{equation*}
\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)}\lesssim\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation*}
Since the function $\frac{\eta}{\xi}$ is equivalent to a nonincreasing function, we have that
\begin{equation*}
\Big\|\xi(t)\sup_{s\in[t,L)}\frac1{\xi(s)}f^*(s)\Big\|_{Z(0,L)}\lesssim\Big\|\eta(t)\sup_{s\in[t,L)}\frac1{\eta(s)}f^*(s)\Big\|_{Z(0,L)}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$. Hence it is sufficient to show that
\begin{equation}\label{thm:Riterationoptimaldomainupperbound:eq2}
\Big\|\eta(t)\sup_{s\in[t,L)}\frac1{\eta(s)}f^*(s)\Big\|_{Z(0,L)}\lesssim\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$}.
\end{equation}
Note that, for every $f\in\mathfrak{M}pl(0,L)$,
\begin{align}\label{thm:Riterationoptimaldomainupperbound:eq3}
\Big\|\eta(t)\sup_{s\in[t,L)}\frac1{\eta(s)}f^*(s)\Big\|_{Z(0,L)}&\approx\Big\|v(t)\int_0^{\nu(t)} u_2(s) \eta(s) \sup_{\tau\in[s,L)}\frac1{\eta(\tau)}f^*(\tau)\d{s}\Big\|_{X(0,L)}\notag\\
\begin{split}
&\leq\Big\|v(t)\int_0^{\nu(t)} u_2(s) \eta(s) \sup_{\tau\in[s,\nu(t))}\frac1{\eta(\tau)}f^*(\tau)\d{s}\Big\|_{X(0,L)}\\
&\quad+\Big\|v(t) \Big(\sup_{\tau\in[\nu(t),L)}\frac1{\eta(\tau)}f^*(\tau)\Big) \int_0^{\nu(t)} u_2(s) \eta(s) \d{s}\Big\|_{X(0,L)}
\end{split}
\end{align}
inasmuch as $\eta$ is equivalent to a nonincreasing function. Furthermore, since $\eta$ is equivalent to a nonincreasing function and satisfies \eqref{thm:Riterationoptimaldomainupperbound:integralassumption_on_eta_times_u2}, \cite[Theorem~3.2]{GOP:06} guarantees that
\begin{equation*}
\int_0^{\nu(t)} u_2(s) \eta(s) \sup_{\tau\in[s,\nu(t))}\frac1{\eta(\tau)}f^*(\tau)\d{s}\lesssim\int_0^{\nu(t)}f^*(s) u_2(s) \d{s}
\end{equation*}
for every $t\in(0,L)$ and every $f\in\mathfrak{M}pl(0,L)$, in which the multiplicative constant depends only on $C_1$ and the multiplicative constants in the equivalence of $\eta$ to a nonincreasing function. Hence
\begin{equation}\label{thm:Riterationoptimaldomainupperbound:eq4}
\begin{aligned}
\Big\|v(t)\int_0^{\nu(t)} u_2(s) \eta(s) \sup_{\tau\in[s,\nu(t))}\frac1{\eta(\tau)}f^*(\tau)\d{s}\Big\|_{X(0,L)}&\lesssim\Big\|v(t)\int_0^{\nu(t)} f^*(s) u_2(s) \d{s}\Big\|_{X(0,L)}\\
&=\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}
\end{aligned}
\end{equation}
for every $f\in\mathfrak{M}pl(0,L)$. Furthermore, thanks to the fact that $\eta$ satisfies \eqref{thm:Riterationoptimaldomainupperbound:integralassumption_on_eta_times_u2} again, we have that
\begin{align}\label{thm:Riterationoptimaldomainupperbound:eq5}
\Big\|v(t) \Big(\sup_{\tau\in[\nu(t),L)}\frac1{\eta(\tau)}f^*(\tau)\Big) \int_0^{\nu(t)}u_2(s) \eta(s) \d{s}\Big\|_{X(0,L)}&\lesssim\Big\|v(t)U_2(\nu(t))\eta(\nu(t))\sup_{\tau\in[\nu(t),L)}\frac1{\eta(\tau)}f^*(\tau)\Big\|_{X(0,L)}\notag\\
\begin{split}
&=\Big\|\sup_{\tau\in[\nu(t),L)}\frac1{\eta(\tau)}f^*(\tau)\Big\|_{X(0,L)}\\
&=\Big\|\sup_{\tau\in[t,L)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))\Big\|_{X(0,L)}
\end{split}
\end{align}
for every $f\in\mathfrak{M}pl(0,L)$. We claim that
\begin{equation}\label{thm:Riterationoptimaldomainupperbound:eq6}
\Big\|\sup_{\tau\in[t,L)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))\Big\|_{X(0,L)}\lesssim\|R_{u_2, v, \nu}(f^*)\|_{X(0,L)}.
\end{equation}
Thanks to the Hardy--Littlewood--P\'olya principle \eqref{ch1:ri:HLP}, it is sufficient to show that
\begin{equation}\label{thm:Riterationoptimaldomainupperbound:eq7}
\int_0^t\sup_{\tau\in[s,L)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))\d{s}\lesssim\int_0^t(R_{u_2, v, \nu}(f^*))^*(s)\d{s}\quad\text{for every $t\in(0,L)$}.
\end{equation}
To this end, we have that
\begin{align}
\int_0^t\sup_{\tau\in[s,t)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))\d{s}&\lesssim\int_0^t\frac1{\eta(\nu(s))}f^*(\nu(s))\d{s}=\int_0^t U_2(\nu(s)) v(s) f^*(\nu(s)) \d{s}\notag\\
&\leq\int_0^tR_{u_2, v, \nu}(f^*)(s)\d{s}\leq\int_0^t(R_{u_2, v, \nu}(f^*))^*(s)\d{s}\label{thm:Riterationoptimaldomainupperbound:eq8}
\end{align}
for every $t\in(0,L)$, where the first inequality follows from \cite[Theorem~3.2]{GOP:06} (the fact that the function $(0,L)\ni s\mapsto \frac1{\eta(\nu(s))}=U_2(\nu(s)) v(s)$ is equivalent to a nondecreasing function and satisfies \eqref{thm:Riterationoptimaldomainupperbound:integralassumptionon1overeta} was used here), the second inequality follows from the monotonicity of $f^*$, and the last one follows from the Hardy--Littlewood inequality \eqref{ch1:ri:HL}. Furthermore, owing to \eqref{thm:Riterationoptimaldomainupperbound:integralassumptionon1overeta} again, we have that
\begin{align}
\sup_{\tau\in[t,L)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))&=\sup_{\tau\in[t,L)} U_2(\nu(\tau)) v(\tau) f^*(\nu(\tau))\lesssim\sup_{\tau\in[t,L)}\Big(\frac1{\tau}\int_0^\tau U_2(\nu(s)) v(s)\d{s}\Big)f^*(\nu(\tau))\notag\\
\begin{split}\label{thm:Riterationoptimaldomainupperbound:eq9}
&\leq\sup_{\tau\in[t,L)}\frac1{\tau}\int_0^\tau U_2(\nu(s)) v(s)f^*(\nu(s))\d{s}\leq\sup_{\tau\in[t,L)}\frac1{\tau}\int_0^\tau R_{u_2, v, \nu}(f^*)(s)\d{s}\\
&\leq\sup_{\tau\in[t,L)}\frac1{\tau}\int_0^\tau (R_{u_2, v, \nu}(f^*))^*(s)\d{s}=\frac1{t}\int_0^t (R_{u_2, v, \nu}(f^*))^*(s)\d{s}.
\end{split}
\end{align}
Inequality \eqref{thm:Riterationoptimaldomainupperbound:eq7} now follows from \eqref{thm:Riterationoptimaldomainupperbound:eq8} and \eqref{thm:Riterationoptimaldomainupperbound:eq9} inasmuch as
\begin{equation*}
\int_0^t\sup_{\tau\in[s,L)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))\d{s}\leq\int_0^t\sup_{\tau\in[s,t)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))\d{s} + t\sup_{\tau\in[t,L)}\frac1{\eta(\nu(\tau))}f^*(\nu(\tau))
\end{equation*}
for every $t\in(0,L)$.
Finally, by combining \eqref{thm:Riterationoptimaldomainupperbound:eq3} with \eqref{thm:Riterationoptimaldomainupperbound:eq4}, \eqref{thm:Riterationoptimaldomainupperbound:eq5} and \eqref{thm:Riterationoptimaldomainupperbound:eq6}, we obtain \eqref{thm:Riterationoptimaldomainupperbound:eq2}.
\end{proof}
\begin{remark}
Since \myref{Theorem}{thm:Riterationoptimaldomainupperbound} has several assumptions, it is instructive to provide a concrete, important example, which is also quite general. Let $\alpha_1, \alpha_2, \beta_1, \beta_2, \gamma_1, \gamma_2 \in(0,\infty)$. Set $\nu_j(t)=t^{\alpha_j}$, $u_j(t)=t^{\beta_j-1}b_j(t)$ and $v_j(t)=t^{\gamma_j-1}c_j(t)$, $t\in(0,L)$, $j=1,2$, where $b_j$, $c_j$ are continuous slowly-varying functions. Set $d=(b_1\circ\nu_1) \cdot c_1 \cdot (c_2\circ\nu_1)$ and $\widetilde{d}=(b_1\circ\nu_1) \cdot c_1$. Assume that $\gamma_2 < 1$, $\beta_1 + \gamma_2 > 1$, and
\begin{equation*}
\alpha_1(\beta_1 + \alpha_2\beta_2 + \gamma_2 - 1) + \gamma_1 \geq 1,\ \alpha_1(\beta_1 +\alpha_2\beta_2 - \alpha_2) + \gamma_1 \geq 1,\ \alpha_1(\beta_1 + \gamma_2 - 1) + \gamma_1 < 1.
\end{equation*}
If $\alpha_1(\beta_1 + \alpha_2\beta_2 + \gamma_2 - 1) + \gamma_1=1$ or $\alpha_1(\beta_1 +\alpha_2\beta_2 - \alpha_2) + \gamma_1 = 1$, also assume that $d$ or $\widetilde{d}$, respectively, is equivalent to a nondecreasing function. Under these assumptions, we can use \myref{Theorem}{thm:Riterationoptimaldomainupperbound} to obtain that
\begin{equation*}
\Big\|v_1(t)\int_0^{t^{\alpha_1}}\big[v_2(\tau)\int_0^{\tau^{\alpha_2}}f^*(\sigma)u_2(\sigma)\d{\sigma}\big]^*(s)u_1(s)\d{s}\Big\|_{X(0,L)}\approx\Big\|t^\delta d(t)\int_0^{t^{\alpha_1\alpha_2}}f^*(s)s^{\beta_2-1}\d{s}\Big\|_{X(0,L)}
\end{equation*}
for every $f\in\mathfrak{M}pl(0,L)$, where $\delta=\alpha_1(\beta_1 + \gamma_2 - 1) + \gamma_1 - 1$.
When $\beta_j=1$ and $b_j=c_j\equiv1$, $j=1,2$, the assumptions are satisfied provided that
\begin{equation}\label{rem:Riterationoptimaldomainupperbound:specialcase}
\alpha_1(\alpha_2 + \gamma_2) + \gamma_1 \geq 1,\ \alpha_1 + \gamma_1 \geq 1,\ \alpha_1\gamma_2 + \gamma_1 < 1.
\end{equation}
In particular, \eqref{rem:Riterationoptimaldomainupperbound:specialcase} is satisfied if (cf.~\cite[Theorem~3.4]{CP:16})
\begin{equation*}
\alpha_2+\gamma_2\geq1,\ \alpha_1+\gamma_1\geq1,\ \alpha_1\gamma_2 + \gamma_1<1.
\end{equation*}
\end{remark}
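The equivalence in the preceding remark can also be illustrated numerically. The following Python sketch is a rough sanity check only, not part of any proof: it uses the parameter choice $\alpha_1=\alpha_2=1$, $\beta_1=\beta_2=1$, $\gamma_1=\frac12$, $\gamma_2=\frac{3}{10}$, takes $L=1$ and $X=L^2(0,1)$, discretizes $(0,1)$ by a uniform grid (on which the decreasing rearrangement reduces to sorting the cell values), and compares both sides for a few sample decreasing functions $f$. The grid size and the test functions are arbitrary choices, and a check for finitely many $f$ of course says nothing about the uniformity of the constants.
\begin{verbatim}
import numpy as np

M = 20000                          # number of grid cells on (0, 1)
ds = 1.0 / M
s = (np.arange(M) + 0.5) * ds      # cell midpoints

def rearrangement(values):
    # on a uniform grid the decreasing rearrangement is just sorting
    return np.sort(values)[::-1]

def primitive(values):
    # approximates t -> \int_0^t values(s) ds at the grid points
    return np.cumsum(values) * ds

def l2norm(values):
    return np.sqrt(np.sum(values ** 2) * ds)

def lhs(fstar):
    inner = primitive(fstar)               # \int_0^tau f*(sigma) d sigma
    g = s ** (-0.7) * inner                # v_2(tau) * inner, gamma_2 = 3/10
    outer = primitive(rearrangement(g))    # \int_0^t g*(s) ds
    return l2norm(s ** (-0.5) * outer)     # v_1(t) = t^{-1/2}, X = L^2(0,1)

def rhs(fstar):
    return l2norm(s ** (-0.2) * primitive(fstar))   # delta = -1/5

for a in (0.1, 0.3, 0.5, 0.8):
    fstar = s ** (-a)                      # decreasing, hence equal to f*
    print(f"a = {a}:  LHS/RHS = {lhs(fstar) / rhs(fstar):.3f}")
\end{verbatim}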
We conclude this paper with an $H_{u, v, \nu}$ counterpart to \myref{Theorem}{thm:Riterationoptimaldomainupperbound}, whose proof is substantially simpler than that of the theorem.
\begin{proposition}\label{prop:Hiteration}
Let $\|\cdot\|_{X(0,L)}$ be a rearrangement\hyp{}invariant function norm. Let $\nu_1,\nu_2\colon (0,L)\to(0,\infty)$ be increasing bijections. Assume that $\nu_1\in\Dinf{0}$. If $L=\infty$, assume that $\nu_1\in\Dinf{\infty}$. Let $u_1, u_2, v_1, v_2\colon(0,L)\to(0,\infty)$ be measurable. Assume that the function $v_1 u_2$ is equivalent to a nonincreasing function and that it satisfies the averaging condition~\eqref{averaging_condition}. Set
\begin{equation*}
v(t)=\nu_2^{-1}(t)v_1(\nu_2^{-1}(t))u_2(\nu_2^{-1}(t))v_2(t),\ t\in(0,L),
\end{equation*}
and $\nu=\nu_2\circ\nu_1$. We have that
\begin{equation}\label{prop:Hiteration:H1_circ_H2}
\|H_{u_1, v_1, \nu_1}(H_{u_2, v_2, \nu_2}f)\|_{X(0,L)}\approx\|H_{u_1, v, \nu}f\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$},
\end{equation}
in which the multiplicative constants depend only on $\nu_1$, the averaging constant of $v_1 u_2$, and the multiplicative constants in the equivalence of $v_1 u_2$ to a nonincreasing function.
If, in addition, $u_1$ and $u_2$ are nonincreasing, if there is a measurable function $\xi\colon (0,L) \to (0,\infty)$ such that
\begin{equation*}
\frac1{v_1(t)} = \int_0^{\nu_1^{-1}(t)} \xi(s) \d{s} \quad \text{for every $t\in(0,L)$},
\end{equation*}
and if the operator $T_\varphi$ defined by \eqref{opTdef} with $\varphi = \frac{u_1}{\xi}$ is bounded on $X'(0,L)$,
then
\begin{equation*}
\sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \sup_{\substack{h \sim H_{u_2, v_2, \nu_2} g\\h\in\mathfrak{M}pl(0,L)}} \| H_{u_1, v_1, \nu_1}h\|_{X(0,L)} \approx \sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \|H_{u_1, v, \nu}g\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$},
\end{equation*}
in which the multiplicative constants depend only on the norm of $T_\varphi$ on $X'(0,L)$ and the multiplicative constant in \eqref{prop:Hiteration:H1_circ_H2}.
\end{proposition}
\begin{proof}
On the one hand, we have that
\begin{align*}
\|H_{u_1, v_1, \nu_1}(H_{u_2, v_2, \nu_2}f)\|_{X(0,L)}&=\Big\|u_1(t) \int_{\nu_1(t)}^L \Big( u_2(s)\int_{\nu_2(s)}^Lf(\tau)v_2(\tau)\d{\tau}\Big) v_1(s) \d{s}\Big\|_{X(0,L)}\notag\\
\begin{split}
&=\Big\|u_1(t) \int_{\nu(t)}^Lf(\tau) v_2(\tau) \int_{\nu_1(t)}^{\nu_2^{-1}(\tau)} u_2(s) v_1(s) \d{s}\d{\tau}\Big\|_{X(0,L)}\\
&\leq\Big\|u_1(t) \int_{\nu(t)}^Lf(\tau)v_2(\tau)\int_0^{\nu_2^{-1}(\tau)} u_2(s) v_1(s)\d{s}\d{\tau}\Big\|_{X(0,L)}\\
&\lesssim\Big\|u_1(t) \int_{\nu(t)}^Lf(\tau)v_2(\tau)\nu_2^{-1}(\tau)u_2(\nu_2^{-1}(\tau)) v_1(\nu_2^{-1}(\tau))\d{\tau}\Big\|_{X(0,L)}\\
&=\|H_{u_1, v, \nu}f\|_{X(0,L)}
\end{split}
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$ thanks to the fact that $v_1 u_2$ satisfies the averaging condition~\eqref{averaging_condition}.
As for the opposite inequality, observe that $M=\inf_{t\in(0,\frac{L}{\theta})}\frac{\nu_1(\theta t)}{\nu_1(t)}>1$, where $\theta>1$ is such that $\nu_1\in\Dinf[\theta]{0}$ and, if $L=\infty$, also $\nu_1\in\Dinf[\theta]{\infty}$. Set $K=\min\{\frac1{\theta}, \nu_1^{-1}(\frac1{M})\}$. We have that
\begin{align*}
&\|H_{u_1, v_1, \nu_1}(H_{u_2, v_2, \nu_2}f)\|_{X(0,L)}\\
&=\Big\|u_1(t) \int_{\nu(t)}^Lf(\tau)v_2(\tau)\int_{\nu_1(t)}^{\nu_2^{-1}(\tau)} u_2(s) v_1(s)\d{s}\d{\tau}\Big\|_{X(0,L)}\\
&\geq\Big\|\chi_{(0,KL)}(t) u_1(t)\int_{\nu_2(M\nu_1(t))}^Lf(\tau)v_2(\tau)\int_{\nu_1(t)}^{\nu_2^{-1}(\tau)} u_2(s) v_1(s)\d{s}\d{\tau}\Big\|_{X(0,L)}\\
&\gtrsim\Big\|\chi_{(0,KL)}(t) u_1(t)\int_{\nu_2(M\nu_1(t))}^Lf(\tau)v_2(\tau)u_2(\nu_2^{-1}(\tau)) v_1(\nu_2^{-1}(\tau))(\nu_2^{-1}(\tau)-\nu_1(t))\d{\tau}\Big\|_{X(0,L)}\\
&\geq\frac{M-1}{M}\Big\|\chi_{(0,KL)}(t) u_1(t)\int_{\nu_2(M\nu_1(t))}^Lf(\tau)v(\tau)\d{\tau}\Big\|_{X(0,L)}\\
&\geq\frac{M-1}{M}\Big\|\chi_{(0,KL)}(t) u_1(t)\int_{\nu_2(\nu_1(\theta t))}^Lf(\tau)v(\tau)\d{\tau}\Big\|_{X(0,L)}\\
&\geq\frac{M-1}{M}\Big\|\chi_{(0,L)}\Big(\frac{t}{K}\Big) u_1(t)\int_{\nu_2(\nu_1(\frac{t}{K}))}^Lf(\tau)v(\tau)\d{\tau}\Big\|_{X(0,L)}\\
&\geq \frac{M-1}{M}K\Big\|u_1(t) \int_{\nu(t)}^Lf(\tau)v(\tau)\d{\tau}\Big\|_{X(0,L)}
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$, where we used the fact that $v_1u_2$ is equivalent to a nonincreasing function and the boundedness of the dilation operator $D_{\frac1{K}}$ (see~\eqref{ch1:ri:dilation}).
Finally, under the additional assumptions, we have that
\begin{align*}
\sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \sup_{\substack{h \sim H_{u_2, v_2, \nu_2} g\\h\in\mathfrak{M}pl(0,L)}} \| H_{u_1, v_1, \nu_1}h\|_{X(0,L)} &\approx \sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \| H_{u_1, v_1, \nu_1}(H_{u_2, v_2, \nu_2} g)\|_{X(0,L)}\\
&\approx \sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \|H_{u_1, v, \nu}g\|_{X(0,L)}
\end{align*}
for every $f\in\mathfrak{M}pl(0,L)$ thanks to \eqref{prop:norminducedbyT:equivalencewithHf*} combined with \eqref{prop:Hiteration:H1_circ_H2}.
\end{proof}
\begin{remark}
If $T_{\varphi}$ is not bounded on $X'(0,\infty)$, then, while we still have that
\begin{equation*}
\sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \sup_{\substack{h \sim H_{u_2, v_2, \nu_2} g\\h\in\mathfrak{M}pl(0,L)}} \| H_{u_1, v_1, \nu_1}h\|_{X(0,L)} \gtrsim \sup_{\substack{g\sim f\\g\in\mathfrak{M}pl(0,L)}} \|H_{u_1, v, \nu}g\|_{X(0,L)}\quad\text{for every $f\in\mathfrak{M}pl(0,L)$},
\end{equation*}
it remains an open question whether the opposite inequality is also valid.
\end{remark}
\end{document}
|
\begin{document}
\title{\textbf{On Non-Inclusion of Certain Functions in Reproducing Kernel Hilbert Spaces}}
\begin{abstract}
\noindent
We use a classical characterisation to prove that functions which are bounded away from zero cannot be elements of reproducing kernel Hilbert spaces whose reproducing kernels decay to zero in a suitable way.
The result is used to study Hilbert spaces on subsets of the real line induced by analytic translation-invariant kernels which decay to zero at infinity.
\end{abstract}
\section{Introduction}
The inclusion or non-inclusion of certain functions, often constants or polynomials, in reproducing kernel Hilbert spaces (RKHSs) has numerous implications in the theory of statistical and machine learning algorithms.
See \citet[p.\@~142]{Steinwart2008}; \citet[Assumption~2]{LeeLiZhao2016}; and \citet[Proposition~6]{KarvonenKanagawa2019} for a few specific examples.
Non-inclusion of polynomials in an RKHS also explains the phenomena observed in \citet{XuStein2017}.
Furthermore, error estimates for kernel-based approximation methods typically require that the target function be an element of the RKHS~\citep[Chapter~11]{Wendland2005}.
The RKHSs of a number of finitely smooth kernels, such as Matérn and Wendland kernels, are well understood, being norm-equivalent to Sobolev spaces~\citep[e.g.,][Corollary~10.13]{Wendland2005}.
With the exception of power series kernels~\citep{ZwicknaglSchaback2013}, less is known about infinitely smooth kernels.
Since the work of \citet{Steinwart2006} and \citet{Minh2010}, which is based on explicit computations involving an orthonormal basis of the RKHS, it has been known that the RKHS of the Gaussian kernel does not contain non-trivial polynomials.
Recently, \citet{DetteZhigljavsky2021} have proved that RKHSs of analytic translation-invariant kernels do not contain polynomials via connection to the classical Hamburger moment problem.\footnote{They do not state explicitly that their results apply to all analytic translation-invariant kernels, but this can be seen by inserting the standard bound $\abs[0]{f^{(n)}(x)} \leq C R^n n!$ for analytic functions in their Equation~(1.6) and using Stirling's approximation.}
In this note we use a classical RKHS characterisation to furnish a simple proof for the fact that, roughly speaking, functions which are bounded away from zero (e.g., constant functions) cannot be elements of an RKHS whose kernel decays to zero in a certain manner.
An analyticity assumption is used to effectively localise this result for domains $\Omega \subset \mathbb{R}$ which contain an accumulation point.
We then consider analytic translation-invariant kernels which decay to zero.
Although these results are quite simple, they do not seem to have appeared in the literature.
Analyticity of functions in an RKHS has been previously studied by \citet[pp.\@~41--43]{Saitoh1997} and \citet{SunZhou2008}.
General results concerning existence of RKHSs containing given classes of functions can be found in \citet[Section~I.13]{Aronszajn1950}.
\section{Results}
Let $\Omega$ be a set.
Recall that a function $K \colon \Omega \times \Omega \to \mathbb{R}$ is a positive-semidefinite kernel if
\begin{equation*}
\sum_{n=1}^N \sum_{m=1}^N a_n a_m K(x_n, x_m) \geq 0
\end{equation*}
for any $N \geq 1$, $a_1, \ldots, a_N \in \mathbb{R}$, and $x_1, \ldots, x_N \in \Omega$.
By the Moore--Aronszajn theorem a positive-semidefinite kernel induces a unique reproducing kernel Hilbert space, $H_K(\Omega)$, which consists of functions $f \colon \Omega \to \mathbb{R}$.
The inner product and norm of this space are denoted $\inprod{\cdot}{\cdot}_K$ and $\norm[0]{\cdot}_K$.
The kernel is reproducing in $H_K(\Omega)$, which is to say that $f(x) = \inprod{f}{K(\cdot, x)}_K$ for every $f \in H_K(\Omega)$ and $x \in \Omega$.
The following theorem characterises the elements of an RKHS; see, for example, Section~3.4 in \citet{Paulsen2016} for a proof.
\begin{theorem}[Aronszajn] \label{thm:rkhs-inclusion-general}
Let $K$ be a positive-semidefinite kernel on $\Omega$.
A function $f \colon \Omega \to \mathbb{R}$ is contained in $H_K(\Omega)$ if and only if
\begin{equation*}
R(x, y) = K(x, y) - c^2 f(x) f(y)
\end{equation*}
defines a positive-semidefinite kernel on $\Omega$ for some $c > 0$.
\end{theorem}
If $\Theta$ is a subset of $\Omega$, the RKHS $H_K(\Theta)$ contains those functions $f \colon \Theta \to \mathbb{R}$ for which there exists an extension $f_e \in H_K(\Omega)$ (i.e., $f = f_e|_\Theta$).
\subsection{General Result}
We begin with a result for general bounded kernels.
\begin{theorem} \label{thm:general}
Let $K$ be a bounded positive-semidefinite kernel on $\Omega$ and $(x_n)_{n=1}^\infty$ a sequence in $\Omega$ such that
\begin{equation} \label{eq:decay-assumption}
\lim_{\ell \to \infty} \abs[0]{ K(x_{\ell+n}, x_{\ell+m}) } = 0 \quad \text{ for any } \quad n \neq m.
\end{equation}
If $f \colon \Omega \to \mathbb{R}$ satisfies either $f(x_n) \geq \alpha$ or $f(x_n) \leq -\alpha$ for some $\alpha > 0$ and all sufficiently large $n$, then $f \notin H_K(\Omega)$.
\end{theorem}
\begin{proof}
Assume to the contrary that $f \in H_K(\Omega)$.
By Theorem~\ref{thm:rkhs-inclusion-general} there exists $c > 0$ such that $R(x, y) = K(x, y) - c^2 f(x) f(y)$ defines a positive-semidefinite kernel on $\Omega$.
Therefore the quadratic form
\begin{equation*}
\begin{split}
r_{N,\ell} &= \sum_{n=1}^N \sum_{m=1}^N a_n a_m R(x_{\ell+n}, x_{\ell+m}) \\
&= \sum_{n=1}^N \sum_{m=1}^N a_n a_m \big( K(x_{\ell+n}, x_{\ell+m}) - c^2 f(x_{\ell+n}) f(x_{\ell+m}) \big)
\end{split}
\end{equation*}
is non-negative for every $N \geq 1$ and $\ell \geq 0$ and any $a_1, \ldots, a_N \in \mathbb{R}$.
By~\eqref{eq:decay-assumption} it holds for all sufficiently large $\ell$ that
\begin{equation*}
\max_{\substack{ n, m \leq N \\ n \neq m}} \abs[0]{ K(x_{\ell+n}, x_{\ell+m}) } \leq \frac{1}{2} c^2 \alpha^2.
\end{equation*}
Let $C_K = \sup_{ x \in \Omega } K(x,x)$ and set $a_1 = \cdots = a_N = 1$.
Then, for sufficiently large $\ell$,
\begin{equation*}
\begin{split}
r_{N,\ell} &= \sum_{n=1}^N K(x_{\ell+n}, x_{\ell+n}) + \sum_{n \neq m} K(x_{\ell+n}, x_{\ell+m}) - c^2 \sum_{n=1}^N \sum_{m=1}^N f(x_{\ell+n}) f(x_{\ell+m}) \\
&\leq C_K N + \frac{1}{2} c^2 \alpha^2 N^2 - c^2 \alpha^2 N^2 \\
&= \bigg( C_K - \frac{1}{2} c^2 \alpha^2 N \bigg) N,
\end{split}
\end{equation*}
which is negative if $N > 2C_K/(c^2 \alpha^2)$.
It follows that $r_{N,\ell}$ is negative for sufficiently large $N$ and $\ell$ which contradicts the assumption that $f \in H_K(\Omega)$.
\end{proof}
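The mechanism of this proof is easy to observe numerically. The following Python sketch is an illustration only: the Gaussian kernel, the value of $c$, the sequence $x_n = 1 + \cdots + n$, and the choices $f \equiv 1$ and $a_1 = \cdots = a_N = 1$ are concrete selections made for this example. It evaluates the quadratic form $r_{N,\ell}$ and shows that it becomes negative once $N$ is large enough, so that $K(x,y) - c^2 f(x) f(y)$ cannot be positive semidefinite.
\begin{verbatim}
import numpy as np

def K(x, y):
    # Gaussian kernel K(x, y) = exp(-(x - y)^2)
    return np.exp(-(x - y) ** 2)

c = 0.5                                 # hypothetical constant from the characterisation
x = np.cumsum(np.arange(1, 400))        # x_n = 1 + 2 + ... + n, so the points spread out

def r(N, ell):
    # quadratic form with a_1 = ... = a_N = 1 and f identically equal to 1
    pts = x[ell:ell + N]
    return K(pts[:, None], pts[None, :]).sum() - c ** 2 * N ** 2

for N in (2, 5, 10, 20):
    # becomes negative for all sufficiently large N, contradicting positive semidefiniteness
    print(N, r(N, 100))
\end{verbatim}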
An alternative way to prove a similar result in some settings is by appealing to integrability.
For example, elements of the RKHS of an integrable translation-invariant kernel on $\mathbb{R}^d$ are square-integrable~\citep[Theorem~10.12]{Wendland2005}.
Other integrability results can be found in \citet{Sun2005} and \citet{CarmeliDeVitoToigo2006}.
\subsection{Analytic Functions}
Next we use the fact that RKHSs which consist of analytic functions do not depend on the domain to prove localised versions of the above results for certain subsets of $\mathbb{R}$.
The classical results on real analytic functions that we use are collected in Section~1.2 of \citet{KrantzParks2002}.
\begin{lemma} \label{lemma:general-analytic}
Let $K$ be a positive-semidefinite kernel on $\mathbb{R}$ and $\Omega$ a subset of $\mathbb{R}$ which has an accumulation point.
If $H_K(\mathbb{R})$ consists of analytic functions and $f \colon \mathbb{R} \to \mathbb{R}$ is analytic, then $f \in H_K(\mathbb{R})$ if and only if $f|_\Omega \in H_K(\Omega)$.
\end{lemma}
\begin{proof}
If $f \in H_K(\mathbb{R})$, then $f|_\Omega \in H_K(\Omega)$ by definition.
Suppose then that $f|_\Omega \in H_K(\Omega)$.
Hence there is an analytic function $g \in H_K(\mathbb{R})$ such that $g|_\Omega = f|_\Omega$.
The function $f - g$ is analytic and vanishes on $\Omega$.
Because an analytic function which vanishes on a set with an accumulation point is identically zero, we conclude that $g = f$ and therefore $f \in H_K(\mathbb{R})$.
\end{proof}
\begin{theorem} \label{thm:analytic}
Let $K$ be a bounded positive-semidefinite kernel on $\mathbb{R}$ such that $H_K(\mathbb{R})$ consists of analytic functions, $\Omega$ a subset of $\mathbb{R}$ which has an accumulation point, and $(x_n)_{n=1}^\infty$ a sequence in $\Omega$ such that
\begin{equation*}
\lim_{\ell \to \infty} \abs[0]{ K(x_{\ell+n}, x_{\ell+m}) } = 0 \quad \text{ for any } \quad n \neq m.
\end{equation*}
Then a function $f \colon \Omega \to \mathbb{R}$ is not an element of $H_K(\Omega)$ if there exist an analytic function $f_e \colon \mathbb{R} \to \mathbb{R}$ and $\alpha > 0$ such that $f_e|_\Omega = f$ and either $f_e(x_n) \geq \alpha$ or $f_e(x_n) \leq -\alpha$ for all sufficiently large $n$.
\end{theorem}
\begin{proof}
By Lemma~\ref{lemma:general-analytic} $f \in H_K(\Omega)$ if and only if $f_e \in H_K(\mathbb{R})$.
But by Theorem~\ref{thm:general} $f_e$ cannot be an element of $H_K(\mathbb{R})$.
This proves the claim.
\end{proof}
Note that the requirement that $H_K(\mathbb{R})$ consist of analytic functions cannot simply be removed.
For example, by Proposition~\ref{prop:radial-general} the RKHS of the non-analytic kernel $K(x, y) = \exp(-\abs[0]{x-y})$ on $\mathbb{R}$ does not contain non-trivial polynomials.
However, if $\Omega$ is a bounded interval, then $H_K(\Omega)$ is norm-equivalent to the first-order standard Sobolev space and therefore contains all polynomials.
\subsection{Translation-Invariant Kernels}
A kernel $K$ on $\mathbb{R}$ is translation-invariant if there is a function $\varphi \colon [0, \infty) \to \mathbb{R}$ such that
\begin{equation*}
K(x, y) = \varphi( (x - y)^2 ) \quad \text{ for all } \quad x, y \in \mathbb{R}.
\end{equation*}
For translation-invariant kernels the decay assumption~\eqref{eq:decay-assumption} can be cast into a less abstract form.
\begin{proposition} \label{prop:radial-general}
Let $K$ be a translation-invariant positive-semidefinite kernel on $\mathbb{R}$ for $\varphi \geq 0$ such that $\lim_{r \to \infty} \varphi(r) = 0$.
Then a function $f \colon \mathbb{R} \to \mathbb{R}$ is not an element of $H_K(\mathbb{R})$ if there is $R \in \mathbb{R}$ such that (a) $f$ does not change sign on $[R, \infty)$ and $\liminf_{ x \to \infty} \abs[0]{f(x)} > 0$ or (b) $f$ does not change sign on $(-\infty, R]$ and $\liminf_{ x \to -\infty} \abs[0]{f(x)} > 0$.
\end{proposition}
\begin{proof}
Translation-invariant kernels are bounded because $K(x,x) = \varphi(0)$ for every $x \in \mathbb{R}$.
The claim follows from Theorem~\ref{thm:general} by selecting a sequence $(x_n)_{n=1}^\infty$ such that $\abs[0]{x_{\ell+n} - x_{\ell+m}} \to \infty$ as $\ell \to \infty$ for any $n \neq m$ and $x_n \to \infty$ (or $x_n \to -\infty$).
For example, $x_n = 1 + \cdots + n$ (or $x_n = -(1+ \cdots + n)$) suffices since then
\begin{equation*}
\abs[0]{ x_{\ell+n} - x_{\ell+m} } = \frac{\abs[0]{n-m}(2\ell+n+m+1)}{2} \geq \ell.
\end{equation*}
\end{proof}
Note that this proposition could be slightly generalised by requiring only that $f(x_n)$ be bounded away from zero for large $n$.
For example, the function $f(x) = \sin(\pi (x+\frac{1}{2}))^2$, which is not covered by Proposition~\ref{prop:radial-general}, satisfies $f(x_n) = 1$ for all $n$ if $x_n = \pm (1+\cdots+n)$.
Let $\varphi_+^{(n)}(0)$ denote the $n$th derivative from right of $\varphi$ at the origin and define
\begin{equation*}
\mathrm{D}^n K_x(y) = \frac{\partial^n}{\partial v^n} K(v, y) \bigg|_{v = x} \quad \text{ and } \quad \mathrm{D}^{n,n} K(x, y) = \frac{\partial^{2n}}{\partial v^n \partial w^n } K(v, w) \bigg|_{\substack{v = x \\ w = y}}.
\end{equation*}
The following lemma has been essentially proved by \citet{SunZhou2008}.
For completeness we supply a simple proof.
\begin{lemma} \label{lemma:radial-analytic}
If $K$ is a translation-invariant positive-semidefinite kernel on $\mathbb{R}$ for $\varphi$ which is analytic on $\mathbb{R}$, then all elements of $H_K(\mathbb{R})$ are analytic.
\end{lemma}
\begin{proof}
Because $K$ is infinitely differentiable on $\mathbb{R}$, every $f \in H_K(\mathbb{R})$ is infinitely differentiable and satisfies
\begin{equation*}
\abs[0]{ f^{(n)}(x) } = \abs[0]{ \inprod{f}{ \mathrm{D}^n K_x}_K } \leq \norm[0]{f}_K \norm[0]{\mathrm{D}^n K_x}_K = \norm[0]{f}_K \sqrt{ \mathrm{D}^{n,n} K(x, x) }
\end{equation*}
for every $n \geq 0$ and $x \in \mathbb{R}$~\citep[Corollary~4.36]{Steinwart2008}.
From the Taylor expansion
\begin{equation*}
K(x, y) = \sum_{n=0}^\infty \frac{\varphi^{(n)}(0)}{n!} (x-y)^{2n}
\end{equation*}
it is straightforward to compute that, for any $x \in \mathbb{R}$,
\begin{equation*}
\mathrm{D}^{n,n} K(x, x) = (-1)^n \frac{(2n)!}{n!} \varphi_+^{(n)}(0).
\end{equation*}
Since $\varphi$ is analytic, there are positive constants $C$ and $R$ such that $\abs[0]{ \varphi_+^{(n)}(0) } \leq C R^n n!$ for every $n \geq 0$.
It follows that
\begin{equation*}
\abs[0]{ f^{(n)}(x) } \leq \norm[0]{f}_K \sqrt{ \frac{(2n)!}{n!} \abs[0]{\varphi_+^{(n)}(0)}} \leq \norm[0]{f}_K \sqrt{CR^n (2n)!} \leq \sqrt{C} \norm[0]{f}_K (2 \sqrt{R} \,)^n n!,
\end{equation*}
which implies that $f$ is analytic on $\mathbb{R}$.
\end{proof}
\begin{theorem} \label{thm:radial}
Let $K$ be a translation-invariant positive-semidefinite kernel on $\mathbb{R}$ for $\varphi \geq 0$ which is analytic on $[0, \infty)$ and satisfies $\lim_{r \to \infty} \varphi(r) = 0$ and $\Omega$ a subset of $\mathbb{R}$ which has an accumulation point.
Then a function $f \colon \Omega \to \mathbb{R}$ is not an element of $H_K(\Omega)$ if there exists an analytic function $f_e \colon \mathbb{R} \to \mathbb{R}$ such that $f_e|_\Omega = f$ and
\begin{equation} \label{eq:liminf-radial-analytic}
\liminf_{ x \to -\infty} \abs[0]{f_e(x)} > 0 \quad \text{ or } \quad \liminf_{ x \to \infty} \abs[0]{f_e(x)} > 0.
\end{equation}
\end{theorem}
\begin{proof} The claim follows from Lemmas~\ref{lemma:general-analytic} and~\ref{lemma:radial-analytic} and Proposition~\ref{prop:radial-general}.
The requirement in Proposition~\ref{prop:radial-general} that the function should not change sign follows from continuity and~\eqref{eq:liminf-radial-analytic}.
\end{proof}
\section{Examples}
Standard examples of analytic translation-invariant kernels are the Gaussian kernel
\begin{equation*}
K(x, y) = \varphi \big( (x-y)^2 \big) \quad \text{ for } \quad \varphi(r) = \exp(-r)
\end{equation*}
and the inverse quadratic
\begin{equation*}
K(x, y) = \varphi \big( (x-y)^2 \big) \quad \text{ for } \quad \varphi(r) = \frac{1}{1+r}.
\end{equation*}
It is known that the RKHSs of these kernels do not contain non-trivial polynomials, even on bounded intervals~\citep{Minh2010,DetteZhigljavsky2021}.
These results are special cases of Theorem~\ref{thm:radial}, which can be applied to any analytic function whose analytic continuation is bounded away from zero at infinity.
For example, the function
\begin{equation*}
f(x) = \exp\bigg( \! -\sin(x)^2 + \frac{1}{\sqrt{1+x^2}} \bigg)
\end{equation*}
is in the RKHS of no translation-invariant kernel for which $\varphi \geq 0$ decays to zero at infinity.
The exponential kernel
\begin{equation*}
K(x, y) = \exp( x y)
\end{equation*}
serves as a good example that $\lim_{ x \to \infty} K(x, y) = 0$ for infinitely many $y$ is not sufficient for the decay assumption~\eqref{eq:decay-assumption} of Theorem~\ref{thm:general} to be satisfiable.
The RKHS on $\mathbb{R}$ of the exponential kernel consists of analytic functions and contains all polynomials.
For any $y < 0$ it holds that $\lim_{x \to \infty} K(x, y) = 0$.
However, it is not possible to select a sequence $(x_n)_{n=1}^\infty$ for which $K$ satisfies~\eqref{eq:decay-assumption}.
Indeed, $x_{\ell+n}$ and $x_{\ell+m}$ would have to have opposite signs for all sufficiently large $\ell$ whenever $n \neq m$.
But this would in particular imply that $\mathrm{sgn}(x_{\ell+1}) \neq \mathrm{sgn}(x_{\ell+2})$, $\mathrm{sgn}(x_{\ell+1}) \neq \mathrm{sgn}(x_{\ell+3})$, and $\mathrm{sgn}(x_{\ell+2}) \neq \mathrm{sgn}(x_{\ell+3})$ for sufficiently large $\ell$, which is not possible.
\end{document}
|
\begin{document}
\begin{frontmatter}
\title{New numerical methods for blow-up problems}
\author[ipm,bmstu,mephi]{Andrei D. Polyanin\corref{cor1}}
\ead{[email protected]}
\author[us]{Inna K. Shingareva\corref{cor2}}
\ead{[email protected]}
\cortext[cor1]{Principal corresponding author}
\cortext[cor2]{Corresponding author}
\address[ipm]{Institute for Problems in Mechanics, Russian Academy of Sciences,\\
101 Vernadsky Avenue, bldg~1, 119526 Moscow, Russia}
\address[bmstu]{Bauman Moscow State Technical University,\\
5 Second Baumanskaya Street, 105005 Moscow, Russia}
\address[mephi]{National Research Nuclear University MEPhI,
31 Kashirskoe Shosse, 115409 Moscow, Russia}
\address[us]{University of Sonora, Blvd. Luis Encinas y Rosales S/N, Hermosillo C.P. 83000, Sonora, M\'exico}
\begin{abstract}
Two new methods of numerical integration of Cauchy problems for ODEs with blow-up solutions are described.
The first method is based on applying a differential transformation,
where the first derivative (given in the original equation) is chosen as a new independent variable.
The second method is based on introducing a new non-local variable that reduces ODE to
a system of coupled ODEs.
Both methods lead to problems whose solutions do not have blowing-up singular points;
therefore the standard numerical methods can be applied.
The efficiency of the proposed methods is illustrated with several test problems.
\end{abstract}
\begin{keyword}
nonlinear differential equations\sep
blow-up solutions\sep
numerical methods\sep
differential transformations\sep
non-local transformations\sep
test problems
\end{keyword}
\end{frontmatter}
\section{Introduction}
We will consider Cauchy problems for ODEs, whose solutions tend to infinity at some finite value of~$x$,
say $x=x_*$. The point $x_*$ is not known in advance.
Such solutions exist only on a bounded interval and are called blow-up solutions.
This raises a question of practical importance: how can the position of the singular point~$x_*$
and the solution in its neighborhood be determined by numerical methods?
In general, blow-up solutions that have a power singularity can be represented
in a neighborhood of the singular point~$x_*$ as
\begin{align}
y\approx A|x_*-x|^{-\beta},\quad \ \beta>0,
\notag
\end{align}
where $A$ is a constant. For these solutions we have $\ds\lim_{x\to x_*}y=\infty$ and $\ds\lim_{x\to x_*}y'_x=\infty$.
The direct application of the standard numerical methods in such problems leads
to certain difficulties because of the singularity in the blow-up solutions
and the unknown (in advance) blow-up point~$x_*$.
Some special methods for solving such problems are described, for example, in \cite{aco2002,mor1979,hir2006,dlam2012}.
Below we propose new methods of numerical integration of such problems.
\section{Problems for first-order equations}
\subsection{Solution method based on a differential transformation}\label{ss:2.1}
The Cauchy problem for the first-order differential equation has the form
\begin{align}
y'_x=f(x,y)\quad (x>x_0);\quad \ y(x_0)=y_0.
\label{eq:02}
\end{align}
In what follows we assume that $f=f(x,y)>0$, $x_0\ge 0$, $y_0>0$, and $f/y\to\infty$ as $y\to\infty$
(in such problems, blow-up solutions arise when the right-hand side of a nonlinear ODE is
quite rapidly growing as $y\to\infty$).
First, we present the ODE \eqref{eq:02} as a system of equations
\begin{align}
t=f(x,y),\quad \ y'_x=t.
\label{eq:02b}
\end{align}
Then, by applying~\eqref{eq:02b}, we derive a system of equations of the standard form, assuming that
$y=y(t)$ and $x=x(t)$.
By taking the total differential of the first equation in~\eqref{eq:02b} and multiplying
the second one by~$dx$, we get
\begin{equation}
dt=f_x\,dx+f_y\,dy,\quad dy=t\,dx,
\label{eq:02c}
\end{equation}
where $f_x$ and $f_y$ are the respective partial derivatives of $f$.
Eliminating first~$dy$, and then~$dx$ from~\eqref{eq:02c}, we obtain a system of the first-order coupled equations
\begin{equation}
x'_t=\frac 1{f_x+tf_y},\quad \ y'_t=\frac t{f_x+tf_y}\quad (t>t_0),
\label{eq:02d}
\end{equation}
which must be supplemented by the initial conditions
\begin{align}
x(t_0)=x_0,\quad y(t_0)=y_0,\quad t_0=f(x_0,y_0).
\label{eq:02e}
\end{align}
Let $f_x\ge 0$ and $f_y>0$ for $t_0<t<\infty$. Then the Cauchy problem \eqref{eq:02d}--\eqref{eq:02e}
can be integrated numerically, for example, by applying the Runge--Kutta method or other
standard numerical methods (see for example \cite{sch,asch}).
In this case, the difficulties (described in the introduction) will not occur
since $x'_t$ rapidly tends to zero as $t\to\infty$.
The required blow-up point is determined as $\ds x_*=\lim_{t\to\infty}x(t)$.
\subsection{Examples of test problems and numerical solutions}\label{ss:2.2}
\textit{Example 1}.
Consider the model Cauchy problem for the first-order ODE
\begin{align}
y'_x=y^2\quad (x>0);\quad \ \ y(0)=a,
\label{eq:02f}
\end{align}
where $a>0$. The exact solution of this problem has the form
\begin{align}
y=\frac{a}{1-ax}.
\label{eq:02g}
\end{align}
It has a power-type singularity (a first-order pole) at a point $x_*=1/a$.
By introducing a new variable $t=y'_x$ in~\eqref{eq:02f}, we obtain the following Cauchy problem for
the system of equations:
\begin{equation}
x'_t=\frac{1}{2ty},\ \ y'_t=\frac{1}{2y}\ \ (t>t_0);\quad\ x(t_0)=0,\ y(t_0)=a,\ t_0=a^2,
\label{eq:02h}
\end{equation}
which is a particular case of the problem \eqref{eq:02d}--\eqref{eq:02e} with $f=y^2$, $x_0=0$, and $y_0=a$.
The exact solution of this problem has the form
\begin{align}
x=\frac{1}{a}- \frac{1}{\sqrt{t}},\quad \ y=\sqrt{t}\quad (t\ge a^2).
\label{eq:02i}
\end{align}
It has no singularities; the function $x=x(t)$ increases monotonically for $t>a^2$, tending
to the desired limit value $\ds x_*=\lim_{t\to\infty}x(t)=1/a$, and the function $y=y(t)$ monotonously
increases with increasing~$t$.
The solution~\eqref{eq:02i} for the system~\eqref{eq:02h} is a solution of the original problem~\eqref{eq:02f} in parametric form.
Let $a=1$. Figure~{\sl 1a} shows a comparison of the exact solution~\eqref{eq:02g} of the Cauchy problem for one equation~\eqref{eq:02f}
with the numerical solution of the system of equations~\eqref{eq:02h},
obtained by the classical Runge--Kutta method (with stepsize=0.2).
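For the reader's convenience, a minimal Python sketch of this computation is given below. It is only illustrative: the classical fourth-order Runge--Kutta scheme is written out by hand with the stepsize $0.2$ used above, and the final value of~$t$ is an arbitrary cut-off. It integrates the transformed problem~\eqref{eq:02h} and recovers the blow-up point $x_*=1/a$.
\begin{verbatim}
import numpy as np

a = 1.0                                   # y(0) = a; exact blow-up point x_* = 1/a

def rhs(t, u):
    # transformed system: x'_t = 1/(2 t y), y'_t = 1/(2 y)
    x, y = u
    return np.array([1.0 / (2.0 * t * y), 1.0 / (2.0 * y)])

def rk4_step(t, u, h):
    # one step of the classical fourth-order Runge-Kutta scheme
    k1 = rhs(t, u)
    k2 = rhs(t + h / 2, u + h / 2 * k1)
    k3 = rhs(t + h / 2, u + h / 2 * k2)
    k4 = rhs(t + h, u + h * k3)
    return u + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)

t, u, h = a ** 2, np.array([0.0, a]), 0.2   # t0 = a^2, x(t0) = 0, y(t0) = a, step 0.2
while t < 2000.0:                           # the cut-off in t is an arbitrary choice
    u = rk4_step(t, u, h)
    t += h

print("x(t) =", u[0], "  exact:", 1.0 / a - 1.0 / np.sqrt(t), "  x_* =", 1.0 / a)
\end{verbatim}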
\begin{figure}
\caption{({\sl 1a}) Comparison of the exact solution~\eqref{eq:02g} of problem~\eqref{eq:02f} with the numerical solution of system~\eqref{eq:02h}. ({\sl 1b}) Comparison of the numerical solutions of problems~\eqref{eq:02h} and~\eqref{eq:02r}.}
\label{fig:Fig1}
\end{figure}
\subsection{Solution method based on non-local transformations}\label{ss:2.4}
Introducing a new non-local variable according to the formula,
\begin{align}
\xi=\int^x_{x_0}g(x,y)\,dx,\quad \ y=y(x),
\label{*}
\end{align}
leads the Cauchy problem for one equation \eqref{eq:02} to the equivalent problem for the
autonomous system of equations
\begin{equation}
x'_\xi=\frac 1{g(x,y)},\quad \ y'_\xi=\frac {f(x,y)}{g(x,y)}\quad \ \ (\xi>0);\quad \ \ x(0)=x_0,\quad y(0)=y_0.
\label{eq:02p}
\end{equation}
Here, the function $g=g(x,y)$ has to satisfy the following conditions:
\begin{equation}
g>0\ \text{at}\ x\ge x_0, \ y\ge y_0; \quad \ g\to \infty \ \text{as}\ y\to\infty;\quad \ f/g=k\ \text{as}\ y\to\infty,
\label{eq:02q}
\end{equation}
where $k=\text{const}>0$ (and the limiting case $k=\infty$ is also allowed); otherwise
the function~$g$ can be chosen rather arbitrarily.
From \eqref{*} and the second condition~\eqref{eq:02q} it follows that
$x'_\xi\to 0$ as $\xi\to\infty$. The Cauchy problem~\eqref{eq:02p} can be integrated numerically
applying the Runge--Kutta method or other standard numerical methods.
Let us consider some possible selections of the function~$g$ in the system~\eqref{eq:02p}.
$1^\circ$. We can take $g=\bl(1+|f|^s\br)^{1/s}$ with $s>0$.
In this case, $k=1$ in~\eqref{eq:02q}. For $s=2$,
we get the method of the arc length transformation~\cite{mor1979}.
$2^\circ$. We can take $g=f/y$ that corresponds to $k=\infty$ in~\eqref{eq:02q}.
\textit{Example 2}.
For the test problem~\eqref{eq:02f}, in which $f=y^2$, we have $g=f/y=y$.
By substituting these functions in~\eqref{eq:02p}, we arrive at the Cauchy problem
\begin{equation}
x'_\xi=\frac{1}{y},\quad \ y'_\xi=y\quad \ \ (\xi>0);\quad \ \
x(0)=0,\quad y(0)=a.
\label{eq:02r}
\end{equation}
The exact solution of this problem is written as follows:
\begin{equation}
x=\frac{1}{a}\bl(1-e^{-\xi}\br),\quad \ y=ae^\xi.
\label{eq:02pe}
\end{equation}
We can see that the unknown quantity $x=x(\xi)$ tends exponentially to the asymptotic value $x=x_*=1/a$ as $\xi\to \infty$.
The numerical solutions of the problems~\eqref{eq:02h} and~\eqref{eq:02r},
obtained by the fourth-order Runge--Kutta method, are presented in Fig.~{\sl 1b} (for $a=1$ and
the same stepsize $=0.2$).
The numerical solutions are in good agreement, but the method based on the non-local transformation with $g=f/y=y$
is more effective than the method based on the differential transformation.
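A similar sketch applies to the non-locally transformed problem~\eqref{eq:02r}; here, for illustration only, the adaptive Runge--Kutta routine of SciPy is used instead of a fixed-step scheme, and the integration interval, the maximal step and the tolerance are arbitrary choices.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

a = 1.0
# system obtained from the non-local transformation: x'_xi = 1/y, y'_xi = y
sol = solve_ivp(lambda xi, u: [1.0 / u[1], u[1]],
                t_span=(0.0, 15.0), y0=[0.0, a], max_step=0.2, rtol=1e-8)
print("x(15) =", sol.y[0, -1],
      "  exact:", (1.0 - np.exp(-15.0)) / a, "  x_* =", 1.0 / a)
\end{verbatim}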
\section{Problems for second-order equations}
\subsection{Solution method based on a differential transformation}\label{ss:3.1}
The Cauchy problem for the second-order differential equation has the form
\begin{align}
y''_{xx}=f(x,y,y'_x)\quad (x>x_0);\quad \ \ y(x_0)=y_0,\quad \ y'_x(x_0)=y_1.\label{eq:03}
\end{align}
Note that the exact solutions of equations of the form~\eqref{eq:03}, which can be used
for test problems with blow-up solutions, can be found,
for example, in \cite{pol2003,kudr}.
Let $f(x,y,u)>0$ if $y>y_0\ge 0$ and $u>y_1\ge 0$, and
the function~$f$ increases quite rapidly as $y\to\infty$
(e.g. if $f$ does not depend on~$y'_x$, then
$\ds\lim_{y\to\infty}f/y=\infty$).
First, we represent ODE~\eqref{eq:03} as an equivalent
system of equations
\begin{align}
y'_x=t,\quad \ y''_{xx}=f(x,y,t),
\label{eq:03b}
\end{align}
where $y=y(x)$ and $t=t(x)$ are unknown functions.
Taking into account~\eqref{eq:03b}, we derive further a standard system of equations, assuming that
$y=y(t)$ and $x=x(t)$.
To do this, we differentiate the first equation in~\eqref{eq:03b} with respect to~$t$.
We have $(y'_x)'_t=1$.
Taking into account the relations $y'_t=tx'_t$ (follows from the first equation of~\eqref{eq:03b})
and $(y'_x)'_t=y''_{xx}/t'_x=x'_ty''_{xx}$, we get further
\begin{equation}
x'_ty''_{xx}=1.
\label{eq:03c}
\end{equation}
If we eliminate the second derivative $y''_{xx}$ by using a second equation of~\eqref{eq:03b},
we obtain the first-order equation
\begin{equation}
x'_t=\frac 1{f(x,y,t)}.
\label{eq:03d}
\end{equation}
Considering further the relation $y'_t=tx'_t$, we transform~\eqref{eq:03d} to the form
\begin{equation}
y'_t=\frac t{f(x,y,t)}.
\label{eq:03e}
\end{equation}
Equations \eqref{eq:03d} and \eqref{eq:03e} represent a system of coupled first-order equations
with respect to functions $x=x(t)$ and $y=y(t)$. The system \eqref{eq:03d}--\eqref{eq:03e}
should be defined with the initial conditions
\begin{align}
x(t_0)=x_0,\quad y(t_0)=y_0,\quad t_0=y_1.
\label{eq:03f}
\end{align}
The Cauchy problem \eqref{eq:03d}--\eqref{eq:03f} can be integrated numerically
applying the standard numerical methods \cite{sch,asch}, without fear of blow-up solutions.
\begin{remark}
Systems of equations
\eqref{eq:02b} and \eqref{eq:03b} are particular cases of parametrically defined
nonlinear differential equations, which are considered in \cite{pol2016,pol2017}.
In \cite{pol2017}, the general solutions of several parametrically defined ODEs were obtained
via differential transformations, based on introducing a new independent
variable~$t=y'_x$.
\end{remark}
\subsection{Examples of test problems and numerical solutions}\label{ss:3.2}
\begin{figure}
\caption{({\sl 2a}) Comparison of the exact solution~\eqref{eq:02g} of problem~\eqref{eq:04a} with the numerical solution of system~\eqref{eq:04b}. ({\sl 2b}) Comparison of the numerical solutions of problems~\eqref{eq:04b} and~\eqref{eq:xx}.}
\label{fig:Fig2}
\end{figure}
\textit{Example 3}.
Let us consider Cauchy problem
\begin{align}
y''_{xx}=2y^3\quad \ (x>0),\quad \ y(0)=a,\quad \ y'_x(0)=a^2.
\label{eq:04a}
\end{align}
The exact solution of this problem is defined by the formula~\eqref{eq:02g}.
\goodbreak
Introducing a new variable $t=y'_x$ in~\eqref{eq:04a}, we transform~\eqref{eq:04a} to the Cauchy problem
for the system of the first-order ODEs
\begin{equation}
x'_t=\tfrac 12y^{-3}, \ \ \ y'_t=\tfrac12 ty^{-3} \ \ \ (t>t_0);\quad \
x(t_0)=0, \ \ \ y(t_0)=a, \ \ \ t_0=a^2,
\label{eq:04b}
\end{equation}
which is a particular case of the problem \eqref{eq:03d}--\eqref{eq:03f} with $f=2y^3$, $x_0=0$, and $y_0=a$.
The exact solution of the problem~\eqref{eq:04b} is given by the formulas~\eqref{eq:02i}.
Figure~{\sl 2a} shows a comparison of the exact solution~\eqref{eq:02g} of the Cauchy problem for one equation~\eqref{eq:04a}
with the numerical solution of the system of equations~\eqref{eq:04b},
obtained by the fourth-order Runge--Kutta method (we have a good coincidence).
\subsection{Solution method based on non-local transformations}\label{ss:3.3}
First, equation~\eqref{eq:03} can be represented as a system of two equations
$$
y'_x=t,\quad \ t'_x=f(x,y,t),
$$
\noindent
and then we introduce the non-local variable by the formula
\begin{align}
\xi=\int^x_{x_0}g(x,y,t)\,dx,\quad \ y=y(x),\quad t=t(x).
\label{eq:05}
\end{align}
\noindent
As a result, the Cauchy problem \eqref{eq:03} can be transformed to
the following problem for an autonomous system of three equations:
\begin{equation}
\begin{gathered}
x'_\xi=\frac 1{g(x,y,t)},\quad \ y'_\xi=\frac t{g(x,y,t)},\quad \ t'_\xi=\frac {f(x,y,t)}{g(x,y,t)}\quad \ \ (\xi>0);\\
x(0)=x_0,\quad \ y(0)=y_0,\quad \ t(0)=y_1.
\end{gathered}
\label{eq:06}
\end{equation}
For a suitable choice of the function $g=g(x,y,t)$ (not very restrictive conditions of the form~\eqref{eq:02q} must be
imposed on it), the Cauchy problem \eqref{eq:06} can be numerically integrated applying the
standard numerical methods \cite{sch,asch}.
Let us consider some possible selections of the function~$g$ in system~\eqref{eq:06}.
\goodbreak
$1^\circ$. We can take $g=\bl(1+|t|^s+|f|^s\br)^{1/s}$ with $s>0$. The case $s=2$
corresponds to the method of the arc length transformation~\cite{mor1979}.
$2^\circ$. Also, we can take $g=f/y$, $g=f/t$, or $g=t/y$.
\textit{Example 4}. For the test problem~\eqref{eq:04a}, in which $f=2y^3$,
we put $g=t/y$. By substituting these functions in~\eqref{eq:06}, we arrive
at the Cauchy problem
\begin{equation}
x'_\xi=y/t,\ \ y'_\xi=y,\ \ t'_\xi=2y^4/t\ \ (\xi>0);\ \ \ x(0)=0,\ \ y(0)=a,\ \ t(0)=a^2.
\label{eq:xx}
\end{equation}
The exact solution of this problem is written as follows:
\begin{equation}
x=a^{-1}\bl(1-e^{-\xi}\br),\quad \ y=ae^\xi,\quad \ t=a^2e^{2\xi}.
\label{eq:xy}
\end{equation}
\noindent
We can see that the unknown quantity $x=x(\xi)$ tends exponentially to the asymptotic value $x=x_*=1/a$ as $\xi\to \infty$.
The numerical solutions of the problems~\eqref{eq:04b} and~\eqref{eq:xx},
obtained by the fourth-order Runge--Kutta method, are presented in Fig.~{\sl 2b}
(for $a=1$ and the same stepsize $=0.2$).
The numerical solutions
are in good agreement, but the method based on the non-local transformation with $g=t/y$
is more effective than the method based on a differential transformation.
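The autonomous system~\eqref{eq:xx} can be integrated in the same way. The following Python sketch is again only illustrative (SciPy's adaptive Runge--Kutta routine, with an arbitrary cut-off in~$\xi$, maximal step and tolerance); it recovers the blow-up point $x_*=1/a$.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

a = 1.0

def rhs(xi, u):
    # autonomous system: x'_xi = y/t, y'_xi = y, t'_xi = 2 y^4 / t
    x, y, t = u
    return [y / t, y, 2.0 * y ** 4 / t]

sol = solve_ivp(rhs, (0.0, 12.0), [0.0, a, a ** 2], max_step=0.2, rtol=1e-8)
print("x(12) =", sol.y[0, -1],
      "  exact:", (1.0 - np.exp(-12.0)) / a, "  x_* =", 1.0 / a)
\end{verbatim}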
\begin{remark}
The method described in Section~\ref{ss:3.3} can be generalized to nonlinear ODEs of arbitrary
order and systems of coupled ODEs.
\end{remark}
\end{document}
|
\begin{document}
\title{Robust Dynamical Decoupling for Arbitrary Quantum States of a Single NV Center in Diamond}
\author{J. H. Shim}
\author{I. Niemeyer}
\author{J. Zhang}
\author{D. Suter}
\affiliation{
Fakult\"{a}t Physik, Technische Universit\"{a}t Dortmund, D-44221 Dortmund, Germany\\
}
\begin{abstract}
Dynamical decoupling is a powerful technique for extending the coherence time (T$_2$) of qubits.
We apply this technique to the electron spin qubit of a single nitrogen-vacancy center in type IIa diamond.
In a crystal with natural abundance of $^{13}$C nuclear spins,
we extend the decoherence time up to 2.2 ms.
This is close to the T$_1$ value of this NV center (4 ms).
Since dynamical decoupling must perform well for arbitrary initial conditions,
we measured the dependence on the initial state and compared the performance of different sequences
with respect to initial state dependence and robustness to experimental imperfections.\end{abstract}
\maketitle
\section{Introduction}
Sufficiently long coherence times (T$_2$) are amongst the most important criteria
for the realization of scalable quantum information processors.\cite{Ladd2010}
In solid state systems, it is often possible to extend the coherence time by removing
sources of decoherence from the host material, e.g. by isotopic engineering.\cite{Tyryshkin2011, Balasub2009}
If this is not possible, or to further reduce the detrimental effects of a noisy environment,
it is possible to use a series of control pulses applied to the qubits.
This approach, termed Dynamical Decoupling (DD), refocuses the interaction between
system and environment by applying a stroboscopic sequence of inversion pulses to the qubits.
It has been tested on different systems with environments consisting of electronic or nuclear spin baths.
\cite{Du2009, Biercuk2009, Bylander2011, Lange2010, BarGill2012, alvarez2010}
The technique allows, e.g., to extend the decoherence time T$_2$ or to increase the sensitivity
of magnetic field sensors to ac magnetic fields.\cite{Lange2011, BarGill2012}
The usefulness of this approach for practical applications depends on the robustness of the performance
with respect to experimental imperfections, such as finite precision of control field amplitudes.
Since the number of control operations required for effective DD can be very large,
it is essential that the errors from the individual control operations do not accumulate.
This goal can be achieved by using fault-tolerant sequences, which are designed in such a way
that the errors of any individual control operation are cancelled by all the other operations
of a cycle.\cite{ryan2010, cai2011, souza2011}
Here, we test this approach on a single Nitrogen-Vacancy color center (NV center) in diamond.
The effectiveness of the DD technique has been tested with different types of diamond;
de Lange, et al.\cite{Lange2010} used a diamond with a relatively high concentration
of nitrogen impurities, which generates a strong electron-spin bath.
The other studies employed CVD grown diamond crystals with reduced native nitrogen concentrations,
in which the nuclear spin bath of the $^{13}$C nuclear spins is the major source of
decoherence.\cite{ryan2010, Naydenov2011, BarGill2012}
Here, we extend these earlier studies by testing the limits of DD in two respects:
first, we use DD to extend the dephasing time $T_2$ to nearly the value of the spin-relaxation time $T_1$.
To reach this limit, we have to apply hundreds of refocusing pulses.
With such a large number of pulses, the unavoidable imperfections of the individual gate operations
would normally destroy the coherence of the qubits.
To avoid this, we use robust DD sequences, which were designed such that the errors due to
the individual gate operations do not add up but cancel each other over the complete DD cycle.
When the effectiveness of DD sequences is demonstrated, it is often tempting to consider their
performance for a specific initial condition that is left unperturbed by the combination of DD and
the environmental perturbation.
However, in actual quantum computers, the state of the system is generally not known.
Accordingly, any serious test of DD performance must consider the performance for arbitrary
initial conditions.
We therefore present here a study in which we compare the performance of different
DD sequences over all possible input states.
\begin{figure}
\caption{(color online) (a) Confocal image (0.8 $\mu$m $\times$ 0.8 $\mu$m) of a single NV center. (b) Second-order correlation measurement from a photon-antibunching experiment. (c) Ramsey fringes. (d) Fourier transform of the Ramsey fringe curve, showing only three peaks due to the hyperfine interaction with the $^{14}$N nuclear spin.}
\label{fig1}
\end{figure}
\section{Experimental setup}
In the experiments described below, we use a home-built confocal microscope
for optical addressing and detection of single NV centers.
Fig.~\ref{fig1} (a) shows, as an example, the scan image of one NV center.
A diode-pumped solid state laser working at a wavelength of 532 nm provides the optical excitation.
The acousto-optical modulator used for pulse shaping of the laser
provides an extinction ratio of more than 57 dB.
To verify that we detect signals from single NV centers, we use a standard photon-antibunching
configuration with single photon detectors in combination with a fast time-correlation card.
Fig.~\ref{fig1} (b) shows a typical correlation signal trace.
We generate the microwave pulses with an arbitrary waveform generator.
Working at 2 GS/s, it generates the pulses around a carrier frequency of 250 MHz.
These pulses are upconverted to the target frequency in the range of 2.6 - 3.2 GHz
by mixing them with a cw signal from a stable microwave source.
Unwanted sidebands are filtered out by properly selected band-pass filters.
The amplified pulses pass through a 20 $\mu$m diameter copper wire
attached to the surface of the diamond crystal and terminated by 50 $\Omega$.
The leakage of microwave power at pulse-off time is $<-60$ dB.
Under typical experimental conditions, the duration of a 180 degree pulse is 40 ns.
A permanent magnet is used to apply a static magnetic field,
which is roughly aligned with the direction of the principal axis of the NV center.
A type IIa diamond crystal with a N concentration $<5$ ppb is the host material of the investigated NV centers.
All the measured curves in the present work are normalized in order to indicate the probability ($p$) of the bright state ($m_S=0$).
As a reference for the normalization, the maximum and minimum values of the Rabi oscillation curve were taken as $p=1$ and $p=0$, respectively.
\section{Experimental results}
\subsection{Spin system}
In the natural abundance diamond crystal with reduced N concentration,
the $^{13}$C nuclear spins form a spin bath that induces decoherence on the NV center.\cite{Balasub2009, Mizuochi2009}
In the case of Hahn echoes, the decoherence due to the $^{13}$C nuclear spins is reduced significantly
when the free precession period between the excitation and refocusing pulse is a multiple of the nuclear spin
Larmor precession period.\cite{maze2008}
For the present study, we selected an NV center that has no $^{13}$C nuclear spins in its immediate vicinity,
so the coupling to the nuclear spin bath is relatively weak and the decoherence correspondingly slow.
Figure \ref{fig1} (c) shows the Ramsey fringe signal measured from this center and Figure \ref{fig1} (d)
its Fourier transform.
We applied a magnetic field of 6.8 mT along the direction of the symmetry axis and excited the transition
at 2.68 GHz with resonant microwave pulses.
\subsection{Dynamical decoupling}
The oldest and simplest pulse sequence for dynamical decoupling is the CPMG sequence -
a train of equidistant $\pi$-pulses around the same axis.
In Fig.~\ref{fig2}, we compare the signal decays for different CPMG sequences, starting with the Hahn echo
at the bottom and experiments with increasing numbers of refocusing pulses towards the top.
In each curve, we observe an initial fast decay, which is followed by a series of revivals.
The revivals are separated by $2 N \tau_L$, where $N$ is the number of $\pi$ pulses in the sequence
and $\tau_L \approx 73 \, \mathrm{\mu s}$ is the Larmor period of the $^{13}$C nuclear spins.
This implies that the refocusing is most effective when the nuclear spins constituting the environment
have completed an integral number of Larmor precessions between the pulses.
The dashed curves in Fig.~\ref{fig2} represent functions $\exp(-(\frac{t}{T_2})^n)$
with decay times $T_2$ and exponents $n$ fitted to the maxima of the Larmor revivals in the experimental data.
The decoherence times $T_2$ are also noted in the figure legend.
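Fits of this type are straightforward to reproduce. The following Python sketch illustrates the procedure with synthetic revival maxima (the numbers below are invented for illustration only, loosely modelled on the Hahn-echo values quoted in the text, since the measured data are not tabulated here), using the nonlinear least-squares routine of SciPy.
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

def envelope(t, T2, n):
    # stretched-exponential decay exp(-(t/T2)^n)
    return np.exp(-(t / T2) ** n)

# synthetic revival maxima at multiples of 2*tau_L (invented numbers)
t_max = np.arange(1, 9) * 0.146                     # ms
signal = (envelope(t_max, 0.30, 3.0)
          + 0.01 * np.random.default_rng(0).normal(size=t_max.size))

(T2, n), _ = curve_fit(envelope, t_max, signal, p0=(0.5, 2.0),
                       bounds=([0.05, 0.5], [2.0, 6.0]))
print(f"fitted T2 = {T2:.3f} ms, exponent n = {n:.2f}")
\end{verbatim}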
Figure \ref{fig3} summarizes the increase of the decoherence time with the number of refocusing pulses.
The decoherence times from the CPMG experiment are shown as black squares, with filled squares
representing the values obtained by fitting the decay of the Larmor revivals (the dashed curves in Fig.\,\ref{fig2}),
while the open squares represent the fit to the initial decay.
In both cases, the decay time increases with the number of pulses in the DD sequence.
If we take the values from the maxima of the Larmor revivals, they increase from $\approx 0.3$ ms for the Hahn echo
up to 2.2 ($\pm$0.2) ms for CPMG 128, which is the longest value ever reported for the NV center.
Apart from the increase of the decoherence time with the number of pulses, we find that also
the shape of the decay curve changes.
In Fig.\ref{fig3}, we quantify this by the exponent $n$, which changes from $\approx 3$ for the Hahn echo
to $\approx 1$ for CPMG 128.
This result indicates that different processes are responsible for the observed decoherence.
For relatively long pulse separations, as in the case of the Hahn echo,
the dominant environmental interaction is the hyperfine coupling to the nuclear spins.
The shortest relevant timescale for the nuclear spins is the Larmor precession, which has a period of
$\tau_L \approx 73 \, \mathrm{\mu s}$.
Since this evolution is coherent, the associated decay is refocused at specific times,
giving rise to the Larmor revivals \cite{Childress2006}.
On a slower timescale, the nuclear spins undergo mutual spin flips, which also contribute to the decoherence of the electron spin.
\begin{figure}
\caption{(color online) Coherence decay curves for different dynamical decoupling sequences.
The bottom curve shows the result from the Hahn echo, the higher curves correspond to the CPMG sequence
with increasing number of refocusing pulses.
The gray dashed lines represent fits to the function $\exp(-(\frac{t}{T_2})^n)$.}
\label{fig2}
\end{figure}
When the spacing between the pulses becomes short compared to the Larmor period,
the refocusing of the decoherence due to the nuclear spin bath becomes almost perfect.
This is demonstrated in Fig.\,\ref{fig3} by the convergence of the curves from the decay time of the envelope
with that of the initial decay.
Both time scales approach a limiting value of $\approx 2.2$ ms, which is only slightly shorter than the
longitudinal relaxation time for this spin, which is $T_1 \approx 4$ ms.
Apparently, the remaining decoherence processes causing this decay are no longer due to the interaction
with the nuclear spins, but arise from processes with significantly shorter correlation times,
which cannot be refocused by dynamical decoupling.
Possible candidates for these processes include interactions with paramagnetic centers, electric fields\cite{dolde2011}
or phononic processes.
This interpretation is compatible with the observation of a purely exponential decay,
which is expected if the correlation time of the environment is shorter than the interpulse delay of the dynamical decoupling sequence.
\begin{figure}
\caption{(color online) T$_2$ and T$_2^0$ as a function of the number of pulses in CPMG and KDD sequences.
Black symbols are CPMG and red KDD.
Solid symbols represent T$_2$s obtained from the envelope of the revivals,
and open symbols are T$_2^0$s from the initial decay.
Green symbols represent the exponent $n$ in the decay function $\exp(-(\frac{t}{T_2})^n)$.}
\label{fig3}
\end{figure}
\subsection{Parallel and orthogonal initial states}
The above experiments were performed with one specific initial condition,
for which the CPMG sequence is optimized.
However, in quantum information processing, the initial state is in general unknown,
and it is important to use a pulse
sequence that performs well for arbitrary initial conditions.
To evaluate the effectiveness for arbitrary initial conditions, we now consider the
effect of the DD sequence on two orthogonal initial conditions
$|x\rangle = (|0\rangle + |1\rangle)/\sqrt{2}$ and
$|y\rangle = (|0\rangle + i |1\rangle)/\sqrt{2}$.
Fig.~\ref{fig4} shows the decay of these states for the DD sequences CPMG, KDD, and XY-4
as a function of the number of refocusing pulses.
In the case of the CPMG sequence, we observe a strong asymmetry between the
two initial conditions:
If the initial spin polarization is oriented along the rotation axis of the refocusing pulses,
the state preservation works extremely well.
If the initial state is perpendicular to the direction of the pulses, the first $\approx 10$ pulses
destroy the coherence almost completely.
This effect is expected if the rotation angle of the pulses deviates from the ideal value of $\pi$.\cite{alvarez2010}
Such a flip angle error has no effect on a state that is polarized along the rotation axis,
but for orthogonal orientations, the flip angle errors of the individual pulses accumulate
and cause an unwanted evolution of the system.
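This accumulation is easy to reproduce in a minimal single-qubit model. The Python sketch below is an idealized illustration (instantaneous pulses, no decoherence or detuning, and a systematic flip-angle error of 5\,\%, a value chosen only for this example): it propagates the states $|x\rangle$ and $|y\rangle$ through $N$ CPMG pulses, i.e.\ rotations $R_y(\pi+\epsilon)$, and compares the result with the error-free evolution. For $|y\rangle$ the fidelity stays at unity, whereas for $|x\rangle$ it decays as $\cos^2(N\epsilon/2)$; the same script can be modified to alternate the rotation axes as in XY-4 or KDD in order to explore the compensation discussed below.
\begin{verbatim}
import numpy as np

def Ry(theta):
    # rotation of a spin 1/2 about the y axis by an angle theta
    return np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
                     [np.sin(theta / 2),  np.cos(theta / 2)]])

ket_x = np.array([1.0, 1.0]) / np.sqrt(2)       # |x> = (|0> + |1>)/sqrt(2)
ket_y = np.array([1.0, 1.0j]) / np.sqrt(2)      # |y> = (|0> + i|1>)/sqrt(2)

eps = 0.05 * np.pi                              # 5 % flip-angle error (illustrative)
for N in (2, 4, 8, 16, 20):
    U_ideal = np.linalg.matrix_power(Ry(np.pi), N)
    U_real = np.linalg.matrix_power(Ry(np.pi + eps), N)
    for name, psi in (("|x>", ket_x), ("|y>", ket_y)):
        fidelity = abs(np.vdot(U_ideal @ psi, U_real @ psi)) ** 2
        print(f"N = {N:2d}, initial {name}: fidelity = {fidelity:.3f}")
\end{verbatim}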
Such a strong dependence on the initial state of the system is not compatible with quantum information processing,
where the state is not known in general.
Universal DD sequences must perform well for arbitrary quantum states
and they have to be robust against pulse errors.
The earliest sequence that fulfills this requirement is the
XY-4 sequence \cite{Maudsley1986488,Viola2003}
and its derivatives that increase the robustness by combining different versions of the basic XY-4 cycle
into longer cycles with better compensation of pulse imperfections \cite{Gullion1990479,Khodjasteh:2005it}.
A more recent sequence that shows even better performance is the KDD sequence \cite{souza2011},
which is based on an expansion scheme developed by Cho et al \cite{Cho1986}.
As shown in Fig.~\ref{fig4}, the performance of both sequences does not depend on the
initial state, within the experimental uncertainties.
The performance of the XY-4 sequence is comparable to that of CPMG\_y for $<20$ pulses but
deteriorates for longer sequences, while the KDD sequence matches the performance of CPMG\_y
for both initial conditions, indicating that this sequence largely compensates the flip angle errors
of the individual pulses.
\begin{figure}
\caption{(color online) Survival probabilities of two orthogonal initial states, $ |x \rangle$ (solid symbols)
and $| y \rangle$ (empty) during different sequences of equidistant refocusing pulses
for CPMG (red), KDD (blue), and XY-4 (green).
The time interval between the refocusing pulses was fixed to 0.8 $\mu s$.
}
\label{fig4}
\end{figure}
\subsection{Arbitrary initial states}
Fig.~\ref{fig5} extends this study to arbitrary initial conditions: for these experiments,
we prepared the initial states
\begin{equation}
|\psi_I\rangle = \cos(\frac{\theta}{2})|0\rangle + \sin(\frac{\theta}{2}) e^{i\phi} | 1 \rangle,
\label{rho_init}
\end{equation}
where the angles $\theta$ and $\phi$, which parametrize the state,
correspond to spherical coordinates on the Bloch sphere.
Fig.~\ref{fig5} shows the observed survival probability $|\langle \psi_F | \psi_I \rangle|^2$,
with $| \psi_F \rangle$ representing the state of the qubit after 20 refocusing pulses.
The three panels represent the survival probability as a function of $\theta$ and $\phi$
for the three DD sequences CPMG, XY-4 and KDD.
All three sequences reach almost perfect survival probabilities for $\theta = \phi = \pi/2$,
which corresponds to the $y$ initial condition where the spins are aligned with the rotation
axis of the CPMG pulses.
In the case of CPMG, the performance deteriorates when the initial condition deviates
significantly from this situation.
This behavior is an indication that the decay of the system fidelity is dominated
not by the environment, but rather by pulse imperfections \cite{alvarez2010,souza2011}.
\begin{figure}
\caption{(color online) Experimental survival probability for the sequences CPMG, KDD and XY-4
after 20 DD pulses as a function of the parameters $\theta$ and $\phi$
that define the initial state of Eq.~(\ref{rho_init}).}
\label{fig5}
\end{figure}
\subsection{Effect of pulse imperfections}
Apart from finite pulse durations, the two dominant experimental imperfections that limit the performance
of experimental dynamical decoupling are flip angle errors and offset errors.
In Fig.~\ref{fig6}, we experimentally investigate the effect of these imperfections on the performance of
different dynamical decoupling sequences.
In the left-hand column, we measured the remaining spin polarization after 20 and 200 pulses as a function
of the actual flip angle of each pulse.
For 20 refocusing pulses and longitudinal initial conditions, flip angle errors
up to $\pm$ 10 $\%$ do not reduce the fidelity.
For higher errors, we find a bi-quadratic reduction (not shown here).
This is consistent with theoretical considerations.\cite{borneman2010}
For the robust sequences, whose performance does not depend significantly
on the initial condition, KDD clearly outperforms XY-4.
This is particularly well visible for 200 pulses.
Interestingly, XY-4 shows strong oscillations as a function of the flip angle error.
This implies that XY-4 becomes very sensitive to flip angle errors if more than 100 refocusing pulses are used,
which limits its usability.
The right hand column shows the measured behavior as a function of the
offset error for 20 refocusing pulses.
For all three sequences, we observe significant reductions of the fidelity
if the offset exceeds $\approx 2$ MHz.
This is significantly less than the Rabi frequency (12.5 MHz) of the pulses used for these experiments
and comparable to inherent offsets like the hyperfine interaction with the nitrogen nuclear spin.
\begin{figure}
\caption{(color online) Left column : Survival probability of the electron spin state as a function of
the fractional flip angle error.
The initial state was chosen along the rotation axis of the CPMG pulses.
The numbers in the parentheses represent number of refocusing pulses in DD sequence.
Right column : variation of survival probability as a function of frequency offset errors on pulses.
The numbers in the parentheses represent number of refocusing pulses in the DD sequence.}
\label{fig6}
\end{figure}
In order to better understand the processes that reduce the process fidelity in these cases,
we performed state tomography
after 20 refocusing pulses for the initial state along the $x$-axis ($\theta=\frac{\pi}{2}$, $\phi=0$).
From the state tomography, we could estimate a flip angle error of 1.6 $\%$,
which corresponds to 2.9 degree deviation from the targeted 180 degree rotation.
This value is quite consistent with the hardware limit on the time-step width.
The arbitrary wave form generator used for pulse generation has 0.5 ns step width (2 GS/s).
Given the refocusing pulse duration of 40 ns, the digital time resolution of the pulse
results in a flip angle error of 1.2 $\%$, which is not far from the value given above and does not
yet include effects due to amplifier nonlinearities or pulse transients.
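As a rough sketch of how this estimate arises (our reading, taking the 0.5 ns step as the worst-case timing error of a single pulse), the fractional flip angle error is simply the ratio of the time resolution to the pulse duration:
\begin{equation*}
\frac{\Delta\alpha}{\alpha}\approx\frac{\Delta t}{\tau_{\pi}}=\frac{0.5\ \mathrm{ns}}{40\ \mathrm{ns}}=1.25\,\%\approx 1.2\,\%.
\end{equation*}
The remaining gap to the independently estimated 1.6 $\%$ can then plausibly be attributed to amplifier nonlinearities and pulse transients.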
A frequency offset error of about $-4\,\%$ of the Rabi oscillation frequency (0.5 MHz out of 12.5 MHz) was also observed.
This estimate is supported by the frequency offset data in Fig.~\ref{fig6}:
the central positions of all three curves are shifted slightly toward positive frequencies, by around 0.5 MHz.
The origin of this frequency offset error is unclear to us,
because we tune the frequency of the microwave pulses to the center of the spectrum measured
by the Ramsey fringe experiment in Fig.~\ref{fig1} (d) as precisely as possible,
expecting that the off-resonance effect of the two outer hyperfine lines cancel each other.
One way to test whether the off-resonance effect generates frequency offset errors on the pulses
is to repeat the experiments at the excited-state level anti-crossing point (514 G),
where only one of the $^{14}$N nuclear spin states is populated,\cite{Jacques2009}
so that the off-resonance effect can be removed completely.
\section{Conclusions}
Dynamical decoupling is an effective method for increasing the coherence time of quantum bits,
such as spin qubits in solids.
As long as the noise sources fluctuate on a timescale comparable to or longer than the system's coherence time,
DD can increase the survival time of the quantum information.
With CPMG sequences, we have shown that T$_2$ of a single NV center in natural abundance diamond
can be extended up to 2.2 ms by strongly suppressing the fluctuating fields from the $^{13}$C nuclear spin bath.
This is close to the T$_1$ limit, which is determined by different, more rapidly fluctuating processes.
We found that the recently introduced DD sequence KDD is remarkably robust and works for
arbitrary initial conditions.
It is much less susceptible to experimental uncertainties, such as flip-angle errors and
frequency offsets, than the simpler sequences CPMG and XY-4.
We expect that this robust DD sequence will be used for many other purposes requiring long coherence times,
such as ac magnetometry or extending the lifetime of multipartite systems, including entangled states.
\acknowledgments
We thank F. Jelezko for his generous loan of the sample and
Gonzalo A. \'Alvarez and Alexandre M. Souza for fruitful discussions and helpful advice.
This work was supported by the DFG through grant Su 192/27-1.
\end{document}
|
\begin{document}
\vfuzz2pt
\hfuzz2pt
\newtheorem{thm}{Theorem}[section]
\newtheorem{corollary}[thm]{Corollary}
\newtheorem{lemma}[thm]{Lemma}
\newtheorem{proposition}[thm]{Proposition}
\newtheorem{defn}[thm]{Definition}
\newtheorem{remark}[thm]{Remark}
\newtheorem{example}[thm]{Example}
\newtheorem{fact}[thm]{Fact}
\newcommand{\norm}[1]{\left\Vert#1\right\Vert}
\newcommand{\abs}[1]{\left\vert#1\right\vert}
\newcommand{\set}[1]{\left\{#1\right\}}
\font\lasek=lasy10 \chardef\kwadrat="32
\def\kwadracik{{\lasek\kwadrat}}
\newcommand*{\C}{\mathbf{C}}
\newcommand*{\R}{\mathbf{R}}
\newcommand*{\Z}{\mathbf {Z}}
\title{A moduli space of minimal affine Lagrangian submanifolds}
\author{Barbara Opozda}
\subjclass{ Primary: 53C40, 57R40, 58B99 Secondary: 53C38, 58A10}
\keywords{infinite dimensional Fr\'echet manifold, variation, phase
function, tubular mapping, H\"older-Banach space, differential
operator, elliptic regularity theorems}
\thanks{The research was partially supported by the
grant NN 201 545738} \maketitle
\address{Instytut Matematyki UJ, ul. \L ojasiewicza 6, 30-348 Cracow,
Poland}
\email{[email protected]}
\vskip 1in \noindent
\vskip 0.5in \noindent
{\bf Abstract.}
It is proved that the moduli space of all connected compact orientable embedded minimal
affine Lagrangian submanifolds of a complex equiaffine space constitutes an infinite dimensional
Fr\'echet manifold (if it is not $\emptyset$). The moduli space of
all connected compact orientable metric Lagrangian embedded surfaces in
an almost K\"ahler 4-dimensional manifold forms an infinite dimensional
Fr\'echet manifold (if it is not $\emptyset$).
\section{Introduction}
R. McLean proved in \cite{McL} that special Lagrangian submanifolds
near a compact special Lagrangian submanifold of a Calabi-Yau
manifold form a manifold of dimension $b_1$, where $b_1$ is the
first Betti number of the submanifold. Since then, several papers have been
published that generalize this result to ambient spaces which are not
Calabi-Yau manifolds but spaces of a more general type.
All those cases are, in fact, within metric geometry.
The aim of this paper is to prove a similar result in the
non-metric case. Moreover, we prove a global result, that is, we
describe the set of all minimal affine Lagrangian embeddings of a
compact manifold. It turns out that this set has a nice structure.
Namely, it is an infinite dimensional Fr\'echet manifold modeled on
the Fr\'echet space of all closed $(n-1)$-forms on the submanifold,
where $n$ is the complex dimension of the ambient space.
The main result of this paper says that the set of all minimal
affine Lagrangian embeddings of a compact manifold into an
equiaffine complex space is a submanifold of the Fr\'echet manifold
of all compact
submanifolds of the complex equiaffine space. We provide a rigorous proof of this fact.
It seems that from a differential geometry viewpoint
non-metric analogues of Calabi-Yau manifolds are equiaffine complex
manifolds, that is, complex manifolds equipped with a torsion-free
complex connection and a non-vanishing covariant constant complex
volume form. There are many very natural complex equiaffine
manifolds. For instance, complex affine hyperspheres of the complex
affine space $\C ^n$ with an induced equiaffine structure obtained
in a way standard in affine differential geometry (see \cite{DV})
are examples. Equiaffine structures are, in general, non-metrizable.
For instance, the complex hyperspheres of $\C ^n$ with the induced
equiaffine structure are non-metrizable. In particular, they are not
related to Stenzel's metric.
If $N$ is a complex $n$-dimensional space with a complex
structure $J$, then an $n$-dimensional real submanifold $M$ of $N$
is affine Lagrangian if $JTM$ is transversal to $TM$. Of course, if
$N$ is almost Hermitian, then Lagrangian (in the metric sense)
submanifolds, for which $JTM$ is orthogonal to $TM$, are affine Lagrangian and there are many affine
Lagrangian submanifolds which are not metric Lagrangian even if
the ambient space is almost Hermitian.
In order to discuss minimality of submanifolds a metric structure
is not necessary. It is sufficient to have induced volume elements
on submanifolds. Such a situation exists in case of affine
Lagrangian submanifolds. In this case there does not exist (in
general) any mean curvature vector but there exists the Maslov
1-form which can play, in some situations, a role similar to that
played by the mean curvature vector. Note that in the general affine
case we do not have any canonical duality between tangent vectors
and 1-forms. The vanishing of the Maslov form implies that the
submanifold is a point where a naturally defined volume
functional attains its minimum for compactly supported variations.
Affine Lagrangian submanifolds have a phase function. It turns out
that a connected affine Lagrangian submanifold is minimal if and
only if its phase function is constant.
If a connected affine Lagrangian submanifold is minimal (i.e. of
constant phase), then after rescaling the complex volume form in the
ambient space we can assume that the constant phase function
vanishes on $M$. Analogously to the metric case an affine Lagrangian
submanifold is called special if its phase function vanishes on $M$.
The notion of special submanifolds corresponds to the notion of
calibrations. Calibrations in Riemannian geometry were introduced in
the famous paper \cite{HL}. The notion can be generalized to the
affine case and, like in the metric case, an affine Lagrangian
submanifold is special if and only if it is calibrated by the real
part of the complex volume form in the ambient space. The minimality
of affine Lagrangian submanifolds is discussed in \cite{O_1} and
\cite{O}.
In this paper we try to assume as little as possible. In particular,
we do not assume that the ambient space $N$ is complex equiaffine
but
we only assume that it is almost complex and endowed with a nowhere
vanishing closed complex $n$--form $\Omega$, where $2n=\dim _{\R}N$.
Then affine Lagrangian immersions $f:M\to N$, where $\dim M=n$, are
those for which $f^*\Omega \ne 0$ at each point of $M$. If $M$ is
oriented, then $\Omega$ induces on $M$ a unique volume element $\nu
$. We have $f^*\Omega=\hbox{\rm e} ^{\hbox{\rm i}\theta}\nu $, where $\theta $ is the
phase function of $f$. In this paper minimal (relative to $\Omega$)
affine Lagrangian submanifolds will be those (by definition) which
have constant phase.
We shall prove the following theorem.
\begin{thm}\label{main} Let $M$ be a connected compact oriented $n$-dimensional real
manifold admitting a minimal affine Lagrangian embedding into an
almost complex $2n$--dimensional manifold $N$ equipped with a
nowhere-vanishing complex closed $n$-form. The set of all minimal
affine Lagrangian embeddings of $M$ into $N$ has a structure of an
infinite dimensional manifold modeled on the Fr\'echet vector space
$\mathcal C ^{\infty}(\mathcal F_{closed}^{n-1})$ of all smooth
closed $(n-1)$-forms on $M$.
\end{thm}
A precise formulation of this theorem is given in Section 4.
The Fr\'echet manifold in Theorem \ref{main} may have many connected
components.
In the above theorem the manifold $M$ is fixed, but the theorem in fact
describes all compact (connected, oriented) minimal affine
Lagrangian embedded submanifolds. Non-diffeomorphic submanifolds lie
in different connected components.
At the end of this paper we observe that, in contrast with the
metric geometry, in the affine case there exist non-smooth minimal
submanifolds of smooth manifolds.
Almost the same considerations as in the proof of Theorem \ref{main}
show that the set of all (metric) Lagrangian
embeddings of a connected compact 2-dimensional manifold into a
4-dimensional almost K\"ahler manifold forms an infinite dimensional
Fr\'echet manifold modeled on the Fr\'echet vector space $\mathcal C
^{\infty} (\mathcal F ^1_{closed})$ (if it is not $\emptyset$).
We also give a simple application of Theorem \ref{main} in the case
where the ambient space is the tangent bundle of a flat manifold.
\section{Basic notions}
Let $N$ be a $2n$--dimensional almost complex manifold with an
almost complex structure $J$. Let $M$ be a connected
$n$-dimensional manifold and $f:M\to N$ be an immersion. We say that
$f$ is affine Lagrangian (some authors call it purely real or
totally real) if the bundle $Jf_*(TM)$ is transversal to $f_*(TM)$.
We shall call this transversal bundle the normal bundle.
The almost complex structure $J$ gives an isomorphism between the normal bundle and the tangent bundle $TM$.
If $\Omega$ is a nowhere vanishing complex
$n$-form on $N$, then $f$ is affine Lagrangian if and only if
$f^*\Omega \ne 0$ at each point of $M$. If $f:M \to N$ is such a
mapping that $f^*\Omega \ne 0$ at each point of $M$, then $f$ is
automatically an immersion.
Recall now the notion of a phase. Let $\bf V$ be an $n$-dimensional
complex vector space with a complex volume form $\Omega$ and let $\bf U$
be an $n$-dimensional real oriented vector subspace of $\bf V$ such that
$\Omega _{| {\bf U}}\ne 0$. Let $X_1,\ldots, X_n$ be a positively
oriented basis of $\bf U$. Then $\Omega (X_1,\ldots,X_n)=\mu
e^{\hbox{\rm i}\theta}$, where $\mu \in \R^+$ and $\theta \in \R$. If we
change the basis $X_1,\ldots, X_n$ to another positively oriented
basis of $\bf U$, then $e^{\hbox{\rm i}\theta}$ remains unchanged. $\theta$ is
called the phase or the angle of the subspace $\bf U$.
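For instance (a simple illustration of ours, not taken from the original text), let ${\bf V}=\C^n$ with $\Omega=dz^1\wedge\cdots\wedge dz^n$, and let ${\bf U}_{\theta_0}$ be the real span of $e^{\hbox{\rm i}\theta_0}e_1,e_2,\ldots,e_n$, oriented by this basis. Then
$$\Omega\big(e^{\hbox{\rm i}\theta_0}e_1,e_2,\ldots,e_n\big)=e^{\hbox{\rm i}\theta_0},$$
so the phase of ${\bf U}_{\theta_0}$ is $\theta_0$ (mod $2\pi$); in particular, the standard real subspace $\R^n\subset\C^n$ has phase $0$.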
Assume $N$ is endowed with a nowhere vanishing complex volume form $\Omega$ and $M$ is oriented.
For an
affine Lagrangian immersion $f:M\longrightarrow N$, at each point
$x$ of $M$ we have the phase $\theta _x$ of the tangent vector
subspace $f_*(T_xM)$ of $T_{f(x)} N$. The phase function
$x\longrightarrow \theta _x$ is multi-valued. In general, if we require
the phase function to be smooth, it can be defined
only locally: for each point $x\in M$ there is a smooth phase
function of $f$ defined in a neighborhood of $x$.
The constancy of the
phase function is nevertheless a well defined global notion, that is, if $\theta$
is locally constant, then it can be chosen
globally constant.
Recall a few facts concerning the situation where the ambient space is complex equiaffine
or, in the metric case, Calabi-Yau. A Lagrangian submanifold
(affine or metric) is minimal if and only if it is volume
minimizing for compactly supported variations. This is equivalent to
the fact that the Maslov form vanishes. Moreover, Lagrangian
submanifolds (affine or metric) are minimal if and only if they have
constant phase.
In this paper, where, in general, we do not assume that the ambient space is complex equiaffine, we shall say (by
definition) that an affine Lagrangian submanifold $f$ is minimal if
and only if its phase is constant. As usual, if the phase constantly vanishes
on $M$, then the submanifold will be called special. If the phase $\theta$ is
constant, then we can rescale $\Omega$ in the ambient space by
multiplying it by $\hbox{\rm e} ^{-\hbox{\rm i} \theta}$ and after this change the given
immersion becomes special. But if we have a family of minimal affine
Lagrangian immersions of $M$ into $N$, and we adjust the complex
volume form $\Omega$ to one member of the family, then, in general,
the other members of the family remain only minimal, not special.
For an oriented affine Lagrangian immersion $f:M\to N$ we have the
induced volume form $\nu $ on $M$ defined by the condition $\nu
(X_1,\ldots, X_n)=\vert\omega (X_1,\ldots, X_n)\vert$, where $X_1,\ldots,X_n$
is a positively oriented basis of $T_xM$, $x\in M$, and $\omega
=f^*\Omega$. The form $\omega$ is a complex-valued $n$-form
on the real manifold $M$. We have
$$\omega =\hbox{\rm e} ^{\hbox{\rm i}\theta } \nu ,$$
where $\theta$ is the phase function. Note that by multiplying
$\Omega$ by $\hbox{\rm e} ^{\hbox{\rm i} \alpha}$ for any $\alpha \in\R$ we do not
change the induced volume form on $M$. Decompose $\omega$ into the
real and imaginary parts: $\omega= \omega _1+\hbox{\rm i}\omega_2$, where
$\omega _1=\cos \theta\,\nu $, $\omega _2=\sin\theta\,\nu.$ If
$W\in\mathcal X (M)$, then, since $\Omega$ is complex, we have
$$
f^*(\iota _{(Jf_*W)} \Omega)=-\iota _W\omega _2 +\hbox{\rm i}\, \iota _W\omega
_1,
$$
where $\iota$ stands for the interior product operator.
Hence, if $f$ is special (i.e.\ $\nu =\omega_1$) we get
\begin{equation}\label{przedGriffiths}
f^*(\iota _{(J f_*W )}\hbox{\rm Im}\,\Omega) = \iota _W\nu.
\end{equation}
Assume now that $f_t$, $|t|<\varepsilon$, is a smooth variation of $f$. Denote by
$\mathcal V (t, x)$ its variation vector field. Assume it is normal
to $f$ at $t=0$. Then $V:=\mathcal V_{\vert{\{0\}}\times M}$ is
equal to $Jf_*W$ for some $W\in\mathcal X (M)$. If $f$ is special and $\Omega$ is closed,
then using formula
(\ref{przedGriffiths}) and Proposition (I.b.5) from \cite{G}, we
obtain
\begin{equation}\label{Griffiths}
{d\over{dt}}\left (f^*_t\hbox{\rm Im}\, \Omega\right )_{|t=0} =d(\iota _W\nu).
\end{equation}
This formula is also
directly computed in \cite{O}, but there the form $\Omega$ is
assumed to be parallel relative to a torsion-free complex
connection.
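As a small side observation of ours (it follows directly from (\ref{Griffiths}) together with Stokes' theorem, since $M$ is compact and without boundary),
$$\frac{d}{dt}\left(\int_M f_t^*\,\hbox{\rm Im}\,\Omega\right)_{|t=0}=\int_M d(\iota_W\nu)=0,$$
so to first order a normal variation does not change the total integral of the imaginary part of the pulled-back volume form.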
We shall now give a justification of the term ``minimal'' adopted in
this paper. Assume $M$ is compact. If affine Lagrangian immersions
$f, \tilde f: M\to N$ are cohomologous (in particular, if they are
homotopic), then the cohomology class of $\omega _i$ is equal to the
cohomology class of $\tilde\omega _i$, for $i=1,2$, where
$\tilde\omega _1=\cos \tilde\theta\, \tilde\nu$, $\tilde\omega
_2=\sin\tilde\theta\, \tilde \nu$ are the real and imaginary parts
of $\tilde{\omega} =\tilde f ^*\Omega$ and $\tilde\theta$,
$\tilde\nu$ are the phase and the induced volume element for $\tilde
f$.
Assume that $f$ is special.
Then $\omega _1=\nu $ and consequently
$$ \int _M \nu=\int _M \omega _1 =\int _M \tilde\omega _1=\int _M
\cos\tilde\theta \, \tilde \nu \le\int _M\tilde\nu,$$
which means that
with the definition of minimality we adopted in this paper compact
special (and consequently minimal) affine Lagrangian submanifolds
are volume minimizing in their respective cohomology classes.
Assume additionally
that $\tilde f$ is minimal with the constant phase $\tilde\theta$.
We have
$$0=\int _M\omega _2=\int _M \tilde\omega_2= \int _M \sin\tilde\theta\,
\tilde \nu =\sin\tilde\theta \int_M \tilde\nu ,$$ which gives
$\sin\tilde\theta=0$; since, moreover, $\int_M\cos\tilde\theta\,\tilde\nu=\int_M\tilde\omega_1=\int_M\nu>0$, we also get $\cos\tilde\theta>0$, hence $\tilde\theta=0$, that is, $\tilde f$ is also special.
If $f$ is minimal (special), then $f\circ\varphi$ is minimal (special)
for any diffeomorphism $\varphi$ of $M$.
\section{Moduli spaces of compact embedded submanifolds}
Assume first that $M$ and $N$ are arbitrary manifolds
such that $\dim M\le \dim N$. Assume moreover that $M$ is connected
compact and admits an embedding into $N$. Denote by $\mathcal
C^{\infty}_{emb}(M,N)$ the set of all embeddings from $M$ into $N$.
This is a well known topological space forming an open subset (in
the $\mathcal C^1$ topology) of $\mathcal C ^{\infty}(M,N)$.
Denote by
$\mathcal M$ the space $\mathcal C^{\infty}_{emb}(M,N) _{/ Diff ^{\infty}(M)}$ with the
quotient topology. The equivalence class of $f\in \mathcal C^{\infty}_{emb}(M,N)$ will be
denoted by $[f]$. For $f,g\in \mathcal C^{\infty}_{emb}(M,N)$ we have that $f\sim g$ if and
only if the images of $f$ and $g$ are equal in $N$.
We shall now introduce a structure of an infinite dimensional
manifold (modeled on Fr\'echet spaces) on $\mathcal M$. It is certainly well
known but we have not found suitable references and moreover we need
the construction. We use the notion of a manifold modeled on
Fr\'echet vector spaces given in \cite{H}. We denote the Fr\'echet
space of all $\mathcal C ^{\infty}$ sections of a vector bundle
$E\to M$ by $\mathcal C^{\infty}(M\leftarrow E)$. Analogously the
Banach spaces of all $\mathcal C ^k$ sections of a vector bundle
$E\to M$ will be denoted by $\mathcal C ^k(M\leftarrow E)$.
The basic tool in the construction is the tubular mapping. We use the
following setting of this notion. Assume that $\mathcal N _f$ is any
smooth transversal bundle for an embedding $f:M\to N$. Having any
connection on $N$ we have the exponential mapping $exp $ given by
the connection. No relation between the connection and the
transversal bundle is needed. From the theory of connections one
knows that there is an open neighborhood $\mathcal U$ of the
zero-section in the total space $\mathcal N _f$ and an open
neighborhood $\mathcal T$ of $f(M)$ in $N$ such that $exp_{\vert
\mathcal U} :\mathcal U\to \mathcal T$ is a diffeomorphism, $exp
_{|M}={\rm id} _M$ and the differential $exp _*: T_{0_x}(\mathcal
N_f)=f_*(T_xM)\oplus (\mathcal N_f)_x\to T_xN$ of $exp$ at $0$ is
the identity for each point $x$ of $M$. The mapping $exp_{\vert
\mathcal U}$ is a tubular mapping. In order to reduce a play with
neighborhoods we shall use the following lemma, which allows to have
the whole total space $\mathcal N _f$ as the domain of a tubular mapping. In
what follows $\mathcal N _f$ will denote either the transversal
vector bundle or its total space depending on the context.
\begin{lemma}\label{lemacik_o_funkcji}
Let $E\longrightarrow M$ be a Riemannian vector bundle and $\mathcal
U_{\varepsilon}$ be the neighbourhood of the zero section of $E$
given as follows
$$\mathcal U_{\varepsilon}=\{v\in E;\ \mid v\mid<\varepsilon\}, $$
where $\mid\ \mid$ is the norm on fibers of $E$ determined by the
Riemannian structure. There is a fiber-respecting diffeomorphism
$\sigma :E\longrightarrow \mathcal U_{\varepsilon}$ which is the
identity on $\mathcal U_{\varepsilon/2}$.
\end{lemma}
Proof.\ Let $\psi : [0, \infty)\to \R$ be a smooth function such that
$\psi (t)=t$ for $t\le \varepsilon/2$, $\psi (t)\le \varepsilon$ for
$t>\varepsilon/2$ and $\psi(t)\rightarrow \varepsilon$ for
$t\to{\infty}$. Then the function $\Upsilon(t)=(1/t)\psi (t)$ is
also a smooth function on $[0,\infty)$ (it equals $1$ near $0$). The mapping
$\sigma:E\to\mathcal U_{\varepsilon} $ given by
$$\sigma (v)=\Upsilon (\mid v\mid )v$$
satisfies the required conditions.
\lower 2pt\hbox{\kwadracik}
We now endow the bundle $\mathcal N _f$ with any Riemannian metric.
Since $M$ is compact, there is $\varepsilon>0$ such that $\mathcal U
_{\varepsilon}\subset \mathcal U$. We use Lemma
\ref{lemacik_o_funkcji} for $\mathcal U_{\varepsilon}$ and take the
tubular mapping $\mathcal E _f = exp \circ \sigma $. The tubular neighborhood
$\mathcal E _f(\mathcal N _f)$ of $f(M)$ will be denoted by $\mathcal T _f$.
The set $\mathcal C^{\infty}_{emb}(M,\mathcal T_f )$ is open in
$\mathcal C^{\infty}_{emb}(M,N)$ (in the $\mathcal C ^0$-topology).
Consider the mapping
\begin{equation}
\Psi: \mathcal C^{\infty}_{emb}(M,\mathcal T_f )\ni h\longrightarrow
\Pi_f\circ\mathcal E _f ^{-1}\circ h\in \mathcal C^{\infty}(M,M),
\end{equation}
where $\Pi _f:\mathcal N _f\longrightarrow M$ is the natural projection. The
mapping is continuous and the set ${\mathcal Diff}^{\infty}(M) $ is open in $\mathcal C
^{\infty} (M,M)$ (in the $\mathcal C ^1$-topology). Thus the set
\begin{equation}
{\mathcal U}^1_f =\Psi^{-1}({\mathcal Diff}^{\infty}(M) )= \{h\in\mathcal C^{\infty}(M,\mathcal T _f
); \ \Pi _f\circ \mathcal E _f ^{-1}\circ h\in{\mathcal Diff}^{\infty}(M) \}
\end{equation}
is open in $\mathcal C^{\infty}_{emb}(M,N)$ in the $\mathcal C^1$ topology. Observe that
$h\in\mathcal U ^1_f$ if and only if there is a section $V\in
{\mathcal C}^{\infty}(M\longleftarrow\mathcal N _f )$ and $\varphi \in {\mathcal Diff}^{\infty}(M) $
such that
\begin{equation}
\mathcal E _f\circ V=h\circ \varphi .
\end{equation}
The set ${\mathcal U}^1 _f$ has the following properties:
1) If $h\in{\mathcal U}^1 _f$ and $\varphi \in{\mathcal Diff}^{\infty}(M)$, then $h\circ\varphi
\in{\mathcal U}^1 _f$.
2) For every $\varphi \in{\mathcal Diff}^{\infty}(M)$ we have ${\mathcal U}
^1_{f\circ\varphi}={\mathcal U}^1 _f$.
Take the neighborhood ${\mathcal U}^1 _{[f]}= \{[h]\in\mathcal M ; \ h\in{\mathcal U}^1 _f\}$ of $[f]$ in
$\mathcal M$. Observe that the elements of ${\mathcal U}^1 _{[f]}$ can be parametrized
simultaneously. Namely, we have
\begin{lemma}\label{simultaneously}
Let $\xi _0 \in {\mathcal U}^1 _{[f]} $ and let $h_0\in\mathcal C^{\infty}_{emb}(M,N)$ be a fixed
parametrization of $\xi_0$. For each $\xi \in {\mathcal U}^1 _{[f]}$ there is a unique
parametrization $h_{\xi}\in\mathcal C^{\infty}_{emb}(M,N)$ of $\xi$ such that
$$\Pi _f\circ\mathcal E _f ^{-1}\circ h_0 =\Pi _f\circ \mathcal E _f ^{-1}\circ
h_{\xi}.$$
\end{lemma}
Proof.\ We first reparametrize $f$ in such a way that after the
reparametrization
$$\Pi _f \circ\mathcal E _f ^{-1}\circ h_0={\rm id} _M.$$
Assume that $f$ is already parametrized in this way.
For every $h\in {\mathcal U}^1 _f $ the mapping $\varphi ^{-1}=\Pi _f\circ\mathcal E _f ^{-1}\circ
h$ is a diffeomorphism and it is sufficient to replace $h$ representing $[h]$ by
$h\circ\varphi$. The uniqueness is obvious.
{\lasek\kwadrat}
By the above lemma we see that ${\mathcal U}^1 _{[f]}$ can be identified with the set
\begin{equation}{\mathcal U_{[f]}} =\{h\in\mathcal C^{\infty}_{emb}(M,\mathcal T_f ) ;
\ \Pi _f\circ\mathcal E _f ^{-1}\circ h={\rm id}_M\}.\end{equation}
We now define the bijection
$$
u_{[f]}:\mathcal U_{[f]}\longrightarrow \mathcal C ^{\infty}(M\leftarrow
\mathcal N _f)$$ as follows:
\begin{equation}
u_{[f]}(h)= \mathcal E _f^{-1}\circ h .
\end{equation}
We see that
$$u_{[f]} ^{-1} (V)=\mathcal E _f\circ V$$
and $\mathcal E _f\circ V$ has values in $\mathcal T _f$. If $ U$ is an open subset of
$\mathcal T _f$, then
$$u_{[f]}(\{h\in \mathcal U_{[f]}; h(M)\subset
U\})=\{V\in\mathcal C ^{\infty}(M\leftarrow\mathcal N _f);\
V(M)\subset\mathcal E _f^{-1}( U)\}$$ and hence this set is open in $\mathcal C
^{\infty}(M\leftarrow \mathcal N _f)$.
Assume now that $f, g\in \mathcal C^{\infty}_{emb}(M,N)$ and $\mathcal
U_{[f]}\cap \mathcal U_{[g]}\ne\emptyset$. Take $\xi _0\in \mathcal
U_{[f]}\cap \mathcal U_{[g]}$ and fix its parametrization $h_0$.
Reparametrize $f$ and $g$ as in Lemma \ref{simultaneously}, adjusting
the parametrizations to $h_0$. Then
\begin{eqnarray*}\mathcal U_{[f]}\cap \mathcal U_{[g]}&=&\{h\in \mathcal C
^{\infty}_{emb}(M,\mathcal T _f\cap\mathcal T _g);\ \Pi _f\circ \mathcal E _f ^{-1}\circ h={\rm id} _M,
\ \Pi _g\circ\mathcal E _g ^{-1}\circ h={\rm id} _M\}\\
&=&\{\mathcal E _f\circ V;\ V\in \mathcal C^{\infty} (M\leftarrow\mathcal N _f),\ V(M)\subset \mathcal E _f ^{-1} (\mathcal T _f\cap\mathcal T _g )\}
\end{eqnarray*}
and consequently
$$u_{[f]}(\mathcal U_{[f]}\cap \mathcal U _{[g]})=\{
V\in \mathcal C^{\infty} (M\leftarrow\mathcal N _f );\ V(M)\subset \mathcal E _f
^{-1} (\mathcal T _f\cap\mathcal T _g )\}.$$
The mapping $\mathcal E _g^{-1}\circ\mathcal E _f
:\mathcal E _f^{-1}(\mathcal T _f\cap\mathcal T _g)\to \mathcal E _g ^{-1}(\mathcal T _f\cap\mathcal T _g )$ is smooth and
fiber respecting (because of the specially chosen parametrizations of $f$ and $g$).
It is known, \cite{H},
that the set $u_{[f]}(\mathcal U_{[f]}\cap \mathcal U _{[g]})$ is open in the Fr\'echet space $\mathcal C^{\infty}(M\leftarrow \mathcal N _f)$
and the mapping
\begin{eqnarray*}
&&u_{[g][f]}:u_{[f]}(\mathcal U_{[f]}\cap \mathcal U _{[g]})\ni V\to
\mathcal E _g ^{-1}\circ\mathcal E _f\circ V \in u_{[g]}(\mathcal U_{[f]}\cap \mathcal U
_{[g]})
\end{eqnarray*}
is smooth. For the same reason the set $u_{[g]}(\mathcal
U_{[f]}\cap \mathcal U_{[g]})$ is open and the mapping $u_{[f][g]}$
is smooth.
We have
built a smooth atlas on $\mathcal M$. Hence we have
\begin{thm}
Let $M$ be a connected compact manifold admitting an embedding into a
manifold $N$. Then $\mathcal M$ is an infinite dimensional
manifold modeled on the Fr\'echet vector spaces $\mathcal C
^{\infty}(M\leftarrow \mathcal N _f)$ for $f\in\mathcal C_{emb}
^{\infty}(M,N)$, where $\mathcal N _f$ is any bundle transversal to
$f$.
\end{thm}
In the theorem $\mathcal N _f$ can be replaced by any bundle isomorphic (over
the identity on $M$) to the transversal bundle $\mathcal N _f$.
In what follows the Fr\'echet space of all smooth $r$-forms on $M$
will be denoted by $\mathcal C ^{\infty}(\mathcal F ^{r})$. The
Banach space of $r$-forms of class $\mathcal C ^k$,
$k\in\mathbf N$, will be denoted by $\mathcal C ^k(\mathcal F ^r)$.
Assume now additionally that $N$ is a $2n$-dimensional manifold with
an almost complex structure $J$ and $M$ is $n$-dimensional
orientable with a fixed volume form $\nu$. Having the volume element
$\nu$, we have an isomorphic correspondence between tangent vectors
and $(n-1)$-forms. It is given by the interior multiplication
$$T_xM\ni W\longrightarrow \iota _W\nu \in \Lambda ^{n-1}(T_xM)^*,$$
for $x\in M$. If $f:M\to N$ is affine Lagrangian, then by composing
this isomorphism with the isomorphism determined by $J$ between the
tangent bundle $TM$ and the normal bundle $\mathcal N _f$ we get an
isomorphism, say $\rho$, of vector bundles
\begin{equation}\label{rho}
\rho: \Lambda ^{n-1} TM^*\longrightarrow \mathcal N _f.
\end{equation}
This isomorphism gives a smooth isomorphism (a linear smooth
diffeomorphism) $\wp $ between the Fr\'echet vector spaces $\mathcal C
^{\infty}(\mathcal F ^{n-1})$ and $\mathcal C^ {\infty}(M\leftarrow
\mathcal N _f)$ given by $\wp (\gamma)=\rho\circ \gamma$.
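Concretely (our reformulation of the definition just given), on forms of the type $\iota_W\nu$ the isomorphism acts by
$$\rho(\iota_W\nu)=Jf_*W,\qquad W\in\mathcal X(M),$$
which is the formula used later when the operator $P$ is introduced.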
We now have
\begin{thm}\label{Mal}
Let $M$ be a connected compact orientable $n$-dimensional real
manifold admitting an affine Lagrangian embedding into a
$2n$-dimensional almost complex manifold $N$. The set $\mathcal M aL
=\{[f]\in \mathcal M;\ f \ is \ affine\ Lagrangian\}$ is an infinite
dimensional manifold modeled on the Fr\'echet vector space
$\mathcal C^{\infty}(\mathcal F^{n-1})$.
\end{thm}
Proof.\
For each $y\in N$ there is an open neighborhood $U_y$ of $y$ in $N$ and
a smooth complex $n$-form $\Omega _y$ on $N$ such that $\Omega _y\ne 0$ at each point of $U_y$.
Let $U_{y_1},\ldots, U_{y_l}$ cover $f(M)$. Set $\tilde{\Theta} _j= {\mathcal E _f} ^*\Omega _{y_j} $.
Consider the mapping
\begin{equation}\mathcal C ^1(M\leftarrow \mathcal N _f)\ni V\to (V^*\tilde{\Theta}_1,\ldots,V^* \tilde{\Theta}_l)\in
(\mathcal C ^0(\mathcal F (\C)))^l,
\end{equation}
where $\mathcal C ^0(\mathcal F(\C))$ stands for the space of all
complex-valued $n$-forms on $M$ of class $\mathcal C ^0$. It is
known, see Theorem 2.2.15 from \cite{Ba}, that this mapping is
continuous between Banach spaces. Hence
$$\tilde{\mathcal U}=\{V\in\mathcal C ^{\infty}(M\leftarrow\mathcal N _f);\
((V^*\tilde{\Theta }_1)_x, \ldots , (V^*\tilde {\Theta }_l)_x)\ne 0 \
\forall x\in M\}$$ is open in $\mathcal C^{\infty}(M\leftarrow\mathcal N _f)$.
It is clear that $[h]\in \mathcal U ^1_{[f]}$ is affine Lagrangian
if and only if $u_{[f]}([h])\in \tilde {\mathcal U}$. We now compose
$u_{[f]}$ with the isomorphism $\wp ^{-1}$, where $\wp$ is
determined by any fixed volume form on $M$.
\lower 2pt\hbox{\kwadracik}
In the above atlas we can compose a chart $u_{[f]}$ with a bijective
mapping, say $\phi$, sending an open neighborhood of $0$ in
$\mathcal C^{\infty}(\mathcal F^{n-1})$ onto an open neighborhood of $0$ in
$\mathcal C^{\infty}(\mathcal F^{n-1})$ and such that $\phi $ and $\phi
^{-1}$ are smooth in the sense of the theory of Fr\'echet vector
spaces. This does not change the differentiable structure on
$\mathcal M aL$. We shall use this possibility in the next section.
\section{The moduli space of minimal submanifolds}
The precise formulation of Theorem \ref{main} is the following.
\begin{thm}\label{main-theorem} Let $N$ be a $2n$-dimensional almost complex manifold equipped with a
smooth nowhere-vanishing closed complex $n$-form $\Omega$. Let $M$
be a connected compact oriented $n$-dimensional real manifold
admitting a minimal (relative to $\Omega$) affine Lagrangian
embedding into $N$.
Then the set $$\mathcal M maL= \{[f]\in \mathcal M aL;\ f \ is \
minimal\}$$ is an infinite dimensional manifold modeled on the
Fr\'echet vector space \newline $\mathcal C^{\infty}(\mathcal
F^{n-1}_{closed})$. It is a submanifold of $\mathcal M aL$.
\end{thm}
Proof.\
We shall improve the charts obtained in Theorem
\ref{Mal} in such a way that the set $\mathcal M maL$ gets the
structure of a submanifold of $\mathcal M aL$ in the sense of the theory of Fr\'echet manifolds.
Let $f:M\to N$ be a
given minimal affine Lagrangian embedding. By rescaling $\Omega$ in
the ambient space we make $f$ special. We have the normal bundle
$\mathcal N _f=J f_*(TM)$. Fix a tubular mapping $\mathcal E _f :\mathcal N _f\to\mathcal T _f$. For each
section $V\in \mathcal C ^{\infty}(M\leftarrow \mathcal N _f)$ we have the
embedding $f_V=\mathcal E _f\circ V$. In general, $f_V$ is neither special nor
minimal nor even affine Lagrangian. Consider the mapping
$$\tilde P: \mathcal C ^{\infty}(M\leftarrow \mathcal N _f)\ni V\to \tilde P (V)=f_V^* (\hbox{\rm Im}\,
\Omega)\in\mathcal C ^{\infty} (\mathcal F^{n}).$$ Of course $\tilde
P(0)=0$. For a section $V\in \mathcal C ^{\infty}(M\leftarrow \mathcal N _f)$
take the variation $f_t=f_{tV}$. The section $V$ is the variation
vector field for $f_t$ at $0$. Using now formula (\ref{Griffiths})
one sees that the linearization $L_0 \tilde P$ of $\tilde P$ at $0$
is given by the formula
\begin{equation}\label{LP}
L_0\tilde P(V)=d(\iota _W\nu ),
\end{equation} where $V=Jf_*W$ and $\nu$ is the volume form on $M$ induced by
$f$.
Since for each $V\in \mathcal C ^{\infty}(M\leftarrow \mathcal N _f)$ the
embedding $f_V$ is homotopic to $f$, we have that $\tilde P$ has
values in $\mathcal C^{\infty}(\mathcal F_{exact}^{n})$. Moreover,
as was observed in Section 2, if $f_V$ is minimal affine
Lagrangian, then it is automatically special.
We shall now use the isomorphism $\rho$ given by (\ref{rho}).
If $\gamma \in \mathcal C ^{\infty}(\mathcal F ^{n-1})$ and $V=\rho\circ\gamma$,
then $\gamma= \iota_W\nu$, where $V=Jf_*W$.
We now have the mapping
$$P: \mathcal C^{\infty}({\mathcal F}^{n-1})\longrightarrow \mathcal C^{\infty}({\mathcal F}^n_{exact})$$
defined as follows:
\begin{equation}
P(\gamma) =\tilde P (\rho \circ\gamma).
\end{equation}
The mapping $P$ can also be expressed as follows. If we set
$\Theta=({\mathcal E _f}\circ\rho)^*\hbox{\rm Im}\,\Omega$, then $\Theta$ is a
closed $n$-form on the total space of $\Lambda ^{n-1}TM^*$. We have
$ P(\gamma )=\gamma ^*\Theta $ for any $(n-1)$-form $\gamma$.
Obviously $ P(0)=0$. Moreover, $P(\gamma)=0$ if and only if $f_V$,
where $V= \rho\circ\gamma $, is special (provided $f_V$ is affine Lagrangian).
We shall now regard $ P$ as
a differential operator. It is smooth, of order 1, non-linear and,
by (\ref{LP}), the linearization $L_0 P$ of $P$ at $0$ is given by
\begin{equation}
L_0 P=d.
\end{equation}
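Indeed (a one-line check of ours, spelling out the step): for $\gamma=\iota_W\nu$ we have $V=\rho\circ\gamma=Jf_*W$, so by (\ref{LP})
$$L_0P(\gamma)=L_0\tilde P(\rho\circ\gamma)=d(\iota_W\nu)=d\gamma .$$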
We shall now fix an arbitrary positive definite metric tensor field
on $M$. The metric is only a tool here and has no relation with
the affine geometric structure considered in this paper. Denote by
$\delta$ the codifferential operator determined by the metric.
Denote by ${\mathcal C}^{k,a}(\mathcal F^{r})$ the
H\"older-Banach space of all $r$-forms on $M$ of class ${\mathcal
C}^{k,a}$, where $k\in \mathbf N$ and $a$ is a real number from
$(0,1)$.
We extend the action of the operators $ P, d, \delta$ to the action
on the
forms of class ${\mathcal C}^{k,a}$. The extensions will be denoted by the same letters.
In particular, after extending, $P$ becomes a ${\mathcal
C}^{\infty}$ mapping between Banach spaces, see \cite{Ba} p. 34,
\begin{equation}
P: {\mathcal C}^{k,a} ({\mathcal F}^{n-1} ) \longrightarrow {\mathcal
C}^{k-1,a}({{\mathcal F}^{n}}_{exact})
\end{equation}
for each $k=1,2,\ldots$. As in the proof of Theorem \ref{Mal} one sees
that there is an open neighborhood, say $\mathcal W$, of $0$ in
${\mathcal C}^{1,a} ({\mathcal F}^{n-1} )$ such that $f_V$ is affine Lagrangian for
$V=\rho\circ\gamma$ and $\gamma \in \mathcal W$. From now on all
neighborhoods of $0$ in $\mathcal C ^{1,a}({\mathcal F}^{n-1})$ will be contained
in $\mathcal W$. Moreover, all neighborhoods will be assumed open.
Consider now $P: {\mathcal C}^{1,a} ({\mathcal F}^{n-1} ) \longrightarrow {\mathcal
C}^{0,a}({{\mathcal F}^{n}}_{exact})$ as a mapping between Banach spaces. The
mapping $L_0 P$ is a surjection. Moreover $\hbox{\rm ker}\, L_0 P=\hbox{\rm ker}\, d$.
Denote the
Banach space $\mathcal C ^{1,a}(\mathcal F_{closed}^{n-1})=\hbox{\rm ker}\,
d\subset{\mathcal C}^{1,a}({\mathcal F}^{n-1})$ by $X$. The space $\delta ({\mathcal C}^{2,a} ({\mathcal F}^{n} ))$ is a
closed complement to $\hbox{\rm ker}\, d$. Denote this Banach space by $Y$.
Using the implicit mapping theorem for Banach spaces one gets that
there is an open neighborhood $A$ of $0$ in $X$ and an open
neighborhood $B$ of $0$ in $Y$ and a unique smooth mapping
$G:A\longrightarrow B$ such that
$$(A+B)\cap P^{-1}(0)=\{\alpha +G(\alpha);\, \alpha \in A\}.$$
We shall now observe that if $\alpha $ is of
class ${\mathcal C} ^{k,a}$, where $k\ge 2$, or of class $\mathcal C
^{\infty}$, then so is $G(\alpha)$, for $\alpha$ from some
neighborhood of $0$ in $X$. In Riemannian geometry special submanifolds,
being minimal, are automatically $\mathcal C ^{\infty}$
(after a possible reparametrization), but in the affine case we do not
have such a statement, and we have to prove that an $\alpha$ of
class $\mathcal C ^{\infty}$ gives rise to a smooth embedding.
For an $(n-1)$--form $\gamma$ we define a differential
operator $ P_{\gamma}$ of the second order from the vector bundle
$\Lambda ^n{TM^*}$ into itself by the formula
\begin{equation}
P_{\gamma} (\beta)= P(\gamma+\delta\beta)
\end{equation}
for an $n$-form $\beta$.
Since $d\beta=0$, the linearization of $ P_0$ at $0$ is the Laplace
operator.
We also have
\begin{equation}
L_{\beta}P_{\gamma}=L_{\gamma +\delta \beta}P\circ \delta.
\end{equation}
Hence, if $\gamma$ is of class $\mathcal C ^{k,a}$ and $\beta$ is of class $\mathcal C ^{k+1,a}$, then the linear differential operator
$L_{\beta}P_{\gamma}$ is of class $\mathcal C ^{k-1,a}$.
We have the following smooth mapping between Banach spaces
$$\mathcal C^{1,a}(\mathcal F^{n-1})\times \mathcal C^{2,a}(\mathcal F
^{n})\ni (\gamma,\beta)\longrightarrow P_\gamma (\beta)\in \mathcal
C ^{0,a}(\mathcal F ^n)$$
and the continuous mapping
\begin{equation}
\Phi :STM^*\times {\mathcal C}^{1,a}({\mathcal F}^{n-1})\times \mathcal C ^{2,a}({\mathcal F}^{n})\ni (\xi, \alpha,\beta ) \longrightarrow
\hbox{\rm det}\, \sigma _{\xi} (L_{\beta} { P}_{\alpha})\in \R ,
\end{equation}
where $STM^*$ stands for the total space of the unit spheres bundle in $TM^*$ and $\sigma _\xi$ denotes
the principal symbol of a differential operator.
Since $STM^*$ is compact and $\Phi (\xi, 0,0)\ne 0$ for every $\xi \in STM^*$, we obtain the following
\begin{lemma}\label{eliptyczny}
There is a neighborhood $\mathcal U_0$ of $0$ in $\mathcal C ^{1,a}({\mathcal F}^{n-1})$ and
a neighborhood $\mathcal V_0$ of $0$ in $\mathcal C ^{2,a}({\mathcal F}^{n})$ such that
for each $\gamma \in \mathcal U_0$ and $\beta \in \mathcal V_0$ the
differential operator $L_{\beta}P_{\gamma}$ is elliptic.
\end{lemma}
From the theory of elliptic differential operators applied to
$d+\delta$ we know that the codifferential (after restricting) is a
linear homeomorphism of Banach spaces
$$ \delta : \mathcal C ^{2,a}(\mathcal F ^n_{exact})\longrightarrow Y=\delta (\mathcal C ^{2,a}({\mathcal F}^{n})).$$
Take the neighborhood of $0$ in $X$ given by $\mathcal
U_1=G^{-1}(\delta (\mathcal V_0\cap \mathcal C ^{2,a}(\mathcal F
^n_{exact})))\cap \mathcal U_0$. Let $\alpha \in \mathcal U_1$. Then
$G(\alpha)$ exists and there exists $\beta\in \mathcal V_0$ such
that $G(\alpha )=\delta\beta$. Moreover $P_\alpha (\beta)=0$ and
$L_\beta P_{\alpha}$ is elliptic. Take now any $k\ge 2$ and $\alpha
\in \mathcal U_1\cap \mathcal C^{k,a}({\mathcal F}^{n-1})$. Then the differential
operator $P_\alpha$ is of class $\mathcal C ^{k-1, a}$. For
$G(\alpha)$ we have $\beta\in \mathcal V_0$ of class $\mathcal C ^2$
such that $G(\alpha) =\delta\beta$, i.e.\ $P_\alpha(\beta)= 0$. Hence
$\beta$ is a solution of the elliptic equation
$P_{\alpha}(\beta)=0$ and from the elliptic regularity theorem for
non-linear differential operators we know that $\beta$ is of class
$\mathcal C ^{k+1,a}$ and consequently $G(\alpha)=\delta\beta$ is of
class $\mathcal C ^{k,a}$. Thus if $\alpha$ is of class $\mathcal C
^{\infty}$ then so is $G(\alpha)$. We have obtained
\begin{lemma}
There is a neighborhood $\mathcal U_1$ of $0$ in X such that for each $k\ge 1$ we have the mapping
\begin{equation}\label{odwzorowanie_klasy_ck}
G_{| \mathcal C ^{k,a}({\mathcal F}^{n-1})}: \mathcal U_1\cap \mathcal C
^{k,a}({\mathcal F}^{n-1}) \longrightarrow Y\cap \mathcal C^{k,a}({\mathcal F}^{n-1})=\delta (
\mathcal C^{k+1,a}(\mathcal F ^n)).\end{equation} Consequently we
have the mapping
\begin{equation}\label{odwzorowanie_nieskonczonosc}
G_{| \mathcal C ^{\infty}({\mathcal F}^{n-1})}: \mathcal U_1\cap\mathcal C
^{\infty}({\mathcal F}^{n-1})\longrightarrow Y\cap \mathcal C
^{\infty}({\mathcal F}^{n-1})=\delta (\mathcal C ^{\infty}({\mathcal F}^{n})).\end{equation}
\end{lemma}
We know that $G:\mathcal U_1\to Y$ is smooth between Banach spaces.
We shall now prove that the mappings (\ref{odwzorowanie_klasy_ck}),
for $k=2,\ldots$, are smooth mappings between Banach spaces, when we
replace $\mathcal U_1$ by a (possibly) smaller neighborhood of $0$
in $X$. This will imply that the mapping
(\ref{odwzorowanie_nieskonczonosc}) is smooth as a mapping between
Fr\'echet spaces in a sufficiently small neighborhood of $0$ in
$\mathcal C ^{\infty}({\mathcal F}^{n-1})$.
We have the continuous mapping
\begin{equation} \mathcal C ^{1,a}({\mathcal F}^{n-1})\ni\gamma\longrightarrow (L_{\gamma}P)_{| Y}\in \mathcal L (Y,Z),\end{equation}
where $Z= \mathcal C^{0,a}(\mathcal F ^n_{exact})$ and $\mathcal L (Y,Z)$ stands
for the Banach space of continuous linear mappings from $Y$ to $Z$. We know that $(L_0P)_{| Y}=d_{|Y} :Y\to Z$
is an isomorphism (linear and topological) between Banach spaces $Y$, $Z$. Since the set of isomorphisms is
open in $\mathcal L (Y,Z)$, there is a neighborhood, say $\mathcal U_2$, of $0$ in $\mathcal C ^{1,a} ({\mathcal F}^{n-1})$
such that if $\gamma \in \mathcal U_2$, then $(L_{\gamma}P)_{|Y}$ is an isomorphism between $Y$ and $Z$.
Take $\gamma \in \mathcal U_2\cap \mathcal C ^{k,a}({\mathcal F}^{n-1})$. We have the mapping
\begin{equation}\label{mapping}
(L_{\gamma}P)_{|Y_k} : Y_k\longrightarrow \mathcal C ^{k-1,a}(\mathcal F ^n_{exact})=Z\cap \mathcal C ^{k-1,a}({\mathcal F}^{n} ),
\end{equation}
where $Y_k=Y\cap \mathcal C ^{k,a}({\mathcal F}^{n-1})=\delta (\mathcal C ^{k+1,a}({\mathcal F}^{n}))$.
As a restriction of the injection $L_{\gamma}P:Y\to Z$, it is injective.
Since $P_{| \mathcal C ^{k,a}({\mathcal F}^{n-1})}:\mathcal C ^{k,a}({\mathcal F}^{n-1})\longrightarrow \mathcal C ^{k-1, a}(\mathcal F ^n_{exact})$
is smooth between Banach spaces, we have that
$$L_{\gamma} \left (P_{|\mathcal C ^{k,a}({\mathcal F}^{n-1})}\right): \mathcal C ^{k,a}({\mathcal F}^{n-1})\longrightarrow
\mathcal C ^{k-1,a}(\mathcal F ^n_{exact})$$
is continuous. Hence
$$ \left(L_{\gamma} \left (P_{|\mathcal C ^{k,a}({\mathcal F}^{n-1})}\right )\right) _{|Y _k}: Y_k\longrightarrow
\mathcal C ^{k-1,a}(\mathcal F ^n_{exact})$$
is continuous.
On the other hand
$$ L_{\gamma}\left(P_{|\mathcal C ^{k,a}({\mathcal F}^{n-1})}\right) =(L_{\gamma}P)_{|\mathcal C ^{k,a}({\mathcal F}^{n-1})}.
$$
Thus the mapping given by (\ref{mapping}) is a continuous linear
monomorphism.
We shall now show that it is surjective
for $\gamma \in \mathcal U_2\cap \mathcal U_0\cap \mathcal C ^{k,a}({\mathcal F}^{n-1})$. Let $\mu \in \mathcal C ^{k-1,a}(\mathcal F ^n_{exact})$.
Since $\gamma\in \mathcal U_0$, by Lemma \ref{eliptyczny} we know that $L_0P_{\gamma}$ is elliptic. The differential operator $L_0P_{\gamma}$ is of class $\mathcal C ^{k-1,a}$.
Since $\gamma \in \mathcal U_2$, there is $\beta\in \mathcal C ^{2,a}({\mathcal F}^{n})$ such that $L_{\gamma} P(\delta \beta)=\mu$. From the elliptic regularity theorem we know that $\beta $ is of class $\mathcal C ^{k+1,a}$, i.e.\ $\delta\beta$ is of class $\mathcal C ^{k,a}$. Set $\mathcal U_3=\mathcal U_0\cap \mathcal U_2$. We have obtained
\begin{lemma}
There is a neighborhood $\mathcal U_3$ of $0$ in $\mathcal C ^{1,a}({\mathcal F}^{n-1})$ such that for every $\gamma\in \mathcal U_3\cap \mathcal C ^{k,a}({\mathcal F}^{n-1})$ the mapping
$$L_{\gamma}P_{|Y_k}:Y_k\longrightarrow \mathcal C ^{k-1,a}(\mathcal F ^n_{exact})$$
is an isomorphism (topological and linear).
\end{lemma}
Denote by $\tilde G:A\to\mathcal C ^{1,a}(\mathcal F ^{n-1})$ the
mapping given by $\tilde G(\alpha)=\alpha + G(\alpha)$.
Take $\mathcal U_4=\tilde G^{-1}(\mathcal U_3)\cap \mathcal U_1\subset X$.
Let $\alpha _0 \in \mathcal U_4\cap \mathcal C ^{k,a}({\mathcal F}^{n-1})$.
Then $\gamma _0=\alpha _0+G (\alpha _0)$ is of class $\mathcal C ^{k,a}$
(because $\alpha _0\in \mathcal U_1$) and $L_{\gamma_0}P_{| Y_k}:Y_k\to \mathcal C ^{k-1,a}(\mathcal F^n_{exact})$
is an isomorphism (because $\alpha _0\in \tilde G ^{-1}(\mathcal U_3)$).
Denote by $\tilde X _k$ the Banach space $\hbox{\rm ker}\, L_{\gamma _0}P$.
We have $P(\gamma _0)=0$ and $\tilde X _k\oplus Y_k=\mathcal C ^{k,a}(\mathcal F ^{n-1})$.
We want to prove that $G$ is smooth around $\alpha _0$ in the sense of the Banach spaces theory.
Denote by $\tilde \pi :\mathcal C ^{k,a}({\mathcal F}^{n-1})=\tilde X _k\oplus Y_k\longrightarrow \tilde X_k$
the canonical projection. It is a smooth mapping between Banach spaces.
Set $\tilde \alpha _0=\tilde \pi(\alpha _0)$.
From the implicit mapping theorem we know that there is a neighborhood $\tilde U $ of $\tilde\alpha _0$
in $\tilde X _k$ and a smooth mapping $F$ defined on $\tilde U$ such that
$ \{\tilde\alpha + F(\tilde\alpha); \tilde\alpha \in \tilde U\}\subset P^{-1}(0)$.
In a neighborhood of $\alpha _0$ we have
$$G(\alpha)= \tilde\pi (\alpha) +F(\tilde \pi (\alpha))-\alpha.$$
Hence in a neighborhood of $\alpha _0$ the mapping $G$ is smooth and
consequently it is smooth in $\mathcal U_4\cap \mathcal C
^{k,a}({\mathcal F}^{n-1})$. It follows that $G_{|\mathcal U_4\cap \mathcal C
^{\infty}({\mathcal F}^{n-1})}$ is smooth in the sense of the theory of Fr\'echet
spaces.
The projections in the
Hodge decomposition $\mathcal C ^{\infty}(\mathcal F^{n-1})=
\mathcal C^{\infty}(\mathcal F^{n-1}_{closed})\oplus \delta
(\mathcal C ^{\infty}(\mathcal F ^n))$ are smooth mappings of
Fr\'echet spaces.
Denote by $p: \mathcal C ^{\infty}(\mathcal F^{n-1})\to \mathcal
C^{\infty}(\mathcal
F^{n-1}_{closed})$
the projection. Set $\mathfrak U=(\mathcal U_4 \cap \mathcal C
^{\infty}(\mathcal F^{n-1}))\oplus \delta(\mathcal C^{\infty
}(\mathcal F^{n}))$. Consider the mapping $\phi: \mathfrak U\to
\mathfrak U$ defined as $\phi (z)=z-(G\circ p)(z)$. It is a
bijection and its inverse is given by $\phi ^{-1}(z)= z+(G\circ
p)(z)$. Both mappings $\phi$ and $\phi ^{-1}$ are smooth in the
sense of the theory of Fr\'echet vector spaces. We now compose the
chart obtained in the proof of Theorem \ref{Mal}
with $\phi$. Since
$$\phi ( \{\gamma =\alpha +G(\alpha); \
\alpha\in \mathcal U_4\cap \mathcal C ^{\infty}(\mathcal F ^{n-1})
\})=\mathcal U_4 \cap \mathcal C ^{\infty}(\mathcal F ^{n-1})$$
is an open subset of the closed subspace $\mathcal C
^{\infty}(\mathcal F^{n-1}_{closed})$ of $\mathcal C
^{\infty}(\mathcal F ^{n-1})$, we have that the set $\mathcal M
maL$ is a submanifold of $\mathcal M aL$.
This completes the proof.
\begin{remark}
{\rm We shall now observe that there exist minimal affine Lagrangian submanifolds
which are not smooth. We refer to Section 3 for notation.
Given a smooth special affine Lagrangian embedding $f:M\to N$ we have the mapping
$$ \Phi _k:\mathcal C ^k_{emb}(M, \mathcal T _f)\ni h\longrightarrow \Pi _f
\circ \mathcal E ^{-1}_f\circ h\in \mathcal C ^k(M,M).$$
The set $Diff ^k(M)$ is open in $\mathcal C ^k(M,M)$ and the set
$$\mathcal U _{k,f}=\{ h\in \mathcal C ^k_{emb}(M,\mathcal T _f) ;\
\exists V\in \mathcal C ^k(M\leftarrow \mathcal N _f): \mathcal E _f\circ V=h\}$$
can be regarded as an open neighbourhood of $[f]$ in
$\mathcal M^ {k}=\mathcal C ^k(M, \mathcal T _f)_{/ Diff ^k(M)}$.
We have the bijection
\begin{equation}\label{bijection}
\mathcal C ^k(M\leftarrow \mathcal N_f) \ni V\longrightarrow \mathcal E _f\circ V\in \mathcal U _{k,f}.
\end{equation}
In order to study minimal affine Lagrangian submanifolds of complex
equiaffine spaces like in \cite{O_1}, \cite{O} or in Section 1 of
this paper it suffices that the immersions or embeddings under
consideration are of class $\mathcal C ^2$. Also in the proof of
Theorem \ref{main-theorem} the class $\mathcal C ^2$ is sufficient,
that is, if $\alpha$ is of class $\mathcal C ^k$, where $k\ge 2$,
then $G(\alpha)$ is of class $\mathcal C ^k$. Since $\mathcal C
^k(\mathcal F ^{n-1}_{closed})\ne\mathcal C ^{\infty}(\mathcal F
^{n-1}_{closed})$, it is clear from the proof of Theorem
\ref{main-theorem} that there exist non-smooth minimal affine
Lagrangian embeddings of class $\mathcal C ^k$, for $k\ge 2$. }
\end{remark}
\begin{example}{\rm Let $M$ be an $n$-dimensional real manifold equipped with a torsion-free linear connection
$\nabla$. The tangent bundle to the tangent bundle $TTM$ admits a
decomposition into a direct sum of the vertical bundle (tangent to
the fibers of $TM$) and the horizontal bundle (depending on the
connection). The vertical lift of $X\hbox{\rm i}n T_xM$ to $T_ZTM$ for $Z\hbox{\rm i}n
T_xM$ will be denoted by $X^v_Z$. Analogously the horizontal lift
will be denoted by $X^h_Z$. The following formulas for the lifts of
vector fields $X, Y\hbox{\rm i}n \mathcal X (M)$ are known, see \cite{D},
\begin{equation}\label{nawiasy}
\begin{array}{rcl}
&& [X^v,Y^v]=0,\\
&&[X^h, Y^v]=(\nabla _XY)^v,\\
&& [X^h,Y^h]_Z=-(R(X,Y)Z)^v_Z +[X,Y]^h_Z,
\hbox{\rm e}nd{array}
\hbox{\rm e}nd{equation}
where $R$ denotes the curvature tensor of $\nabla$.
The total space $TM$ has an almost complex structure $J$ determined
by $\nabla$. Namely
\begin{equation}
JX^h=X^v,\qquad JX^v=-X^h.
\end{equation}
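(As a quick check of ours: $J^2X^h=JX^v=-X^h$ and $J^2X^v=-JX^h=-X^v$, so indeed $J^2=-{\rm id}$ on $TTM$.)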
From
(\ref{nawiasy}) it follows that the almost complex structure is
integrable if and only if the connection $\nabla$ is flat.
Assume that $\nu$ is a volume form on $M$ such that $\nabla \nu
=0$. In other words, the pair $\nabla$, $\nu$ is an equiaffine
structure on $M$. We define a complex volume form $\Omega$ on $TM$
by the formula
\begin{equation}\label{omega}
\Omega (X_1^h,...,X_n^h)=\nu (X_1,...,X_n).
\end{equation}
By using (\ref{nawiasy}) one sees that $d\Omega=0$ if and only if
$\nabla$ is flat.
From now on we assume that $\nabla$ is flat and $\nabla\nu =0$. A
manifold with such a structure is usually called an affine manifold
with parallel volume. Take the zero-section of $TM$.
The horizontal space at $0_x$ is equal to $T_xM$
(independently of a connection $\nabla$). Hence the zero-section
treated as a mapping $ 0:M\to TM$ is an affine Lagrangian embedding.
By (\ref{omega}) it is special (also independently of a given
connection). We have
\begin{proposition}
Each affine manifold with parallel volume admits a special affine
Lagrangian embedding into a complex space with closed complex
volume form.
\end{proposition}
From the main theorem of this paper we know that if $M$ is
additionally compact, then such embeddings are plentiful.
If $\nabla$ is flat and $\nabla\nu =0$ then, in fact, the total
space of the tangent bundle $TM$ has a structure of a complex
equiaffine manifold.}
\end{example}
\begin{remark}
{\rm Assume now that $N$ is a 4-dimensional almost K\"ahler manifold
with symplectic form $\kappa$. Let $M$ be a connected compact
orientable $2$-dimensional manifold and $f:M\to N$ be a Lagrangian
embedding (in the metric sense). We now have the canonical
(depending only on the metric) isomorphism, say $\mathfrak{b}$,
between vector fields and $1$-forms on $M$. By Theorem \ref{Mal} we
have the manifold $\mathcal M aL$ modeled on the Fr\'echet space
$\mathcal C ^{\infty}(\mathcal F ^1)$.
Similarly as in the proof of Theorem \ref{main-theorem} we define
the mapping
\begin{equation}
\tilde P : \mathcal C ^{\infty}(M\leftarrow \mathcal N) \ni V\to
f^*_V\kappa \in\mathcal C ^{\infty}(\mathcal F^2).
\end{equation}
Since $f^*\kappa =0$ and the normal bundle is star-shaped, the
mapping $\tilde P$ has values in $\mathcal C ^{\infty}(\mathcal
F^2_{exact})$. By composing the mapping with the isomorphism between
the normal bundle and the tangent bundle and the isomorphism
$\mathfrak{b}$ we obtain the mapping $$ P:\mathcal C
^{\infty}(\mathcal F^1)\to \mathcal C ^{\infty}(\mathcal F ^2)$$
whose linearization at $0$ is equal to the exterior differential
operator $d$. Now we can argue as in the proof of Theorem
\ref{main-theorem} and we get
\begin{thm}
Let $N$ be a 4-dimensional almost K\"ahler manifold and $f:M\to N$
be a Lagrangian embedding of a connected compact orientable
2-dimensional manifold. Then the set $$\mathcal M L= \{ [f]\in
\mathcal M aL;\ f\ is \ Lagrangian\}$$ is an infinite dimensional
Fr\'echet manifold modeled on the Fr\'echet vector space $\mathcal
C^{\infty}(\mathcal F ^1_{closed})$. It is a submanifold of
$\mathcal M aL$.
\end{thm}
}
\end{remark}
\begin{thebibliography}{20}
\bibitem{Ba} P. D. Baier, \emph{Special Lagrangian Geometry}, PhD
thesis, University of Oxford, 2001.
\bibitem{B} A. L. Besse, \emph{Einstein Manifolds}, Springer-Verlag,
1987.
\bibitem{DV} F. Dillen, L. Vrancken, \emph{Complex affine
hypersurfaces of $\C ^{n+1}$}, Parts I and II, Bull. Soc. Math.
Belg. Ser. B 40 (1988), 420-432; 41 (1989), 245-271.
\bibitem{D} P. Dombrowski, \emph{On the geometry of the tangent
bundle}, J. Reine Angew. Math. 210 (1962), 73-88.
\bibitem{G} P. Griffiths, \emph{Exterior Differential Systems and the Calculus of Variations}, Progress in Math., vol. 25, Birkh\"auser, 1983.
\bibitem{H} R. Hamilton, \emph{The inverse function theorem of Nash and Moser}, Bull. Amer. Math. Soc. 7 (1982), 65-222.
\bibitem{HL} R. Harvey, H. B. Lawson, Jr., \emph{Calibrated geometries},
Acta Math. 148 (1982), 47-157.
\bibitem{M} S. P. Marshall, \emph{Deformations of special Lagrangian
submanifolds}, PhD thesis, University of Oxford, 2002.
\bibitem{McL} R. C. McLean, \emph{Deformations of calibrated
submanifolds}, Comm. Anal. Geom. 6 (1998), 705-747.
\bibitem{O_1} B. Opozda, \emph{Affine geometry of special real submanifolds of $\C
^n$}, Geom. Dedicata 121 (2006), 155-166.
\bibitem{O} B. Opozda, \emph{Minimality of affine Lagrangian submanifolds in complex equiaffine spaces},
to appear in Intern. J. Math.
\end{document}
|
\begin{document}
\title{Generation of Photon Pairs by Stimulated Emission in Ring Resonators}
Nonlinear parametric processes are arguably the most common phenomena used for the generation of nonclassical states of light. Practical implementations typically rely on two such processes: spontaneous parametric down-conversion (SPDC), a second-order process in which a pump photon is converted to a photon pair, and spontaneous four-wave mixing (SFWM), a third-order process in which the annihilation of two pump photons again results in the generation of a photon pair. Both SPDC and SFWM have been demonstrated in a plethora of systems, ranging from bulk optics \cite{kwiat_new_1995} to integrated photonics \cite{grassani_micrometer-scale_2015}.
For a third-order nonlinearity, one can envision an alternative approach to generating non-classical light based on third-order parametric down conversion (TOPDC), in which one pump photon is converted to a photon triplet \cite{Chekhova_pra_2005}. Spontaneous TOPDC (SpTOPDC) is in fact the third-order analogue of SPDC, but with the important difference that photons are generated in triplets instead of pairs. Thus, in general the generated state is non-Gaussian. This feature has inspired considerable interest in SpTOPDC in recent years, because non-Gaussian states can be very powerful resources in quantum computation, but are difficult to generate on-demand \cite{hamel_np_2014,okoth_pra_2019,Walschaers_arXiv_21, threephoton_1997}. However, SpTOPDC is a very inefficient process, because it relies on a third-order nonlinear response with a generation rate that scales linearly with the pump intensity. A few strategies have been proposed to improve the process efficiency \cite{corona_pra_2011,Corona_ol_2011,dot_pra_2012}, but practical generation rates have not yet been demonstrated.
Here our focus is instead on stimulated TOPDC (StTOPDC). In this scenario, photons are still emitted in triplets, but the down-conversion of the pump photon is stimulated by the presence of additional seed fields which, unlike the pump field, do not provide any energy but simply stimulate the process \cite{Blay_StTOPDC}. Recently, it has been shown that StTOPDC can be exploited to characterize the properties of the quantum light that would be generated by SpTOPDC in the absence of any seed field \cite{dominguez-serna_third-order_2020, liscidini_stimulated_2013}. In contrast to that, here we are interested in StTOPDC as a scheme to generate quantum correlated photon pairs.
{Whereas earlier studies of TOPDC have dealt only with non-resonant structures, here we investigate the use of microring resonators to enhance and control StTOPDC \cite{Corona_ol_2011,okoth_pra_2019,corona_pra_2011}; addressing this resonant configuration is essential due to the qualitative differences that generally arise between nonlinear processes in resonant and non-resonant systems \cite{how_does_it_scale}.} We calculate the StTOPDC
photon pair generation rate for a composite AlN-Si$_3$N$_4$ structure compatible with the current fabrication technology, and characterize the biphoton state arising from StTOPDC in this structure by calculating the joint spectral intensity (JSI).
We begin by considering the generic Hamiltonian describing the generation of photon triplets via TOPDC, in which a pump photon in mode $P$ is down-converted into three photons, one in each of the modes $G_1$, $G_2$, and $G_3$:
\begin{align}
H_{TOPDC} = - \hbar \sum_{G_1,G_2,G_3} \Lambda_{G_1 G_2 G_3 P}
a^{\dagger}_{G_1} a^{\dagger}_{G_2} a^{\dagger}_{G_3} a_P + H.c.\label{eq: spon_TG},
\end{align}
where $a^{\dagger}_i$ is the creation operator of a photon in the $i$-th mode and $\Lambda_{G_1 G_2 G_3 P}$ is the nonlinear coupling rate, which depends on the structure under consideration.
Most studies have focused on fully degenerate ($G_1 = G_2 = G_3$) or fully non-degenerate ($G_1 \neq G_2 \neq G_3$) SpTOPDC, and the tripartite entanglement characterizing the generated triplet \cite{Moebius:16}. However, another scenario is that in which the down-conversion is stimulated by a seed field \cite{okoth_pra_2019}, with the other two photons being quantum correlated and generated in the modes $G_1$ and $G_2$. For that process it is useful to rewrite \eqref{eq: spon_TG} as
\begin{align}
H_{TOPDC} = - 3\hbar \sum_{G_1,G_2\neq S} \Lambda_{G_1 G_2 S P}
a^{\dagger}_{G_1} a^{\dagger}_{G_2} a^{\dagger}_{S} a_P + H.c. \label{eq: stim_TG},
\end{align}
where we highlight the creation operator associated with the mode $S$ that would be seeded in the stimulated process, and we restrict the discussion to configurations where $G_1, G_2 \neq S$. If one traces over all the photons generated in the mode $S$, StTOPDC can be thought of as an effective
photon pair generation process.
\begin{figure}
\caption{Schematics of the mode configurations for
(a) fully degenerate and (b) non-degenerate TOPDC,
and (c) a sketch of the ring resonator
in the non-degenerate configuration.
Arrows pointing up represent generated modes; arrows pointing down represent input modes. The dotted arrow in (b) represents the possibility of injecting a seed field in mode $S$.}
\label{fig:configuration}
\end{figure}
{One might assume at first glance that StTOPDC, when driven by classical pump and seed fields, is equivalent to dual-pump SFWM. However, although the Hamiltonians describing the two processes in this scenario are formally similar, it is important to stress that the seed field cannot be understood as a second pump; indeed, the seed does not provide any energy in the generation of the photon pairs, and is in fact amplified by StTOPDC. This distinction between StTOPDC and SFWM is due to the fact that the former stems from a `higher order' spontaneous process.}
This has practical consequences. First, because the generation of pairs is seeded, the properties of the pairs generated by StTOPDC are sensitive to those of the seed field. Second, energy conservation leads to an unusual frequency configuration of the four modes involved in the process.
{We note that unlike in SFWM, here the pump and seed fields can be asymmetrically located with respect to the generated modes (as in the case sketched in Fig. \ref{fig:configuration}b), with the pump field being the farthest. In this case, both input fields could be filtered using a low-pass filter, instead of notch or bandpass filters, which are typically required in SFWM \cite{grassani_micrometer-scale_2015}. In addition, input power can be distributed unevenly between the two inputs, without affecting the efficiency. Thus, for some implementations this frequency configuration may lead to easier filtering stages, particularly in integrated platforms.}
{The flexibility in the pump and seed frequencies also leads to other practical advantages: For example, it gives more freedom in dispersion engineering to achieve phase matching. In addition, one could use this degree of freedom, along with the ability to redistribute the input powers, to limit the effect of parasitic processes that can arise when photon pairs are generated via SFWM \cite{zhang_nat_comm_2021}; to suppress such processes in SFWM, one must rely on the use of more complex structures \cite{linearly_uncoupled}.}
With these interesting features in mind, we calculate the generation rates for TOPDC. Unlike earlier studies,
here we focus on an integrated ring resonator structure as sketched in Fig. \ref{fig:configuration}c
\cite{Corona_ol_2011,okoth_pra_2019,corona_pra_2011}. This enables us to take advantage of the field enhancement associated with the spatial and temporal light confinement, and to exploit the presence of a comb of resonances which favours the scenario in which pump, seed, and generated photons are on resonance at well-separated frequencies. We assume that the effect of the nonlinearity can be restricted to the ring, where the field intensities are the largest, so the operators $a$ and $a^{\dagger}$ in (\ref{eq: spon_TG}) and (\ref{eq: stim_TG}) refer to the ring modes.
We begin by using Fermi's Golden Rule to make a first-order calculation of the rates for SpTOPDC and StTOPDC. For simplicity, we consider pump fields and, when present, seed fields in the CW limit. For the spontaneous process governed by Eq. (\ref{eq: spon_TG}), we first consider the generation of triplets in a single fundamental resonance from a third harmonic pump field (i.e. $P \equiv T$ and $G_1=G_2=G_3 \equiv F$, see Fig.\ref{fig:configuration}a). In reality, there will be multiple energy-conserving processes that can generate triplets in sets of different resonances, but we defer the inclusion of these processes to later work. We adopt an interaction picture such that $H_{NL}^{I}(t) = e^{iH_0 t/\hbar} H_{NL} e^{-iH_0 t/\hbar}$, where $H_0$ captures the linear dynamics of the ring-channel system and $H_{NL}$ is the nonlinear Hamiltonian \cite{Yang2008}. Then for this degenerate process we have
\begin{align}
H_{\text{deg}}^{I}(t) =&
\nonumber - \hbar \Lambda_{F F F T} \mathcal{L}^2 \frac{1}{4\pi^2} \text{sinc}\left( \frac{\Delta \kappa\mathcal{L}}{2}\right) \int dk_1 ... dk_4 F_{F+}^*({k_1})\\
\nonumber
&\times F_{F+}^*({k_2})F_{F+}^*({k_3})F_{T-}({k_4}) e^{-i(\omega_{k_4T} - \omega_{k_3F} - \omega_{k_2F} - \omega_{k_1F})t} \\
&\times a^{\dagger}_F(k_1) a^{\dagger}_F(k_2) a^{\dagger}_F(k_3) \alpha_T(k_4) + H.c.,
\label{eq:H_NL_spontTG}
\end{align}
where $a^{\dagger}_F(k)$ is the creation operator of a photon in the mode $F$ with wavevector $k$, and we treat the pump classically by taking $a_T(k_4) \rightarrow \alpha_T(k_4)$, with $\alpha_T(k_4)$ the complex amplitude of the pump field in the channel. Here $\mathcal{L}$ is the ring circumference, and $\Delta \kappa = \kappa_T - 3 \kappa_F$, where $\kappa_J$ is the wavenumber in the ring corresponding to the resonant frequency $\omega_J$. If the channel waveguide and the ring are identical, $\kappa_J \cong K_J$, where $K_J$ is the resonant wavenumber in the channel waveguide. We use $ \omega_{kJ}=\omega_J +v_J(k-K_J)$, where $v_J$ is the group velocity at $\omega_J$. We neglect the effects of group velocity dispersion across each resonance \footnotemark[1], but we take them into account between resonances.
\footnotetext[1]{{This approximation is valid if the group velocity dispersion $\beta_2$ is small enough; for a 1 GHz resonance linewidth, we require $\beta_2 < 10^{-20}$ s$^2$/m. By numerical simulations, we find $\beta_2 < 10^{-22}$ s$^2$/m for the relevant modes in the sample system considered below.}}
We also introduce complex field enhancement factors
\begin{align}
F_{J\pm}(k) = \frac{1}{\sqrt{\mathcal{L}}} \left( \frac{\gamma^*_J}{v_J(K_{J} - k) \pm i\overline{\Gamma}_J }\right),
\label{eq:Lorentzian}
\end{align}
where $\gamma_J$ is the ring-channel coupling constant, and the linewidth of resonance $J$ is set by $\overline{\Gamma}_J =\omega_J/{2Q_J}$, where $Q_J$ is the loaded quality factor \cite{vernon_spontaneous_2015}. Here and in \eqref{eq:H_NL_spontTG}, $\pm$ indicates incoming (+) or outgoing (-) fields \cite{liscidini_pra_2012}. Finally,
$\Lambda_{F F F T}$ is the nonlinear coupling rate
\begin{align}
\Lambda_{F F F T} =&
\frac{\hbar \sqrt{\omega_{T}\omega_{F}^3}} {4\epsilon_0 c^2} \sqrt{\frac{{v}_F^3 {v}_T}{\overline{n}_F^3 \overline{n}_T}}\frac{\overline{\chi}_3}{\mathcal{L} A_{eff}}, \label{eq:lambda}
\end{align}
where $\overline{\chi}_3$ is a characteristic value of $\chi_3$ in the ring, and $\overline{n}_T$ and $\overline{n}_F$ are respectively characteristic values of the refractive indices at $\omega_T$ and $\omega_F$; finally, $A_{eff}$ is an effective area determined by the mode overlap in the ring \cite{how_does_it_scale}.
Assuming a CW pump at $\omega_T$, the rate of generated triplets appearing at the system's output is
\begin{align}
R^\text{spon}_{F F F} = 2^5 |\Lambda_{F F F T}|^2 \eta_F^3 \eta_T \frac{Q_F Q_T}{\hbar \omega_T^2 \omega_F}
P_T
\text{sinc}^2\left( \frac{\Delta \kappa\mathcal{L}}{2}\right),
\label{eq:spont_rateA}
\end{align}
where $P_T$ is the pump power in the input channel, and we introduce the escape efficiency $\eta_J = Q_J/Q_{J,C}$, with $Q_{J,C}$ being the quality factor determined solely by the coupling $\gamma_J$ between the ring and the input/output channel waveguide. {Here we identify an important qualitative feature of the SpTOPDC efficiency in a resonant system: The lower scaling with the ring quality factors arises due to the lower scaling with pump power, compared to SFWM and SPDC.}
{For simplicity,} we consider the particular non-degenerate case of $G_1=G_2 \equiv G, G_3 \equiv S$ (see Fig \ref{fig:configuration}b)\footnotemark[2]. For SpTOPDC in this configuration, we have
\footnotetext[2]{{Configurations where $G_1\neq G_2$ can be analysed in a similar way.}}
\begin{align}
H_{\text{non-deg}}^{I}(t) =&
\nonumber - 3\hbar \Lambda_{G G S P} \mathcal{L}^2 \frac{1}{4\pi^2} \text{sinc}\left( \frac{\Delta \kappa\mathcal{L}}{2}\right) \int dk_1 ... dk_4 F_{G+}^*({k_1}) \\
\nonumber
&\times F_{G+}^*({k_2})F_{S+}^*({k_3})F_{P-}({k_4}) e^{-i(\omega_{k_4P} - \omega_{k_3S} - \omega_{k_2G} - \omega_{k_1G})t}\\
&\times a^{\dagger}_G(k_1) a^{\dagger}_G(k_2)
a^{\dagger}_S(k_3)\alpha_P(k_4) + H.c.,
\label{eq:H_NL_spontTG_B}
\end{align}
where $S$ labels the seed mode, $\Delta \kappa = \kappa_{P}-\kappa_{S}-2\kappa_{G}$, and $\Lambda_{G G S P}$ is the nonlinear coupling rate, of the same form as (\ref{eq:lambda}). The corresponding spontaneous triplet generation rate is
\begin{align}
R_{G G S}^\text{spon} &=9\times2^5
|\Lambda_{G G S P}|^2
\frac{\eta_G^2 \eta_S \eta_P\:Q_G Q_S Q_P}{\hbar \omega_P^2 (2 Q_S \omega_G + Q_G \omega_S)}
P_P
\text{sinc}^2\left( \frac{\Delta \kappa\mathcal{L}}{2}\right).
\label{eq:spont_rateB}
\end{align}
We can now turn to the stimulation of this non-degenerate process by a seed beam in mode $S$. Taking (\ref{eq:H_NL_spontTG_B}) and treating the seed field classically by taking $a^{\dagger}_S(k_3) \rightarrow \alpha^*_S(k_3)$, we find the rate of pairs generated in mode $G$ to be
\begin{align}
R_{G G (S)}^\text{stim} &=9\times 2^6
|\Lambda_{G G S P}|^2
\eta_G^2 \eta_S \eta_P
\frac{Q_G Q_S Q_P}{\hbar^2\omega_P^2 \omega_G \omega_S^2 }
P_P P_S
\text{sinc}^2\left( \frac{\Delta \kappa\mathcal{L}}{2}\right),
\nonumber \\
&=R_{G G S}^\text{spon}\frac{P_S}{P_\text{vac}}. \label{eq:ratio}
\end{align}
Here we have introduced an effective vacuum power $P_\text{vac}={\hbar\omega_S}\left(2/{\overline{\Gamma}_S}+1/{\overline{\Gamma}_G}\right)^{-1},$
{which can be interpreted as the power associated with the vacuum fluctuations that drive the spontaneous process \cite{liscidini_stimulated_2013}. From Eq. (\ref{eq:ratio}) one can see that
the improvement of the generation rate in the stimulated regime is equal to $P_S/P_\text{vac}$. The efficiencies of SFWM and SPDC, and their stimulated counterparts, have been derived in similar terms; distinct vacuum powers can be identified in each case, {and the theoretical efficiencies of StTOPDC, SFWM, and SPDC for particular systems can be compared by referring to this earlier work \cite{how_does_it_scale}.}}
To provide a realistic estimate of resonant TOPDC rates, we use Eqs. (\ref{eq:spont_rateA}), (\ref{eq:spont_rateB}), and (\ref{eq:ratio}) with experimental parameters demonstrated in a {composite AlN-Si$_3$N$_4$} microring system engineered for third harmonic generation (THG) \cite{Surya:18}. From the THG efficiency quoted in \cite{Surya:18}, we can infer a nonlinear coupling rate of $\Lambda_{THG} \approx 0.99 s^{-1}$. It should be noted that phase matching was achieved with the pump field in the fundamental spatial mode, and the generated third harmonic field in a higher order mode. By taking $\Lambda_{F F F T} = \Lambda_{THG}$ to estimate the triplet generation efficiency, we assume the same modes are used in our reverse process. That is, we assume that a higher order spatial mode of the driving field at $\omega_T$ could be injected into the microring system. {While this is challenging, we adopt this scenario for the purpose of using an existing system to estimate the generation rates that may soon be accessible. So far, there have been no efforts toward engineering microring systems for StTOPDC, and it is likely that more practical systems could be envisaged.}
For degenerate SpTOPDC, we take the observed quality factors $Q_F = 4\times10^5 $ and $Q_T = 6.4\times10^4$ \cite{Surya:18}, we assume critical coupling ($\eta=0.5$) for all modes, and from the design of the structure we have $\Delta \kappa = 0$. We then find the rate of triplets at the system's output to be $R_{F F F}^\text{spon}/P_T = 0.03 s^{-1}W^{-1}$. This low generation rate suggests that SpTOPDC will remain experimentally impractical in the near term, unless significant improvements can be made to system parameters such as quality factors and mode overlap. Turning to StTOPDC, we take $\omega_S \approx \omega_G \approx \omega_F$, so $Q_S \approx Q_G \approx Q_F$, and $\Lambda_{G G S P} \approx \Lambda_{F F F T}$. With this, we predict $R_{G G (S)}^\text{stim}/P_P P_S = 1.4 \times 10^9 s^{-1} W^{-2}$ {in the AlN-Si$_3$N$_4$ system. For a 100 mW pump and a 10 mW seed, both being within the range of input powers studied in \cite{Surya:18}, we predict $1.4\times10^6$ pairs per second at the system's output.}
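For readers who wish to reproduce these estimates, the short script below (ours, not from \cite{Surya:18}) evaluates Eqs. (\ref{eq:spont_rateA}) and (\ref{eq:ratio}) with the quoted coupling rate and quality factors; the fundamental wavelength near 1550 nm is our assumption.
\begin{verbatim}
# Rough numerical check (ours) of the quoted TOPDC rate estimates. The
# ~1550 nm fundamental wavelength is an assumption; the remaining values
# are the quoted ones (Lambda ~ 0.99 /s, Q_F = 4e5, Q_T = 6.4e4,
# critical coupling eta = 0.5, sinc = 1).
import math

hbar, c = 1.054571817e-34, 2.998e8
Lam, Q_F, Q_T, eta = 0.99, 4e5, 6.4e4, 0.5
omega_F = 2 * math.pi * c / 1.55e-6          # assumed fundamental frequency
omega_T = 3 * omega_F

# Eq. (spont_rateA): spontaneous triplet rate per unit pump power
R_spon = 2**5 * Lam**2 * eta**4 * Q_F * Q_T / (hbar * omega_T**2 * omega_F)

# Eq. (ratio), first line: stimulated pair rate per unit pump and seed power,
# with omega_S ~ omega_G ~ omega_F, Q_S ~ Q_G ~ Q_F and Q_P = Q_T
R_stim = (9 * 2**6 * Lam**2 * eta**4 * Q_F**2 * Q_T
          / (hbar**2 * omega_T**2 * omega_F**3))

# Effective vacuum power, with Gamma_S = Gamma_G = omega_F / (2 Q_F)
Gamma = omega_F / (2 * Q_F)
P_vac = hbar * omega_F / (2 / Gamma + 1 / Gamma)

print(f"R_spon/P_T       ~ {R_spon:.2e} s^-1 W^-1")   # ~ 0.03
print(f"R_stim/(P_P P_S) ~ {R_stim:.2e} s^-1 W^-2")   # ~ 1.4e9
print(f"P_vac            ~ {P_vac:.1e} W")
print(f"pairs/s at 100 mW pump, 10 mW seed ~ {R_stim*0.1*0.01:.1e}")
\end{verbatim}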
We now consider the properties of the photons that could be generated in these non-degenerate TOPDC processes. {In the following, we assume a pulsed pump, and for StTOPDC we take a CW seed; however, more general excitation scenarios with arbitrary pump and seed pulses could be modelled using the same approach}. To first order in the nonlinear interaction,
\begin{align}
\ket{\Psi} \approx \ket{vac} -\frac{i}{\hbar}\int^{\infty}_{-\infty} dt' H_{NL}^{I}(t') \ket{vac},
\label{eq:firstorder}
\end{align}
where we take the initial and final times to $\pm \infty$ for convenience.
In the case of non-degenerate SpTOPDC, putting the interaction Hamiltonian (\ref{eq:H_NL_spontTG_B}) into (\ref{eq:firstorder}) yields a ket with the form
\begin{align}
\ket{\Psi_{III}} &\approx \ket{vac} + \beta \ket{III},\\ \nonumber
\ket{III} &= \frac{1}{\sqrt{2}} \int d{k}_1 d{k}_2 d{k}_3 \phi({k}_1,{k}_2,{k}_3) a^{\dagger}_G(k_1) a^{\dagger}_G(k_2) a^{\dagger}_S(k_3) \ket{vac},
\end{align}
where $|\beta|^2$ is the probability of generating a photon triplet per pump pulse, and
\begin{align}
\phi&(k_1,k_2,k_3) = \mathcal{N} F_{P-}\left(\frac{v_G}{v_P}(k_1+k_2) + \frac{v_S}{v_P}k_3 + \frac{1}{v_P}\Upsilon\right) \label{eq:triphoton} \\ \nonumber
\times & F^*_{G+}(k_1) F^*_{G+}(k_2) F^*_{S+}(k_3) \alpha_P\left(\frac{v_G}{v_P}(k_1+k_2) + \frac{v_S}{v_P}k_3 + \frac{1}{v_P}\Upsilon\right)
\end{align}
is the triphoton wavefunction with the normalization factor $\mathcal{N}$, where we have introduced $\Upsilon = v_P K_P - v_S K_{S} - 2 v_G K_{G}$.
Next we turn to StTOPDC, where we take Eq. (\ref{eq:H_NL_spontTG_B}) with $a^{\dagger}_S(k_3) \rightarrow \alpha^*_S(k_3)$. In doing this, we effectively trace over the mode $S$, so we now identify a biphoton state rather than a triphoton state. {The Hamiltonian is sensitive to the spectral profile of the seed field, so the properties of the pairs depend on the form of $\alpha^*_S(k_3)$; here we deal with the simple case of a CW seed at $k_S$.} We have
\begin{align}
\ket{\Psi_{II}} &\approx \ket{vac} + \beta' \ket{II},\\ \nonumber
\ket{II} &= \frac{1}{\sqrt{2}} \int dk_1 dk_2 \phi(k_1,k_2) a^{\dagger}_G(k_1) a^{\dagger}_G(k_2) \ket{vac},
\end{align}
where $|\beta'|^2$ is the probability of generating a photon pair per pump pulse. The biphoton wave function is given by
\begin{align}
\phi(k_1,k_2) &= \nonumber \mathcal{N'} F_{P-}\left(\frac{v_G}{v_P}(k_1+k_2) + \frac{v_S}{v_P} k_S + \frac{1}{v_P}\Upsilon\right) F_{G+}^*(k_1) \\ &\times F_{G+}^*(k_2) \alpha_P\left(\frac{v_G}{v_P}(k_1+k_2) + \frac{v_S}{v_P} k_S + \frac{1}{v_P}\Upsilon \right)
\label{eq:biphoton},
\end{align}
where $\mathcal{N'}$ is again a normalization constant.
Comparing Eqs. (\ref{eq:triphoton}) and (\ref{eq:biphoton}), we see that the SpTOPDC triphoton wave function at $k_3 = k_S$ is proportional to the biphoton wave function arising from the corresponding StTOPDC process, seeded by a CW field at $k_S$. From this follows the possibility of using stimulated emission tomography (SET) to characterize the state generated by SpTOPDC \cite{dominguez-serna_third-order_2020}. By characterizing the spectral density of the stimulated $\phi(k_1,k_2)$ for various values of $k_S$, one could build up the joint spectral density of the spontaneous triphoton state, slice by slice. {Recognizing this, one can also see that changing the frequency of the CW seed also changes the spectral properties of the pairs in a nontrivial way, since the dependence of the full triphoton wave function on $k_3$ is nontrivial; this comprises an example of the tunability of the photon pairs from StTOPDC.}
Eq. (\ref{eq:biphoton}) also provides insight into the characteristics of the photon pairs generated by StTOPDC. Its form indicates that an approximately separable biphoton wave function could be generated with the appropriate choice of $\alpha_P(k)$, provided that the generated mode's resonance is much narrower than the resonance at the pump mode. {This latter condition may be naturally satisfied in StTOPDC; the wide range of frequencies and the possible use of higher-order modes could allow for the linewidth at the pump resonance to be significantly different than the linewidths at the generation resonances. Indeed, the quality factors observed in the sample AlN-Si$_3$N$_4$ system satisfy the requirements for generating separable photon pairs; the resonant linewidths at $\omega_G$ and $\omega_P$ are mismatched, with the resonance at $\omega_G$ being the sharper one} \cite{vernonOL, Surya:18}.
\begin{figure}
\caption{{The normalized JSI describing pairs generated by StTOPDC, plotted in terms of dimensionless variables $\kappa_{1(2)}$.}}
\label{fig:JSIs}
\end{figure}
{In Fig. \ref{fig:JSIs} we plot the JSI for StTOPDC using the AlN-Si$_3$N$_4$ parameters quoted in our rate estimate above, and a Gaussian pump pulse with a FWHM of $10$ ps \footnotemark[3]. {The low Schmidt number is attributed to the mismatched linewidths at $\omega_G$ and $\omega_P$, and to the pump duration, which is much shorter than the photon dwelling time in the resonator \cite{Surya:18, vernonOL}}.} {This corroborates the expectation that StTOPDC may be naturally suitable for the generation of unentangled photon pairs; achieving a comparable degree of separability is typically more challenging in SFWM, where carefully engineered microring systems or pumping schemes are required\cite{vernonOL, Christensen}.}
\footnotetext[3]{{Here we focus on a single JSI peak, since we have considered only the generation of photons in a single resonance. In general, multiple JSI peaks will exist, corresponding to the different sets of resonances in which pairs could be generated. The separation of the peaks in frequency space would be set by the ring's free spectral range, which is much larger than the resonance linewidths.}}
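As a rough cross-check of this qualitative picture, the script below (ours) evaluates Eq. (\ref{eq:biphoton}) on a grid of detunings using Lorentzian field enhancements built from the quoted quality factors, a 10 ps Gaussian pump, and an assumed fundamental wavelength near 1550 nm, and estimates the Schmidt number by a singular-value decomposition.
\begin{verbatim}
# Sketch (ours): biphoton wave function of Eq. (biphoton) on a detuning grid,
# with Lorentzian field enhancements and a Gaussian pump. Linewidths follow
# from the quoted quality factors; the ~1550 nm fundamental wavelength is an
# assumption. The CW seed only fixes k_S and is absorbed into normalization.
import numpy as np

c = 2.998e8
omega_G = 2 * np.pi * c / 1.55e-6            # assumed generated-mode frequency
omega_P = 3 * omega_G                        # pump near the third harmonic
Gamma_G = omega_G / (2 * 4e5)                # linewidth from Q_G = 4e5
Gamma_P = omega_P / (2 * 6.4e4)              # linewidth from Q_P = 6.4e4
tau = 10e-12 / (2 * np.sqrt(2 * np.log(2)))  # 10 ps FWHM Gaussian pump

d = np.linspace(-6 * Gamma_G, 6 * Gamma_G, 400)
D1, D2 = np.meshgrid(d, d, indexing="ij")

F_G = lambda x: 1.0 / (x + 1j * Gamma_G)     # field enhancements (up to constants)
F_P = lambda x: 1.0 / (x - 1j * Gamma_P)
alpha_P = lambda x: np.exp(-(x * tau) ** 2 / 2)

phi = F_P(D1 + D2) * np.conj(F_G(D1)) * np.conj(F_G(D2)) * alpha_P(D1 + D2)
phi /= np.linalg.norm(phi)

s = np.linalg.svd(phi, compute_uv=False)
s /= np.linalg.norm(s)
print("Schmidt number K ~", 1.0 / np.sum(s ** 4))  # K close to 1: nearly separable
\end{verbatim}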
{We have presented a simple first treatment of resonant StTOPDC; there is a wealth of extensions to investigate, particularly with more complex pumping schemes and photonic systems. Even using a system that is not optimized for StTOPDC, our rate estimates suggest that this process will be viable in integrated photonic systems within the near future. Further theoretical and experimental work on StTOPDC should reveal qualitatively new aspects of quantum nonlinear optics.}\\
\textbf{Funding.} M.B. acknowledges support from the University of Toronto Faculty of Arts \& Science Top Doctoral Fellowship. J.E.S. and M.B. acknowledge support from the Natural Sciences and Engineering Research Council of Canada. M.L. acknowledges support by Ministero dell’Istruzione, dell’ Università e della Ricerca (Dipartimenti di Eccellenza Program (2018–2022)).
\textbf{Data Availability.} Data underlying the results presented in this paper are not publicly available at this time but may be obtained from the authors upon reasonable request.
\textbf{Disclosure.} The authors declare no conflicts of interest.
\bibliographyfullrefs{refs}
\end{document}
|
\begin{document}
\title{Memorization Capacity of Neural Networks with Conditional Computation}
\begin{abstract}
Many empirical studies have demonstrated the performance benefits of conditional computation in neural networks, including reduced inference time and power consumption. We study the fundamental limits of neural conditional computation from the perspective of memorization capacity.
For Rectified Linear Unit (ReLU) networks without conditional computation, it is known that memorizing a collection of $n$ input-output relationships can be accomplished via a neural network with $O(\sqrt{n})$ neurons. Calculating the output of this neural network can be accomplished using $O(\sqrt{n})$ elementary arithmetic operations of additions, multiplications and comparisons for each input. Using a conditional ReLU network, we show that the same task can be accomplished using only $O(\log n)$ operations per input. This represents an almost exponential improvement as compared to networks without conditional computation.\ We also show that the $\Theta(\log n)$ rate is the best possible. Our achievability result utilizes a general methodology to synthesize a conditional network out of an unconditional network in a computationally-efficient manner, bridging the gap between unconditional and conditional architectures.
\end{abstract}
\section{Introduction}
\label{sec:intro}
\subsection{Conditional Computation}
Conditional computation refers to utilizing only certain parts of a neural network, in an input-adaptive fashion \cite{davis2013low,bengio2013estimating,eigen2013learning}. This can be done through gating mechanisms combined with a tree-structured network, as in the case of ``conditional networks'' \cite{ioannou2016decision} or neural trees and forests \cite{tanno2019adaptive,yang2018deep,kontschieder2015deep}. Specifically, depending on the inputs or some features extracted from the inputs, a gate can select the sub-networks that will further process the gate's input features. Another family of conditional computation methods consists of the so-called early-exit architectures \cite{teerapittayanon2016branchynet, kaya2019shallow, gormez2022early}. In this case, one typically places classifiers at intermediate layers of a large network. This makes it possible to exit at a certain layer to reach a final verdict on classification, if the corresponding classifier is confident enough of its decision.
Several other sub-techniques of conditional computation exist and have been well-studied, including layer skipping \cite{graves2016adaptive}, channel skipping in convolutional neural networks \cite{gao2018dynamic}, and reinforcement learning methods for input-dependent dropout policies \cite{bengio2015conditional}. Although there are many diverse methods \cite{han2021dynamic}, the general intuitions as to why conditional computation can improve the performance of neural networks remain the same: First, the computation units are chosen in an adaptive manner to process the features that are particular to the given input pattern. For example, a cat image is ideally processed by only ``neurons that are specialized to cats.'' Second, one allocates just enough computation units to a given input, avoiding a waste of resources. The end result is various benefits relative to a network without conditional computation, including reduced computation time and power/energy consumption \cite{kim2020energy}. Achieving these benefits is especially critical in edge networks with resource-limited devices \cite{koyuncu:c26, koyuncu:c27}. Moreover, conditioning incurs minimal loss, or in some cases, no loss in learning performance.
Numerous empirical studies have demonstrated the benefits of conditional computation in many different settings.\ Understanding the fundamental limits of conditional computation in neural networks is thus crucial, but has not been well-investigated in the literature. There is a wide body of work on a theoretical analysis of decision tree learning \cite{maimon2014data}, which can be considered as an instance of conditional computation. These results are, however, not directly applicable to neural networks.
In \cite{cho2014exponentially}, a feature vector is multiplied by different weight matrices, depending on the significant bits of the feature vector, resulting in an exponential increase in the number of free parameters of the network (referred to as the capacity of the network in \cite{cho2014exponentially}). On the other hand, the potential benefits of this scheme have not been formally analyzed.
\subsection{Memorization Capacity}
In this work, we consider the problem of neural conditional computation from the perspective of memorization capacity. Here, the capacity refers to the maximum number of input-output pairs in reasonably general position that a neural network of a given size can learn. It is typically expressed as the minimum number of neurons or weights required for a dataset of a given size, say $n$.
Early work on the memorization capacity of neural networks includes \cite{baum1988capabilities, mitchison1989bounds, sontag1990remarks}. In particular, \cite{baum1988capabilities} shows that, for threshold networks, $O(n)$ neurons and weights are sufficient for memorization. This sufficiency result is later improved to $O(\sqrt{n})$ neurons and $O(n)$ weights by \cite{vershynin2020memory, rajput2021exponential}. There are also several studies on other activation functions, especially the Rectified Linear Unit (ReLU), given its practicality and wide utilization in deep learning applications. Initial works \cite{zhang2021understanding, hardt2016identity} show that $O(n)$ neurons and weights are sufficient for memorization in the case of ReLU networks. This is improved to $O(\sqrt{n})$ weights and $O(n)$ neurons in \cite{yun2019small}. In addition, \cite{park2021provable} proves the sufficiency of $\smash{O(n^{2/3})}$ weights and neurons, and finally, \cite{vardi2021optimal} shows that memorization can be achieved with only $O(\sqrt{n})$ weights and neurons, up to logarithmic factors.
For the sigmoid activation function, it is known that $O(\sqrt{n})$ neurons and $O(n)$ weights \cite{huang2003learning}, or $\smash{O(n^{2/3})}$ weights and neurons \cite{park2021provable} are sufficient. Memorization and expressivity have also been studied in the context of specific network architectures such as convolutional neural networks \cite{cohen2016convolutional,nguyen2018optimization}.
The aforementioned achievability results have also been proven to be tight in certain cases. A very useful tool in this context is the Vapnik-Chervonenkis (VC) dimension \cite{vapnik2015uniform}. In fact, applying the VC dimension theory to neural networks \cite{anthony1999neural}, it can be shown that the numbers of neurons and weights must both grow at least polynomially with the size of the dataset for successful memorization. Specifically, $\Omega(\sqrt{n})$ weights and $\Omega(n^{1/4})$ neurons are necessary for ReLU networks, up to logarithmic factors. We will justify this statement later on for completeness.
\subsection{Scope, Main Results, and Organization}
We analyze the memorization capacity of neural networks with conditional computation. We describe our neural network and the associated computational complexity models in Section \ref{sec:model}. We describe a general method to synthesize conditional networks from unconditional networks in Section \ref{sec:synth}. We provide our main achievability and converse results for memorization capacity in Sections \ref{sec:reluach} and \ref{sec:reluconv}, respectively. We draw our main conclusions in Section \ref{sec:conclusions}. Some of the technical proofs are provided in the supplemental material.
We note that this paper is specifically on analyzing the theoretical limits on neural conditional computation. In particular, we show that $n$ input-output relationships can be memorized using a conditional network that needs only $O(\log n)$ operations per input or inference step. The best unconditional architecture requires $O(\sqrt{n})$ operations for the same task. This suggests that conditional models can offer significant time/energy savings as compared to unconditional architectures. In general, understanding the memorization capacity of neural networks is a well-studied problem of fundamental importance and is related to the expressive power of neural networks. A related but separate problem is generalization, i.e. how to design conditional networks that can not only recall the memory patterns with reasonable accuracy but also generalize to unseen examples. The ``double-descent'' phenomenon \cite{belkin2019reconciling, nakkiran2021deep} suggests that the goals of memorization and generalization are not contradictory and that a memorizing network can potentially also generalize well. A further investigation of this phenomenon in the context of conditional networks, and the design of conditional networks for practical datasets remains beyond the scope of the present work.
{\bf Notation: }
Unless specified otherwise, all vector variables are column vectors. We use ordinary font (such as $u$) for vectors, and the distinction between a vector and scalar will be clear from the context. The symbols $O,\Omega$, and $\Theta$ are the standard Bachmann–Landau symbols. Specifically, $f_n \in O(g_n)$ means there is a constant $C>0$ such that $f_n \leq C g_n$ for sufficiently large $n$. On the other hand, if $f_n \in \Omega(g_n)$, then there is a constant $C>0$ such that $f_n > C g_n$ for sufficiently large $n$. We write $ f_n \in \Theta(g_n)$ if $f_n \in O(g_n)$ and $f_n \in \Omega(g_n)$. The set $\mathbb{R}^p$ is the set of all $p$-dimensional real-valued column vectors. The superscript $(\cdot)^T$ is the matrix transpose. The function $\mathbf{1}(\cdot)$ is the indicator function, and $\lceil \cdot \rceil$ is the ceiling operator. A function $f(x)$ of variable $x$ is alternatively expressed as the mapping $x\mapsto f(x)$. Operator $\mathrm{rank}(\cdot)$ is the rank of a matrix. Finally, $\|\cdot\|$ is the Euclidean norm.
\section{System Model}
\label{sec:model}
\subsection{Unconditional Feedforward Networks}
Consider an ordinary unconditional feedforward network of a homogeneous set of neurons, all of which have the same functionality. We also allow skip connections, which are mainly utilized for the generality of the model for converse results (the construction in our achievability results also skips layers, but at most one layer at a time). Formally, we consider an $L$-layer network with the ReLU operation $\phi(x) = \max\{0,x\}$, and the input-output relationships
\begin{align}
\label{eq:systemmodel}
y_{\ell} = \phi\left(W_{\ell} \left[y_{\ell-1}^T \cdots y_0^T \right]^T\right),\,\ell=1,\ldots,L,
\end{align}
where $y_{0}$ is the input to the neural network, $y_{\ell}$ is the output at Layer $\ell$, and $y_L$ is the neural network output. Also, $W_{\ell}$ is the weight matrix at Layer $\ell$ of appropriate dimensions.
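For concreteness, the following short sketch (ours, not part of the model description; the layer widths and random weights are placeholders) evaluates (\ref{eq:systemmodel}) with ReLU activations and full skip connections.
\begin{verbatim}
# Minimal sketch: forward pass of the unconditional ReLU network in
# Eq. (eq:systemmodel); each layer sees the concatenation of all previous
# outputs (skip connections). Sizes and weights below are placeholders.
import numpy as np

def relu(x):
    return np.maximum(0.0, x)

def forward(weights, y0):
    outputs = [y0]
    for W in weights:                            # W_l maps [y_{l-1}^T ... y_0^T]^T
        stacked = np.concatenate(outputs[::-1])
        outputs.append(relu(W @ stacked))
    return outputs[-1]                           # y_L

rng = np.random.default_rng(0)
p, widths = 4, [5, 3, 2]                         # input dimension and layer widths
dims = [p] + widths
weights = [rng.standard_normal((widths[l], sum(dims[:l + 1])))
           for l in range(len(widths))]
print(forward(weights, rng.standard_normal(p)))
\end{verbatim}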
\subsection{Measuring the Cost of Computation}
\label{sec:meascost}
A recurring theme of the paper will be to calculate the output of a neural network given an arbitrary input, however with low computational complexity. In particular, in (\ref{eq:systemmodel}), given the outputs of Layer $\ell-1$, the calculation of the outputs of Layer $\ell$ can be accomplished through multiplication with matrix $W_{\ell}$, followed by the activation functions. Each ReLU activation function can be implemented via a simple comparison given local fields. Hence, calculation of the output of Layer $\ell$ can be accomplished with $O(\mathrm{dim}W_{\ell})$ operations (multiplications, additions, and comparisons), and the output $y_L$ of the entire network can be calculated using $O(\sum_{\ell}\mathrm{dim}W_{\ell})$ operations. Here, $\mathrm{dim}W_{\ell}$ represents the product of the number of rows and columns in $W_{\ell}$. In other words, $\mathrm{dim}W_{\ell}$ is the number of entries of $W_{\ell}$. Hence, in unconditional architectures, the number of operations to calculate the output of the network is essentially the same (up to constant multipliers) as the number of weights in the network. With regard to our basic measure of complexity, which relies on counting the number of operations, one can argue that multiplication is more complex than addition or comparison, and hence should be assigned a larger complexity. We hasten to note that the relative difficulty of different operations will not affect our final results, which will have an asymptotic nature.
It is instructive to combine the $O(\sum_{\ell}\mathrm{dim}W_{\ell})$ complexity baseline with the memorization results described in Section \ref{sec:intro}. Let $X = \{x_1,\ldots,x_n\}\subset\mathbb{R}^p$ be a dataset of inputs. Let $d_1,\ldots,d_n\in\mathbb{R}^r$ be the corresponding desired outputs. In the memorization task, one wishes to design a network that can provide an output of $d_i$ for an input of $x_i$ for each $i\in\{1,\ldots,n\}$. It is known that, up to logarithmic factors, $O(\sqrt{n})$ weights and neurons are sufficient for memorization of $n$ patterns \cite{vardi2021optimal}.
It follows from the discussion in the paragraph above that $O(\sqrt{n})$ operations are sufficient to recall a stored memory pattern (i.e. to calculate the output of the neural network for a given $x_i$). The goal of this paper is to accomplish the same task using far fewer operations per input. Specifically, we will show how to achieve perfect recall using only $O(\log n)$ operations per input. We shall later show that $\Theta(\log n)$ is, in fact, the best possible rate.
\subsection{Conditional Networks}
In order to achieve perfect recall using a subpolynomial number of operations, we use the idea of conditional computation. The conditioning model that we utilize in this work is a simple but general scenario where we allow executing different sub-neural networks depending on how an intermediate output of the network compares to some real number. Each conditioning is thus counted as one operation. Formally, we describe a conditional neural network via a rooted full binary tree where every vertex has either $0$ or $2$ children. Every vertex is a sequence of operations of the form $\mathtt{v}_{n+1} \leftarrow \phi(\beta_{n}\mathtt{v}_{n} + \cdots + \beta_{1} \mathtt{v}_1)$, where $\beta_1,\ldots,\beta_n$ are weights, and variables $\mathtt{v}_1,\ldots,\mathtt{v}_n$ are either (i) inputs to the neural network, or (ii) defined as new variables preceding $\mathtt{v}_{n+1}$ at the same vertex or at one of the ancestor vertices. Every edge is a conditioning $\mathtt{v} \circ \beta$, where $\mathtt{v}$ should be defined at any one of the vertices that connects the edge to the root node, $\beta$ is a constant weight, and $\circ\in\{\leq , <, =, \neq, >, \geq\}$. We assume that the two edges connected to the same vertex correspond to complementary conditions; e.g. $\mathtt{v}_1 < 3$ and $\mathtt{v}_1 \geq 3$.
An example conditional network, expressed in algorithmic form, is provided in Algorithm \ref{alg:cnnexample}, where $u$ represents the input, $o$ is the output, and the subscripts are the vector components. For example, if the input is a vector $u$ with $u_1 > 3$, resulting in an intermediate feature vector with $z_3 \neq 5$, then two operations are incurred due to the conditionings in Lines 1 and 5, and $O(\mathrm{dim}(W_1)+\mathrm{dim}(W_2)+\mathrm{dim}(W_5))$ operations are accumulated due to neural computations.
Our model encompasses various neural conditional computation models in the literature. One example is early exit architectures \cite{teerapittayanon2016branchynet, kaya2019shallow, gormez2022class}. As described in Section \ref{sec:intro}, a typical scenario is where one places intermediate \begin{wrapfigure}{r}{0.3\textwidth}
\vspace{-16pt}
\begin{minipage}{0.3\textwidth}
\begin{algorithm}[H]
\caption{An example conditional neural network}\label{alg:cnnexample}
\begin{algorithmic}[1]
\If{$u_1 > 3$}
\State $z = \phi(W_2 \phi(W_1 u))$
\If{$z_3 = 5$}
\State $o = \phi(W_4 z)$
\Else
\State $o = \phi(W_5 z)$
\EndIf
\Else
\State $o = \phi(W_3 u)$
\EndIf
\end{algorithmic}
\end{algorithm}
\end{minipage}
\vspace{-10pt}
\end{wrapfigure}classifiers to a deep neural network. If an intermediate classifier is confident enough of a decision, then an ``early exit'' is performed with the corresponding class decision. Here, one skips subsequent layers, saving computation resources. The decision to exit is often a simple threshold check, e.g. whether one of the soft probability outputs of the intermediate classifier exceeds a certain threshold. Hence, most early exit networks can be modeled via the simple if-else architecture described above. Mixture of experts (MoE) architectures \cite{shazeer2017outrageously,fedus2022switch} can also be realized under our model. In this case, there are multiple gating networks, each of which is responsible for one expert. One routes a feature vector to only a subset of experts whose gating networks have the largest outputs. The choice of experts can be accomplished through if-else statements. For example, for two gates and experts, the gate with the largest output can be found by comparing the difference between the two gates' outputs against zero. Another gating approach that can be realized as a special case of our model can be found in \cite{cho2014exponentially}.
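To make the operation count of Section \ref{sec:meascost} concrete for this example, the sketch below (ours, with placeholder weight matrices) transcribes Algorithm \ref{alg:cnnexample} into plain code and tallies the operations along the executed path.
\begin{verbatim}
# Sketch with placeholder weights: the conditional network of Algorithm 1.
# Each if/else test counts as one operation; applying a weight matrix W
# costs on the order of dim(W) = rows * cols operations.
import numpy as np

relu = lambda x: np.maximum(0.0, x)
rng = np.random.default_rng(1)
W1, W2 = rng.standard_normal((6, 4)), rng.standard_normal((6, 6))
W3 = rng.standard_normal((3, 4))
W4, W5 = rng.standard_normal((3, 6)), rng.standard_normal((3, 6))

def conditional_net(u):
    ops = 1                                  # conditioning on u_1 > 3
    if u[0] > 3:
        z = relu(W2 @ relu(W1 @ u))
        ops += W1.size + W2.size
        ops += 1                             # conditioning on z_3 = 5 (z[2] here)
        if z[2] == 5:
            o, ops = relu(W4 @ z), ops + W4.size
        else:
            o, ops = relu(W5 @ z), ops + W5.size
    else:
        o, ops = relu(W3 @ u), ops + W3.size
    return o, ops

print(conditional_net(np.array([4.0, 0.0, 1.0, -2.0])))
\end{verbatim}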
\section{Synthesizing a Conditional Network out of an Unconditional Network}
\label{sec:synth}
Consider an arbitrary unconditional network as in (\ref{eq:systemmodel}), whose implementation requires $O(\sum_{\ell}\mathrm{dim}W_{\ell})$ operations, as discussed in Section \ref{sec:meascost}. Suppose that the network is well-trained in the sense that it can already provide the output $d_i$ for a given input $x_i$, for every $i\in\{1,\ldots,n\}$. Out of such an unconditional network, we describe here a general methodology to synthesize a conditional network that requires much fewer than $O(\sum_{\ell}\mathrm{dim}W_{\ell})$ operations, while keeping the input-output relationships $x_i \mapsto d_i,\,i\in\{1,\ldots,n\}$ intact.
We first recall some standard terminology \cite{haykin}. Consider the neuron $[x_1,\ldots,x_n] \mapsto \phi(\sum_i x_i w_i)$. We refer to $x_1,\ldots,x_n$ as the neuron inputs, and $w_1,\ldots,w_n$ as the neuron weights. We can now provide the following definition.
\begin{definition}
Suppose that the activation function satisfies $\phi(0) = 0$. Given some fixed input to a neural network, a neuron with at least one non-zero input is called an active neuron. A neuron that is not active is called an inactive or a deactivated neuron. A weight is called active if it connects an active neuron to another active neuron and is non-zero. A weight is called inactive if it is not active.
\end{definition}
The source of the phrase ``inactive'' is the following observation: Consider an input to the neural network and the corresponding output. By definition, we will obtain the same output after removing all inactive neurons from the network for the same input. We can simply ``ignore'' inactive neurons. Likewise, we can remove any inactive weight and obtain the same output.
Our idea is to condition the computation on the set of active neurons and the corresponding active weights. Note that removing the inactive weights and neurons does not change the network output. Moreover, often the number of active weights given an input can be significantly lower than the overall number of weights of the unconditional architecture (which determines the number of operations required to perform inference on the unconditional network), resulting in huge computational savings. The complication in this context is that the set of inactive weights depends on the network input. Fortunately, it turns out that determining the set of active weights can be accomplished with fewer operations than actually computing the local fields or outputs of the corresponding active neurons. The final result is provided by the following theorem.
\begin{theorem}
\label{th:synth}
Consider an arbitrary dataset $X = \{x_1,\ldots,x_n\}\subset\mathbb{R}^p$ of inputs. Let $d_1,\ldots,d_n\in\mathbb{R}^r$ be the corresponding desired outputs. Suppose that the unconditional neural network defined by (\ref{eq:systemmodel}) satisfies the desired input-output relationships in the sense that for any $i$, if the input to the network is $x_i$, then the output is $d_i$. Also suppose that the input $x_i$ results in $\omega_i$ active weights. Then, there is a conditional network that similarly satisfies all desired input-output relationships, and for every $i$, performs at most $p+4\omega_i$ operations given input $x_i$.
\end{theorem}
\begin{proof} (Sketch)
By an ``input configuration,'' we mean a certain subset of neurons that are active at a certain layer of the unconditional ``base'' network. We represent input configurations by binary vectors where ``$0$'' represents an inactive neuron, while ``$1$'' represents an active neuron. As an example, the input configuration $[1\,0\,1]^T$ refers to a scenario where only the first and the third neurons at the layer are active. Analogous to input configurations, ``output configurations'' are binary vectors that represent whether neurons provide zero or non-zero outputs. For example, an output configuration of $[1\,1\,0]^T$ means that only the first and the second neurons provide a non-zero output.
Consider first an unconditional network without skip connections. The key idea is to observe that, given the output configuration of Layer $\ell-1$, one can uniquely obtain the input configuration of Layer $\ell$. Hence, a conditional network can be designed to operate in the following manner: Given an input, we first find the output configuration $\mathtt{OC}_0$ at Layer $0$, which is nothing but the non-zero components of the input vector. This can be done by $p$ conditioning statements, where $p$ is the input dimension. The result is a unique input configuration $\mathtt{IC}_1$ at Layer $1$. Meanwhile, we can obtain the output $y_1$ of Layer $1$ by only calculating the outputs of neurons that correspond to the non-zero components of $\mathtt{IC}_1$, since other neurons at Layer $1$ are guaranteed to have all-zero inputs and thus provide zero output. This can be accomplished via $O(a_1)$ multiplications and additions, where $a_{\ell}$ represents the number of active weights in Layer $\ell$. Then, we find the output configuration $\mathtt{OC}_1$ at Layer $1$, using $|\mathtt{IC}_1|$ conditioning statements on $y_1$. Having obtained $\mathtt{OC}_1$, we can similarly find the Layer $2$ input configuration and output. The conditional network processes the remaining layers recursively in the same manner. The functionality of the unconditional network is reproduced exactly, so all input-output relationships are satisfied. Given that $\sum_{\ell} a_{\ell} = \omega_i$, the total complexity is $p+O(\omega_i)$. We refer to the complete proof in Appendix \ref{sec:syntthproof} for the precise bound and generalization to networks with skip connections.
\end{proof}
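The recursion above can be written down directly. The following sketch (ours; dense placeholder weights, no skip connections) computes only the neurons whose input configuration is non-zero, so the operation count grows with the number of active weights rather than with the total network size.
\begin{verbatim}
# Sketch (placeholder weights, no skip connections): evaluate the network
# layer by layer, computing only neurons that have at least one non-zero
# input, as in the proof of Theorem 1. `ops` roughly counts multiplications
# and additions over active weights plus the conditioning statements.
import numpy as np

def conditional_eval(weights, y0):
    ops = len(y0)                                 # conditionings giving OC_0
    out_cfg = y0 != 0
    y = y0
    for W in weights:
        in_cfg = (W[:, out_cfg] != 0).any(axis=1)  # IC_l from OC_{l-1}
        active_W = W[np.ix_(in_cfg, out_cfg)]
        local = np.zeros(W.shape[0])
        local[in_cfg] = active_W @ y[out_cfg]      # only active weights are used
        ops += 2 * np.count_nonzero(active_W)      # multiplications + additions
        y = np.maximum(0.0, local)
        ops += int(in_cfg.sum())                   # conditionings giving OC_l
        out_cfg = y != 0
    return y, ops

rng = np.random.default_rng(2)
Ws = [rng.standard_normal((8, 5)), rng.standard_normal((4, 8))]
x = np.array([1.0, 0.0, 0.0, -2.0, 0.0])
print(conditional_eval(Ws, x))
\end{verbatim}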
The proof of the theorem in Appendix \ref{sec:syntthproof} suggests that the true complexity is closer to $2 \omega_i$ than the actual formal upper bound provided in the theorem statement. The number $2 \omega_i$ stems from the $\omega_i$ additions and $\omega_i$ multiplications that are necessary to calculate the local fields of active neurons.
Theorem \ref{th:synth} shows that a design criterion to come up with low computational-complexity neural networks might be to put a constraint on the number of active weights given any training input to an ordinary, unconditional feedforward architecture. Using the theorem, we can synthesize a conditional neural network with the same functionality as the unconditional feedforward network. In the next section, we will apply this idea to the problem of memorization capacity.
We conclude this section by noting that Theorem \ref{th:synth} may prove to be useful in other applications. For example, any method that results in sparse representations or weights, such as pruning, will result in many inactive neurons and weights. Sparsity is useful; however, unstructured sparsity is difficult to exploit: For example, multiplying by a matrix half of whose entries are zero at random positions will likely be as difficult as multiplying by a random matrix without sparsity constraints. The construction in Theorem \ref{th:synth} may provide computational savings for such scenarios.
Another observation is how Theorem \ref{th:synth} can potentially simplify the design of conditional networks. In any conditional architecture, an important question is where to place the decision gates that route information to different parts of the network. It is combinatorially very difficult, if not impossible, to optimize a heterogeneous collection of ``ordinary neurons'' and ``gates.'' Such an optimization is also completely unsuitable for a gradient approach. Hence, most previous works provide some empirical guidelines for gate placement, and do not optimize over the gate locations once they are placed according to these guidelines. The message of Theorem \ref{th:synth} is that there is no need to distinguish between gates and neurons. All that needs to be done is to train an unconditional network consisting only of ordinary neurons to be as ``inactive'' as possible, e.g. by increased weight sparsity through regularization \cite{louizos2018learning}. The theorem can then construct a conditional network that can exploit the sparsity to the fullest, placing the gates automatically.
\section{Memorization with Low Computational Complexity}
\label{sec:reluach}
In this section, we introduce our ReLU network to memorize the input-output relationships $x_i \mapsto d_i,\,i=1,\ldots,n$. Our network will only yield $O(\log n)$ active neurons and weights per input. Application of Theorem \ref{th:synth} will then imply a conditional network that can achieve perfect recall with only $O(\log n)$ operations per input, as desired.
Given a neural network $f$, and an input $x$ to $f$, let $A(x;f)$ and $W(x;f)$ denote the number of active neurons and active weights, respectively. Our feedforward ReLU network construction is provided by the following theorem.
\begin{theorem}
\label{th:reluach}
Let $X = \{x_1,\ldots,x_n\}\subset\mathbb{R}^p$ be a dataset of input patterns such that for every $i,j\in\{1,\ldots,n\}$ with $i\neq j$, we have $x_i \neq x_j$. Let $\bar{x}_i = [\begin{smallmatrix} 1 \\ x_i \end{smallmatrix}]\in\mathbb{R}^{q}$, where $q=1+p$, be the augmented dataset patterns for biasing purposes. Let $d_1,\ldots,d_n\in\mathbb{R}^r$ be the corresponding desired outputs. Suppose every component of $d_i$ is non-negative for each $i\in\{1,\ldots,n\}$. Then, there is a neural network $f:\mathbb{R}^{q}\rightarrow\mathbb{R}^r$ such that for every $i\in\{1,\ldots,n\}$, we have $f(\bar{x}_i) = d_i$,
\begin{align}
\label{eq:th1actneuronbound} A(\bar{x}_i;f) & \leq 2(q+1)\lceil \log_2 n \rceil + q+r \in O(r+q\log n), \\
\label{eq:th1actweightbound} W(\bar{x}_i;f) & \leq 12q(q+1)\lceil \log_2 n \rceil + (r+2)q+r-2 \in O(rq+q^2\log n).
\end{align}
\end{theorem}
\begin{proof} (Sketch)
We provide an illustrative sketch of the proof. Suppose we wish to memorize the two-dimensional dataset given by Fig. \ref{fig:divideandconquera}. We can divide the dataset into two parts via a line, the resulting two parts into two further sub-parts, and so on, until reaching singletons, as shown in Fig. \ref{fig:divideandconquerb}. The overall network architecture that achieves the performance in the statement of the theorem is then shown in Fig. \ref{fig:nnforachievability}. The initial block $\mathbf{T}$ is a basic preprocessing translation. The ``switch'' $\mathbf{S}_{ij}$ corresponds to the line parameterized by weights $w_{ij}$ in Fig. \ref{fig:divideandconquerb}. The switch $\mathbf{S}_{ij}$ routes the zero vector to one output path and copies its input to the other output path, depending on which side of the $w_{ij}$-line its input vector resides. The switches are followed by ReLU neurons with weights $\gamma_i$. These neurons map the corresponding input pattern on the active path to its desired output. Finally, the outputs of the $\gamma_i$-neurons are accumulated. As an example, the signals on the graph for input pattern $\bar{x}_6$ are provided, with $\bar{\bar{x}}_6 \triangleq \mathbf{T}(\bar{x}_6)$. In general, all input-output relationships are satisfied with $O(\log n)$ active neurons per dataset sample. We refer to the complete proof in Appendix \ref{sec:reluachthproofer} for the precise bounds.
\end{proof}
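To illustrate the divide-and-conquer idea outside the network formalism, the sketch below (ours, not the construction of Appendix \ref{sec:reluachthproofer}) stores a toy dataset in a balanced tree of hyperplane tests, so that recalling any $d_i$ takes $O(\log n)$ comparisons; the switches $\mathbf{S}_{ij}$ and $\gamma_i$-neurons realize the same tree inside a ReLU network.
\begin{verbatim}
# Sketch (ours): a balanced binary tree of hyperplane splits recalling the
# desired output of any stored pattern with O(log n) comparisons. Splitting
# directions are random, which separates points in general position; the
# paper's construction realizes such a tree with switches and ReLU neurons.
import numpy as np

def build_tree(idx, X, rng):
    if len(idx) == 1:
        return ("leaf", idx[0])
    w = rng.standard_normal(X.shape[1])              # split direction
    proj = X[idx] @ w
    order = np.argsort(proj)
    m = len(idx) // 2
    thresh = 0.5 * (proj[order[m - 1]] + proj[order[m]])
    left = [idx[i] for i in order[:m]]
    right = [idx[i] for i in order[m:]]
    return ("node", w, thresh, build_tree(left, X, rng), build_tree(right, X, rng))

def recall(tree, x, d):
    while tree[0] == "node":                         # O(log n) comparisons
        _, w, thresh, left, right = tree
        tree = left if x @ w <= thresh else right
    return d[tree[1]]

rng = np.random.default_rng(3)
n = 8
X, d = rng.standard_normal((n, 2)), rng.random((n, 3))
tree = build_tree(list(range(n)), X, rng)
print(all(np.array_equal(recall(tree, X[i], d), d[i]) for i in range(n)))  # True
\end{verbatim}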
\begin{corollary}
\label{toyotacorolla}
Let $x_i,d_i,\,i=1,\ldots,n$ be a sequence of arbitrary input-output pairs as stated in Theorem \ref{th:reluach}. Then, there is a conditional network that, for every $i$, provides an output of $d_i$ whenever the input is $x_i$ by performing only $O(rq+q^2\log n)$ operations.
\end{corollary}
\begin{proof}
We apply the synthesis procedure in Theorem \ref{th:synth} to the network constructed in Theorem \ref{th:reluach}. An alternative, more ``direct'' proof (that does not need Theorem \ref{th:synth}) is to implement the gating blocks $\mathbf{S}_{ij}$ in Fig. \ref{fig:divideandconquer} via if-else conditioning statements, resulting in a similar architecture.
\end{proof}
We recall that for ReLU networks without conditional computation, the best achievability result \cite{vardi2021optimal} requires $O(\sqrt{n\log n})$ neurons and weights for memorizing a dataset of size $n$. Since the construction in \cite{vardi2021optimal} is not optimized for conditional computation, it is easily observed that every input activates all $O(\sqrt{n\log n})$ neurons and weights of the network, resulting in $O(\sqrt{n \log n})$ active weights per input. In fact, the remarkable construction in \cite{vardi2021optimal} is a narrow network of a finite width of only $12$, but depth $O(\sqrt{n\log n})$. Even if every input activates only one neuron at each layer, one obtains $O(\sqrt{n\log n})$ active neurons or arithmetic operations per input. In contrast, we only need $O(\log n)$ active weights or operations (via Corollary \ref{toyotacorolla}) per input.
The fact that one cannot fundamentally achieve a sub-polynomial number of neurons without conditional computation is an easy consequence of the VC dimension theory. In our setting, the VC dimension corresponds to the cardinality of the largest dataset that can be memorized. Hence, upper bounds on the VC dimension translate to lower bounds on the memorization capacity. In this context, \cite[Theorem 8.6]{anthony1999neural} provides an $O(n_w^2)$ upper bound on the VC dimension for ReLU networks, where $n_w$ denotes the number of weights in the network. Since any upper bound on the VC dimension is also an upper bound on the cardinality of the largest possible dataset that can be memorized, we have $n \in O(n_w^2)$. It follows that $n_w \in \Omega(\sqrt{n})$ weights are the best possible for ReLU networks without conditional computation, meaning that the results of \cite{vardi2021optimal} are optimal up to logarithmic factors. Also, using the bound $n_w \leq n_e^2$, where $n_e$ is the number of neurons in the network (equality holds in the extreme scenario where every neuron is connected to every other neuron, including itself), we obtain the necessity of $\Omega(n^{1/4})$ neurons.
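In symbols, the chain of bounds in the preceding paragraph can be summarized as
\begin{align*}
n \in O(n_w^2) \;\Longrightarrow\; n_w \in \Omega(\sqrt{n}),
\qquad
n_e \geq \sqrt{n_w} \;\Longrightarrow\; n_e \in \Omega(n^{1/4}),
\end{align*}
up to the constants implied by the VC dimension bound of \cite{anthony1999neural}.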
\begin{figure}
\caption{Dividing a set of points into two equal subsets.}
\label{fig:divideandconquera}
\caption{Continued divisions until reaching singletons.}
\label{fig:divideandconquerb}
\caption{The divide and conquer strategy.}
\label{fig:divideandconquer}
\end{figure}
\begin{figure}
\caption{An example network architecture for the achievability result. The block $\mathbf{T}$ is the basic preprocessing translation described in the proof sketch.}
\label{fig:nnforachievability}
\end{figure}
In contrast to the deep and narrow architecture that is optimal for unconditional computation, the network that achieves the performance in Theorem \ref{th:reluach} is considerably wider but much shallower. In fact, the proof in Appendix \ref{sec:reluachthproofer} reveals that our network has width $O(n)$, with $O(n)$ nodes, and depth $O(\log n)$. These numbers are a consequence of the classical binary decision tree that we utilize in our network construction. Thanks to conditional computation, although the network has $O(n)$ nodes, every dataset pattern activates only $O(\log n)$ neurons instead of the $O(\sqrt{n})$ neurons needed without conditional computation. Since $\log n$ grows much more slowly than $\sqrt{n}$, the number of neurons activated under conditional computation is asymptotically negligible relative to an unconditional network. Moreover, every neuron in the conditional computation network has at most $q$ weights. These two facts translate into large savings in the cost of computation, thanks to Theorem \ref{th:synth}. An interesting open problem that we leave as future work is whether one can achieve the same performance with a much smaller network size, e.g. with $O(\sqrt{n})$ neurons, which is known to be optimal. This would also help reduce the size of the network synthesized by Theorem \ref{th:synth}.
It should be mentioned that the bounds (\ref{eq:th1actneuronbound}) and (\ref{eq:th1actweightbound}) on the number of active neurons and weights stated in Theorem \ref{th:reluach} hold only for input patterns that belong to the dataset. For such patterns, only one path from the input to the output of the neural network is activated. The global behavior of the neural network for arbitrary inputs is more complex. A careful analysis of the construction in Appendix \ref{sec:reluachthproofer} reveals that near the decision boundaries (the lines in Fig. \ref{fig:divideandconquerb}), multiple paths of the network are activated. This results in more active neurons and weights than suggested by the upper bounds in (\ref{eq:th1actneuronbound}) and (\ref{eq:th1actweightbound}), respectively. However, the measure of such pathological inputs can be made arbitrarily small by tuning the switches appropriately.
\label{sec:reluachthdiscuss}
\section{Ultimate Computational Limits to Memory Recall}
\label{sec:reluconv}
In the previous section, we showed that $O(\log n)$ operations per input are sufficient to perfectly recall any one of $n$ input-output relationships. We now analyze the necessary number of operations per input for successful recall. Our main result in this context is provided by the following theorem.
\begin{theorem}
\label{th:reluconv}
Let the input vectors $x_1,\ldots,x_n\in\mathbb{R}^p$ and the corresponding desired output vectors $d_1,\ldots,d_n\in\mathbb{R}$ satisfy the following property:
\begin{itemize}
\item The matrix $\Bigl[\begin{matrix}x_{i_1} & \cdots & x_{i_{p+1}} \\ d_{i_1} & \cdots & d_{i_{p+1}} \end{matrix} \Bigr]$ has rank $p+1$ for any subset $\{i_1,\ldots,i_{p+1}\}$ of $\{1,\ldots,n\}$.
\end{itemize}
Suppose that a conditional network $f$ satisfies the desired input-output relationships: For every $i$, the output of the network is $d_i$ whenever the input is $x_i$. Also, assume that the number of operations performed on each $x_i$ is bounded above by some $\alpha \geq 0$. Then, we have $\alpha \geq \log_2 \frac{n}{p} \in \Omega(\log n)$.
\end{theorem}
\begin{proof}
Since there are at most $\alpha$ operations per input, there are at most $\alpha$ comparisons per input as well, counting the comparisons needed to implement the neuron activation functions. We can represent the entire conditional computation graph/network as a binary tree where the distance between the root and a leaf node is at most $\alpha$. This results in a tree with at most $2^{\alpha}$ leaf nodes. Each node of the tree compares real numbers to intermediate variables, which are linear functions of network inputs or other intermediate variables. Assume now, contrary to the statement of the theorem, that the number of operations for every input is at most $\alpha$, all input-output relationships are satisfied, but $n > 2^\alpha p$. Since there are at most $2^\alpha$ leaf nodes, there is at least one leaf node that admits $1+p$ inputs (i.e. there are $1+p$ input patterns of the dataset such that the corresponding path over the tree ends at the leaf node). Without loss of generality, suppose the indices for these inputs are $\{1,\ldots,1+p\}$. Writing down the input-output relationships of the network for the leaf node, we obtain
\begin{align}
\label{rouchecond}
[d_1 \cdots d_{1+p}] = W_0[x_1 \cdots x_{1+p}]
\end{align}
for some $1\times p$ matrix $W_0$. This relationship follows, as by fixing
a path on the tree, we obtain the unique linear transformation $W_0$ that maps the inputs of the neural network to its outputs. According to the Rouch\'{e}–Capelli theorem \cite[Theorem 2.38]{shafarevich2012linear}, a necessary condition for the existence of a $W_0$ solving (\ref{rouchecond}) is
\begin{align}
\mathrm{rank}([x_1 \cdots x_{p+1}]) = \mathrm{rank}\Bigl(\Bigl[\begin{matrix}x_1 & \cdots & x_{p+1} \\ d_{1} & \cdots & d_{p+1} \end{matrix} \Bigr]\Bigr).
\end{align}
On the other hand, the rank on the left-hand side is at most $p$ (the matrix has only $p$ rows), while the rank condition stated in the theorem forces the right-hand side to equal $1+p$. We arrive at a contradiction, which concludes the proof of the theorem.
\end{proof}
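For instance, consider the scalar case $p=1$ (a small illustration of the counting in the proof above): the rank condition states that no two pairs satisfy $x_id_j-x_jd_i=0$, so a single linear map $d=W_0x$ at a leaf can reproduce at most one of the pairs reaching that leaf. Each leaf therefore serves at most one pattern, a tree of depth $\alpha$ has at most $2^\alpha$ leaves, and memorizing all $n$ pairs forces $\alpha\geq\log_2 n$, in agreement with the bound of the theorem for $p=1$.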
The condition on the dataset and the desired outputs that appears in the statement of Theorem \ref{th:reluconv} is, up to a certain extent, necessary for the lower bound to hold. For example, if the desired outputs can simply be obtained as a linear transformation of the inputs, then one only needs to perform a constant number of operations for each input to obtain the desired outputs, and the lower bound becomes invalid. In this context, the rank condition ensures that subsets of outputs cannot be obtained as linear functions of inputs. However, it should also be mentioned that the condition is not particularly restrictive in limiting the class of datasets where the theorem holds. For example, if the components of the dataset members and the corresponding desired outputs are sampled in an independent and identically distributed manner from any continuous distribution with positive support, the rank condition holds with probability $1$. Hence, it can be argued that almost all datasets satisfy the rank condition and thus obey the converse result.
Corollary \ref{toyotacorolla} has shown that $n$ patterns can be memorized using $O(\log n)$ operations per input. The matching $\Omega(\log n)$ lower bound in Theorem \ref{th:reluconv} proves that the $\Theta(\log n)$ rate is the best possible. However, the two results do not resolve how the number of operations should scale with respect to the input and output dimensions.\footnote{Although Theorem \ref{th:reluconv} only considers scalar desired outputs, it can easily be extended to the multi-dimensional case. In fact, a successful memorization of, say, two-dimensional output vectors with $o(\log n)$ active neurons would imply the successful memorization of scalar outputs with the same number of neurons (simply by ignoring the neurons that provide the second component of the output), contradicting Theorem \ref{th:reluconv}.} This aspect of the problem is left as future work.
\section{Conclusions and Discussions}
\label{sec:conclusions}
We have studied the fundamental limits to the memorization capacity of neural networks with conditional computation. First, we have described a general procedure to synthesize a conditional network out of an ordinary unconditional feedforward network. According to the procedure, the number of operations required to perform inference on an input in the synthesized conditional network becomes proportional to the number of so-called ``active weights'' of the unconditional network given the same input. This reduces the problem of designing good conditional networks to the problem of designing ordinary feedforward networks with a low number of active weights or nodes. Using this idea, we have shown that for ReLU networks, $\Theta(\log n)$ operations per input is necessary and sufficient for memorizing a dataset of $n$ patterns. An unconditional network requires $\Omega(\sqrt{n})$ operations to achieve the same performance. We also described a method to synthesize a conditional network out of an unconditional network in a computationally-efficient manner.
One direction for future work is to study the memorization capacity under a sum constraint, as opposed to a per-input constraint, on the number of operations. While a per-input constraint makes sense for delay-sensitive applications, the sum-constrained scenario is also relevant for early-exit architectures, where there is a lot of variation in the size of the active components of the network. Extensions of our results to different activation functions or to networks with bounded bit complexity can also be considered. In this context, \cite{vardi2021optimal} shows that, for every $\epsilon\in[0,\frac{1}{2}]$, $\Theta(n^{\epsilon})$ weights with $\Theta(n^{1-\epsilon})$ bit complexity is optimal for memorizing $n$ patterns, up to logarithmic factors. This result was proven under a mild separability condition, which requires distinct dataset patterns to be $\delta$-separated in terms of Euclidean distance. The optimality of the results of \cite{vardi2021optimal} suggests that under a similar separability condition, the bit complexity of our designs can also potentially be reduced without loss of optimality. This remains another interesting direction for future work.
Many neural network architectures rely heavily on batch computation because matrix multiplications can be performed very efficiently on modern processors. In this context, one disadvantage of conditional architectures is their general misalignment with the idea of batching. Nevertheless, if there are not too many branches in the network, and if the branch loads are balanced, each subbranch can still receive a relatively large batch size. Fortunately, even a small number of gates can already yield significant performance gains \cite{fedus2022switch}. More work is needed, however, to make networks with aggressive conditioning more efficient in the batch setting. In this context, our synthesis theorem can potentially enable conditional networks to be trained as if they were unconditional, enabling batch computation. We note that in techniques like soft-gating \cite{shazeer2017outrageously}, a batch also traverses the entire computation graph during training \cite{kaya2019shallow}.
The main goal of this paper has been to derive theoretical bounds on the memorization capacity. More research is clearly needed for practical methods to train neural networks that can effectively utilize conditional computation and also generalize well. We hope that the constructions and theoretical bounds provided in this paper will motivate further research in the area.
\subsubsection*{Acknowledgments}
This work was supported in part by the National Science Foundation (NSF) under Grant CNS-2148182 and in part by the Army Research Lab (ARL) under Grant W911NF-2120272.
\appendix
\section{Proof of Theorem \ref{th:synth}}
\label{sec:syntthproof}
\input{syntthproof}
\section{Proof of Theorem \ref{th:reluach}}
\label{sec:reluachthproofer}
We explicitly construct the neural network that achieves the performance as provided in the theorem statement. Our construction relies on several steps as described in the following:
{\bf Step-1:} Let $u = [u_1\, u_2 \cdots u_q]^T$ be the input to the neural network, where $u_1 = 1$. Let $\bar{u} = [\bar{u}_1\, \bar{u}_2 \cdots \bar{u}_q]^T$ represent the output of the first layer. First, we translate the dataset vectors such that every component of the translated vectors is positive. We also provide a skip connection for the constant input $1$ at the first component. For the former purpose, given $j\in\{1,\ldots,p\}$, let $x_{ij}$ denote the $j$th component of dataset pattern $x_i$. We define the constant
\begin{align}
M = 1+\max_{i\in\{1,\ldots,n\}}\max_{j\in\{1,\ldots,p\}} |x_{ij}|.
\end{align}
The input-output relationships of the first layer are then expressed as
\begin{align}
\bar{u}_1 & = u_1 = 1. \\
\bar{u}_j & = \phi\biggl( \Bigl[\begin{matrix} u_1 \\ u_j \end{matrix} \Bigr]^T\Bigl[\begin{matrix} M \\ 1 \end{matrix} \Bigr] \biggr),\,j=2,\ldots,q.
\end{align}
In particular, if $u = \bar{x}_i$ for some $i\in\{1,\ldots,n\}$, then
\begin{align}
\bar{u}_j = \phi(u_j + M) = u_j + M,\,j=2,\ldots,q.
\end{align}
The last equality follows as $u_j + M = x_{ij} + M$ is positive by the definition of $M$. Hence, for the dataset members, the output after the first layer of neurons is an additive translation by the vector $[0\,M\cdots M]^T$. There are $p=q-1$ active neurons in the first layer.
{\bf Step-2:} For a clearer exposition, we first describe the step through an example. We follow a divide and conquer strategy resembling a binary search, which is illustrated in Fig. \ref{fig:divideandconquerduplicate}. Suppose that the inputs are two-dimensional ($p=2$) and the outputs are scalars ($r=1$). In Fig. \ref{fig:divideandconqueraduplicate}, we show the second and the third components $[\bar{u}_2\,\bar{u}_3]^T$ of $7$ dataset patterns after the first layer, together with the indices of the patterns. We also show a line $w_{11}^T\bar{u} = 0$ that separates the set of points into two subsets such that one side of the line contains $\lceil \frac{7}{2} \rceil = 4$ points, and the other side of the line contains the remaining $3$ points. We will formally show later that, given $m$ points, a separation where one side of the hyperplane contains exactly $\lceil \frac{m}{2} \rceil$ of the points is always possible.
\begin{figure}
\caption{Dividing a set of points into two equal subsets.}
\label{fig:divideandconqueraduplicate}
\caption{Continued divisions until reaching singletons.}
\label{fig:divideandconquerbduplicate}
\caption{The divide and conquer strategy. This figure is a duplicate of Fig. \ref{fig:divideandconquer}, reproduced here for convenience.}
\label{fig:divideandconquerduplicate}
\end{figure}
Once an even division or separation is achieved, the next step is to design what we call a ``switching network''. Switching networks are small two-layer neural networks that can be parametrized by any number of points greater than $1$. Let $0_q$ denote the $q$-dimensional all-zero column vector. Roughly speaking, for the scenario in Fig. \ref{fig:divideandconqueraduplicate}, the corresponding switching network maps the input $\bar{u}\in\mathbb{R}^3$ to $[\begin{smallmatrix} \bar{u} \\ 0_3 \end{smallmatrix}]\in\mathbb{R}^6$ if $\bar{u}$ remains ``above'' the line $w_{11}^T \bar{u} = 0$, and to $[\begin{smallmatrix} 0_3 \\ \bar{u} \end{smallmatrix}]$ if $\bar{u}$ remains ``below'' the line.\footnote{More precisely, there will be a small margin around the separating hyperplane where the aforementioned input-output relationships may fail. It turns out that this technical complication poses no issues for the patterns that we wish to memorize, as the margin can be made arbitrarily small.} We can now feed the first $3$ components of the output of the switch to one subnetwork, and the last $3$ components to another subnetwork. The two subnetworks are disconnected except that they share different components of the same output as inputs. The first subnetwork follows the same divide and conquer strategy with a switch, but only for the four points that remain over the line $w_{11}^T \bar{u} = 0$.
The second subnetwork similarly processes the three points that remain under the line $w_{11}^T \bar{u} = 0$. The goal of the all-zero outputs is to deactivate one subnetwork when it is no longer relevant to process a given input. Subnetworks have ``subsubnetworks'' and so on until one arrives at a singleton dataset sample at each partition, as shown in Fig. \ref{fig:divideandconquerbduplicate}.
Before proceeding to the next step, we formalize the constructions in Step 2 via the following lemma.
\begin{lemma}
\label{lemma:switch}
For $m \geq 2$, let $a_1,\ldots,a_m\in\mathbb{R}^{q}$ be distinct input patterns whose first components equal $1$.
\begin{enumerate}[label={[\roman*]}]
\item There exists $w\in\mathbb{R}^{q}$, such that
\begin{align}
\label{eq:wcond1} |\{i:w^Ta_i < 0\}| & = \lceil \tfrac{m}{2} \rceil, \\
\label{eq:wcond2} |\{i:w^Ta_i > 0\}| & = m-\lceil \tfrac{m}{2} \rceil.
\end{align}
\item Suppose further that the components of the $a_i$s are all non-negative. Let $w\in\mathbb{R}^{q}$ satisfy (\ref{eq:wcond1}) and (\ref{eq:wcond2}). Let $0_q$ represent the $q$-dimensional all-zero vector. There is a two-layer network $\mathbf{S}:\mathbb{R}^q \rightarrow \mathbb{R}^{2q}$
that satisfies the input-output relationships
\begin{align}
\label{eq:saicond}
\mathbf{S}(a_i) = \left\{\begin{array}{rl} \Bigl[\begin{matrix} a_i \\ 0_q \end{matrix}\Bigr], & w^Ta_i < 0, \\[2ex] \Bigl[\begin{matrix} 0_q \\ a_i \end{matrix}\Bigr], & w^Ta_i > 0. \end{array} \right.,\,i=1,\ldots,m.
\end{align}
The network has $2q+2$ neurons. Two of the $2q+2$ neurons have $q$ weights, and the remaining $2q$ neurons have $2$ weights.
\end{enumerate}
\end{lemma}
\begin{proof}
Let us first prove [i]. Let $\bar{a}_{i} = [a_{i,2},\ldots,a_{i,q}]^T$ denote the $(q-1)$-dimensional vector consisting of all but the first component of $a_i$ (which equals $1$). First, we show the existence of $\bar{w}\in\mathbb{R}^{q-1}$ such that $\bar{w}^T \bar{a}_i \neq \bar{w}^T \bar{a}_j,\,\forall i \neq j$, or equivalently
\begin{align}
\label{desiredwproperty} \bar{w}^T(\bar{a}_i - \bar{a}_j) \neq 0,\,\forall i \neq j.
\end{align}
Since the input patterns $a_i$s are distinct, so are the $\bar{a}_i$s, and thus the $\bar{a}_i - \bar{a}_j$ are non-zero vectors. It follows that any unit norm $\bar{w}$ sampled uniformly at random on $\{x\in\mathbb{R}^{q-1}:\|x\|=1\}$ satisfies (\ref{desiredwproperty}) with probability $1$. Let us now order the resulting $(\bar{w}^T\bar{a}_i)$s in ascending order as
\begin{align}
\bar{w}^T\bar{a}_{i_1} < \cdots < \bar{w}^T\bar{a}_{i_{\lceil \frac{m}{2} \rceil}} < \bar{w}^T\bar{a}_{i_{\lceil \frac{m}{2} \rceil+1}} < \cdots < \bar{w}^T\bar{a}_{i_{m}},
\end{align}
for some permutation $i_1,\ldots,i_m$ of $1,\ldots,m$. We can now tune the bias as
\begin{align}
\bar{\bar{w}} \triangleq - \frac{1}{2}\left(\bar{w}^T\bar{a}_{i_{\lceil \frac{m}{2} \rceil}} + \bar{w}^T\bar{a}_{i_{\lceil \frac{m}{2} \rceil+1}}\right).
\end{align}
The effect of the bias is the ordering (note the zero in the middle)
\begin{align}
\bar{\bar{w}} +\bar{w}^T\bar{a}_{i_1} < \cdots < \bar{\bar{w}} +\bar{w}^T\bar{a}_{i_{\lceil \frac{m}{2} \rceil}} < 0 < \bar{\bar{w}} +\bar{w}^T\bar{a}_{i_{\lceil \frac{m}{2} \rceil+1}} < \cdots < \bar{\bar{w}} +\bar{w}^T\bar{a}_{i_{m}}.
\end{align}
Therefore, the choice $w = [\begin{smallmatrix} \bar{\bar{w}} \\ {\bar{w}} \end{smallmatrix}]$ satisfies conditions (\ref{eq:wcond1}) and (\ref{eq:wcond2}). This concludes the proof of [i].
We now prove [ii]. Let
\begin{align}
C_1 & \triangleq 1+\max_{i\in\{1,\ldots,m\}}\max_{j\in\{1,\ldots,q\}} |a_{ij}|, \\
C_2 & \triangleq \min_{i\in\{1,\ldots,m\}} |w^Ta_{i}|.
\end{align}
Let $v =[v_1 \cdots v_{q}]^T\in \mathbb{R}^q$ represent an input to the neural network $\mathbf{S}$ that we shall construct. Also, let $v^+ = [v_1^+ \cdots v_{q}^+]^T$ and $v^-=[v_1^- \cdots v_{q}^-]^T \in \mathbb{R}^{q}$ denote the first and the last $q$ components of the $2q$-dimensional output of $\mathbf{S}$. We thus have $\mathbf{S}(v) = [\begin{smallmatrix} v^+ \\ v^- \end{smallmatrix}]$, and set
\begin{align}
v_j^+ & = \phi\Biggl( \Bigl[\begin{matrix} v_j \\ y^+ \end{matrix} \Bigr]^T\Bigl[\begin{matrix} 1 \\ -\frac{C_1}{C_2} \end{matrix} \Bigr] \Biggr),\quad v_j^- = \phi\Biggl( \Bigl[\begin{matrix} v_j \\ y^- \end{matrix} \Bigr]^T\Bigl[\begin{matrix} 1 \\ -\frac{C_1}{C_2} \end{matrix} \Bigr] \Biggr),\quad j\in\{1,\ldots,q\},
\end{align}
where $y^+ = \phi(w^T v)$ and $y^- = \phi(-w^T v)$. Let us now verify (\ref{eq:saicond}). Consider some index $i$ with $w^T a_i < 0$, and suppose $v = a_i$. For any $j\in\{1,\ldots,q\}$, we have
\begin{align}
v_j^+ & = \phi(v_j - \tfrac{C_1}{C_2}y^+) = \phi(v_j) = \phi(a_{ij}) = a_{ij}.
\end{align}
The last equality holds as the components of $a_i$ are assumed to be all non-negative. Also,
\begin{align}
v_j^- = \phi(v_j - \tfrac{C_1}{C_2}y^-)
= \phi(a_{ij} - \tfrac{C_1}{C_2}|w^Ta_i|) \leq \phi(a_{ij} - C_1) = 0.
\end{align}
The inequality follows as $\frac{|w^Ta_i|}{C_2} \geq 1$ and $\phi$ is monotonic. Since $v_j^- \geq 0$ obviously holds, we have $v_j^- = 0$. This proves the case $w^Ta_i < 0$ in (\ref{eq:saicond}). The remaining case $w^Ta_i > 0$ can be verified in a similar manner. This concludes the proof of [ii], and thus that of Lemma \ref{lemma:switch} as well.
\end{proof}
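The switch above is built entirely from ReLU units, so its routing behavior can be checked numerically. The following small sketch (ours, not the authors' code; all variable names are arbitrary) instantiates the weight vector $w$ of part [i] and the two-layer switch of part [ii] for a random nonnegative pattern set whose first components equal $1$, and verifies the input-output relationships (\ref{eq:saicond}).
\begin{verbatim}
import numpy as np

# Numerical check of the switch construction: a ReLU-only network that routes
# each pattern a_i to the top or bottom half of the output according to the
# sign of w^T a_i (cf. eq:saicond).

relu = lambda z: np.maximum(z, 0.0)
rng = np.random.default_rng(1)

m, q = 8, 4
A = np.hstack([np.ones((m, 1)), rng.uniform(0.5, 3.0, size=(m, q - 1))])

# part [i]: random direction on the last q-1 coordinates, bias placed between
# the ceil(m/2)-th and (ceil(m/2)+1)-th ordered projections
w_bar = rng.normal(size=q - 1)
proj = A[:, 1:] @ w_bar
order = np.argsort(proj)
k = (m + 1) // 2
w = np.concatenate([[-0.5 * (proj[order[k - 1]] + proj[order[k]])], w_bar])

C1 = 1 + np.abs(A).max()
C2 = np.abs(A @ w).min()

def switch(v):                                    # part [ii]: the two-layer network S
    y_plus, y_minus = relu(w @ v), relu(-(w @ v))
    return np.concatenate([relu(v - (C1 / C2) * y_plus),
                           relu(v - (C1 / C2) * y_minus)])

for a in A:
    out = switch(a)
    if w @ a < 0:
        assert np.allclose(out[:q], a) and np.allclose(out[q:], 0)
    else:
        assert np.allclose(out[q:], a) and np.allclose(out[:q], 0)
print("switch routes all", m, "patterns as in the lemma")
\end{verbatim}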
\begin{figure}
\caption{An example network architecture for the achievability result (a duplicate of Fig. \ref{fig:nnforachievability}). The block $\mathbf{T}$ is the preprocessing translation of Step 1.}
\label{fig:nnforachievabilityduplicate}
\end{figure}
{\bf Step-3: } We can now proceed to describe the full network architecture, as shown in Fig. \ref{fig:nnforachievabilityduplicate} for the example in Step 2. The first layer of the network is an additive translation by the vector $[0\,M\cdots M]^T$, and is explained in Step 1 above. We use the notation $\mathbf{T}$ to denote the translation, which is followed by a sequence of switches as described in Step 2. In particular, $\mathbf{S}_{ij}$ provides the outputs $\bar{u}$ and $0_3$ to its top and bottom branches, respectively, if its input $\bar{u}$ remains above the line defined by $w_{ij}$ in Fig. \ref{fig:divideandconquerbduplicate}.
By construction, the neurons on a given path of switches are activated for a unique dataset member. For example, the path $\mathbf{S}_{11},\mathbf{S}_{21},\mathbf{S}_{32}$ is activated only for the dataset member $x_6$. The path of switches that corresponds to some $x_i$ is followed by a ReLU neuron whose weights satisfy the property that $\phi(\gamma_i^T \bar{\bar{x}}_i) = d_i$, where $\bar{\bar{x}}_i$ is the output of the first layer when the input to the network is $\bar{x}_i$. Since the first component of $\bar{\bar{x}}_i$ equals $1$ for any $i$, one can simply set the first component of $\gamma_i$ to be equal to $d_i$, and the rest of the components of $\gamma_i$ to be equal to zero. The final layer of the network simply adds all the outputs from the $\gamma_i$-neurons.
In the figure, we also show the induced signals on the branches when the input is $u=\bar{x}_6$ as an example. Note that only the block $\mathbf{T}$, the switches $\mathbf{S}_{11},\mathbf{S}_{21},\mathbf{S}_{32}$, the neuron with weight $\gamma_6^T$, and a subset of the summation neurons remain active. Most of the neurons of the network are deactivated through zero signals. We also note that the desired output signal $d_6$ is obtained.
The construction generalizes to an arbitrary dataset of cardinality $n$ in the same manner. The only difference is that, in order to support $r$-dimensional desired outputs as stated in the theorem, we need to use $r$ ReLU units in place of each $\gamma_i$ to reproduce the $r$ components of the desired output, as opposed to a single ReLU unit in the example above. Also, the final summation unit will consist of $r$ sub-summation units operating on individual components. Formally, the first layer for the general case is the translation $\mathbf{T}$ as before. Next, $\lceil \log_2 n \rceil$ layers of switches arranged on a binary tree structure act on the translated inputs, forming $n$ leaves as the output, where each leaf has the same dimension as the input. Let $h_1,\ldots,h_n$ denote the feature vectors produced at the leaf nodes after the switches. By Lemma \ref{lemma:switch}, and a rearrangement of indices, we can guarantee that for every $i$, if the network input is $\bar{x}_i$, then $h_i = \bar{\bar{x}}_i$ and $h_j = 0,\,\forall j \neq i$. Here, $\bar{\bar{x}}_i = \mathbf{T}([1\,\, x_i^T]^T)$, as defined in Fig. \ref{fig:nnforachievabilityduplicate}. Each leaf is then followed by a single-layer network that maps $\bar{\bar{x}}_i$ to its corresponding desired output vector $d_i$. Specifically, there exists $U_i$ such that $d_i = \phi(U_i \bar{\bar{x}}_i)$. Indeed, since $\sum_k u_{i,j,k}\bar{\bar{x}}_{i,k} = d_{i,j}$ has to be satisfied, one can pick $u_{i,j,k}=0,\,k\neq 1$ and $u_{i,j,1} = d_{i,j}/\bar{\bar{x}}_{i,1} = d_{i,j}$, where the last equality holds as $\bar{\bar{x}}_{i,1}=1$ by construction. The overall network output is the accumulation of the outputs of the $U_i$-neurons, and is given by $f(\bar{x}_i) = \phi\bigl(\sum_{j=1}^n \phi(U_jh_j)\bigr)$. Let us show that all input-output relationships are satisfied so that the claim $f(\bar{x}_i) = d_i,\,\forall i$ in the statement of the theorem holds. If the input is $\bar{x}_i$, we have $h_i = \bar{\bar{x}}_i$ and $h_j = 0,\,j\neq i$, by construction. As a result, $f(\bar{x}_i) = \phi(\phi(U_i \bar{\bar{x}}_i)) = \phi(d_i) = d_i$, as desired. The last equality holds as the components of $d_i$ are assumed nonnegative in the statement of the theorem (a ReLU network cannot produce a negative output).
Let us now calculate the number of active neurons and weights when the input is any member of the dataset. The block $\mathbf{T}$ always remains active and consists of $q-1$ neurons with $2$ weights each. There are at most $\lceil \log_2 n \rceil$ active switches per input. Each switch contains $2q+2$ neurons with $6q$ weights in total. There is one active block $\gamma_i$ consisting of $r$ neurons with $q$ weights each. Finally, the sum unit has $r$ active weights. Hence, an upper bound on the total number of active neurons is given by
\begin{align}
q-1 + (2q+2)\lceil \log_2 n \rceil+r+1 & = 2(q+1)\lceil \log_2 n \rceil + q+r\\
& \in O(r+q\log n),
\end{align}
and an upper bound on the number of active weights can be calculated to be
\begin{align}
2(q-1) + 6q(2q+2)\lceil \log_2 n \rceil + rq+r & =12q(q+1)\lceil \log_2 n \rceil + (r+2)q+r-2 \\
& \in O(rq+q^2\log n).
\end{align}
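As a quick sanity check of these expressions (our own illustration, not part of the original argument), take $n=1024$, $q=3$, and $r=1$: the bounds evaluate to $2\cdot 4\cdot 10+3+1=84$ active neurons and $12\cdot 3\cdot 4\cdot 10+3\cdot 3+1-2=1448$ active weights per dataset input, whereas the full network constructed above has $n=1024$ leaves and several thousand neurons in total.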
This concludes the proof of the theorem.
\end{document}
\begin{document}
\author{Fran\c cois Baccelli\thanks{[email protected]} \\{\small University of Texas at Austin}
\and Mir-Omid Haji-Mirsadeghi\thanks{[email protected]}\\{\small Sharif University of Technology}}
\title{Point-Map-Probabilities of a Point Process \\
and Mecke's Invariant Measure Equation}
\date{}
\maketitle
\begin{abstract}
A compatible point-shift $\f$ maps, in a translation invariant way,
each point of a stationary point process $\Phi$ to some point of $\Phi$.
It is fully determined by its associated point-map, $f$, which gives
the image of the origin by $\f$. It was proved by
J. Mecke that if $\f$ is bijective, then the Palm probability of
$\Phi$ is left invariant by the translation of $-f$.
The initial question motivating this paper is the following
generalization of this invariance result: in
the non-bijective case, what probability measures
on the set of counting measures are left invariant
by the translation of $-f$? The point-map-probabilities of
$\Phi$ are defined from the action of the semigroup of point-map
translations on the space of Palm probabilities, and more precisely from
the compactification of the orbits of this semigroup action.
If the point-map-probability exists, is uniquely defined, and if
it satisfies certain continuity properties,
it then provides a solution to this invariant measure problem.
Point-map-probabilities are objects of independent interest.
They are shown to be a strict generalization of Palm probabilities:
when $\f$ is bijective, the point-map-probability
of $\Phi$ boils down to the Palm probability of $\Phi$.
When it is not bijective, there exist cases where the point-map-probability
of $\Phi$ is singular with respect to its Palm probability.
A tightness based criterion for the existence of the point-map-probabilities
of a stationary point process is given.
An interpretation of the point-map-probability as the conditional law of the
point process given that the origin has $\f$-pre-images of all orders is
also provided. The results are illustrated by a few examples.
\end{abstract}
{\bf Key words:} Point process, Stationarity, Palm probability, Point-shift, Point-map, Allocation rule, Vague topology, Mass transport principle, Dynamical system, $\omega$-limit set.
\noindent{\bf MSC 2010 subject classification:} Primary: 60G10, 60G55, 60G57; Secondary: 60G30, 60F17.
\section*{Introduction}
A \emph{point-shift} is a mapping which is defined on all discrete
subsets $\phi$ of $\sp$ and maps each point $x\in\phi$ to some
point $y\in\phi$; i.e., if $\f$ is a point-shift, for all discrete
$\phi\subset\sp$ and all $x\in\phi$, $\f(\phi,x)\in\phi$.
Bijective point-shifts were studied in a seminal paper by
J. Mecke in \cite{Me75}.
The concept of point-map was introduced by
H. Thorisson (see \cite{Th00} and the references therein).
Point-maps were further studied
by M. Heveling and G. Last
\cite{HeLa05}.
The latter reference also contains
a short proof of Mecke's invariance theorem.
Point-shifts are also known as allocation rules (see e.g. \cite{LaTh09}).
A point-shift is \emph{compatible with the translations of $\sp$},
or simply \emph{compatible}, if
\begin{equation*}
\forall t\in\sp, \quad \f(\phi+t,x+t)=\f(\phi,x)+t.
\end{equation*}
As will be seen, a translation invariant point-shift $\f$ is
fully determined by its point-map $f$, which associates to
all $\phi$ containing the origin the image of the latter by $\f$, i.e.,
$f(\phi)=\f(\phi,0)$.
The point-shift $\f$ is called bijective on the point process $\Phi$ if,
for almost all realizations $\phi$ of the point process,
$\f(\phi,\cdot)$ is bijective on the set $\phi$.
The Palm probability of a translation invariant point process
$\Phi$ is often intuitively described as the distribution of $\Phi$ conditionally
on the presence of a point at the origin.
This definition was formalized by C. Ryll-Nardzewski \cite{RN} based on
the Matthes definition of Palm probabilities
(see e.g. \cite{baccelli03elements}). This is the so called
{\mathbf Mathbb Em local interpretation} of the latter.
The presence of a point at the origin makes
the Palm distribution of $\Phi$ singular with respect to (w.r.t.) the
translation invariant distribution of $\Phi$.
The present paper is focused on the
\mathbf Mathbb Pomk{}-probabilities (or the $f$-probabilities) of $\Phi$.
Under conditions described in the paper, the $f$-pro\-ba\-bi\-li\-ties can
be described as the law of $\Phi$ conditionally
on the event that the origin has $\f$-pre-images of all
orders (Theorem \ref{thmloc}). This event
is not of positive probability in general,
and hence it is not possible to define this conditional probability
in the usual way.
The first aim of this paper is to make this definition rigorous.
The proposed construction is based on dynamical system theory.
The action of the semigroup of translations by $-f$
on probability distributions on counting measures
having a point at the origin is considered;
the $f$-probabilities of $\Phi$ are then defined as the $\omegaega$-limits
of the orbit of this semigroup action on the Palm distribution of $\Phi$
(Definition \ref{deffond}).
As the space of probability
distributions on counting measures is not compact, the existence
of $f$-probabilities of $\Phi$ is not granted.
A necessary and sufficient condition for their existence
is given in Lemma \ref{kalen}. Uniqueness is not granted either.
An instance of construction of the $f$-probabilities of Poisson point processes
where one has existence and
uniqueness is given in Theorem \ref{regenver}.
It is shown in Section \ref{summary} that,
when they exist, \mathbf Mathbb Pomk{}-probabilities
generalize Palm probabilities.
A key notion to see this is that of
{\mathbf Mathbb Em evaporation}. One says that there is evaporation
when the image of $\Phi$ by the
$n$-th iterate of $F$ tends to the empty counting measure
for $n$ tending to infinity.
When there is no evaporation,
the $f$-probabilities of $\Phi$ are just the Palm
distributions of $\Phi$ w.r.t. certain translation invariant thinnings of $\Phi$
and they are then absolutely continuous w.r.t. the Palm distribution
${\cp_0}$ of $\Phi$; in particular, if $\f$ is bijective, then
the $f$-probability of $\Phi$ exists, is uniquely defined, and coincides with ${\cal P}_0$.
However, in the evaporation case,
the $f$-probabilities of $\Phi$ do not admit a representation
of this type and they are actually singular w.r.t. ${\cp_0}$
(Theorem \ref{theevasin}).
It is also shown in Theorem \ref{continv}
that, under appropriate continuity properties on $f$,
a certain mixture of the $f$-probabilities
of $\Phi$ is left invariant by the shift of $-f$.
This generalizes Mecke's point stationarity theorem which states that
if $\f(\Phi,\cdot)$ is bijective and
if $\Phi$ is distributed according to ${\cal P}_0$,
then so is $\Phi-f$.
Section \ref{pernot} contains
the basic definitions and notation used in the paper, together
with a small set of key examples.
Section \ref{summary} gathers the main results and proofs.
Several more examples are discussed in Section~\ref{proofs}.
The basic tools of point process theory and
dynamical system theory used in the paper are
summarized in the appendix.
\section{Preliminaries and Notation}\label{pernot}
\subsection{General Notation} Each measurable mapping
$h:(X,{\cal X})\to(X^\mathbf Mathbb Prime,{\cal X}^\mathbf Mathbb Prime)$
between two measurable spaces induces a measurable mapping
$h_*:M(X)\to M(X^\mathbf Mathbb Prime)$, where $M(X)$ is the set of
all measures on $X$:
if $\mathbf Mu$ is a measure on $(X,{\cal X})$, $h_*\mathbf Mu$ is the
measure on $(X',{\cal X}')$ defined by
\mathbf Mathbb Eq{\label{indmeasu}h_*\mathbf Mu(A):= (h_*\mathbf Mu)(A)=\mathbf Mu(h^{-1} A) .}
Note that if $\mathbf Mu$ is a probability measure, $h_*\mathbf Mu$ is also
a probability measure.
\subsection{Point Processes}
Let $\mathbf N=\mathbf N(\sp)$ be the space of all locally finite counting measures
(not necessarily simple) on $\sp$.
One can identify
each element of $\mathbf N$ with the associated multi-subset of $\sp$.
The notation $\mathbf Mathbb Phi$ will be used
to denote either the measure or the related multi-set.
Let ${\cal N}$ be the
Borel $\sigma$-field with respect
to the vague topology on the space of counting measures
(see Subsection \ref{RandMeas} in appendix for more on this subject).
The measurable space $(\mathbf N,{\cal N})$ is the
\mathbf Mathbb Emph{canonical space} of point processes.
The \mathbf Mathbb Emph{support} of a counting measure $\mathbf Mathbb Phi$ is the same set
as the multi-set related to $\mathbf Mathbb Phi$, but without the multiplicities, and
it is denoted by $\overline\mathbf Mathbb Phi$.
The set of all counting measure supports
is denoted by $\overline\mathbf N$, i.e.,
$\overline\mathbf N$ is the set of all \mathbf Mathbb Emph{simple} counting measures.
${\cal N}$ naturally induces a $\sigma$-field $\overline{\cal N}$ on $\overline\mathbf N$.
Let $\mathbf N^0$ (respectively, $\overline\mathbf N^0$) denote the set of
all elements of $\mathbf N$ (respectively, $\overline\mathbf N$) which contain
the origin, i.e., for all $\mathbf Mathbb Phi\in \mathbf N^0$
(respectively, $\mathbf Mathbb Phi\in\overline\mathbf N^0$), one has $0\in\mathbf Mathbb Phi$.
A $\mathbf Mathbb Emph{point process}$ is a
couple $\mathbf Mathbb Php$ where $\mathbf Mathbb P$ is a probability measure on a
measurable space $(\Omega,\sf)$ and $\Phi$ is a measurable mapping
from $(\Omega,\sf)$ to $(\mathbf N,{\cal N})$.
If $(\Omega,\sf)=(\mathbf N,{\cal N})$ and $\Phi$ is the identity on $\mathbf N$,
the point process is defined on the canonical space.
Calligraphic letters ${\cal P},{\cal Q},\ellots$
(resp. blackboard bold letters $\mathbf Mathbb P,\mathbb Q,\ellots$) will be used
for probability measures defined on the canonical space
(resp. on $(\Omega,\sf)$).
The \mathbf Mathbb Emph{canonical version} of a point process $(\Phi,\mathbf Mathbb P)$ is
the point process $(\text{id},\mathbf Mathbb Ps\mathbf Mathbb P)$ which is defined on the canonical space.
Here $\text{id}$ denotes the identity on $\mathbf N$.
\subsection{Stationary Point Processes}
Whenever $(\sp,+)$ acts (in a measurable way) on a space,
the action of $t\in\sp$ on that space will be denoted by $\theta_fa_t$.
It is assumed that $(\sp,+)$ acts on the reference
probability space $(\Omegaega,\sf)$, or equivalently
that this space is equipped with a measurable
flow $\theta_fa_t:\Omegaega\rightarrow \Omegaega$, with $t$ ranging over $\sp$.
This is a family of mappings such that
$(\omega,t)\mathbf Mapsto\theta_fa_t\omega$ is measurable, $\theta_fa_0$ is the identity
on $\Omega$ and \mathbf Mathbb Eqn{\theta_fa_s\circ\theta_fa_t = \theta_fa_{s+t}.}
A point process $\Phi$ is then said to be \mathbf Mathbb Emph{compatible} if
\mathbf Mathbb Eq{\label{comp.crit}\Phi(\theta_fa_t\omegaega,B-t)=\Phi(\omegaega, B), \mathbb Quad f_\omegaegarall\omegaega\in\Omegaega, t\in\sp,B\in{\cal B},}
where by convention,
$\Phi(\omega,B):=(\Phi(\omega))(B).$
Here ${\cal B}$ denotes the Borel $\sigma$-algebra on $\R^d$.
The action $\theta_fa_t$ of $t\in \sp$ can also be
used on the space of counting measures to denote the translation by $-t$.
For a counting measure $\mathbf Mathbb Phi\in\mathbf N(\sp)$,
$\theta_fa_t\mathbf Mathbb Phi$ is then the counting measure defined by
$ \theta_fa_t\mathbf Mathbb Phi(B)=\mathbf Mathbb Phi(B+t).$
Using this notation, the compatibility criterion (\ref{comp.crit})
can be rewritten as
\mathbf Mathbb Eqn{\Phi\circ\theta_fa_t=\theta_fa_t\circ \Phi.}
Note that for consistency reasons,
the action $\theta_fa_t$ of $t\in \sp$ on $\sp$ itself is then
$\theta_fa_t x= x-t,\mathbb Quad f_\omegaegarall x\in \sp.$
The probability measure $\mathbf Mathbb P$ on $(\Omegaega,\sf)$ is $\theta_fa_t$-invariant if
$(\theta_fa_{t})_*\mathbf Mathbb P=\mathbf Mathbb P$.
If, for all $t\in\sp$, $\mathbf Mathbb P$ is $\theta_fa_t$-invariant, it is called stationary.
Below, a \mathbf Mathbb Emph{stationary point process}
is a point process $(\Phi,\mathbf Mathbb P)$ such that $\Phi$ is compatible
and $\mathbf Mathbb P$ is stationary.
When the point process is simple and stationary with a non-degenerate
(positive and finite) intensity, its
\mathbf Mathbb Emph{Palm probability} is a classical object in the literature.
The Palm probability
of a general (i.e., not necessarily simple) point process $\Phi$
is defined by
\mathbf Mathbb Eq{\label {pag}{\p_\Phi}[A] :=\frac 1 {\lambda \mathbf No B} \int_{\Omegaega}
\int_{B} \mathbf Mathbf {1} \{\theta_fa_t\omega\in A\} \Phi(\omega,\d t)\mathbf Mathbb P [\d\omega],}
for all $A \in {\sf}$,
and for all Borel sets $B\subset \R^d$ with a non-degenerate
(positive and finite)
Lebesgue measure. Note that the multiplicity of the atoms
of $\Phi$ is taken into account in the last definition.
If a point process $(\Phi,\mathbf Mathbb P)$ is stationary and
has a non-degenerate intensity, the pair
$(\Phi,{\p_\Phi})$ is called the \mathbf Mathbb Emph{Palm version}
of $(\Phi,\mathbf Mathbb P)$.
Expectation w.r.t. ${\p_\Phi}$ will be denoted by ${\e_\Phi}$.
Whenever the context specifies
a reference point process $(\Phi,\mathbf Mathbb P)$, the short notation
${\cal P}$ will be used to denote its distribution: i.e., ${\cal P}=\mathbf Mathbb Ps\mathbf Mathbb P$.
If in addition, $\Phi$ is stationary and with a non-degenerate intensity,
the distribution of its Palm version
will be denoted by ${\cp_0}$, i.e., ${\cp_0}=\mathbf Mathbb Ps{\p_\Phi}$,
and expectation w.r.t. ${\cp_0}$ will be denoted by ${\cal E}_0$.
In the canonical setup, the Palm version
of $(\Phi,\mathbf Mathbb P)=(\text{id},{\cal P})$ is
$(\Phi,{\p_\Phi})=(\text{id},{\cp_0})$.
\subsection{Compatible Point-Shift s}
\label{sec:cpoms}
\subsubsection{Point-Maps}
A \mathbf Mathbb Pom{} on $\mathbf N$ is a measurable function
$\f:\mathbf N\times \R^d \to \R^d$,
which is defined for all pairs $(\mathbf Mathbb Phi, x)$, where $\mathbf Mathbb Phi\in\mathbf N$
and $x\in\mathbf Mathbb Phi$, and satisfies the relation
$\f(\mathbf Mathbb Phi,x) \in \mathbf Mathbb Phi$ for all $x\in \mathbf Mathbb Phi$.
In order to define compatible \mathbf Mathbb Pom{}s,
it is convenient to use the notion of \mathbf Mathbb Emph{\mathbf Mathbb Pomk}.
A measurable function $f:\mathbf N^0 \to \sp$
is called a \mathbf Mathbb Pomk{} if for all
$\mathbf Mathbb Phi$ in $\mathbf N^0$,
one has $f(\mathbf Mathbb Phi)=f(\overline\mathbf Mathbb Phi)$,
i.e., it depends only on $\overline\mathbf Mathbb Phi$, and
if $f(\mathbf Mathbb Phi)\in \overline\mathbf Mathbb Phi$.
If $f$ is a \mathbf Mathbb Pomk, the associated
\mathbf Mathbb Emph{compatible \mathbf Mathbb Pom, $\f$, is}
$$\f(\mathbf Mathbb Phi,x) = f(\theta_fa_x\mathbf Mathbb Phi) + x
=\theta_fa_{-x} f(\theta_fa_x\mathbf Mathbb Phi) .$$
The \mathbf Mathbb Pom{} $\f$ is compatible in the sense that
\begin{eqnarray}\label{compf}
\f(\theta_fa_t\mathbf Mathbb Phi,\theta_fa_tx) &=&
\f(\theta_fa_t\mathbf Mathbb Phi, x -t)
=f(\theta_fa_{x -t}(\theta_fa_t\mathbf Mathbb Phi))+x-t\mathbf Nonumber\\
&=&f(\theta_fa_x\mathbf Mathbb Phi)+x-t=\f(\mathbf Mathbb Phi,x)-t=
\theta_fa_t(\f(\mathbf Mathbb Phi,x)).
\mathbf Mathbb End{eqnarray}
In the rest of this article, \mathbf Mathbb Pom{} always means compatible \mathbf Mathbb Pom{}.
Small letters will be used for \mathbf Mathbb Pomk s and capital letters for
the associated \mathbf Mathbb Pom s.
For the \mathbf Mathbb Pomk{} $f$, the \mathbf Mathbb Emph{action of the \mathbf Mathbb Pomk} on $\mathbf N^0(\sp)$
will be denoted by $\theta_fg$ and defined by
\mathbf Mathbb Eqn{f_\omegaegarall \mathbf Mathbb Phi\in\mathbf N^0(\sp); \mathbb Quad\theta_fg(\mathbf Mathbb Phi)=\theta_fa_{f(\mathbf Mathbb Phi)}(\mathbf Mathbb Phi).}
\subsubsection{Iterates of a Point-Shift}
\label{sec:cpoms-ite}
For all $nfe 0$, all $\mathbf Mathbb Phi\in \mathbf N$ and $x\in \mathbf Mathbb Phi$,
the $n$-th order iterate of the point-shift $\f$
is defined inductively
by $\f^0(\mathbf Mathbb Phi,x)=x$ and
$$\f^{k+1}(\mathbf Mathbb Phi,x)=
\f (\mathbf Mathbb Phi, \f^k(\mathbf Mathbb Phi,x)), \mathbb Quad kfe 0.
$$
For all $n$, $F^n$ is a compatible \mathbf Mathbb Pom{} and
the associated point-map,
which will be denoted by $f^n$, satisfies
\mathbf Mathbb Eq{\label{nthpom} f^n(\mathbf Mathbb Phi)=
f^{n-1}(\mathbf Mathbb Phi) + f(\theta_fa_{f^{n-1}}(\mathbf Mathbb Phi)),\mathbb Quad nfe 1,}
with $f^0(\mathbf Mathbb Phi)=0$ and $\mathbf Mathbb Phi\in\mathbf N^0$. It is easy to verify that for all $n\in \Bbb{N}$, on $\mathbf N^0$,
\mathbf Mathbb Eqn{\theta_fa_{f^n}= \theta_fg^n,}
and hence
\mathbf Mathbb Eq{\label{semigroupg}\theta_fa_{f^{m+n}}=\theta_fa_{f^m}\circ\theta_fa_{f^n}.}
In accordance with
the definition of $F^n$, for all $nfe 1$, let
\mathbf Mathbb Eqn{F^{-n}(\mathbf Mathbb Phi,x)=\{y\in\mathbf Mathbb Phi; F^n(\mathbf Mathbb Phi,y)=x\}.}
\subsubsection{Image Point Processes}
\label{sec:cpoms-mul}
Let $f$ be a \mathbf Mathbb Pomk. For all $\mathbf Mathbb Phi \in \mathbf N$ and all nonnegative
integers $n$, let
\mathbf Mathbb Eq{\label{phifn}
m_f^n(\mathbf Mathbb Phi,y)= \mathbf Mathbb Phi(F^{-n}(\mathbf Mathbb Phi,y))=
\sum_{x\in F^{-n}(\mathbf Mathbb Phi,y)}\mathbf Mathbb Phi(\{x\}),
\mathbb Quad f_\omegaegarall y\in \mathbf Mathbb Phi,}
where by convention, the summation over the empty set is zero.
Note that if $\mathbf Mathbb Phi$ is simple, then
$ m_f^n(\mathbf Mathbb Phi, y)=\mathbf Mathrm{card}(F^{-n}(\mathbf Mathbb Phi,y)).$
\begin{defi}
Assume that $m_f^n(\mathbf Mathbb Phi,y)<\infty$ for all $y\in \mathbf Mathbb Phi$.
The $n$-th image counting measure (of $\mathbf Mathbb Phi$ by $\f$) is then
defined as the counting measure $\mathbf Mathbb Phi^n_f$ with support
$\{y\in\mathbf Mathbb Phi;F^{-n}(\mathbf Mathbb Phi,y)\mathbf Neq \mathbf Mathbb Emptyset\}$,
and such that the multiplicity of $y$ in the support of $\mathbf Mathbb Phi_f^n$
is $m_f^n(\mathbf Mathbb Phi,y)$.
\mathbf Mathbb End{defi}
It will be shown below that,
for all stationary point processes $(\Phi,\mathbf Mathbb P)$,
for all $nfe 0$, $(\Phi^n_f,\mathbf Mathbb P)$ is a stationary point process
(item 1 in Remark \ref{remfincond})
with the same intensity as $\Phi$
(item 2 in Remark \ref{remfincond}).
The point process $\Phi^n_f$ will be referred to as the $n$-th {\mathbf Mathbb Em image
point process} (of $\Phi$ by the \mathbf Mathbb Pomk{}).
\subsection{First Point-Shift{} Examples}
This subsection presents a few basic examples of \mathbf Mathbb Pom{}s.
These examples will allow one to illustrate the main
results in Section \ref{summary}.
More details on these examples and further examples can
be found in Section \ref{proofs}.
\subsubsection{Strip Point-Shift} \label{sr0}
The strip point-shift was introduced by Ferrari,
Landim and Thorisson \cite{FeLaTh04}.
For all points $x=(x_1,x_2)$ in the plane, let $T(x)$ denote the
half strip $(x_1,\infty)\times [x_2-1,x_2+1]$.
Then $S(\phi,x)$ is the
left-most point of $\phi$ in $T(x)$ (see Figure \ref{fig0}).
It is easy to verify that $S$ is
compatible. It is not bijective.
Its point-map will be denoted by $s$.
\begin{rem}
The strip point-shift is not well defined when there is more than one
left-most point in $T(x)$, or when there is no
point of $\phi$ in $T(x)$.
However, there is no problem if we consider the strip point-shift
(and all other point-shifts) on point processes for which the point-shift
is almost surely well defined. Note that these
two difficulties can always be taken care of by fixing,
in some translation invariant manner,
the choice of the image in the case of ambiguity,
and by defining $F(\phi, x)=x$
in the case of non-existence. By doing so one gets a
point-shift defined for all $(\phi,x)$.
\end{rem}
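As a concrete illustration (this sketch is ours and not part of the original text; the function name and the finite simulation window are arbitrary choices), the following few lines evaluate the strip point-shift on a finite planar pattern and iterate it from a given point, mapping a point to itself when the half strip contains no other point, as in the remark above.
\begin{verbatim}
import numpy as np

# Illustrative sketch: the strip point-shift S on a finite planar pattern.
# S(phi, x) is the left-most point of phi in the half strip
# (x1, +inf) x [x2 - 1, x2 + 1]; if the strip is empty, x is mapped to itself.

def strip_point_shift(phi, x):
    x1, x2 = x
    in_strip = [y for y in phi if y[0] > x1 and abs(y[1] - x2) <= 1]
    return min(in_strip, key=lambda y: y[0]) if in_strip else x

rng = np.random.default_rng(2)
phi = [tuple(p) for p in rng.uniform(0, 10, size=(40, 2))]

# iterate the point-shift from a point of phi: the orbit x, F(phi,x), F^2(phi,x), ...
x = phi[0]
orbit = [x]
for _ in range(4):
    x = strip_point_shift(phi, x)
    orbit.append(x)
print(orbit)
\end{verbatim}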
\subsubsection{Strip Point-Shift{} on the Random Geometric Graph} \label{drgg}
The \mathbf Mathbb Emph{strip \mathbf Mathbb Pomk{} on the random geometric graph}
with the neighborhood radius $r$ is
\mathbf Mathbb Eqn{g(\mathbf Mathbb Phi)=
\begin{cases}
s(\mathbf Mathbb Phi) & \mathbf Norm{s(\mathbf Mathbb Phi)}<r\\
0 & \text{otherwise,}
\mathbf Mathbb End{cases}
}
where $s$ is the strip \mathbf Mathbb Pomk{}.
The associated \mathbf Mathbb Pom{} is depicted in Figure
\ref{fig0}. It will be denoted by $G$. It is not bijective.
Its \mathbf Mathbb Pomk{} will be denoted by $g$.
\begin{figure}[h]
{\cal E}ntering
{{\includegraphics[width=0.47\linewidth]{Strip.pdf}}
{\includegraphics[width=0.47\linewidth]{StripRGG.pdf}}
\caption{Left: Iterates of the strip \mathbf Mathbb Pom{} $S$.
Right: Iterates of $G$, the strip \mathbf Mathbb Pom{} on the random geometric graph.
In both cases, the point
$G^4(\mathbf Mathbb Phi,0)$ is that at the end of the directed path.}
\label{fig0}}
\mathbf Mathbb End{figure}
\subsubsection{Closest Point-Shift}
The \mathbf Mathbb Emph{closest \mathbf Mathbb Pom{}}, $C$, maps each point of
$x\in \mathbf Mathbb Phi$ to the point $y\mathbf Ne x$ of $\mathbf Mathbb Phi$ which is the closest.
This \mathbf Mathbb Pom{} is not bijective either.
The associated \mathbf Mathbb Pomk{} will be denoted by $c$.
It is depicted in Figure \ref{fig01}.
\subsubsection{Mutual-Neighbor Point-Shift}
The \mathbf Mathbb Emph{mutual-neighbor \mathbf Mathbb Pom{}, $N$,}
maps each point
$x\in\mathbf Mathbb Phi$ to the point $y$ of $\mathbf Mathbb Phi$ which is the
closest to $x$, if $x$ is also the point of $\mathbf Mathbb Phi$ which is the
closest to $y$. Otherwise, it maps $x$ to itself.
It is easy to see that $N$ is bijective and involutive:
$N^2\mathbf Mathbb Equiv\text{id}$. The associated \mathbf Mathbb Pomk{} will be denoted by $n$.
It is depicted in Figure \ref{fig01}.
\begin{figure}[h]
{\cal E}ntering
{\fbox{\includegraphics[width=0.47\linewidth]{Closest.pdf}}
\fbox{\includegraphics[width=0.47\linewidth]{Mutual.pdf}}
\caption{Left: the closest \mathbf Mathbb Pom{} $C$.
Right: the mutual-neighbor \mathbf Mathbb Pom{} $N$.
The directed edge emanating from a point indicates the image of the point.
\label{fig01}}
}
\mathbf Mathbb End{figure}
\subsection{Mecke's Point Stationarity Theorem}
One of the motivations of this work is to extend the following
proposition proved by J. Mecke in \cite{Me75}.
\begin{thm}[Point Stationarity]\label{point.sta}
Let $(\Phi,\mathbb P)$ be a simple stationary point process and
let $\f$ be a point-shift such that $\f(\Phi,\cdot)$ is $\mathbb P$-a.s. bijective.
Then the Palm probability of the point process
is invariant under the action of $\theta_{f}$; i.e.,
\begin{equation}\label{second.thet}
{\p_\Phi}=(\theta_{f(\Phi)})_*{\p_\Phi},
\end{equation}
with $\theta_{f(\Phi)}$ seen as a map from
$\Omega$ to itself defined by
\begin{equation}\label{first.thet}
(\theta_{f(\Phi)})(\omega):=\theta_{f(\Phi(\omega))}\omega.
\end{equation}
Since ${\p_\Phi}[\Phi(\{0\})>0]=1$, $\theta_{f(\Phi)}$
is ${\p_\Phi}$-almost surely well defined.
\end{thm}
\begin{rem}
The fact that $\theta_fg$ is bijective $\Phi_*{\p_\Phi}$-a.s.
is equivalent to the fact that $F$ is
bijective on $\mathbf Mathbb Ps\mathbf Mathbb P$-almost all realizations
of the point process.
\mathbf Mathbb End{rem}
\section{Results}\label{summary}
\subsection{Semigroup Actions of a Point-Shiftk}
Below,
$\mathbf N^0=\mathbf N^0(\R^d)$
and $\mathbf M^1(\mathbf N^0)$ denotes the set of probability measures
on $\mathbf N^0$.
For all \mathbf Mathbb Pomk s $f$ on $\mathbf N^0$,
consider the following actions $\mathbf Mathbb Pi=\{\mathbf Mathbb Pi_n\}$ of $(\Bbb{N},+)$:
\begin{enumerate}
\item $X=\mathbf N^0$, equipped with the vague topology, and
for all $\mathbf Mathbb Phi\in \mathbf N^0$ and $n\in \Bbb{N}$,
$$\mathbf Mathbb Pi_n(\mathbf Mathbb Phi)=\theta_fg^n(\mathbf Mathbb Phi)\in \mathbf N^0,$$
where $\theta_fg^n$ is defined in Subsection~\ref{sec:cpoms-ite}.
\item $X=\mathbf M^1(\mathbf N^0)$, equipped with the weak convergence of probability measures on $\mathbf N^0$,
and for all ${\cal Q}\in \mathbf M^1(\mathbf N^0)$ and $n\in \Bbb{N}$,
$$\mathbf Mathbb Pi_n({\cal Q})=\theta_{f_*}g^n{\cal Q}=(\theta_fg^n)_*{\cal Q}\in \mathbf M^1(\mathbf N^0).$$
\mathbf Mathbb End{enumerate}
\subsection{Periodicity and Evaporation}
The point-map $f$
will be said to be {\em periodic} on the stationary point process
$(\Phi,\mathbb P)$ if for $\Phi_*{\p_\Phi}$-almost all $\phi$,
the action of $\theta_f^n$ is periodic on $\phi$, namely if
there exist integers $p=p(\phi)$ and $K=K(\phi)$
such that for all $n\ge K$, $\theta_f^n(\phi)=\theta_f^{n+p}(\phi)$.
The case where $p$ is independent of $\phi$ is known as
$p$-periodicity.
The special case of $1$-periodicity is that where
$\theta_f^n(\phi)$ is
{\em stationary} (in the dynamical system sense) after some steps,
i.e., such that for all $n>K(\phi)$, $\theta_f^n(\phi) = \theta_f^K(\phi)$.
Note that if for all
$x\in\mathbf Mathbb Phi$, the trajectory
$F^n(\mathbf Mathbb Phi,x)$ is stationary,
i.e., such that for all $n>K(\mathbf Mathbb Phi,x)$, $\f^n(\mathbf Mathbb Phi,x)=\f^K(\mathbf Mathbb Phi,x)$,
then $f$ is 1-periodic.
The mutual-neighbor \mathbf Mathbb Pomk{} $n$ on a homogeneous
Poisson point process is 2-periodic.
Similarly, for the closest point-map $c$, the iterates of this
point-shift form a {\em descending chain}, namely a sequence
of points of the support of the point process such that the
distance between the $(k+1)$-st and the $k$-th point
is non-increasing in $k\ge 0$.
The well known fact that there are no infinite
descending chains in the homogeneous Poisson point process
(see \cite{DaLa05}) implies that $c$ is 2-periodic on such a point process,
with the points of the period being mutual-neighbors.
If $g$ is the strip \mathbf Mathbb Pomk{} on the random geometric graph
defined in Subsection~\ref{drgg},
the strong Markov property of the stationary Poisson point process
on $\sp$ (see \cite{Zu06} for details on the strong Markov property of Poisson point process) gives that the point process on the right half-plane of
$G(0)$ is distributed as the original Poisson point process.
Hence $G$ is a.s. 1-periodic, even
when the underlying random geometric graph is supercritical.
\begin{rem}
Note that there are other ways of defining periodicity, possibly leading to
other periods. For instance, for the mutual-neighbor \mathbf Mathbb Pomk{}
on a Poisson point process, the sequence of image
point processes $\{\Phi^f_n\}_{nfe 0}$
(defined in Subsection \ref{sec:cpoms-mul})
is 1-periodic whereas $f$ is 2-periodic according to
the definition proposed above.
\mathbf Mathbb End{rem}
The point process $(\Phi,\mathbf Mathbb P)$ will be said to {\mathbf Mathbb Em evaporate}
under the action of the \mathbf Mathbb Pomk{} $f$ if $\overline{\Phi^n_f}$
converges a.s. to the null measure as $n$ tends to infinity,
i.e., for $\mathbf Mathbb P$-almost surely, the set
\begin{equation}
\label{eq:definf}
\overline{\Phi_f^\infty}:=\bigcap_{n=1}^\infty \overline{\Phi_f^n}
\mathbf Mathbb End{equation}
is equal to the empty set
(note that $\overline{\Phi^n_f}$ is a non increasing sequence of sets).
Consider the following set
\begin{eqnarray}
\label{eq:III}
I &:= & \{\mathbf Mathbb Phi\in\mathbf N^0; f_\omegaegarall n\in{\cal B}bb N, \f^{-n}(\mathbf Mathbb Phi,0)\mathbf Neq\mathbf Mathbb Emptyset\}
\mathbf Nonumber \\
& = & \{\mathbf Mathbb Phi\in\mathbf N^0; f_\omegaegarall n\in{\cal B}bb N, m^n_f(\mathbf Mathbb Phi,0)>0\}
\mathbf Mathbb End{eqnarray}
(see Subsection \ref{sec:cpoms-ite} for
the definition of $\f^{-n}(\mathbf Mathbb Phi,y)$ and Subsection
\ref{sec:cpoms-mul} for that of $m^n_f$).
\begin{lem}\label{eveq}
For all point-maps $f$, the
stationary point process $(\Phi,\mathbb{P})$
evaporates under the action of $f$ if and only if $\mathbb{P}_\Phi[\Phi\in I]=0$.
\end{lem}
\begin{proof}
Let ${\cal P}=\Phi_*\mathbb{P}$ and ${\cal P}_0=\Phi_*\mathbb{P}_\Phi$. If $\chi(\phi,x)$
is the indicator of the fact that $x$ has $f$-pre-images of all orders,
then $\chi$ is a compatible marking of the point process
(i.e., $\chi(\phi,x)= \chi(\theta_x \phi,0)$
for all $x\in \phi$).
Therefore, if $\Psi$ denotes the sub-point process of the points
with mark $\chi$ equal to 1, then $(\Psi,\mathbb{P})$ is a stationary
point process and, by Campbell's theorem,
\begin{equation}\label{simmark}
\lambda_\Psi=\lambda_\Phi\mathbb{E}_\Phi[\chi(\Phi,0)]
=\lambda_\Phi\mathbb{P}_\Phi[\Phi\in I].
\end{equation}
The evaporation of $(\Phi,\mathbb{P})$ under $f$ means that
$\Psi$ has zero intensity.
According to (\ref{simmark}), this is equivalent to $\mathbb{P}_\Phi[\Phi\in I]=0$.
\end{proof}
The homogeneous Poisson point process
on ${\mathbb R}^2$ evaporates under the
action of the strip point-map $s$ (see Section \ref{proofs}).
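Evaporation can be visualized numerically. The Python sketch below (purely qualitative; it assumes the convention, used later in the proof of Proposition~\ref{fstpa}, that the strip point-shift moves each point to the leftmost point of $\phi$ in the half-strip of half-width $1$ to its right, and it ignores boundary effects of the finite window) tracks the cardinality of the support of $\Phi^n_f$ inside a fixed observation box; the count decreases as $n$ grows.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
lam, W, H = 1.0, 150.0, 16.0
pts = rng.uniform([0.0, -H / 2], [W, H / 2],
                  size=(rng.poisson(lam * W * H), 2))

def strip_shift(p):
    # leftmost point of the half-strip of half-width 1 to the right of
    # each point (assumed convention); points with an empty strip are frozen
    img = p.copy()
    for i, (x, y) in enumerate(p):
        m = (p[:, 0] > x) & (np.abs(p[:, 1] - y) <= 1.0)
        if m.any():
            j = np.where(m)[0]
            img[i] = p[j[np.argmin(p[j, 0])]]
    return img

cur = pts
for n in range(21):
    if n % 5 == 0:
        box = (cur[:, 0] > 50) & (cur[:, 0] < 100) & (np.abs(cur[:, 1]) < 6)
        # number of distinct n-th order images inside the observation box
        print(n, len({tuple(q) for q in cur[box]}))
    cur = strip_shift(cur)
\end{verbatim}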
\subsection{Action of $\theta_{f*}$}
\subsubsection{Image Palm Probabilities}
Let $\Phi$ be a stationary point process on $\sp$ and
$f$ be a point-map.
Consider the action of $\theta_{f*}$ (see Equation (\ref{indmeasu}))
when ${\cal Q}={\cal P}_0$, the Palm distribution of $\Phi$.
It follows from the definition
and from (\ref{pag}) that,
for all $n\geq 1$, for all $G\in{\cal N}$,
and for all Borel sets $B$ with
non-degenerate Lebesgue measure $|B|$,
\begin{eqnarray}
\label{eq:vbas}
(\theta_f^n)_*{\cal P}_0[G]
& = & \frac 1 {\lambda |B|}
\int_{\mathbf N} \int_B
\mathbf{1} \{\theta_f^n\circ \theta_t (\phi) \in G\}
\,\phi(\d t)\,
{\cal P} [\d\phi].
\end{eqnarray}
In what follows,
${\cal P}_0^{f,n}$ is a short notation for
the probability on $\mathbf N^0$ defined in the last equation.
This probability will be referred to as the
\emph{$n$-th image Palm probability (w.r.t.\ $f$)} of the point process.
It follows from the semigroup property (\ref{semigroupg}) that
\begin{equation}\label{cpntocpn+1}
\theta_{f*}{\cal P}_0^{f,n}={\cal P}_0^{f,n+1},
\quad \forall n\in \Bbb{N},
\end{equation}
when letting ${\cal P}_0^{f,0}:={\cal P}_0$.
From the mass transport relation \cite{LaTh09}, and
using the image counting measure $\phi_f^n$ defined
in Subsection~\ref{sec:cpoms-mul}, one gets:
\begin{lem}\label{convfrom0}
For all $n\geq 0$ and all $G\in {\cal N}$,
\begin{eqnarray}
{\cal P}_0^{f,n} [G]
& = & \frac 1 {\lambda |B|}
\int_{\mathbf N}
\int_{B}
\mathbf{1} \{\theta_t\phi\in G\}\, \phi^n_f (\d t)\,
{\cal P} [\d\phi].
\label{cffpan}
\end{eqnarray}
\end{lem}
Note that, in general, the $n$-th image Palm probability
${\cal P}_0^{f,n}$ is {\em not} the Palm probability of
the $n$-th image point process $\Phi^n_f$ (which is the distribution
of $\Phi^n_f$ given that the origin belongs to $\Phi^n_f$, when using
the local interpretation of the Palm probability).
It is rather the distribution of $\Phi$ given
that the origin is in the $n$-th image process. In both cases,
point multiplicities should be taken into account.
\begin{rem}\label{remfincond}
Equation (\ref{cffpan}) has several important implications:
\begin{enumerate}
\item If $\cal P$ is the distribution of a simple stationary point process,
Equation (\ref{cffpan}) gives
\begin{eqnarray}
{\cal P}_0^{f,n} [G] & = &
{\cal E}_0 [m^n_f 1_{G}], \quad \forall G,
\label{cffpan2}
\end{eqnarray}
with $m^n_f$ the random variable $m^n_f(\phi,0)$
(see Equation (\ref{phifn})) and
$1_{G}$ the indicator function $1_{G}(\phi)$.
Taking $G=\mathbf N^0$ then gives
\begin{equation}\label{eqmean1}
{\cal E}_0 [m^n_f]=1,
\end{equation}
which shows that, ${\cal P}_0$ a.s.,
$m^n_f(\phi)<\infty$.
This in turn implies that, ${\cal P}$ a.s.,
for all $y\in \phi$, $m^n_f(\phi,y)<\infty$.
\item
Equation (\ref{eqmean1}) together with the Campbell--Mecke formula
imply that the intensity of $\Phi_f^n$
is equal to that of $\Phi$, as already mentioned.
\item
Equation (\ref{cffpan2}) shows that ${\cal P}_0^{f,n}$ is absolutely continuous
w.r.t.\ ${\cal P}_0$, with
Radon--Nikodym derivative
$$m^n_f:= m^n_f(\phi,0).$$
\end{enumerate}
\end{rem}
\begin{prop}
For all simple point
processes ${\cal P}$, for all $n$ and $G$,
\begin{eqnarray}
{\cal P}_0^{f,n} [G] = {\cal E}_0 \left[
\frac {m^n_f}{{\cal E}_0 [ m^n_f\mid m^n_f>0]}\,
1_{G} \mid m^n_f>0\right].
\label{cffpan3}
\end{eqnarray}
\end{prop}
\begin{proof}
Equation (\ref{cffpan}) implies that
\begin{eqnarray*}
{\cal P}_0^{f,n} [G] & = &
{\cal E}_0 [m^n_f 1_{G}]=
{\cal E}_0 [m^n_f 1_{G} 1_{m^n_f>0}].
\end{eqnarray*}
Taking $G=\mathbf N^0$ gives
\begin{eqnarray*}
{\cal P}_0 [m^n_f>0]= \frac 1
{{\cal E}_0 [ m^n_f\mid m^n_f>0]}.
\end{eqnarray*}
Equation (\ref{cffpan3}) then follows immediately.
\end{proof}
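The two identities used above, ${\cal E}_0[m^n_f]=1$ and ${\cal P}_0[m^n_f>0]=1/{\cal E}_0[m^n_f\mid m^n_f>0]$, can be checked on a finite pattern, where averaging over the points of the pattern plays the role of the Palm expectation. The following Python sketch (a toy illustration, not a proof; the nearest-neighbor map is used as a stand-in point-shift) computes the multiplicities $m^n_f$ exactly.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
pts = rng.uniform(0, 20, size=(rng.poisson(400), 2))
N = len(pts)

d2 = ((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1)
np.fill_diagonal(d2, np.inf)
F = d2.argmin(axis=1)                 # a toy point-shift (nearest neighbor)

for n in [1, 2, 5, 10]:
    Fn = np.arange(N)
    for _ in range(n):
        Fn = F[Fn]                    # n-th iterate of the point-shift
    m = np.bincount(Fn, minlength=N)  # m^n_f(y): number of n-th order pre-images
    pos = m > 0
    # both printed quantities equal 1: mass conservation, and
    # P_0[m^n_f > 0] = 1 / E_0[m^n_f | m^n_f > 0]
    print(n, m.mean(), pos.mean() * m[pos].mean())
\end{verbatim}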
\subsubsection{Definition and Existence
of Point-Map-Probabilities}
\begin{defi}
\label{deffond}
Let $f$ be a point-map and let $\cal P$ be
a stationary point process with
Palm distribution ${\cal P}_0$.
Every element of the $\omega$-limit set (see (\ref{eq:omegalim})) of ${\cal P}_0$
(where limits are w.r.t.\ the topology of convergence
in distribution of probability measures on $\mathbf N^0$
-- cf.\ Subsection~\ref{RandMeas})
under the action of $\{(\theta_f^n)_*\}_{n\in \Bbb{N}}$
will be called an \emph{$f$-probability of} ${\cal P}_0$.
In particular, if the limit of the sequence
$((\theta_f^n)_* {\cal P}_0)_{n=1}^\infty=({\cal P}_0^{f,n})_{n=1}^\infty$ exists,
it will be called {\em the} $f$-probability of ${\cal P}_0$
and denoted by ${\cal P}_0^{f}$.
\end{defi}
Let $A_{{\cal P}_0}$ denote the orbit of ${\cal P}_0$.
The set of $f$-probabilities of ${\cal P}_0$ is hence the set of
all accumulation points of the closure $\mathrm{cl}(A_{{\cal P}_0})$ of
$A_{{\cal P}_0}$, or equivalently the set of elements of
$\mathbf M^1(\mathbf N^0)$ all of whose neighborhoods contain infinitely
many elements of $A_{{\cal P}_0}$ -- see the definitions
in Section \ref{pernot}.
\begin{rem}
In view of (\ref{cffpan2}), for all simple $\cal P$,
the existence of a unique $f$-probability ${\cal P}_0^{f}$
is equivalent to
\begin{eqnarray}
\lim_{n\to \infty} \int_{\mathbf N^0} h(\phi)\, m^n_f(\phi)\, {\cal P}_0(\d \phi)
= \int_{\mathbf N^0} h(\phi)\, {\cal P}_0^{f}(\d \phi),
\label{wconv}
\end{eqnarray}
for all bounded and continuous functions $h:\mathbf N^0\to \Bbb{R}$.
\end{rem}
\begin{cor}
Let ${\cal Q}$ be an $f$-probability.
Let $I$ be the set defined in (\ref{eq:III}).
If, for all positive integers $n$, $\phi\to\mathbf 1_{m_f^n(\phi)>0}$
is ${\cal Q}$-a.s.\ continuous, then ${\cal Q}[I]=1$.
\end{cor}
\begin{proof}
The statement is an immediate consequence of Lemma~\ref{lem:discont.meas.zero}.
\end{proof}
The relative compactness of $A_{{\cal P}_0}$
(and the existence of $f$-probabilities) is not granted in general.
The next lemmas give conditions
for this relative compactness to hold.
From Lemma 4.5 in \cite{Ka76}, one gets:
\begin{lem}\label{kalen}
A necessary and sufficient condition for the set
$A_{{\cal P}_0}$ to be relatively compact in $\mathbf M^1(\mathbf N^0(\R^d))$ is that,
for all bounded Borel subsets $B$ of $\sp$,
\begin{equation}\label{sufcon}
\lim_{r\to\infty}\limsup_{n\to \infty} {\cal P}_0^{f,n}[\phi\in \mathbf N^0 \mbox{ s.t. }
\phi(B)>r]=0.
\end{equation}
\end{lem}
Examples of point-map and point process pairs for which
this relative compactness property does
not hold are provided in Subsection \ref{CEPM}.
On stationary point processes,
all the point-maps discussed in Section \ref{sr0} satisfy
this relative compactness property.
For the periodic cases
(e.g.\ $c$, $n$ and $g$ on Poisson point processes), the result follows
from Proposition \ref{prop25} below, whereas for the strip point-map $s$, the
proof is given in Subsection \ref{Markovian}.
The point-map $f$ will be said to have
{\em finite orbits} on the stationary point process
$(\Phi,\mathbb{P})$ if, for $\Phi_*\mathbb{P}_\Phi$-almost all $\phi$,
the set $\{\theta_f^n(\phi)\}_{n\in \Bbb{N}}$ is finite.
\begin{prop}
\label{prop25}
If $f$ has finite orbits
on the stationary point process $(\Phi,\mathbb{P})$,
then the set $A_{{\cal P}_0}$ is relatively compact.
\end{prop}
\begin{proof}
For all bounded Borel subsets $B$ of $\sp$ and $\phi\in\mathbf N^0$, let
$$R_B(\phi):=\max_{n\geq 0}\left\{ (\theta_f^n\phi)(B)\right\}.$$
Since $f$ has finite orbits, the right-hand side is a maximum over a finite number of terms, and hence $R_B$ is well-defined and finite. Clearly $R_B(\phi)\geq R_B(\theta_f \phi)$, and therefore the distribution of the random variable $R_B$ under ${\cal P}_0^{f,n}$ is stochastically decreasing w.r.t.\ $n$. Hence
\begin{eqnarray*}
&&\hspace{-1cm}\lim_{r\to\infty}\limsup_{n\to \infty} {\cal P}_0^{f,n}[\phi\in \mathbf N^0 \mbox{ s.t. } \phi(B)>r] \\
&\leq&\lim_{r\to\infty}\limsup_{n\to \infty} {\cal P}_0^{f,n}[\phi\in \mathbf N^0 \mbox{ s.t. } R_B(\phi)>r]\\
&\leq&\lim_{r\to\infty}{\cal P}_0[\phi\in \mathbf N^0 \mbox{ s.t. } R_B(\phi)>r]=0.
\end{eqnarray*}
\end{proof}
\begin{rem}
It is easy to check that the following statements are equivalent:
(1) $f$ has finite orbits; (2) $f$ is periodic; and (3)
for $\Phi_*\mathbb{P}$-almost all $\phi$ and
all $x\in\phi$,
$\{F^n(\phi,x)\}_{n\in \Bbb{N}}$ contains finitely many different points.
\end{rem}
So, for instance, for
the directional point-map $g$ on the random geometric graph,
$A_{{\cal P}_0}$ is relatively compact as this point-map is 1-periodic.
\subsection{On Palm and Point-Map-Probabilities}
This subsection is focused
on the relation between Palm probabilities and point-map-probabilities.
Throughout the subsection, $f$ is a point-map, and $(\Phi,\mathbb{P})$
is a simple and stationary point process with non-degenerate intensity.
The distribution of $\Phi$ is denoted by $\cal P$ and
its Palm probability by ${\cal P}_0$.
\subsubsection{Conditional Interpretation of the Point-Map-Probability}
The next theorem, which immediately follows from
Equation (\ref{cffpan3}), gives
a conditional definition of the $f$-probability from ${\cal P}_0$:
\begin{thm}
\label{thmloc}
Let $\cal P$ be a simple stationary point process on
$\sp$.
For all $n\in \mathbb{N}$ and $\phi\in \mathbf N$,
let $m^n_f(\phi):=m^n_f(\phi,0)$.
For all $n$, $m^n_f(\phi)$ is ${\cal P}_0$ a.s.\ finite.
If there exists a unique $f$-probability ${\cal P}_0^{f}$ for $\cal P$,
then, for all $G$ such that ${\cal P}_0^{f,n} [G]$ tends to ${\cal P}_0^{f}[G]$
as $n$ tends to infinity, one has
\begin{eqnarray}
{\cal P}_0^{f} [G]
= \lim_{n\to \infty}
{\cal E}_0 \left[
\frac {m^n_f}{{\cal E}_0 [ m^n_f\mid m^n_f>0]}\,
1_{G}
\mid m^n_f>0\right].
\label{cffpan4}
\end{eqnarray}
\end{thm}
Notice that,
in addition to the conditioning, there
is a Radon--Nikodym derivative (w.r.t.\
${\cal P}_0[\cdot\mid m^n_f >0]$) equal to
$m^n_f(\phi)/{\cal E}_0 [ m^n_f\mid m^n_f>0]$.
\subsubsection{The Periodic Case}
Below, for all stationary point processes $(\Psi,\mathbb{P})$ defined on
$(\Omega, \cal F)$ with a positive intensity,
$\mathbb{P}_\Psi$ denotes the Palm probability w.r.t.\ $\Psi$
on $(\Omega, \cal F)$.
\begin{lem}\label{finite.trap}
If $f$ is 1-periodic on the (simple) stationary point
process $(\Phi,\mathbb{P})$, then a.s., for all $x\in\Phi$, $\lim_n \Phi^n_f(\{x\})$
exists and is finite.
\end{lem}
\begin{proof}
If $x$ is a trap of $\Phi$, i.e.\ $F(\Phi,x)=x$, then
$(\Phi^n_f(\{x\}))_{n=1}^\infty$ is non-decreasing in $n$.
Let $\Psi$ be the thinning of $\Phi$ to those traps of $\Phi$
for which the above limit is not finite.
The compatibility of $F$ implies that $(\Psi,\mathbb{P})$
is a stationary point process.
If $B$ is the unit box in $\sp$ and $K$ is a positive integer,
for $n$ large enough, one has
\begin{equation*}
\lambda_\Phi =
\int_\Omega\Phi^n_f(B)\, \mathbb{P}(\d \omega)\geq
\int_\Omega\Phi^n_f(\Psi\cap B)\, \mathbb{P}(\d \omega)
\geq \int_\Omega K\Psi(B)\,\mathbb{P}(\d \omega) =K\lambda_\Psi,
\end{equation*}
where $\lambda_\Phi$ and $\lambda_\Psi$ denote the
intensities of the point processes.
Therefore $\lambda_\Psi\leq\lambda_\Phi/K$,
which proves that $\lambda_\Psi=0$.
Hence, a.s., at the traps of $\Phi$, the limit exists and is finite.
Given this, it is easy to verify that if $y\in\Phi$ is not a trap, then,
for $n$ large enough, $\Phi^n_f(\{y\})=0$, and hence the limit
exists for all points of $\Phi$.
\end{proof}
When $\lim_n \phi^n_f$ exists and is a counting measure,
it is denoted by $\phi^\infty_f$.
Hence, in the 1-periodic case, $(\Phi^\infty_f,\mathbb{P})$ is
well defined and a non-degenerate stationary point process.
\begin{thm}\label{thmcyc1}
If $f$ is 1-periodic on $(\Phi,\mathbb{P})$,
then the $f$-probability ${\cal P}_0^{f}$ of
${\cal P}_0=\Phi_* \mathbb{P}_\Phi$
exists and is given by
\begin{equation}
{\cal P}_0^{f}= \Phi_*\, \mathbb{P}_{\Phi_f^\infty}.
\end{equation}
Let $m^\infty_f(\Phi)$ denote the multiplicity
of the origin under $\mathbb{P}_{\Phi_f^\infty}$.
Then ${\cal P}_0^{f}$ is absolutely continuous
with respect to ${\cal P}_0$, with
\begin{equation}\label{tleq1}
\frac{\d{\cal P}_0^{f}}{\d{\cal P}_0}(\phi)=
m^\infty_f(\phi).
\end{equation}
In addition, ${\cal P}_0^{f}= \theta_{f*}{\cal P}_0^{f}$.
\end{thm}
\begin{proof}
In the 1-periodic case, for all bounded Borel sets $B$,
$\Phi_f^n(B)$ a.s.\ coincides with $\Phi_f^\infty(B)$ for $n$ large enough,
so that, by letting $n$ tend to infinity in (\ref{cffpan}),
one gets that, for all $G\in {\cal N}$, the limit
\begin{equation}\label{ffpani}
\lim_n
{\cal P}_0^{f,n}[G] =
\frac 1 {\lambda |B|}
\int_{\mathbf N}
\int_{B}
\mathbf{1} \{\theta_t\phi\in G\}\,
\phi_f^\infty(\d t)\,
{\cal P} [\d\phi]
\end{equation}
exists.
Since $\Phi_f^\infty$ is a stationary point process with
the same intensity as the original point process
(because of the conservation of intensity),
${\cal P}_0^{f}$ is indeed the distribution
of $\Phi$ with respect to the Palm
distribution of $\Phi_f^\infty$. In addition,
for all $H\in {\cal F}$,
\begin{eqnarray*}
\mathbb{P}_{\Phi_f^\infty}[H]
&=&\frac 1 {\lambda |B|}
\int_\Omega \int_B
\mathbf{1} \{\theta_t\omega\in H\}\,
\Phi_f^\infty(\omega,\d t)\,
\mathbb{P} [\d\omega]\\
&=&\frac 1 {\lambda |B|}
\int_\Omega
\int_B
\Phi_f^\infty(\omega,\{t\})\,
\mathbf{1} \{\theta_t\omega\in H\}\,
\Phi(\omega,\d t)\,
\mathbb{P} [\d\omega]\\
&=&\frac 1 {\lambda |B|}
\int_\Omega
\int_B
\Phi_f^\infty(\theta_t\omega,\{0\})\,
\mathbf{1} \{\theta_t\omega\in H\}\,
\Phi(\omega,\d t)\,
\mathbb{P} [\d\omega]\\
&=&\mathbb{E}_\Phi\left[\Phi_f^\infty(\{0\})\mathbf{1}_H\right]=
\mathbb{E}_\Phi\left[ m_f^\infty(\Phi)\mathbf{1}_H\right],
\end{eqnarray*}
where the second equality stems from the facts that
$\overline{\Phi_f^\infty}\subset\Phi$ and that $\Phi$ is simple.
This proves (\ref{tleq1}) when $H=\Phi^{-1} G$.
Finally, since $f$ is 1-periodic, ${\cal P}_0^{f}$-almost surely $f\equiv 0$,
which proves that ${\cal P}_0^{f}$ is invariant under the action of $\theta_{f*}$.
\end{proof}
The point-map $g$ provides an example where
Theorem \ref{thmcyc1} holds.
See Subsection \ref{CHC}.
Note that similar statements hold in the $p$-periodic case.
In this case, $f^p$
is 1-periodic on the point processes $\{(\Phi,{\cal P}_0^{f,n})\}_{n=0}^{p-1}$,
and hence there exist at most $p$ point-map-probabilities.
Details on this fact are omitted.
\subsubsection{The Evaporation Case}
The next theorem shows that,
in contrast with Theorem~\ref{thmcyc1}, where the $f$-probability is
absolutely continuous with respect to the Palm probability,
there are cases where the $f$-probability and the Palm probability
are singular. This theorem is based on
the following lemma:
\begin{lem}\label{infimage}
Let $I$ be the set defined in (\ref{eq:III}).
If ${\cal Q}$ is a probability distribution on $\mathbf N^0$
which satisfies $\theta_{f*}{\cal Q}={\cal Q}$, then ${\cal Q}[I]=1$.
In this case, ${\cal Q}$-almost surely, there exists a bi-infinite
path (which can be a periodic orbit) which passes through the origin;
i.e., a sequence $\{y_i=y_i(\phi)\}_{i\in\mathbb Z}$ such that $y_0=0$
and $f(\phi,y_i)=y_{i+1}$.
\end{lem}
\begin{proof}
Let $M_n:=\{\phi\in\mathbf N^0;\ f^{-n}(\phi,0)=\emptyset\}$,
where $f^{-n}(\phi,\cdot)$ is
defined in Subsection \ref{sec:cpoms-ite}.
It is sufficient to show that, for all $n>0$, ${\cal Q}[M_n]=0$.
But the invariance of ${\cal Q}$ under the action of $\theta_{f*}$ gives
\begin{eqnarray*}
{\cal Q}[M_n] & = & \theta_{f*}^n{\cal Q}[M_n]={\cal Q}\left[(\theta_f)^{-n}M_n\right]
\\ & = & {\cal Q}\left[
\{\phi\in\mathbf N^0;\ f^{-n}(\phi,f^n(\phi,0))=\emptyset\}\right]=0.
\end{eqnarray*}
The second statement is clear if the orbit of $\phi$ under the action of $\theta_f$ is periodic; if not, it is an immediate
consequence of K\"onig's infinity lemma \cite{Ko90}.
\end{proof}
\begin{thm}
\label{theevasin}
If the stationary point process $(\Phi,\mathbb{P})$ evaporates under the action of $f$, and if the $f$-probability ${\cal P}_0^{f}$
of
${\cal P}_0=\Phi_* \mathbb{P}_\Phi$
exists and satisfies ${\cal P}_0^{f}= \theta_{f*}{\cal P}_0^{f}$,
then ${\cal P}_0^{f}$ is singular with respect to ${\cal P}_0$.
\end{thm}
\begin{proof}
The result is obtained by
combining Lemmas~\ref{infimage} and~\ref{eveq}.
\end{proof}
It is shown in Subsection \ref{Markovian}
that the assumptions of Theorem \ref{theevasin} are satisfied
by the strip point-map $s$
on Poisson point processes in ${\mathbb R}^2$.
\begin{rem}
The evaporation case is that where the conditioning representation
given in Equation (\ref{cffpan4}) is w.r.t.\ an event whose probability
under ${\cal P}_0$ tends to 0 as $n$ tends to infinity.
\end{rem}
\begin{rem}
The singularity property established in Theorem \ref{theevasin}
can be completed by the following observation:
under the assumptions of this theorem,
there is no finite and measurable $U=U(\phi)\in \phi$ (resp.\
$V=V(\phi)\in \phi$) such that
${\cal P}_0^{f} = (\theta_U)_* {\cal P}$
(resp.\ ${\cal P}_0^{f} = (\theta_V)_* {\cal P}_0$),
i.e., there is no {\em shift-coupling} giving ${\cal P}_0^{f}$ as a function
of ${\cal P}$ (resp.\ ${\cal P}_0$). The proof is by
contradiction: evaporation implies that,
${\cal P}$ (resp.\ ${\cal P}_0$) a.s.,
$\theta_x \phi\notin I$ for all $x\in \phi$.
But this, together with ${\cal P}_0^{f} = (\theta_U)_* {\cal P}$
(resp.\ ${\cal P}_0^{f} = (\theta_V)_* {\cal P}_0$),
implies that ${\cal P}_0^{f}[I]=0$, which contradicts the fact that,
under the assumptions of Theorem \ref{theevasin}, ${\cal P}_0^{f}[I]=1$.
\end{rem}
\subsection{Mecke's Point-Stationarity Revisited}
\subsubsection{Mecke's Invariant Measure Equation}
Consider the following point-map invariant measure equation:
\begin{equation}\label{inveq}
\theta_{f*}{\cal Q}={\cal Q},
\end{equation}
where the unknown is ${\cal Q}\in \mathbf M^1(\mathbf N^0)$.
From Mecke's point-stationarity Theorem \ref{point.sta},
if $\theta_f$ (or equivalently $F$) is bijective,
then the Palm probability ${\cal P}_0$ of any simple stationary point process
solves (\ref{inveq}). From Theorem \ref{thmcyc1},
if $f$ is 1-periodic on $(\Phi,\mathbb{P})$, then the $f$-probability
of $\Phi$ exists and, from the last statement of this theorem,
it satisfies (\ref{inveq}).
More precisely, a solution to (\ref{inveq}) was built
from the Palm probability ${\cal P}_0$ of $\Phi$ by Equation (\ref{tleq1}).
Equation (\ref{inveq}) will be referred to as
{\em Mecke's invariant measure equation}.
The bijective case shows that the solution of (\ref{inveq})
is not unique in general (all Palm probabilities are solutions).
A natural question is whether one can
construct a solution of (\ref{inveq}) from the Palm probability
of a stationary point process beyond
the bijective and the 1-periodic cases, for instance when
$\Phi$ evaporates under the action of $f$.
Consider the Ces\`aro sums
\begin{equation}\label{cesdef}
\widetilde{\cal P}_0^{f,n}:=\frac 1n \sum_{i=0}^{n-1}{\cal P}_0^{f,i}, \quad n\in
\Bbb{N}.
\end{equation}
When the limit of $\widetilde{\cal P}_0^{f,n}$ as $n$ tends to infinity exists
(w.r.t.\ the topology of $\mathbf M^1(\mathbf N^0)$), let
\begin{equation}\label{cesdefinfty}
\widetilde{\cal P}_0^{f}:=\lim_{n\to \infty}\frac
1n \sum_{i=0}^{n-1}{\cal P}_0^{f,i}.
\end{equation}
In general, $\widetilde{\cal P}_0^{f}$ is not an $f$-probability.
\begin{thm}\label{continv}
Assume there exists a subsequence $(\widetilde{\cal P}_0^{f,n_i})_{i=1}^\infty$ which converges to a probability measure $\widetilde{\cal P}_0^{f}$. If $\theta_{f*}$
is continuous at $\widetilde{\cal P}_0^{f}$, then $\widetilde{\cal P}_0^{f}$ solves Mecke's invariant
measure equation (\ref{inveq}).
\end{thm}
\begin{proof}
From (\ref{cpntocpn+1}),
\begin{eqnarray}\label{inv.thet.ces}
\theta_{f*}\widetilde{\cal P}_0^{f,n}-\widetilde{\cal P}_0^{f,n}&=&\frac 1n\left(\sum_{i=0}^{n-1}\theta_{f*}{\cal P}_0^{f,i}-\sum_{i=0}^{n-1}{\cal P}_0^{f,i}\right)\nonumber\\
&=&\frac 1n\left(\sum_{i=1}^n{\cal P}_0^{f,i}-\sum_{i=0}^{n-1}{\cal P}_0^{f,i}\right)=\frac 1n\left({\cal P}_0^{f,n}-{\cal P}_0\right).
\end{eqnarray}
Therefore, if the subsequence $(\widetilde{\cal P}_0^{f,n_i})_{i=1}^\infty$ converges in distribution w.r.t.\ the vague topology to a probability measure $\widetilde{\cal P}_0^{f}$, then (\ref{inv.thet.ces}) implies that the sequence $(\theta_{f*}\widetilde{\cal P}_0^{f,n_i})_{i=1}^\infty$ converges to $\widetilde{\cal P}_0^{f}$ as well. Now the continuity of $\theta_{f*}$ at $\widetilde{\cal P}_0^{f}$ implies that $(\theta_{f*}\widetilde{\cal P}_0^{f,n_i})_{i=1}^\infty$ converges to $\theta_{f*}\widetilde{\cal P}_0^{f}$, and therefore $\theta_{f*}\widetilde{\cal P}_0^{f}=\widetilde{\cal P}_0^{f}$.
\end{proof}
\begin{rem}
Here are some comments on the last theorem:
\begin{enumerate}
\item A sufficient condition for the existence of
a converging
subsequence in Theorem \ref{continv} is the relative compactness
condition of Lemma \ref{kalen}.
\item When the sequence $({\cal P}_0^{f,n})_{n=1}^\infty$
converges to ${\cal P}_0^{f}$, then $(\widetilde{\cal P}_0^{f,n})_{n=1}^\infty$
converges to ${\cal P}_0^{f}$ too, and hence
Theorem \ref{continv} implies the invariance of
the $f$-probability ${\cal P}_0^{f}$ under the action of $\theta_{f*}$,
whenever $\theta_{f*}$ has the required continuity.
\item
If $({\cal P}_0^{f,n})_{n=1}^\infty$ (rather than $(\widetilde{\cal P}_0^{f,n})_{n=1}^\infty$)
has convergent subsequences with different limits, i.e., if the set of
$f$-probabilities is not a singleton,
then none of the $f$-probabilities satisfies (\ref{inveq}).
However, it follows from Lemma \ref{lemcherche} in the appendix
that if $\theta_{f*}$ is continuous, and if $({\cal P}_0^{f,n})_{n=1}^\infty$ is
relatively compact, then the set of $f$-probabilities of ${\cal P}_0$ is
compact, non-empty and $\theta_{f*}$-invariant.
\item
The conditions listed in Theorem \ref{continv} are all
required.
There exist point-maps $f$ such that $(\widetilde{\cal P}_0^{f,n})_{n=1}^\infty$
has no convergent subsequence (see Subsection \ref{CEPM});
there also exist point-maps $f$ such that
$({\cal P}_0^{f,n})_{n=1}^\infty$ is convergent, but $\theta_{f*}$
is not continuous at the limit and ${\cal P}_0^{f}$ is not
invariant under the action of $\theta_{f*}$ (see Subsection \ref{sr}).
The use of Ces\`aro limits is required too, as there
exist point-maps $f$ such that
$({\cal P}_0^{f,n})_{n=1}^\infty$ is not convergent,
whereas $(\widetilde{\cal P}_0^{f,n})_{n=1}^\infty$ converges to a limit
which satisfies (\ref{inveq}) (see Subsection \ref{nonuniqueconv.sub.}).
\end{enumerate}
\end{rem}
\subsubsection{Continuity Condition}
When $\widetilde{\cal P}_0^{f}$ exists,
Theorem~\ref{continv} gives a sufficient condition for $\widetilde{\cal P}_0^{f}$
to solve (\ref{inveq}); however, since $\widetilde{\cal P}_0^{f}$ lives
in the space of probability measures on counting measures,
the verification of the continuity of $\theta_{f*}$ at $\widetilde{\cal P}_0^{f}$
can be difficult. The following propositions give more handy
tools to verify the continuity criterion.
\begin{prop}\label{f.cont}
If $\theta_f$ is $\widetilde{\cal P}_0^{f}$-a.s.\ continuous,
then $\theta_{f*}$ is continuous at $\widetilde{\cal P}_0^{f}$.
\end{prop}
\begin{proof}
The proof is an immediate consequence of
Proposition~\ref{cont.m.cont} in the appendix,
as the space $\mathbf N(\sp)$ is a Polish space.
\end{proof}
\begin{prop}\label{s.cont}
If $f$ is $\widetilde{\cal P}_0^{f}$-almost surely continuous,
then $\theta_f$ is $\widetilde{\cal P}_0^{f}$-almost surely continuous.
\end{prop}
\begin{proof}
One can verify that $\theta:\sp\times\mathbf N\to\mathbf N$ defined by $\theta(t,\phi)=\theta_t\phi$ is continuous. Also, $h:\mathbf N^0\to\sp\times\mathbf N$ defined by $h(\phi)=(f(\phi),\phi)$ is continuous at the continuity points of $f$ in $\mathbf N^0$. Hence $\theta_f=\theta\circ h$ is continuous at the continuity points of $f$.
\end{proof}
The converse of the statement of Proposition~\ref{s.cont} does not hold in
general (see Subsection \ref{CEPM}).
Combining the last propositions and Theorem \ref{continv} gives:
\begin{cor}
If the limit $\widetilde{\cal P}_0^{f}$ defined in (\ref{cesdefinfty}) exists and
if, in addition, $f$ is $\widetilde{\cal P}_0^{f}$-almost surely continuous,
then $\theta_{f*}$ is continuous at $\widetilde{\cal P}_0^{f}$, and $\widetilde{\cal P}_0^{f}$ then
solves Mecke's invariant measure equation (\ref{inveq}).
\end{cor}
In Theorem \ref{continv} and the last propositions,
the continuity of the mapping $\theta_{f*}$
is required at some specific point only.
The continuity of $f$ everywhere on $\mathbf N^0$
is a stronger requirement which does not hold in most interesting cases, as
shown by the following proposition
(see Appendix \ref{seccontpos} for a proof).
\begin{prop}\label{contpos}
For $d\geq 2$, there is no continuous point-map on the whole
space $\mathbf N^0$ other than the point-map of the identity point-shift,
i.e., the point-map which maps every $\phi\in\mathbf N^0$ to the origin.
\end{prop}
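The mechanism behind such discontinuities can be seen on a minimal example. The Python sketch below (illustrative only; it does not prove the proposition) perturbs a configuration around a tie of the "nearest point in the right half-plane" point-map $d$ of Subsection~\ref{dr}: as the perturbation crosses the tie, the image jumps from one point to the other, so the point-map is discontinuous at the tie configuration.
\begin{verbatim}
import numpy as np

def d(phi):
    # nearest point of phi in the open right half-plane (point-map d)
    cand = [p for p in phi if p[0] > 0]
    return min(cand, key=lambda p: np.hypot(*p))

# two candidate points at (almost) the same distance from the origin
for eps in [0.1, 0.01, 0.001, -0.001, -0.01, -0.1]:
    phi = [np.array([0.0, 0.0]),
           np.array([1.0 + eps, 1.0]),
           np.array([1.0, -1.0])]
    print(eps, d(phi))
# as eps crosses 0, the image jumps between the two candidates:
# d is discontinuous at the configuration with the exact tie
\end{verbatim}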
\subsubsection{Regeneration}
In certain cases, the existence of ${\cal P}_0^{f}$ can be established using the theory of regenerative processes \cite{As03}. This method can be used when the point process satisfies the \emph{strong Markov property}, such as Poisson point processes \cite{Mo05}.
Assume $f$ is a fixed point-map and $(\Phi,\mathbb{P}_\Phi)$ is the Palm version of a stationary point process.
For $n\geq 0$,
let
\begin{equation}
\label{eq:X}
X_n=X_n(f,\Phi)=f^n(\Phi)\in \sp,
\end{equation}
where $f^n$ is defined in (\ref{nthpom}). Note that, $\mathbb{P}_\Phi$-almost surely, $\Phi\in\mathbf N^0$ and hence $X_n$ is well defined.
Finally, denote $\theta_{X_n}\Phi$ by $\Phi_n$
(this point process should not be confused with
$\Phi^n_{f}$ defined in Subsection \ref{sec:cpoms-mul})
and by $\Phi_n^r$ the restriction of $\Phi_n$ to the ball of radius $r$
centered at the origin. Using this notation,
Lemma~\ref{convfrom0} gives $(\theta_{X_n})_*{\cal P}_0={\cal P}_0^{f,n}$ or,
equivalently, $(\Phi_n)_*\mathbb{P}_\Phi={\cal P}_0^{f,n}$.
The following theorem leverages classical results in the theory of regenerative processes.
\begin{thm}\label{regenver}
If, for all $r>0$, there exists a strictly increasing sequence of integer-valued random variables
$(\eta_i)_{i=1}^\infty$, which may depend on $r$, such that
\begin{enumerate}
\item $(\eta_{i+1}-\eta_i)_{i=1}^\infty$ is a sequence of i.i.d.\ aperiodic random variables with finite mean,
\item the sequence $Y_i:=(\Phi_{\eta_i}^r,\Phi_{\eta_i+1}^r, \ldots, \Phi_{\eta_{i+1}-1}^r)$ is an i.i.d.\ sequence and $Y_{i+1}$ is independent of $\eta_1,\ldots,\eta_i$,
\end{enumerate}
then the $f$-probability ${\cal P}_0^{f}$ exists and, for all bounded and measurable
functions $h$ and for ${\cal P}_0$-almost all $\phi$,
\begin{equation}\label{regeneq}
\lim_{k\to\infty} \frac 1 k \sum_{n=0}^{k-1} h(\theta_f^n\phi) =
\int_{\mathbf N^0} h(\psi)\, {\cal P}_0^{f}(\d\psi).
\end{equation}
If, in addition, for all $n$, $f$ is ${\cal P}_0^{f,n}$-almost surely continuous, then ${\cal P}_0^{f}$ is invariant under the action of $\theta_{f*}$ and $\theta_f$ is ergodic on $(\mathbf N^0,{\cal N}^0,{\cal P}_0^{f})$.
\end{thm}
\begin{proof}
In order to prove the weak convergence of ${\cal P}_0^{f,n}$ to ${\cal P}_0^{f}$, it is sufficient to show the convergence in all balls of integer radius $r$ around the origin. Note that ${\cal P}_0^{f,n}$ is the distribution of $\Phi_n$ and hence, to prove the existence of ${\cal P}_0^{f}$, it is sufficient to prove the convergence of the distribution of $\Phi_n^r$ for all $r\in \mathbb{N}$.
Note that the sequence $(\eta_i)_{i=1}^\infty$ forms a sequence of regeneration times for the configurations in $B_r(0)$. Since $\mathbf N^0$ is metrizable (cf.\ \cite{As03}, Theorem~B.1.2), the distribution of $\Phi_n^r$ converges to a distribution ${\cal P}_{0,r}^f$ on configurations of points in $B_r(0)$ satisfying
\begin{equation}\label{regeneq2}
\frac 1{{\cal E}_0[\eta_2-\eta_1]}\,{\cal E}_0\left[\sum_{n=\eta_1}^{\eta_2-1} h(\Phi_n^r)\right] =
\int_{\mathbf N^0} h(\psi\cap B_r(0))\, {\cal P}_{0,r}^{f}(\d(\psi\cap B_r(0))),
\end{equation}
for all $h:\mathbf N^0\to \Bbb{R}^+$.
Since the distributions $({\cal P}_{0,r}^f)_{r=1}^\infty$ are the limits of the distributions of $(\Phi_n^r)_{r=1}^\infty$, they satisfy the consistency condition of Kolmogorov's extension theorem, and therefore there exists a probability distribution ${\cal P}_0^{f}$ on $\mathbf N^0$ having ${\cal P}_{0,r}^f$ as the distribution of its restriction to $B_r(0)$.
This proves the existence of the $f$-probability.
The left-hand side of (\ref{regeneq2}) can be replaced by
an ergodic average
(cf.\ \cite{As03}, Theorem B.3.1); i.e., for all $r\in\mathbb{N}$ and
for ${\cal P}_0$-almost all $\phi\in\mathbf N^0$,
\begin{eqnarray*}\lim_{k\to\infty}\frac 1{k}\sum_{n=0}^{k-1} h(\theta_f^n\phi\cap B_r(0)) &=&
\int_{\mathbf N^0} h(\psi\cap B_r(0))\, {\cal P}_{0,r}^{f}(\d(\psi\cap B_r(0)))\\
&=&
\int_{\mathbf N^0} h(\psi\cap B_r(0))\, {\cal P}_0^{f}(\d\psi).
\end{eqnarray*}
Finally, since $r$ ranges over the integers, the last equation gives (\ref{regeneq}) for ${\cal P}_0$-almost all $\phi$.
By taking $h$ to be the indicator of the continuity points of $f$, the ${\cal P}_0^{f,n}$-almost sure continuity of $f$ and (\ref{regeneq}) give its ${\cal P}_0^{f}$-almost sure continuity, and hence the continuity of $\theta_{f*}$ at ${\cal P}_0^{f}$. Therefore ${\cal P}_0^{f}$ is invariant under the action of $\theta_{f*}$. Ergodicity is clear from regeneration.
\end{proof}
The main technical difficulty in using Theorem~\ref{regenver}
consists in finding an appropriate sequence $(\eta_i)_{i=1}^\infty$.
Proposition~\ref{fstpa} below leverages the strong Markov property
of Poisson point processes to find appropriate sequences and prove
the existence of the point-map probability for the point-map $s$ on
homogeneous Poisson point processes.
Proposition~\ref{fuapa} uses the same approach to show that
the same holds true for the directional point-map $d_\alpha$.
Other examples can be found
in Section \ref{proofs}.
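The regeneration structure used for the strip point-map can be checked numerically. The Python sketch below (a sanity check under the same assumed strip convention as before; window size, seed and the cap on the number of steps are ours) follows the orbit of the point closest to the origin, records the horizontal increments $p_n$, and counts the indices where $p_n>2r$, which serve as regeneration times; the empirical mean and standard deviation of the increments should both be close to $1/(2\lambda)$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(3)
lam, W, H = 1.0, 300.0, 80.0
pts = rng.uniform([0.0, -H / 2], [W, H / 2],
                  size=(rng.poisson(lam * W * H), 2))

def strip_step(p, cur):
    m = (p[:, 0] > cur[0]) & (np.abs(p[:, 1] - cur[1]) <= 1.0)
    if not m.any():
        return None
    j = np.where(m)[0]
    return p[j[np.argmin(p[j, 0])]]

cur, incr = np.array([0.0, 0.0]), []   # the origin stands in for the Palm point
while len(incr) < 300:
    nxt = strip_step(pts, cur)
    if nxt is None:
        break
    incr.append(nxt[0] - cur[0])       # p_n = horizontal jump
    cur = nxt

p = np.array(incr)
print(len(p), p.mean(), p.std())       # both close to 1/(2*lam) = 0.5
r = 0.5
print("regeneration times observed:", np.count_nonzero(p > 2 * r))
\end{verbatim}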
\section{More on Examples}\label{proofs}
\subsection{Strip Point-Shift} \label{sr}
Let ${\cal P}_0$ denote the Palm distribution of the homogeneous Poisson
point process on $\R^2$. It follows from results in \cite{FeLaTh04}
(in Theorem 3.1 of this reference,
the authors proved that the graph of this point-shift has
finite branches, which is equivalent to evaporation)
that ${\cal P}_0$ evaporates under the action of the strip point-map $s$.
It is also shown in Proposition~\ref{fstpa} below that ${\cal P}_0$
admits a unique $s$-probability which
satisfies the continuity requirements of Theorem \ref{continv}.
This point-shift also allows one to illustrate
the need for the continuity property in Theorem \ref{continv}.
Consider the setup of Proposition~\ref{fstpa}.
For all $\phi\in\mathbf N^0$ such that the origin has infinitely many pre-images,
change the definition of the point-map $s$ as follows: it is
now the closest point in the
right half-plane which has no other point of $\phi$ in the
ball of radius $1$ around it.
Due to evaporation, this changes the definition of $s$ on a set
of measure zero under ${\cal P}_0^{s,n}$, for all $n\in\mathbb{N}$, and hence
the sequence $({\cal P}_0^{s,n})_{n=1}^\infty$
still converges
to the same limit as that defined in the proof of Proposition~\ref{fstpa}.
But under the action of the new $s$, $(\theta_{s})_*{\cal P}_0^{s}$
is not equal to ${\cal P}_0^{s}$, due to the facts
that (i) the origin has infinitely many pre-images ${\cal P}_0^{s}$-a.s.\ and (ii)
under $(\theta_{s})_*{\cal P}_0^{s}$, there is hence no other point of the point
process in the ball of radius $1$ around the origin.
This does not agree with the fact that, in the right half-plane,
the distribution of ${\cal P}_0^{s}$ is that of a Poisson point process (see
the proof of Proposition~\ref{fstpa}). Hence,
${\cal P}_0^{s}$ is not invariant under the action of $(\theta_{s})_*$.
\subsection{Directional Point-Shift}
\label{dr}
The directional point-shift was introduced in \cite{BaBo07}.
Let $e_1$ be the first coordinate unit vector.
The \emph{directional point-map $d$}
maps the origin to the nearest
point in the right half-space determined by $e_1$,
i.e., for all $\phi\in\mathbf N^0$,
\begin{equation}\label{drd}
d(\phi):=\argmin\{\|y\|;\ y\in \phi,\ y\cdot e_1>0\}.
\end{equation}
The associated point-shift will be denoted by $D$.
The directional point-map on $\R^2$ with deviation limit $\alpha$,
$d_{\alpha}$, is similar to $d$, except that the
point $y$ is chosen in the cone with angle $2\alpha$ and central
direction $e_1$ rather than in a half-plane; i.e.,
\begin{equation}\label{drda}
d_{\alpha}(\phi):=\argmin\left\{\|y\|;\ y\in \phi,\ \frac{y}{\|y\|}\cdot e_1>\cos \alpha\right\}.
\end{equation}
When $\alpha=\frac\pi 2$, one has $d_{\alpha}=d$.
Its point-shift is denoted by $D_{\alpha}$.
When $\alpha<\pi/2$, it can be shown that
the homogeneous Poisson point process on $\R^2$ evaporates under
the action of $d_\alpha$, and, from Proposition~\ref{fuapa} below,
it admits a unique $d_\alpha$-probability which
satisfies the continuity requirements of Theorem \ref{continv}.
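The directional point-map is straightforward to implement. The following Python sketch (illustrative only; window, seed and helper names are ours) follows the orbit of the origin under $d_\alpha$ on a simulated Poisson pattern and checks at every step that the image indeed lies in the cone of (\ref{drda}).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(4)
alpha = np.pi / 6
pts = rng.uniform([0.0, -60.0], [250.0, 60.0], size=(rng.poisson(30000), 2))

def d_alpha(p, x):
    # nearest point of p in the cone of half-angle alpha, direction e1, apex x
    v = p - x
    norm = np.hypot(v[:, 0], v[:, 1])
    ok = (norm > 0) & (v[:, 0] > norm * np.cos(alpha))
    if not ok.any():
        return None
    j = np.where(ok)[0]
    return p[j[np.argmin(norm[j])]]

x = np.array([0.0, 0.0])
for n in range(10):
    y = d_alpha(pts, x)
    if y is None:
        break
    step = y - x
    inside = step[0] > np.hypot(*step) * np.cos(alpha)
    print(n, np.round(y, 2), inside)   # each image lies inside the cone
    x = y
\end{verbatim}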
\subsection{Regeneration} \label{Markovian}
This subsection is focused on the existence of point-map-probabilities for
point-maps defined on Poisson point processes.
It is based on Theorem~\ref{regenver} and is illustrated by two examples.
\begin{prop}\label{fstpa}
If $s$ is the strip point-map and
$(\Phi,\mathbb{P})$ is a homogeneous Poisson point process
in the plane with distribution ${\cal P}$, then
the $s$-probability exists and is given by (\ref{regeneq}).
In addition, for all $n$, $s$ is ${\cal P}_0^{s,n}$-almost surely continuous.
Therefore the action of $(\theta_{s})_*$ preserves ${\cal P}_0^{s}$ and is ergodic.
\end{prop}
\begin{proof}
The random vector $X_1=X_1(\Phi)$ defined in Equation (\ref{eq:X})
depends only on the points of $\Phi$
which belong to the rectangle $R_0(\Phi)=[0,x_1]\times[-1,1]$,
where $x_1$ is the first coordinate of the leftmost point of
$\Phi\cap T(0)$. It is easy to verify that $R_0(\Phi)$
is a stopping set (cf.\ \cite{Mo05} and \cite{Zu06}).
Let $R_n(\Phi)$ be the rectangle which is needed to determine the
image of the origin in $\theta_{X_n}\Phi$ under the action of $S$,
and let $R_n+X_n$ be the translation of the set $R_n$
by the vector $X_n$.
Then it is clear that
\begin{equation}\label{unv.dif}
U_k=\bigcup_{n=0}^k \left(R_n+X_n\right)
\end{equation}
is also a stopping set.
As a consequence, the strong Markov property of the Poisson point
process (cf.\ \cite{Zu06}) implies that, given $X_0,\ldots,X_n$,
the point process on the right half-plane of $X_n$ is distributed
as the original Poisson point process. Let
\begin{equation*}
p_n=\pi_1(X_{n+1}-X_n),
\end{equation*}
where $\pi_1$ is the projection on the first coordinate.
Since ${\cal P}_0^{s,n}$, restricted to the right half-plane,
is the distribution of a Poisson point process, and since
the sequence $(p_n)_{n=1}^\infty$ depends only on the
configuration of points in the right half-plane,
$(p_n)_{n=1}^\infty$ is a sequence of i.i.d.\ exponential
random variables with parameter $2\lambda$, where $\lambda$
is the intensity of the point process.
Also, if $\eta_i$ is the integer $n$ such that, for the $i$-th time,
$p_{n}$ is larger than $2r$, then the sequence
$(\eta_i)_{i=1}^\infty$ forms a sequence of regeneration
times for the configuration of points in $B_r(0)$.
Combining this with the distribution of $p_n$ gives that
$(\eta_i)_{i=1}^\infty$ satisfies the required
conditions of Theorem~\ref{regenver}.
Finally, consider the discontinuity points of $s$. Let $\phi\in\mathbf N^0$ with
$s(\phi)=x=(x_1,x_2)$. It is shown below that if
$\phi$ is a discontinuity point of
$s$, then either $x$ lies on the boundary of $T(0)$ or there is a point
of $\phi$ other than the origin and $x$ which lies on the perimeter of
the rectangle $[0,x_1]\times[-1,1]$. This proves that, for all $n$,
the discontinuity points of $s$ have ${\cal P}_0^{s,n}$-measure zero. To prove the
continuity claim, assume that $\phi$ satisfies none of the above conditions.
Then there exists $\epsilon>0$ such that $x_1>\epsilon$,
$x_2\in[-(1-\epsilon),1-\epsilon]$ and there is no other point of $\phi$
in $[-\epsilon,x_1+2\epsilon]\times[-1-\epsilon,1+\epsilon]$. Therefore, for
$\psi\in\mathbf N^0$ close enough to $\phi$ in the vague topology, there is a
point $y=(y_1,y_2)\in\psi$ in an $\epsilon$-neighborhood of $x$, which gives
$y\in(0,x_1+\epsilon)\times(-1,1)$, and since there is no point of
$\psi$ other than $0$ and $y$ in $[0,x_1+\epsilon]\times[-1,1]$,
$s(\psi)=y$, which proves the claim.
Therefore all conditions of Theorem~\ref{regenver} are satisfied,
which proves the proposition.
\end{proof}
Note that the proof shows that the distribution ${\cal P}_0^{s}$,
restricted to the right half-plane, is that of a homogeneous Poisson point process with the original intensity.
\begin{prop}\label{fuapa}
Let $d_{\alpha}$ be the directional point-map defined in Subsection \ref{dr},
with $\alpha<\pi/2$. Under the assumptions of Proposition~\ref{fstpa},
the $d_{\alpha}$-probability exists and is given by (\ref{regeneq}).
In addition, for all $n$, $d_{\alpha}$ is ${\cal P}_0^{d_{\alpha},n}$-almost surely
continuous, and hence the action of $(\theta_{d_{\alpha}})_*$
preserves ${\cal P}_0^{d_{\alpha}}$ and is ergodic.
\end{prop}
\begin{proof}
The proof is similar to that of Proposition~\ref{fstpa}, but more subtle.
It uses the same notation as that of Theorem~\ref{regenver}.
Let $C^{\alpha}$ denote the cone with angle $2\alpha$, central direction $e_1$,
and apex at the origin.
Let $X_1(\mathbf Mathbb Phi)$ be the point of $C^{\alpha}\cap\mathbf Mathbb Phi$ which is
the closest to the origin (other than the origin itself).
Let $C^{\al}_0(\mathbf Mathbb Phi)$ be the closed subset of $C^{\alpha}$ consisting
of all points of $C^{\alpha}$ which are not
farther to the origin than $X_1(\mathbf Mathbb Phi)$. This set will
be referred to as a bounded cone below.
One may verify that $C^{\al}_0(\mathbf Mathbb Phi)$ is a stopping set and that
$X_1$ is determined by $C^{\al}_0$.
Let $C^{\al}_n(\mathbf Mathbb Phi)$ be the closed bounded cone which
is needed to determine the image of the origin in
$\theta_fa_{X^gui}\mathbf Mathbb Phi$ under the action of $d_{\al}$.
It is easy to verify that
\mathbf Mathbb Eq{U_k=\bigcup_{n=0}^k \lpC^{\al}_n+X^gui\rp,}
is also a stopping set. It is a simple geometric fact that
\mathbf Mathbb Eq{\label{empty.inetr}U_{n-1}\cap C^{\mathbf Mathbb Pi/2-\alpha}(X^gui)=\{X^gui\},}
and as a consequence, given $U_{n-1}$, the point process in
$C^{\mathbf Mathbb Pi/2-\alpha}+X^gui$ is distributed as the original point process.
This fact together with the facts that $U_n$ is a stopping set
and $C^{\al}_n$ has no point of the point process other than $X^gui$
and $X_{n+1}$, give that, in the $n$-th step, with probability at least
$\mathbf Min\{1,(\mathbf Mathbb Pi/2-\alpha)/(\alpha)\}$,
$X_{n+1}$ is in $C^{\mathbf Mathbb Pi/2-\alpha}(X^gui)$.
Let $\mathbf Mathbb Eta_i$ be the $i$-th time for which
$X_{n+1}\in C^{\mathbf Mathbb Pi/2-\alpha}(X^gui)$ and has a distance more than $2r$
from the edges of $C^{\mathbf Mathbb Pi/2-\alpha}(X^gui)$.
The Poisson distribution of points in $C^{\mathbf Mathbb Pi/2-\alpha}(X^gui)$
gives that the random variables $\mathbf Mathbb Eta_{i+1}-\mathbf Mathbb Eta_i$
are stochastically bounded by an exponential random variable
and hence they satisfy all requirements of Theorem~\ref{regenver}.
As in the case of the strip \mathbf Mathbb Pom, it can be shown that if $d_\alpha$ is
not continuous at $\mathbf Mathbb Phi\in\mathbf N^0$ then either there is no point in the
interior of $C^{\al}$ or there is a point on the perimeter of $C^{\al}_0(\mathbf Mathbb Phi)$.
Note that since $U_{n-1}$ is a stopping set and
$(C^{\al}+X^gai)\capU_{n-1}$ has no point of the point process
other than $X^gui$, $X_{n+1}$ is distributed as in a Poisson point
process in $C^{\al}_n+X^gai$ given the fact that some parts contain no point.
Therefore since the discontinuities of $d_{\alpha}$ are of probability
zero under the Poisson distribution, they are of probability zero under
all ${\cal P}_0^{d_{\alpha},n}$ and hence
Theorem~\ref{regenver} proves the statements of the proposition.
\mathbf Mathbb End{proof}
The statement of Proposition~\ref{fuapa} also holds in the case
$\alpha=\pi/2$ and can be proved using ideas similar to those
in the proof for $\alpha<\pi/2$. However, the technical details
of the proof in this case would obscure the main idea, and this case
is therefore omitted from the proposition.
\subsection{Condenser and Expander Point-Shifts}
\label{CEPM}
Assume each point $x\in\phi$ is marked with
\begin{equation*}
\nu_p(x)=\#(\phi\cap B_1(x)) \quad (\text{respectively }
\nu_m(x)=\sup\{r>0:\ \phi\cap B_r(x)=\{x\}\}),
\end{equation*}
where $B_r(x)=\{y\in \R^2:\ \|x-y\|<r\}$.
Note that $\nu_p(x)$ and $\nu_m(x)$ are always positive.
The \emph{condenser point-shift} $P$
(respectively \emph{expander point-shift} $M$) acts on
counting measures
as follows: it goes from each point $x\in\phi$
to the closest point $y$ such that $\nu_p(y)\geq 2\nu_p(x)$
(respectively $\nu_m(y)\geq 2\nu_m(x)$).
It is easy to verify that both point-shifts are compatible
and almost surely well-defined on the homogeneous Poisson point process.
Poisson point processes evaporate under the action
of both point-shifts $P$ and $M$.
The condenser point-map provides
an example where no $f$-probability exists.
Let $(\text{id},{\cal P})$ be the Poisson point process
with intensity one on $\R^2$ and let $p$ be the condenser point-map. Clearly
\begin{equation*}
{\cal P}_0^{p,n}[\phi(B_1(0))\geq 2^n]=1.
\end{equation*}
Therefore the tightness criterion is not satisfied, and thus
there is no convergent subsequence of $({\cal P}_0^{p,n})_{n=1}^\infty$.
Similarly, the expander point-map allows
one to show that there is no converse to Proposition \ref{s.cont}.
More precisely, $\theta_m$ is continuous
${\cal P}_0^m$-almost surely, but the point-map is ${\cal P}_0^m$-almost
surely discontinuous. Hence
the converse of the statement of Proposition~\ref{s.cont}
does not hold in general.
Consider $m$ on the homogeneous Poisson point process.
One can verify that $({\cal P}_0^{m,n})_{n=1}^\infty$
converges to the probability measure concentrated on the
counting measure $\delta_0$ with a single point at the origin.
In this example, $\theta_m$ is ${\cal P}_0^m$-a.s.\ continuous.
This follows from the fact that, when looking at the point process
in any bounded subset of $\sp$, this subset will be included in some ball
of radius $r$ around the origin, and therefore
the configuration of points in it will be constant
(only one point at the origin) after finitely many applications of $\theta_m$.
But the point-map $m$ makes larger and larger steps, and hence the sequence of
laws of $m$ under ${\cal P}_0^{m,n}$ diverges.
Hence $m$ is almost surely not continuous at the realization
$\delta_0$ on which ${\cal P}_0^{m}$ is concentrated.
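The blow-up of $\nu_p$ along a condenser orbit is easy to see on data. The Python sketch below (illustrative only; in a finite window the orbit stops after a few steps precisely because configurations with twice the local count become rarer and rarer, which mirrors the failure of tightness) follows one orbit of the condenser point-map and prints how $\nu_p$ at least doubles and how the jumps grow.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(5)
pts = rng.uniform(0, 80, size=(rng.poisson(6400), 2))
N = len(pts)

def dist_to(i):
    return np.hypot(pts[:, 0] - pts[i, 0], pts[:, 1] - pts[i, 1])

# nu_p(x) = number of points of the pattern in B_1(x), x included
nu_p = np.array([(dist_to(i) < 1.0).sum() for i in range(N)])

i = 0                      # start the condenser orbit from an arbitrary point
while True:
    ok = nu_p >= 2 * nu_p[i]
    if not ok.any():
        print("no admissible point left in this window; nu_p =", nu_p[i])
        break
    d = dist_to(i)
    j = np.where(ok)[0]
    nxt = j[np.argmin(d[j])]
    print("nu_p:", nu_p[i], "->", nu_p[nxt], "  jump length:",
          round(float(d[nxt]), 1))
    i = nxt
\end{verbatim}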
\subsection{Closest Hard Core Point-Shift}
\label{CHC}
By definition, the image of $x\in \phi$ under the
\emph{closest hard core point-shift} $H$ is the closest point $y$
of $\phi$ (including $x$ itself) such that $\phi(B_1(y))=1$.
Its point-map will be denoted by $h$.
The point-map $h$ is 1-periodic.
It provides an illustration of Theorem \ref{thmcyc1}.
Consider $h$ acting on a stationary Poisson point process
of intensity one in the plane.
For a simple counting measure $\phi$,
let $\Psi(\phi)$ denote the sub-point process of $\phi$
made of the points $y$ of $\phi$ such that $\phi(B_1(y))=1$.
If $\phi$ is chosen w.r.t.\ ${\cal P}$, then $\Psi(\phi)$
is also a stationary point process.
Let ${\cal Q}_0$ denote the Palm probability of $\Psi(\phi)$.
Then ${\cal P}_0^{h}$ is absolutely continuous w.r.t.\ ${\cal Q}_0$ and its
Radon--Nikodym derivative at each $\Psi(\phi)\in\mathbf N^0$ is proportional
to the number of points of $\phi$ in the Voronoi cell of the
origin in $\Psi(\phi)$.
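The 1-periodicity of $h$ and the multiplicity weights of Theorem~\ref{thmcyc1} can be computed explicitly on a finite pattern. The Python sketch below (a toy illustration with the usual boundary caveats) builds $\Psi(\phi)$, checks that $h\circ h=h$, and tabulates the multiplicities $m^\infty_h$, whose mean equals the intensity ratio of $\phi$ and $\Psi(\phi)$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(6)
pts = rng.uniform(0, 60, size=(rng.poisson(3600), 2))
N = len(pts)

def dists(i):
    return np.hypot(pts[:, 0] - pts[i, 0], pts[:, 1] - pts[i, 1])

# Psi(phi): points with no other point of phi within distance 1
isolated = np.array([(dists(i) < 1.0).sum() == 1 for i in range(N)])
psi = np.where(isolated)[0]

# h maps every point to the closest point of Psi (possibly itself)
h = np.array([psi[np.argmin(dists(i)[psi])] for i in range(N)])

print("h is idempotent:", np.array_equal(h[h], h))      # 1-periodicity
m = np.bincount(h, minlength=N)[psi]                     # multiplicities m_h^infty
print("mean multiplicity on Psi:", m.mean(), "=", N / len(psi))
print("multiplicity histogram:", np.bincount(m))
# the h-probability re-weights the Palm probability of Psi by these weights
\end{verbatim}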
\subsection{Quadri-Void Grid Point-Shift}\label{nonuniqueconv.sub.}
Let $\psi=\mathbb Z\backslash 4\mathbb Z$, i.e., the set of integers which are not multiples
of $4$. If $U$ is a uniform random variable in $[0,4)$, then $\psi+U$
is a stationary point process on the real line, which will be called the
quadri-void grid below. The Palm distribution of this point process
puts mass $\frac 13$ on each of $\theta_{1}\psi$, $\theta_{2}\psi$ and $\theta_{3}\psi$.
Let $q$ be the point-map defined by
\begin{equation*}
q(\theta_{1}\psi)=2, \quad
q(\theta_{2}\psi)=1 \quad\text{and}\quad q(\theta_{3}\psi)= -2.
\end{equation*}
For odd values of $n>0$, one has
\begin{equation*}
{\cal P}_0^{q,n}[\phi=\theta_{3}\psi]=\frac 23,\quad {\cal P}_0^{q,n}[\phi=\theta_{1}\psi]=\frac 13,
\end{equation*}
whereas for even values of $n>0$,
\begin{equation*}
{\cal P}_0^{q,n}[\phi=\theta_{3}\psi]=\frac 13,\quad {\cal P}_0^{q,n}[\phi=\theta_{1}\psi]=\frac 23.
\end{equation*}
Therefore $({\cal P}_0^{q,n})_{n=1}^\infty$
has two convergent subsequences with different limits,
one for even and one for odd values of $n$,
and none of these limits is invariant under the action of
$(\theta_q)_*$.
However, the sequence $(\widetilde {\cal P}^{q,n}_0)_{n=1}^\infty$
converges to a limit $\widetilde {\cal P}^q_0$ which is the mean
of the odd and even $q$-probabilities, i.e.,
\begin{equation*}
\widetilde {\cal P}^q_0[\phi=\theta_{3}\psi]=\frac 12,\quad \widetilde {\cal P}^q_0[\phi=\theta_{1}\psi]=\frac 12,
\end{equation*}
and it is invariant under the action of $(\theta_q)_*$.
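Since this example has only three relevant configurations, the image Palm probabilities and their Ces\`aro averages can be computed exactly. The Python sketch below (exact arithmetic; the three states record which shifted copy of $\psi$ is seen from the origin, and mass is pushed forward as in the image Palm construction, multiplicities included) reproduces the odd/even values above, the Ces\`aro limit $(\frac12,0,\frac12)$, and its invariance.
\begin{verbatim}
from fractions import Fraction

# states 1,2,3: which of theta_1 psi, theta_2 psi, theta_3 psi is seen
step = {1: 3, 2: 3, 3: 1}   # induced by q(theta_1)=2, q(theta_2)=1, q(theta_3)=-2

def push(p):
    out = {s: Fraction(0) for s in (1, 2, 3)}
    for s, mass in p.items():
        out[step[s]] += mass
    return out

p = {s: Fraction(1, 3) for s in (1, 2, 3)}     # Palm distribution P_0
cesaro = {s: Fraction(0) for s in (1, 2, 3)}
for n in range(1, 13):
    p = push(p)
    for s in (1, 2, 3):
        cesaro[s] += p[s]
    if n in (1, 2):
        print(n, dict(p))                       # (1/3,0,2/3) then (2/3,0,1/3)
avg = {s: cesaro[s] / 12 for s in (1, 2, 3)}
print("Cesaro average:", avg)                   # (1/2, 0, 1/2)
print("invariant under push-forward:", push(avg) == avg)
\end{verbatim}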
\appendix
\section{Random Measures}\label{RandMeas}
This section summarizes the results about random measures which are used in this paper, so as to keep the paper self-contained. The interested reader should refer to \cite{Ka76,Ka02}. No proofs are given.
Let $S$ be a locally compact (all points have a compact neighborhood), second countable (there is a countable base) Hausdorff space. In this case, $S$ is known to be Polish, i.e., there exists some separable and complete metrization $\rho$ of $S$.
Let ${\cal B}(S)$ be the Borel $\sigma$-algebra of $S$ and ${\cal B}_b(S)$ be the set of all bounded elements of ${\cal B}(S)$, i.e., all $B\in{\cal B}(S)$ such that the closure of $B$ is compact. Let $\mathbf M(S)$ be the class of all Radon measures on $(S,{\cal B}(S))$, i.e., all measures $\mu$ such that $\mu B<\infty$ for all $B\in{\cal B}_b(S)$, and let $\mathbf N(S)$ be the subspace of all $\Bbb{N}$-valued measures in $\mathbf M(S)$.
The elements of $\mathbf N(S)$ are {\em counting measures}.
For all $\mu$ in $\mathbf M(S)$, define
\begin{equation*}
{\cal B}_b(S)^\mu:=\{B\in{\cal B}_b(S);\ \mu(\partial B)=0\}.
\end{equation*}
Let $C_b(S)$ (respectively $C_c(S)$) be the class of all continuous and bounded (respectively continuous and compactly supported) functions $h:S\to\R^{+}$. Let
\begin{equation*}
\mu h:=\int_S h(x)\,\mu(\d x),
\end{equation*}
which equals $\sum_{x\in\mu}h(x)$ when $\mu$ is a counting measure. Note that in the summation one takes the multiplicity of points into account.
The class of all finite intersections of $\mathbf M(S)$-sets (or $\mathbf N(S)$-sets) of the form $\{\mu:\ s<\mu h<t\}$, with real $s$ and $t$ and arbitrary $h\in C_c(S)$, forms a base of a topology on $\mathbf M(S)$ (respectively $\mathbf N(S)$) which is known as the \emph{vague topology}. In the vague topology, $\mathbf N(S)$ is closed in $\mathbf M(S)$ (\cite{Ka76}, p.\ 94, A 7.4.).
A necessary and sufficient condition for convergence in this topology (\cite{Ka76}, p.\ 93) is:
\begin{equation*}
\mu_n\tov\mu \Leftrightarrow \forall h\in C_c(S),\ \mu_n h\to\mu h.
\end{equation*}
If one considers the subspace of all bounded measures in $\mathbf N(S)$, one may replace $C_c(S)$ by $C_b(S)$. This leads to the \emph{weak topology}, for which
\begin{equation*}
\mu_n\tow \mu \Leftrightarrow \forall h\in C_b(S),\ \mu_n h\to\mu h.
\end{equation*}
The convergence in distribution of random variables
$\xi_1,\xi_2,\ldots$, defined on $(\Omega, \cal F, \mathbb{P})$
and taking their values in $(S,{\cal B}(S))$,
to the random element $\xi$ is defined as follows:
\begin{equation*}
\xi_n\tod \xi\Leftrightarrow(\xi_n)_*\mathbb{P}\tow (\xi)_*\mathbb{P}.
\end{equation*}
The next lemma describes the relation between convergence in the vague topology and in the weak one.
\begin{lem}[\cite{Ka76}, p.\ 95, A 7.6.]\label{weak.vague}
For all bounded $\mu,\mu_1,\mu_2,\ldots\in\mathbf M(S)$, one has
\begin{equation*}
\mu_n\tow\mu\Leftrightarrow\mu_n\tov\mu \text{ and } \mu_nS\to\mu S.
\end{equation*}
\end{lem}
According to Lemma~\ref{weak.vague}, when discussing the convergence of probability measures, there is no difference between vague and weak convergence.
The following proposition is a key point in the development of the theory of random measures and random point processes (\cite{Ka76}, p.\ 95, A 7.7.).
\begin{prop}\label{vague.p}
Both $\mathbf M(S)$ and $\mathbf N(S)$ are Polish in the vague topology. Also, the subspaces of bounded measures in $\mathbf M(S)$ and $\mathbf N(S)$ are Polish in the weak topology.
\end{prop}
Proposition~\ref{vague.p} allows one to define measures on $\mathbf M(S)$ or $\mathbf N(S)$, which are Polish spaces, and to use for them the theory available for $S$. If $\cal M$ (respectively ${\cal N}$) is the $\sigma$-algebra generated by the vague topology on $\mathbf M(S)$ (respectively $\mathbf N(S)$), a {\em random measure} (respectively {\em random point process}) on $S$ is simply a random element of $(\mathbf M(S),{\cal M})$ (respectively $(\mathbf N(S),{\cal N})$). Note that a random point process is a special case of a random measure.
The next theorem and lemmas give handy tools to deal with convergence in distribution of random measures on $S$.
\begin{thm}[\cite{Ka76}, p.\ 22, Theorem 4.2.]
If $\mu, \mu_1,\mu_2,\ldots$ are random measures on $S$ (i.e., random elements of $(\mathbf M(S),{\cal M})$), then
\begin{equation*}
\mu_n\tod\mu\Leftrightarrow\mu_n h\tod\mu h, \quad \forall h\in C_c(S).
\end{equation*}
\end{thm}
\begin{lem}[\cite{Ka76}, p.\ 22, Lemma 4.4.]\label{lem:discont.meas.zero}
If $\mu,\mu_1,\mu_2,\ldots$ are random measures on $S$ satisfying $\mu_n\tod\mu$, then $\mu_nh\tod\mu h$ for every bounded measurable function $h:S\to\R^+$ with bounded support satisfying $\mu (D_h)=0$ almost surely, where $D_h$ is the set of all discontinuity points of $h$. Furthermore,
\begin{equation*}
(\mu_n B_1,\ldots, \mu_n B_k)\tod (\mu B_1, \ldots, \mu B_k),\qquad k\in \Bbb{N}, \quad B_1,\ldots, B_k\in{\cal B}_b(S)^\mu.
\end{equation*}
\end{lem}
\begin{lem}[\cite{Ka76}, p.\ 23, Lemma 4.5.]
A sequence $(\mu_n)_{n=1}^\infty$ of random measures on $S$ is relatively compact w.r.t.\ convergence in distribution in the vague topology if and only if
\begin{equation*}
\lim_{t\to\infty}\limsup_{n\to\infty}\mathbb{P}[\mu_nB>t]=0,\quad \forall B\in{\cal B}_b(S).
\end{equation*}
\end{lem}
Denote by $P(S)$ the set of all probability measures on $S$. Clearly $P(S)\subset \mathbf M(S)$ and, according to Lemma~\ref{weak.vague}, the weak and the vague topologies on $P(S)$ coincide.
\begin{prop}[\cite{Bi68}, p.\ 30, Theorem 5.1.]\label{cont.m.cont}
If $S$ and $T$ are Polish spaces and $h:(S,{\cal B}(S))\to (T,{\cal B}(T))$ is a measurable mapping, then $h_*$
is continuous w.r.t.\ the weak topology
at a point $\mathbb{P}\in P(S)$
if $h$ is $\mathbb{P}$-almost surely continuous.
\end{prop}
Note that the version of Proposition~\ref{cont.m.cont} given in \cite{Bi68} is stated for metric spaces. But, as noted at the beginning of the appendix, Polish spaces are metrizable, and hence the same statement applies to such spaces.
\section{Semigroup Actions}
Let $X$ be a Hausdorff space.
An action of $(\Bbb{N},+)$ on $X$ is a collection $\mathbf Mathbb Pi$
of mappings $\mathbf Mathbb Pi_n:X\to X$, $n\in \Bbb{N}$, such that
for all $x\in X$, and $m,n\in \Bbb{N}$,
$\mathbf Mathbb Pi_m \circ \mathbf Mathbb Pi_n(x)= \mathbf Mathbb Pi_{m+n} (x)$.
When each of the mappings $\mathbf Mathbb Pi_n$ is continuous,
$\mathbf Mathbb Pi$ is also often referred to as a discrete time dynamical system.
On a Hausdorff space $X$, one can
endow the set $X^X$ with a topology,
e.g. that of pointwise convergence.
The closure of the action of $\Bbb{N}$
is then the closure $\overline \Pi$ of the set
$\Pi=\{\mathbf Mathbb Pi_n,n\in \Bbb{N}\}\subset X^X$
w.r.t. this topology. A classical instance (see e.g.
\cite{ElElNe00}) is that where the space $X$ is compact, the
mappings $\mathbf Mathbb Pi_n$ are all continuous,
and the topology on $X^X$ is that of pointwise convergence.
Then $\overline \Pi$ is compact.
Denote the orbit $\{x,\pi_1(x),\pi_2(x),\cdots\}$
of $x\in X$ by $A_x$.
For all $x\in X$, the
closure $\mathrm{cl}\, A_x$ of
$A_x$ is a closed $\pi$-invariant
subset of $X$. If, for all $n$, $\pi_n$ is continuous,
then the restriction of $\pi$ to $\mathrm{cl}\, A_x$
defines a semigroup action of $\Bbb{N}$.
The compactness of $\mathrm{cl}\, A_x$ is not guaranteed when
$X$ is non-compact. When it holds,
several important structural
properties follow as illustrated by the next lemmas
where $X$ is a metric space with distance $d$. Let
\begin{equation}\label{eq:omegalim}
\omega_x=\{y\in X \mbox{ s.t. } \exists\, n_1<n_2<\cdots \in \Bbb{N}
\mbox{ with } \pi_{n_i} (x) \to y\}
\end{equation}
denote the {\em $\omega$-limit set} of $x$.
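For intuition (an elementary example included only for illustration): if $X=\R$ with the usual distance and $\pi_n(x)=2^{-n}x$, then $A_x=\{2^{-n}x,\ n\in\Bbb{N}\}$, $\mathrm{cl}\,A_x=A_x\cup\{0\}$ is compact, and $\omega_x=\{0\}$ for every $x\in X$.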
\begin{lem} [Lemma 4.2, p. 134, and p. 166 in \cite{BrTa10}]
\label{lemcherche}
Assume that $\pi_n$ is continuous
for all $n$ and that $\mathrm{cl}\, A_x$ is compact.
Then, for all neighborhoods $U$ of $\omega_x$, there exists
an $N=N(U,x)$ such that $\pi_n(x)\in U$ for all $n\ge N$.
Moreover $\omega_x$ is nonempty,
compact and $\pi$-invariant.
\end{lem}
In words, under the compactness and continuity conditions,
the orbit is attracted to the $\omega$-limit set.
\begin{lem} [Lemma 2.9, p. 95 in \cite{BrTa10}]
\label{lemtak}
If $\mathrm{cl}\, A_x$ is compact, then the
following property holds:
for all $\epsilon >0$, there exists $N= N(\epsilon,x)\in \Bbb{N}$ such that
for all $y\in \mathrm{cl}\, A_x$, the set $\{\pi_n(x),\ 0\le n\le N\}$ contains
a point $z$ such that $d(y,z)\le \epsilon$.
If in addition $\pi_n$ is continuous
for all $n$, then the last property
is equivalent to the compactness of $\mathrm{cl}\, A_x$.
\end{lem}
In words, under the compactness condition,
in a long enough interval, the trajectory $\pi_n(x)$ visits a neighborhood
of every point of $\mathrm{cl}\, A_x$.
\section{Proof of Proposition \ref{contpos}}
\label{seccontpos}
Let $g$ be a \pomk{} the image of which at $\phi\in\mathbf N^0$ is
$x\in\phi$, with $x\neq 0$. Assume there is a point
$y\in\phi$ with $y\notin\{0,x\}$. Since $\phi$ is a discrete
subset of $\sp$ and $d\geq 2$, there exist curves
$\gamma_1,\gamma_2:[0,1]\to\sp$ such that
\begin{enumerate}
\item \label{ends} $\gamma_1(0)=\gamma_2(1)=x$ and $\gamma_2(0)=\gamma_1(1)=y$;
\item \label{intersections}$\gamma_1$ and $\gamma_2$ only intersect at their end-points;
\item \label{points}$\gamma_1$ and $\gamma_2$ contain no point of $\phi$ other than $x$ and $y$.
\end{enumerate}
Now let $\Gamma$ be a closed curve in $\mathbf N^0$ defined as
\eqn{\Gamma:[0,1]\to\mathbf N^0;\quad
\Gamma(t)=(\phi\backslash \{x,y\})\cup \{\gamma_1(t),\gamma_2(t)\},
\ t\in[0,1].}
The continuity of $g$, \ref{intersections}. and \ref{points}. imply
that for all $t\in[0,1]$, $g(\Gamma(t))=\gamma_1(t)$.
Hence $g(\Gamma(0))=x$ and $g(\Gamma(1))=y$.
But it follows from \ref{ends}. that $\Gamma(0)=\Gamma(1)=\phi$,
which contradicts the fact that $x$ and $y$ are different
points of $\phi$. When $\phi=\{0,x\}$, one obtains
the contradiction by letting $x$ go to infinity: in this situation,
$\{0,x\}$ converges to $\{0\}$ in the vague topology, whereas its image $x$ under $g$ escapes to infinity.
\section*{Acknowledgments}
The authors would like to thank H. Thorisson, K. Alishahi, A. Khezeli
and A. Sodre, as well as the anonymous reviewer, for their
very valuable comments on this work.
The early stages of this work were initiated
at Ecole Normale Sup\'erieure and INRIA,
where they were supported by a grant
from Minist\`ere des Affaires Etrang\`eres.
The later stages were pursued
at the University of Texas at Austin and were supported
by a grant of the Simons Foundation
(\#197982 to UT Austin). The second author expresses
his gratitude to the higher administration of Sharif University of Technology,
especially to S.-G. Miremadi, for their crucial support.
\end{document}
\begin{document}
\begin{frontmatter}
\title{Interior Regularity for a generalized Abreu Equation \tnoteref{label1}}
\tnotetext[label1]{Li acknowledges the support of NSFC Grants NSFC11521061.
Sheng acknowledges the support of NSFC Grants NSFC11471225.}
\author[Li]{An-Min Li}
\address[Li]{Yangtze Center of Mathematics Department of Mathematics
Sichuan University Chengdu, 610064, China}
\ead{[email protected]}
\author[Li]{Zhao Lian}
\address[Li]{Department of Mathematics Sichuan University
Chengdu, 610064, China}
\ead{[email protected]}
\author[Sheng]{Li Sheng\corref{cor1}}
\address[Sheng]{Department of Mathematics Sichuan University Chengdu, 610064, China}
\ead{[email protected]}
\cortext[cor1]{Corresponding author}
\begin{abstract}
We study a generalized Abreu Equation in $n$-dimensional polytopes and derive interior estimates of solutions under the assumption of uniform $K$-stability.
\end{abstract}
\begin{keyword}
Interior estimates\sep generalized Abreu Equation.
\MSC[2008] 53C55 \sep 35J60
\end{keyword}
\end{frontmatter}
\section{Introduction}\label{Sec-Intro}
The existence of extremal metrics and metrics of constant scalar curvature is a central problem in K\"ahler geometry. In a series of papers \cite{D1}, \cite{D2}, \cite{D3}, and \cite{D4}, Donaldson studied
this problem on toric manifolds and proved the existence
of metrics of constant scalar curvature on toric surfaces
under an appropriate stability condition. Later, in \cite{CLS1} and \cite{CLS2}, Chen, Li and Sheng proved the existence of metrics of prescribed scalar curvature on toric surfaces under the uniform stability condition.
\v
It is important to generalize the results of Chen, Li and Sheng to more general K\"ahler manifolds. This is one of a sequence of papers aiming to generalize the results of Chen, Li and Sheng to homogeneous toric bundles. The primary goal of this paper is to study the following nonlinear fourth-order partial differential equation
for an $n$-dimensional convex function $u$
\begin{equation}\label{eqn 1.1}
\frac{1}{\mathbb{D}}\sum_{i,j=1}^n\frac{\partial^2 \mathbb{D}u^{ij}}{\partial \xi_i\partial \xi_j}=-A.
\end{equation}
Here, $\mathbb{D}>0$ and $A$ are two given smooth functions on $\bar{\Delta}$ and $(u^{ij})$ is the inverse of the Hessian matrix $(u_{ij})$. The equation \eqref{eqn 1.1} was introduced by Donaldson \cite{D5}
in the study of the scalar curvature of toric fibrations; see also \cite{R} and \cite{N-1}. In \cite{LSZ} the authors also derived this PDE in the study of the scalar curvature of homogeneous toric bundles. We call \eqref{eqn 1.1} a generalized Abreu Equation. The main result of this paper is the following interior estimate.
\begin{theorem}\label{theorem_1.3}
Let $\Delta$ be a bounded open polytope in $\real^n$
and $\mathbb{D}>0$, $A$ be two smooth functions on $\bar\Delta$.
Suppose $(\Delta, \mathbb{D}, A)$ is uniformly $K$-stable and $u$ is a solution in $\mathbf{S}_{p_o}$
of the equation \eqref{eqn 1.1}.
Then, for any $\Omega\subset\subset \Delta$, any nonnegative integer $k$ and any constant $\alpha\in (0,1)$,
\begin{equation*}
\|u\|_{C^{k+3,\alpha}(\Omega)}\leq C,
\end{equation*}
where $C$ is a positive constant depending only on $n$, $k$, $\alpha$,
$\Omega$, $\mathbb{D}$, $\|A\|_{C^k(\bar\Delta)}$ and $\lambda$ in the uniform $K$-stability.
\end{theorem}
An equivalent statement of Theorem \ref{theorem_1.3} is the following.
\begin{theorem}\label{theorem_1.2}
Suppose that $(\Delta, \mathbb{D}, A)$ is uniformly $K$-stable and that
$\{A^{(k)}\}$ is a sequence of smooth functions in $\bar\Delta$ such that
$A^{(k)}$ converges to $A$ smoothly in $\bar \Delta$.
Assume $u^{(k)}\in \mathbf{S}_{p_o}$ is a sequence of solutions of
the generalized Abreu Equation
\begin{equation}\label{eqn 1.6}
\sum_{i,j}\frac{\partial^2(\mathbb{D}u^{(k)ij})}{\partial\xi_i\partial\xi_j}=-A^{(k)}\mathbb{D}\quad\text{in }\Delta.
\end{equation}
Then there is a subsequence, still denoted by $u^{(k)}$, such that
$u^{(k)}$ converges smoothly, in any compact set $\Omega\subset \Delta$,
to some smooth and strictly convex function $u$ in $\Delta$.
\end{theorem}
The main idea of the proof is as follows:
\v
Note that, as Donaldson pointed out, the uniform stability of $(\Delta, \mathbb{D}, A)$ implies
that there is a subsequence, still denoted by $u^{(k)}$, locally uniformly converging to $u$ in $\Delta$. The key point is to prove that $u$ is smooth and strictly convex. We consider the Legendre transform
$f^{(k)}$ of $u^{(k)}.$
Then $f^{(k)}$ satisfies the PDE
\begin{equation}\label{eqn 1.3}
-\sum_{i,j} f^{ij}\frac{\p^2(\log \mathbb F)}{\p x_i \p x_j} -\sum_{i,j}f^{ij} \frac{\p (\log \mathbb F)}{\p x_i}\frac{\p (\log \mathbb D)}{\p x_j} = A.\end{equation}
In Section \ref{Sec-Determinants}, we derive a uniform lower bound and a uniform upper bound for the determinant of the Hessian of $f^{(k)}$. We cannot directly apply the Caffarelli--Guti\'errez theory to the PDE \eqref{eqn 1.3}.
We prove a convergence theorem for this PDE in Section \ref{Sec-Convergence}. Then Theorem \ref{theorem_1.2}
follows.
\section{Uniform stability}\label{Sec-Uniform}
Let $\Delta$ be a Delzant polytope in $\real^n$, $c_k$ be a constant and
$h_k$ be an affine linear function in $\mathbb R^n$, $k=1, \cdots, d$.
Suppose that
$\Delta$ is defined by linear inequalities $h_k(\xi)-c_k>0$, for $k=1, \cdots, d$,
where each $h_k(\xi)-c_k=0$ defines a facet of $\Delta$.
Write $\delta_k(\xi)=h_k(\xi)-c_k$
and set
\begin{equation}\label{eqn2.1}
v(\xi)=\sum_k\delta_k(\xi)\log\delta_k(\xi).
\end{equation}
This function was first introduced by Guillemin \cite{Guillemin1994}.
It defines a K\"ahler metric on the toric variety defined by $\Delta$.
We introduce several classes of functions. Set
\begin{align*}
\mc C&=\{u\in C(\bar\Delta):\, \text{$u$ is
convex on $\bar\Delta$ and smooth on $\Delta$}\},\\
\mathbf{S}&=\{u\in C(\bar\Delta):\, \text{$u$ is convex on $\bar\Delta$
and $u-v$ is smooth on $\bar\Delta$}\},\end{align*}
and $u-v$ is smooth on $\bar\Delta$}\},\end{align*}
where $v$ is given in \eqref{eqn2.1}.
For a fixed
point $p_o\in \Delta$, we consider
\begin{align*}
{\mc C}_{p_o}&=\{u\in \mc C:\, u\geq u(p_o)=0\},\\
\mathbf{S}_{p_o}&=\{ u\in \mathbf{S} :\, u\geq u(p_o)=0\}.\end{align*}
We say functions in ${\mc C}_{p_o}$ and ${\mathbf{S}}_{p_o}$ are {\it normalized} at $p_o$. Let
\begin{eqnarray*}
\mc C_\ast&=&\{u| \mbox{there exist a constant $C>0$ and a sequence $\{u^{(k)}\}$ in ${\mc C}_{p_o}$ }\\
&&\mbox{such that
$\int_{\partial\Delta}u^{(k)} \mathbb{D}d\sigma<C$ and
$u^{(k)}$ locally uniformly converges to} \\
&& \mbox{$u$ in $\Delta$}\}.
\end{eqnarray*}
For any $u\in \mc C_\ast$, define $u$ on the boundary by
$$u(q)=\lim_{\Delta\ni \xi\to q} u(\xi),\;\;\; q\in \partial \Delta.$$
Let $P>0$ be a constant and define
$$
\mc C_\ast^P=\{u \in\mc C_\ast| \int_{\partial\Delta}u \mathbb{D}d\sigma\leq P \}.
$$
\v
Following \cite{N-1} we consider the functional
\begin{equation}\label{eqn 2.2}
\mc F_A(u)=-\int_\Delta \log\det(u_{ij})\mathbb{D}d \mu+\mc L_A(u),
\end{equation}
where
\begin{equation}\label{eqn 2.3}
\mc L_A(u)=\int_{\partial\Delta}u \mathbb{D}d\sigma-\int_\Delta Au \mathbb{D} d\mu.
\end{equation}
$\mc F_A$ is called the Mabuchi functional
and $\mc L_A$ is closely related to the Futaki invariants. The Euler--Lagrange equation for $\mc F_A$ is \eqref{eqn 1.1}.
It is known that,
if $u\in \mathbf{S}$ satisfies the equation \eqref{eqn 1.1}, then $u$ is an absolute minimizer for
$\mc F_A$ on $\mathbf{S}$.
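For the reader's convenience, we sketch (formally, and only for compactly supported smooth variations $\delta u$) why \eqref{eqn 1.1} is the Euler--Lagrange equation of $\mc F_A$: using $\frac{d}{dt}\log\det(u_{ij}+t\,\delta u_{ij})\big|_{t=0}=\sum u^{ij}\delta u_{ij}$ and integrating by parts twice (no boundary terms appear for compactly supported $\delta u$),
$$
\delta\mc F_A(u)=-\int_\Delta \sum_{i,j}u^{ij}\delta u_{ij}\,\mathbb{D}d\mu-\int_\Delta A\,\delta u\,\mathbb{D}d\mu
=-\int_\Delta\Big[\sum_{i,j}\frac{\partial^2(\mathbb{D}u^{ij})}{\partial\xi_i\partial\xi_j}+A\mathbb{D}\Big]\delta u\,d\mu,
$$
and this vanishes for all such $\delta u$ exactly when \eqref{eqn 1.1} holds.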
\begin{defn}\label{defn_1.5}
Let $\mathbb{D}>0$ and $A$ be two smooth functions on $\bar\Delta$.
Then, $({\Delta},\mathbb{D},A)$ is called {\em uniformly $K$-stable} if
the functional $\mc L_A$ vanishes on affine-linear functions and
there exists a constant $\lambda>0$
such that, for any $u\in {\mc C}_{p_o}$,
\begin{equation}\label{eqn 2.4}
\mc L_A(u)\geq \lambda\int_{\partial \Delta} u \mathbb{D}d \sigma.
\end{equation}
We also say that $\Delta$ is
$(\mathbb{D}, A,\lambda)$-stable.
\end{defn}
\begin{remark}
The conditions in Definition \ref{defn_1.5} are exactly the content of Condition 1
in \cite{N-1}. Following Donaldson we call it the {\em uniform $K$-stability}.
\end{remark}
Using the same method as in \cite{CLS4}, we immediately get
\begin{theorem}\label{theorem_2.3}
If the equation \eqref{eqn 1.1} has a solution in $\mathbf{S}$, then $(\Delta, \mathbb{D}, A)$ is uniformly $K$-stable.
\end{theorem}
Namely, uniform $K$-stability is a necessary condition for the existence of a solution of \eqref{eqn 1.1} in $\mathbf{S}$. We pose the following question.
\begin{question}\label{question_1.8}
Let $\Delta \subset \mathbb{R}^{n}$ be a Delzant polytope and let $\mathbb{D}>0$ and $A$ be two smooth functions on $\bar\Delta$.
Does the uniform $K$-stability of $(\Delta, \mathbb{D}, A)$ imply that the equation \eqref{eqn 1.1} has a solution in $\mathbf{S}$?
\end{question}
Assume that $v\in \mathbf{S}_{p_o}$ is a solution of the equation \eqref{eqn 1.1} and that $u$ is a convex function. For any segment $I\subset\subset \Delta$, $u$ defines a convex function $w:=u|_I$ on $I$, which in turn defines a Monge-Amp\`ere measure on $I$; we denote this measure by $N$. The key point of the proof in \cite{CLS4} is the following lemma.
\begin{lemma}\label{lemma_2.5}
Let $u\in \mc C^P_\ast$ and let $u^{(k)}\in \mathcal C$ locally uniformly converge to $u$. If $N(I)=m>0$, then $$\mc L_A(u^{(k)})> \tau m$$ for some positive constant $\tau$ independent of $k$.
\end{lemma}
In our present case this lemma still holds, since $C^{-1}\leq \mathbb{D}\leq C$ for some constant $C>0$. For the reader's convenience we give the proof here.
\v\n
{\bf Proof of Lemma \ref{lemma_2.5}.} Let $p$ be the midpoint of $I$. We choose a coordinate system $\{0,\xi\}$ such that
$p$ is the origin, $I$ lies on the $\xi_1$ axis and $I=(-a,a)$. Set $I_{\epsilon}=[-a+\epsilon,a-\epsilon]$. By choosing $\epsilon$ small we can assume that
\begin{equation}\label{eqn 2.7}
N(I_{\epsilon})\geq \frac{3m}{4}.
\end{equation}
Suppose that there is a Euclidean ball $B:=B_{\epsilon_o}(0)$ in the plane $\xi_1=0$
such that
$I\times B\subset\subset \Delta$. Suppose that $u$ is the limit of a sequence $u^{(k)}\in \mc C$.
Then $u^{(k)}$ converges to $u$ uniformly on $I\times B$. We have
\begin{equation}\label{eqn_5.1a}
\mc L_A(u^{(k)})= \int_{\Delta} v^{ij} u^{(k)}_{ij}\mathbb{D}d\mu.
\end{equation}
Consider the functions
$$
w^{(k)}_{\xi}(\xi_1)=u^{(k)}(\xi_1,\xi),\;\;\; \xi_1\in I, \xi\in B.
$$
We denote by
$N^{(k)}_{\xi}$ the Monge-Amp\`ere measure on $I$ induced by $w^{(k)}_{\xi}$. We claim that
there exist a small ball $B$ and a large $K$ such that for any $\xi\in B$ and $k>K$
\begin{equation}
N^{(k)}_{\xi}(I)\geq m/2.
\end{equation}
In fact, if not, then there exist a subsequence of $k$, still denoted by $k$, and a sequence of points
$\xi_k\in B$ with $\xi_k\to 0$ such that $N^{(k)}_{\xi_k}(I)< m/2$. However, by the weak convergence of Monge-Amp\`ere measures, we have
$$N(I_{\epsilon} )\leq \lim_{k\to\infty}N^{(k)}_{\xi_k}(I)\leq m/2,$$ which contradicts \eqref{eqn 2.7}.
\v
On the other hand, the eigenvalues of $(v^{ij})$ are bounded below in $I\times B$; let $\delta$ be a lower bound. Then
\begin{eqnarray*}
\mc L_A(u^{(k)})
&\geq& \int_{I\times B} v^{ij}u^{(k)}_{ij}\mathbb{D}d\mu
\geq \frac{\delta}{C} \int_{I\times B} Trace(u^{(k)}_{ij})d\mu \\
&\geq& \frac{\delta}{C}\int_{I\times B} u^{(k)}_{11}d\mu=\frac{\delta}{C}\int_B N^{(k)}_{\xi}(I)d\xi
\geq \frac{m\delta}{2C} Vol(B).
\end{eqnarray*}
This completes the proof of Lemma \ref{lemma_2.5}.
\v\n
Then by the same method as in \cite{CLS4} we can prove Theorem \ref{theorem_2.3}.
\section{Estimates of the Determinant}\label{Sec-Determinants}
Set
\begin{equation}\label{eqn 3.1}
\mathbb{F}:=\frac{\mathbb D}{\det(u_{ij})},\;\;\;U^{ij}= \det(u_{kl})u^{ij}.\end{equation}
Since $\sum_{i} U^{ij}_{i}=0,$ the generalized Abreu Equation \eqref{eqn 1.1} can be written in terms of $(\xi, u)$ as
\begin{equation}\label{eqn 3.2}
-\sum_{i,j} U^{ij}\frac{\p^2\mathbb F}{\p \xi_i \p \xi_j} = A\mathbb D.
\end{equation}
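For the reader's convenience, here is the short computation behind this step: since $\mathbb{D}u^{ij}=\mathbb{F}U^{ij}$ by \eqref{eqn 3.1}, and writing subscripts on $\mathbb F$ and $U^{ij}$ for partial derivatives in $\xi$,
$$
\sum_{i,j}\frac{\partial^2(\mathbb{D}u^{ij})}{\partial \xi_i\partial \xi_j}
=\sum_{i,j}\left(U^{ij}\mathbb F_{ij}+2\,U^{ij}_{i}\mathbb F_{j}+U^{ij}_{ij}\mathbb F\right)
=\sum_{i,j}U^{ij}\mathbb F_{ij},
$$
where the cross terms and the last term vanish because $\sum_{i} U^{ij}_{i}=0$ (and hence $\sum_{i,j}U^{ij}_{ij}=0$); combined with \eqref{eqn 1.1} this gives \eqref{eqn 3.2}.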
Through the normal map $\nabla u$ we can view $\mathbb{D}$ as a function of $x$. In terms of $(x, f)$ the PDE \eqref{eqn 3.2} can be written as
\begin{equation}\label{eqn 2.1}
-\sum_{i,j} f^{ij}\frac{\p^2(\log \mathbb F)}{\p x_i \p x_j} -\sum_{i,j}f^{ij} \frac{\p (\log \mathbb F)}{\p x_i}\frac{\p (\log \mathbb D)}{\p x_j} = A.
\end{equation}
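For later use, we record the standard Legendre-transform identities behind this change of variables (a reminder rather than a new statement): with $x=\nabla u(\xi)$, $\xi=\nabla f(x)$ and $f(x)=\sum_i x_i\xi_i-u(\xi)$, the Hessians of $u$ and $f$ are mutually inverse, so that
$$
\big(f_{ij}(x)\big)=\big(u_{ij}(\xi)\big)^{-1},\qquad f^{ij}(x)=u_{ij}(\xi),\qquad
\frac{\partial}{\partial\xi_i}=\sum_j u_{ij}(\xi)\frac{\partial}{\partial x_j},
$$
identities which are used repeatedly when passing from \eqref{eqn 3.2} to \eqref{eqn 2.1}.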
\subsection{\bf The lower bound of the determinant}\label{lower bound}
\v
\label{sect_3.1}
The following lemma is proved in \cite{N-1} for toric fibrations. It can be extended directly to the generalized Abreu Equation \eqref{eqn 1.1}.
\begin{lemma}\label{lemma_3.1} Let $\Delta$ be a bounded open polytope in $\real^n$ and $\mathbb{D}>0$, $A$ be two smooth functions on $\bar\Delta$. Let $u\in \mathcal{C}$ be a strictly convex function satisfying the generalized Abreu Equation \eqref{eqn 1.1}. Suppose that $\mathbb{F}=0$ on $\partial \Delta$. Then
$$\det ( u_{ij})\geq \mff C_1(\sup_{\Delta} A)^{-n}$$ everywhere in $\Delta$,
where $\mff C_1$ is a constant depending on $n$, $\mathbb{D}$ and $\Delta$.
\end{lemma}
In the following we derive a stronger estimate than Lemma \ref{lemma_3.1}, which will be used in our subsequent papers. First we prove a preliminary lemma.
\begin{lemma}\label{lemma 3.2}
Let $\Delta$ be a bounded open polytope.
Suppose that $\mathbb{F}=0$ on $\partial \Delta$. Let $E$ be an edge of
$\Delta$. Suppose that $E$ is given by $\xi_1=0$. Set $$v(\alpha,\beta,C) =- \xi_1^{\alpha}(C-\xi_1)^{\beta}\left(C - \sum_{j=2}^{n}
\xi_j^2\right)^{\beta},$$ where $\alpha,\beta,C$ are constants. Then for any $\frac{1}{2n}\leq\alpha,\beta \leq 1-\frac{1}{2n}$ there exist constants $C,C_{1}>0$, depending only on $n$ and $diam(\Delta)$, such that
$v$ is strictly convex and
\begin{equation}
\det(v_{ij})>C_{1}(\epsilon_0) \xi_1^{n\alpha-2}.
\end{equation}
\end{lemma}
\begin{proof} Choose $C>0$ large such
that
\begin{equation}\label{eqn 3.5}
\Delta \subset \left \{\xi \;|\xi_1\leq
\frac{C}{m}\right\}\bigcap \left\{\xi\;|\sum_{j=2}^{n} \xi_j^2 \leq
\frac{C}{m}\right\},
\end{equation}
where $m=8n$. We calculate $\det(v_{ij})$. For any point
$\xi$, by taking an orthogonal transformation of $\xi_2,\ldots,\xi_n$,
we may assume that $\xi = (\xi_1,\xi_2,0,\ldots,0)$. By a direct
calculation we have
$$v_{11}= -v\left[-\left(\frac{\alpha }{\xi_{1} } -
\frac{\beta}{C-\xi_{1}}\right)^2+\frac{\alpha}{\xi_{1}^{2}} +
\frac{\beta}{(C-\xi_{1})^{2}} \right],$$
$$v_{12} =-v \left(\frac{\alpha }{\xi_{1} } -
\frac{\beta}{C-\xi_{1}}\right)\frac{2\beta
\xi_2}{ C-\xi_2^2 },\;\;\;\;\;v_{ij}= 0,\;\;i>2, \;\;i\ne j.$$$$v_{22}=-v\left[\frac{2\beta(C+\xi_2^2)}{(C-\xi_2^2)^{2}}
- \frac{4\beta^2 \xi_2^2}{(C-\xi_2^2)^{2}}\right],\;\;\;\;v_{ii}= -v\frac{2\beta}{ C-\xi_2^2 } ,\;\;\;i>2.$$ Denote $A-B= v_{11}v_{22}-v_{12}^2 ,\;D=\prod_{i=3}^{n} v_{ii}.$ The determinant of
$(v_{ij})$ is $\det(v_{ij}) =(A-B)\cdot D.$ A direct calculation gives us
\begin{align*}
A-B=&\frac{2\beta v^2}{\xi^2_{1} (C-\xi_{1})^2(C-\xi_{2}^2)^2}\left[ \alpha(C-\xi_{1})^2((1-\alpha)C+ (1-2\beta- \alpha)\xi_{2}^2)\right.\\
&\left.+ \beta \xi_{1}^2 ((1-\beta)C+ (1-3\beta)\xi_{2}^2) +2\alpha \beta\xi_{1}(C-\xi_{1})(C+\xi_{2}^2) \right] \\
D=&\prod_{i=3}^{n} v_{ii}=\left[-v\frac{2\beta}{ C-\xi_2^2 }\right]^{n-2}.
\end{align*}
For any $\alpha,\beta$ satisfying $\frac{1}{2n}\leq \alpha,\beta\leq
1-\frac{1}{2n}$, since $m>4(2n-1)$, we have
\begin{equation}\label{3.3}
A-B\geq \frac{ \alpha\beta v^2}{\xi^2_{1} (C-\xi_{2}^2)^2}\frac{C(2n-1)}{2nm} .
\end{equation}
It is easy to check that $v$ is strictly convex and
\begin{equation}
\det(v_{ij})>C(n) \xi_1^{n\alpha-2}.
\end{equation}
\end{proof}
Now we prove the following lemma.
\vskip 0.1in
\noindent
\begin{lemma} Let $\Delta$ be a bounded open polytope in $\real^n$
and $\mathbb{D}>0$, $A$ be two smooth functions on $\bar\Delta$.
Let $u\in \mathcal{C}$ be a strictly convex function satisfying the generalized Abreu Equation \eqref{eqn 1.1}.
Suppose that $\mathbb{F}=0$ on $\partial \Delta$. Let $E$ be an edge of
$\Delta$. Suppose that $E$ is given by $\xi_1=0$. Let $p\in E^o$.
Then the following estimate holds in a neighborhood of $p$:
$$ \det(u_{ij})\geq \frac{b}{\xi_1}$$
for some constant $b>0$ depending only on $n$, $diam(\Delta)$, $\max_{\bar\Delta}\mathbb D$, $\min_{\bar\Delta}\mathbb D$ and $\|A\|_{L^{\infty}(\Delta)}$.
\end{lemma}
\begin{proof} First we prove that there exists a constant $b_{0}>0$ such that
\begin{equation}\label{eqn 3.7}
\det(u_{ij})\geq
b_0\xi_1^{-(1-\frac{1}{n}) }.\end{equation}
Choose $\beta=\frac{1}{2}$ in Lemma \ref{lemma 3.2}. Let $C>0$ and $m=8n$ be constants such
that \eqref{eqn 3.5} holds. We discuss two cases. \v\n
{\bf Case 1.} $n=2.$ We choose $\alpha=\frac{1}{2}$ and consider
the following function
$$h= \mathbb F + b_1v.$$
Obviously, $h<0$ on $\partial \Delta$. We have
\begin{eqnarray*}
\sum U^{ij}h_{ij}& =& -A\mathbb D + b_1 \det(u_{ij})\sum u^{ij}v_{ij}
\\
&\geq&
-A\mathbb D + nb_1 \det(u_{ij})^{1-1/2}(\det(v_{ij}))^{1/2}\\
&\geq& -A\mathbb D +nb_1d_1C(\epsilon_0).
\end{eqnarray*}
Here we use the estimate $\det(D^2u)\geq d_1$. By choosing the
constant $b_1$ large, we have $\sum U^{ij}h_{ij} \geq 0$. So $h$
attains its maximum on $\partial \Delta$. Then $w \leq b_1 \mathbb D^{-1} |v|.$ It
follows that
$$\det(u_{ij})\geq b_2\xi_1^{-\frac{1}{2}}$$
for some constant $b_2$.
\v\n
{\bf Case 2. } $n\geq 3$. Choose a sequence $\{\alpha_{k}\}$ such that
$$
\alpha_{k}=2\left(1-(1-\tfrac{1}{n})^{k}\right),\;\;\;\;\;\;\forall k\geq 1.
$$
Obviously,
\begin{equation}\label{eqn 3.8}
\alpha_{k}-\frac{2}{n}=(1-\frac{1}{n})\alpha_{k-1},\;k\geq 2,
\end{equation}
and there is $k^\star\in \mathbb Z^{+}$ such that $\alpha_{k^\star}<1-\frac{1}{n}$ and $\alpha_{k^\star+1}\geq 1-\frac{1}{n}.$
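For the reader's convenience, \eqref{eqn 3.8} is verified directly from the definition of $\alpha_k$:
$$
\alpha_{k}-\frac{2}{n}=2-2\Big(1-\frac{1}{n}\Big)^{k}-\frac{2}{n}
=\Big(1-\frac{1}{n}\Big)\left[2-2\Big(1-\frac{1}{n}\Big)^{k-1}\right]
=\Big(1-\frac{1}{n}\Big)\alpha_{k-1}.
$$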
\v\n
We first let $\alpha=\alpha_{1}$, $h= \mathbb F + b_2v$. By the same argument as in Case 1 we get
$$
\det(u_{ij})\geq b_2'\xi_1^{\frac{-2}{n}}.
$$
Next we let $\alpha=\alpha_{2}$, $h= \mathbb F + b_3v$. Then
\begin{eqnarray*}
\sum U^{ij}h_{ij}& \geq & -A\mathbb D + nb_3 \det(u_{ij})^{1-1/n}(\det(v_{ij}))^{1/n}\\
&\geq& -A\mathbb D +nb_3 b_{2'}^{1-\frac{1}{n}}\xi_1^{-\alpha_1(1-\frac{1}{n})+\alpha_{2}-\frac{2}{n}} \geq -A\mathbb D +nb_3 b_{2'}^{1-\frac{1}{n}}.
\end{eqnarray*}
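The last inequality above is a consequence of the exponent identity \eqref{eqn 3.8} with $k=2$ (we spell this out only for convenience): since $\det(u_{ij})\geq b_2'\xi_1^{-\alpha_1}$ from the previous step and $\det(v_{ij})>C\,\xi_1^{n\alpha_2-2}$ by Lemma \ref{lemma 3.2},
$$
\det(u_{ij})^{1-\frac{1}{n}}\big(\det(v_{ij})\big)^{\frac{1}{n}}
\geq {\rm const}\cdot\xi_1^{-\alpha_1(1-\frac{1}{n})+\alpha_2-\frac{2}{n}}={\rm const},
$$
because the exponent vanishes.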
We choose $b_3$ such that $\sum U^{ij}h_{ij}>0$. Then we have
$$
\det(u_{ij})\geq b_3'\xi_1^{-\alpha_2}.
$$
We iterate the process to improve the estimate. After finitely many steps we get $\det(u_{ij})\geq b'\xi_1^{-\alpha_{k^\star}}.$ Then we set $\alpha=1-\tfrac{1}{n}$ and repeat the argument above to get
\eqref{eqn 3.7}.
\v
Next we consider the function
$$v'= \xi_1^{\alpha}\left(C + \sum_{j=2}^{n}
\xi_j^2\right) - a \xi_1,$$ where $a>0$ and $\alpha >1$ are constants and
$C>0$ is the constant chosen before. We choose $a$ large such that
$v'\leq 0$ on $\Delta$. For any point $\xi$ we may assume that $\xi
= (\xi_1,\xi_2,0,...,0)$. By a direct calculation we have
$$v'_{11}= \alpha (\alpha-1)\xi_1^{\alpha-2}(C+
\xi_2^2),$$
$$v'_{ii}= 2\xi_1^{\alpha}\;\;\;i\geq 2,\;\;\;\;v'_{12}= 2\alpha \xi_2\xi_1^{\alpha-1},$$
$$\det(D^2v')=
2^{n-1}\left[\alpha (\alpha-1)(C+ \xi_2^2) -
2\alpha^2\xi_2^2\right]\xi_1^{n\alpha-2}.$$ Then for large $C$, we
conclude that $v'$ is convex and
\begin{equation}
\det(D^2v')\geq C_1\xi_1^{n\alpha-2}.
\end{equation}
Set $\alpha=1+\frac{1}{n^2}$.
Consider
the function
$$h' = \mathbb F+ b_5v'.$$
Obviously, $h' < 0$ on $\partial \Delta$. We have
\begin{eqnarray*}
\sum U^{ij}h'_{ij}& =& -A\mathbb D + b_5\det(u_{ij})\sum u^{ij}v'_{ij}\\
& \geq&
-A\mathbb D + nb_5 \det(u_{ij})^{1-1/n}\det(v'_{ij})^{1/n}\\
&\geq& -A\mathbb D + nb_5
C(n)\xi_1^{-(1-\frac{1}{n})^2}C_1\xi_1^{\alpha-\frac{2}{n}}
\\
&=&-A\mathbb D +nb_5C(n)C_1.
\end{eqnarray*}
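For the reader's convenience we note that the choice $\alpha=1+\frac{1}{n^2}$ is exactly what makes the powers of $\xi_1$ in the display above cancel:
$$
-\Big(1-\frac{1}{n}\Big)^2+\alpha-\frac{2}{n}
=-\Big(1-\frac{2}{n}+\frac{1}{n^{2}}\Big)+1+\frac{1}{n^{2}}-\frac{2}{n}=0.
$$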
We choose $b_5$ such that
$\sum U^{ij}h'_{ij}\geq 0$. By the maximum principle we have $w \leq C_5 \mathbb D^{-1} |v'|\leq aC_5\xi_1$. It
follows that
$\det(u_{ij})(\xi)\geq {\mff C_5}{\xi_1}\inv $ for some
constant $\mff C_5>0$ independent of $p.$
\end{proof}
\subsection{\bf The upper bound of the determinant }\label{sect_4.2}
\v
Let $u\in \mathbf{S}_{p_o}$ be a solution of the generalized Abreu Equation \eqref{eqn 1.1}. In this section, we derive a global upper bound of the determinant of the
Hessian of $u$.
The proof of the following lemma is standard.
\begin{lemma}\label{lemma_3.3}
Suppose that $u\in \mathbf{S}_{p_o}$ satisfies the generalized Abreu Equation \eqref{eqn 1.1}. Assume
that the section
$$\bar{S}_u(p_o,C)=\{\xi\in \Delta:\, u(\xi)\leq C\}$$
is compact and that there is a constant $b>0$ such that $$\sum
_{k=1}^n \left(\frac{\partial u}{\partial \xi_k}\right)^2 \leq b
\quad\text{on }\bar{S}_u(p_o,C).$$ Then,
$$\det (u_{ij})\leq \mff C_2\quad\text{in }S_u(p_o,C/2),$$
where $ \mff C_2$ is a positive constant depending on
$n$, $C$ and $b$.
\end{lemma}
Following \cite{CHLS} we derive a global estimate for the upper bound of $\det (u_{ij})$ for the generalized Abreu Equation \eqref{eqn 1.1}.
This upper bound relates to the Legendre transforms of solutions.
For any point $p$ on $\partial \Delta$, there is an affine coordinate
$\{\xi_1,..., \xi_n\}$, such that, for some $1\leq m \leq n$,
a neighborhood $U\subset \bar\Delta$ of $p$ is defined
by $m$ inequalities
$$\xi_1\ge 0,\quad ...,\quad \xi_m\geq 0,$$
with $\xi(p)=0.$ Then, $v$ in \eqref{eqn2.1} has the form
$$v=\sum_{i=1}^{m}\xi_i\log \xi_i+\alpha(\xi),$$
where $\alpha$ is a smooth function in $\bar U$.
By Proposition 2 in \cite{D2}, we have the following result.
\begin{lemma}\label{lemma_3.4} There holds
$$\det(v_{ij})= \big[\xi_1\xi_2 \cdots\xi_m \beta(\xi)\big]^{-1}\quad\text{in }\Delta,$$
where $\beta(\xi)$ is smooth up to the boundary and $\beta(0)=1$.
\end{lemma}
For any $q\in \Delta$ denote by $d_E(q,\partial \Delta)$ the Euclidean distance from $q$ to $\partial \Delta$.
By Lemma \ref{lemma_3.4}, we have
\begin{equation}\label{eqn 3.9}
\det(v_{ij})\leq \frac{C}{[d_E(p,\partial \Delta)]^n}\quad\text{in } \Delta,
\end{equation}
where $C$ is a positive constant.
\v
Recall that $p_o\in\Delta$ is the point we fixed for $\mathbf{S}_{p_o}$.
Now we choose coordinates $\xi_1,...,\xi_n$ such that $\xi(p_o)=0$. Set
$$x_i=\frac{\partial u}{\partial \xi_i},\;\;\; f=\sum_i x_i\xi_i - u.$$
\begin{lemma}\label{lemma_3.5} Let $\Delta$ be a bounded open polytope in $\real^n$ and $\mathbb{D}>0$, $A$ be smooth functions on $\bar\Delta$.
Let $u\in \mathbf{S}_{p_o}$ be a strictly convex function satisfying the generalized Abreu Equation \eqref{eqn 1.1}.
Assume, for some positive constants $d$ and $b$,
$$\frac{1+\sum x_i^2}{(d + f)^2}\leq b\quad\text{in }\mathbb R^n.$$
Then,
$$\exp\left\{ -\mff C_3 f \right\}\frac{\det (u_{ij})}{\left(d+f\right)^{2n}}\leq \mff C_4
\quad\text{in }\Delta,$$
where $\mff C_3$ is a positive constant depending only on $n$ and $\Delta$,
and $\mff C_4$ is a positive constant depending only on
$n$, $d$, $b$, $\mathbb{D}$ and $\max_{ \bar\Delta}|A|$.
\end{lemma}
\begin{proof}
Let $v$ be given as in \eqref{eqn2.1}.
By adding a linear function, we assume that $v$ is also normalized at $p_o$.
Denote $g=L(v)$. By \eqref{eqn 3.9}, it is straightforward to check that
there exists a positive constant $C_1$ such that
$$\det(v_{ij})e^{-C_1g}\to 0\quad\text{as }p\to \partial \Delta.$$
Since $u=v+\phi $ for some $\phi\in C^\infty(\bar \Delta)$, we then have
\begin{equation}\label{eqn3.a}\det(u_{ij})e^{-C_1 f }\to 0\quad\text{as }p\to \partial \Delta.
\end{equation}
Consider the function
$$\mathcal{F}=\exp\left\{ -C_1 f + \varepsilon\frac{1+\sum x_i^2}{(d + f)^2}\right\}
\frac{1}{\mathbb F\left(d+f\right)^{2n}},$$
where $\mathbb{F}$ is defined in \eqref{eqn 3.1} and $\varepsilon$ is a positive constant to be determined later.
Obviously, $\mathcal{F}\to 0$ as $p\to\partial\Delta$.
Assume $\mathcal{F}$ attains its maximum at an interior point $p^*$.
Then at $p^*$, we have
$$
\frac{\p}{\p x_{j}} \mathcal{F}=0,\;\;\;\;\;\;\;\sum f^{ij}\frac{\p^2 \mathcal{F}}{\p x_{i}\p x_{j}}\leq 0.
$$
Thus,
\begin{equation}\label{eqn 2.10}
-(\log\mathbb F)_i - C_1f_{i}- \frac{2n f_{i}}{d+f}
+ \varepsilon\frac{1+\sum x_k^2}{(d + f)^2}\left[\frac{(\sum x_k^2)_{i}}{1+\sum x_k^2}-
2\frac{f_{i}}{d + f}\right]=0,\end{equation}
and
\begin{align}\label{eqn3.4} \begin{split}
& \sum_{i,j}f^{ij} (\log \mathbb D)_{i}(\log \mathbb F)_{j} + A- C_1n - \frac{2n^2}{d+f}
+ \frac{2n\|\nabla f\|^2}{(d+f)^2}\\
&\quad+\varepsilon\frac{1+\sum x_k^2}{(d + f)^2}
\bigg[\frac{2\sum_{k} f^{kk}}{1+\sum x_k^2}-\frac{\|\nabla\sum x_k^2\|^2}{(1+\sum x_k^2)^2}
-\frac{2n}{d + f} + \frac{2\|\nabla f\|^2}{(d + f)^2}\bigg]\\
&\quad+\varepsilon\frac{1+\sum x_k^2}{(d + f)^2}\left\|\left(\frac{\nabla(\sum x_k^2)}{1+\sum x_k^2}-
\frac{2\nabla f}{d + f}\right)\right\|^2
\leq 0, \end{split}
\end{align}
where we used \eqref{eqn 2.1} and denote $F_{i}=\frac{\p F }{\p x^{i}},F_{ij}=\frac{\p^2 F }{\p x^{i} \p x^{j}}$ for any function $F$.
Since $$\sum \left| \tfrac{\p \log \mathbb D}{\p \xi_{i}} \right|\leq C,$$ and
\begin{equation*}
\sum_{i,j}f^{ij} \frac{\p\log \mathbb D}{\p x_{i}} \frac{\p \log \mathbb F}{\p x_{j}}=\sum_{i,j,k}f^{ij}\frac{\p \xi_{k}}{\p x_i}\frac{\p\log \mathbb D}{\p \xi_{k}}\frac{\p \log \mathbb F}{\p x_{i}}=\sum_{i}\frac{\p\log \mathbb D}{\p \xi_{i}}\frac{\p \log \mathbb F}{\p x_{i}},
\end{equation*}
we have
$$
\left|\sum_{i,j}f^{ij} (\log \mathbb D)_{i}(\log \mathbb F)_{j}\right|\leq C\sum |(\log \mathbb F)_{j}|.
$$
By $\left|\frac{\p f}{\p x_{i}}\right|=|\xi_{i}|\leq diam(\Delta)$, $\frac{\sum x_{k}^2}{(d+f)^2}\leq b$ and \eqref{eqn 2.10}, we have, at $p^{*}$,
\begin{equation}\label{eqn 2.12}
\left|\sum_{i,j}f^{ij} (\log \mathbb D)_{i}(\log \mathbb F)_{j}\right|\leq C \sum |(\log \mathbb F)_{j}|\leq C_{3},
\end{equation}
where $C_{3}$ is a constant depending only on $b$, $diam(\Delta)$ and $n$.
Inserting \eqref{eqn 2.12} into \eqref{eqn3.4}, we obtain
\begin{align}\label{eqn3.6}\begin{split}&\varepsilon\frac{1+\sum x_k^2}{(d + f)^2}
\bigg[\frac{2\sum f^{ii}}{1+\sum x_k^2}-\frac{4\langle\nabla \sum x_k^2,\nabla f\rangle}{(1+\sum x_k^2)(d+f)}
-\frac{2n}{d + f} + \frac{6\|\nabla f\|^2}{(d + f)^2}\bigg]\\
&\quad
+ \frac{2n\|\nabla f\|^2}{(d+f)^2}- \frac{2n^2}{d+f}
+A- C_2n-C_{3}\leq 0.\end{split}\end{align}
By the Schwarz inequality, we have
\begin{equation*}
\left|\frac{4\langle\nabla \sum x_k^2,\nabla f\rangle}{(1+\sum x_k^2)(d+f)}\right|
\leq \frac{\|\nabla\sum x_k^2\|^2}{4(1+\sum x_k^2)^2} + \frac{16\|\nabla f\|^2}{(d + f)^2}.\end{equation*}
Hence,
\begin{equation}\label{eqn3.7}
\left|\frac{4\langle\nabla \sum x_k^2,\nabla f\rangle}{(1+\sum x_k^2)(d+f)}\right|
\leq \frac{\sum f^{ii}}{(1+\sum x_k^2)^2} + \frac{16\|\nabla f\|^2}{(d + f)^2}.\end{equation}
Combining \eqref{eqn3.6} and \eqref{eqn3.7} yields
\begin{align*}&\varepsilon\frac{1+\sum x_k^2}{(d + f)^2}
\bigg[\frac{\sum f^{ii}}{1+\sum x_k^2}
-\frac{2n}{d + f} - \frac{10\|\nabla f\|^2}{(d + f)^2}\bigg]\\
&\quad
+ \frac{2n\|\nabla f\|^2}{(d+f)^2}- \frac{2n^2}{d+f}
+A- C_2n- C_3\leq 0.\end{align*}
By choosing $\varepsilon>0$ such that $10\varepsilon b\leq 1$, we have
$$\varepsilon\frac{\sum f^{ii}}{(d + f)^2}+ A- C_4\leq 0.$$
By the relation between the geometric mean and the arithmetic mean, we get
$$\frac{\det(u_{ij})}{\left(d+f\right)^{ {2n} }}
=\frac{(\det(f_{ij}))^{-1}}{\left(d+f\right)^{ 2n }}\leq \frac{C(n)(\sum f^{ii})^{n}}{\left(d+f\right)^{ 2n }}\leq C_5.$$
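For the reader's convenience, the last two steps can be spelled out as follows. Since $(u_{ij})$ and $(f_{ij})$ are mutually inverse Hessians, $\det(u_{ij})=(\det(f_{ij}))^{-1}=\det(f^{ij})$, and the arithmetic--geometric mean inequality applied to the eigenvalues of $(f^{ij})$ gives
$$
\det(f^{ij})\leq\Big(\frac{1}{n}\sum_i f^{ii}\Big)^{n},
$$
while the preceding display bounds $\varepsilon\,\frac{\sum_i f^{ii}}{(d+f)^{2}}$, and hence $\frac{(\sum_i f^{ii})^{n}}{(d+f)^{2n}}$, by a constant.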
Therefore, $\mathcal{F}(p^*)\le C_6$, and hence $\mathcal{F}\le C_6$ everywhere. The definition of $\mathcal{F}$ and the bounds on $\mathbb D$ imply
$$\exp\left\{ -C_1 f \right\}\frac{\det(u_{ij})}{\left(d+f\right)^{ {2n} }}\leq C_7.$$
This is the desired estimate. \end{proof}
\section{Convergence theorems in sections}\label{Sec-Convergence}
\v
Let $\Omega^*\subset \mathbb{R}^n$. Denote by $\mathcal{F}(\Omega^*,C)$ the class of smooth convex functions
defined on $\Omega^*$ such that
$$ \inf_{\Omega^*} {u}=0,\;\;\;
u= C>0\;\;\mbox{ on }\;\;\partial \Omega^*.$$
\begin{lemma}\label{lemma 4.1}
Let $\Omega^*\subset \mathbb{R}^n$ be a normalized domain, $u\in \mc F(\Omega^*, C)$ be a function satisfying the generalized Abreu Equation \eqref{eqn 1.1}. Suppose that there is a constant $C_1>0$ such that in $\Omega^*$ \begin{equation}\label{eqn_A}C_1\inv\leq
\det(u_{ij})\leq C_1.\end{equation}
Then for any $\Omega^\circ\subset\subset \Omega^* $, $p>2$, we have the
estimate \begin{equation}\|u\|_{W^{4, p}(\Omega^\circ)}\leq C ,\;\;\;\;\|u\|_{C^{3,\alpha}(\Omega^\circ)}\leq C,
\end{equation} where $C$ depends on $n, p, \mathbb{D}, C_1, \|A\|_{L^{\infty}(\Delta)}$, $dist(\Omega^\circ,
\partial \Omega^* )$.
\end{lemma}
\v\n
{\bf Proof.} In \cite{C-G} Caffarelli and Guti\'errez proved a H\"older estimate for solutions of
homogeneous linearized Monge-Amp\`ere equations
under the assumption that the Monge-Amp\`ere measure $\mu[u]$ satisfies a certain
condition, which is guaranteed by \eqref{eqn_A}. Consider the
generalized Abreu Equation
$$ \sum U^{ij}\mathbb F_{ij}= -A\mathbb D
,\;\;\;\mathbb{F}:=\frac{\mathbb D}{\det(u_{ij})}, $$ where $A\in
L^\infty(\Omega).$ Since $\mathbb{D}\in C^{\infty}(\bar{\Delta})$ and $\mathbb{D}>0$, by the same argument as in \cite{C-G} one can obtain the H\"older
continuity of $\det(u_{ij})$. Then Caffarelli's $C^{2,\alpha}$
estimates for Monge-Amp\`ere equations \cite{C1} give us
$$\|u\|_{C^{2,\alpha}(\Omega^*)}\leq C_2.$$
Following from the standard elliptic regularity theory we have
$\|u\|_{W^{4, p}(\Omega^*)}\leq C_3 $. By the Sobolev embedding theorem
\[\|u\|_{C^{3,\alpha}(\Omega^\circ)}\leq C_4 \|u\|_{W^{4, p}(\Omega^*)}.
\]
Then the lemma follows.
$\blacksquare$
\v
Let $\Omega\subset \mathbb{R}^n$. Denote by $\mathcal{F}(\Omega,C)$ the class of smooth convex functions
defined on $\Omega$ such that
$$ \inf_{\Omega} {f}=0,\;\;\;
f= C>0\;\;\mbox{ on }\;\;\partial \Omega.$$ Next we prove
the following convergence theorem.
\begin{theorem}\label{theorem_4.2}
Let $\Omega\subset \mathbb R^{n}$ be a normalized domain. Let $f_{(k)}\in \mc
F(\Omega,C)$ be a sequence of functions satisfying the equation \begin{equation}\label{equ 4.4}
-\sum_{i,j} f_{(k)}^{ij}\frac{\p^2(\log \mathbb F_{(k)})}{\p x_i \p x_j} -\sum_{i,j}f_{(k)}^{ij}\frac{\p (\log \mathbb F_{(k)})}{\p x_i}\frac{\p (\log \mathbb D)}{\p x_j}=A_{(k)}.
\end{equation}
Suppose that $A_{(k)}$ $C^{m}$-converges to $A$ on $\bar\Omega$ and there are constants $0< C_1<C_2$ independent of $k$ such that
\begin{equation} \label{equ 4.5}
C_1\leq \det\left(\frac{\p^2 f_{(k)}}{\p x_i\p x_j}\right)\leq C_2\end{equation}
hold in $\Omega$. Then there exists a subsequence
of functions, without loss of generality still denoted by
$f_{(k)}$, locally uniformly converging to a function $f_\infty$ in $\Omega$ and, for any open set $\Omega_o$ with $\bar{\Omega}_o\subset \Omega$ and for any $\alpha\in (0,1)$, $f_{(k)}$ $C^{m+3,\alpha}$-converges
to $f_\infty$ in $\Omega_o$.
\v
\end{theorem}
\v\n
{\bf Proof.} It is obvious that there exists a subsequence
of functions, locally uniformly converging to a function $f_\infty$ in $\Omega$. A fundamental result on Monge-Amp\`ere equations tells us that $f_\infty$ is $C^{1,\alpha}$ and strictly convex (see \cite{Gui}).
Suppose that $f_\infty(p)=0$ for some point $p\in \Omega$. We choose the coordinates $x=(x_1,...,x_n)$ such that $x(p)=0$.
Put
$$u_{(k)}=\sum x_i\frac{\p f_{(k)}}{\p x_i} - f_{(k)},\;\;\;\Omega^*_{(k)}=\nabla f_{(k)}(\Omega),$$
$$u_{\infty}=\sum x_i\frac{\p f_{\infty}}{\p x_i} - f_{\infty},\;\;\;\Omega^*_{\infty}=\nabla f_{\infty}(\Omega).$$
We have $f_{\infty}(0)=u_{\infty}(0)=0$.
The key point of the proof of the theorem is the following claim.
\v\n
{\bf Claim.} There are constants $C>0$, $b> r >0$ such that $\bar{S}_{u_{\infty}}(0,C)$ is compact and
$$D_r(0)\subset S_{u_{\infty}}(0,C)\subset D_b(0).$$
\v\n
{\bf Proof of Claim.} Denote
$$M:=\{(x, f_{\infty}(x))| x\in \Omega\},\;\;\;M^*:=\{(\xi, u_{\infty}(\xi))| \xi\in \Omega^*_{\infty}\}.$$
Then $M$ is a $C^{1,\alpha}$ strictly convex hypersurface with support hyperplane $H=\{x \,|\, x_{n+1}=0\}$ at $0$. We now look at the geometric meaning of $u_{\infty}$. Let $q\in \Omega$ be a point near $0$. The support hyperplane of $M$ at $(q,f_{\infty}(q))$ is given by
$$H_{(q,f_{\infty}(q))}=\left\{ (x_1,\ldots,x_n,x_{n+1})\,\Big|\, \sum x_i\frac{\p f_{\infty}}{\p x_i}(q) - x_{n+1}=\sum x_i(q)\frac{\p f_{\infty}}{\p x_i}(q) - f_{\infty}(q)\right\}.$$
Its intersection with the $x_{n+1}$-axis is
\begin{equation} \label{equ 4.2}
H_{(q,f_{\infty}(q))}\bigcap \{(0,\ldots,0,x_{n+1})\}=(0,\ldots,0, -u_{\infty}(q)).\end{equation}
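Indeed, setting $x_1=\cdots=x_n=0$ in the hyperplane equation above gives $x_{n+1}=f_{\infty}(q)-\sum x_i(q)\frac{\p f_{\infty}}{\p x_i}(q)=-u_{\infty}(q)$, which is \eqref{equ 4.2}; this one-line verification is included only for the reader's convenience.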
In particular, we have
\v
{\bf ($\star$)} $u_{\infty}$ is monotonically increasing along every ray $\{x_i=a_it,\ t\geq 0, \;i=1,\ldots,n\}$ emanating from $0$,
where $a_i$ are constants with $\sum a_i^2=1$.
\v
\v\n
By the strict convexity of $f_{\infty}$ we can find $b_1>b_2>0$, $d_1>d_2>0$ such that
\begin{itemize}
\item[(1)] $\bar{S}_{f_{\infty}}(0,b_2)\subset \bar{S}_{f_{\infty}}(0,b_1)\subset D_{d_1}(0)\subset \Omega$,
\item[(2)] $dist\left(\bar{S}_{f_{\infty}}(0,b_2), \p \bar{S}_{f_{\infty}}(0,b_1)\right)\geq d_2$.
\end{itemize}
Then
$$|\nabla f_{\infty}|\leq \frac{b_1}{d_2}\;\;\;\;\forall x\in \bar{S}_{f_{\infty}}(0,b_2).$$
It follows that $\bar{S}_{u_{\infty}}(0,C)\subset D_b(0)$ for some constant $b>0$. By compactness we can find $p\in \p S_{f_{\infty}}(0,b_2)$ such that $u_{\infty}(p)=\min_{\p S_{f_{\infty}}(0,b_2)}\{u_{\infty}\}.$
By {\bf ($\star$)} we can find a set $\Omega^\circ\subset \bar{S}_{f_{\infty}}(0,b_2)$ such that
$$u_{\infty}(x)=u_{\infty}(p)\;\;\;\forall x\in \p \Omega^\circ.$$
Let $q\in \p\Omega^\circ$ be the point with $f_{\infty}(q)=\min_{\p\Omega^\circ}\{f_{\infty}\}$.
By the strict convexity of $f_{\infty}$, we have $f_{\infty}(q)>0$. Consider the convex cone $V$ with vertex $(0,0)$ and base
$$\{(x_1,x_2,\ldots,x_n, f_{\infty}(q))\,|\,x_1^2 + \cdots + x_n^2=d_1^2\}.$$
By the comparison theorem for normal maps, there exists a Euclidean ball $D_{r}(0)$ such that $D_{r}(0)\subset \nabla f_{\infty}(\bar{S}_{f_{\infty}}(0, f_{\infty}(q)))$. We choose $C=u_{\infty}(p)$. Then
$$D_r(0)\subset S_{u_{\infty}}(0,C)\subset D_b(0).$$
The claim follows.
\v\n
By the claim we conclude that
$$\bar{S}_{u_{(k)}}(0,C/2):=\{\xi\, |\,u_{(k)}(\xi)\leq C/2\}$$
is compact and contains a Euclidean ball for $k$ large enough. By \eqref{equ 4.5} we have
\begin{equation} \label{equ 4.3}
\frac{1}{C_2}\leq \det\left(\frac{\p^2 u_{(k)}}{\p \xi_i\p \xi_j}\right)\leq \frac{1}{C_1}.\end{equation}
A direct calculation shows that $u_{(k)}$ satisfies the generalized Abreu Equation \eqref{eqn 1.1}. By Lemma \ref{lemma 4.1}, $u_{(k)}$ $C^{m+3}$-converges to $u_{\infty}$. It follows that $f_{(k)}$ $C^{m+3}$-converges to $f_{\infty}$ in a neighborhood of $0$.
\v
Now let $p\in \Omega$ be an arbitrary point and let $l$ be the linear function defining the support hyperplane of $M$ at $(p,f_{\infty}(p))$. Let
$$\tilde{f}_{\infty}= f_{\infty}- l.$$
We use $\tilde{f}_{\infty}$ instead of $f_{\infty}$ and repeat the argument above. The theorem follows. $\blacksquare$
\section{Proof of the Main Theorem}\label{Sec-ProofMain}
Since $(\Delta,\mathbb{D},A)$ is uniformly $K$-stable and $A^{(k)}$ converges to $A$ smoothly in $\bar \Delta$,
the triple $(\Delta, \mathbb{D},A^{(k)})$ is uniformly $K$-stable for large $k$, i.e.,
$\Delta$ is $(\mathbb{D},A^{(k)},\lambda)$-stable
for some constant $\lambda>0$ independent of $k$.
Since $u^{(k)}$ satisfies the generalized Abreu Equation \eqref{eqn 1.6}, we have
$$\mc L_{A^{(k)}}(u^{(k)})=\int_{\Delta}\sum_{i,j} (u^{(k)})^{ij}(u^{(k)})_{ij}\mathbb{D}d\mu=n\int_{\Delta}\mathbb{D}d\mu
$$ and hence
$$\int_{\partial \Delta} u^{(k)} d \sigma\leq n\lambda^{-1}\frac{\max_{\Delta}\mathbb{D}}{\min_{\Delta}\mathbb{D}} \mbox{Area}(\Delta).
$$
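For the reader's convenience, the chain of inequalities behind the last display is
$$
\lambda\min_{\Delta}\mathbb{D}\int_{\partial\Delta}u^{(k)}d\sigma
\leq\lambda\int_{\partial\Delta}u^{(k)}\mathbb{D}d\sigma
\leq\mc L_{A^{(k)}}(u^{(k)})=n\int_{\Delta}\mathbb{D}d\mu
\leq n\max_{\Delta}\mathbb{D}\,\mbox{Area}(\Delta),
$$
where the second inequality is the uniform $K$-stability of Definition \ref{defn_1.5} applied to $u^{(k)}\in\mathbf{S}_{p_o}$.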
It follows that, after passing to a subsequence still denoted by $u^{(k)}$, the sequence $u^{(k)}$ locally
uniformly converges to a convex function $u$ in $\Delta$.
\v
{\it Claim.} For any point $\xi\in \Delta$ and any
$B_{\delta}(\xi)\subset \Delta$, there exists a point
$\xi_o\in B_{\delta}(\xi)$ such that $u$ has second derivatives
and is strictly convex at $\xi_o$. Here, $B_{\delta}(\xi)$ denotes the
Euclidean ball centered at $\xi$ with radius $\delta.$
\v
The proof of the claim is the same as in \cite{CLS5}, see also \cite{CHLS}.
\v
We now choose coordinates such that $\xi_o=0$.
By adding linear functions, we assume that all $u^{(k)}$ and $u$ are normalized at $0$.
Since $u$ is strictly convex at $0$, there exist constants $\epsilon'>0$, $d_2>d_1>0$ and $b' >0$,
independent of $k$, such that, for large $k$,
$$B_{d_1}(0)\subset\bar{S}_{u^{(k)}}(0,\epsilon')\subset B_{d_2}(0)\subset \Delta,$$ and
$$
\sum_i \left(\frac{\partial u^{(k)}}{\partial \xi_i}\right)^2\leq b'
\quad\text{in } S_{u^{(k)}}(0,\epsilon').$$ By
Lemma \ref{lemma_3.1} and Lemma \ref{lemma_3.3}, we have
\begin{equation}\label{eqn 4.3}
C_1\leq \det(u_{ij}^{(k)}) \leq C_2
\quad\text{in }S_{u^{(k)}}(0,\tfrac{1}{2}\epsilon'),\end{equation}
where $C_1<C_2$ are positive constants independent of $k$.
By Lemma \ref{lemma 4.1} $\{u^{(k)}\}$ converges smoothly to $u$.
Therefore, $u$ is a smooth and
strictly convex function in $S_u(0,\epsilon'/2)$.
\v
Let $f^{(k)}$ be the Legendre transform of $u^{(k)}$.
Then, $\{f^{(k)}\}$ locally
uniformly converges to a convex function $f$ defined in the
whole $\mathbb{R}^n$. Furthermore, in a neighborhood of $0$,
$f$ is a smooth and strictly convex function whose
Legendre transform $u$ satisfies the generalized Abreu Equation \eqref{eqn 1.1}.
By the convexity of $f^{(k)}$ and the local and uniform convergence of
$\{f^{(k)}\}$ to $f$, we conclude, for any $k$,
$$\frac{1+\sum_i x_i^2}{(d+ f^{(k)})^2} \leq b\quad\text{in }\mathbb R^n,$$
and, for any $C>1$,
$$B_r(0)\subset S_{f^{(k)}}(0,C)\subset B_{R_C}(0),$$
for some positive constants $d$, $b$, $r$ and $R_C=R(C)>0$. By Lemma \ref{lemma_3.1} and Lemma \ref{lemma_3.5}, we have
$$\exp\{-\mff C_3 C\}\frac{1}{(d+C)^{2n}}\leq \det (f_{ij}^{(k)})\leq \mff C_1 .$$
We note that each $f^{(k)}$ satisfies \eqref{equ 4.4}. By Theorem \ref{theorem_4.2}
we conclude that $\{f^{(k)}\}$ uniformly and smoothly converges to $f$
in $S_f(0,C/2)$. Since $C$ is arbitrary, $f$ is a smooth and strictly convex function in $\mathbb{R}^n$,
and the sequence $\{f^{(k)}\}$ locally and smoothly converges to $f$. By Legendre transforms,
we obtain that $u$ is a smooth and strictly convex function in $\Delta$
and that the sequence $\{u^{(k)}\}$ locally and smoothly converges to $u$.
This completes the proof of Theorem \ref{theorem_1.2}.
\v\v\v\v
\end{document}
\begin{document}
\title{\boldmath\textbf{$E_6$, the Group:\\
The structure of $SL(3,\mathbb{O})$}}
\author{
Aaron Wangberg\\
Department of Mathematics \& Statistics\\
Winona State University\\
Winona, MN 55987\\
\texttt{[email protected]}
\and
Tevian Dray\\
Department of Mathematics\\
Oregon State University\\
Corvallis, OR 97331\\
\texttt{[email protected]}
}
\date{\today}
\maketitle
\begin{abstract}
We present the subalgebra structure of $\mathfrak{sl}(3,\mathbb{O})$, a particular real form of
$e_6$ chosen for its relevance to particle physics and its close relation to
generalized Lorentz groups. We use an explicit representation of the Lie
group $SL(3,\mathbb{O})$ to construct the multiplication table of the corresponding
Lie algebra $\mathfrak{sl}(3,\mathbb{O})$. Both the multiplication table and the group are then
utilized to find various nested chains of subalgebras of $\mathfrak{sl}(3,\mathbb{O})$, in which
the corresponding Cartan subalgebras are also nested where possible. Because
our construction involves the Lie group, we simultaneously obtain an explicit
representation of the corresponding nested chains of subgroups of $SL(3,\mathbb{O})$.
\end{abstract}
\section{Introduction}
The group $E_6$ has a long history of applications in
physics~\cite{Jordan,JNW,Albert}, and is a candidate gauge group for a Grand
Unified Theory~\cite{Georgi}. A description of the group $E_{6(-26)}$ as
$SL(3,\mathbb{O})$ was given in~\cite{Denver}, generalizing the interpretation of
$SL(2,\mathbb{O})$ as (the double cover of) $SO(9,1)$ discussed in~\cite{Lorentz}.
An interpretation combining spinor and vector representations of the Lorentz
group in 10 spacetime dimensions was described in~\cite{York}. In this paper,
we fill in some further details of the structure of $SL(3,\mathbb{O})$, in the
process obtaining nested chains of subgroups that respect this Lorentzian
structure.
We begin by reviewing the construction of both $SL(2,\mathbb{O})$ and $SL(3,\mathbb{O})$ at
the group level in Section~\ref{Group}, then describe the construction of the
Lie algebra $\mathfrak{sl}(3,\mathbb{O})$ in Section~\ref{Algebra}. In Section~\ref{Chains}, we
use this information to construct various chains of subgroups and subalgebras,
some but not all of which are simple, and in Section~\ref{conclusion} we
discuss our results.
\section{The Group}
\label{Group}
\subsection{Lorentz transformations}
\label{Lorentz}
\begin{table}[btp]
\begin{center}
$$
\begin{array}[t]{|l|c|}
\hline
\hbox{Boosts} &
\begin{array}{ccl}
\noalign{
}
B_{tz} & t \longleftrightarrow z &
\mathbf{M} = \left( \begin{array}{cc}
\exp\left(\frac{\alpha}{2}\right) & 0 \\
0 & \exp\left(-\frac{\alpha}{2}\right) \\
\end{array} \right) \\
\noalign{
}
B_{tx} & t \longleftrightarrow x &
\mathbf{M} = \left( \begin{array}{cc}
\cosh\left(\frac{\alpha}{2}\right) & \sinh\left(\frac{\alpha}{2}\right) \\
\sinh\left(\frac{\alpha}{2}\right) & \cosh\left(\frac{\alpha}{2}\right)
\end{array} \right) \\
\noalign{
}
B_{tq} & t \longleftrightarrow q &
\mathbf{M} = \left( \begin{array}{cc}
\cosh\left(\frac{\alpha}{2}\right) & -q \sinh\left(\frac{\alpha}{2}\right)\\
q \sinh\left(\frac{\alpha}{2}\right) & \cosh\left(\frac{\alpha}{2}\right)
\end{array} \right) \\
\noalign{
}
\end{array} \\
\hline
\hline
\begin{minipage}{0.85in}
Simple \\ Rotations
\end{minipage} &
\begin{array}{ccl}
\noalign{
}
R_{xq} & x \longleftrightarrow q &
\mathbf{M} = \left( \begin{array}{cc}
\exp\left(-\frac{q\alpha}{2}\right) & 0 \\
0 & \exp\left(\frac{q\alpha}{2}\right)
\end{array} \right) \\
\noalign{
}
R_{xz} & x \longleftrightarrow z &
\mathbf{M} = \left( \begin{array}{cc}
\cos\left(\frac{\alpha}{2}\right) & \sin\left(\frac{\alpha}{2}\right) \\
-\sin\left(\frac{\alpha}{2}\right) & \cos\left(\frac{\alpha}{2} \right)
\end{array} \right) \\
\noalign{
}
R_{zq} & q \longleftrightarrow z &
\mathbf{M} = \left( \begin{array}{cc}
\cos\left(\frac{\alpha}{2}\right) & q \sin\left(\frac{\alpha}{2}\right) \\
q \sin\left(\frac{\alpha}{2}\right) & \cos\left(\frac{\alpha}{2}\right)
\end{array} \right) \\
\noalign{
}
\end{array} \\
\hline
\hline
\begin{minipage}[c]{0.85in}
Transverse \\ Rotations
\end{minipage} &
\begin{array}{ccl}
R_{p,q} & p \longleftrightarrow q &
\mathbf{M}_1 = -p \; \mathbf{I}_2 \\
& &\mathbf{M}_2 = \left(\; \cos \left(\frac{\alpha}{2}\right) p
+ \sin \left( \frac{\alpha}{2} \right) q \; \right) \mathbf{I}_2
\end{array} \\
\hline
\end{array}
$$
\caption{Finite octonionic Lorentz transformations. The group transformation
is given by \hbox{$\mathbf{X}\longmapsto\mathbf{M}\mathbf{X}\mathbf{M}^\dagger$} for boosts and
simple rotations, and by
$\mathbf{X}\longmapsto\mathbf{M}_2(\mathbf{M}_1\mathbf{X}\mathbf{M}_1^\dagger)\mathbf{M}_2^\dagger$ for
transverse rotations. The parameters $p$ and $q$ are imaginary unit
octonions.}
\label{Finite}
\end{center}
\end{table}
A $2\times2$ Hermitian matrix
\begin{equation}
\mathbf{X} = \left( \begin{array}{cc} t+z& x-q\\ x+q& t-z\end{array} \right)
\end{equation}
with $t,z,x\in\mathbb{R}$ and pure imaginary $q\in\mathbb{K}=\mathbb{R},\mathbb{C},\mathbb{H},\mathbb{O}$, is a
representation of an ($m+1$)-dimensional spacetime vector for
$m+1=||\mathbb{K}||+2\in\{3,4,6,10\}$. In this setting, the squared Lorentzian norm
of $\mathbf{X}$ is given by $\det\mathbf{X}$. Lorentz transformations preserve
$\det\mathbf{X}$ and must also preserve the Hermiticity of $\mathbf{X}$. Any Lorentz
transformation can be described as the composition of maps of the form
\begin{equation}
\mathbf{X} \mapsto \mathbf{M} \mathbf{X} \mathbf{M}^\dagger
\label{Mtwo}
\end{equation}
for certain \textit{generators} $\mathbf{M}$. In the octonionic case, the
determinant-preserving transformations of the form~(\ref{Mtwo}) constitute
$SL(2,\mathbb{O})$, the (double cover of the) Lorentz group $SO(9,1)$. We adopt the
explicit set of generators constructed by Manogue and Schray~\cite{Lorentz},
as given in Table~\ref{Finite}.
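As a quick check of the statement above that $\det\mathbf{X}$ gives the squared Lorentzian norm (an elementary expansion included only for concreteness): since $x$, $t$, $z$ are real and $q$ is purely imaginary, $(x-q)(x+q)=x^2-q^2=x^2+|q|^2$, so
\[
\det\mathbf{X}=(t+z)(t-z)-(x-q)(x+q)=t^2-z^2-x^2-|q|^2 ,
\]
which is indeed the Minkowski quadratic form of the corresponding spacetime vector in $m+1$ dimensions.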
An important feature of these transformations is that the \textit{transverse
rotations} between octonionic units require \textit{nesting}; the lack of
associativity prevents one from combining the given transformations of the
form~(\ref{Mtwo}) into a single such transformation. We will return to this
point in Section~\ref{Algebra}.
The exceptional Jordan algebra~$\mathbf{H}_3(\mathbb{O})$, also known as the Albert algebra,
consists of~\hbox{$3\times3$} octonionic Hermitian matrices under the Jordan
product, and forms a~$27$-dimensional representation of~$E_6$~\cite{corrigan},
which is precisely the group that preserves the determinant of Jordan
matrices; in this sense, $E_6=SL(3,\mathbb{O})$. There are three natural ways to
embed a $2\times2$ Hermitian matrix in a $3\times3$ Hermitian matrix, as
illustrated in Table~\ref{Types}, which we refer to as \textit{types}.
Furthermore, $SL(2,\mathbb{O})$ sits inside $SL(3,\mathbb{O})$ under the identification
\begin{equation}
\mathbf{M} \longmapsto
\mathcal{M} = \left( \begin{array}{c|c}
\mathbf{M} & 0 \\ \hline 0 & 1
\end{array}\right)
\label{typeI}
\end{equation}
If we take $\mathcal{X}\in\mathbf{H}_3(\mathbb{O})$ to be of type~1, as per Table~\ref{Types}, then
under the transformation
\begin{equation}
\mathcal{X} \mapsto \mathcal{M} \mathcal{X} \mathcal{M}^\dagger
\label{Mthree}
\end{equation}
we recover not only the \textit{vector} transformation~(\ref{Mtwo}) on
$\mathbf{X}$, but also the \textit{spinor} transformation
\(
\theta \mapsto \mathcal{M}\theta
\)
on the 2-component octonionic column $\theta$.
\footnote{Further discussion of ``vectors'' and ``spinors'' can be found
in~\cite{York}.}
\begin{table}[tbp]
\begin{center}
\begin{tabular}{ccccc}
\textrm{Type 1} & & \textrm{Type 2} & & \textrm{Type 3}\\
$\left( \begin{array}{c|c}
\mathbf{X} & \theta \\ \hline \theta^\dagger & \cdot
\end{array}\right)$ & &
$\left( \begin{array}{c|c}
\cdot & \theta^\dagger \\ \hline \theta & \mathbf{X}
\end{array}\right)$ & &
$\left( \begin{array}{c|c|c}
\mathbf{X}_{2,2} & \theta_2 & \mathbf{X}_{2,1} \\
\hline \overline{\theta_2} & \cdot & \overline{\theta_1} \\
\hline \mathbf{X}_{1,2} & \theta_{1} & \mathbf{X}_{1,1} \end{array}\right)$ \\
\end{tabular}
\caption{Three natural locations of a vector~$\mathbf{X}$, a spinor~$\theta$, and a
dual spinor~$\theta^\dagger$ in~$\mathcal{X}\in E_6$.}
\label{Types}
\end{center}
\end{table}
We generalize this construction to all three types. We write $M^1$ (instead
of $\mathcal{M}$) for the type~1 version of $\mathcal{M}$, as defined by~(\ref{Mthree}). Then
type~2 and~3 versions of $\mathcal{M}$ can be obtained as
\begin{equation}
M^2 = \mathcal{T} M^1 \mathcal{T}^\dagger \hspace{1.5cm}
M^3 = \mathcal{T} M^2 \mathcal{T}^\dagger \hspace{1.5cm}
\label{typetran}
\end{equation}
so that the group transformation
\begin{equation}
\mathcal{T}
= \left( \begin{array}{ccc}
0 & 0 & 1 \\ 1 & 0 & 0 \\ 0 & 1 & 0
\end{array} \right)
\in E_6
\end{equation}
cyclically permutes the 3 types. We discuss \textit{type transformations} of
the form~(\ref{typetran}) in more detail in Section~\ref{Type}.
\subsection{A new basis for transverse rotations}
As outlined in~\cite{Denver,York}, $E_6$ can be viewed as the appropriate
union of these 3 copies of \hbox{$SO(9,1,\mathbb{R}) = SL(2,\mathbb{O})$}. But we have
$3\times45=135$ elements, and we need to find a way to reduce this number to
$|E_6|=78$.
We start by constructing a new basis for the transverse rotations in
$SL(2,\mathbb{O})$.
Each transverse rotation $R_{p,q}$ listed in Table~\ref{Finite} rotates a
single plane spanned by the orthogonal imaginary octonions~$p$ and~$q$, and
rotations of the 21 independent planes generate $SO(7)$. Since
\hbox{$G_2\subset SO(7)$}, we choose a basis for $G_2$ and extend it to
$SO(7)$. For each basis octonion, say $q=i$, there are three pairs of basis
octonions, in this case $\lbrace j,k \rbrace$, $\lbrace k\ell,j\ell \rbrace$,
$\lbrace \ell,i\ell \rbrace$, which generate quaternionic subalgebras
containing $q$. We have chosen the ordering of the pairs so that adding $q$
leads to a right-handed, three-dimensional coordinate frame, and so that
$\ell$ only appears (if at all) in the last pair. A choice of pairs for each
basis octonion that satisfies these conditions is given in Table~\ref{AGS}.
We now define the combinations
\begin{align}
A_i(\alpha) &= R_{j,k}(\alpha) \circ R_{k\ell,j\ell}(-\alpha) \nonumber\\
G_i(\alpha) &= R_{j,k}(\alpha) \circ R_{k\ell,j\ell}(\alpha)
\circ R_{\ell,i\ell}(-2\alpha) \label{AGSdef}\\
S_i(\alpha) &= R_{j,k}(\alpha) \circ R_{k\ell,j\ell}(\alpha)
\circ R_{\ell,i\ell}(\alpha) \nonumber
\end{align}
and use the conventions in Table~\ref{AGS} to similarly define $A_q$, $G_q$,
and $S_q$ for the remaining basis octonions.
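For concreteness (simply instantiating~(\ref{AGSdef}) with the row for $q=\ell$ in Table~\ref{AGS}; this is spelled out here only as an example), one has
\begin{align*}
A_\ell(\alpha) &= R_{i\ell,i}(\alpha) \circ R_{j\ell,j}(-\alpha) \\
G_\ell(\alpha) &= R_{i\ell,i}(\alpha) \circ R_{j\ell,j}(\alpha) \circ R_{k\ell,k}(-2\alpha) \\
S_\ell(\alpha) &= R_{i\ell,i}(\alpha) \circ R_{j\ell,j}(\alpha) \circ R_{k\ell,k}(\alpha)
\end{align*}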
As we discuss in more detail below, the 14 transformations of the form $A_q$
and $G_q$ generate the group~$G_2$, the 7 transformations $A_q$ together with
$G_\ell$ generate the subgroup $SU(3)\subset G_2$ which fixes~$\ell$, and all
21 of these transformations, which generate $SO(7)$, are orthogonal (but not
normalized) at the Lie algebra level. We will use these properties to
eliminate redundant group generators.
\begin{table}[tbp]
\begin{center}
\begin{tabular}{|c|c|c|c|}
\hline
$q$ & First pair & Second pair & Third pair \\
\hline
\hline
$i$ & $(j , k )$ & $(k\ell , j\ell)$ & $(\ell , i\ell)$ \\
$j$ & $(k , i )$ & $(i\ell , k\ell)$ & $(\ell , j\ell)$ \\
$k$ & $(i , j )$ & $(j\ell , i\ell)$ & $(\ell , k\ell)$ \\
$k\ell$ & $(j\ell , i)$ & $(j , i\ell) $ & $(k , \ell) $ \\
$j\ell$ & $(i , k\ell)$ & $(i\ell , k) $ & $(j , \ell) $ \\
$i\ell$ & $(k\ell , j)$ & $(k , j\ell) $ & $(i , \ell) $ \\
$\ell$ & $(i\ell , i)$ & $(j\ell , j) $ & $(k\ell , k)$ \\
\hline
\end{tabular}
\caption{Quaternionic subalgebras chosen for~$A_q$,~$G_q$, and~$S_q$.}
\label{AGS}
\end{center}
\end{table}
\section{The Lie Algebra}
\label{Algebra}
\subsection{Constructing the algebra}
\label{E6alg}
We begin by associating each transformation in the Lie group with a vector in
the Lie algebra. Each of the~$135$ transformations is a one-parameter curve
in the group. Given a one-parameter curve~$R(\alpha)$ in a classical Lie
group, the traditional method for associating it with the Lie algebra
generator~$\dot R$ is to find its tangent vector
$\dot R = \frac{\partial R(\alpha)}{\partial\alpha} \big|_{_{\alpha = 0}}$
at the identity element in the group. However, the transverse rotations are
\textit{nested}, that is, they involve more than one operation, and the lack
of associativity prevents one from working with the group elements by
themselves. Instead, we let our one-parameter transformations $R(\alpha)$ act
on elements $\mathcal{X}\in\mathbf{H}_3(\mathbb{O})$, producing a curve~$R(\alpha)(\mathcal{X})$ in~$\mathbf{H}_3(\mathbb{O})$. We
then define the Lie algebra element $\dot{R}\in\mathfrak{e}_6$ to be the map taking
$\mathcal{X}$ to the tangent vector at the identity to this curve in~$\mathbf{H}_3(\mathbb{O})$. That
is, we have the association indicated in Figure~\ref{Commutators} between the
group transformations and the tangent vectors.
\begin{figure}
\caption{Calculating Lie algebra elements and their commutators.}
\label{Commutators}
\end{figure}
We also use group orbits to construct the commutator of two tangent vectors.
In the traditional approach to the classical matrix groups, the commutator of
the tangent vectors~$\dot R_1$ and~$\dot R_2$ is defined as \hbox{$[ \dot R_1,
\dot R_2 ] = \dot R_1 \dot R_2 - \dot R_2 \dot R_1$}. However, we are working
in~$\mathbf{H}_3(\mathbb{O})$, not~$\mathfrak{e}_6$. To find the commutator of the Lie algebra elements
$\dot{R_1}$ and $\dot{R_2}$ associated with curves~$R_1(\alpha)(\mathcal{X})$
and~$R_2(\alpha)(\mathcal{X})$, we create a new curve in~$\mathbf{H}_3(\mathbb{O})$ defined by
\begin{equation}
[ R_1, R_2](\alpha)(\mathcal{X})
= R_1(\frac{\alpha}{2}) \circ R_2(\frac{\alpha}{2})
\circ R_1(-\frac{\alpha}{2}) \circ R_2(-\frac{\alpha}{2})(\mathcal{X})
\end{equation}
where~$\circ$ denotes composition. This new path is not a one-parameter
curve, and its first derivative vanishes at~$\alpha = 0$; the leading behavior
of the curve $[R_1, R_2](\alpha)(\mathcal{X})$ near~$\alpha=0$ is therefore captured by
its second derivative there. We thus define the commutator of~$\dot R_1$ and~$\dot
R_2$ by the following action on $\mathbf{H}_3(\mathbb{O})$
\begin{equation}
\left[ \dot R_1, \dot R_2 \right] (\mathcal{X})
= \frac12 \frac{\partial^2}{\partial \alpha^2}
\bigl[R_1, R_2\bigr](\alpha)(\mathcal{X}) \big|_{_{\alpha = 0}}
\end{equation}
which agrees with the usual definition for matrix Lie groups~\cite{gilmore}.
Our construction of the commutator is summarized in Figure~\ref{Commutators}.
Since we are using the local action of~$SL(3,\mathbb{O})$ on~$\mathbf{H}_3(\mathbb{O})$ to give a
homomorphic image of~$\mathfrak{sl}(3,\mathbb{O})$, our construction does not lead to a readily
available exponential map giving the group element corresponding
to~$[\dot R_1,\dot R_2]$. In particular, we are not always able to find the
one-parameter curve whose tangent vector is~$[\dot R_1,\dot R_2]$.
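For comparison, here is a sketch of the analogous computation for ordinary
matrix groups, with $R_i(\alpha)=e^{\alpha X_i}$ for matrices $X_i$; the octonionic
case considered here differs only in that the transformations act on~$\mathbf{H}_3(\mathbb{O})$
rather than by matrix multiplication alone. A direct expansion gives
\[
e^{s X_1}\, e^{s X_2}\, e^{-s X_1}\, e^{-s X_2}
 = I + s^2 \left[ X_1 , X_2 \right] + O(s^3) ,
\]
so the first derivative of the group commutator curve vanishes at $s=0$, while
one half of its second derivative there is precisely $[X_1,X_2]$; replacing $s$
by $\alpha/2$ as above changes only the overall normalization of the resulting
tangent vector.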
\subsection{Linear dependencies}
\label{Depend}
We shall now give the dependencies among the group transformations by using
linear dependencies among the Lie algebra elements. In doing so, we will
indicate which transformations can be eliminated, leaving our preferred basis
for the group~$SL(3,\mathbb{O})$ and the algebra~$\mathfrak{sl}(3,\mathbb{O})$. Since we are using a
homomorphic image of the Lie algebra~$\mathfrak{sl}(3,\mathbb{O})$, we check that the indicated
dependencies actually do provide dependencies among the group transformations.
We begin with the transverse rotations. Among the~$21$
transformations~$A_q$, $G_q$, and~$S_q$ of each type, direct computation
shows that
\begin{equation}
\dot A^1_q
= \dot A^2_q
= \dot A^3_q \hspace{2cm}
\dot G^1_q
= \dot G^2_q
= \dot G^3_q
\end{equation}
for each basis octonion~$q$. That is, the transformations~$A_q$ and~$G_q$ are
type independent, allowing us to drop the type designation and simply
write~$\dot A_q$ and~$\dot G_q$. These fourteen transformations generate
$G_2=\textrm{Aut}(\mathbb{O})$, which is the smallest of the exceptional Lie groups. We
refer to the type independence of these transformations as \textit{strong
triality}.
When added to the fourteen~$G_2$ transformations, the seven
transformations~$S^a_q$ produce a basis for the~$SO(7)$ of type~$a$, with
\hbox{$a=1,2,3$}. However, the transformations~$S^a_q$ are not independent,
since
\begin{equation}
\dot S^1_q + \dot S^2_q + \dot S^3_q = 0
\end{equation}
Hence, the group generated by any two of the~$SO(7)$ subgroups contains the third. In
particular, we may use the group transformations generated by~$S^a_q$ of
type~$1$ and type~$2$ to generate the type~$3$ transformations generated
by~$S^3_q$. These linear dependences have reduced our~$3\times21=63$
transverse rotations by~$28+7=35$, trimming our original~$135$ transformations
down to~$100$.
Turning to $SO(8)$, we have the relations
\begin{align}
0 &= \dot R^1_{xq} + \dot R^2_{xq} + \dot R^3_{xq} \nonumber\\
\dot R^2_{xq} &= -\frac{1}{2}\dot R^1_{xq} - \frac{1}{2}\dot S^1_q
\label{Srels}\\
\dot S^2_q &= \frac{3}{2} \dot R^1_{xq} - \frac{1}{2}\dot S^1_q \nonumber
\end{align}
which allow us to eliminate a further 21 transformations. We have in fact
expressed all $SO(8)$ transformations of types~2 and~3 in terms of $SO(8)$
transformations of type~1; in this sense, there is only one~$SO(8)$! Again,
this is a result of \textit{triality}.
Having reduced the~$135$ transformations to~$100$ and then by another~$21$
to~$79$, we are left with~$52$ rotations, which preserve the trace of
\hbox{$\mathcal{X}\in\mathbf{H}_3(\mathbb{O})$}, and which form the Lie group~$F_4=SU(3,\mathbb{O})$.
Among the remaining~$27$ boosts, we expect only one additional linear
dependency, which turns out to be
\begin{equation}
\dot B^1_{tz} + \dot B^2_{tz} + \dot B^3_{tz} = 0
\end{equation}
which we use to eliminate~$\dot B^2_{tz}$ and~$\dot B^3_{tz}$ in favor of the
combination~$\dot B^2_{tz} - \dot B^3_{tz}$. The resulting $78$ Lie algebra elements are indeed
independent, and turn out to be orthogonal (but not normalized) with respect
to the Killing form.
We have therefore constructed both the group $E_6=SL(3,\mathbb{O})$, and its Lie
algebra \hbox{$\mathfrak{e}_6=\mathfrak{sl}(3,\mathbb{O})$}; the complete commutation table
for~$\mathfrak{sl}(3,\mathbb{O})$ can be found online at~\cite{commutation_table_online}. In
retrospect, the counting is easy: There is one $SO(8)$ (28 elements), 3 types
of each of the remaining elements of $SO(9)$ (24 elements, yielding $F_4$),
and 3 types of the 9 boosts, with one final dependency, yielding 26 boosts in
all.
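In equation form, this count reads
\[
\dim\mathfrak{e}_6
 = \underbrace{28}_{\mathfrak{so}(8)}
 + \underbrace{3\times 8}_{\text{rest of }F_4}
 + \underbrace{3\times 9 - 1}_{\text{boosts}}
 = 52 + 26 = 78 .
\]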
Our basis can be simplified slightly by noticing that~(\ref{Srels}) implies
\begin{equation}
\dot S^1_q = \dot R^3_{xq} - \dot R^2_{xq}
\end{equation}
where the operations on the RHS commute. Thus, the diagonal \textit{phase}
transformations $S^1_q$ can in fact be constructed \textit{without} nesting;
nesting remains essential, however, for the $G_2$ transformations $A_q$ and $G_q$.
This provides another way to count the basis of $\mathfrak{e}_6$: There are 64
independent trace-free $3\times3$ octonionic matrices, $24+14=38$ of which are
anti-Hermitian (infinitesimal rotations), and $24+2=26$ of which are Hermitian
(boosts), together with the 14 nested transformations making up $\mathfrak{g}_2$, for a
total of 78 independent elements in $\mathfrak{e}_6$~\cite{corrigan}.
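To spell out this count (a bookkeeping aside): each off-diagonal entry of a
$3\times3$ octonionic matrix contributes $8$ real parameters, the diagonal of a
Hermitian matrix is real (leaving $3-1=2$ parameters after removing the real
trace), and the diagonal of an anti-Hermitian matrix is imaginary (leaving
$3\times 7-7=14$ parameters after removing the imaginary trace), so that
\[
\underbrace{9\times 8 - 8}_{\text{trace-free matrices}} = 64
 = \underbrace{24 + 2}_{\text{Hermitian}}
 + \underbrace{24 + 14}_{\text{anti-Hermitian}},
\qquad
26 + 38 + 14 = 78 ,
\]
where the final $14$ are the nested $\mathfrak{g}_2$ transformations.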
We can further identify the 6 elements
\begin{equation}
C = \lbrace \dot B^1_{tz}, \dot B^2_{tz}-\dot B^3_{tz}, \dot R^{1}_{x\ell},
\dot A_\ell, \dot G_\ell, \dot S^1_{\ell} \rbrace
\end{equation}
as a commuting set, and therefore a preferred (orthogonal) basis for the
Cartan subalgebra~$\mathfrak{h}$. We call these basis elements the \textit{Cartan
elements} of~$\mathfrak{e}_6$.
\section{Subalgebra Chains}
\label{Chains}
\subsection{Basic subalgebra chains}
\label{LorentzSub}
We begin with a discussion of $\mathfrak{g}_2\subset \mathfrak{so}(7)$. Our basis selects a
preferred $\mathfrak{su}(3)$ subalgebra of $\mathfrak{g}_2$, namely the $\mathfrak{g}_2$ transformations
which fix the preferred complex subalgebra of $\mathbb{O}$ generated by $\ell$.
G\"unaydin denotes the corresponding~$SU(3)$ subgroup of~$G_2$
as~$SU(3)^C$~\cite{gunaydin_1974}; we prefer to use the name $\mathfrak{su}C$ for this
subalgebra. Explicitly, we have
\begin{equation}
\mathfrak{su}C = \langle \dot A_i, \cdots, \dot A_{i\ell}, \dot A_\ell, \dot G_\ell \rangle
\end{equation}
which is also a subalgebra of the (type~1, say) $\mathfrak{so}(6)\subset\mathfrak{so}(7)$ that
fixes $\ell$.
Through a conventional choice of $A_\ell$, our basis also selects a preferred
quaternionic subalgebra of $\mathbb{O}$, generated by $\lbrace k,k\ell,\ell \rbrace$,
and a preferred subalgebra $\mathfrak{su}H\subset\mathfrak{su}C$ that fixes this quaternionic
subalgebra, namely
\begin{equation}
\mathfrak{su}H = \langle \dot A_k, \dot A_{k\ell}, \dot A_\ell \rangle
\end{equation}
Extending to $\mathfrak{so}(7)$, there is clearly an $\mathfrak{so}(4)$ that fixes $\mathbb{H}$; we have
\begin{equation}
\mathfrak{so}(4) = \mathfrak{so}(3) \oplus \mathfrak{so}(3) = \mathfrak{su}H \oplus
\langle \dot G_k - \dot S^1_k, \dot G_{k\ell} - \dot S^1_{k\ell},
\dot G_\ell - \dot S^1_\ell \rangle
\end{equation}
as can be seen by studying Table~\ref{AGS}. Another interesting $\mathfrak{so}(3)$
subalgebra of $\mathfrak{so}(7)$ is the complement of this $\mathfrak{so}(4)$, an orthogonal basis
for which is given by the combinations $\dot G_q + 2\dot S^1_q$ for
$q\in\textrm{Im}\mathbb{H}$.
We can use our particular choice of basis for the Lie algebra~$\mathfrak{e}_6$ to
identify two separate~$SO(n)$ subgroup structures within the Lie group~$E_6$.
Figure~\ref{Type123} shows the~$SO(n)$ subgroup chain of~$SO(9,1)$ of type~$1$
in~$SL(3,\mathbb{O})$, while Figure~\ref{Type123g2} shows the three~$SO(9)$ subgroup
chains of~$F_4$ within~$E_6$. In both subgroup structures, there is only
one~$SO(8)$. While~$G_2 \subset SO(7)$, it is not a subgroup of~$SO(6)$ in
Figure~\ref{Type123}. Hence, we omit~$G_2$ from Figure~\ref{Type123}, but
include it in Figure~\ref{Type123g2} since our preferred basis for~$SO(7)$
includes a basis for~$G_2$. The figures indicate which Cartan element is
added to a group when it is expanded to a larger group, as well as giving the
classification of the corresponding Lie algebra.
\begin{figure}
\caption{Chain of subgroups $SO(n)\subset SO(9,1)$ of type~1 in~$SL(3,\mathbb{O})$.}
\label{Type123}
\end{figure}
\begin{figure}
\caption{Chains of subgroups $SU(3)^C\subset G_2\subset\cdots\subset F_4$ within~$E_6$.}
\label{Type123g2}
\end{figure}
\subsection{Type transformations}
\label{Type}
The \textit{discrete type transformation}~(\ref{typetran}) induced by~$\mathcal{T}$
cyclically permutes Lorentz transformations of types~1, 2, or~3. We have
\begin{align}
\mathcal{T}^3 &= \mathcal{I} \\
\mathcal{T}^\dagger &= \mathcal{T}^{-1}
\end{align}
where $\mathcal{I}$ is the $3\times3$ identity matrix, and $\mathcal{T}\in SL(3,\mathbb{O})$, since
\begin{equation}
\det(\mathcal{T}\mathcal{X}\mathcal{T}^\dagger) = \det(\mathcal{X})
\end{equation}
for $\mathcal{X}\in\mathbf{H}_3(\mathbb{O})$. Although $\mathcal{T}$ is not one of our elementary group
transformations, there are numerous identities of the form
\begin{align}
\mathcal{T} &= R^1_{xz}(-\pi) \circ R^2_{xz}(-\pi) \nonumber\\
\mathcal{T} &= R^2_{xz}(\pi) \circ R^1_{xz}(\pi)
\circ R^2_{xz}(\pi) \circ R^1_{xz}(\pi) \\
\mathcal{T} &= R^1_{xz}(\pi) \circ R^3_{xz}(\pi)
\circ R^2_{xz}(\pi) \circ R^1_{xz}(\pi) \nonumber
\end{align}
These expressions make clear that $\mathcal{T}\in SL(3,\mathbb{O})$. Furthermore, each of
these expressions may be expanded into a (different) \textit{continuous type
transformation} $\mathcal{T}(\alpha)\in SL(3,\mathbb{O})$ by letting the single fixed angle
($\pi$ or~$-\pi$) become arbitrary. The resulting transformations are
\textit{not} one-parameter subgroups of~$SL(3,\mathbb{O})$, but they do connect
transformations of different types. We are therefore led to explore subgroups
of $SL(3,\mathbb{O})$ that contain these (real!)\ type transformations, although it
suffices to consider subgroups containing $\mathcal{T}$ itself.
\subsection{Type-independent subgroups}
\label{TypeSubs}
We list here some important groups which contain type transformations. The
standard representation of~$SO(3,\mathbb{R})$ is the group
\begin{equation}
SO(3,\mathbb{R})_s = \langle R^1_{xz}, R^2_{xz}, R^3_{xz} \rangle
\end{equation}
This group obviously contains~$\mathcal{T}$, as does the standard representation
\begin{equation}
SL(3,\mathbb{R})_s
= \langle R^1_{xz}, R^2_{xz}, R^3_{xz},
B^1_{tz}, B^2_{tz}, B^1_{tx}, B^2_{tx}, B^3_{tx} \rangle
\end{equation}
of $SL(3,\mathbb{R})$. Using~$\ell$ as our preferred complex unit, we have the
standard representations
\begin{equation}
SU(3,\mathbb{C})_s
= \langle R^1_{xz}, R^2_{xz}, R^3_{xz},
R^1_{x\ell}, R^2_{x\ell}, R^1_{z\ell}, R^2_{z\ell}, R^3_{z\ell} \rangle
\end{equation}
of~$SU(3,\mathbb{C})$, and
\begin{equation}
SL(3,\mathbb{C})_s
= SU(3,\mathbb{C})_s \cup \langle B^1_{tz}, B^2_{tz}, B^1_{tx},
B^2_{tx}, B^3_{tx}, B^1_{t\ell}, B^2_{t\ell}, B^3_{t\ell} \rangle
\end{equation}
of $SL(3,\mathbb{C})$. These four groups are important because they contain the type
transformation~$\mathcal{T}$. If, for instance, some type~1 transformation~$R^1$ is
in a group~$G$ that has one of these groups as a subgroup, then $G$ must also
contain the corresponding type~2 and~3 transformations~$R^2$ and~$R^3$; we say
that $G$ is \textit{type independent}.
The standard representations~$SO(3,\mathbb{R})_s$ and~$SU(3,\mathbb{C})_s$ differ from our
preferred representations $SOH=SUH$ and~$SUC$, which are subgroups
of~$G_2$. For instance, the groups $SU(3,\mathbb{C})_s$ and~$SUC$ are both type
independent, but in~$SU(3,\mathbb{C})_s$ the transformations~$R^1, R^2$ and~$R^3$ are
distinct while in~$SUC$ the three transformations are equal; $SUC$ does not
contain~$\mathcal{T}$, nor does it need to.
We use the type transformation~$\mathcal{T}$ to provide insight into the structure of
the Lie algebra~$\mathfrak{sl}(3,\mathbb{O})$. The algebras~$\mathfrak{g}$ in the left column of
Figure~\ref{TypeSub} are subalgebras of the type~$1$ copy of~$\mathfrak{sl}(2,\mathbb{O})$,
while each algebra~$\mathfrak{g}'$ in the right column is the largest subalgebra
of~$\mathfrak{sl}(3,\mathbb{O})$ such that~$\mathfrak{g}\oplus\mathfrak{g}'$ is still simple. When we
restrict~$\mathfrak{g}$ to a smaller subalgebra of~$\mathfrak{sl}(2,\mathbb{O})$, it is sometimes
possible to expand the type-independent subalgebra~$\mathfrak{g}'$ to a larger
subalgebra of~$\mathfrak{sl}(3,\mathbb{O})$. Each arrow in the diagram indicates inclusion,
and a similar diagram holds for the corresponding subgroups of $SL(3,\mathbb{O})$.
\begin{figure}
\caption{Type-dependent and type-independent subalgebras of~$\mathfrak{e}_6$.}
\label{TypeSub}
\end{figure}
\subsection{\boldmath Reduction of~$\mathbb{O}$ to~$\mathbb{H}$,~$\mathbb{C}$, and~$\mathbb{R}$}
\label{Reduction}
We can also find subalgebras of~$\mathfrak{e}_6$ by restricting our generators to be
quaternionic, complex, or real.
Our preferred quaternionic subalgebra of $\mathbb{O}$ is $\mathbb{H}=\langle 1,k,k\ell,\ell
\rangle$, so we discard transformations involving $i$, $j$, $i\ell$, or
$j\ell$. We therefore discard $3\times4=12$ boosts, $3\times4=12$ simple
rotations involving $z$, and $4$ simple rotations involving $x$ --- but we
must add back in 3 rotations involving $x$ of type~2, since we can no longer
use the middle relation in~(\ref{Srels}) to eliminate them. Turning to the
transverse rotations, we need only consider transformations of type~1, and, as
discussed in Section~\ref{LorentzSub} (or after studying Table~\ref{AGS}), we
see that we must retain only the combinations $\dot G_q - \dot S^1_q$ for
$q\in\textrm{Im}\mathbb{H}$, thus discarding the remaining $21-3=18$ elements of $\mathfrak{so}(7)$. We
are left with $52-34+3=21$ rotations, and $26-12=14$ boosts.
The~$21$ compact generators form the algebra~$\mathfrak{su}(3,\mathbb{H})$, a real form
of~$\mathfrak{c}_3=\mathfrak{sp}(6)$, while all~$35$ together form~$\mathfrak{sl}(3,\mathbb{H})$, a real form
of~\hbox{$\mathfrak{a}_5=\mathfrak{su}(6,\mathbb{C})$}. Restricting only to type~$1$ transformations,
we obtain~$10$ rotations and~$5$ boosts, thus reducing~$\mathfrak{sl}(3,\mathbb{H})$
to~\hbox{$\mathfrak{sl}(2,\mathbb{H})=\mathfrak{so}(5,1)$}, a real form of~$\mathfrak{d}_3=\mathfrak{so}(6)$, and
$\mathfrak{su}(3,\mathbb{H})$ to~$\mathfrak{su}(2,\mathbb{H})=\mathfrak{so}(5)$, a real form of~\hbox{$\mathfrak{c}_2=\mathfrak{sp}(4)$}.
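As a quick dimension check on these identifications (a bookkeeping aside), note that
\[
\begin{aligned}
\dim\mathfrak{sl}(3,\mathbb{H}) &= 21+14 = 35 = \dim\mathfrak{su}(6,\mathbb{C}), &\qquad
\dim\mathfrak{su}(3,\mathbb{H}) &= 21 = \dim\mathfrak{sp}(6), \\
\dim\mathfrak{sl}(2,\mathbb{H}) &= 10+5 = 15 = \dim\mathfrak{so}(6), &\qquad
\dim\mathfrak{su}(2,\mathbb{H}) &= 10 = \dim\mathfrak{sp}(4),
\end{aligned}
\]
in agreement with the generator counts and real forms identified above.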
Furthermore, the subalgebra
$\mathfrak{so}H=\langle\dot A_k,\dot A_{k\ell},\dot A_\ell\rangle$ fixes $\mathbb{H}$. Thus,
for each of the above subalgebras $\mathfrak{g}$, we have $\mathfrak{g}\oplus\mathfrak{so}H\subset\mathfrak{sl}(3,\mathbb{O})$.
In particular, \hbox{$\mathfrak{sl}(3,\mathbb{H}) \oplus \mathfrak{so}H$} is therefore a subalgebra of
$\mathfrak{sl}(3,\mathbb{O})$.
When restricting $\mathbb{O}$ to our preferred complex subalgebra
\hbox{$\mathbb{C}=\langle1,\ell\rangle$}, we obtain the classical Lie
algebras~$\mathfrak{su}(3,\mathbb{C})_s$ and~$\mathfrak{sl}(3,\mathbb{C})_s$ as previously discussed. As there
is only one octonionic unit used to form~$\mathbb{C}$, we do not need to use any of
the transformations from~$SO(7)$, so we have 8 rotations and 8 boosts. Using
all~$16$ transformations gives~$\mathfrak{sl}(3,\mathbb{C})_s$, a real form
of~$\mathfrak{a}_2\oplus\mathfrak{a}_2=\mathfrak{su}(3,\mathbb{C})\oplus \mathfrak{su}(3,\mathbb{C})$ with~$8$ boosts, whereas we
obtain~$\mathfrak{su}(3,\mathbb{C})_s$ by using only the~$8$ compact generators. Further
restricting to the type~$1$ transformations reduces these two algebras
to~$\mathfrak{sl}(2,\mathbb{C})_s=\mathfrak{so}(3,1)_s$ and~$\mathfrak{su}(2,\mathbb{C})_s$, which are real forms
of~\hbox{$\mathfrak{d}_2=\mathfrak{su}(2,\mathbb{C})\oplus \mathfrak{su}(2,\mathbb{C})$} and~$\mathfrak{a}_1=\mathfrak{su}(2,\mathbb{C})$.
When we restrict~$\mathfrak{sl}(3,\mathbb{C})_s$ to~$\mathfrak{sl}(2,\mathbb{C})\subset \mathfrak{sl}(2,\mathbb{O})$ (of type~1,
say), the smaller algebra no longer contains the type transformation~$\mathcal{T}$,
but it does involve the octonionic direction~$\ell$. Thus,
$\mathfrak{sl}(2,\mathbb{C})\oplus \mathfrak{so}(6)$, where~$\mathfrak{so}(6)\subset \mathfrak{so}(7)$ fixes~$\ell$, is also a
subalgebra of~$\mathfrak{sl}(2,\mathbb{O})\subset \mathfrak{sl}(3,\mathbb{O})$.
Finally, by restricting to real transformations, we are left with 3 rotations
and 5 boosts, which is a real form of~$\mathfrak{a}_2=\mathfrak{su}(3,\mathbb{C})$ with~$5$ non-compact
elements. This algebra may be further restricted to either~$\mathfrak{so}(3,\mathbb{R})_s$,
whose group contains the type transformation, or~$\mathfrak{so}(2,1)_s$, which is a
type~$1$ non-compact form of~$\mathfrak{a}_1=\mathfrak{so}(3,\mathbb{R})$.
The above discussion of the result of restricting~$\mathfrak{sl}(3,\mathbb{O})$
to~$\mathfrak{sl}(n,\mathbb{K})$ for~$n = 1,2,3$ and \hbox{$\mathbb{K}=\mathbb{R},\mathbb{C},\mathbb{H},\mathbb{O}$} is
summarized in Figure~\ref{SubAlg}. For each algebra~$\mathfrak{g}$ in
Figure~\ref{SubAlg}, we then list in Figure~\ref{PerpAlg} the maximal
subalgebra~$\mathfrak{g}'$ of~$\mathfrak{e}_6$ such that~$\mathfrak{g}\oplus\mathfrak{g}'\subset\mathfrak{sl}(3,\mathbb{O})$.
Here,~$\mathfrak{so}(6)$ again denotes the subalgebra of type 1 which permutes
$\lbrace i,j,k,k\ell,j\ell,i\ell \rbrace$ but fixes~$\ell$.
Although~$\mathfrak{so}(6)\not\subset\mathfrak{g}_2$, we do have~\hbox{$\mathfrak{su}C\subset\mathfrak{so}(6)$}.
We also write $\mathfrak{u}(1)m$ for the non-compact real representation of $\mathfrak{d}_1$
generated by $\dot B^2_{tz} - \dot B^3_{tz}$, which is discussed further in the next section.
Again, similar diagrams can be drawn for the corresponding subgroups of
$SL(3,\mathbb{O})$.
\begin{figure}
\caption{Subalgebras~$\mathfrak{sl}(n,\mathbb{K})$ of~$\mathfrak{sl}(3,\mathbb{O})$ for $n=1,2,3$ and $\mathbb{K}=\mathbb{R},\mathbb{C},\mathbb{H},\mathbb{O}$.}
\label{SubAlg}
\end{figure}
\begin{landscape}
\begin{figure}
\caption{Subalgebras~$\mathfrak{sl}(n,\mathbb{K})\subset\mathfrak{sl}(3,\mathbb{O})$ and the maximal subalgebras~$\mathfrak{g}'$ with $\mathfrak{g}\oplus\mathfrak{g}'\subset\mathfrak{sl}(3,\mathbb{O})$.}
\label{PerpAlg}
\end{figure}
\end{landscape}
\subsection{Subalgebras fixing type}
\label{FixEll}
Having just considered the subalgebras of $\mathfrak{g}_2$, and hence of $\mathfrak{e}_6$, that
leave invariant a preferred complex or quaternionic subalgebra of $\mathbb{O}$, we
now ask what subalgebra of $\mathfrak{e}_6$ fixes all type~1 elements, that is, which
transformations leave $\mathbf{X}$ alone in the first decomposition of
\hbox{$\mathcal{X}\in\mathbf{H}_3(\mathbb{O})$} shown in Table~\ref{Types}. This subalgebra, which we
will call $\textrm{stab}(I)$, turns out to be quite different from any of the others
discussed previously.
Clearly, no transformation in (type~1) $\mathfrak{sl}(2,\mathbb{O})$ will be in $\textrm{stab}(I)$. We
therefore seek transformations of types~2 and~3. Direct computation shows
that certain \textit{null rotations} will do the job. Each of the 6 vector
spaces defined by
\begin{equation}
b^a_\pm = \langle \dot B^a_{tx} \mp \dot R^a_{xz},
\dot B^a_{tq} \pm \dot R^a_{zq} \rangle
\end{equation}
(where $q$ runs over the seven basis octonions of Table~\ref{AGS}, so that each $b^a_\pm$ is $8$-dimensional) is in fact an abelian subalgebra of $\mathfrak{sl}(3,\mathbb{O})$, and in each case the given
basis elements are null according to the Killing form --- the Killing
form is in fact identically zero on each of these subalgebras. Each of these
subalgebras fixes all elements of a particular type; we have
\begin{equation}
\textrm{stab}(I) = b^2_+ \oplus b^3_-
\end{equation}
with cyclic permutations holding for $\textrm{stab}(II)$ and $\textrm{stab}(III)$.
Since $\textrm{stab}(I)$ contains no elements of (type~1) $\mathfrak{sl}(2,\mathbb{O})$, we expect that
$\mathfrak{sl}(2,\mathbb{O})\oplus\textrm{stab}(I)$ will be a $45+16=61$-dimensional subalgebra of
$\mathfrak{sl}(3,\mathbb{O})$. Checking commutators, this turns out to be correct, but with an
unexpected surprise: $\textrm{stab}(I)$ is an ideal of $\mathfrak{sl}(2,\mathbb{O})\oplus\textrm{stab}(I)$, so
this subalgebra is neither simple nor semisimple.
If we further define
\begin{equation}
\textrm{stab}(I)^\perp = b^2_- \oplus b^3_+
\end{equation}
to be the 16 null rotations of types~2 and~3 that are not in $\textrm{stab}(I)$, then
we have the intriguing decomposition
\begin{equation}
\mathfrak{sl}(3,\mathbb{O}) = \mathfrak{sl}(2,\mathbb{O}) \oplus \textrm{stab}(I) \oplus \textrm{stab}(I)^\perp \oplus \mathfrak{u}(1)m
\end{equation}
with $\mathfrak{u}(1)m$ again denoting the non-compact real representation of $\mathfrak{d}_1$
generated by $\dot B^2_{tz} - \dot B^3_{tz}$.
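As a check on this decomposition, the dimensions add up correctly:
\[
\dim\mathfrak{sl}(3,\mathbb{O}) = 45 + 16 + 16 + 1 = 78 .
\]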
We can now easily determine the subalgebras of $\mathfrak{e}_6$ that, say, leave invariant
the copy of $\mathbb{H}$ or of $\mathbb{C}$ inside the type~1 elements. All we have to do is combine the
relevant subalgebra of $\mathfrak{sl}(2,\mathbb{O})$ --- in this case $\mathfrak{su}H$ or $\mathfrak{su}C$,
respectively --- with $\textrm{stab}(I)$. Each such algebra, here
$\mathfrak{su}H\oplus\textrm{stab}(I)$ and $\mathfrak{su}C\oplus\textrm{stab}(I)$, is a subalgebra of $\mathfrak{e}_6$
which, however, is neither simple nor semisimple. Two further examples are
the 52-dimensional subalgebras $\mathfrak{su}(2,\mathbb{O})\oplus\textrm{stab}(I)$, which fixes
(type~1)~$t$, and $\mathfrak{so}(8,1)_\ell\oplus\textrm{stab}(I)$, where $\mathfrak{so}(8,1)_\ell$ fixes
(type~1)~$\ell$ (and therefore does not contain $\mathfrak{g}_2$).
\section{Conclusion}
\label{conclusion}
In this paper, we have given an explicit description of the subgroup structure
of $SL(3,\mathbb{O})$, based on the ``type'' structure inherent in the embedding of
$SL(2,\mathbb{O})$ in $SL(3,\mathbb{O})$, and on the structure of $SL(2,\mathbb{O})$ itself. In
the process, we have provided explicit realizations of some of the remarkable
properties of $G_2$. The internal structure of $G_2$, such as the $SU(3)$ and
$SU(2)$ subgroups fixing either a complex or quaternionic subalgebra, may be
especially relevant to attempts to use $SL(3,\mathbb{O})$ to describe fundamental
particles, as discussed further in~\cite{York}. Furthermore, we have seen
explicitly how $G_2$ is preserved under triality, as discussed
in~\cite{Denver}. Finally, we have constructed the groups leaving the type
structure invariant, which we suspect may play a prominent role in describing
the interactions of fundamental particles.
However, the story is only partially complete. There are other interesting
subgroups of $SL(3,\mathbb{O})$, closely related to the 4 other real forms of $E_6$.
In particular, we have not yet identified any of the $C_4$ subgroups of $E_6$.
In other work~\cite{E6cartan}, we extend, and in a sense complete, the present
investigation by constructing and discussing chains of subgroups adapted to
these other subgroups. We hope that the resulting maps of $E_{6(-26)}$ will
prove useful in further attempts to apply the exceptional groups to nature.
\section*{Acknowledgments}
This paper is a revised version of Chapter~4 of a dissertation submitted by AW
in partial fulfillment of the degree requirements for his Ph.D.\ in
Mathematics at Oregon State University~\cite{aaron_thesis}. The revision was
made possible in part through the support of a grant from the John Templeton
Foundation.
\end{document}
\begin{document}
\author[Berget]{Andrew Berget} \email{[email protected]}
\author[Manion]{Andrew Manion} \email{[email protected]}
\author[Maxwell]{Molly Maxwell} \email{[email protected]}
\author[Potechin]{Aaron Potechin} \email{[email protected]}
\author[Reiner]{Victor Reiner} \email{[email protected]}
\address{
Department of Mathematics\\
University of California\\
Davis, CA 95616
}
\address{
Department of Mathematics\\
Princeton University\\
Princeton, NJ 08540
}
\address{
Department of Mathematics and Computer Science\\
Colorado College\\
14 E. Cache la Poudre St.\\
Colorado Springs, CO 80903
}
\address{
Department of Mathematics\\
Massachusetts Institute of Technology\\
Cambridge, MA 02139
}
\address{
School of Mathematics\\
University of Minnesota\\
206 Church St. S.E.\\
Minneapolis, MN 55455
}
\thanks{Work of all authors supported by NSF grants DMS-0245379 and
DMS-0601010, and completed partly during REU programs at the University
of Minnesota during the summers of 2003, 2004 and 2008.}
\title{The critical group of a line graph}
\keywords{Critical group, line graph, regular graph.}
\begin{abstract}
The critical group of a graph is a finite abelian group whose order
is the number of spanning forests of the graph. This paper provides
three basic structural results on the critical group of a line
graph.
\begin{itemize}
\item The first deals with connected graphs containing no cut-edge.
Here the number of independent cycles in the graph, which is known
to bound the number of generators for the critical group of the
graph, is shown also to bound the number of generators for the
critical group of its line graph.
\item The second gives, for each prime $p$, a constraint on the
$p$-primary structure of the critical group, based on the largest
power of $p$ dividing all sums of degrees of two adjacent
vertices.
\item The third deals with connected graphs whose line graph is
regular. Here known results relating the number of spanning trees
of the graph and of its line graph are sharpened to exact
sequences which relate their critical groups.
\end{itemize}
The first two results interact extremely well with the third. For
example, they imply that in a regular nonbipartite graph, the critical
group of the graph and that of its line graph determine each other
uniquely in a simple fashion.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction and main results}
\label{intro}
The critical group $K(G)$ of a graph $G$ is a finite abelian group
whose order is the number $\kappa(G)$ of spanning
forests\footnote{Throughout this paper all spanning forests are
assumed to be maximal in the sense that adding an edge of $G$ to a
spanning forest creates a cycle. This removes the possibility of a
connected graph containing a disconnected spanning forest.} of the
graph. One can define $K(G)$ in several ways, closely related to the
{\it cycle} and {\it bond} spaces of the graph, the {\it graph
Laplacian}, as well as a certain {\it chip-firing game} that is
played on the vertices of the graph and is called the {\it abelian
sandpile model} in the physics literature. The interested reader
can find some of the standard results on $K(G)$ in \cite{Bacher,
Biggs1} and \cite[Chapter 13]{Godsil}. Some of this material is
reviewed in Sections~\ref{rods-section} and
\ref{graph-critical-groups} below, along with unpublished results from
the bachelor's thesis of D. Treumann \cite{Treumann} on functoriality
for critical groups.
The critical group $K(G)$ and its relation to the structure of the
graph $G$ remain, in general, mysterious. The goal of this paper is
to compare the structure of the critical group of a {\it connected
simple graph} (that is, a connected graph having no multiple edges
and no loops) with that of the critical group of its {\it line graph}.
Recall that for a graph $G=(V,E)$, its line graph ${\operatorname{line \,\,}} G=(V_{{\operatorname{line \,\,}}
G}, E_{{\operatorname{line \,\,}} G})$ has vertex set $V_{{\operatorname{line \,\,}} G}:=E$, the edge set of
$G$, and an edge in $E_{{\operatorname{line \,\,}} G}$ corresponding to each pair of edges
in $E$ that are incident at a vertex.
\begin{figure}
\caption{A graph $G$ and its line graph ${\operatorname{line \,\,}} G$.}
\label{fig:linegraph}
\end{figure}
Our main results say that, under three different kinds of hypotheses,
the structure of $K({\operatorname{line \,\,}} G)$ is not much more complicated than that
of $K(G)$, as we now explain.
\subsection{The hypothesis of no cut-edge}
It is well-known, and follows from one of the definitions of $K(G)$ in
Section~\ref{graph-critical-groups}, that for a connected graph $G$, the number
$\beta(G):=|E|-|V|+1$ of independent cycles in $G$ gives an upper
bound on the number of generators required for $K(G)$; that is,
\begin{equation}
\label{critical-group-invariant-factor-form}
K(G) = \bigoplus_{i=1}^{\beta(G)} \ensuremath{\mathbb{Z}}_{d_i}
\end{equation}
where $\ensuremath{\mathbb{Z}}_d$ denotes the cyclic group $\ensuremath{\mathbb{Z}}/d\ensuremath{\mathbb{Z}}$ ({\it not} the
$d$-adic integers), and the $d_i$ are positive integers (some of which
may be $1$). Our first main result asserts that the same bound on the
number of generators holds for $K({\operatorname{line \,\,}} G)$ when one assumes that $G$
is {\it $2$-edge-connected}, that is, $G$ is connected and contains no
cut-edge.
\begin{theorem}
\label{2-edge-connected-theorem}
When the simple graph $G$ is $2$-edge-connected, the critical group
$K({\operatorname{line \,\,}} G)$ can be generated by $\beta(G)$ elements.
\end{theorem}
\noindent Note that one needs {\it some} hypothesis on the graph $G$
for this conclusion to hold. For example, a {\it star graph}
$K_{1,n}$ (= one vertex of degree $n$ connected to $n$ vertices of
degree one) has $\beta(K_{1,n})=0$. However, its line graph is the
complete graph $K_n$ and thus, according to
Proposition~\ref{Lorenzini's-theorem} below, has critical group
$K({\operatorname{line \,\,}} K_{1,n})=\ensuremath{\mathbb{Z}}_n^{n-2}$, requiring $n-2$ generators.
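For instance, when $n=3$ the star $K_{1,3}$ has $\beta(K_{1,3})=0$, while its
line graph is the triangle $K_3$ with
$$
K({\operatorname{line \,\,}} K_{1,3}) = K(K_3) \cong \ensuremath{\mathbb{Z}}_3 ,
$$
which already requires $1=n-2$ generator.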
Theorem~\ref{2-edge-connected-theorem} is proven in
Section~\ref{2-edge-connected-section}, using a useful presentation of
$K({\operatorname{line \,\,}} G)$ given in Section~\ref{line-graph-presentation-section}.
\subsection{The hypothesis that degree sums of adjacent vertices are
divisible by $p$}
As $K({\operatorname{line \,\,}} G)$ is a finite abelian group, its structure is completely
determined once one knows, for each prime $p$, the structure of its {\it
$p$-primary component} or {\it $p$-Sylow subgroup} ${\operatorname{Syl}}_p(K({\operatorname{line \,\,}}
G))$. Section~\ref{divisibility-section} below proves the following
stringent constraint on this $p$-primary structure, based on the
largest power $k(p)$ such that $p^{k(p)}$ divides all of the sums
$\deg_G(v)+\deg_G(w)$ as one runs through all edges $e=\{v,w\}$ in the
edge set $E$ of $G$. Here $\deg_G(v)$ is the number of edges of $G$ with
$v$ as an endpoint; it is the {\it degree of the vertex $v$}.
\begin{theorem}
\label{divisibility-theorem}
Let $G=(V,E)$ be a connected simple graph that contains at least one
cycle of even\footnote{This even length cycle need not be {\it
minimal}. For example, a connected graph with two cycles $C_1,
C_2$ of odd length will also contain a cycle of even length that
traverses $C_1$, follows a path from $C_1$ to $C_2$, then
traverses $C_2$ and follows the same path back to $C_1$.} length.
Use the abbreviated notation $K:=K({\operatorname{line \,\,}} G)$, and let $p$ be a prime
for which the quantity $k(p) \geq 1$.
Then for $G$ bipartite, one has
$$
K /p^{k(p)} K
\cong \ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta(G)-1} \oplus \ensuremath{\mathbb{Z}}_{\gcd(p^{k(p)}, |V|)},
$$
while for $G$ nonbipartite, one has
$$
K /p^{k(p)} K
\cong \ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta(G)-2} \oplus
\begin{cases}
0 &\text{ if }p\text{ is odd,} \\
\ensuremath{\mathbb{Z}}_2^2 &\text{ if }p=2\text{ and }|V|\text{ is even,} \\
\ensuremath{\mathbb{Z}}_4 &\text{ if }p=2\text{ and }|V|\text{ is odd.} \\
\end{cases}
$$
\end{theorem}
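As a small sanity check of the bipartite case, take $G=C_4$, the $4$-cycle,
using the standard facts that ${\operatorname{line \,\,}} C_4=C_4$ and that $K(C_4)\cong\ensuremath{\mathbb{Z}}_4$. Every
edge $\{v,w\}$ has $\deg_G(v)+\deg_G(w)=4$, so $k(2)=2$ and $k(p)=0$ for odd
primes $p$. Here $\beta(C_4)=1$, $|V|=4$, and $K=K({\operatorname{line \,\,}} C_4)\cong\ensuremath{\mathbb{Z}}_4$, so that
$$
K/2^{2}K \cong \ensuremath{\mathbb{Z}}_4
 \cong \ensuremath{\mathbb{Z}}_{2^{2}}^{\beta(C_4)-1} \oplus \ensuremath{\mathbb{Z}}_{\gcd(2^{2},|V|)} ,
$$
as the theorem predicts.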
\subsection{The regularity hypothesis}
Our third class of main results deals with the situation where ${\operatorname{line \,\,}}
G$ is regular, that is, all its vertices have the same degree. Say
that a graph is {\it $d$-regular} if all of its vertices have
degree $d$. It is an easy exercise to check that, for
connected graphs $G$, one has ${\operatorname{line \,\,}} G$ regular only in these two
situations:
\begin{itemize}
\item $G$ itself is $d$-regular. In this case, ${\operatorname{line \,\,}} G$ will be
$(2d-2)$-regular.
\item $G$ is bipartite and $(d_1,d_2)${\it -semiregular}, meaning that
its vertex bipartition $V=V_1 \sqcup V_2$ has all vertices in $V_i$
of degree $d_i$ for $i=1,2$. In this case, ${\operatorname{line \,\,}} G$ will be
$(d_1+d_2-2)$-regular.
\end{itemize}
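The ``easy exercise'' amounts to the following degree count, which we record
for convenience: since $G$ is simple, an edge $e=\{u,v\}$ of $G$, viewed as a
vertex of ${\operatorname{line \,\,}} G$, is adjacent to the $\deg_G(u)-1$ other edges at $u$ and to
the $\deg_G(v)-1$ other edges at $v$, so that
$$
\deg_{{\operatorname{line \,\,}} G}(e) = \deg_G(u)+\deg_G(v)-2 .
$$
Thus ${\operatorname{line \,\,}} G$ is regular exactly when $\deg_G(u)+\deg_G(v)$ is constant over
the edges of $G$, and for a connected graph this constancy forces one of the
two situations listed above.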
Two classical theorems of graph spectra explain how the numbers of spanning trees $\kappa(G)$ and
$\kappa({\operatorname{line \,\,}} G)$ determine each other in this situation. The first is due
originally to Vahovskii \cite{Vahovskii} and later Kelmans \cite{Kelmans}, then rediscovered by Sachs \cite[\S 2.4]{CvetkovicDoobSachs},
while the second is due originally to Cvetkovi\'c \cite[\S
5.2]{Rubey} (see also \cite[Theorem 3.9]{Mohar}). \vskip .1in
\noindent
\begin{theorem*}
Let $G$ be a connected graph with ${\operatorname{line \,\,}} G$ regular.\\
\noindent (\textbf{Sachs}) If $G$ is $d$-regular, then
\begin{equation}\label{Sachs'-theorem}
\kappa({\operatorname{line \,\,}} G)= d^{\beta(G)-2} \,\, 2^{\beta(G)} \,\, \kappa(G).
\end{equation}
\noindent (\textbf{Cvetkovi\'c}) If $G$ is bipartite and
$(d_1,d_2)$-semiregular, then
\begin{equation}\label{Cvetkovic's-theorem}
\kappa({\operatorname{line \,\,}} G) =
\frac{(d_1+d_2)^{\beta(G)}}{d_1 d_2}
\left( \frac{d_1}{d_2} \right)^{|V_2|-|V_1|} \,\, \kappa(G).
\end{equation}
\end{theorem*}
\noindent These results suggest a close relationship between the
critical groups $K(G)$ and $K({\operatorname{line \,\,}} G)$ in both of these situations.
\subsubsection{Regular graphs}
We focus first on such a relation underlying Sachs' equation
\eqref{Sachs'-theorem}, as here one can be quite precise.
The occurrence of the factor $2^{\beta(G)} \,\, \kappa(G)$ within
\eqref{Sachs'-theorem} suggests consideration of the {\it edge
subdivision} graph ${\operatorname{sd \,}} G$, obtained from $G$ by placing a new vertex
at the midpoint of every edge of $G$. It is well-known that
\begin{equation}
\label{subdivision-complexity-relation}
\kappa({\operatorname{sd \,}} G) = 2^{\beta(G)} \,\, \kappa(G),
\end{equation}
due to an obvious $2^{\beta(G)}$-to-$1$ surjective map from the
spanning trees of ${\operatorname{sd \,}} G$ to those of $G$\footnote{More explicitly,
there are exactly $\beta(G)$ edges that do not lie on a given
spanning tree of $G$. Upon subdividing, exactly one of the two half-edges of
each such edge must be added, giving $2^{\beta(G)}$ ways
to extend the subdivided tree to a spanning tree of ${\operatorname{sd \,}} G$.}.
Underlying this relation, Lorenzini \cite{Lorenzini} observed that the
critical groups $K({\operatorname{sd \,}} G)$ and $K(G)$ also determine each other in a
trivial way: $K(G)$ has the form given in
\eqref{critical-group-invariant-factor-form} if and only if for the
same positive integers $d_1,d_2,\ldots,d_{\beta(G)}$ one has the
following form for $K({\operatorname{sd \,}} G)$:
\begin{equation}
\label{subdivision-invariant-factor-form}
K({\operatorname{sd \,}} G) = \bigoplus_{i=1}^{\beta(G)} \ensuremath{\mathbb{Z}}_{2d_i}.
\end{equation}
See Proposition~\ref{Lorenzini's-theorem} below. In light of
\eqref{subdivision-complexity-relation}, one might expect that
equation \eqref{Sachs'-theorem} generalizes to a short exact sequence
of the form
\begin{equation}
\label{naive-hope}
0
\rightarrow \ensuremath{\mathbb{Z}}_d^{\beta(G)-2}
\rightarrow K({\operatorname{line \,\,}} G)
\rightarrow K({\operatorname{sd \,}} G)
\rightarrow
0
\end{equation}
where $\ensuremath{\mathbb{Z}}_d$ denotes a cyclic group of order $d$. This is never far
from the truth. After reviewing and developing some theory of critical
groups and their functoriality in Sections~\ref{rods-section} and
\ref{graph-critical-groups} below, we use functoriality to prove the
following result in Section~\ref{regular-result-section}.
\begin{theorem}
\label{regular-result}
For any connected $d$-regular simple graph $G$ with $d \geq 3$, there
is a natural group homomorphism $ f:K({\operatorname{line \,\,}} G) \rightarrow K({\operatorname{sd \,}} G) $
whose kernel-cokernel exact sequence takes the form
$$
0
\rightarrow \ensuremath{\mathbb{Z}}_d^{\beta(G)-2} \oplus C
\rightarrow K({\operatorname{line \,\,}} G)
\overset{f}{\rightarrow} K({\operatorname{sd \,}} G)
\rightarrow C
\rightarrow
0
$$
in which the cokernel $C$ is the following cyclic $d$-torsion group:
$$
C=
\begin{cases}
0 & \text{ if } G \text{ is non-bipartite and }d\text{ is odd},\\
\ensuremath{\mathbb{Z}}_2 & \text{ if } G \text{ is non-bipartite and }d \text{ is even},\\
\ensuremath{\mathbb{Z}}_d & \text{ if } G \text{ is bipartite}.
\end{cases}
$$
\end{theorem}
It turns out that Theorems~\ref{2-edge-connected-theorem} and
\ref{divisibility-theorem} interact very well with
Theorem~\ref{regular-result}. When $G$ is a $d$-regular simple
$2$-edge-connected graph, Theorem~\ref{2-edge-connected-theorem}
implies that $K({\operatorname{line \,\,}} G)$ needs at most $\beta(G)$ generators, while
Proposition~\ref{Lorenzini's-theorem} implies that $K({\operatorname{sd \,}} G)$ requires at
least $\beta(G)$ generators, forcing $K({\operatorname{line \,\,}} G)$ to require either
$\beta(G)-1$ or $\beta(G)$ generators. This shows that the exact
sequence in Theorem~\ref{regular-result} is about as far as possible
from being split, and gives it extra power in determining the
structure of $K({\operatorname{line \,\,}} G)$ given that of $K(G)$ (and hence also $K({\operatorname{sd \,}}
G)$).
Even more precisely, it will be shown in
Section~\ref{regular-nonbipartite-section} that when $G$ is both
$d$-regular and {\it nonbipartite},
Theorems~\ref{divisibility-theorem} and \ref{regular-result} combined
show that $K(G)$ and $K({\operatorname{line \,\,}} G)$ determine each other uniquely in the
following fashion.
\begin{corollary}
\label{regular-nonbipartite-corollary}
For $G$ a simple, connected, $d$-regular graph with $d \geq 3$ which
is nonbipartite, after uniquely expressing
$$
K(G) \cong \bigoplus_{i=1}^{\beta(G)} \ensuremath{\mathbb{Z}}_{d_i}
$$
with $d_i$ dividing $d_{i+1}$, one has
$$
K({\operatorname{line \,\,}} G) \cong \left( \bigoplus_{i=1}^{\beta(G)-2} \ensuremath{\mathbb{Z}}_{2dd_i}
\right) \oplus
\begin{cases}
\ensuremath{\mathbb{Z}}_{2d_{\beta(G)-1}} \oplus \ensuremath{\mathbb{Z}}_{2d_{\beta(G)}} &
\text{ for }|V|\text{ even,}\\
\ensuremath{\mathbb{Z}}_{4d_{\beta(G)-1}} \oplus \ensuremath{\mathbb{Z}}_{d_{\beta(G)}} &
\text{ for }|V|\text{ odd.}
\end{cases}
$$
\end{corollary}
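As a concrete illustration, take $G=K_4$, which is $3$-regular, nonbipartite,
and has $\beta(K_4)=3$; by the formula $K({\operatorname{line \,\,}} K_{1,n})=K(K_n)\cong\ensuremath{\mathbb{Z}}_n^{n-2}$
recalled earlier, $K(K_4)\cong\ensuremath{\mathbb{Z}}_4\oplus\ensuremath{\mathbb{Z}}_4$, so that $d_1=1$ and
$d_2=d_3=4$. Since $|V|=4$ is even, the corollary gives
$$
K({\operatorname{line \,\,}} K_4)
 \cong \ensuremath{\mathbb{Z}}_{2\cdot 3\cdot 1}\oplus\ensuremath{\mathbb{Z}}_{2\cdot 4}\oplus\ensuremath{\mathbb{Z}}_{2\cdot 4}
 = \ensuremath{\mathbb{Z}}_{6}\oplus\ensuremath{\mathbb{Z}}_{8}\oplus\ensuremath{\mathbb{Z}}_{8} ,
$$
a group of order $384$, consistent with Sachs' formula
\eqref{Sachs'-theorem}: $\kappa({\operatorname{line \,\,}} K_4)=3^{1}\, 2^{3}\, \kappa(K_4)
=3\cdot 8\cdot 16=384$. (Here ${\operatorname{line \,\,}} K_4$ is the graph of the octahedron; the
complete graphs and Platonic solids are revisited in
Section~\ref{example-section}.)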
\subsubsection{Semiregular bipartite graphs}
Section~\ref{semi-regular} uses functoriality to prove the following
result analogous to Theorem~\ref{regular-result} and suggested by
Cvetkovi\'c's equation \eqref{Cvetkovic's-theorem}.
\begin{theorem}
\label{semiregular-result}
Let $G$ be a connected bipartite $(d_1,d_2)$-semiregular graph.
Then there is a group homomorphism
$$
K({\operatorname{line \,\,}} G) \overset{g}{\rightarrow} K(G)
$$
whose kernel-cokernel exact sequence
\begin{equation}
0
\rightarrow \ker(g)
\rightarrow K({\operatorname{line \,\,}} G)
\overset{g}{\rightarrow} K(G)
\rightarrow {\operatorname{coker}}(g)
\rightarrow
0
\end{equation}
has ${\operatorname{coker}}(g)$ all ${\operatorname{lcm}}(d_1,d_2)$-torsion, and has
$\ker(g)$ all $\frac{d_1+d_2}{\gcd(d_1,d_2)} {\operatorname{lcm}}(d_1,d_2)$-torsion.
\end{theorem}
\noindent
Note that this result describes the kernels and cokernels less
completely than Theorem~\ref{regular-result}.
Section~\ref{semi-regular} discusses examples illustrating why this is
necessarily the case.
Section~\ref{example-section} illustrates some of the preceding
results by showing how they apply to the examples of complete graphs and
complete bipartite graphs, as well as the $1$-skeleta of
$d$-dimensional cubes and the Platonic solids.
\subsubsection{Directed line graphs}
The reader should compare our results with recent results of Levine
\cite{Levine} on the critical group of a \textit{directed} line
graph. If $G$ is a directed graph, then the directed line graph
$\mathcal{L}G$ is defined so that a pair of directed edges $e$ and $f$
of $G$ are adjacent (and oriented from $e$ to $f$) if the head of $e$
is equal to the tail of $f$.
The critical group of a directed graph is defined as the torsion
subgroup of the cokernel of the Laplacian matrix of $G$. Levine proves
\cite[Theorem~1.2]{Levine} that if $G$ is a strongly connected
Eulerian directed graph, then there is a surjective group homomorphism
from the critical group of $\mathcal{L}G$ to the critical group of
$G$. Moreover, when $G$ is balanced and $d$-regular, the
kernel of this homomorphism is the $d$-torsion subgroup of the critical
group of $\mathcal{L}G$. As can be seen from
Theorem~\ref{regular-result}, we do not obtain such easily stated
results in the undirected case.
\section{Some theory of lattices} \label{rods-section}
This section recalls some of the theory of rational lattices in
Euclidean spaces and their determinant groups, along with
functoriality and Pontrjagin duality for these groups, borrowing
heavily from Bacher, de la Harpe, and Nagnibeda \cite{Bacher} and
Treumann \cite{Treumann}. In the next section, these constructions
will be specialized to critical groups of graphs.
\subsection{Rational orthogonal decompositions}
Consider ${\mathbb R}^m$ with its usual inner product $\langle \cdot, \cdot
\rangle$ in which the standard basis vectors $e_1,\ldots,e_m$ are
orthonormal. The $\ensuremath{\mathbb{Z}}$-span of this basis is the integer lattice
$\ensuremath{\mathbb{Z}}^m$.
\begin{definition}
A {\it rational orthogonal decomposition} is an orthogonal
${\mathbb R}$-vector space decomposition of ${\mathbb R}^m = B^{\mathbb R} \oplus Z^{\mathbb R}$ in
which $B^{\mathbb R}$, $Z^{\mathbb R} $ are ${\mathbb R}$-subspaces which are rational, that
is, spanned by elements of $\ensuremath{\mathbb{Z}}^m$.
\end{definition}
\begin{example}\label{graphs}
The main example of interest for us will be the following, discussed
further in Section~\ref{graph-critical-groups}. If $G=(V,E)$ is a
graph with $|E|=m$, then the space $Z^{\mathbb R}$ of $1${\it-cycles}
together with its orthogonal complement, the space $B^{\mathbb R}$ of {\it
bonds} or $1${\it-coboundaries}, give a rational orthogonal
decomposition ${\mathbb R}^E \cong {\mathbb R}^m = B^{\mathbb R} \oplus Z^{\mathbb R}$. Here one
must fix an (arbitrary) orientation of the edges in $E$ in order to
make the identification ${\mathbb R}^E \cong {\mathbb R}^m$. In the remaining
sections, the basis element of ${\mathbb R}^E$ corresponding to an edge
$\{u,v\}$ of $G$ oriented from $u$ to $v$ will sometimes be denoted
$e$ and sometimes $(u,v)$, with the convention that
$(v,u)=-(u,v)=-e$ in ${\mathbb R}^E$.
\end{example}
An $r$-dimensional rational subspace $\Lambda^{\mathbb R} \subset {\mathbb R}^m$
inherits the inner product $\langle \cdot, \cdot \rangle$. The space
$\Lambda^{\mathbb R}$ contains two lattices of rank $r$, namely $\Lambda:=
\Lambda^{\mathbb R} \cap \ensuremath{\mathbb{Z}}^m$ and its {\it dual lattice}
\[
\Lambda^\# := \{ x \in \Lambda^{\mathbb R}: \langle x, \lambda \rangle \in \ensuremath{\mathbb{Z}}
\text{ for all } \lambda \in \Lambda \}.
\]
Since $\langle \Lambda, \Lambda \rangle \subset \langle \ensuremath{\mathbb{Z}}^m , \ensuremath{\mathbb{Z}}^m
\rangle = \ensuremath{\mathbb{Z}}$, one has an inclusion $\Lambda \subset \Lambda^\#$.
Their quotient is called the {\it determinant group}
$$
\det(\Lambda):= \Lambda^\# / \Lambda.
$$
Given a rational orthogonal decomposition ${\mathbb R}^m = B^{\mathbb R} \oplus
Z^{\mathbb R}$, one obtains two determinant groups $\det(B), \det(Z)$, which
turn out to be both isomorphic to what we will call the {\it critical
group}
$$
K:=\ensuremath{\mathbb{Z}}^m / (B \oplus Z)
$$
of the rational orthogonal decomposition. Indeed, if $\pi_B, \pi_Z$
denote the orthogonal projections from ${\mathbb R}^m$ onto $B^{\mathbb R}, Z^{\mathbb R}$,
then these maps turn out to give rise to surjections from $\ensuremath{\mathbb{Z}}^m$ onto
$B^\#$ and $Z^\#$, respectively, and which induce isomorphisms (see
\cite[Proposition 3]{Bacher})
$$
\begin{matrix}
\det(B) & \cong & K & \cong & \det(Z) \\
B^\#/B & \overset{\pi_B}{\longleftarrow} &\ensuremath{\mathbb{Z}}^m/(B \oplus Z)
&\overset{\pi_Z}{\rightarrow} & Z^\#/Z .
\end{matrix}
$$
One can compute the critical group $K$ very explicitly as the
(integer) cokernel of several matrices, for example via their {\it
Smith normal form}. If the lattices $B, Z$ have $\ensuremath{\mathbb{Z}}$-bases $\{
b_1,\ldots,b_\alpha\} , \, \{ z_1, \ldots, z_\beta \}$ then let $M_B, \, M_Z, \,
M_{B \oplus Z}$ be matrices having columns given by
$\{b_i\}_{i=1}^\alpha, \, \{z_j\}_{j=1}^\beta, \, \{b_i\}_{i=1}^\alpha \cup
\{z_j\}_{j=1}^\beta$, respectively. The {\it Gram matrices} $M_B^t
M_B, \, M_Z^t M_Z$ express the bases for $B, Z$ in terms of the dual
bases for $B^\#, Z^\#$, and hence
\begin{align*}
K & \cong \ensuremath{\mathbb{Z}}^m /(B \oplus Z) = {\operatorname{coker}} M_{B \oplus Z}, \\
& \cong B^\#/B = {\operatorname{coker}} (M_B^t M_B), \\
& \cong Z^\#/Z = {\operatorname{coker}} (M_Z^t M_Z).
\end{align*}
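As a small worked example of this recipe (anticipating the graph-theoretic
setting of Example~\ref{graphs} and Section~\ref{graph-critical-groups}), take
$m=3$, let $Z^{\mathbb R}$ be spanned by $z=(1,1,1)$, and let $B^{\mathbb R}$ be its
orthogonal complement, spanned by $b_1=(1,-1,0)$ and $b_2=(0,1,-1)$; this is
the decomposition attached to a triangle whose three edges are oriented
cyclically. Then
$$
M_Z^t M_Z = (3),
\qquad
M_B^t M_B = \begin{pmatrix} 2 & -1 \\ -1 & 2 \end{pmatrix},
$$
whose Smith normal forms are $(3)$ and $\mathrm{diag}(1,3)$, so that
$K\cong\ensuremath{\mathbb{Z}}_3$ from either Gram matrix, matching the three spanning trees of
the triangle.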
\subsection{Functoriality}
\label{functoriality-subsection}
Suppose that one has two rational orthogonal decompositions ${\mathbb R}^{m_i}
= B^{\mathbb R}_i \oplus Z^{\mathbb R}_i$ for $i=1,2$, and an ${\mathbb R}$-linear map $f:
{\mathbb R}^{m_1} \rightarrow {\mathbb R}^{m_2}$. When does $f$ induce a
homomorphism $f: K_1 \rightarrow K_2$ between their critical groups?
It is natural to assume that $f$ carries the integer lattice
$\ensuremath{\mathbb{Z}}^{m_1}$ into $\ensuremath{\mathbb{Z}}^{m_2}$, that is, $f$ is represented by a matrix
in $\ensuremath{\mathbb{Z}}^{m_2 \times m_1}$. Note that this already implies that the
adjoint map $f^t: {\mathbb R}^{m_2} \rightarrow {\mathbb R}^{m_1}$ with respect to the
standard inner products will also satisfy $f^t(\ensuremath{\mathbb{Z}}^{m_2}) \subset
\ensuremath{\mathbb{Z}}^{m_1}$, since this map is represented by the transposed $\ensuremath{\mathbb{Z}}^{m_1
\times m_2}$ matrix.
What one needs further to induce homomorphisms of critical groups is
that $f(B_1) \subset B_2$ and $f(Z_1) \subset Z_2$. The following
proposition gives a useful reformulation.
\begin{proposition}
\label{Treumann-characterization}
For a linear map $f: {\mathbb R}^{m_1} \rightarrow {\mathbb R}^{m_2}$ satisfying
$f(\ensuremath{\mathbb{Z}}^{m_1}) \subset \ensuremath{\mathbb{Z}}^{m_2}$, one has
$$
f(B_1) \subset B_2 \Longleftrightarrow f^t(Z_2) \subset Z_1
\Longleftrightarrow f(Z_1) \subset Z_2^\#
$$
and
$$
f(Z_1) \subset Z_2 \Longleftrightarrow f^t(B_2) \subset B_1
\Longleftrightarrow f(B_1) \subset B_2^\#.
$$
\end{proposition}
\begin{proof}
All of the implications follow using the adjointness of $f, f^t$
with respect to the pairings on ${\mathbb R}^{m_1}, {\mathbb R}^{m_2}$, along with
the definitions of $B_i^\#, Z_i^\#$ and the fact that $B_i=
Z_i^\perp$.
\end{proof}
When a linear map $f: {\mathbb R}^{m_1} \rightarrow {\mathbb R}^{m_2}$ satisfies all
of the conditions in the previous proposition, we say that $f$ is a {\it
morphism of rational orthogonal decompositions}. It is clear that
$f$ then induces a homomorphism $K_1 \rightarrow K_2$ between the
critical groups, denoted here also by $f$.
Note the following property of such maps $f$ for future use.
\begin{proposition}\label{f-commutes-with-projections}
Any morphism $f: {\mathbb R}^{m_1} \rightarrow {\mathbb R}^{m_2}$ of rational
orthogonal decompositions intertwines the projection maps onto
either $B_i^{\mathbb R}$ or $Z_i^{\mathbb R}$. That is, the following diagram
commutes:
\begin{equation}
\xymatrix{
{\mathbb R}^{m_1} \ar[d]_{\pi_{B_1}}\ar[r]^f& {\mathbb R}^{m_2}\ar[d]^{\pi_{B_2}} \\
B_1^{\mathbb R} \ar[r]_{f}& B_2^{\mathbb R}
}
\end{equation}
and the same holds replacing $B_i$ by $Z_i$ everywhere.
\end{proposition}
\begin{proof}
Given $x_1 \in {\mathbb R}^{m_1}$ and $b_2 \in B_2^{\mathbb R}$, note that
$$
\langle \pi_{B_2}(f(x_1)), b_2 \rangle =
\langle f(x_1) , b_2 \rangle =
\langle x_1, f^t(b_2) \rangle =
\langle \pi_{B_1}(x_1), f^t(b_2) \rangle =
\langle f(\pi_{B_1}(x_1)), b_2 \rangle.
$$
Since this equality holds for any test vector $b_2 \in B_2^{\mathbb R}$, one
concludes that $\pi_{B_2}(f(x_1)) = f(\pi_{B_1}(x_1))$.
\end{proof}
\subsection{Pontrjagin duality}
Every finite abelian group $K$ is isomorphic to its {\it Pontrjagin
dual}
$$
K^* := {\operatorname{Hom}}_\ensuremath{\mathbb{Z}}(K,{\mathbb Q}/\ensuremath{\mathbb{Z}}).
$$
This isomorphism is not, in general, natural (although the isomorphism
$K \cong K^{**}$ is). However, for critical groups $K=\ensuremath{\mathbb{Z}}^m/(B \oplus
Z)$ associated with a rational orthogonal decomposition, the
isomorphism comes about naturally from the pairing
$$
\begin{matrix}
\ensuremath{\mathbb{Z}}^m \times \ensuremath{\mathbb{Z}}^m & \rightarrow & {\mathbb Q} \\
(x , y ) & \mapsto & \langle \pi(x),\pi(y) \rangle
\end{matrix}
$$
where $\pi$ is either of the orthogonal projections $\pi_B$ or $\pi_Z$.
This induces a pairing
$$
\langle \cdot, \cdot \rangle: K \times K \rightarrow {\mathbb Q}/\ensuremath{\mathbb{Z}}
$$
which is nondegenerate in the sense that the following map is an isomorphism:
\begin{equation}
\label{Pontrjagin-duality-iso}
\begin{matrix}
K & \rightarrow & {\operatorname{Hom}}_\ensuremath{\mathbb{Z}}(K,{\mathbb Q}/\ensuremath{\mathbb{Z}}) &(= K^*)\\
x & \mapsto & \langle x, \cdot \rangle.
\end{matrix}
\end{equation}
Pontrjagin duality is contravariant in the following sense. Given a
homomorphism $f: K_1 \rightarrow K_2$ of abelian groups, there is a
dual morphism $f^* : K_2^* \rightarrow K_1^*$ given by $f^*(g) = g
\circ f.$ The next proposition asserts that this duality interacts as
one would expect with morphisms of rational orthogonal decompositions.
\begin{proposition}
\label{Pontrjagin-dual-is-adjoint}
For a morphism $f: {\mathbb R}^{m_1} \rightarrow {\mathbb R}^{m_2}$ of rational
orthogonal decompositions, Pontrjagin duality identifies $f^t$ with
$f^*$, in the sense that the following diagram commutes:
\begin{align}
\label{Pontrjagin-duality-diagram}
\xymatrix{
K_2 \ar[d]\ar[r]^{f^t} & K_1\ar[d] \\
K_2^*\ar[r]_{f^*} & K_1^*
}
\end{align}
Here the vertical maps are both Pontrjagin duality isomorphisms as in
\eqref{Pontrjagin-duality-iso}.
\end{proposition}
\begin{proof}
Unravelling the definitions, this amounts to checking that if $x_i
\in \ensuremath{\mathbb{Z}}^{m_i}$ for $i=1,2$, then one has $\langle f^t(x_2), x_1
\rangle = \langle x_2, f(x_1) \rangle$.
\end{proof}
This last proposition has a useful consequence.
\begin{corollary}
\label{identify-kernel-cokernel}
For a morphism $f: {\mathbb R}^{m_1} \rightarrow {\mathbb R}^{m_2}$ of rational
orthogonal decompositions, the maps induced by $f, f^t$ on critical
groups satisfy $\ker(f)^* \cong {\operatorname{coker}}(f^t)$ and ${\operatorname{coker}}(f)^* \cong
\ker(f^t)$.
\end{corollary}
\begin{proof}
Pontrjagin duality generally gives $\ker(f)^* \cong {\operatorname{coker}}(f^*)$ and
${\operatorname{coker}}(f)^* \cong \ker(f^*)$, so this follows from
Proposition~\ref{Pontrjagin-dual-is-adjoint}.
\end{proof}
\section{The critical group of a graph}
\label{graph-critical-groups}
This section particularizes the discussion of critical groups from the
previous section to the context of Example~\ref{graphs}, that is, the
critical group $K(G)$ for a graph $G=(V,E)$. It also recalls how one
can use spanning trees/forests to be more explicit about some of these
constructions, and reviews for later use some other known results
about critical groups of graphs.
We will use the term ``spanning tree'' when discussing connected
graphs and ``spanning forest'' when no connectivity is
assumed.
\subsection{Cycles, bonds, Laplacians, and spanning trees}
\label{cycles-bonds-section}
Let $G=(V,E)$ be a graph. After picking an orientation for its edges,
the usual {\it cellular boundary map} from $1$-chains to $0$-chains
with real or integer coefficients
$$
\begin{aligned}
{\mathbb R}^E \overset{\partial_G}{\longrightarrow} {\mathbb R}^V \\
\ensuremath{\mathbb{Z}}^E \overset{\partial_G}{\longrightarrow} \ensuremath{\mathbb{Z}}^V
\end{aligned}
$$
is defined ${\mathbb R}$- or $\ensuremath{\mathbb{Z}}$-linearly as follows: A basis element $e$
corresponding to an edge directed from vertex $u$ to vertex $v$ is
sent to $\partial_G(e)=+v-u$. One considers the negative $-e$ of this
basis element as representing the same edge but directed from $v$ to
$u$, which is consistent with
$$
\partial_G(-e) = +u-v = -\partial_G(e).
$$
Elements of the kernel $Z^{\mathbb R} := \ker \partial_G$ are called {\it
cycles}, while elements of the perpendicular space
$B^{\mathbb R}:={\operatorname{im}} \partial_G^t$ are called {\it bonds}. Thus ${\mathbb R}^E = B^{\mathbb R}
\oplus Z^{\mathbb R}$ is a rational orthogonal decomposition associated with
the graph $G=(V,E)$, and we denote by $K(G)$ the associated critical
group.
The lattice $B$ of bonds is known to be spanned by the signed
incidence vectors $b(V_1,V_2)$ of the directed edges that span across
a {\it cut} (partition) $V=V_1 \sqcup V_2$. The lattice $Z$ of cycles
is known to be spanned by the signed incidence vectors $z(C)$ coming
from {\it directed cycles} in $G$.
If one wants a smaller $\ensuremath{\mathbb{Z}}$-spanning set for $B$, one can take the
vectors $b_G(\{v\}, V \setminus \{v\})$ for cuts that isolate single
vertices; this vector $b_G(\{v\}, V \setminus \{v\})$ is exactly the
row vector of the $|V| \times |E|$ boundary map $\partial_G$ indexed
by $v$. To simplify notation, we will write
$$
b_G(v):=b_G(\{v\}, V \setminus \{v\})
$$
for this bond, and we will call it the bond at $v$ in $G$. In order to
select out of this spanning set a $\ensuremath{\mathbb{Z}}$-basis for $B$, one should omit
exactly one vertex from each connected component of $G$.
Here are a few consequences of these facts:
\begin{enumerate}\label{bond-consequences}
\item[(i)] The Gram matrix $M_B^t M_B$ corresponding to the above
mentioned $\ensuremath{\mathbb{Z}}Z$-basis for $B$ gives what is usually called a {\it
(reduced) Laplacian matrix} $\overline{L(G)}$; the matrix $M_B$ is
obtained from $\partial_G^t$ by removing the columns corresponding
to the chosen vertex in each connected component of $G$. As a
consequence, one has by Kirchhoff's {\it Matrix-tree Theorem} (see,
e.g., \cite[Theorem 2.2.12]{West}) that
$$
|K(G)| = \det\overline{L(G)} = \kappa(G),
$$
the number of spanning forests in $G$.
\item[(ii)]
(The chip-firing/dollar-game/sandpile/Picard presentations for $K(G)$)
Given a connected graph $G=(V,E)$ with boundary map $\ensuremath{\mathbb{Z}}^E
\overset{\partial}{\longrightarrow} \ensuremath{\mathbb{Z}}^V$, bond lattice
$B:={\operatorname{im}} \partial^t$, and any vertex $v_0$ in $V$, one has an
isomorphism
$$
\begin{aligned}
K(G) &\cong {\operatorname{coker}} (M^t_B M_B)\\
&\cong {\operatorname{coker}} (\overline{L(G)})\\
&\cong \ensuremath{\mathbb{Z}}^{V \setminus \{v_0\}} / {\operatorname{im}}\,\overline{L(G)}\\
&\cong \ensuremath{\mathbb{Z}}^V / \left(\ensuremath{\mathbb{Z}} v_0 + {\operatorname{im}} \left(\partial_G \partial_G^t \right) \right)\\
&\cong \ensuremath{\mathbb{Z}}^V / \left(\ensuremath{\mathbb{Z}} v_0 + \partial_G(B) \right)\\
&\cong \ensuremath{\mathbb{Z}}^V / \Bigl(\ensuremath{\mathbb{Z}} v_0 + \sum_{v \in V}\ensuremath{\mathbb{Z}}\, \partial_G \, b_G(v) \Bigr).
\end{aligned}
$$
\item[(iii)] For any vertex $u$ of $G$, one has the relation
$$ \sum_{\{v\in V: \{u,v\} \in E\}} (u,v)= 0 $$
in $K(G)=\ensuremath{\mathbb{Z}}^E/(B \oplus Z)$.
\item[(iv)] For any directed cycle $u_0 \rightarrow u_1 \rightarrow
\cdots \rightarrow u_{\ell-1} \rightarrow u_{\ell}=u_0$ in $G$, one
has the relation $$\sum_{i=0}^{\ell-1} (u_i,u_{i+1})= 0$$ in
$K(G)=\ensuremath{\mathbb{Z}}^E/(B \oplus Z)$.
\end{enumerate}
Fixing a particular spanning forest $T$ for $G$ allows one to
simultaneously construct $\ensuremath{\mathbb{Z}}$-bases of $B$ and $Z$. Removing any edge
$e$ in the forest $T$ creates a new connected component in the forest,
say with vertex set $V_e \subset V$; ranging over all edges $e$ in
$T$, the signed incidence vectors $b^T_e$ for the cuts $V=V_e \sqcup(V
- V_e) $ form a $\ensuremath{\mathbb{Z}}$-basis for $B$. Dually, adding any edge $e$ in $E
- T$ to $T$ creates a unique cycle in $T \cup \{e\}$; ranging over all
edges in $E - T$, the signed incidence vectors $z^T_e$ of these cycles
form a $\ensuremath{\mathbb{Z}}$-basis for $Z$.
\subsection{A presentation for $K( {\operatorname{line \,\,}}
G)$}\label{line-graph-presentation-section}
Proposition~\ref{alternate-presentation} below gives a useful
presentation for $K({\operatorname{line \,\,}} G)$ that is an immediate consequence of the
last equation in assertion \eqref{bond-consequences}(ii) above. It
will be used both in the proof of
Theorem~\ref{2-edge-connected-theorem} and in the analysis of $K({\operatorname{line \,\,}}
K_n)$ in Section~\ref{complete-graph-section}.
Let $G=(V,E)$ be a connected simple graph, so that ${\operatorname{line \,\,}}
G=(V_{{\operatorname{line \,\,}} G}, E_{{\operatorname{line \,\,}} G})$ is also connected. Identify the vertex
set $V_{{\operatorname{line \,\,}} G}$ of the line graph of $G$ with the edge set $E$ of
$G$. After picking arbitrary orientations for the edges of ${\operatorname{line \,\,}} G$,
consider the boundary map for ${\operatorname{line \,\,}} G$:
\[
\partial_{{\operatorname{line \,\,}} G}: \ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{line \,\,}} G}} \longrightarrow
\ensuremath{\mathbb{Z}}Z^E \,\, ( = \ensuremath{\mathbb{Z}}Z^{V_{{\operatorname{line \,\,}} G}}).
\]
\begin{proposition}\label{alternate-presentation}
Given a connected simple graph $G=(V,E)$ and any edge $e_0$ in $E$,
one has an isomorphism
\[
\begin{aligned}
K({\operatorname{line \,\,}} G) & \cong \ensuremath{\mathbb{Z}}Z^E /(\ensuremath{\mathbb{Z}}Z e_0 + \partial_{{\operatorname{line \,\,}} G}(B_{{\operatorname{line \,\,}} G}))\\
&\cong \ensuremath{\mathbb{Z}}Z^E / \left(\ensuremath{\mathbb{Z}}Z e_0 + \ensuremath{\mathbb{Z}}Z(\partial_{{\operatorname{line \,\,}} G} \, b_{{\operatorname{line \,\,}}
G}(e)_{e \in E}) \right).
\end{aligned}
\]
\end{proposition}
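For example, let $G=C_4$ be a $4$-cycle with edges $a,b,c,d$ in cyclic order, so that ${\operatorname{line \,\,}} G$ is again a $4$-cycle on the vertices $a,b,c,d$. The elements $\partial_{{\operatorname{line \,\,}} G}\, b_{{\operatorname{line \,\,}} G}(e)$ are, up to sign, $2a-b-d$, $2b-a-c$, $2c-b-d$ and $2d-a-c$, and with $e_0=a$ the presentation above becomes $\ensuremath{\mathbb{Z}}Z^{\{a,b,c,d\}}$ modulo $\ensuremath{\mathbb{Z}}Z a$ and these four elements. A short computation (set $a=0$, so that $d=-b$ and $c=2b$ with $4b=0$) identifies the quotient with $\ensuremath{\mathbb{Z}}Z_4$, as expected since $K({\operatorname{line \,\,}} C_4)=K(C_4)\cong\ensuremath{\mathbb{Z}}Z_4$.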
\subsection{Lorenzini's result on edge subdivisions}
\label{subdivision-example}
The {\it edge subdivision} of a graph $G$ is the graph ${\operatorname{sd \,}} G$
obtained by creating a new midpoint vertex called $uv$ for every edge
$\{u,v\}$ of $G$; that is, $\{u,v\}$ is removed and replaced by two
edges $\{u, uv\}, \{v,uv\}$ in ${\operatorname{sd \,}} G$. An orientation of $G$ induces
an orientation of ${\operatorname{sd \,}} G$: If $u$ is oriented towards $v$ then $u$ is
oriented towards $uv$ and $uv$ is oriented towards $v$.
\begin{figure}
\caption{A graph $G$ and its edge subdivision ${\operatorname{sd \,}} G$.}
\end{figure}
In \cite{Lorenzini} Lorenzini first observed that the critical groups
$K({\operatorname{sd \,}} G)$ and $K(G)$ determine each other in a trivial way, using the
description $K=Z^\#/Z$ as we now explain. If $\{C_1,\ldots,C_\beta\}$
is any set of directed cycles in $G$ whose incidence vectors
$\{z(C_i)\}_{i=1}^\beta$ give a $\ensuremath{\mathbb{Z}}Z$-basis for $Z_G$, then one can
subdivide those same cycles to obtain a $\ensuremath{\mathbb{Z}}Z$-basis $\{z_{{\operatorname{sd \,}}
C_i}\}_{i=1}^\beta$ for $Z_{{\operatorname{sd \,}} G}$. One then checks that
\[
\langle z_{{\operatorname{sd \,}} C_i}, z_{{\operatorname{sd \,}} C_j} \rangle = 2 \langle z(C_i), z(C_j) \rangle,
\]
for each $i,j$, since the inner product counts (with signs) the overlap
of edges between cycles $C_i, C_j$, and these overlaps double
in size after the subdivision.
Hence one has the following relation between their Gram
matrices:
\begin{equation}
\label{subdivision-Gram-matrix-relation}
M_{{\operatorname{sd \,}} G}^t M_{{\operatorname{sd \,}} G} = 2 M_G^t M_G
\end{equation}
and the following simple relation between their cokernels, the
critical groups:
\begin{proposition}[Lorenzini \cite{Lorenzini}]
\label{Lorenzini's-theorem}
Let $G$ be a graph with $\beta$ independent cycles. Expressing $ K(G)
\cong \bigoplus_{i=1}^\beta \ensuremath{\mathbb{Z}}Z_{d_i} $ for positive integers
$d_1,d_2, \ldots,d_\beta \geq 1$, one has $ K({\operatorname{sd \,}} G) \cong
\bigoplus_{i=1}^\beta \ensuremath{\mathbb{Z}}Z_{2d_i}$.
\end{proposition}
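For example, an $n$-cycle $C_n$ has $\beta=1$ and $K(C_n)\cong\ensuremath{\mathbb{Z}}Z_n$, while its edge subdivision is the $2n$-cycle $C_{2n}$ with $K(C_{2n})\cong\ensuremath{\mathbb{Z}}Z_{2n}$, in agreement with the proposition.
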
It will be useful later to have an expression of this result in terms of
explicit morphisms (as was done also in \cite{Lorenzini}). Consider
the pair of adjoint maps defined ${\mathbb R}$-linearly by
\[
\begin{matrix}
{\mathbb R}^{E_{{\operatorname{sd \,}} G}} & \overset{h}{\longrightarrow} &{\mathbb R}^{E_G} \\
(u,uv) & \longmapsto & (u,v) \\
(uv,v) & \longmapsto & (u,v) \\
& & \\
{\mathbb R}^{E_G} & \stackrel{h^t}{\longrightarrow} &{\mathbb R}^{E_{{\operatorname{sd \,}} G}} \\
(u,v) & \longmapsto & (u,uv)+(uv,v).
\end{matrix}
\]
One can easily check that these are morphisms of rational orthogonal
decompositions, and hence give rise to a morphism $h: K({\operatorname{sd \,}} G)
\rightarrow K(G)$ of critical groups. The relation
\eqref{subdivision-Gram-matrix-relation} between the two $\beta \times
\beta$ Gram matrices shows that the kernel-cokernel exact sequence
associated to $h$ takes this form:
$$
\begin{matrix}
0 \longrightarrow & \ker(h)
& \longrightarrow & K({\operatorname{sd \,}} G)
& \overset{h}{\longrightarrow} & K(G)
& \longrightarrow 0 \\
0 \longrightarrow & \ensuremath{\mathbb{Z}}Z_2^\beta
& \longrightarrow & \bigoplus_{i=1}^\beta \ensuremath{\mathbb{Z}}Z_{2d_i}
& \longrightarrow & \bigoplus_{i=1}^\beta \ensuremath{\mathbb{Z}}Z_{d_i}
& \longrightarrow 0.
\end{matrix}
$$
Proposition~\ref{Lorenzini's-theorem} is equivalent to the assertion
that $K({\operatorname{sd \,}} G)$ can be generated by $\beta$ elements and fits into an
exact sequence of this form, generalizing equation
\eqref{subdivision-complexity-relation} from the Introduction.
\subsection{A non-standard treatment of the complete graph}
\label{nonstandard-complete-graph-subsection}
Let $K_n$ be the complete graph on $n$ vertices. A celebrated formula
of Cayley asserts that $\kappa(K_n)=n^{n-2}$ (see, e.g., \cite[Section
13.2]{Godsil}). Generalizing this to compute the critical group
$K(K_n)$ is a favorite example of many papers in the subject. We
approach this calculation in a slightly non-standard way here, mainly
because it will provide us with a crucial technical lemma for later
use in Section~\ref{kernel-subsection}.
\begin{proposition}
\label{complete-graph}
The complete graph $K_n$ has critical group
$$
K(K_n) \cong \ensuremath{\mathbb{Z}}Z_n^{n-2}.
$$
Furthermore, in the presentation $K(K_n) = \ensuremath{\mathbb{Z}}Z^E/(B \oplus Z)$, a
minimal generating set is provided by the images of any set of $n-2$
edges which form a spanning tree connecting $n-1$ out of the $n$
vertices.
\end{proposition}
\begin{proof}
Since Cayley's formula implies $|K(K_n)| = |\ensuremath{\mathbb{Z}}Z_n^{n-2}|$, it will
suffice to show that $K(K_n)$ is all $n$-torsion and that it can be
generated by $n-2$ elements as in the second assertion. Let
$[n]:=\{1,2,\ldots,n\}$ denote the vertex set $V$ for $K_n$.
To show $K(K_n)$ is all $n$-torsion, given any directed edge
$e=(i,j)$ in $K_n$, we will prove that $n \cdot e$ is equal to a sum of
cycles and bonds. Indeed, we can take the sum of the directed cycles $(i,j) + (j,k)
+ (k,i)$ for $k \in [n] - \{i,j\}$, and add the two bonds
$$
\begin{aligned}
b(\{i\},[n]-\{i\}) &=(i,1) + (i,2) + \cdots + (i,n)\\
b([n]-\{j\},\{j\}) &=(1,j) + (2,j) + \cdots + (n,j),
\end{aligned}
$$
where the nonexistent terms $(i,i)$ and $(j,j)$ are understood to be omitted from these sums.
For the second assertion, let $T$ be a collection of $n-2$ edges
that form a spanning tree connecting $n-1$ out of the $n$
vertices. By symmetry, we may assume that $n$ is the vertex that is
isolated by $T$. The edges of $K_n$ can be partitioned into two
sets, $E(K_{n-1})$ and $\{(i,n)\}_{i=1}^{n-1}$.
Any edge $e$ in $E(K_{n-1})$ either lies in $T$ or $T \cup \{e\}$
contains a unique cycle
that lets one express $e$ in terms of the elements of $T$ modulo
$Z(K_{n-1})$, and hence modulo $Z=Z(K_{n})$.
For each $1 \leq i \leq n-1$, the bond
\[
b_i := \sum_{\substack{ j=1 \\ j \neq i}}^n (i,j) \equiv 0 \mod B,
\]
and it follows that
\[
(i,n) \equiv - \sum_{\substack{ j=1 \\ j \neq i}}^{n-1} (i,j) \mod B.
\]
The edges in the sum all belong to $K_{n-1}$ and thus, according to
the previous paragraph, can be written in terms of $T$ modulo
$Z$. It follows that $(i,n)$ can be written in terms of $T$ modulo
$B+Z$.\qedhere
\end{proof}
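For example, when $n=4$ the proposition gives $K(K_4)\cong\ensuremath{\mathbb{Z}}Z_4^2$, minimally generated by the images of the two edges $(1,2),(2,3)$, which form a spanning tree on the vertices $\{1,2,3\}$. The first step of the proof is illustrated by the identity
$$
4\cdot(1,2) = \bigl[(1,2)+(2,3)+(3,1)\bigr] + \bigl[(1,2)+(2,4)+(4,1)\bigr]
+ b(\{1\},\{2,3,4\}) + b(\{1,3,4\},\{2\}),
$$
a sum of two directed triangles and two bonds.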
\section{Proof of Theorem~\ref{2-edge-connected-theorem}}
\label{2-edge-connected-section}
Recall the statement of Theorem~\ref{2-edge-connected-theorem}:
\vskip .1in
\noindent
{\bf Theorem~\ref{2-edge-connected-theorem}.}
{\it
When the simple graph $G$ is $2$-edge-connected, the critical group $K({\operatorname{line \,\,}} G)$ can
be generated by $\beta(G)$ elements.
}
\vskip .1in
The $\beta(G)$ generators will come from the set of edges in the complement $E \setminus T$
of a carefully chosen spanning tree $T$ for $G$. For this we introduce
the following technical condition, which we have not encountered elsewhere.
\begin{definition}
\label{absorption-order-definition}
\rm
For a connected graph $G=(V,E)$, say that a spanning tree $T \subset E$ for $G$ has an \emph{absorption order in} $G$ if one can
linearly order the union $V \sqcup T$ of its vertices and edges in the following way:
\begin{enumerate}
\item[(i)] The order begins with a vertex $v_0$ in $V$ followed by an edge $e_0$ of $T$, such that
$e_0$ is the unique edge of $T$ incident to $v_0$ (so $v_0$ is a leaf-vertex of $T$ attached along the leaf-edge $e_0$).
\item[(ii)] For every other vertex $v$ in $V \setminus \{v_0\}$, there exists an edge $e=\{v,w\}$
such that $w$ occurs earlier in the order than $v$, and the edge $e$ either lies in $E \setminus T$ or
occurs earlier in the order than $v$.
\item[(iii)] For every other edge $e$ in $T \setminus \{e_0\}$, there exists a vertex $v$
incident to $e$ which occurs earlier in the order than $e$,
and every other edge incident to $v$ either lies in $E \setminus T$
or occurs earlier in the order than $e$.
\end{enumerate}
\end{definition}
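For example, if $G=K_3$ has vertices $v_0,v_1,v_2$ and one takes the spanning tree $T=\{e_0,e_1\}$ with $e_0=\{v_0,v_1\}$ and $e_1=\{v_1,v_2\}$, then $(v_0,e_0,v_1,e_1,v_2)$ is an absorption order for $T$ in $G$: condition (ii) for $v_2$ is witnessed by the tree edge $e_1$ (or by the non-tree edge $\{v_0,v_2\}$), while condition (iii) for $e_1$ is witnessed by the vertex $v_1$, whose only other incident edge $e_0$ occurs earlier in the order.
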
The relevance of an absorption order for a spanning tree is given by the algebraic consequence
in the following proposition.
Say that an orientation of the edges of a tree $T$ is {\it bipartite}
if, for every vertex $v$, the edges of $T$ incident to $v$ are either all oriented toward
$v$ or all oriented away from $v$.
\begin{proposition}
\label{absorption-order-significance}
Let $G=(V,E)$ be a simple graph, and assume it has a spanning tree $T \subset E$ which has an absorption order in $G$.
Then the images of the basis elements in $\ensuremath{\mathbb{Z}}Z^E$ corresponding to the edges $E \setminus T$ not lying on $T$
give a set of $\beta(G)$ generators for $K({\operatorname{line \,\,}} G)$, using the presentation from Proposition~\ref{alternate-presentation}
$$
K({\operatorname{line \,\,}} G) \cong \ensuremath{\mathbb{Z}}Z^E / \left(\ensuremath{\mathbb{Z}}Z e_0 + \partial_{{\operatorname{line \,\,}} G}(B_{{\operatorname{line \,\,}} G}) \right),
$$
assuming that the orientation chosen for $G$ restricts to a bipartite
orientation of $T$ (although ${\operatorname{line \,\,}} G$ may be oriented arbitrarily),
and the edge $e_0$ is the designated leaf-edge of $T$ appearing second
in the absorption order.
\end{proposition}
To prove this, note the following crucial lemma:
\begin{lemma}
\label{bond-difference-lemma}
When a connected graph $G=(V,E)$ is oriented in a way that restricts to a bipartite orientation for
a spanning tree $T \subset E$, then any edge $e=\{v,w\}$ has
$$
b_G(v) \equiv \pm b_G(w) \quad \mod \quad \ensuremath{\mathbb{Z}}Z e + \ensuremath{\mathbb{Z}}Z (E\setminus T) + \partial_{{\operatorname{line \,\,}} G} ( B_{{\operatorname{line \,\,}} G} ).
$$
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{bond-difference-lemma}.]
Label the edges of $G$ incident to $v$ other than $e$ by
$$
\underbrace{e_1,\ldots,e_p,}_{\text{ in }T}
\underbrace{e_{p+1},e_{p+2},\ldots,e_P,}_{\text{ in }E \setminus T}
$$
and those incident to $w$ other than $e$ by
$$
\underbrace{f_1,\ldots,f_q,}_{\text{ in }T}
\underbrace{f_{q+1},f_{q+2},\ldots,f_Q}_{\text{ in }E \setminus T}.
$$
With these notations, one then has
\begin{equation}
\label{p-and-q-compare-1}
\begin{aligned}
\partial_{{\operatorname{line \,\,}} G} b_{{\operatorname{line \,\,}} G}(e)
&= (e_1-e)+\cdots+(e_P-e)+(f_1-e)+\cdots+(f_Q-e) \\
&= (e_1+\cdots+e_P)+(f_1+\cdots+f_Q)-(P+Q)e.
\end{aligned}
\end{equation}
Because the orientation of $G$ when restricted to $T$ is bipartite,
\begin{equation}
\label{p-and-q-compare-2}
\begin{aligned}
b_G(v) &= \pm( e_1+\cdots+e_p) \pm e_{p+1} \pm e_{p+2} \pm \cdots \pm e_P \pm e\\
b_G(w) &= \pm( f_1+\cdots+f_q) \pm f_{q+1} \pm f_{q+2} \pm \cdots \pm f_Q \pm e.
\end{aligned}
\end{equation}
Comparison of \eqref{p-and-q-compare-1} and \eqref{p-and-q-compare-2} shows that one of the two expressions
$b_G(v) + b_G(w)$ or $b_G(v) - b_G(w)$ differs from $\partial_{{\operatorname{line \,\,}} G} b_{{\operatorname{line \,\,}} G}(e)$ by
a $\ensuremath{\mathbb{Z}}Z$-linear combination of the edges in
$$
\{e\} \cup \{e_{p+1},e_{p+2},\cdots,e_P,f_{q+1},f_{q+2},\cdots,f_Q\}.
$$
Since the second set in the above union lies in $E \setminus T$, the lemma follows.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{absorption-order-significance}.]
One needs to show that the subgroup of $\ensuremath{\mathbb{Z}}Z^E$ defined by
$$
I:=\ensuremath{\mathbb{Z}}Z (E \setminus T) + \ensuremath{\mathbb{Z}}Z e_0 + \partial_{{\operatorname{line \,\,}} G}( B_{{\operatorname{line \,\,}} G} )
$$
is {\it all} of $\ensuremath{\mathbb{Z}}Z^E$. Since $E \setminus T$ is a subset of $I$,
it is enough to show that every edge $e$ in $T$ lies in $I$. More
strongly, one shows by induction on their location in the absorption
order for $T$ that not only does every edge $e$ in $T$ lie in $I$,
but also every vertex $v$ in $V$ has $b_G(v)$ lying in $I$.
The base case for this induction deals both with the first vertex
$v_0$ and the first edge $e_0$, which come at the beginning of the
absorption order. Since $v_0$ is a leaf vertex of $T$ along the
edge $e_0$, one has $b_G(v_0)$ in $I$, since the only edges incident to
$v_0$ are $e_0$ and edges of $E \setminus T$. For the edge $e_0$,
note that it trivially lies in $I$.
In the inductive step, the next element in the absorption order is
either a vertex $v \neq v_0$ or an edge $e \neq e_0$.
If the next element is a vertex $v \neq v_0$, then by
Definition~\ref{absorption-order-definition}(ii), there exists an
edge $e=\{v,w\}$ for which $b_G(w)$ lies in $I$ by induction, and
either $e$ lies in $E \setminus T$ (so that $e$ is in $I$) or $e$ is
earlier in the order than $v$ (so that $e$ is in $I$ by induction).
Hence Lemma~\ref{bond-difference-lemma} shows that $b_G(v)$ also lies in
$I$.
If the next element is an edge $e \neq e_0$, then by
Definition~\ref{absorption-order-definition}(iii), there exists a
vertex $v$ incident to $e$ for which $b_G(v)$ lies in $I$ by
induction. Note that $b_G(v)$ is a $\pm 1$ combination of all the
edges $e'$ incident to $v$, and all of these other edges $e' \neq e$
either have $e'$ in $E \setminus T$ (so that $e'$ is in $I$) or $e'$
is earlier in the order than $e$ (so that $e'$ is in $I$ by
induction). Hence $e$ also lies in $I$.
\end{proof}
To show that $2$-edge-connected graphs $G$ always have a spanning tree
$T$ with an absorption order, we recall the well-known reformulation
of 2-edge-connectivity in terms of ear decompositions; see
e.g., \cite[Definition 4.2.7]{West}.
\begin{definition}\label{ear-def}
Let $G=(V,E)$ be a simple graph. An \it ear \rm of $G$ is a walk
that alternates (incident) vertices $u_i$ and edges $e_i$
\begin{equation}
\label{ear-walk-labelling}
v:=u_1,e_1,u_2,e_2,\ldots,u_\ell,e_\ell,u_{\ell+1}:=w
\end{equation}
such that the internal vertices $u_2,\ldots,u_{\ell}$ are each of
degree $2$ in $G$. If $v \neq w$, it is called an {\it open ear}
(and necessarily $\ell \geq 1$), while if $v = w$, it is called a
{\it closed ear} (and necessarily $\ell \geq 3$, because $G$ is
simple).
An {\it ear decomposition} of $G$ is a decomposition of its
vertices and edges
$$
P_0 \cup P_1 \cup \cdots \cup P_k
$$
such that $P_0$ is a cycle, and for $1 \leq i \leq k$, $P_i$ is an
ear of $P_0 \cup P_1 \cup \cdots \cup P_i$.
\end{definition}
\begin{figure}
\caption{Graphs with open and closed ears.}
\label{fig:eardecomp}
\end{figure}
\begin{proposition}[{\cite[Theorem 4.2.8]{West}}]
\label{ear-prop}
A graph is 2-edge-connected if and only if it has an ear
decomposition.
\end{proposition}
In light of Proposition~\ref{absorption-order-significance}, the
following result implies Theorem~\ref{2-edge-connected-theorem}.
\begin{proposition}
\label{trees-with-absorption-orders-exist}
Let $G=(V,E)$ be a simple $2$-edge-connected graph. Then $G$ has at
least one spanning tree $T \subset E$ with an absorption order in $G$.
\end{proposition}
\begin{proof}
Induct on the number $k$ of ears in an ear decomposition $P_0 \cup
P_1 \cup \cdots \cup P_k$ for $G$.
In the base case $k=0$, the graph $G=P_0$ is an $n$-cycle. Label its
vertices $V=\{v_0,v_1,\ldots,v_{n-1}\}$ and edges
$E=\{e_0,e_1,\ldots,e_{n-1}\}$ so that $e_i=\{v_i,v_{i+1}\}$ with
indices taken modulo $n$. Then one can easily check that
$T=\{e_0,e_1,\ldots,e_{n-2}\}$ is a spanning tree, and
$(v_0,e_0,v_1,e_1,\ldots,v_{n-2},e_{n-2},v_{n-1})$ is an absorption
order for $T$ in $G$.
In the inductive step, one may assume that $G^-:=P_0 \cup P_1 \cup
\cdots \cup P_{k-1}$ has a spanning tree $T^-$ with an absorption
order in $G^-$. Choose the labelling of the endpoints $v,w$ of the
ear $P_k$ so that $v$ comes weakly earlier than $w$ in the
absorption order for $T^-$, where the vertices and edges of $P_k$
are labelled as in \eqref{ear-walk-labelling}. Extend $T^-$ to
$$
T:=T^- \sqcup \{e_2,e_3,\ldots,e_{\ell}\}
$$
which is easily seen to be a spanning tree for $G$. One extends the
absorption order for $T^-$ in $G^-$ to one for $T$ in $G$ by
inserting the subsequence
\begin{equation}
\label{ear-subsequence}
(u_2,e_2,u_3,e_3,u_4,\ldots,u_{\ell},e_{\ell})
\end{equation}
into the absorption sequence for $T^-$ in one of two possible locations,
depending upon whether or not $v=w=v_0$, that is, whether the ear is
closed and attached at the initial vertex $v_0$ of the absorption order for $T^-$.
First we assume that $P_k$ is an open ear (that is, $v \neq w$) or
$P_k$ is a closed ear with $v=w \neq v_0$. In this case, one can
check that inserting the subsequence \eqref{ear-subsequence} {\it
immediately after $v$} in the absorption order for $T^-$ in $G^-$
gives an absorption order for $T$ in $G$.
In the case that $P_k$ is a closed ear with $v=w=v_0$, one checks
that inserting the subsequence \eqref{ear-subsequence} {\it at the
very beginning} of the absorption order for $T^-$ in $G^-$ gives
an absorption order for $T$ in $G$. Note that $u_2,e_2$ become the
``new'' $v_0, e_0$ in this absorption order. \qedhere
\end{proof}
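To illustrate the inductive step, take $G=K_4$ on vertex set $\{1,2,3,4\}$ with the ear decomposition $P_0\cup P_1\cup P_2$ in which $P_0$ is the triangle on $\{1,2,3\}$, $P_1$ is the open ear $2,\{2,4\},4,\{3,4\},3$, and $P_2$ consists of the single edge $\{1,4\}$. The base case gives the spanning tree $\{\{1,2\},\{2,3\}\}$ of $P_0$ with absorption order $(1,\{1,2\},2,\{2,3\},3)$. Attaching $P_1$ (here $v=2$ and $w=3$) adds the tree edge $\{3,4\}$ and inserts the subsequence $(4,\{3,4\})$ immediately after the vertex $2$, yielding the spanning tree $T=\{\{1,2\},\{2,3\},\{3,4\}\}$ of $K_4$ with absorption order $(1,\{1,2\},2,4,\{3,4\},\{2,3\},3)$; the ear $P_2$ has no internal vertices and contributes nothing further to $T$ or to the order.
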
We remark that the converse of
Proposition~\ref{trees-with-absorption-orders-exist} is
false. For example, one can check that
the simple graph $G$ on vertex set $V=\{1,2,3,4\}$ with
edges $\{12,13,23,34\}$, which is {\it not} $2$-edge-connected,
{\it does} have an absorption ordering for any
choice of a spanning tree $T$ in $G$.
We have not investigated extensively the problem of
characterizing which graphs $G$
contain a spanning tree $T$ with an absorption ordering.
\section{Proof of Theorem~\ref{divisibility-theorem}}
\label{divisibility-section}
For a prime $p$ let $k(p)$ be the largest integer such that $p^{k(p)}$ divides
all of the sums $\deg_G(v)+\deg_G(w)$ as one runs through all edges
$e=\{v,w\}$ in the edge set $E$ of $G$. The goal of this section is to
give a proof of Theorem~\ref{divisibility-theorem}, which we now
recall.
\begin{divtheorem*}
Let $G=(V,E)$ be a connected simple graph that contains at least one
cycle of even length. Use the abbreviated notation $K:=K({\operatorname{line \,\,}}
G)$, and let $p$ be a prime for which the quantity $k(p) \geq 1$.
Then for $G$ bipartite, one has
$$
K /p^{k(p)} K
\cong \ensuremath{\mathbb{Z}}Z_{p^{k(p)}}^{\beta(G)-1} \oplus \ensuremath{\mathbb{Z}}Z_{\gcd(p^{k(p)}, |V|)},
$$
while for $G$ nonbipartite, one has
$$
K /p^{k(p)} K
\cong \ensuremath{\mathbb{Z}}Z_{p^{k(p)}}^{\beta(G)-2} \oplus
\begin{cases}
0 &\text{ if }p\text{ is odd,} \\
\ensuremath{\mathbb{Z}}Z_2^2 &\text{ if }p=2\text{ and }|V|\text{ is even,} \\
\ensuremath{\mathbb{Z}}Z_4 &\text{ if }p=2\text{ and }|V|\text{ is odd.}
\end{cases}
$$
\end{divtheorem*}
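To illustrate the statement before giving the proof, take $G=K_4$: every sum $\deg_G(v)+\deg_G(w)$ equals $6$, so that $k(2)=k(3)=1$, while $\beta(K_4)=3$, $|V|=4$ is even, and $K_4$ is nonbipartite with a cycle of even length. Writing $K=K({\operatorname{line \,\,}} K_4)$, the theorem asserts that
$$
K/2K \cong \ensuremath{\mathbb{Z}}Z_2^{\beta(K_4)-2}\oplus\ensuremath{\mathbb{Z}}Z_2^2 \cong \ensuremath{\mathbb{Z}}Z_2^3
\quad\text{ and }\quad
K/3K \cong \ensuremath{\mathbb{Z}}Z_3.
$$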
\begin{proof}
One works again with the presentation from
Proposition~\ref{alternate-presentation}
$$
K:=K({\operatorname{line \,\,}} G) =\ensuremath{\mathbb{Z}}Z^E/\left(\ensuremath{\mathbb{Z}}Z e_0 + \ensuremath{\mathbb{Z}}Z\left( \partial_{{\operatorname{line \,\,}} G}
b_{{\operatorname{line \,\,}} G}(e) \right)_{e \in E} \right)
$$
for some choice of an edge $e_0$ in $E$. Given a vertex $v$ in $V$,
let $\delta_G(v)$ denote the element of $\ensuremath{\mathbb{Z}}Z^E$ which is the sum with
coefficient $+1$ of the basis elements in $\ensuremath{\mathbb{Z}}Z^E$ corresponding to
edges incident with $v$. Given any edge $e=\{v,w\}$ in $E$, reasoning
as in equation \eqref{p-and-q-compare-1}, one finds that
$$
\partial_{{\operatorname{line \,\,}} G} b_{{\operatorname{line \,\,}} G}(e)
= \delta_G(v)+\delta_G(w)-(\deg_G(v)+\deg_G(w)) e.
$$
Letting $q:=p^{k(p)}$, one has therefore in $K/qK$ the relation
$$
\partial_{{\operatorname{line \,\,}} G} b_{{\operatorname{line \,\,}} G}(e) \equiv \delta_G(v)+\delta_G(w)
$$
and one can write a presentation for $K/qK$ as
\begin{equation}
\label{divisible-presentation}
K/qK=\ensuremath{\mathbb{Z}}Z_q^E / \left(\ensuremath{\mathbb{Z}}Z_q e_0
+ \ensuremath{\mathbb{Z}}Z_q ( \delta_G(v)+\delta_G(w))_{e=\{v,w\} \in E} \right).
\end{equation}
We now make a particular choice of the edge $e_0$ for
this presentation, and exhibit a subset of $E$ having size
$\beta(G)-2$ or $\beta(G)-1$ which will
represent $\ensuremath{\mathbb{Z}}Z_q$-linearly independent elements in $K/qK$.
Because $G$ contains an even-length (not necessarily minimal) cycle,
it is possible to choose an edge $e_0$ in $E$ which lies on a {\it minimal}
cycle, so that $E \setminus \{e_0\}$ still connects all of $V$, and
so that $E \setminus \{e_0\}$ contains at least one odd cycle
in the case where $G$ is nonbipartite. Now, in the bipartite case, pick
$S \subset E \setminus \{e_0\}$ to be minimal with respect to the
property that $S$ connects all of $V$. In the nonbipartite case, pick $S$ to be minimal subject to three requirements: $S$ connects all of $V$, $S$ contains a unique cycle, and this cycle has odd length. This means that when $G$ is bipartite, $S$ is a spanning tree that avoids $e_0$,
and when $G$ is nonbipartite, $S$ is a unicyclic spanning subgraph that
avoids $e_0$, whose unique cycle $C$ is of odd length.
We first wish to show that, in either case, the images of the
elements $E \setminus S \setminus \{e_0\}$ are $\ensuremath{\mathbb{Z}}Z_q$-linearly independent
in the presentation \eqref{divisible-presentation}; note that this
set $E\setminus S \setminus \{e_0\}$ has cardinality $\beta(G)-1$ when
$G$ is bipartite, and cardinality $\beta(G)-2$ when $G$ is nonbipartite.
So suppose, to the contrary, that the images of the edges in $E \setminus S \setminus \{e_0\}$ satisfy a nontrivial $\ensuremath{\mathbb{Z}}Z_q$-linear dependence
in $K/qK$. By \eqref{divisible-presentation}, this means that some nonzero $\ensuremath{\mathbb{Z}}Z_q$-combination of these edges lies in
$\ensuremath{\mathbb{Z}}Z_q e_0 + \ensuremath{\mathbb{Z}}Z_q ( \delta_G(v)+\delta_G(w))_{e=\{v,w\} \in E}$;
grouping the $\ensuremath{\mathbb{Z}}Z_q$-coefficients $c_v$ in front of each $\delta_G(v)$, one obtains a sum
$\sum_{v \in V} c_v \delta_G(v)$ lying in
$\ensuremath{\mathbb{Z}}Z_q e_0 + \ensuremath{\mathbb{Z}}Z_q(E\setminus S \setminus \{e_0\})$.
Thus this sum should have zero coefficient on every edge $e=\{v,w\}$ in $S$,
implying that $c_v=-c_w$ for every such edge. Because $S$ is a spanning set
of edges, this forces the existence of a constant $c$ in $\ensuremath{\mathbb{Z}}Z_q$ for which
every $v$ in $V$ has $c_v = \pm c$. In fact, when $G$ is bipartite with
vertex bipartition $V=V_1 \sqcup V_2$, this forces $c_{v_1}=c=-c_{v_2}$
for all $v_1$ in $V_1$ and $v_2$ in $V_2$, while for $G$ nonbipartite,
the existence of the odd cycle $C$ inside $S$ forces $c_v=c=-c$ for
all $v$ in $V$. In either case, this means that $c_v=-c_w$ for all
edges $e=\{v,w\}$ in $E$, and hence the sum $\sum_{v \in V} c_v \delta_G(v)$
is actually zero in $\ensuremath{\mathbb{Z}}Z_q^E$. Consequently the original combination of the edges in
$E \setminus S \setminus \{e_0\}$ is a $\ensuremath{\mathbb{Z}}Z_q$-multiple of $e_0$ alone, which forces all of its
coefficients to vanish; this contradiction proves the asserted linear independence.
It only remains now to analyze the quotient
\begin{align}
\label{leftover-quotient}
\frac{K/qK}{\ensuremath{\mathbb{Z}}Z_q(E\setminus S \setminus \{e_0\})}
=
\ensuremath{\mathbb{Z}}Z_q^E / \left( \ensuremath{\mathbb{Z}}Z_q(E \setminus S)
+ \ensuremath{\mathbb{Z}}Z_q (\delta_G(v)+\delta_G(w))_{e=\{v,w\} \in E} \right).
\end{align}
Note that when $m$ is odd, for any sequence of vertices
$v_0,v_1,\ldots,v_{m-1},v_m$ one has a telescoping alternating sum
$$
\sum_{i=0}^{m-1} (-1)^i \left(\delta_G(v_i)+\delta_G(v_{i+1})\right) = \delta_G(v_0) + \delta_G(v_m).
$$
Also note that $S$ will contain walks traversing an odd number of edges (paths, in the bipartite case) between
\begin{itemize}
\item every pair $(v_1,v_2)$ in $V_1 \times V_2$ when $G$ is bipartite, and
\item every ordered pair $(v,w)$ in $V \times V$ when $G$ is nonbipartite.
\end{itemize}
Thus, in either case, one has
$$
\ensuremath{\mathbb{Z}}Z_q (\delta_G(v)+\delta_G(w))_{e=\{v,w\} \in E}
\quad = \quad
\ensuremath{\mathbb{Z}}Z_q (\delta_G(v)+\delta_G(w))_{e=\{v,w\} \in S}.
$$
Using this last equation, one can rewrite the quotient on the right of
\eqref{leftover-quotient} as
\begin{equation}
\label{leftover-quotient-rewritten}
\ensuremath{\mathbb{Z}}Z_q^S / \ensuremath{\mathbb{Z}}Z_q (\delta_S(v)+\delta_S(w))_{e=\{v,w\} \in S}
\end{equation}
where here we regard $S$ itself as a graph, namely the edge-induced
subgraph of $G=(V,E)$ having the same vertex set $V$ and edge set $S
\subset E$.
Note that this last expression in \eqref{leftover-quotient-rewritten}
does not depend upon the ambient graph $G$, but only on the subgraph
$S$. We therefore rename it $K_q(S)$ to emphasize this dependence on
$S$ alone. It remains to analyze this group $K_q(S)$ in both the
bipartite and nonbipartite cases.
\noindent \emph{Case 1}: $G$ is bipartite (and hence so is $S$). It follows
that $S$ is a spanning tree on $V$, with vertex bipartition $V = V_1
\sqcup V_2$. By the above discussion,
$$
\begin{aligned}
K_q(S)
&= \ensuremath{\mathbb{Z}}Z_q^S / \ensuremath{\mathbb{Z}}Z_q \left( \delta_S(v_1)+\delta_S(v_2) \right)_{(v_1,v_2) \in V_1 \times V_2} \\
&= \ensuremath{\mathbb{Z}}Z_q^S / \ensuremath{\mathbb{Z}}Z_q \left( \sum_{v \in V} c_v \delta_S(v):
\sum_{v_1 \in V_1} c_{v_1} = \sum_{v_2 \in V_2} c_{v_2}
\text{ in }\ensuremath{\mathbb{Z}}Z_q \right).
\end{aligned}
$$
We first show by induction on $|S|$ that $K_q(S)$ is cyclic, generated by the image of
any {\it leaf edge} $e$ of $S$, that is, an edge $e$ incident to some {\it leaf vertex} $v$
having $\deg_S(v)=1$. The base case $|S|=1$ is trivial. In the inductive
step, pick another leaf edge $e'$ in $S$; we will show it has image $0$ in
the quotient $K_q(S)/\ensuremath{\mathbb{Z}}Z_q e$. If $e'$ is incident to leaf vertex $v'$, then
for any $c$ in $\ensuremath{\mathbb{Z}}Z_q$, one has
$$
e' + c e = \delta_S(v') + c \delta_S(v).
$$
Taking $c=-1$ (respectively $+1$) when $v,v'$ lie in the same
(resp. different) set $V_1$ or $V_2$, one obtains an element that is
zero in $K_q(S)$, and hence $e' \equiv 0$ in $K_q(S)/\ensuremath{\mathbb{Z}}Z_q e$. Now, replacing $S$ by $S \setminus \{e'\}$,
one can induct on $|S|$,
completing the inductive step and showing that $K_q(S)$ is generated
by $e$.
We next analyze the order of this cyclic generator $e$ within
$K_q(S)$. We claim that $c \cdot e = 0$ in $K_q(S)$ if and only if
$c$ lies in $|V| \ensuremath{\mathbb{Z}}Z_q$. This would finish the proof in the bipartite
case: it shows that $K_q(S)$ is isomorphic to the quotient
$\ensuremath{\mathbb{Z}}Z_q / |V| \ensuremath{\mathbb{Z}}Z_q$, which is cyclic of order $\gcd(q,|V|)$, that is,
isomorphic to $\ensuremath{\mathbb{Z}}Z_{\gcd(q,|V|)}$ where $q=p^{k(p)}$. Hence this would imply $K/qK
\cong \ensuremath{\mathbb{Z}}Z_{p^{k(p)}}^{\beta(G)-1} \oplus \ensuremath{\mathbb{Z}}Z_{\gcd(p^{k(p)},|V|)}$, as
desired.
To see the claim, assume that $c \cdot e = 0$ in $K_q(S)$ for some $c$
in $\ensuremath{\mathbb{Z}}Z_q$. This means one has a sum $\sum_{v \in V} c_v \delta_G(v)
=c \cdot e$ in which $\sum_{v_1 \in V_1} c_{v_1} = \sum_{v_2 \in V_2}
c_{v_2}$. This happens if and only if the sum has zero coefficient on
all edges $e'$ in $S \setminus \{e\}$. If $e=\{v,w\}$ with the leaf
vertex $v$ lying in $V_1$, and $w$ in $V_2$, this means
$c_{v_2}=c_w=-c_{v_1}$ for all $v_1 \in V_1 \setminus \{v\}$ and $v_2
\in V_2 \setminus \{w\}$. Then the condition $\sum_{v_1 \in V_1}
c_{v_1} = \sum_{v_2 \in V_2} c_{v_2}$ forces
$$
c_v+(|V_1|-1)(-c_w)=|V_2|(c_w)
$$
i.e., $c_v=(|V|-1)c_w$. Hence this can occur if and only if
$c=c_v+c_w=|V|c_w$, that is, if $c$ lies in $|V| \ensuremath{\mathbb{Z}}Z_q$.
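For instance, if $S$ is a path with vertices $v_1,v_2,v_3$ and edges $e_1=\{v_1,v_2\}$, $e_2=\{v_2,v_3\}$ (so that $V_1=\{v_1,v_3\}$ and $V_2=\{v_2\}$), then the two relations $\delta_S(v_1)+\delta_S(v_2)=2e_1+e_2$ and $\delta_S(v_3)+\delta_S(v_2)=e_1+2e_2$ present $K_q(S)$ as $\ensuremath{\mathbb{Z}}Z_q^2$ modulo the span of $(2,1)$ and $(1,2)$, which is cyclic of order $\gcd(q,3)=\gcd(q,|V|)$ and is generated by the image of either leaf edge.
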
\noindent \emph{Case 2}. $G$ is nonbipartite (and hence so is $S$). In this
case $S$ is a spanning unicyclic graph, whose unique cycle $C$ is of
odd length. By the above discussion,
$$
\begin{aligned}
K_q(S)
&= \ensuremath{\mathbb{Z}}Z_q^S / \ensuremath{\mathbb{Z}}Z_q ( \delta_S(v)+\delta_S(w) )_{(v,w) \in V \times V} \\
&= \ensuremath{\mathbb{Z}}Z_q^S /
\left\{ \sum_{v \in V} c_v \delta_S(v):
\sum_{v \in V} c_{v} \in 2\ensuremath{\mathbb{Z}}Z_q \right\}.
\end{aligned}
$$
Thus, if one defines the tower of $\ensuremath{\mathbb{Z}}Z$-lattices (i.e., free abelian
groups)
$$
L:=\ensuremath{\mathbb{Z}}Z^S \ \supset\
M:=\ensuremath{\mathbb{Z}}Z(\delta_S(v))_{v \in V} \ \supset \
N:=\left\{ \sum_{v \in V} c_v \delta_S(v):
\sum_{v \in V} c_{v} \in 2\ensuremath{\mathbb{Z}}Z\right\},
$$
then one has a short exact sequence
\begin{equation}
\label{lifted-sequence}
0 \rightarrow \underbrace{\frac{M\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}{N\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}}_{(M/N) \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}
\rightarrow \underbrace{\frac{L\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}{N\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}}_{K_q(S)}
\rightarrow \underbrace{\frac{L\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}{M\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q}}_{(L/M) \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q} \rightarrow 0
\end{equation}
Here we have used on the two ends of the sequence the fact\footnote{That is, taking tensor products $(-) \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q$ is
{\it right exact}, so when applied to the exact sequence
$$B \rightarrow A \rightarrow A/B \rightarrow 0$$
it gives the exact sequence
$$
B \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q \rightarrow A \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q \rightarrow (A/B)
\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q \rightarrow 0.
$$
} that for any pair of nested abelian groups $B \subset A$, one has
$$
\left( A \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q \right)/ \left( B \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q \right)
\cong
\left( A/B \right) \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q.
$$
Furthermore, it is easy to see that both $M/N$ and $L/M$ are
isomorphic to $\ensuremath{\mathbb{Z}}Z_2$:
\begin{itemize}
\item
The isomorphism $M/N \cong \ensuremath{\mathbb{Z}}Z_2$ comes from choosing any $\ensuremath{\mathbb{Z}}Z$-basis for the
lattice $M$, thus identifying $M \cong \ensuremath{\mathbb{Z}}Z^{|V|}$, and noting that
under this identification, $N$ is identified with the index $2$ sublattice
$\{ x \in \ensuremath{\mathbb{Z}}Z^{|V|}: \sum_{v \in V} x_v \in 2\ensuremath{\mathbb{Z}}Z \}$.
\item
The isomorphism $L/M \cong \ensuremath{\mathbb{Z}}Z_2$ is equivalent to the assertion
that the square {\it (unsigned) edge-node incidence} matrix having columns indexed by
the nodes $V$ and rows indexed by the edges $S$ will have determinant
$\pm 2$. This is a well-known fact for connected unicyclic graphs $S$ whose
unique cycle $C$ is odd; see, e.g., \cite[p. 560, proof of Thm. 3.3]{Stanley-zonotopes}.
It is easily proven by first checking that the determinant is
scaled by $\pm 1$ when one removes a row and column corresponding to
a leaf edge and its incident leaf vertex in $S$.
This reduces the assertion to the case where $S=C$ is just an odd cycle
itself, where the determinant can be calculated directly via Laplace expansion.
\end{itemize}
Hence both of the outer terms $(M/N) \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q , (L/M)
\otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q $ in the short exact sequence
\eqref{lifted-sequence} are isomorphic to $\ensuremath{\mathbb{Z}}Z_2 \otimes_\ensuremath{\mathbb{Z}}Z \ensuremath{\mathbb{Z}}Z_q$,
which vanishes for $p$ odd and equals $\ensuremath{\mathbb{Z}}Z_2$ for $p=2$. Thus
\eqref{lifted-sequence} shows that $K_q(S)$ vanishes for $p$ odd, and
shows for $p=2$ that $K_q(S)$ is either $\ensuremath{\mathbb{Z}}Z_2^2$ or $\ensuremath{\mathbb{Z}}Z_4$. To
distinguish these possibilities when $p=2$, we analyze the additive
orders of each edge $e$ in $S$ as elements of $K_q(S)$.
Note that for any leaf edge $e$ in $S$, say with leaf vertex $v$, one
has $e=\delta_S(v)$, and hence $2e=\delta_S(v)+\delta_S(v)\equiv 0$ in $K_q(S)$.
Thus using a leaf-induction, one sees that any edge $e$ in $S
\setminus C$ has $2e \equiv 0$ in $K_q(S)$.
Meanwhile, we claim that for any edge $e=\{v,w\}$ in $C$, one has $c
\cdot e \equiv 0$ in $K_q(S)$ if and only if $|V| \cdot c$ lies in $4
\ensuremath{\mathbb{Z}}Z_q$. To see the claim, assume that $c \cdot e = 0$ in $K_q(S)$ for
some $c$ in $\ensuremath{\mathbb{Z}}Z_q$. This means one has a sum $\sum_{v \in V} c_v
\delta_G(v) =c \cdot e$ with $\sum_{v \in V} c_v \in 2\ensuremath{\mathbb{Z}}Z_q$. This
happens if and only if the sum has zero coefficient on all edges $e'$
in $S \setminus \{e\}$. Applying this for the edges $e'$ in $C
\setminus \{e\}$, one concludes that $c_v=c_w$, and hence
$c=c_v+c_w=2c_v$. Applying this for the remaining edges $e'$ in $S
\setminus C$, one concludes that $c_w = \pm c_v$ for all $w$ in $V$.
But then the condition that $\sum_{w \in V} c_w$ lies in $2\ensuremath{\mathbb{Z}}Z_q$
means that $|V|\cdot c_v$ also lies in $2\ensuremath{\mathbb{Z}}Z_q$, i.e., that $|V|\cdot
c=2|V|\cdot c_v$ lies in $4\ensuremath{\mathbb{Z}}Z_q$. One concludes that edges $e$ in
$C$ have order $2$ when $|V|$ is even, and order $4$ when $|V|$ is
odd. Since every edge $e$ in $S \setminus C$ has $2e \equiv 0$ in $K_q(S)$, this implies $K_q(S)
\cong \ensuremath{\mathbb{Z}}Z_2^2$ when $|V|$ is even and $K_q(S) \cong \ensuremath{\mathbb{Z}}Z_4$ when $|V|$
is odd.
\end{proof}
\section{Proof of Theorem~\ref{regular-result}}
\label{regular-result-section}
Recall here the statement of Theorem~\ref{regular-result}.
\begin{regtheorem*}
For any connected $d$-regular simple graph $G$ with $d \geq 3$,
there is a group homomorphism
$$
K({\operatorname{line \,\,}} G) \overset{f}{\rightarrow} K({\operatorname{sd \,}} G)
$$
whose kernel-cokernel exact sequence takes the form
$$
0
\rightarrow \ensuremath{\mathbb{Z}}Z_d^{\beta(G)-2} \oplus C
\rightarrow K({\operatorname{line \,\,}} G)
\overset{f}{\rightarrow} K({\operatorname{sd \,}} G)
\rightarrow C
\rightarrow
0
$$
in which the cokernel $C$ is the following cyclic $d$-torsion group:
$$
C=
\begin{cases}
0 & \text{ if } G \text{ is non-bipartite and }d\text{ is odd},\\
\ensuremath{\mathbb{Z}}Z_2 & \text{ if } G \text{ is non-bipartite and }d \text{ is even},\\
\ensuremath{\mathbb{Z}}Z_d & \text{ if } G \text{ is bipartite}.
\end{cases}
$$
\end{regtheorem*}
\subsection{Defining the morphism $f$}
\label{defining-f-subsection}
We begin our proof of the theorem by first defining a linear map
$f:{\mathbb R}^{E_{{\operatorname{line \,\,}} G}} \rightarrow {\mathbb R}^{E_{{\operatorname{sd \,}} G}}$ which will turn out
to be a morphism of rational orthogonal decompositions.
\begin{definition}\label{f-definition}
Define an ${\mathbb R}$-linear map $f:{\mathbb R}^{E_{{\operatorname{line \,\,}} G}} \rightarrow
{\mathbb R}^{E_{{\operatorname{sd \,}} G}}$ by setting
$$
f(uv,vw) = (uv,v) + (v,vw)
$$
for every pair of edges $\{u,v\}, \{v,w\}$ of $G$ incident at some
vertex $v$ (see Figure~\ref{fig:f-def}). Equivalently, the adjoint
map $f^t$ is defined by
$$
f^t(uv,v) = \sum_{w \in V: \{v,w\} \in E} (uv,vw).
$$
\end{definition}
\begin{figure}
\caption{The action of $f$ on a single edge of ${\operatorname{line \,\,}} G$.}
\label{fig:f-def}
\end{figure}
The following definitions and lemma will be useful both for showing that
$f$ gives a morphism, and in our later analysis.
\begin{definition}\label{local-global-defn}
Given a directed cycle
$$
C=\{(v_1,v_2), (v_2,v_3), \ldots, (v_{m-1},v_m), (v_m,v_1) \}
$$
in $G$, let
$$
\begin{aligned}
{\operatorname{sd \,}} C &:= \{(v_1,{v_1 v_2}),({v_1 v_2},v_2),
(v_2,{v_2 v_3}),({v_2 v_3},v_3), \ldots \}\\
{\operatorname{line \,\,}} C &:= \{(v_1 v_2, v_2 v_3) ,(v_2 v_3, v_3 v_4), \ldots,
(v_{m-1}v_m, v_m v_1), (v_m v_1, v_1 v_2) \}
\end{aligned}
$$
denote corresponding cycles in ${\operatorname{sd \,}} G, {\operatorname{line \,\,}} G$.
Cycles in ${\operatorname{line \,\,}} G$ of the form ${\operatorname{line \,\,}} C$ where $C$ is a cycle of $G$
will be called {\it global} cycles. A cycle in ${\operatorname{line \,\,}} G$ will be
called {\it local (to vertex $v$)} if every vertex $v_iv_j$ of ${\operatorname{line \,\,}}
G$ visited by the cycle has $v \in \{v_i, v_j\}$.
\end{definition}
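For instance, when $G=K_4$ and $v$ is a vertex of $G$, the three vertices of ${\operatorname{line \,\,}} G$ of the form $vw$ span a triangle in ${\operatorname{line \,\,}} G$, and the directed $3$-cycles supported on this triangle are local to $v$; the global cycles of ${\operatorname{line \,\,}} G$ are the cycles ${\operatorname{line \,\,}} C$ attached to the triangles and $4$-cycles $C$ of $K_4$.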
\begin{lemma}
\label{induced-cycles}
Let $G$ be a graph, and let $\{C\}$ be a set of directed cycles
indexing a spanning set $\{z(C)\}$ for the cycle space $Z_G$. Then
\begin{enumerate}
\item[(i)] $Z_{{\operatorname{sd \,}} G}$ will be spanned by the incidence vectors
$\{z({\operatorname{sd \,}} C) \}$ of the associated subdivided cycles, and
\item[(ii)] $Z_{{\operatorname{line \,\,}} G}$ will be spanned by the incidence vectors $\{ z({\operatorname{line \,\,}} C) \}$ for
their associated global cycles together with all local cycles.
\end{enumerate}
\end{lemma}
\begin{proof}
Assertion (i) was implicitly used in
Section~\ref{subdivision-example}, and should be clear either from
elementary algebraic topology or from the discussion of bases for
$Z_G$ coming from spanning forests at the beginning of
Section~\ref{cycles-bonds-section}.
For assertion (ii), given any directed cycle in ${\operatorname{line \,\,}} G$, put an
equivalence relation on its edges by taking the transitive closure
of the following relation: two consecutive edges $(uv,vw), (vw,wx)$
in the cycle are equivalent if there exists a vertex $y$ of $G$
contained in $\{u,v\} \cap \{v,w\} \cap \{w,x\}$. The global cycles
in ${\operatorname{line \,\,}} G$ are by definition those in which the equivalence
classes for this relation all have cardinality two (N.B.: here one
is using the assumption that $G$ is simple). Given a cycle $z$ in
${\operatorname{line \,\,}} G$ that contains equivalence classes of size at least $3$, it
is easy to see that one can always add a local cycle to $z$ and
reduce the number of such equivalence classes: if the equivalence
class and its neighbors in $z$ correspond to these terms
$$
\cdots + (ab_1,yb_1)+(yb_1,yb_2)+(yb_2,yb_3)+\cdots
+(yb_{t-1},yb_t)+(yb_t,b_tc)+\cdots
$$
where $a,c \neq y$, then subtracting the local cycle
$$
(yb_1,yb_2)+(yb_2,yb_3)+\cdots +(yb_{t-1},yb_t)+(yb_t,yb_1)
$$
gives a result that looks locally like
\[
\cdots + (ab_1,yb_1) + (yb_1,yb_t) + (yb_t,b_tc)+ \cdots . \qedhere
\]
\end{proof}
\begin{corollary}
For any $d$-regular simple graph $G$, the map $f: {\mathbb R}^{E_{{\operatorname{line \,\,}} G}}
\rightarrow {\mathbb R}^{E_{{\operatorname{sd \,}} G}}$ from Definition~\ref{f-definition} is a
morphism of the associated rational orthogonal decompositions, and
hence induces a group homomorphism
$$
f: K({\operatorname{line \,\,}} G) \rightarrow K({\operatorname{sd \,}} G).
$$
\end{corollary}
\begin{proof}
By Proposition~\ref{Treumann-characterization}, one must show both
$f(Z_{{\operatorname{line \,\,}} G}) \subset Z_{{\operatorname{sd \,}} G}$ and $f^t(Z_{{\operatorname{sd \,}} G}) \subset
Z_{{\operatorname{line \,\,}} G}$.
To show $f(Z_{{\operatorname{line \,\,}} G}) \subset Z_{{\operatorname{sd \,}} G}$, using
Lemma~\ref{induced-cycles}(ii), it suffices to show that $f$ takes
both global and local cycles in ${\operatorname{line \,\,}} G$ to cycles in $Z_{{\operatorname{sd \,}} G}$.
This is easy (and requires no assumption about the $d$-regularity of
$G$): local cycles map to $0$ under $f$, and a global cycle of the
form ${\operatorname{line \,\,}} C$ satisfies $f(z({\operatorname{line \,\,}} C)) = z({\operatorname{sd \,}} C)$.
To show $f^t(Z_{{\operatorname{sd \,}} G}) \subset Z_{{\operatorname{line \,\,}} G}$, using
Lemma~\ref{induced-cycles}(i), it suffices to show for every
directed cycle $C$ in $G$ that $f$ takes the subdivided cycle
\[
z({\operatorname{sd \,}} C)=(v_1 v_2,v_2)+(v_2,v_2v_3)+(v_2v_3,v_3)+(v_3,v_3v_4)+
\cdots+(v_kv_1,v_1)+(v_1,v_1v_2)
\]
to a sum of cycles in $Z_{{\operatorname{line \,\,}} G}$. The regularity of $G$ implies
that each $v_i$ has $d-2$ neighbors off the cycle; label them
$u_i^1,\ldots,u_i^{d-2}$. Then one can write
$$
(f^t)(z({\operatorname{sd \,}} C)) = 2 z({\operatorname{line \,\,}} C) + \zeta_1 + \cdots + \zeta_{d-2}
$$
where for $j=1,2,\ldots,d-2$ one defines the element of $Z_{{\operatorname{line \,\,}}
G}$
\begin{align*}
\zeta_j :=
(v_1v_2,v_2u_2^j)+(v_2u_2^j,v_2v_3)+(v_2v_3,v_3u_3^j)+&(v_3u_3^j,v_3v_4)+
\cdots \\+&(v_kv_1,v_1u_1^j)+(v_1u_1^j,v_1v_2).
\end{align*}
An example with $d=3$ is shown in Figure \ref{f-transpose},
depicting the subdivided cycle ${\operatorname{sd \,}} C$ in ${\operatorname{sd \,}} G$, and then its
image under $f^t$ in ${\operatorname{line \,\,}} G$, which decomposes into $2$ copies of
the inner cycle ${\operatorname{line \,\,}} C$ along with $1$ $(=d-2)$ outer cycle
$\zeta_1$.
\end{proof}
\begin{figure}
\caption{An example of a subdivided cycle ${\operatorname{sd \,}} C$ in ${\operatorname{sd \,}} G$, together with its image under $f^t$ in ${\operatorname{line \,\,}} G$.}
\label{f-transpose}
\end{figure}
\subsection{The kernel and cokernel of $f$ are $d$-torsion}
\label{d-torsion-subsection}
\begin{proposition}
\label{f-scaling-prop}
For any $d$-regular connected graph $G$, both maps
$$
\begin{aligned}
f^t f &: K({\operatorname{line \,\,}} G) \rightarrow K({\operatorname{line \,\,}} G)\\
f f^t &: K({\operatorname{sd \,}} G) \rightarrow K({\operatorname{sd \,}} G)
\end{aligned}
$$
are scalar multiplications by $d$.
\end{proposition}
\begin{proof}
The proofs of these are straightforward computations:
\begin{align*}
f^t f (uv,vw) &= f^t(uv,v) + f^t(v,vw) \\
&= \sum_{x \in V:\{v,x\} \in E} \left( (uv,vx) + (xv,vw) \right) \\
&= d \cdot (uv,vw) +
\sum_{x \in V:\{v,x\} \in E} \left( (uv,vx) + (xv,vw) + (vw,uv) \right) \\
&= d \cdot (uv,vw) \text{ mod } Z_{{\operatorname{line \,\,}} G} .\\
f f^t (uv,v) &= \sum_{x \in V:\{v,x\} \in E} f(uv,vx) \\
&= \sum_{x \in V:\{v,x\} \in E} \left( (uv,v) + (v,vx) \right) \\
&= d \cdot (uv,v) + \sum_{x \in V:\{v,x\} \in E} (v,vx) \\
&=d \cdot (uv,v) \text{ mod } B_{{\operatorname{sd \,}} G}.\qedhere
\end{align*}
\end{proof}
\begin{corollary}
\label{f-torsion-corollary}
For any $d$-regular connected graph $G$,
both $\ker(f)$ and ${\operatorname{coker}}(f)$ are all $d$-torsion.
\end{corollary}
\begin{proof}
For $x \in \ker(f)$ and $y \in {\operatorname{coker}}(f)$, one has
\begin{align*}
d \cdot x &= f^t f(x) = f^t ( 0 ) = 0, \\
d \cdot y &= f f^t(y) \in {\operatorname{im}}(f). \qedhere
\end{align*}
\end{proof}
\subsection{Analyzing the cokernel}
\label{cokernel-subsection}
\begin{proposition}
\label{cokernel-description}
For any $d$-regular connected graph $G$, the group $C:={\operatorname{coker}}(f)$ is a
cyclic group as described in Theorem~\ref{regular-result}.
\end{proposition}
\begin{proof}
We will use the presentation
\begin{equation}
\label{coker-presentation}
C:={\operatorname{coker}}(f) : = K( {\operatorname{sd \,}} G ) / {\operatorname{im}}(f)
= \ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{sd \,}} G}} / \left( B_{{\operatorname{sd \,}} G} + Z_{{\operatorname{sd \,}} G} + {\operatorname{im}}(f) \right),
\end{equation}
which follows from our first definition of the critical group
(as in Section~\ref{rods-section}).
To see that $C$ is cyclic, note that there are two ways for a pair
of edges in ${\operatorname{sd \,}} G$ to be incident at a vertex, and in either case
their images in $C$ will differ by a sign:
\begin{align*}
(u,uv) &= (uv,v) \mod B_{{\operatorname{sd \,}} G}, \tag{a}\\
(uv,v) &= -(v,vw) \mod {\operatorname{im}}(f). \tag{b}
\end{align*}
Since $G$ is connected, this shows $C$ is cyclic, generated by the
image of any directed edge of ${\operatorname{sd \,}} G$. Furthermore, it is a
quotient of $\ensuremath{\mathbb{Z}}Z_d$ by Corollary~\ref{f-torsion-corollary}.
When $G$ is bipartite, in order to show $C = \ensuremath{\mathbb{Z}}Z_d$, it will suffice
to exhibit a surjection $C \twoheadrightarrow \ensuremath{\mathbb{Z}}Z_d$. Let the
vertex set $V$ for $G$ have bipartition $V=V_1 \sqcup V_2$ and
assume that all the edges of $G$ are oriented from $V_1$ to
$V_2$. Define an abelian group homomorphism
\[
\phi:\ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{sd \,}} G}} \to \ensuremath{\mathbb{Z}}Z, \qquad \phi(v_1, v_1v_2) =
\phi(v_1v_2,v_2) = 1,
\]
where $v_i \in V_i$ for $i=1,2$. One can check that each of the three
subgroups $B_{{\operatorname{sd \,}} G}, Z_{{\operatorname{sd \,}} G}, {\operatorname{im}}(f)$ by which one mods out in
\eqref{coker-presentation} is mapped via $\phi$ into the subgroup
$d\ensuremath{\mathbb{Z}}Z$:
\begin{itemize}
\item Any directed cycle $C$ in ${\operatorname{sd \,}} G$ has
$\phi(z(C))=0$ (due to the fact that $C$ will have even length),
\item any edge $e$ of ${\operatorname{line \,\,}} G$ has $\phi(f(e))=0$,
\item any vertex $v_1v_2$ in ${\operatorname{sd \,}} G$ has $\phi(b_{{\operatorname{sd \,}} G}(v_1v_2))=0$,
\item any vertex $v_i$ in ${\operatorname{sd \,}} G$ has $\phi(b_{{\operatorname{sd \,}}
G}(v_i))= (-1)^{i-1} d$, where $i=1,2$.
\end{itemize}
Thus $\phi$ induces a surjection from $C$ onto $\ensuremath{\mathbb{Z}}Z_d$, as desired.
If $G$ is not bipartite, it contains some (directed) odd cycle
$C$. Pick any directed edge $e$ in the subdivision ${\operatorname{sd \,}} C$ and use
the two relations (a), (b) to rewrite it successively as $\pm$ the
other directed edges in the cycle. It changes sign each time one
uses (b) to pass through a vertex of ${\operatorname{sd \,}} C$ that is an original
vertex of $G$. Since the cycle $C$ passes through an odd number of
such vertices, the edge changes sign an odd number of times before
it returns, yielding
$$
e = -e \text{ mod } B_{{\operatorname{sd \,}} G} + {\operatorname{im}}(f).
$$
Hence $2e =0$ in $C$, so $C$ is a quotient of $\ensuremath{\mathbb{Z}}Z_2$.
Since $C$ is also a quotient of $\ensuremath{\mathbb{Z}}Z_d$, when $d$ is odd, one must
have $C=0$. When $d$ is even, consider the index $2$ sublattice
$\Lambda$ of $\ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{sd \,}} G}}$ consisting of those vectors whose sum
of coordinates is even. Without any parity assumption on $d$, it is
true that ${\operatorname{im}}(f) \subset \Lambda$ (by definition of $f$) and
$Z_{{\operatorname{sd \,}} G} \subset \Lambda$ (because the subdivided cycles ${\operatorname{sd \,}} C$
have evenly many edges). The assumption that $d$ is even implies
that $B_{{\operatorname{sd \,}} G}$ also lies in $\Lambda$: $B_{{\operatorname{sd \,}} G}$ is generated by
the bonds in ${\operatorname{sd \,}} G$ of the form $b_{{\operatorname{sd \,}} G}(v)$ for vertices $v$
of ${\operatorname{sd \,}} G$, and every vertex in ${\operatorname{sd \,}} G$ has degree either $2$ or
$d$. Consequently, the presentation \eqref{coker-presentation}
shows that $C$ surjects onto $\ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{sd \,}} G}}/\Lambda \cong \ensuremath{\mathbb{Z}}Z_2$, and hence $C \cong \ensuremath{\mathbb{Z}}Z_2$ when $d$ is even.
\end{proof}
\subsection{Analyzing the kernel}
\label{kernel-subsection}
It remains to understand $\ker(f)$, or equivalently by
Proposition~\ref{identify-kernel-cokernel}, to understand its
Pontrjagin dual
\begin{equation}
\label{transpose-cokernel-presentation}
{\operatorname{coker}}(f^t) = \ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{line \,\,}} G}}/ \left( Z_{{\operatorname{line \,\,}} G} + B_{{\operatorname{line \,\,}} G} + {\operatorname{im}}(f^t) \right).
\end{equation}
This will come about by reformulating this presentation, in order to
analyze it {\it locally}.
\begin{definition}
For each vertex $v \in V_G$ of a $d$-regular simple graph
$G=(V_G,E_G)$, define inside ${\operatorname{line \,\,}} G$ the {\it $d$-clique local to $v$}
$$
K^{(v)}_d=(V(K^{(v)}_d), E(K^{(v)}_d))
$$
to be the vertex-induced subgraph of ${\operatorname{line \,\,}} G$ on the vertex set
$$
V(K^{(v)}_d) :=\{vw: \{v,w\} \in E_G\}.
$$
\end{definition}
Note that the edges of ${\operatorname{line \,\,}} G$ form a disjoint decomposition
\begin{equation}
\label{disjoint-edge-decomposition}
E_{{\operatorname{line \,\,}} G}=\bigsqcup_{v \in V_G} E(K^{(v)}_d)
\end{equation}
since $G$ was assumed to be a simple graph. Also note that a cycle in
${\operatorname{line \,\,}} G$ is local to vertex $v$, as in
Definition~\ref{local-global-defn}, if and only if it is supported on
the edges $ E(K^{(v)}_d)$. If one lets $Z^{global}_{{\operatorname{line \,\,}} G}$ be the
span of global cycles $\{z_{{\operatorname{line \,\,}} C}\}$ coming from any spanning set
of cycles $\{z_C\}$ for $Z_G$, then Lemma~\ref{induced-cycles} (ii)
implies
$$
Z_{{\operatorname{line \,\,}} G} = Z^{local}_{{\operatorname{line \,\,}} G} + Z^{global}_{{\operatorname{line \,\,}} G}.
$$
To simplify the presentation \eqref{transpose-cokernel-presentation},
note that for a vertex $vw$ of ${\operatorname{line \,\,}} G$, the bond
$$
b_{{\operatorname{line \,\,}} G}(vw) = f^t(vw,v) + f^t(vw,w)
$$
lies in ${\operatorname{im}}(f^t)$, and consequently, $B_{{\operatorname{line \,\,}} G} \subset {\operatorname{im}}(f^t)$.
Note also that the decomposition \eqref{disjoint-edge-decomposition}
leads to a family of compatible direct sum decompositions
\begin{align*}
\ensuremath{\mathbb{Z}}Z^{E_{{\operatorname{line \,\,}} G}} &= \bigoplus_{v \in V_G} \ensuremath{\mathbb{Z}}Z^{E(K_d^{(v)})} \\
Z^{local}_{{\operatorname{line \,\,}} G} &= \bigoplus_{v \in V_G} Z_{K_d^{(v)}} \\
{\operatorname{im}}(f^t) & = \bigoplus_{v \in V_G} B_{K_d^{(v)}}.
\end{align*}
This gives the simplified presentation
\begin{equation}
\label{simplified-presentation}
\begin{aligned}
{\operatorname{coker}}(f^t) &= \left( \bigoplus_{v \in V_G} \ensuremath{\mathbb{Z}}Z^{E(K^{(v)}_d)} /
\left( B_{K_d^{(v)}} + Z_{K_d^{(v)}} \right) \right)
/ Z^{global}_{{\operatorname{line \,\,}} G} \\
&= \left( \bigoplus_{v \in V_G} K(K^{(v)}_d) \right) / Z^{global}_{{\operatorname{line \,\,}} G}.
\end{aligned}
\end{equation}
We use this presentation to prove the following lemma, which together
with Proposition~\ref{cokernel-description} immediately implies
Theorem~\ref{regular-result}.
\begin{lemma}
\label{kernel-description}
For a connected $d$-regular graph $G$,
$$
\ker(f) \cong \ensuremath{\mathbb{Z}}Z_d^{\beta(G)-2} \oplus C
$$
where $C:={\operatorname{coker}}(f)$ is as described in Theorem~\ref{regular-result}.
\end{lemma}
\begin{proof}
We claim that it suffices to prove these two bounds on $\ker(f)$:
\begin{itemize}
\item[(i)] There is a surjection $\ker(f) \twoheadrightarrow
\ensuremath{\mathbb{Z}}Z_d^{\beta(G)-2}$, and
\item[(ii)] $\ker(f)$ can be generated by $\beta(G)-1$ elements.
\end{itemize}
To see this claim, note that since $\ker(f)$ is all $d$-torsion by
Corollary~\ref{f-torsion-corollary}, assertion (ii) would imply a
surjection $\ensuremath{\mathbb{Z}}Z_d^{\beta(G)-1} \twoheadrightarrow \ker(f)$.
Together with (i), this would imply $\ker(f) \cong
\ensuremath{\mathbb{Z}}Z_d^{\beta(G)-2}\oplus C'$ for some cyclic group $C'$. But then
exactness of the sequence
$$
0 \rightarrow
\underbrace{\ker(f)}_{\ensuremath{\mathbb{Z}}Z_d^{\beta(G)-2} \oplus C'} \rightarrow
K({\operatorname{line \,\,}} G) \overset{f}{\rightarrow}
K({\operatorname{sd \,}} G) \rightarrow
\underbrace{{\operatorname{coker}}(f)}_{C} \rightarrow
0
$$
forces
$$
\left( d^{\beta(G)-2} |C'| \right) |K({\operatorname{sd \,}} G)| = |K({\operatorname{line \,\,}} G)| |C|.
$$
From this equation and equation \eqref{Sachs'-theorem} one deduces $|C'|=|C|$.
Since both $C'$ and $C$ are cyclic, this means $C' \cong C$, as desired.
In the proofs of assertions (i) and (ii), one uses the fact that $\ker(f)={\operatorname{coker}}(f^t)$.
Moreover, setting $n:=|V_G|$, one can rewrite the direct sum from \eqref{simplified-presentation} as
\begin{equation}
\label{local-direct-sum}
\bigoplus_{v \in V_G} K(K^{(v)}_d)
\cong \bigoplus_{v \in V_G} \ensuremath{\mathbb{Z}}Z_d^{d-2}
\cong \ensuremath{\mathbb{Z}}Z_d^{n(d-2)}.
\end{equation}
For assertion (i), we use some easy numerology. Note that
$Z^{global}_{{\operatorname{line \,\,}} G}$ can be generated by $\beta(G)$ elements,
and also that
$$
\beta(G) = |E_G| - |V_G| + 1 = \frac{dn}{2} - n +1 = \frac{n(d-2)}{2} + 1
$$
so that
$$
n(d-2)-\beta(G) = \beta(G)-2.
$$
Since it is easily seen that any quotient of an abelian group $\ensuremath{\mathbb{Z}}Z_d^a$ by a subgroup that can be generated by $b$ elements
will have a surjection to $\ensuremath{\mathbb{Z}}Z_d^{a-b}$, one can apply this with $a=n(d-2)$ and $b=\beta(G)$ to
the presentation \eqref{simplified-presentation}, and conclude that there is a
surjection ${\operatorname{coker}}(f^t) \twoheadrightarrow \ensuremath{\mathbb{Z}}Z_d^{\beta(G)-2}$.
For assertion (ii), the idea will be to start with the
$$
n(d-2)=2(\beta(G)-1)
$$
generators in \eqref{local-direct-sum}, and use (all but one of) the $\beta(G)$
generating global cycles
in $Z^{global}_{{\operatorname{line \,\,}} G}$ to rewrite them in terms of other generators, with $\beta(G)-1$
generators left. This will be
achieved by removing the vertices from $G$ one at a time in a certain order, in order to control
the rewriting process.
To this end, order the vertices $V_G$ as $v_1,v_2,\ldots,v_n$ in such a way that the
vertex-induced subgraphs
$$
\begin{aligned}
G_i &:=G \setminus \{v_1, v_2,\ldots,v_{i-1}\} \\
& (\text{so }G_1:=G, \text{ and }G_n \text{ has one vertex }v_n)
\end{aligned}
$$
satisfy
$$
d_i:=\deg_{G_i}(v_i) < d \text{ for } i \geq 2.
$$
For each $i \geq 1$, partition the $d_i$ neighbors of $v_i$ in $G_i$
into blocks $A_1,A_2,\ldots,A_{c_i}$ according to the connected components
of $G_{i+1}$ in which they lie. The number $c_i$ of such components coincides with the
number of connected components in $G_{i+1}$ into which the connected component
of $v_i$ in $G_i$ splits after removing $v_i$.
Define
$$
\Delta_i:= d_i - c_i = \beta(G_i) - \beta(G_{i+1}),
$$
where the last equality follows from the Euler relation for graphs:
$$
|V_G|-|E_G| = |\{\text{connected components of }G\}| - \beta(G).
$$
Consequently,
$$
\Delta_1 + \Delta_2 + \cdots + \Delta_{n-1} = \beta(G_1)-\beta(G_n) = \beta(G).
$$
Our goal will then be to find $\Delta_i$ minimal generators of
\eqref{local-direct-sum} to remove at
each stage $i \geq 2$ (and at the first stage $i=1$, remove one fewer, that is,
$\Delta_1-1$ of them).
This would leave a generating set for ${\operatorname{coker}}(f^t)$ of cardinality
$n(d-2)-(\beta(G)-1) = \beta(G)-1$, as desired.
For $i \geq 2$, inside the clique $K^{(v_i)}_d$ local to $v_i$, choose
a forest $F_i$ of
edges having $c_i$ components which are spanning trees on each of the subsets
$\{v_i x: x \in A_j\}$ for $j=1,2,\ldots,c_i$. Note that
$$
|F_i|= \sum_{j=1}^{c_i} (|A_j|-1) = d_i - c_i = \Delta_i.
$$
Also note that the forest $F_i$ manages to avoid touching at least one
vertex in the $d$-clique $K^{(v_i)}_d$, namely any vertex of the form
$v_i v_k$ in which $\{ v_i, v_k \} \in E_G$ and $k < i$; there will exist
at least one such $k$ since by construction,
$\deg_{G_i}(v_i) =d_i < d = \deg_{G}(v_i)$.
Hence by Proposition~\ref{complete-graph}, the edges in $F_i$ give $\Delta_i$
generators that could be completed to a set of $d-2$ minimal generators for
$K(K^{(v_i)}_d) \cong \ensuremath{\mathbb{Z}}Z_d^{d-2}$. Each of these generators in $F_i$
can be re-written, using a cycle in $Z^{global}_{{\operatorname{line \,\,}} G}$, in terms of
generators from the $K(K^{(v_k)}_d)$'s with $k > i$, as follows. Given
any edge $(v_i x,v_i x')$ in $F_i$, there is a path from $x$ to $x'$
in $G_{i+1}$ (because $x, x'$ lie in the same component of $G_{i+1}$ by
construction), and hence a directed cycle $C$ in $G_i$ going
from $v_i$ to $x$ then through this path to $x'$ and back to $v_i$.
The global cycle $z({\operatorname{line \,\,}} C)$ allows one to rewrite $(v_i x,v_i x')$ as desired.
The only difference for $i=1$ is that one uses only $\Delta_1-1$ of the
edges of $F_1$: delete a leaf edge from the forest $F_1$, so that the
remaining edges avoid at least one vertex of the clique $K^{(v_1)}_d$
(for $i=1$ no such avoided vertex is otherwise guaranteed, since $d_1=d$).
This modification ensures that one can still apply Proposition~\ref{complete-graph}
and rewrite, as above, the generators of $K(K_d^{(v_1)})$ corresponding to the
remaining edges of $F_1$.
\end{proof}
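As a consistency check, take $G=K_4$, so that $d=3$, $n=4$ and $\beta(G)=3$. The direct sum \eqref{local-direct-sum} is $\ensuremath{\mathbb{Z}}Z_3^{4}$, assertion (i) provides a surjection onto $\ensuremath{\mathbb{Z}}Z_3^{\beta(G)-2}=\ensuremath{\mathbb{Z}}Z_3$, and since $C=0$ here ($K_4$ is nonbipartite and $d=3$ is odd) the lemma gives $\ker(f)\cong\ensuremath{\mathbb{Z}}Z_3$. The exact sequence of Theorem~\ref{regular-result} then reads
$$
0 \rightarrow \ensuremath{\mathbb{Z}}Z_3 \rightarrow K({\operatorname{line \,\,}} K_4) \rightarrow K({\operatorname{sd \,}} K_4) \rightarrow 0,
$$
so that $|K({\operatorname{line \,\,}} K_4)| = 3\,|K({\operatorname{sd \,}} K_4)| = 3\cdot 2^{3}\,|K(K_4)| = 3 \cdot 8 \cdot 16 = 384$, using Proposition~\ref{Lorenzini's-theorem} and Proposition~\ref{complete-graph}.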
\begin{remark}
One should remark that for a connected, $d$-regular graph $G$, the
extra hypothesis in Theorem~\ref{regular-result} that $G$ is
$2$-edge-connected is well-known (see, e.g., \cite{Hartsfield}) to be
superfluous when $d$ is even: a connected graph $G$ with all
vertices of even degree cannot have a cut-edge, as the two
components created by the removal of this edge would each be graphs
having exactly one vertex of odd degree, an impossibility.
However, when $d$ is odd, the extra hypothesis of
$2$-edge-connectivity need not follow. For example, the $3$-regular
graph shown in Figure~\ref{3-regular} is connected, but not
$2$-edge-connected.
\begin{figure}
\caption{A 3-regular connected graph which is not 2-edge-connected.}
\label{3-regular}
\end{figure}
\end{remark}
\section{Proof of Corollary~\ref{regular-nonbipartite-corollary}}
\label{regular-nonbipartite-section}
In this section we prove
Corollary~\ref{regular-nonbipartite-corollary}. Informally, the
corollary states that the critical group of $G$ determines the critical
group of ${\operatorname{line \,\,}} G$ in a simple way.
\vskip.1in
\noindent
{\bf Corollary~\ref{regular-nonbipartite-corollary}.}
{\it
For $G$ a simple, connected, $d$-regular graph with $d \geq 3$ which is nonbipartite,
after expressing uniquely
$$
K(G) \cong \bigoplus_{i=1}^{\beta(G)} \ensuremath{\mathbb{Z}}_{d_i}
$$
with $d_{i+1}$ dividing $d_i$ (so that $d_1$ is the largest invariant factor), one has
\begin{equation}
\label{algebraic-Sachs}
K({\operatorname{line \,\,}} G) \cong \left( \bigoplus_{i=1}^{\beta(G)-2} \ensuremath{\mathbb{Z}}_{2dd_i} \right)
\oplus \begin{cases}
\ensuremath{\mathbb{Z}}_{2d_{\beta(G)-1}} \oplus \ensuremath{\mathbb{Z}}_{2d_{\beta(G)}} & \text{ for }|V|\text{ even,}\\
\ensuremath{\mathbb{Z}}_{4d_{\beta(G)-1}} \oplus \ensuremath{\mathbb{Z}}_{d_{\beta(G)}} & \text{ for }|V|\text{ odd.}
\end{cases}
\end{equation}
}
\begin{proof}
Let $K:=K({\operatorname{line \,\,}} G)$, and fix a prime $p$. Our goal is to show that
the $p$-primary component of $K$ matches that of the group on the
right side of \eqref{algebraic-Sachs}.
The hypotheses of the theorem allow one to apply the nonbipartite
cases of Theorem~\ref{divisibility-theorem} and
Theorem~\ref{regular-result}. The former asserts that
\begin{equation}
\label{nonbipartite-divisibility}
K/p^{k(p)}K \cong \ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta(G)-2} \oplus
\begin{cases}
0 & \text{ for }p\text{ odd,}\\
\ensuremath{\mathbb{Z}}_2^2 &\text{ for }p=2, |V|\text{ even,}\\
\ensuremath{\mathbb{Z}}_4 &\text{ for }p=2, |V|\text{ odd,}
\end{cases}
\end{equation}
while the latter gives an exact sequence
\begin{equation}
\label{nonbipartite-exact-sequence}
0 \rightarrow \ensuremath{\mathbb{Z}}_d^{\beta(G)-2} \oplus \ensuremath{\mathbb{Z}}_{\gcd(2,d)}
\rightarrow K
\rightarrow K({\operatorname{sd \,}} G)
\rightarrow \ensuremath{\mathbb{Z}}_{\gcd(2,d)} \rightarrow 0.
\end{equation}
In analyzing the $p$-primary component ${\operatorname{Syl}}_p(K)$, it is convenient to define
the {\it type} of a finite abelian $p$-group $A$ as
the unique integer partition $\nu=(\nu_1 \geq \nu_2 \geq \cdots)$
for which
$A \cong \bigoplus_{i \geq 1} \ensuremath{\mathbb{Z}}_{p^{\nu_i}}.$
Let $\mu, \lambda$ denote the types of ${\operatorname{Syl}}_p (K(G)), \, {\operatorname{Syl}}_p(K)$,
where we think of both $\mu, \lambda$ as partitions with $\beta(G)$ parts
(allowing some parts to be $0$). Note that
Proposition~\ref{Lorenzini's-theorem} asserts, in this language,
that ${\operatorname{Syl}}_p(K({\operatorname{sd \,}} G))$ has type $\mu$ for $p$ odd and
type $\mu+(1^{\beta(G)})$ for $p=2$.
A basic fact from the theory of Hall polynomials \cite[Chapter II
Section 9]{Macdonald} says that there exist short exact sequences of
abelian $p$-groups
$$
0 \rightarrow A \rightarrow B \rightarrow C
\rightarrow 0
$$
in which $A,B,C$ have types $\nu,\lambda,\mu$, respectively, if and
only if the {\it Littlewood-Richardson (or LR) coefficient}
$c_{\mu,\nu}^\lambda$ does not vanish. The combinatorial rephrasing
of this {\it LR-condition} is as follows: There must exist at least
one {\it column-strict tableau} (which we will call an {\it LR
tableau}) of the skew-shape $\lambda/\mu$ having content $\nu$, for
which the word obtained by reading the tableau (in English notation)
from right-to-left in each row, starting with the top row, is {\it
Yamanouchi}. Here the Yamanouchi condition means that within each
initial segment of the word, and for each value $i \geq 1$, the number
of occurrences of $i+1$ is at most the number of occurrences of $i$.
See \cite[Chapter I \S 9]{Macdonald} and \cite[Appendix \S
A1.3]{Stanley} for more on these notions.
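To illustrate the LR-condition in the smallest nontrivial case: if $\lambda=(2,1)$, $\mu=(1)$ and $\nu=(1,1)$, then $\lambda/\mu$ consists of the cell in row $1$, column $2$ and the cell in row $2$, column $1$; filling them with $1$ and $2$, respectively, gives a column-strict tableau of content $\nu$ whose reading word $1,2$ is Yamanouchi, so $c^{\lambda}_{\mu,\nu} \neq 0$. Correspondingly, one has a short exact sequence
$$
0 \rightarrow \ensuremath{\mathbb{Z}}_p^2 \rightarrow \ensuremath{\mathbb{Z}}_{p^2} \oplus \ensuremath{\mathbb{Z}}_p \rightarrow \ensuremath{\mathbb{Z}}_p \rightarrow 0
$$
of abelian $p$-groups whose terms have types $\nu,\lambda,\mu$, respectively.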
Suppose that $p$ is odd. Then $k(p)$ is the largest power such that
$p^{k(p)}$ divides $d$, so taking the $p$-primary components in
\eqref{nonbipartite-exact-sequence}, we obtain the following short exact
sequence:
\begin{equation}
0 \longrightarrow \underbrace{\ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta(G)-2}}_{\text{ type }\nu=(k(p)^{\beta(G)-2})}
\longrightarrow \underbrace{{\operatorname{Syl}}_p K}_{\text{ type }\lambda}
\longrightarrow \underbrace{{\operatorname{Syl}}_p( K({\operatorname{sd \,}} G) )}_{\text{ type }\mu}
\longrightarrow 0
\end{equation}
where $\lambda$ has at most $\beta(G)-2$ nonzero parts by
\eqref{nonbipartite-divisibility}. Since nonvanishing of the
LR-coefficient $c^{\lambda}_{\mu,\nu}$ forces $\mu \subset \lambda$,
it must be that $\mu$ also has at most $\beta(G)-2$ nonzero parts.
Furthermore, one can check that column-strictness together with the
Yamanouchi condition on the reading word of an LR-tableau of shape
$\lambda/\mu$ and content $\nu=(k(p)^{\beta(G)-2})$ uniquely determine
the tableau: It must have each entry in row $i$ equal to $i$ for
$i=1,2,\ldots,\beta(G)-2$. This forces $\lambda_i=\mu_i+k(p)$ for
$i=1,2,\ldots,\beta(G)-2$, and hence $\lambda$ agrees with the type of
the $p$-primary component on the right side of
\eqref{algebraic-Sachs}.
Suppose that $p=2$, so that $2^{k(p)-1}$ divides $d$, but $2^{k(p)}$
does not.
When $d$ is odd we have that $k(p)=1$. On the other hand, taking the
$2$-primary components in \eqref{nonbipartite-exact-sequence} shows
that ${\operatorname{Syl}}_2 K \cong {\operatorname{Syl}}_2(K({\operatorname{sd \,}} G))$, so
$\lambda=\mu+(1^{\beta(G)})$. Since $d$ is odd, $|V|$ must be even
(as the $d$-regularity of $G$ forces $d|V|=2|E|$), so this $\lambda$
again agrees with the type of the $2$-primary component on the right
side of \eqref{algebraic-Sachs}.
If $d$ is even, the $2$-primary components in
\eqref{nonbipartite-exact-sequence} form the following exact sequence
\begin{equation}
0 \longrightarrow \underbrace{\ensuremath{\mathbb{Z}}_{2^{k(p)-1}}^{\beta(G)-2} \oplus \ensuremath{\mathbb{Z}}_2}_{\text{ type }\nu=((k(p)-1)^{\beta(G)-2},1)}
\longrightarrow \underbrace{{\operatorname{Syl}}_2 K}_{\text{ type }\lambda}
\longrightarrow \underbrace{{\operatorname{Syl}}_2( K({\operatorname{sd \,}} G) )}_{\text{ type }\mu+(1^{\beta(G)})}
\overset{\pi}{\rightarrow} \ensuremath{\mathbb{Z}}Z_2
\longrightarrow 0
.
\end{equation}
This can be truncated to the following {\it short} exact sequence involving $\ker \pi$:
\begin{equation}
0 \longrightarrow \underbrace{\ensuremath{\mathbb{Z}}_{2^{k(p)-1}}^{\beta(G)-2} \oplus \ensuremath{\mathbb{Z}}_2}_{\text{ type }\nu=((k(p)-1)^{\beta(G)-2},1)}
\longrightarrow \underbrace{{\operatorname{Syl}}_2 K}_{\text{ type }\lambda}
\longrightarrow \underbrace{\ker\pi}_{\text{ type }\hat\mu}
\longrightarrow 0
\end{equation}
for some partition $\hat\mu$, and where the last two parts
$
(\lambda_{\beta(G)-1},\lambda_{\beta(G)})
$
in $\lambda$ are
either $(1,1)$ or $(2,0)$ by \eqref{nonbipartite-divisibility},
depending on the parity of $|V|$.
The short exact sequence
$$
0 \rightarrow \ker\pi \rightarrow {\operatorname{Syl}}_2(
K({\operatorname{sd \,}} G) ) \overset{\pi}{\rightarrow} \ensuremath{\mathbb{Z}}_2 \rightarrow 0
$$
shows that $\hat\mu$ is obtained from $\mu+(1^{\beta(G)})$ by removing
one square; we claim that $\hat\mu$ can have at most $\beta(G)-1$
nonzero parts, and hence this square must be removed from the {\it
last} row, that is, $\hat\mu=\mu+(1^{\beta(G)-1},0)$. The reason
for this claim is that, since the LR-coefficient
$c^{\lambda}_{\hat\mu,\nu} \neq 0$, the LR-condition forces
$$
\sum_{i \geq \beta(G)-1} \lambda_i \geq
\sum_{i \geq \beta(G)-1} \hat\mu_i + \sum_{i \geq \beta(G)-1} \nu_i.
$$
As $\sum_{i \geq \beta(G)-1} \lambda_i=2$ in both cases for the
parity of $|V|$, and $\sum_{i \geq \beta(G)-1} \nu_i =1$,
this forces $\sum_{i \geq \beta(G)-1} \hat\mu_i \leq 1$. This
implies $\hat\mu$ can have at most $\beta(G)-1$ nonzero parts, as claimed.
Once one knows $\hat\mu$ takes this form, and since
\eqref{nonbipartite-divisibility} fixes the shape of $\lambda$
in its last two rows $\beta(G)-1, \beta(G)$, any
LR-tableau of shape $\lambda/\mu$ and content $\nu=((k(p)-1)^{\beta(G)-2},1)$
is completely determined by column-strictness and the Yamanouchi condition:
It must have its unique entry equal to $\beta(G)-1$ lying in the unique cell
of $\lambda/\mu$ within the last two rows, while its entries in row $i$
are all equal to $i$ for $i=1,2,\ldots,\beta(G)-2$.
This again forces $\lambda_i=\mu_i+k(p)$ for $i=1,2,\ldots,\beta(G)-2$,
and means that $\lambda$ again matches the type
of the $2$-primary component on the right side of \eqref{algebraic-Sachs}.
\end{proof}
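As a quick illustration of the corollary, take $G=K_4$, so that $d=3$, $\beta(G)=3$, $|V|=4$ is even and $K(K_4) \cong \ensuremath{\mathbb{Z}}_4 \oplus \ensuremath{\mathbb{Z}}_4$, i.e.\ $(d_1,d_2,d_3)=(4,4,1)$. Then \eqref{algebraic-Sachs} gives
$$
K({\operatorname{line \,\,}} K_4) \cong \ensuremath{\mathbb{Z}}_{24} \oplus \ensuremath{\mathbb{Z}}_{8} \oplus \ensuremath{\mathbb{Z}}_{2},
$$
in agreement with Corollary~\ref{complete-graph-corollary} below and with the identification of ${\operatorname{line \,\,}} K_4$ with the octahedron in Section~\ref{Platonic-solid-section}.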
\begin{remark}
In light of what Corollary~\ref{regular-nonbipartite-corollary} says
about $K:=K({\operatorname{line \,\,}} G)$ for {\it nonbipartite} regular graphs, one
might wonder what can be deduced for {\it bipartite} regular graphs
using Theorems~\ref{divisibility-theorem} and
\ref{regular-result}. We discuss this briefly here.
Fixing a prime $p$, define $k$ to be the largest exponent such that
$p^k$ divides $d$, and let ${\operatorname{Syl}}_p(K(G))$ have type $\mu$. Then the
$p$-primary components in the bipartite case of
Theorem~\ref{regular-result} form the following exact sequence:
\begin{equation}
\label{bipartite-regular-sequence}
0 \rightarrow \underbrace{\ensuremath{\mathbb{Z}}_{p^{k}}^{\beta(G)-1}}_{\text{type }\nu=(k^{\beta(G)-1})}
\longrightarrow \underbrace{ {\operatorname{Syl}}_p K}_{\text{type }\lambda}
\longrightarrow \underbrace{{\operatorname{Syl}}_p( K({\operatorname{sd \,}} G) )}_{
\tiny\begin{cases}
\text{type }\mu &\text{ if }p \neq 2\\
\text{type }\mu+(1^{\beta(G)}) &\text{ if }p=2
\end{cases}
}
\overset{\pi}{\longrightarrow} \underbrace{\ensuremath{\mathbb{Z}}_{p^k}}_{\text{type }(k)}
\rightarrow 0
\end{equation}
As a consequence, ${\operatorname{Syl}}_p(K)$ will be uniquely determined by
${\operatorname{Syl}}_p(K(G))$ whenever $p$ does not divide $d$, since
then $k=0$ and \eqref{bipartite-regular-sequence} shows
${\operatorname{Syl}}_p(K) \cong {\operatorname{Syl}}_p(K({\operatorname{sd \,}} G))$ in this case.
However, in general, the structures of $\ker(\pi)$ and of ${\operatorname{Syl}}_p(K)$ seem less clear.
Even using the extra information from Theorem~\ref{divisibility-theorem}
that $K/p^{k(p)}K \cong \ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta(G)-1} \oplus \ensuremath{\mathbb{Z}}_{\gcd(p^{k},|V|)}$,
where $k(p)$ is the largest power such that $p^{k(p)}$ divides $2d$,
along with the LR-rule, the structures of the various terms in the
sequence are not uniquely determined.
\begin{question}
\label{bipartite-regular-question}
When $G$ is a simple, bipartite, regular graph, what
more can be said about the structure of
$K:=K({\operatorname{line \,\,}} G)$ in relation to that of $K(G)$?
\end{question}
\end{remark}
\section{Proof of Theorem~\ref{semiregular-result}}
\label{semi-regular}
Let $G=(V,E)$ be a semiregular bipartite graph with vertex bipartition
$V = V_1 \sqcup V_2$, such that vertices in $V_i$ have degree $d_i$.
In this section we prove our analogue of
Theorem~\ref{regular-result} for semiregular graphs. Recall that this
is motivated by Cvetkovi\'c's formula
\eqref{Cvetkovic's-theorem} for the spanning tree number of ${\operatorname{line \,\,}} G$:
$$
\kappa({\operatorname{line \,\,}} G) = \frac{(d_1+d_2)^{\beta(G)}}{d_1 d_2}
\left(\frac{d_1}{d_2}\right)^{|V_2|-|V_1|} \kappa(G).
$$
We recall here the statement of Theorem~\ref{semiregular-result}.
\begin{semiregtheorem*}
Let $G$ be a connected bipartite $(d_1,d_2)$-semiregular graph.
Then there is a group homomorphism
$$
K({\operatorname{line \,\,}} G) \overset{g}{\rightarrow} K(G)
$$
whose kernel-cokernel exact sequence
\begin{equation}
0
\rightarrow \ker(g)
\rightarrow K({\operatorname{line \,\,}} G)
\overset{g}{\rightarrow} K(G)
\rightarrow {\operatorname{coker}}(g)
\rightarrow
0
\end{equation}
has
\begin{itemize}
\item
${\operatorname{coker}}(g)$ all ${\operatorname{lcm}}(d_1,d_2)$-torsion, and
\item
$\ker(g)$ all $\frac{d_1+d_2}{\gcd(d_1,d_2)} {\operatorname{lcm}}(d_1,d_2)$-torsion.
\end{itemize}
\end{semiregtheorem*}
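For instance, when $d_1=d_2=d$ (so that $G$ is $d$-regular and bipartite), these assertions say that ${\operatorname{coker}}(g)$ is $d$-torsion and $\ker(g)$ is $2d$-torsion; compare Remark~\ref{regular-bipartite-case} below.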
The proof of this result is analogous to that of
Theorem~\ref{regular-result}; for this reason, some proofs here are
either abbreviated or only sketched. Note also that this theorem is
less precise than Theorem~\ref{regular-result}, partly out of
necessity: Examples~\ref{regular-complete-bipartite-example} and
\ref{complete-bipartite-example} below show that the morphism $g:
K({\operatorname{line \,\,}} G)\rightarrow K(G)$ appearing in the theorem is nearly
surjective in some cases, but is the zero morphism in some other
cases!
\subsection{Defining the morphism $g$}
\label{defining-g-subsection}
We define $g$ similarly to the map $f$ from
Definition~\ref{f-definition}. Let
$$
\begin{aligned}
\lambda &:={\operatorname{lcm}}(d_1,d_2) \\
\gamma &:=\gcd(d_1,d_2).
\end{aligned}
$$
As a notational convenience, denote typical vertices in $V_1$
(respectively, $V_2$) by $a$'s (respectively, $b$'s) with subscripts
or primes.
\begin{definition}
For a semiregular bipartite graph $G$, let $g: {\mathbb R}^{E_{{\operatorname{line \,\,}} G}}
\rightarrow {\mathbb R}^{E_G}$ be defined ${\mathbb R}$-linearly by
$$
\begin{aligned}
g(ab,ba') & =\frac{\lambda}{d_2} \left( (a,b) + (b,a') \right) \\
g(ba,ab') & =\frac{\lambda}{d_1} \left( (b,a) + (a,b') \right).
\end{aligned}
$$
Equivalently, the adjoint map $g^t$ is defined by
$$
g^t (a,b)=\frac{\lambda}{d_1}\sum_{b_i \in N(a)}(b_i a,ab)
+ \frac{\lambda}{d_2}\sum_{a_j \in N(b)}(ab,b a_j),
$$
where $N(v)$ denotes the set of vertices adjacent to $v$ in $G$.
\end{definition}
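One way to check that these two formulas are indeed adjoint to each other is to compare matrix entries with respect to the standard bases: the coefficient of $(a,b)$ in $g(ab,ba')$ and the coefficient of $(ab,ba')$ in $g^t(a,b)$ are both $\frac{\lambda}{d_2}$, while the coefficient of $(a,b')$ in $g(ba,ab')$ and the coefficient of $(ba,ab')$ in $g^t(a,b')$ are both $\frac{\lambda}{d_1}$.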
\begin{remark}
\label{regular-bipartite-case}
In the special case when $G$ is not only semiregular bipartite, but
actually regular, so $d_1=d_2 =\lambda =\gamma$, one can easily check
that the map $g$ coincides with the composite map $h \circ f$
$$
{\mathbb R}^{E_{{\operatorname{line \,\,}} G}} \overset{f}{\longrightarrow}{\mathbb R}^{E_{{\operatorname{sd \,}} G}}
\overset{h}{\longrightarrow}{\mathbb R}^{E_G}
$$
where $f$ is the map from Theorem~\ref{regular-result} defined in
Definition~\ref{f-definition}, and $h$ was defined in
Example~\ref{subdivision-example}.
\end{remark}
\begin{proposition}
If $G$ is a semiregular bipartite graph, then $g: \ensuremath{\mathbb{Z}}^{E_{{\operatorname{line \,\,}} G}}
\rightarrow \ensuremath{\mathbb{Z}}^{E_G}$ is a morphism of the associated rational
orthogonal decompositions, and hence induces a group homomorphism $g:
K({\operatorname{line \,\,}} G) \rightarrow K(G)$.
\end{proposition}
\begin{proof}
By Lemma \ref{induced-cycles} (ii), it is enough to show that $g$
takes global and local cycles in $Z_{{\operatorname{line \,\,}} G}$ to cycles in $Z_G$,
and that $g^t$ takes cycles in $Z_G$ to cycles in $Z_{{\operatorname{line \,\,}} G}$.
First, one can check that $g$ maps all local cycles to $0$. Each
global cycle is by definition of the form $z({\operatorname{line \,\,}} C)$ where $C$ is
a directed cycle of $G$, and one checks that
$$
g(z({\operatorname{line \,\,}} C)) = \left( \frac{\lambda}{d_1} + \frac{\lambda}{d_2} \right) z(C).
$$
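For instance, if $C$ is a directed $4$-cycle $a \rightarrow b \rightarrow a' \rightarrow b' \rightarrow a$ in $G$, then $z({\operatorname{line \,\,}} C)=(ab,ba')+(ba',a'b')+(a'b',b'a)+(b'a,ab)$, and applying the definition of $g$ to these four edges (two of each type) gives
$$
g(z({\operatorname{line \,\,}} C))
= \left( \frac{\lambda}{d_1} + \frac{\lambda}{d_2} \right)
\left( (a,b)+(b,a')+(a',b')+(b',a) \right)
= \left( \frac{\lambda}{d_1} + \frac{\lambda}{d_2} \right) z(C).
$$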
On the other hand, one checks that $g^t(z(C))$ can be rewritten as a
sum of $\lambda$ cycles $\zeta_i$ in $Z_{{\operatorname{line \,\,}} G}$, each $\zeta_i$
being twice the length of $C$, and in which every other vertex on
$\zeta_i$ corresponds to an edge occurring in $C$.
\end{proof}
\subsection{Analyzing its kernel and cokernel}
\label{semiregular-kernel-cokernel-subsection}
\begin{proposition}
The map
$$
g^t g: K({\operatorname{line \,\,}} G) \rightarrow K({\operatorname{line \,\,}} G)
$$
coincides with scalar multiplication by $\frac{d_1+d_2}{\gamma}
\lambda$. Consequently, $\ker(g)$ is $\frac{d_1+d_2}{\gamma}
\lambda$-torsion.
\label{g-scalarmult}
\end{proposition}
\begin{proof}
For any edge $ab,ba'$ in $E_{{\operatorname{line \,\,}} G}$, use the definitions of $g$
and $g^t$ to write
\begin{equation}
\label{eqngtg}
\begin{aligned}
g^tg(ab,ba') =& \frac{\lambda^2}{d_1 d_2} \sum_{b_i \in N(a)} (b_ia,ab)
+ \frac{\lambda^2}{d_2^2} \sum_{a_j \in N(b)} (ab,ba_j) \\
& + \frac{\lambda^2}{d_2^2} \sum_{a_j \in N(b)} (a_jb,ba')
+ \frac{\lambda^2}{d_1 d_2} \sum_{b_k \in N(a')} (ba',a'b_k).
\end{aligned}
\end{equation}
For the first and fourth term, one has
$$
\begin{aligned}
\sum_{b_i \in N(a)} (b_ia,ab ) &= \sum_{a_j \in N(b)} (ab,ba_j) \mbox{ mod } B_{{\operatorname{line \,\,}} G} \\
\sum_{b_k \in N(a')} (ba',a'b_k) & = \sum_{a_j \in N(b)} (a_jb,ba') \mbox{ mod } B_{{\operatorname{line \,\,}} G}.
\end{aligned}
$$
Substituting these expressions into equation \eqref{eqngtg}, grouping
like terms, and using the identity \newline $d_1 d_2 = \lambda \gamma$
gives
$$
g^tg(ab,ba') = \frac{d_1 + d_2}{\gamma} \cdot \frac{\lambda}{d_2}
\left(\sum_{a_j \in N(b)} (ab,ba_j) + \sum_{a_j \in N(b)} (a_jb,ba') \right)
\mbox{ mod } B_{{\operatorname{line \,\,}} G},
$$
which then can be rewritten, using the $d_2$ triangular cycles
$$
(ab,ba_j) + (a_jb,ba') + (a'b,ba) \in Z_{{\operatorname{line \,\,}} G},
$$
as
\begin{align*}
g^tg(ab,ba') & = \frac{(d_1 + d_2)}{\gamma} \cdot
\frac{\lambda}{d_2} \left( d_2 (ab,ba') \right)
\mbox{ mod } B_{{\operatorname{line \,\,}} G} + Z_{{\operatorname{line \,\,}} G} \\
& = \frac{(d_1 + d_2)}{\gamma} \lambda (ab,ba') \mbox{ mod }
B_{{\operatorname{line \,\,}} G} + Z_{{\operatorname{line \,\,}} G}.\qedhere
\end{align*}
\end{proof}
\begin{remark}
As in Proposition~\ref{f-scaling-prop}, one can show that the other
map $g g^t: K(G) \rightarrow K(G)$ also coincides with the scalar multiplication
by $\frac{d_1+d_2}{\gamma} \lambda$, and hence that ${\operatorname{coker}}(g)$ is also
$\frac{d_1+d_2}{\gamma} \lambda$-torsion. However, we omit this proof,
since we are about to show the {\it stronger} assertion that ${\operatorname{coker}}(g)$ is $\lambda$-torsion.
\end{remark}
\vskip.2in
\noindent
{\it Proof of Theorem~\ref{semiregular-result}.}
In light of Proposition~\ref{g-scalarmult}, it only remains to show that
${\operatorname{coker}}(g)$ is $\lambda$-torsion. Given any edge $ab \in E_G$, one has
$$
\begin{aligned}
\lambda (a,b) & = \lambda (a,b) +
\frac{\lambda}{d_2} \sum_{a_j \in N(b)} (b,a_j) \mbox{ mod } B_{G} \\
& = \frac{\lambda}{d_2} \sum_{a_j \in N(b)} \left( (a,b) + (b,a_j) \right) \mbox{ mod } B_{G} \\
& = g \left( \sum_{a_j \in N(b)} (ab,ba_j) \right) \mbox{ mod } B_{G} .
\end{aligned}
$$
Consequently $\lambda (a,b)$ lies in ${\operatorname{im}}(g)+B_{G}$, so it
is zero in ${\operatorname{coker}}(g) := \ensuremath{\mathbb{Z}}^{E_G}/ \left( {\operatorname{im}}(g) + B_G + Z_G \right)$.
$\qed$
\vskip .2in
Unlike the map $f$ from Section~\ref{regular-result-section}, it is
hard to be much more precise about the exact nature of the cokernel and
kernel of $g$. The following two families of examples demonstrate two
extremes of behavior for how tightly or loosely the map $g$ ties
together $K({\operatorname{line \,\,}} G)$ and $K(G)$ for semiregular bipartite graphs $G$.
\begin{example}
\label{regular-complete-bipartite-example}
Assume $G$ is not only bipartite semiregular, but actually $d$-regular
(i.e., $d_1=d_2=d$). Then $g: K({\operatorname{line \,\,}} G) \rightarrow K(G)$ is nearly
surjective, in the sense that ${\operatorname{coker}}(g)$ is a quotient of $\ensuremath{\mathbb{Z}}_d$.
To see this, recall from Remark~\ref{regular-bipartite-case} that in
this case, $g = h \circ f$ where $h, f$ were defined in
Example~\ref{subdivision-example} and Definition~\ref{f-definition}.
Since $h: K({\operatorname{sd \,}} G) \rightarrow K(G)$ is surjective, it induces a
surjection
$$
{\operatorname{coker}}(f) := K({\operatorname{sd \,}} G)/{\operatorname{im}}(f) \longrightarrow K(G)/{\operatorname{im}}(h \circ f) =: {\operatorname{coker}}(g).
$$
But Theorem~\ref{regular-result} says that ${\operatorname{coker}}(f) = \ensuremath{\mathbb{Z}}_d$ in this situation.
\end{example}
\begin{example}
\label{complete-bipartite-example}
For the complete bipartite graph $G=K_{n_1,n_2}$, the structures of
the critical groups of $G$ and ${\operatorname{line \,\,}} G$ have been determined through
manipulations of their Laplacian matrices (see Lorenzini
\cite{Lorenzini} and Berget \cite{REU}, respectively):
\begin{equation}
\begin{aligned}
K(K_{n_1,n_2}) & \cong
\ensuremath{\mathbb{Z}}_{n_1}^{n_2-2} \oplus \ensuremath{\mathbb{Z}}_{n_2}^{n_1-2} \oplus \ensuremath{\mathbb{Z}}_{n_1n_2}, \\
K({\operatorname{line \,\,}} K_{n_1,n_2}) & \cong
\ensuremath{\mathbb{Z}}_{n_1(n_1+n_2)}^{n_1-2} \oplus \ensuremath{\mathbb{Z}}_{n_2(n_1+n_2)}^{n_2-2}
\oplus \ensuremath{\mathbb{Z}}_{n_1+n_2}^{(n_1-2)(n_2-2)+1}.
\end{aligned}
\nonumber
\end{equation}
In principle, the structures of these groups allow nonzero
homomorphisms between them for all values of $n_1, n_2$. However, we
claim that whenever $\gamma=\mbox{gcd}(n_1,n_2)=1$, the map $K({\operatorname{line \,\,}}
G) \overset{g}{\rightarrow} K(G)$ will be the zero morphism. In this
case, $\ker(g) = K({\operatorname{line \,\,}} G)$ and ${\operatorname{coker}}(g) = K(G)$.
To see this claim, let $(ab,ba')$ be a fixed edge in $E_{{\operatorname{line \,\,}} G}$.
Note that
$$
\begin{aligned}
\lambda &= d_1 d_2 = n_1 n_2, \\
d_1 &= n_2, \,\, d_2 =n_1.
\end{aligned}
$$
Then for each $b_j \in V_2$, one has
$$
\frac{1}{d_1}\left( g(ab,ba')+g(a'b_j,b_ja) \right)
=
ab+ba'+a'b_j+b_ja \in Z_G.
$$
On the other hand,
$$
\begin{aligned}
\sum_{b_{j} \in V_2} \frac{1}{d_1}\left( g(ab,ba')+g(a'b_j,b_ja) \right)
&=
g(ab,ba') + \sum_{b_{j} \in V_2}\frac{1}{d_1}\frac{\lambda}{d_2}(a'b_j+b_ja) \\
&=
g(ab,ba') +\sum_{b_{j} \in V_2}a'b_j+\sum_{b_{j} \in V_2}b_ja \\
&=
g(ab,ba') \mbox{ mod } B_G.
\end{aligned}
$$
Combining these two statements gives us $g(ab,ba') = 0$ mod
$Z_G + B_G$. By symmetry, one also has $g(ba,ab') = 0$ mod $Z_G + B_G$.
It follows that $g$ is the zero morphism.
\end{example}
\begin{remark}
Note that Theorem~\ref{divisibility-theorem} provides
convenient information about ${\operatorname{Syl}}_p (K)$ for $K:= K({\operatorname{line \,\,}} G)$
when $G$ is $(d_1,d_2)$-semiregular: If $k(p)$ denotes
the largest power $p^{k(p)}$ dividing $d_1+d_2$,
then
\begin{equation}
\label{convenient-semiregular-info}
K/p^{k(p)}K \cong \ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta(G)-1} \oplus \ensuremath{\mathbb{Z}}_{\gcd(p^{k(p)},|V|)}.
\end{equation}
However, even in conjunction with Theorem~\ref{semiregular-result},
this does not appear to determine the structure of $K({\operatorname{line \,\,}} G)$
uniquely in terms of the structure of $K(G)$. Thus we are led to the
following generalization of Question~\ref{bipartite-regular-question}:
\begin{question}
\label{bipartite-semiregular-question}
When $G$ is a simple, semiregular bipartite graph, what more can be
said about the structure of $K:=K({\operatorname{line \,\,}} G)$ in relation to that of
$K(G)$?
\end{question}
\end{remark}
\section{Examples}\label{example-section}
\subsection{The complete graph $K_n$}
\label{complete-graph-section}
Proposition~\ref{complete-graph} can be rephrased as asserting that
$$
K(K_n) \cong \ensuremath{\mathbb{Z}}_n^{n-2} \oplus \ensuremath{\mathbb{Z}}_1^{\beta-n+2}
$$
where $\beta:=\beta(K_n)=\binom{n-1}{2}$. Since $K_n$ is nonbipartite
for $n \geq 3$, and contains an even length cycle for $n \geq 4$,
Corollary~\ref{regular-nonbipartite-corollary} immediately implies the
following:
\begin{corollary}
\label{complete-graph-corollary}
For $n \geq 4$, the critical group of the line graph ${\operatorname{line \,\,}} K_n$ of
the complete graph $K_n$ has the form
\begin{align*}\label{complete-graph-equation}
K({\operatorname{line \,\,}} K_4) &= \ensuremath{\mathbb{Z}}_{24} \oplus \ensuremath{\mathbb{Z}}_8 \oplus \ensuremath{\mathbb{Z}}_2 \\
K({\operatorname{line \,\,}} K_n) &= \ensuremath{\mathbb{Z}}_{2(n-1)n}^{n-2} \oplus \ensuremath{\mathbb{Z}}_{2(n-1)}^{\beta-n} \oplus
\begin{cases}
\ensuremath{\mathbb{Z}}_2^2 &\text{ for even }n > 5,\\
\ensuremath{\mathbb{Z}}_4 & \text{ for odd }n \geq 5.
\end{cases}
\end{align*}
\end{corollary}
\subsection{The complete bipartite graph $K_{n_1,n_2}$ }
\label{complete-bipartite-graph-section}
As mentioned in Example~\ref{complete-bipartite-example}, the critical
groups of the complete bipartite graph $K_{n_1,n_2}$ and its line
graph ${\operatorname{line \,\,}} K_{n_1,n_2}$ have the following forms:
$$
\begin{aligned}
K(K_{n_1,n_2}) &\cong
\ensuremath{\mathbb{Z}}_{n_1}^{n_2-2} \oplus \ensuremath{\mathbb{Z}}_{n_2}^{n_1-2} \oplus \ensuremath{\mathbb{Z}}_{n_1n_2}\\
K({\operatorname{line \,\,}} K_{n_1,n_2}) &\cong
\ensuremath{\mathbb{Z}}_{n_1(n_1+n_2)}^{n_1-2} \oplus \ensuremath{\mathbb{Z}}_{n_2(n_1+n_2)}^{n_2-2}
\oplus \ensuremath{\mathbb{Z}}_{n_1+n_2}^{(n_1-2)(n_2-2)+1}
\end{aligned}
$$
(see Lorenzini \cite{Lorenzini} and Berget \cite{REU}, respectively).
In addition, Example~\ref{complete-bipartite-example} showed that the
map $g$ in the exact sequence in Theorem~\ref{semiregular-result} is
sometimes the zero morphism and hence is not always useful for
determining the structure of $K({\operatorname{line \,\,}} K_{n_1,n_2})$. Even in the
special case when $n_1=n_2=n$ (so that $K_{n,n}$ is $n$-regular),
the exact sequence
$$
0 \rightarrow \underbrace{\ensuremath{\mathbb{Z}}_n^{\beta-2} \oplus
\ensuremath{\mathbb{Z}}_n}_{\ensuremath{\mathbb{Z}}_n^{n(n-2)}} \longrightarrow \underbrace{K({\operatorname{line \,\,}} K_{n,n})}_{\ensuremath{\mathbb{Z}}_{2n^2}^{2(n-2)} \oplus \ensuremath{\mathbb{Z}}_{2n}^{(n-2)^2+1}}
\overset{f}{\longrightarrow} \underbrace{K({\operatorname{sd \,}} K_{n,n})}_{\ensuremath{\mathbb{Z}}_{2n^2}^1 \oplus \ensuremath{\mathbb{Z}}_{2n}^{2(n-2)} \oplus \ensuremath{\mathbb{Z}}_2^{(n-2)^2}} \longrightarrow
\ensuremath{\mathbb{Z}}_n \rightarrow 0
$$
from Theorem~\ref{regular-result} does not determine {\it a priori} Berget's formula for $K({\operatorname{line \,\,}} K_{n,n})$.
However, we note that at least Theorem~\ref{divisibility-theorem} does
predict that the expression
$$
K({\operatorname{line \,\,}} K_{n_1,n_2})=\bigoplus_{i=1}^{\beta} \ensuremath{\mathbb{Z}}_{e_i} \text{ where }
\beta:=\beta(K_{n_1,n_2}) = (n_1-1)(n_2-1)
$$
should have $|V|=n_1+n_2$ dividing every one of the factors $e_i$.
This follows from equation~\eqref{convenient-semiregular-info}, since
$K_{n_1,n_2}$ is bipartite $(n_1,n_2)$-semiregular. Hence for each
prime $p$, one has
$$
K/p^{k(p)} K \cong \ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta-1} \oplus \ensuremath{\mathbb{Z}}_{\gcd(p^{k(p)},|V|)} =
\ensuremath{\mathbb{Z}}_{p^{k(p)}}^{\beta},
$$
where $k(p)$ is the largest power such that $p^{k(p)}$ divides $n_1+n_2$.
Hence $K/(n_1+n_2)K \cong \ensuremath{\mathbb{Z}}_{n_1+n_2}^\beta$.
\subsection{The $d$-dimensional cube}
\label{cube-section}
Denote by $G_{{\operatorname{d-cube}}}$ the graph of vertices and edges in the
$d$-dimensional cube, that is, $G_{{\operatorname{d-cube}}}=(V,E)$ in which $V$ is the
set of all binary strings of length $d$, and $E$ has an edge between
any two such strings that differ in exactly one binary digit. This is
a $d$-regular bipartite graph, having
$$
\beta:=\beta(G_{{\operatorname{d-cube}}})=(d-2)2^{d-1}+1.
$$
One knows its spanning tree number (see, e.g., \cite[Example 5.6.10]{Stanley}):
$$
\kappa(G_{{\operatorname{d-cube}}}) = \frac{1}{2^d} \prod_{k=1}^d (2k)^{\binom{d}{k}} = 2^{2^d-d-1} \prod_{k=2}^d k^{\binom{d}{k}}.
$$
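For example, for $d=3$ this gives $\kappa(G_{{\operatorname{3-cube}}})=2^{2^3-3-1}\cdot 2^{\binom{3}{2}}\cdot 3^{\binom{3}{3}}=2^4\cdot 2^3\cdot 3=384$, the familiar number of spanning trees of the ordinary $3$-dimensional cube.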
Correspondingly, work of H. Bai \cite{Bai} computes its critical group
structure away from the prime $2$: For odd primes $p$, one has
$$
{\operatorname{Syl}}_p (K(G_{{\operatorname{d-cube}}})) = {\operatorname{Syl}}_p \left( \bigoplus_{k=2}^d
\ensuremath{\mathbb{Z}}_k^{\binom{d}{k}} \right).
$$
Unfortunately, ${\operatorname{Syl}}_2 (K(G_{{\operatorname{d-cube}}}))$ is a $2$-group that is still
not known for all $d$.
Consequently, Proposition~\ref{Lorenzini's-theorem} shows that
$K({\operatorname{sd \,}}(G_{{\operatorname{d-cube}}}))$ has the same $p$-primary structure as
$K(G_{{\operatorname{d-cube}}})$ for odd primes $p$, and Theorem~\ref{regular-result}
gives the following exact sequence for every odd prime $p$:
\begin{equation}
\label{cube-at-odd-prime-sequence}
0 \rightarrow
{\operatorname{Syl}}_p(\ensuremath{\mathbb{Z}}_d^{\beta-1}) \rightarrow
{\operatorname{Syl}}_p (K({\operatorname{line \,\,}} G_{{\operatorname{d-cube}}})) \rightarrow
{\operatorname{Syl}}_p \left( \bigoplus_{k=2}^d \ensuremath{\mathbb{Z}}_k^{\binom{d}{k}} \right) \rightarrow
\ensuremath{\mathbb{Z}}_d \rightarrow 0.
\end{equation}
This is particularly effective when $d$ itself is prime since then
{\operatorname{Syl}}_d \left( \bigoplus_{k=2}^d \ensuremath{\mathbb{Z}}_k^{\binom{d}{k}} \right) =
\ensuremath{\mathbb{Z}}_d$ and the exact sequence \eqref{cube-at-odd-prime-sequence} implies
that for odd primes $p$,
$$
{\operatorname{Syl}}_p (K({\operatorname{line \,\,}} G_{{\operatorname{d-cube}}})) =
\begin{cases}
\ensuremath{\mathbb{Z}}_d^{\beta-1} &\text{ if }p=d, \\
{\operatorname{Syl}}_p \left( \bigoplus_{k=2}^{d-1} \ensuremath{\mathbb{Z}}_k^{\binom{d}{k}} \right) &\text{ if }p \neq d.
\end{cases}
$$
Meanwhile ${\operatorname{Syl}}_2 (K({\operatorname{line \,\,}} G_{{\operatorname{d-cube}}})) = {\operatorname{Syl}}_2 (K({\operatorname{sd \,}}(G_{{\operatorname{d-cube}}})))$
is unknown, but by Proposition~\ref{Lorenzini's-theorem}, is
completely determined by the unknown $2$-group
${\operatorname{Syl}}_2(K(G_{{\operatorname{d-cube}}}))$.
\subsection{The Platonic solids}
\label{Platonic-solid-section}
One source of regular graphs is the family of $1$-skeleta (= graphs of vertices
and edges) of the Platonic solids. There are certain features that
apply to any graph $G_P$ which is the $1$-skeleton of a
$3$-dimensional polyhedron $P$, and hence to any Platonic solid:
\begin{enumerate}
\item[$\bullet$] Because the cycles surrounding the (polygonal) faces
of $P$ generate the cycle lattice $Z$, the graph $G_P$ is bipartite
if and only if each face of $P$ is an even $n$-gon.
\item[$\bullet$] Furthermore, the cycles that bound all but one face
of $P$ form a basis for $Z$, so that $\beta(G_P)$ is always one less
than the number of faces.
\item[$\bullet$] Such graphs $G_P$ are always $2$-edge-connected, so
that Theorem~\ref{2-edge-connected-theorem} always applies.
\item[$\bullet$] Dual polyhedra $P, P^*$ have $G_P, G_{P^*}$ dual as
planar graphs. This identifies the lattice of bonds for one with
the lattice of cycles for the other, and implies that their critical
groups $K(G_P), K(G_{P^*})$ are isomorphic; see also
\cite{CoriRossin}.
\end{enumerate}
\subsubsection{The tetrahedron}
The tetrahedron has $1$-skeleton $G_{{\operatorname{tetra}}}=K_4$, and hence
implicitly was discussed already in
Section~\ref{complete-graph-section} on $K_n$, as the special case
$n=4$.
\subsubsection{The cube and octahedron}
The cube and the octahedron are dual polyhedra. Either by direct
computer calculation, or by noting $G_{{\operatorname{octa}}} \cong {\operatorname{line \,\,}} K_4$
and applying Corollary~\ref{complete-graph-corollary} with $n=4$, one
finds that
$$
\begin{aligned}
K(G_{{\operatorname{cube}}})=K(G_{{\operatorname{octa}}})&=\ensuremath{\mathbb{Z}}_2 \oplus \ensuremath{\mathbb{Z}}_8 \oplus \ensuremath{\mathbb{Z}}_{24} \\
&=\ensuremath{\mathbb{Z}}_2 \oplus \ensuremath{\mathbb{Z}}_8^2 \oplus \ensuremath{\mathbb{Z}}_3.
\end{aligned}
$$
Since $G_{{\operatorname{octa}}}$ is $4$-regular and nonbipartite with
$\beta(G_{{\operatorname{octa}}})=7$,
Corollary~\ref{regular-nonbipartite-corollary} then implies
$$
K({\operatorname{line \,\,}} G_{{\operatorname{octa}}})
= \ensuremath{\mathbb{Z}}_2^2 \oplus \ensuremath{\mathbb{Z}}_8^2 \oplus \ensuremath{\mathbb{Z}}_{16} \oplus \ensuremath{\mathbb{Z}}_{64} \oplus \ensuremath{\mathbb{Z}}_{192}.
$$
For $G_{{\operatorname{cube}}}$, which has $\beta(G_{{\operatorname{cube}}})=5$, the results of
Section~\ref{cube-section} apply, and are particularly effective
because $d=3$ is prime. They show that $ {\operatorname{Syl}}_p (K({\operatorname{line \,\,}} G_{{\operatorname{d-cube}}}))
$ vanishes except for $p=2,3$, with
\[
\begin{aligned}
{\operatorname{Syl}}_3 (K({\operatorname{line \,\,}} G_{{\operatorname{d-cube}}})) &= \ensuremath{\mathbb{Z}}_3^4\\
{\operatorname{Syl}}_2 (K({\operatorname{line \,\,}} G_{{\operatorname{d-cube}}}))
&= {\operatorname{Syl}}_2 (K({\operatorname{sd \,}}(G_{{\operatorname{d-cube}}})))
= \ensuremath{\mathbb{Z}}_2^2 \oplus \ensuremath{\mathbb{Z}}_4 \oplus \ensuremath{\mathbb{Z}}_{16}^2.
\end{aligned}
\]
Hence
\[
\begin{aligned}
K({\operatorname{line \,\,}} G_{{\operatorname{cube}}} ) &= \ensuremath{\mathbb{Z}}_2^2 \oplus \ensuremath{\mathbb{Z}}_4 \oplus \ensuremath{\mathbb{Z}}_{16}^2 \oplus \ensuremath{\mathbb{Z}}_3^4\\
&= \ensuremath{\mathbb{Z}}_2 \oplus \ensuremath{\mathbb{Z}}_6 \oplus \ensuremath{\mathbb{Z}}_{12} \oplus \ensuremath{\mathbb{Z}}_{48}^2.
\end{aligned}
\]
\subsubsection{The dodecahedron and icosahedron}
The dodecahedron and icosahedron are dual polyhedra, both of whose
graphs are nonbipartite. Computer calculation shows that
$$
\begin{aligned}
K(G_{{\operatorname{dodeca}}})=K(G_{{\operatorname{icosa}}}) &= \ensuremath{\mathbb{Z}}_2 \oplus \ensuremath{\mathbb{Z}}_{12} \oplus \ensuremath{\mathbb{Z}}_{60}^3 \\
&= \ensuremath{\mathbb{Z}}_2 \oplus \ensuremath{\mathbb{Z}}_4^4 \oplus \ensuremath{\mathbb{Z}}_3^4 \oplus \ensuremath{\mathbb{Z}}_5^3.
\end{aligned}
$$
Since $G_{{\operatorname{dodeca}}}$ is $3$-regular with $\beta(G_{{\operatorname{dodeca}}})=11$,
one concludes from
Corollary~\ref{regular-nonbipartite-corollary} that
$$
\begin{aligned}
K({\operatorname{line \,\,}} G_{{\operatorname{dodeca}}})
&= \ensuremath{\mathbb{Z}}_2^2 \oplus \ensuremath{\mathbb{Z}}_6^4 \oplus \ensuremath{\mathbb{Z}}_{12} \oplus \ensuremath{\mathbb{Z}}_{72} \oplus \ensuremath{\mathbb{Z}}_{360}^3\\
&= \ensuremath{\mathbb{Z}}_2^6 \oplus \ensuremath{\mathbb{Z}}_4 \oplus \ensuremath{\mathbb{Z}}_8^4 \oplus \ensuremath{\mathbb{Z}}_3^5 \oplus \ensuremath{\mathbb{Z}}_9^4 \oplus \ensuremath{\mathbb{Z}}_5^3. \\
\end{aligned}
$$
Since $G_{{\operatorname{icosa}}}$ is $5$-regular with $\beta(G_{{\operatorname{icosa}}})=19$,
one concludes from
Corollary~\ref{regular-nonbipartite-corollary} that
$$
\begin{aligned}
K({\operatorname{line \,\,}} G_{{\operatorname{icosa}}})
& = \ensuremath{\mathbb{Z}}_2^2 \oplus \ensuremath{\mathbb{Z}}_{10}^{12} \oplus \ensuremath{\mathbb{Z}}_{20} \oplus \ensuremath{\mathbb{Z}}_{120} \oplus \ensuremath{\mathbb{Z}}_{600}^3\\
& = \ensuremath{\mathbb{Z}}_2^{14} \oplus \ensuremath{\mathbb{Z}}_4 \oplus \ensuremath{\mathbb{Z}}_8^4 \oplus \ensuremath{\mathbb{Z}}_3^4 \oplus \ensuremath{\mathbb{Z}}_5^{14} \oplus \ensuremath{\mathbb{Z}}_{25}^3. \\
\end{aligned}
$$
\section*{Acknowledgments}
The authors thank the anonymous referee for a careful reading of the
manuscript and many helpful suggestions, and also thank David Treumann
for allowing them to include some of his results here.
\end{document}
\begin{document}
\title{Construction of projective special K\"{a}hler manifolds}
\begin{abstract}
In this paper we present an intrinsic characterisation of projective special K\"{a}hler manifolds in terms of a symmetric tensor satisfying certain differential and algebraic conditions.
We show that this tensor vanishes precisely when the structure is locally isomorphic to a standard projective special K\"{a}hler structure on $\mathrm{SU}(n,1)/\mathrm{S}(\mathrm{U}(n)\mathrm{U}(1))$.
We use this characterisation to classify $4$-dimensional projective special K\"{a}hler Lie groups.
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\begin{NoHyper}
\footnotetext{\emph{MSC class}: 53C55; 53C26, 22E25, 53C80.}
\end{NoHyper}
\renewcommand{\thefootnote}{\arabic{footnote}}
\end{abstract}
\section{Introduction}
Projective special K\"{a}hler manifolds are a special class of K\"{a}hler quotients of conic special K\"{a}hler manifolds which is a class of pseudo-K\"{a}hler manifolds endowed with a symplectic, flat, torsion-free connection and an infinitesimal homothety.
Explicit examples can be found in \cite{CortesClassHomoSS}, where homogeneous projective special K\"{a}hler manifolds of semisimple Lie groups are classified.
A notable case appearing in this list is the complex hyperbolic $n$-space.
Many projective special K\"{a}hler manifolds can be constructed via the so-called r-map \cite{CiSC}, a construction arising from supergravity and string theory which allows one to build a projective special K\"{a}hler manifold starting from a homogeneous cubic polynomial.
See \cite{CortesClassPSKr} for a classification of $6$-dimensional manifolds that can be constructed via the r-map.
Another example is obtained by taking the Weil-Petersson metric on the space of complex structure deformations on a Calabi-Yau $3$-dimensional manifold \cite{CortesHK1998}.
Projective special K\"{a}hler manifolds appear in the study of supergravity and mirror symmetry with the name \emph{local special K\"{a}hler manifolds} (see \cite{Fre1995} and \cite{Freed1999} for more details on their history and applications to physics, and in particular \cite{CandelasOssa1991} for their importance in mirror symmetry).
The name projective special K\"{a}hler was given by Freed in \cite{Freed1999} where he also shows how such manifolds are quotients of special K\"{a}hler ones \cite[Proposition 4.6, p.\ 20]{Freed1999} (see e.g.\ \cite{SpComMan} for the relation between this definition and the one we will use in this work).
Projective special K\"{a}hler manifolds are not only interesting on their own, as they find an important application in quaternion K\"{a}hler geometry.
The construction known as c-map, also arising from the same areas of physics, allows in fact to create quaternion K\"{a}hler manifolds of negative scalar curvature starting from projective special K\"{a}hler ones \cite{CiSC}, \cite{Conification2013}, \cite{CortesHKQK}, \cite{Swann2015}, \cite{HitHKQK}, \cite{FerSab}, \cite{CFG1989}.
Quaternion K\"{a}hler manifolds are orientable smooth Riemannian manifolds of dimension $4n$ with $n\ge 2$, whose holonomy group is a subgroup of $\mathrm{Sp}(n)\mathrm{Sp}(1)$ not contained in $\mathrm{Sp}(n)$.
They are important since they form a special family of Einstein manifolds with non-vanishing Ricci tensor, corresponding to one of the possible holonomy groups of a locally irreducible, non-locally symmetric, simply connected Riemannian manifold in Berger's list (see \cite{Berger}).
In this paper we present a characterisation of projective special K\"{a}hler manifolds that will hopefully shed more light on this type of structure.
Our characterisation is intrinsic in the sense that we reduce the projective special K\"{a}hler structure to data solely defined on the manifold itself.
The characterisation is obtained by means of a locally defined symmetric tensor that we call deviance, satisfying certain conditions: a differential one and an algebraic one.
The deviance tensor emerges from the difference between two naturally occurring connections on the conic special K\"{a}hler manifold over a projective special K\"{a}hler one.
Our description arises by writing the two connections with respect to a local frame, and relating the difference to a tensor defined on the base.
We also prove a lower bound for the scalar curvature, which is reached exactly when the deviance is zero; this condition characterises projective special K\"{a}hler manifolds isomorphic to the complex hyperbolic $n$-space if one assumes the manifold complete, connected and simply connected.
For projective special K\"ahler manifolds of elliptic type, i.e.\ such that the corresponding conic special K\"ahler metric is positive definite, we have a similar result in \cite[Theorem 16, p.\ 126]{BC2003}, where complex projective spaces are characterised as the only complete projective special K\"ahler domains of elliptic type.
In that context, in fact, the vanishing of the deviance is induced by the completeness condition.
Our characterisation provides a simpler way to construct projective special K\"{a}hler manifolds, and we display this by classifying all possible projective special K\"{a}hler structures on $4$-dimensional Lie groups.
Our classification relies on the classification of K\"{a}hler Lie groups by Ovando \cite{Ovando2004}.
We note that an intrinsic characterisation of projective special K\"{a}hler Lie groups has been obtained independently in a very recent paper by Macia and Swann \cite{MaciaSwann2019}.
Our setting is slightly more general, since in our study of projective special K\"{a}hler Lie groups we do not assume the deviance tensor to be left-invariant.
In \cite{MaciaSwann2019} it is also shown that projective special K\"{a}hler Lie groups determine quaternion K\"{a}hler Lie groups via the c-map, if one assumes the exactness of the K\"{a}hler form and the invariance of the flat connection.
A similar result, holding in the case that the projective special K\"{a}hler Lie group is the quotient of an affine special K\"{a}hler domain, can be deduced from the more general result \cite[Corollary 24, p.\ 33]{CortesCohomogeneityOne}.
Since we are ultimately interested in the c-map, throughout this paper we adopt the same convention as \cite{CiSC}, where we only consider projective special K\"{a}hler manifolds obtained from conic special K\"{a}hler manifolds with signature $(2n,2)$.
Nonetheless, our characterisation can be generalised to generic signatures.
It is worth mentioning that the deviance, being a symmetric tensor of type (3,0), can often be seen as a homogeneous polynomial of degree three, which may have a role in providing a partial inversion to the r-map.
We also use our characterisation to show that, on a K\"{a}hler manifold, the existence of a symmetric tensor satisfying the deviance conditions implies the existence of a whole family of projective special K\"{a}hler structures and we provide sufficient conditions for said structures to be isomorphic.
\textbf{Acknowledgements}. This paper is part of the author's PhD thesis \cite{PhDThesis} written under the supervision of Diego Conti.
Part of the study leading to this work was carried out during a visiting period at QGM, Aarhus; the author wishes to thank Andrew Swann for many useful discussions during that visit.
This is a preprint of an article published in \emph{Annali di Matematica Pura ed Applicata (1923 -)}.
The final authenticated version is available online at: \href{https://doi.org/10.1007/s10231-021-01096-4}{https://doi.org/10.1007/s10231-021-01096-4}.
\section{Definitions}
In this section we are introducing the basic objects that we are going to discuss in this work.
The coming definition involves a flat connection $\nabla$ and its exterior covariant derivative operator $d^{\nabla}$.
\begin{defi}\label{def:CSK}
A \emph{conic special K\"{a}hler} manifold is the data of a pseudo-K\"{a}hler manifold $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega})$ with a flat, torsion-free, symplectic connection $\nabla$ and a vector field $\xi$ such that
\begin{enumerate}
\item $d^{\nabla}\widetilde{I}=0$ where we interpret $\widetilde{I}$ as a $1$-form with values in $T\widetilde{M}$;
\item $\widetilde{g}(\xi,\xi)$ is nowhere vanishing;
\item $\nabla \xi=\widetilde{\nabla}^{LC} \xi=\mathrm{id}$;
\item\label{cond:segnaturaPSK} $\widetilde{g}$ is negative definite on $\spn{\xi,I\xi}$ and positive definite on its orthogonal complement.
\end{enumerate}
Here $\widetilde{\nabla}^{LC}$ is the Levi-Civita connection.
\end{defi}
We will adopt the convention $\widetilde{\omega}=\widetilde{g}(\widetilde{I}\cdot,\cdot)$.
Definition \ref{def:CSK} is identical to Definition 3 in \cite{CiSC} if we take $-g$ as metric.
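A guiding example, which we recall here only as an illustration, is the following: let $\widetilde{M}=\{z\in\mathbb{C}^{n+1} \mid \langle z,z\rangle<0\}$, where $\langle z,w\rangle=\sum_{k=1}^{n}z^k\overline{w}^k-z^{n+1}\overline{w}^{n+1}$, endowed with the flat pseudo-K\"{a}hler structure $\widetilde{g}=\mathrm{Re}\langle\cdot,\cdot\rangle$ of signature $(2n,2)$, the standard complex structure $\widetilde{I}$, the flat connection $\nabla$ given by the affine structure of $\mathbb{C}^{n+1}$, and the position vector field $\xi_z=z$. Then $\nabla\xi=\widetilde{\nabla}^{LC}\xi=\mathrm{id}$, $\widetilde{I}$ is $\nabla$-parallel (so $d^{\nabla}\widetilde{I}=0$), $\widetilde{g}(\xi,\xi)=\langle z,z\rangle<0$ is nowhere vanishing, and $\widetilde{g}$ is negative definite on $\spn{\xi,\widetilde{I}\xi}=\mathbb{C}z$ and positive definite on its orthogonal complement, so all conditions of Definition \ref{def:CSK} hold. One can check that the corresponding quotient, in the sense of Definition \ref{def:PSK} below, is the complex hyperbolic $n$-space $\mathrm{SU}(n,1)/\mathrm{S}(\mathrm{U}(n)\mathrm{U}(1))$ mentioned in the introduction.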
We start by showing how the Lie derivative along $\xi$ and $I\xi$ in a conic special K\"{a}hler manifold behaves on the K\"{a}hler structure.
\begin{lemma}[Lemma 3.2, p.\ 1336 in \cite{Swann2015}]\label{lemma:azioneInfinitesimaXieIxi}
Let $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$ be a conic special K\"{a}hler manifold, then:
\begin{enumerate}
\item $\xi$ is a homothety of scaling factor $2$ preserving $\widetilde{I}$;
\item $\widetilde{I}\xi$ preserves the K\"{a}hler structure.
\end{enumerate}
\end{lemma}
\begin{proof}
See e.g.\ \cite{Swann2015} where $X=-I\xi$.
\end{proof}
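Explicitly, Lemma \ref{lemma:azioneInfinitesimaXieIxi} states that $\Lie{\xi}\widetilde{g}=2\widetilde{g}$, $\Lie{\xi}\widetilde{I}=0$, $\Lie{\widetilde{I}\xi}\widetilde{g}=0$ and $\Lie{\widetilde{I}\xi}\widetilde{I}=0$; in particular $\Lie{\xi}\widetilde{\omega}=2\widetilde{\omega}$ and $\Lie{\widetilde{I}\xi}\widetilde{\omega}=0$. These identities will be used repeatedly below.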
Before proceeding, we write the following lemma for future reference.
\begin{lemma}\label{lemma:XeXi}
In a conic special K\"{a}hler manifold $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$, $\nabla (\widetilde{I}\xi)=\widetilde{I}$.
\end{lemma}
\begin{proof}
For all $X\in\field{\widetilde{M}}$,
\begin{align*}
\nabla_X (\widetilde{I}\xi)-\widetilde{I}X
&=(\nabla_X \widetilde{I})\xi+\widetilde{I}\nabla_X\xi-\widetilde{I}X
=(\nabla_X \widetilde{I})\xi
=(\nabla_{\xi} \widetilde{I})X\\
&=\nabla_{\xi} (\widetilde{I}X)-\widetilde{I}\nabla_{\xi} X
=\nabla_{\widetilde{I}X} (\xi)+[\xi,\widetilde{I}X]-\widetilde{I}\left(\nabla_{X} \xi+[\xi,X]\right)\\
&=\widetilde{I}X+\Lie{\xi} (\widetilde{I}X)-\widetilde{I}X-\widetilde{I}\Lie{\xi} X
=(\Lie{\xi} \widetilde{I})X
=0.\qedhere
\end{align*}
\end{proof}
If we compare Definition \ref{def:CSK} with Definition 3.1 in \cite{Swann2015}, we notice that the main difference is the signature of the metric: it is enough to add condition \ref{cond:segnaturaPSK} to the latter and to define $X=-I\xi$ in order to obtain two equivalent definitions.
The proof of the equivalence is obtained by Lemma \ref{lemma:XeXi}.
\begin{defi}\label{def:PSK}
A \emph{projective special K\"{a}hler} manifold is a K\"{a}hler manifold $M$ endowed with a $\mathbb{C}^*$-bundle $\pi\colon \widetilde{M}\to M$ with $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$ conic special K\"{a}hler such that $\xi$ and $I\xi$ are the fundamental vector fields associated to $1,i\in\mathbb{C}$ respectively and $M$ is the K\"{a}hler quotient with respect to the induced $\mathrm{U}(1)$-action.
In this case we say that $M$ has a projective special K\"{a}hler structure.
For brevity, we will often denote a projective special K\"{a}hler manifold by $(\pi\colon\widetilde{M}\to M,\nabla)$.
\end{defi}
\begin{rmk}
We shall see later that by construction, the action is always Hamiltonian with moment map $-\widetilde{g}(\xi,\xi)$, and the choice of the level set affects the quotient only up to scaling.
\end{rmk}
Concerning the notation for projective special K\"{a}hler manifolds as in Definition \ref{def:PSK}, when a tensor or a connection is possessed by both $\widetilde{M}$ and $M$, we will write them and everything concerning them (torsion, curvature forms, covariant exterior differentials) on $\widetilde{M}$ with $\widetilde{(\cdot)}$ above, whereas the corresponding objects on $M$ will be denoted without it.
\section{Difference tensor}\label{sec:differenceTensor}
This section is devoted to the tensor obtained as difference between the flat and Levi-Civita connection on a conic special K\"{a}hler manifold.
We present the known symmetry of this tensor and write the flatness condition in terms of it \cite[p.\ 9-11]{Freed1999}.
Before talking about the difference tensor, we will introduce some notation and definitions.
Following \cite{RedBook}, if $V$ is a complex representation with a real structure $\sigma$, we define
\begin{equation}
[V]:=\{v\in V|\sigma(v)=v\}.
\end{equation}
Otherwise, for any complex representation $V$,
\begin{equation}
[\![V]\!]:=[V\oplus \overline{V}]
\end{equation}
where $\overline{V}$ is the conjugate representation of $V$.
In particular, the following complex Lie algebra isomorphisms hold:
\begin{equation}
[V]\otimes_{\mathbb{R}} \mathbb{C}\cong V,\qquad
[\![V]\!]\otimes_{\mathbb{R}} \mathbb{C}\cong V\oplus \overline{V}.
\end{equation}
The same notation is used for the associated vector bundles.
Given an almost complex manifold $(M,I)$, let $T_{1,0}M$ be the holomorphic cotangent bundle.
For all $p\in\mathbb{N}$, we denote its $p$-th symmetric power by $S_{p,0}M$.
Given a (pseudo-)Riemannian manifold $(M,g)$, we denote by $\flat$ and $\sharp$ the musical isomorphisms induced by $g$, and we can define the following isomorphism
\begin{equation}\label{eq:bemolle}
\flat_2=\mathrm{id}\otimes \flat\otimes \mathrm{id}\colon T^*M\otimes TM\otimes T^*M\to T_{3}M
\end{equation}
with inverse $\sharp_2:=\mathrm{id}\otimes \sharp\otimes \mathrm{id}$.
Returning now to the main topic of this section, let $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$ be a conic special K\"{a}hler manifold of dimension $n+1$.
We define $\widetilde{\eta}$ as the (1,2)-tensor such that for all vector fields $X$, $Y$ on $\widetilde{M}$ we have $\widetilde{\eta}_X Y=\nabla_X Y-\widetilde{\nabla}^{LC}_X Y$, where the employed notation $\widetilde{\eta}_X Y$ means $\widetilde{\eta}(X,Y)$.
Consider frames adapted to the pseudo-K\"{a}hler structure, hence such that the linear model is $(\mathbb{R}^{2n+2},g_0,I_0,\omega_0)$, where $g_0=\sum_{k=1}^{2n}(e^k)^2-(e^{2n+1})^2-(e^{2n+2})^2$, $I_0 e_{2k-1}=e_{2k}$ for $k=1,\dots,n+1$ and $\omega_0=g_0(I_0\cdot,\cdot)$.
Let $\omega^{\nabla}$ and $\widetilde{\omega}^{LC}$ be the connection forms corresponding respectively to the flat and the Levi-Civita connections represented with respect to an adapted frame.
Thus we have
\begin{equation}
\omega^{\nabla}=\widetilde{\omega}^{LC}+\widetilde{\eta}.
\end{equation}
We know that by the theory of Hessian manifolds, $\widetilde{\eta}$ is symmetric (see e.g.\ \cite[\S 3, p.\ 194]{ShimaV} or \cite[\S 1, p.\ 736]{ShimaCSC}). More precisely, we recall the following result (see \cite[Proposition 1.34, p.\ 39]{Freed1999}, or \cite[Proposition 4, p.\ 1743]{BauesCortes} and \cite[Lemma 3, p.\ 1745]{BauesCortes}).
\begin{lemma}\label{lemma:eta_Symm}
On a conic special K\"{a}hler manifold $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$, the tensor $\widetilde{\eta}$ is a section of $\sharp_2[\![S_{3,0}\widetilde{M}]\!]$.
\end{lemma}
In proving this lemma, one finds the following equality (see e.g.\ \cite[(3.3), p.\ 1743]{BauesCortes}), which we write for further reference.
\begin{equation}\label{eq:nabla I}
\nabla \widetilde{I}=[\widetilde{\eta},\widetilde{I}] =-2\widetilde{I}\widetilde{\eta}.
\end{equation}
Using the flatness of $\nabla$, we observe:
\begin{align}
0=\Omega^\nabla&=\widetilde{\Omega}^{LC}+\widetilde{d}^{LC}\widetilde{\eta} +\frac{1}{2}[\widetilde{\eta}\wedge\widetilde{\eta}],
\end{align}
where $\widetilde{\Omega}^{LC}$ and $\widetilde{d}^{LC}$ are respectively the curvature and exterior covariant derivative of the Levi-Civita connection on $\widetilde{M}$.
Arguing as in \cite[Proposition 1.34 (a), p.\ 39]{Freed1999} (see also \cite[Proposition 4 (iii), p.\ 1743]{BauesCortes}), one obtains
\begin{prop}\label{prop:SpezzamentoCurvatura}
For a K\"{a}hler manifold $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega})$ with a tensor $\widetilde{\eta}$ in $T^*M\otimes TM\otimes T^*M$ such that $\flat_2\widetilde{\eta}$ is a section of $[\![S_{3,0}\widetilde{M}]\!]$ and with a connection $\nabla$ with connection form $\omega^{\nabla}=\widetilde{\omega}^{LC}+\widetilde{\eta}$, then
\begin{align}
\Omega^{\nabla}=0&& \textrm{if and only if}&&\begin{cases}
\widetilde{\Omega}^{LC}+\frac{1}{2}[\widetilde{\eta}\wedge\widetilde{\eta}]=0\\
\widetilde{d}^{LC}\widetilde{\eta}=0
\end{cases}.
\end{align}
\end{prop}
\section{Conic and projective special K\"{a}hler metrics}
In this section we will consider the case of a projective special K\"{a}hler manifold $(\pi\colon\widetilde{M}\to M,\nabla)$ and we will give the explicit relation between the metric on $\widetilde{M}$ and the one on $M$ (see e.g.\ \cite[Section 1.1]{CortesCompProj}).
The mapping $\pi\colon\widetilde{M}\to M$ is a $\mathbb{C}^*$-principal bundle with infinitesimal principal action generated by $\xi$ and $\widetilde{I}\xi$.
We can always build the function $r=\sqrt{-\widetilde{g}(\xi,\xi)}\colon\widetilde{M}\to\mathbb{R}^+$ and define $S=r^{-1}(1)\subseteq \widetilde{M}$ with inclusion map $\iota_S\colon S\hookrightarrow \widetilde{M}$.
Now $r$ has no critical points, since
\begin{align}\label{eq:dr}
dr
&=\frac{d(r^2)}{2r}
=\frac{\widetilde{\nabla}^{LC}(r^2)}{2r}
=-\frac{\widetilde{\nabla}^{LC}(\widetilde{g}(\xi,\xi))}{2r}\\
&=-\frac{2\widetilde{g}(\widetilde{\nabla}^{LC} \xi,\xi)}{2r}
=-\frac{\widetilde{g}(\cdot,\xi)}{r}
=-\frac{1}{r}\xi^{\flat}
\end{align}
and $\widetilde{g}$ is non-degenerate.
It follows that $S$ is a submanifold of dimension $2n+1$ whose tangent bundle corresponds to $\ker(dr)\subset T\widetilde{M}$.
Notice that $dr(\widetilde{I}\xi)=-\frac{\widetilde{g}(\widetilde{I}\xi,\xi)}{r}=-\frac{\widetilde{\omega}(\xi,\xi)}{r}=0$, so $\widetilde{I}\xi$ is a vector field tangent to $S$ and it induces a principal $\mathrm{U}(1)$-action.
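Note also that $dr(\xi)=-\frac{1}{r}\widetilde{g}(\xi,\xi)=\frac{r^2}{r}=r$, an identity that will be used in later computations.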
The induced metric on $S$ is $g_S=\iota_S^* \widetilde{g}$ and thus $\Lie{\widetilde{I}\xi}g_S=\iota_S^*\Lie{\widetilde{I}\xi}\widetilde{g}=0$.
The principal action of $\mathbb{C}^*$ on $\widetilde{M}$ induces by inclusion an $\mathbb{R}^+$-action, and in addition we have
\begin{lemma}
The map $r\colon\widetilde{M}\to\mathbb{R}^+$ is degree $1$ homogeneous with respect to the action of $\mathbb{R}^+\subseteq\mathbb{C}^*$ on $\widetilde{M}$, i.e.\ for all $s\in\mathbb{R}^+$ and $p\in\widetilde{M}$
\begin{equation}
r(ps)=r(p)s.
\end{equation}
\end{lemma}
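One way to see this is via Lemma \ref{lemma:azioneInfinitesimaXieIxi}: denoting by $a_s\colon\widetilde{M}\to\widetilde{M}$ the action of $s\in\mathbb{R}^+$, the fact that $\xi$ generates this action and is a homothety of factor $2$ gives $(a_s)_*\xi=\xi$ and $a_s^*\widetilde{g}=s^2\widetilde{g}$, whence $r(ps)^2=-\widetilde{g}_{ps}(\xi_{ps},\xi_{ps})=-s^2\widetilde{g}_p(\xi_p,\xi_p)=s^2r(p)^2$.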
As a consequence of this lemma, we can now define a retraction
\begin{align*}
p\colon\widetilde{M}&\longrightarrow S,\qquad u\longmapsto u\frac{1}{r(u)},
\end{align*}
which is well defined since $r(p(u))=r(u\frac{1}{r(u)})=\frac{r(u)}{r(u)}=1$.
Moreover, $p \iota_S=\mathrm{id}_S$ implies the surjectivity of $p$, which allows us to see $p\colon\widetilde{M}\to S$ as a principal $\mathbb{R}^+$-bundle and $\pi_S:=\pi\iota_S\colon S\to M$ as a principal $S^1$-bundle; the composition of the two gives $\pi$.
\begin{lemma}
If $(\pi\colon\widetilde{M}\to M,\nabla)$ is projective special K\"{a}hler, then $\widetilde{M}$ is diffeomorphic to $S\times \mathbb{R}^+$, and moreover
\begin{equation}
\widetilde{g}=r^2p^*g_S-dr^2.
\end{equation}
\end{lemma}
\begin{proof}
Let $a\colon S\times \mathbb{R}^+\to\widetilde{M}$ be the restriction of the principal right action $\widetilde{M}\times\mathbb{R}^+\to\widetilde{M}$ to $S\times \mathbb{R}^+$ and consider also $(p,r)\colon\widetilde{M}\to S\times\mathbb{R}^+$.
These maps are smooth and each is the inverse of the other: if $u\in\widetilde{M}$, then $a(p,r)(u)=a(p(u),r(u))=u\frac{1}{r(u)}r(u)=u$, and for all $(q,s)\in S\times\mathbb{R}^+$, $(p,r)a(q,s)=(p(qs),r(qs))=(q\frac{s}{r(qs)},r(q)s)=(q,s)$.
For the second statement consider the symmetric tensor
\begin{equation}
g'=\frac{1}{r^2}(\widetilde{g}+dr^2).
\end{equation}
We want to prove it is basic, that is horizontal and invariant with respect to the principal $\mathbb{R}^+$-action.
Since there is only one vertical direction, and since $g'$ is symmetric, it is enough to check whether $g'$ vanishes when evaluated on the fundamental vector field $\xi$ in one component.
Using \eqref{eq:dr} we obtain
\begin{align*}
g'(\xi,\cdot)
=\frac{1}{r}(\widetilde{g}(\xi,\cdot)+dr(\xi)dr)
=\frac{1}{r}(-rdr+r dr)
=0.
\end{align*}
And now for the $\mathbb{R}^+$-invariance:
\begin{align*}
\Lie{\xi}g'
&=-2\frac{\Lie{\xi}r}{r^3}(\widetilde{g}+dr^2)+\frac{1}{r^2}(\Lie{\xi}\widetilde{g}+2\Lie{\xi}(dr)dr)\\
&=-2\frac{dr(\xi)}{r^3}(\widetilde{g}+dr^2)+\frac{1}{r^2}(2\widetilde{g}+2(d\iota_\xi dr+\iota_\xi d^2r)dr)\\
&=-2\frac{r}{r^3}(\widetilde{g}+dr^2)+\frac{1}{r^2}(2\widetilde{g}+2dr^2)
=0.
\end{align*}
Therefore $g'$ is basic, which in turn implies it is of the form $p^*g''$ for some tensor $g''\in T_2S$, so that
\begin{equation}
\widetilde{g}=r^2p^*g''-dr^2.
\end{equation}
The proof is ended by the following observation:
\begin{equation*}
g_S
=\iota_S^* \widetilde{g}
=\iota_S^*\left(r^2p^*g''-dr^2\right)
=\iota_S^*p^*g''-\iota_S^*dr^2
=(p\iota_S)^*g''
=g''.
\qedhere
\end{equation*}
\end{proof}
The $\mathbb{C}^*$-bundle $\pi\colon\widetilde{M}\to M$ has a unique principal connection orthogonal to the fibres with respect to $\widetilde{g}$; the connection form can be written as
\begin{equation}\label{eq:C*connection}
\frac{dr}{r}+i\widetilde{\varphi}.
\end{equation}
Explicitly, we can describe $\widetilde{\varphi}$ using the metric:
\begin{equation}
\widetilde{\varphi}=\frac{\widetilde{g}(\widetilde{I}\xi,\cdot)}{\widetilde{g}(\widetilde{I}\xi,\widetilde{I}\xi)}=-\frac{1}{r^2}I\xi^{\flat}=-\frac{1}{r^2}\iota_{\xi}\widetilde{\omega}.
\end{equation}
If we restrict it to $S$, we obtain a connection form $\varphi=\iota_S^*\widetilde{\varphi}=-\iota_S^*(\iota_\xi\widetilde{\omega})$ corresponding to the $S^1$-action on $S$.
Notice that $p^*\varphi=\widetilde{\varphi}$, because the connection form \eqref{eq:C*connection} is right-invariant, so $\widetilde{\varphi}=p^*\varphi'$ for some $\varphi'$, and thus $\varphi=\iota_S^*\widetilde{\varphi}=\iota_S^*p^*\varphi'=(p\iota_S)^*\varphi'=\varphi'$.
The moment map for the action generated by $\widetilde{I}\xi$ is $\mu\colon\widetilde{M}\to \lie{u}(1)\cong\mathbb{R}$ such that $d\mu=\iota_{\widetilde{I}\xi}\omega=-\xi^{\flat}=rdr=d\left(\frac{r^2}{2}\right)$, so up to an additive constant, we can assume
\begin{equation}
\mu=\frac{r^2}{2}.
\end{equation}
Since $S=\mu^{-1}(\frac{1}{2})$ is a level set of the moment map and $M$ is the K\"{a}hler quotient, $\pi_{S}\colon S\to M$ is a pseudo-Riemannian submersion and thus we can write $g_S=\pi_{S}^*g-\varphi^2$.
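As a sanity check on the signs, note that the fundamental vector field $\widetilde{I}\xi$ is tangent to $S$, vertical for $\pi_S$ and satisfies $\varphi(\widetilde{I}\xi)=1$, so
\begin{equation*}
(\pi_S^*g-\varphi^2)(\widetilde{I}\xi,\widetilde{I}\xi)=-1=\widetilde{g}(\widetilde{I}\xi,\widetilde{I}\xi)\big|_S=g_S(\widetilde{I}\xi,\widetilde{I}\xi),
\end{equation*}
in agreement with $g_S=\iota_S^*\widetilde{g}$.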
\begin{prop}
A projective special K\"{a}hler manifold $(\pi\colon\widetilde{M}\to M,\nabla)$ satisfies
\begin{align*}
\widetilde{g}&=r^2\pi^*g-r^2\widetilde{\varphi}^2-dr^2,\\
\widetilde{\omega}&=r^2\pi^*\omega_M+r\widetilde{\varphi}\wedge dr.
\end{align*}
\end{prop}
\begin{proof}
From the previous arguments
\begin{align*}
\widetilde{g}
&=r^2 p^*g_S-dr^2
=r^2p^*(\pi_{S}^*g-\varphi^2)-dr^2\\
&=r^2(\pi_{S}p)^*g-r^2\widetilde{\varphi}^2-dr^2
=r^2\pi^*g-r^2\widetilde{\varphi}^2-dr^2.
\end{align*}
For the K\"{a}hler form it is enough to notice that $\pi$ is holomorphic, $M$ being a K\"{a}hler quotient, and that
\begin{equation*}
(r\widetilde{\varphi})\circ\widetilde{I}
=-\frac{1}{r}\widetilde{I}\xi^{\flat}\widetilde{I}
=-\frac{1}{r}\xi^{\flat}
=dr.
\qedhere
\end{equation*}
\end{proof}
For future reference we give the following
\begin{rmk}\label{rmk:curvatureSbundle}
The curvature of $\varphi$ is computed using Lemma \ref{lemma:azioneInfinitesimaXieIxi}:
\begin{equation}
d\varphi=-d\iota_S^*\iota_{\xi}\widetilde{\omega}=\iota_S^*(-\Lie{\xi}\widetilde{\omega}+\iota_{\xi}d\widetilde{\omega})=-2\iota_S^*\widetilde{\omega}=-2\pi_S^*\omega_M,
\end{equation}
where the last equality holds because the pullback along $\iota_S$ fixes $r=1$ and kills $dr$.
It will also be useful to record that
\begin{equation}
d\widetilde{\varphi}=-2\pi^*\omega_M.
\end{equation}
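Indeed, since $\widetilde{\varphi}=p^*\varphi$ and $\pi=\pi_S p$, this follows by pulling back: $d\widetilde{\varphi}=p^*d\varphi=-2p^*\pi_S^*\omega_M=-2\pi^*\omega_M$.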
\end{rmk}
\section{Lifting the coframe}\label{sec:coframe}
The purpose of this section is to lift a generic unitary coframe on a projective special K\"{a}hler manifold to one on the corresponding conic special K\"{a}hler manifold.
This will enable us to give a more explicit formulation of the Levi-Civita connection and associated curvature tensor on the conic special K\"{a}hler manifold.
In our convention, on a K\"{a}hler manifold $(M,g,I,\omega)$, the Hermitian form is $h=g+i\omega$.
Given a projective special K\"{a}hler manifold $(\pi\colon\widetilde{M}\to M,\nabla)$ and an open subset $U\subseteq M$, consider a unitary coframe $\theta=(\theta^1,\dots,\theta^n)\in \Omega^1(U,\mathbb{C}^n)$ on $U$; we can then build a coframe $\widetilde{\theta}\in \Omega^1(\pi^{-1}(U),\mathbb{C}^{n+1})$ on $\pi^{-1}(U)\subseteq\widetilde{M}$ as follows:
\begin{equation}\label{eq:coframe Mtilde}
\widetilde{\theta}^k=\begin{cases}
r\pi^*\theta^k&\mbox{if }k\le n\\
dr+i r\widetilde{\varphi}&\mbox{if }k=n+1
\end{cases}.
\end{equation}
This coframe is compatible with the $\mathrm{U}(n,1)$-structure because it takes complex values and
\begin{equation}
\sum_{k=1}^n\overline{\widetilde{\theta}^k}\widetilde{\theta}^k-\overline{\widetilde{\theta}^{n+1}}\widetilde{\theta}^{n+1}
=r^2\pi^*\left(\sum_{k=1}^n\overline{\theta^k}\theta^k\right)-dr^2-r^2\widetilde{\varphi}^2
=\widetilde{g}.
\end{equation}
We will denote the dual frame to a given coframe by the same symbol, but with lower indices.
\begin{rmk}
Let $T=\mathbb{C}^{n+1}$ be the standard real representation of $\mathrm{U}(n,1)$, and let $T\otimes_{\mathbb{R}}\mathbb{C} \cong T^{1,0}\oplus T^{0,1}$ be the holomorphic, anti-holomorphic split.
Given a connection on a K\"{a}hler manifold, it can be represented by a connection form $\omega$ with values in $\lie{u}(n,1)$ whose complexification is $\lie{gl}(n+1,\mathbb{C})\cong T^{1,0}\otimes T_{1,0}\oplus T^{0,1}\otimes T_{0,1}$, so we obtain projections in each component, respectively $\omega^{1,0}_{1,0}$ and $\omega^{0,1}_{0,1}$ such that $\omega=\omega^{1,0}_{1,0}+\omega^{0,1}_{0,1}$.
Notice that $\omega^{0,1}_{0,1}=\overline{\omega^{1,0}_{1,0}}$ because $\omega$ comes from a real representation, so giving the first component is equivalent to giving the whole form.
Notice also that $([\![T]\!],I)$, as complex representation, is isomorphic to $T^{1,0}$ and the component $A^{1,0}_{1,0}$ of an endomorphism $A$ gives the corresponding endomorphism of $T^{1,0}$.
We will often present connection forms by giving only the $T^{1,0}_{1,0}$ component.
We will call $\mathbb{R}E$ the projection from the complex tensor algebra to the real representation, defined so that $\mathbb{R}E(\alpha)=\alpha+\overline{\alpha}$, where the conjugation is the one induced by the real structure.
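For instance, with respect to a unitary coframe $\theta$ on $M$, one can write the identity endomorphism and the complex structure as
\begin{equation*}
\mathrm{id}=\mathbb{R}E\Big(\sum_k\theta^k\otimes\theta_k\Big),\qquad I=\mathbb{R}E\Big(i\sum_k\theta_k\otimes\theta^k\Big);
\end{equation*}
the analogous expressions on $\widetilde{M}$ will be used in the computations below.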
\end{rmk}
\begin{prop}\label{prop:conicLC}
Let $(\pi\colon\widetilde{M}\to M,\nabla)$ be a projective special K\"{a}hler manifold and let $(U,\theta)$ be a local unitary coframe on $M$, lifted as in \eqref{eq:coframe Mtilde} to a coframe $\widetilde{\theta}$ adapted to the $\mathrm{U}(n,1)$-structure on $\widetilde{M}$.
With respect to $\widetilde{\theta}$, the Levi-Civita connection form on $\widetilde{M}$ is represented by
\begin{equation}
\widetilde{\omega}^{LC}
=\begin{pmatrix}
\pi^*\omega^{LC}& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0\\
\hline
0& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0
\end{pmatrix}+\frac{1}{r}\begin{pmatrix}
i\operatorname{Im}\left(\widetilde{\theta}^{n+1}\right)&&0& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&\widetilde{\theta}^1\\
&\ddots&& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&\vdots\\
0&&i\operatorname{Im}\left(\widetilde{\theta}^{n+1}\right)& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&\widetilde{\theta}^n\\
\hline
\overline{\widetilde{\theta}^1}&\cdots&\overline{\widetilde{\theta}^n}& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&i\operatorname{Im}\left(\widetilde{\theta}^{n+1}\right)
\end{pmatrix},
\end{equation}
that is
\begin{equation}\label{eq:LeviCivita}
\widetilde{\omega}^{LC}
=\begin{pmatrix}
\pi^*\omega^{LC}+i\widetilde{\varphi}\otimes I_n& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&\pi^*\theta\\
\hline
\pi^*\theta^{\star}& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&i\widetilde{\varphi}
\end{pmatrix}
\end{equation}
and its curvature form is
\begin{equation}
\widetilde{\Omega}^{LC}=\begin{pmatrix}
\pi^*(\Omega^{LC}+\theta\wedge\theta^*-2i\omega_M \otimes\mathrm{id})& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0\\
\hline
0& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0
\end{pmatrix}.
\end{equation}
\end{prop}
\begin{proof}
The connection form \eqref{eq:LeviCivita} is metric if and only if the matrix is anti-Hermitian with respect to $\widetilde{g}$; since $\omega^{LC}$ is anti-Hermitian with respect to $g$, we get
\begin{align*}
(\widetilde{\omega}^{LC})^{\star}
=\begin{pmatrix}
\pi^*(\omega^{LC})^{\star}-i\widetilde{\varphi}\otimes I_n& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-\pi^*\theta\\
\hline
-\pi^*\theta^{\star}& \hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-i\widetilde{\varphi}
\end{pmatrix}
=-\widetilde{\omega}^{LC}.
\end{align*}
The torsion form of this connection is $\widetilde{\Theta}^{LC}=d\widetilde{\theta}+\widetilde{\omega}^{LC}\wedge\widetilde{\theta}$, so for $1\le k\le n$
\begin{align*}
\left(\widetilde{\Theta}^{LC}\right)^k
&=d\widetilde{\theta}^k+\sum_{j=1}^{n}\left(\widetilde{\omega}^{LC}\right)^k_j\wedge
\widetilde{\theta}^j+\left(\widetilde{\omega}^{LC}\right)^k_{n+1}\wedge\widetilde{\theta}^{n+1}\\
&=d\left(r\pi^*\theta^k\right)+\sum_{j=1}^{n}\left(\pi^*(\omega^{LC})^k_j+i\widetilde{\varphi}\delta^k_j\right)\wedge
\left(r\pi^*\theta^j\right)+\pi^*\theta^k\wedge\widetilde{\theta}^{n+1}\\
&=r\pi^*(\Theta^{LC})^k+(dr+ir\widetilde{\varphi})\wedge \pi^*\theta^k+\pi^*\theta^k\wedge\widetilde{\theta}^{n+1}\\
&=0+\widetilde{\theta}^{n+1}\wedge \pi^*\theta^k+\pi^*\theta^k\wedge\widetilde{\theta}^{n+1}
=0.
\end{align*}
In the last component
\begin{align*}
(\widetilde{\Theta}^{LC})^{n+1}
&=d\widetilde{\theta}^{n+1}+\sum_{j=1}^n \pi^*\overline{\theta^j}\wedge r\pi^*\theta^j+i\widetilde{\varphi}\wedge \widetilde{\theta}^{n+1}\\
&=d(dr+ir\widetilde{\varphi})+r\pi^*\left(\sum_{j=1}^n\overline{\theta^j}\wedge\theta^j\right)+i\widetilde{\varphi}\wedge \widetilde{\theta}^{n+1}\\
&=idr\wedge\widetilde{\varphi}+ir(d\widetilde{\varphi}+2\pi^*\omega_M)+i\widetilde{\varphi}\wedge dr
=0.
\end{align*}
Since $\widetilde{\omega}^{LC}$ is metric and torsion-free, by uniqueness it must be the Levi-Civita connection form.
Now let us compute its curvature form $\widetilde{\Omega}^{LC}=d\widetilde{\omega}^{LC}+\widetilde{\omega}^{LC}\wedge\widetilde{\omega}^{LC}$.
For $1\le k,h\le n$ we have
\begin{align*}
\left(\widetilde{\Omega}^{LC}\right)^h_k
&=d(\widetilde{\omega}^{LC})^h_k+(\widetilde{\omega}^{LC})^h_j\wedge(\widetilde{\omega}^{LC})^j_k\\
&=d\pi^*(\omega^{LC})^h_k+id\widetilde{\varphi}\delta^h_k+\sum_{j=1}^n(\pi^*(\omega^{LC})^h_j+i\widetilde{\varphi}\delta^h_j)\wedge (\pi^*(\omega^{LC})^j_k+i\widetilde{\varphi}\delta^j_k)\\
&\quad +\pi^*\theta^h\wedge\pi^*\overline{\theta^k}\\
&=\pi^*d(\omega^{LC})^h_k-2i\pi^*\omega_M\delta^h_k+\pi^*\left((\omega^{LC})^h_j\wedge (\omega^{LC})^j_k\right)\\
&\quad +i\widetilde{\varphi}\wedge \pi^*(\omega^{LC})^h_k+\pi^*(\omega^{LC})^h_k\wedge i\widetilde{\varphi}-\widetilde{\varphi}\wedge \widetilde{\varphi}\delta^h_k+\pi^*\theta^h\wedge\pi^*\overline{\theta^k}\\
&=\pi^*(\Omega^{LC})^h_k-2i\pi^*\omega_M\delta^h_k+\pi^*(\theta^h\wedge\overline{\theta^k})
\end{align*}
and
\begin{align*}
\left(\widetilde{\Omega}^{LC}\right)^h_{n+1}
&=d\pi^*\theta^h+\sum_{j=1}^n(\pi^*(\omega^{LC})^h_j+i\widetilde{\varphi}\delta^h_j)\wedge \pi^*\theta^j+\pi^*\theta^h\wedge i\widetilde{\varphi}=\pi^*\left(\Theta^{LC}\right)^h\\*
&=0.
\end{align*}
Since the curvature form must also be anti-Hermitian, we also get
\begin{align*}
\left(\widetilde{\Omega}^{LC}\right)^{n+1}_k
&=-\left(\left(\widetilde{\Omega}^{LC}\right)^\star\right)^{n+1}_k
=\overline{\left(\widetilde{\Omega}^{LC}\right)^k_{n+1}}
=0.
\end{align*}
Finally,
\begin{equation*}
\left(\widetilde{\Omega}^{LC}\right)^{n+1}_{n+1}
=id\widetilde{\varphi}+\sum_{j=1}^n\pi^*\overline{\theta^j}\wedge\pi^*\theta^j-\widetilde{\varphi}\wedge \widetilde{\varphi}
=id\widetilde{\varphi}+2i\pi^*\omega_M
=0.\qedhere
\end{equation*}
\end{proof}
\begin{rmk}\label{rmk:curvatureProj}
The tensor $\theta\wedge\theta^{\star}-2i\omega_M\otimes \mathrm{id}$, or explicitly
\begin{equation}
\Omega_{\mathbb{P}^n_{\mathbb{C}}}:=\mathbb{R}E\left((\theta^k\wedge\overline{\theta^h})\otimes \theta_k\otimes \theta^{h}-(\overline{\theta^k}\wedge\theta^k)\otimes \theta_h\otimes \theta^h\right)
\end{equation}
is a curvature tensor of the complex projective space of dimension $n$; in fact, $\Omega_{\mathbb{P}_{\mathbb{C}}^n}$ is the curvature with respect to the Fubini-Study metric (see for example \cite[II, p.\ 277]{KN}).
In order to verify that $\Omega_{\mathbb{P}^n_{\mathbb{C}}}$ is exactly the curvature of the Fubini-Study metric rather than a multiple of it, we compute the Ricci tensor:
\begin{equation}\label{eq:RicciProj}
\mathbb{R}ic_{\mathbb{P}^n_{\mathbb{C}}}
=\mathbb{R}E\left(n\theta^h\otimes \overline{\theta^h}+\delta_{h,k}\theta^h\otimes\overline{\theta^k}\right)
=\mathbb{R}E\left((n+1)h\right)
=2(n+1)g.
\end{equation}
Then,
\begin{equation}\label{eq:ScalProj}
\mathrm{scal}_{\mathbb{P}_{\mathbb{C}}^n}=2(n+1).
\end{equation}
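Here the scalar curvature is normalised as the trace of the Ricci tensor, with an index raised by the metric, divided by the real dimension; the same normalisation is used to obtain \eqref{eq:ScalCurv} below. With the convention $\mathrm{scal}=\operatorname{tr}_g\mathbb{R}ic$, one would instead find $4n(n+1)$ for the Fubini-Study metric.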
Thus $\Omega_{\mathbb{P}^n_{\mathbb{C}}}$ corresponds exactly to the curvature of $\mathbb{P}_{\mathbb{C}}^n$ with the Fubini-Study metric.
\end{rmk}
Now, whenever we have a smooth map $f\colon M\to N$ between pseudo-Riemannian manifolds, we can extend the pull-back $f^*\colon T_\bullet N\to T_\bullet M$ on the covariant tensor algebra to the whole tensor algebra, using the musical isomorphisms in each contravariant component.
Explicitly, for a vector field $X$ on $N$, we define $f^*X:=\sharp f^*\flat X=(f^*X^{\flat})_\sharp$.
Notice that this extension of the pull-back is still functorial, since if $f\colon M\to N$, $g\colon N\to L$ are smooth maps, then $f^*g^*X=\sharp f^*\flat \sharp g^*\flat X=\sharp f^* g^*\flat X=\sharp (gf)^*\flat X=(gf)^* X$.
Since $\widetilde{M}$ and $M$ are pseudo-Riemannian manifolds, we have $\pi^*\colon T^{\bullet}_{\bullet}M\to T^{\bullet}_{\bullet}\widetilde{M}$, and in particular, for $1\le k\le n$ we have
\begin{align*}
\pi^*\theta_k
=(\pi^*\theta_k^{\flat})_\sharp
=\frac{1}{2}(\pi^*\overline{\theta^k})_\sharp
=\frac{1}{2r}(\overline{\widetilde{\theta}^k})_\sharp
=\frac{1}{r}\widetilde{\theta}_k.
\end{align*}
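Here we used that, for $1\le k\le n$, $\flat\theta_k=g(\theta_k,\cdot)=\tfrac{1}{2}\overline{\theta^k}$ and, analogously, $\widetilde{g}(\widetilde{\theta}_k,\cdot)=\tfrac{1}{2}\overline{\widetilde{\theta}^k}$; both identities follow from the expression of the metrics in a unitary coframe with the convention $h=g+i\omega$ fixed above.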
\begin{rmk}\label{rmk:Levi-Civita_CSK}
In this notation,
\begin{equation}
\widetilde{\Omega}^{LC}=r^2\pi^*(\Omega^{LC}+\Omega_{\mathbb{P}_{\mathbb{C}}^n}).
\end{equation}
\end{rmk}
\section{Deviance}
In this section we will continue the analysis of the tensor $\widetilde{\eta}$ started in section \ref{sec:differenceTensor}.
The aim is to reduce it to a locally defined tensor on $M$ that we call deviance.
We will then use it to give an explicit local description of the Ricci tensor and the scalar curvature.
\begin{lemma}\label{lemma:etaHorizontal}
On a projective special K\"{a}hler manifold $(\pi\colon\widetilde{M}\to M,\nabla)$, if $\widetilde{\eta}_X Y=\nabla_X Y-\widetilde{\nabla}^{LC}_X Y$, then $\flat_2\widetilde{\eta}$ is horizontal with respect to $\pi$.
In other words, $\flat_2(\widetilde{\eta})$ is a section of $\pi^*[\![S_{3,0}M]\!]\subset [\![S_{3,0}\widetilde{M}]\!]$.
Explicitly, $\widetilde{\eta}_v$, $\widetilde{\eta} v$ and $\widetilde{g}(\widetilde{\eta}, v)$ vanish for all $v\in\langle \xi,\widetilde{I}\xi\rangle$.
\end{lemma}
\begin{proof}
First notice that $\widetilde{\eta} (\xi)=\nabla \xi- \widetilde{\nabla}^{LC} \xi=0$, so by symmetry $\widetilde{\eta}_{\xi}=0$ and $\widetilde{g}(\widetilde{\eta},\xi)=0$; hence $\flat_2(\widetilde{\eta})$ vanishes in each component when evaluated at $\xi$.
From this fact and \eqref{eq:nabla I}, we also deduce $\widetilde{\eta} (\widetilde{I}\xi)=\widetilde{I}\widetilde{\eta} (\xi)+[\widetilde{\eta},\widetilde{I}] \xi=0-2\widetilde{I} \widetilde{\eta} (\xi)=0$.
By symmetry, we conclude that $\flat_2\widetilde{\eta}$ vanishes in every component on $\widetilde{I}\xi$.
Linearity then completes the proof.
\end{proof}
\begin{lemma}\label{lemma:LieDerivativesEta}
Let $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$ be a conic special K\"{a}hler manifold and let $\widetilde{\eta}$ be as above. Then
\begin{enumerate}
\item $\Lie{\xi}\widetilde{\eta}=0$;
\item $\Lie{\widetilde{I}\xi}\widetilde{\eta}=-2\widetilde{I}\widetilde{\eta}$.
\end{enumerate}
\end{lemma}
\begin{proof}
The proof relies on a general formula satisfied by any torsion-free connection $D$ (see e.g.\ \cite[equation (3.1), p.\ 1336]{Swann2015}), namely:
\begin{align*}
\Lie{A}(D_X Y)&-D_{\Lie{A}X} Y-D_X \Lie{A}Y=\Omega^D(A,X)Y-D_{D_{X}Y}A+D_{X}D_{Y} A.
\end{align*}
\begin{enumerate}
\item We check the formula on vector fields $X,Y\in\field{\widetilde{M}}$
\begin{align*}
(\Lie{\xi}\widetilde{\eta})_{X} Y
&=\Lie{\xi}(\widetilde{\eta}_{X} Y)-\widetilde{\eta}_{\Lie{\xi} X}Y-\widetilde{\eta}_{X}\Lie{\xi} Y\\
&=\Lie{\xi}\nabla_X Y-\Lie{\xi}\widetilde{\nabla}^{LC}_X Y-\nabla_{\Lie{\xi}X} Y+\widetilde{\nabla}^{LC}_{\Lie{\xi}X} Y\\
&\quad -\nabla_X \Lie{\xi}Y+\widetilde{\nabla}^{LC}_X \Lie{\xi}Y\\
&=\Omega^{\nabla}(\xi,X)Y-\nabla_{\nabla_{X}Y}\xi+\nabla_{X}\nabla_{Y} \xi-\widetilde{\Omega}^{LC}(\xi,X)Y\\
&\quad +\widetilde{\nabla}^{LC}_{\widetilde{\nabla}^{LC}_{X}Y}\xi-\widetilde{\nabla}^{LC}_{X}\widetilde{\nabla}^{LC}_{Y} \xi\\
&= -\nabla_{X}Y+\nabla_{X}Y-\widetilde{\Omega}^{LC}(\xi,X)Y+\widetilde{\nabla}^{LC}_{X}Y-\widetilde{\nabla}^{LC}_{X}Y\\
&=-\widetilde{\Omega}^{LC}(\xi,X)Y.
\end{align*}
Lowering the contravariant index of the curvature form, for $Z\in\field{\widetilde{M}}$, thanks to the symmetries of the Riemannian tensor we obtain
\begin{align*}
\widetilde{g}\left(\widetilde{\Omega}^{LC}(\xi,X)Y,Z\right)
&=\widetilde{g}\left(\widetilde{\Omega}^{LC}(Y,Z)\xi,X\right)\\
&=\widetilde{g}\left(\widetilde{\nabla}^{LC}_Y \widetilde{\nabla}^{LC}_Z\xi-\widetilde{\nabla}^{LC}_Z \widetilde{\nabla}^{LC}_Y\xi-\widetilde{\nabla}^{LC}_{[Y,Z]}\xi,X\right)\\
&=\widetilde{g}\left(\widetilde{\nabla}^{LC}_Y Z-\widetilde{\nabla}^{LC}_Z Y-[Y,Z],X\right)\\
&=\widetilde{g}\left(\Theta^{LC}(Y,Z),X\right)
=0,
\end{align*}
proving that $\widetilde{\Omega}^{LC}(\xi,X)Y=0$, which implies the statement.
\item As before,
\begin{align*}
(\Lie{\widetilde{I}\xi}\widetilde{\eta})_{X} Y
&=\Omega^{\nabla}(\widetilde{I}\xi,X)Y-\nabla_{\nabla_{X}Y}(\widetilde{I}\xi)+\nabla_{X}\nabla_{Y} (\widetilde{I}\xi)-\widetilde{\Omega}^{LC}(\widetilde{I}\xi,X)Y\\
&\quad +\widetilde{\nabla}^{LC}_{\widetilde{\nabla}^{LC}_{X}Y}(\widetilde{I}\xi)-\widetilde{\nabla}^{LC}_{X}\widetilde{\nabla}^{LC}_{Y} (\widetilde{I}\xi)\\
&=-\widetilde{I}\nabla_{X}Y+\nabla_{X}(\widetilde{I}Y)-\widetilde{\Omega}^{LC}(\widetilde{I}\xi,X)Y+\widetilde{I}\widetilde{\nabla}^{LC}_{X}Y-\widetilde{\nabla}^{LC}_{X}(\widetilde{I}Y)\\
&=(\nabla \widetilde{I})(X,Y)-\widetilde{\Omega}^{LC}(\widetilde{I}\xi,X)Y.
\end{align*}
Proceeding as in the previous point
\begin{align*}
\widetilde{g}&\left(\widetilde{\Omega}^{LC}(\widetilde{I}\xi,X)Y,Z\right)
=\widetilde{g}\left(\widetilde{\Omega}^{LC}(Y,Z)(\widetilde{I}\xi),X\right)\\
&=\widetilde{g}\left(\widetilde{\nabla}^{LC}_Y \widetilde{\nabla}^{LC}_Z(\widetilde{I}\xi)-\widetilde{\nabla}^{LC}_Z \widetilde{\nabla}^{LC}_Y(\widetilde{I}\xi)-\widetilde{\nabla}^{LC}_{[Y,Z]}(\widetilde{I}\xi),X\right)\\
&=\widetilde{g}\left(\widetilde{I}\widetilde{\Omega}^{LC}(Y,Z)\xi,X\right)
=-\widetilde{g}\left(\widetilde{\Omega}^{LC}(Y,Z)\xi,IX\right)\\
&=-\widetilde{g}\left(\widetilde{\Omega}^{LC}(\xi,\widetilde{I}X)Y,Z\right).
\end{align*}
This quantity is zero as shown in the previous point, so it follows that $\Lie{\widetilde{I}\xi}\widetilde{\eta}=\nabla \widetilde{I}$, so \eqref{eq:nabla I} ends the proof.\qedhere
\end{enumerate}
\end{proof}
We can now use a coframe $\widetilde{\theta}$ as in section \ref{sec:coframe} in order to progress in the study of $\widetilde{\eta}$.
We then write
\begin{equation}
\widetilde{\eta}=\mathbb{R}E(\widetilde{\eta}^j_{k,h}\widetilde{\theta}^k\otimes \overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h).
\end{equation}
Since every operator we use is $\mathbb{C}$-linear, we can study only the component in $T_{1,0} \otimes T^{0,1}\otimes T_{1,0}$, that is $\widetilde{\eta}^j_{k,h}\widetilde{\theta}^k\otimes \overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h$.
Because of Lemma \ref{lemma:etaHorizontal}, the coefficients $\widetilde{\eta}^{j}_{k,h}$ vanish if any one of the indices is $n+1$; moreover, $\widetilde{\eta}^j_{k,h}$ is completely symmetric in its indices.
The last statement follows from the fact that $\flat_2\widetilde{\eta}$ is a tensor in $\pi^*S_{3,0}M$, and such tensors are expressed using only $\pi^*\theta^k$ for $1\le k\le n$, where the metric is positive definite, and thus $\flat_2$ does not change the signs of the coefficients of $\widetilde{\eta}$.
We are now ready to reduce $\widetilde{\eta}$ to an object defined locally on the base space.
\begin{prop}\label{prop:ThetatildeWrtTheta}
Given a projective special K\"{a}hler $(\pi\colon\widetilde{M}\to M,\nabla)$ and a section $s\colon U\to S\subseteq\widetilde{M}$ inducing a trivialisation $(\pi|_{\pi^{-1}(U)},z)\colon \pi^{-1}(U)\to U\times\mathbb{C}^*$, there exists a tensor $\eta$ in $T_{1,0}U\otimes T^{0,1}U\otimes T_{1,0}U$ such that $\flat_2 \eta$ is a tensor in $S_{3,0}U$ and
\begin{equation}
\widetilde{\eta}=\mathbb{R}E(z^2\pi^*\eta)=2r^2\cos(2\vartheta)\operatorname{Re}\pi^*\eta-2r^2\sin(2\vartheta)\operatorname{Im}\pi^*\eta
\end{equation}
where $z=r e^{i\vartheta}$.
\end{prop}
\begin{proof}
For every point $p\in M$ we can find a local unitary coframe $\theta$ defined on an open set containing $p$, and the corresponding coframe $\widetilde{\theta}$ on $\widetilde{M}$ as in \eqref{eq:coframe Mtilde}.
For the coming arguments we first compute the following Lie derivatives
\begin{align*}
\Lie{\xi}\widetilde{\theta^k}
&=d\iota_{\xi}(r\pi^*\theta^k)+\iota_{\xi}d(r\pi^*\theta^k)
=0+\iota_{\xi}(dr\wedge\pi^*\theta^k)+r\iota_{\xi}d\pi^*\theta^k\\*
&=dr(\xi) \pi^*\theta^k+r\iota_{\xi}\pi^*d\theta^k
=r \pi^*\theta^k+0
=\widetilde{\theta}^k;\\
\\
\Lie{\xi}\widetilde{\theta}_k
&=\widetilde{g}(\Lie{\xi}\widetilde{\theta}_k,\cdot)_{\sharp}
=\Lie{\xi}\left(\widetilde{g}(\widetilde{\theta}_k,\cdot)\right)_{\sharp}-\left(\Lie{\xi}\widetilde{g}(\widetilde{\theta}_k,\cdot)\right)_{\sharp}\\
&=\frac{1}{2}\left(\Lie{\xi}\overline{\widetilde{\theta}^k}\right)_{\sharp}-2\widetilde{g}(\widetilde{\theta}_k,\cdot)_{\sharp}
=\frac{1}{2}\overline{\widetilde{\theta}^k}_{\sharp}-2\widetilde{\theta}_k
=-\widetilde{\theta}_k;\\
\\
\Lie{\widetilde{I}\xi}\widetilde{\theta^k}
&=d\iota_{\widetilde{I}\xi}\widetilde{\theta}^k+\iota_{\widetilde{I}\xi}d\widetilde{\theta}^k
=d\iota_{\widetilde{I}\xi}(r\pi^*\theta^k)+\iota_{\widetilde{I}\xi}d(r\pi^*\theta^k)\\
&=0+r\iota_{\widetilde{I}\xi}d\pi^*\theta^k
=r\iota_{\widetilde{I}\xi}\pi^*d\theta^k
=0;\\
\\
\Lie{\widetilde{I}\xi}\widetilde{\theta}_k
&=\widetilde{g}(\Lie{\widetilde{I}\xi}\widetilde{\theta}_k,\cdot)_{\sharp}
=\Lie{\widetilde{I}\xi}\left(\widetilde{g}(\widetilde{\theta}_k,\cdot)\right)_{\sharp}
=\frac{1}{2}\left(\Lie{\widetilde{I}\xi}\overline{\widetilde{\theta}^k}\right)_{\sharp}
=0.
\end{align*}
Lemma \ref{lemma:LieDerivativesEta} implies
\begin{align*}
0=\Lie{\xi}\widetilde{\eta}
&=\Lie{\xi}\mathbb{R}E\left(\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right)\\
&=\mathbb{R}E\left(\Lie{\xi}\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h
+\widetilde{\eta}^{j}_{k,h}\Lie{\xi}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right.\\
&\quad +\left.\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\Lie{\xi}\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h
+\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \Lie{\xi}\widetilde{\theta}^h\right)\\
&=\mathbb{R}E\left(\Lie{\xi}\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h
+\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right)\\
&=\mathbb{R}E\left(\left(\Lie{\xi}\widetilde{\eta}^{j}_{k,h}+\widetilde{\eta}^{j}_{k,h}\right)\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right)
\end{align*}
and
\begin{align*}
0=\Lie{\widetilde{I}\xi}\widetilde{\eta}+2\widetilde{I}\widetilde{\eta}
&=\Lie{\widetilde{I}\xi}\mathbb{R}E\left(\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right)+\mathbb{R}E\left(2\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes \widetilde{I}\left(\overline{\widetilde{\theta}_j}\right)\otimes \widetilde{\theta}^h\right)\\
&=\mathbb{R}E\left(\Lie{\widetilde{I}\xi}\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h
-2i\widetilde{\eta}^{j}_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right)\\
&=\mathbb{R}E\left(\left(\Lie{\widetilde{I}\xi}\widetilde{\eta}^{j}_{k,h}-2i\widetilde{\eta}^{j}_{k,h}\right)\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes \widetilde{\theta}^h\right).
\end{align*}
Independent components must vanish, so we obtain a family of differential equations for $1\le j,k,h\le n$
\begin{equation}\label{eq:EqDiffEtatilde}
\begin{cases}
\Lie{\xi}\widetilde{\eta}^{j}_{k,h}=-\widetilde{\eta}^{j}_{k,h}\\
\Lie{\widetilde{I}\xi}\widetilde{\eta}^{j}_{k,h}=2i\widetilde{\eta}^{j}_{k,h}
\end{cases}.
\end{equation}
We define $\eta$ as the component of $s^*\widetilde{\eta}$ in $T_{1,0}M\otimes T^{0,1}M\otimes T_{1,0}M$, so that $\mathbb{R}E(\eta)=s^*\widetilde{\eta}$.
Notice that since $\pi s=\mathrm{id}_M$, the pullbacks satisfy $s^*\pi^*=\mathrm{id}_{T^{\bullet}_{\bullet}M}$, so
\begin{align*}
s^*\widetilde{\eta}
&=s^*\mathbb{R}E(\widetilde{\eta}^j_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes\widetilde{\theta}^h)
=\mathbb{R}E(s^*(r^3\widetilde{\eta}^j_{k,h}\pi^*\theta^k\otimes\pi^*\overline{\theta_j}\otimes\pi^*\theta^h))\\
&=\mathbb{R}E((r\circ s)^3(\widetilde{\eta}^j_{k,h}\circ s) s^*\pi^*\theta^k\otimes s^*\pi^*\overline{\theta_j}\otimes s^*\pi^*\theta^h)\\
&=\mathbb{R}E((\widetilde{\eta}^j_{k,h}\circ s) \theta^k\otimes \overline{\theta_j}\otimes \theta^h).
\end{align*}
Thus $\eta=s^*\widetilde{\eta}^j_{k,h}\theta^k\otimes \overline{\theta_j}\otimes \theta^h$ and we define $\eta^j_{k,h}:=s^*\widetilde{\eta}^j_{k,h}$.
Now we will use \eqref{eq:EqDiffEtatilde} to find $\widetilde{\eta}^j_{k,h}$ at a point of $\pi^{-1}(U)$.
For a fixed $u\in U$, we define the function $f\colon \mathbb{R}\to\mathbb{C}$ such that $f(t):=\widetilde{\eta}^j_{k,h}(s(u)e^t)$ and compute its derivative at $t_0\in\mathbb{R}$.
\begin{align*}
\frac{d}{dt}f|_{t_0}
&=\frac{d}{dt}\widetilde{\eta}^j_{k,h}(s(u)e^t)|_{t=t_0}
=\frac{d}{dt}\widetilde{\eta}^j_{k,h}(s(u)e^{t_0+t})|_{t=0}
=\frac{d}{dt}\widetilde{\eta}^j_{k,h}(\phi_{\xi}^{t}(s(u)e^{t_0}))|_{t=0}\\
&=(\Lie{\xi}\widetilde{\eta}^j_{k,h})(s(u)e^{t_0})
=-\widetilde{\eta}^j_{k,h}(s(u)e^{t_0})
=-f(t_0).
\end{align*}
Moreover, $f(0)=\widetilde{\eta}^j_{k,h}(s(u))=\eta^j_{k,h}(u)$, so $f$ satisfies the following initial value problem
\begin{equation}
\begin{cases}
f'=-f\\
f(0)=\eta^j_{k,h}(u)
\end{cases}
\end{equation}
which has a unique solution, that is $f(t)=\eta^j_{k,h}(u)e^{-t}$.
This means that $\widetilde{\eta}^j_{k,h}(s(u)e^t)=\eta^j_{k,h}(u)e^{-t}$ or equivalently, for all $\rho\in\mathbb{R}^+$ we have $\widetilde{\eta}^j_{k,h}(s(u)\rho)=\frac{1}{\rho}\eta^j_{k,h}(u)=(\frac{1}{r}\pi^*\eta^j_{k,h})(s(u)\rho)$.
Similarly, consider the function $f\colon \mathbb{R}\to\mathbb{C}$ such that $f(t):=\widetilde{\eta}^j_{k,h}(s(u)\rho e^{it})$ and compute its derivative at $t_0\in\mathbb{R}$.
\begin{align*}
\frac{d}{dt}f|_{t_0}
&=\frac{d}{dt}\widetilde{\eta}^j_{k,h}(s(u)\rho e^{it})|_{t=t_0}
=\frac{d}{dt}\widetilde{\eta}^j_{k,h}(s(u)\rho e^{it_0+it})|_{t=0}\\
&=\frac{d}{dt}\widetilde{\eta}^j_{k,h}(\phi_{\widetilde{I}\xi}^{t}(s(u)\rho e^{it_0}))|_{t=0}
=(\Lie{\widetilde{I}\xi}\widetilde{\eta}^j_{k,h})(s(u)\rho e^{it_0})\\
&=2i\widetilde{\eta}^j_{k,h}(s(u)\rho e^{it_0})
=2if(t_0).
\end{align*}
This time, $f(0)=\widetilde{\eta}^j_{k,h}(s(u)\rho)=\frac{1}{\rho}\eta^j_{k,h}(u)$, so $f$ satisfies
\begin{equation}
\begin{cases}
f'=2if\\
f(0)=\frac{1}{\rho}\eta^j_{k,h}(u)
\end{cases}.
\end{equation}
Its unique solution is $f(t)=\eta^j_{k,h}(u)\frac{e^{2it}}{\rho}$, which implies
\begin{equation}
\widetilde{\eta}^j_{k,h}(s(u)\rho e^{it})=\eta^j_{k,h}(u)\frac{e^{2it}}{\rho}=\left(\frac{\pi^*\eta^j_{k,h}}{r^3}\right)(s(u)\rho e^{it})\rho^2 e^{2it}.
\end{equation}
Now let $z\colon \pi^{-1}(U)\to \mathbb{C}^*$ be as in the statement; then in particular, for all $w\in \pi^{-1}(u)$, we have $w=s(u)z(w)$.
Then $\widetilde{\eta}^j_{k,h}(w)=z(w)^2\frac{\pi^*\eta^j_{k,h}}{r^3}(w)$.
So finally we have
\begin{align*}
\widetilde{\eta}
&=\mathbb{R}E(\widetilde{\eta}^j_{k,h}\widetilde{\theta}^k\otimes\overline{\widetilde{\theta}_j}\otimes\widetilde{\theta}^h)
=\mathbb{R}E\left(z^2\frac{\pi^*\eta^j_{k,h}}{r^3}
(r\pi^*\theta^k\otimes r\pi^*\overline{\theta_j}\otimes r\pi^*\theta^h)\right)\\
&=\mathbb{R}E(z^2\pi^*\eta^j_{k,h}
\pi^*\theta^k\otimes \pi^*\overline{\theta_j}\otimes \pi^*\theta^h)
=\mathbb{R}E(z^2\pi^*\eta).
\qedhere
\end{align*}
\end{proof}
\begin{defi}
Given a section $s\colon U\to S$, with $U$ an open subset of $M$, we will call the corresponding tensor $\eta$ found in Proposition \ref{prop:ThetatildeWrtTheta} the \emph{deviance tensor} with respect to $s$.
\end{defi}
We can give a more global formulation of Proposition \ref{prop:ThetatildeWrtTheta} in the following terms
\begin{prop}\label{prop:FibratoPrinC}
Given a projective special K\"{a}hler manifold $(\pi\colon \widetilde{M}\to M,\nabla)$, there exists a map $\gamma\colon \widetilde{M}\to \sharp_2 S_{3,0}M\subset T_{1,0}M\otimes T^{0,1}M\otimes T_{1,0}M$ of bundles over $M$, such that $\gamma(ua)=a^2\gamma(u)$ and for every local section $s\colon U\to S\subset \widetilde{M}$, the deviance induced by $s$ is $\eta=\gamma\circ s$.
Let $L:=\widetilde{M}\times_{\mathbb{C}^*} \mathbb{C}$, then $\gamma$ can be identified with a homomorphism of complex vector bundles $\widehat{\gamma}\colon L\otimes L\to \sharp_2 S_{3,0}M$ such that $\gamma (u)=\widehat{\gamma}([u,1]\otimes [u, 1])$.
\end{prop}
\begin{proof}
Let $u\in\widetilde{M}$; then there exist an open neighbourhood $U\subseteq M$ of $\pi(u)$ and a local trivialisation $(\pi|_{\pi^{-1}(U)},z)\colon \pi^{-1}(U)\to U\times\mathbb{C}^*$ induced by a section $s\colon U\to S$, so that for all $w\in \pi^{-1}(U)$ we have $w=s(\pi(w))z(w)$.
Let now $\eta\colon U\to \sharp_2 S_{3,0}M$ be the deviance corresponding to $s$; we define $\gamma(u):=z(u)^2\eta(p)$ where $p=\pi(u)$.
This definition is independent of the choice of $s$.
In order to prove it, take another section $s'\colon U'\to S$ with $p\in U'$, with corresponding $z'$ and $\eta'$; then, on $U\cap U'$, there is a map $c:=z\circ s'\colon U\cap U'\to \mathbb{C}$ whose image lies in $S^1$, as both $s$ and $s'$ are sections of $S$.
By definition, $s'=s\cdot c$.
Since $sz=s'z'$, $z(u)=z(s'(p)z'(u))=z(s'(p))z'(u)=c(p)z'(u)$, so $z=z'\pi^*c$.
Now, by construction $\mathbb{R}E(z'^2\pi^*\eta')=\widetilde{\eta}=\mathbb{R}E(z^2\pi^*\eta)=\mathbb{R}E(z'^2\pi^*c^2 \pi^*\eta')$, so $\eta'=c^2\eta$.
Thus $z(u)^2\eta(p)=z'(u)^2c(p)^2\eta(p)=z'(u)^2\eta'(p)$ and thus $\gamma$ is well defined.
Moreover, $\gamma(ua)=z(ua)^2\eta(\pi(ua))=z(u)^2 a^2\eta(p)=a^2\gamma(u)$.
We can define the homomorphism $L\otimes L\to \sharp_2S_{3,0}M$ locally: given a section $s\colon U\to S$, we map $[u,w]\otimes [u',w']$ to $z(u)z(u')ww'\cdot \eta^s_p$ where $p=\pi(u)=\pi(u')$.
This map does not depend on the choice of the section, as one can see from the relations above, and it is also independent of the representatives chosen for these classes; for the first factor, for example, $z(ua)w=z(u)aw$.
This map commutes with the projections on $M$ and it is $\mathbb{C}$-linear on the fibres, so it is a complex vector bundle map.
\end{proof}
\begin{defi}
We call the map $\gamma\colon S\to \sharp_2S_{3,0}M$ of Proposition \ref{prop:FibratoPrinC} the \emph{intrinsic deviance} of the projective special K\"{a}hler manifold.
\end{defi}
\begin{rmk}
Given a section $s\colon U\to S$ and the corresponding function $z\in\smooth{\pi^{-1}(U),\mathbb{C}^*}$ such that $sz=\mathrm{id}_{\pi^{-1}(U)}$, we can compute $dz=z(\frac{1}{r}dr+id\vartheta)$, since locally $z=re^{i\vartheta}$.
Notice that $\vartheta$ is not globally defined on $\pi^{-1}(U)$, but $d\vartheta$ and $e^{i\vartheta}$ are.
Moreover,
\begin{equation}\label{eq:coordConnection}
\frac{1}{z}dz=\frac{1}{r}dr+id\vartheta\in\Omega^1(\pi^{-1}(U),\mathbb{C})
\end{equation}
is a principal connection form: indeed, it is equivariant for the action of $\mathbb{C}^*$, as $z(ua)=az(u)$ for all $a\in\mathbb{C}^*$, and, given a complex number $a$ and its corresponding fundamental vector field $a^{\circ}\in\field{\widetilde{M}}$,
\begin{align*}
\frac{1}{z}dz(a^{\circ})_u
&=\frac{1}{z}dz(\frac{d}{dt}ue^{at}|_{t=0})
=\frac{1}{z(u)}\frac{d}{dt}z(ue^{at})|_{t=0}
=\frac{1}{z(u)}\frac{d}{dt}z(u)e^{at}|_{t=0}
=a.
\end{align*}
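Since $\mathbb{C}^*$ is abelian, equivariance here amounts to invariance, which can also be checked directly: for a fixed $a\in\mathbb{C}^*$, denoting by $R_a$ the right translation, we have $z\circ R_a=az$, hence
\begin{equation*}
R_a^*\left(\frac{1}{z}dz\right)=\frac{1}{az}d(az)=\frac{1}{z}dz.
\end{equation*}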
\end{rmk}
\begin{rmk}\label{rmk:tau}
A local section $s\colon U\to S$ induces $\tau:=s^*\widetilde{\varphi}=s^*\varphi\in\Omega^1(U)$ such that on $\pi^{-1}(U)$
\begin{equation}
\widetilde{\varphi}=d\vartheta+\pi^*\tau
\end{equation}
and thus on $\pi_{S}^{-1}(U)$:
\begin{equation}
\varphi=d\vartheta|_{S}+\pi_S^*\tau.
\end{equation}
Indeed, the form $\widetilde{\varphi}-d\vartheta$ is basic, as, up to multiplication by $i$, it is the difference of the two connection forms \eqref{eq:C*connection} and \eqref{eq:coordConnection} on $\pi^{-1}(U)$.
Therefore, $\widetilde{\varphi}-d\vartheta=\pi^*\tau$ for some $\tau\in\Omega^1(U)$.
The second equation is simply obtained from the first by restriction to $S\subseteq \widetilde{M}$.
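Note also that $\pi^*d\tau=d\widetilde{\varphi}-d^2\vartheta=-2\pi^*\omega_M$; since $\pi$ is a surjective submersion, $\pi^*$ is injective on forms, so $d\tau=-2\omega_M$ on $U$.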
\end{rmk}
\section{Characterisation theorem}
In this section we prove our main theorem, characterising projective special K\"{a}hler manifolds in terms of the deviance.
We start by deriving necessary conditions on the deviance, reflecting the curvature conditions of Proposition \ref{prop:SpezzamentoCurvatura}.
\begin{prop}\label{prop:differentialCondition}
For a projective special K\"{a}hler manifold $(\pi\colon \widetilde{M}\to M,\nabla)$ with $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega},\nabla,\xi)$, and a local section $s\colon U\to S$, then the corresponding deviance $\eta$ satisfies
\begin{equation}
d^{LC}\eta=2i\tau\wedge \eta
\end{equation}
where $\tau=s^*\varphi\in\Omega^1(U)$.
\end{prop}
\begin{proof}
Thanks to Proposition \ref{prop:ThetatildeWrtTheta}, we know that there exist $z=re^{i\vartheta}$ and $\eta\in T_{1,0}U\otimes T^{0,1}U\otimes T_{1,0}U$ such that on $\pi^{-1}(U)$ we have $\widetilde{\eta}=\mathbb{R}E(z^2\pi^*\eta)$.
Now we would like to describe $\widetilde{d}^{LC}\widetilde{\eta}$ in terms of $d^{LC}\eta$.
Notice that
\begin{equation}\label{eq:TildedEta1}
\begin{aligned}[b]
\widetilde{d}^{LC}\widetilde{\eta}
&=\widetilde{d}^{LC}\mathbb{R}E(z^2\pi^*\eta)
=\mathbb{R}E(\widetilde{d}^{LC}(z^2\pi^*\eta))
=\mathbb{R}E(2zdz\wedge \pi^*\eta+z^2\widetilde{d}^{LC}\pi^*\eta)\\
&=\mathbb{R}E\left(z^2\left(2(\frac{1}{r}dr+id\vartheta) \wedge \pi^*\eta+\widetilde{d}^{LC}\pi^*\eta\right)\right).
\end{aligned}
\end{equation}
The next step is to compute $\widetilde{d}^{LC}\pi^*\eta$, but since we are using the Levi-Civita connection, it is equivalent to compute $\sharp_2(\widetilde{d}^{LC}\pi^*\sigma)$, where $\sigma=\flat_2 \eta\in S_{3,0}U$.
Let us consider a local coframe $\theta$ in $M$ and the corresponding lifting $\widetilde{\theta}$ as in \eqref{eq:coframe Mtilde}, so that we can denote explicitly $\sigma=\sigma_{k,j,h}\theta^k\otimes \theta^{j}\otimes \theta^{h}$.
We have
\begin{align}
\widetilde{\nabla}^{LC}\pi^*\theta^k
&=\widetilde{\nabla}^{LC}\frac{\widetilde{\theta}^k}{r}
=-\frac{dr}{r^2}\otimes \widetilde{\theta}^k-\frac{1}{r}\left((\widetilde{\omega}^{LC})^{k}_{j}\otimes\widetilde{\theta}^{j}\right)\\
&=-\frac{dr}{r}\otimes \pi^*\theta^k-\frac{1}{r}\left(\sum_{j=1}^{n}\pi^*(\omega^{LC})^{k}_{j}\otimes \widetilde{\theta}^{j}+i\widetilde{\varphi}\otimes\widetilde{\theta}^k+\pi^*\theta^k\otimes \widetilde{\theta}^{n+1}\right)\\
&=-\frac{dr}{r}\otimes \pi^*\theta^k-\pi^*\left((\omega^{LC})^{k}_{j}\otimes \theta^{j}\right)-i\widetilde{\varphi}\otimes\pi^*\theta^k-\pi^*\theta^k\otimes \frac{1}{r}\widetilde{\theta}^{n+1}\\
&=\pi^*\left(\nabla^{LC} \theta^{k}\right)- \frac{1}{r}\widetilde{\theta}^{n+1}\otimes \pi^*\theta^k-\pi^*\theta^k\otimes \frac{1}{r}\widetilde{\theta}^{n+1}.
\end{align}
We can now compute the following for $X\in\field{\pi^{-1}(U)}$:
\begin{align*}
\widetilde{\nabla}^{LC}_X&\pi^*\sigma
=\widetilde{\nabla}^{LC}_X\pi^*(\sigma_{k,j,h}\theta^k\otimes \theta^{j}\otimes \theta^{h})
=\widetilde{\nabla}^{LC}_X(\pi^*\sigma_{k,j,h}\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h})\\
&=d\pi^*\sigma_{k,j,h}(X)\,\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h}
+\pi^*\sigma_{k,j,h}\left(\widetilde{\nabla}^{LC}_X\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h}\right.\\
&\quad\left.+\pi^*\theta^k\otimes \widetilde{\nabla}^{LC}_X\pi^*\theta^{j}\otimes \pi^*\theta^{h}
+\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \widetilde{\nabla}^{LC}_X\pi^*\theta^{h}\right)\\
&=\pi^*d\sigma_{k,j,h}(X)\,\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h}
+\pi^*\sigma_{k,j,h}\pi^*\left(\nabla^{LC} \theta^{k}\right)_X \otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h}\\
&\quad +\pi^*\sigma_{k,j,h}\pi^*\theta^k\otimes \pi^*\left(\nabla^{LC} \theta^{j}\right)_X\otimes \pi^*\theta^{h}\\
&\quad +\pi^*\sigma_{k,j,h}\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \pi^*\left(\nabla^{LC} \theta^{h}\right)_X
-\frac{3}{r}\widetilde{\theta}^{n+1}(X)\pi^*\sigma\\
&\quad -\frac{1}{r}\left(\pi^*\sigma_{k,j,h}\pi^*\theta^k(X)\widetilde{\theta}^{n+1}\otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h}\right.\\
&\quad\left.+\pi^*\sigma_{k,j,h}\pi^*\theta^k\otimes \pi^*\theta^{j}(X)\widetilde{\theta}^{n+1}\otimes \pi^*\theta^{h}\right.\\
&\quad\left.+\pi^*\sigma_{k,j,h}\pi^*\theta^k\otimes \pi^*\theta^{j}\otimes \pi^*\theta^{h}(X)\widetilde{\theta}^{n+1}\right)\\
&=\pi^*\left(\nabla^{LC}\sigma\right)_X
-\frac{2}{r}\widetilde{\theta}^{n+1}(X)\pi^*\sigma
-\frac{1}{r}\widetilde{\theta}^{n+1}(X)\pi^*\sigma
-\frac{1}{r}\widetilde{\theta}^{n+1}\otimes\pi^*\sigma(X,\cdot,\cdot)\\
&\quad -\frac{1}{r}\pi^*\sigma(\cdot,X\otimes\widetilde{\theta}^{n+1},\cdot)
-\frac{1}{r}\pi^*\sigma(\cdot,\cdot,X\otimes\widetilde{\theta}^{n+1}).
\end{align*}
In general then, if $\sigma=\theta^k\otimes\sigma_k$, where $\sigma_k=\sigma_{k,j,h}\theta^j\otimes \theta^h\in S_{2,0}U$, we have by symmetry
\begin{align}
\widetilde{\nabla}^{LC}\pi^*\sigma
&=\pi^*\left(\nabla^{LC}\sigma\right)
-\frac{2}{r}\widetilde{\theta}^{n+1}\otimes\pi^*\sigma
-\frac{2}{r}((\widetilde{\theta}^{n+1})(\pi^*\theta^k))\otimes\pi^*(\sigma_{k,j,h}\theta^j\otimes\theta^h)\\
&\quad -\frac{2}{r}\left(\pi^*(\sigma_{k,j,h}\theta^k\otimes\theta^j)\otimes((\widetilde{\theta}^{n+1})(\pi^*\theta^h))\right).
\end{align}
Notice in particular that the last two rows are symmetric in the first two indices.
In order to compute $\widetilde{d}^{LC}\pi^*\sigma$ we need to antisymmetrise $\widetilde{\nabla}^{LC}\pi^*\sigma$ in the first two indices and multiply by two, so only the first row survives and we get
\begin{equation}
\widetilde{d}^{LC}\pi^*\sigma
=\pi^*(d^{LC}\sigma)-\frac{2}{r}\widetilde{\theta}^{n+1}\wedge \pi^*\sigma,
\end{equation}
and therefore
\begin{equation}
\widetilde{d}^{LC}\pi^*\eta
=\pi^*(d^{LC}\eta)-\frac{2}{r}\widetilde{\theta}^{n+1}\wedge \pi^*\eta.
\end{equation}
Substituting this value in \eqref{eq:TildedEta1}, we obtain
\begin{align}\label{eq:TildedEta2}
\widetilde{d}^{LC}\widetilde{\eta}
&=\mathbb{R}E\left(z^2\left(2(\frac{1}{r}dr+id\vartheta) \wedge \pi^*\eta+\pi^*(d^{LC}\eta)-\frac{2}{r}\widetilde{\theta}^{n+1}\wedge \pi^*\eta\right)\right)\\
&=\mathbb{R}E\left(z^2\left(\pi^*d^{LC}\eta-2i(\widetilde{\varphi}-d\vartheta) \wedge \pi^*\eta\right)\right).
\end{align}
As observed in Remark \ref{rmk:tau}, $\widetilde{\varphi}-d\vartheta=\pi^*\tau$, so we have
\begin{align*}
\widetilde{d}^{LC}\widetilde{\eta}
&=\mathbb{R}E\left(z^2\pi^*\left(d^{LC}\eta-2i\tau \wedge \eta\right)\right).
\end{align*}
From Proposition \ref{prop:SpezzamentoCurvatura}, we know that $\widetilde{d}^{LC}\widetilde{\eta}=0$, and since $\eta\in\Omega^1(U,T^{0,1}\otimes T_{1,0})$, $\eta$ and $\overline{\eta}$ are linearly independent, so this quantity vanishes if and only if $z^2 \pi^*\left(d^{LC}\eta-2i\tau \wedge \eta\right)$ does.
Therefore,
\begin{equation}
d^{LC}\eta-2i\tau \wedge \eta=0,
\end{equation}
ending the proof.
\end{proof}
Let us now look at the final ingredient of the curvature tensor, that is $\frac{1}{2}[\widetilde{\eta}\wedge \widetilde{\eta}]$.
In the setting of Proposition \ref{prop:ThetatildeWrtTheta}, given a section $s\colon U\to S$ with induced deviance $\eta$, we have
\begin{align}
\frac{1}{2}[\widetilde{\eta}\wedge \widetilde{\eta}]
&=\frac{1}{2}[\mathbb{R}E(z^2\pi^*\eta)\wedge \mathbb{R}E(z^2\pi^*\eta)]
=\frac{1}{2}[(z^2\pi^*\eta+\overline{z}^2\pi^*\overline{\eta})\wedge (z^2\pi^*\eta+\overline{z}^2\pi^*\overline{\eta})]\\
&=\frac{1}{2}\mathbb{R}E\left(z^4[\pi^*\eta\wedge \pi^*\eta]\right)
+|z|^4[\pi^*\eta\wedge \pi^*\overline{\eta}].
\end{align}
We can compute this tensor for a local coframe $\theta$ on $M$.
Since we have
\begin{equation}
\pi^*\theta^k\circ \pi^*\theta_h
=\frac{1}{r}\widetilde{\theta}^k(\frac{1}{r}\widetilde{\theta}_h)
=\frac{1}{r^2}\widetilde{\theta}^k(\widetilde{\theta}_h)
=\frac{1}{r^2}\delta^k_h
=\frac{1}{r^2}\pi^*(\theta^k\circ\theta_h)
\end{equation}
and $\pi^*\theta^k\circ \pi^*\overline{\theta_h}=\pi^*\overline{\theta^k}\circ \pi^*\theta_h=0$, we obtain
\begin{align}
[\pi^*\eta\wedge \pi^*\eta]
&=[\pi^*\eta^{j}_{k,h}\pi^*\theta^{k}\otimes \pi^*\overline{\theta_{j}}\otimes \pi^*\theta^{h}\wedge \pi^*\eta^{j'}_{k',h'}\pi^*\theta^{k'}\otimes \pi^*\overline{\theta_{j'}}\otimes \pi^*\theta^{h'}]\\
&=\pi^*\eta^{j}_{k,h}\pi^*\theta^{k}\wedge \pi^*\eta^{j'}_{k',h'}\pi^*\theta^{k'}\otimes [\pi^*\overline{\theta_{j}}\otimes \pi^*\theta^{h},\pi^*\overline{\theta_{j'}}\otimes \pi^*\theta^{h'}]
=0
\end{align}
and
\begin{align}
[\pi^*\eta\wedge& \pi^*\overline{\eta}]
=[\pi^*\eta^{j}_{k,h}\pi^*\theta^{k}\otimes \pi^*\overline{\theta_{j}}\otimes \pi^*\theta^{h}\wedge \pi^*\overline{\eta^{j'}_{k',h'}}\pi^*\overline{\theta^{k'}}\otimes \pi^*\theta_{j'}\otimes \pi^*\overline{\theta^{h'}}]\\
&=\pi^*\eta^{j}_{k,h}\pi^*\theta^{k}\wedge \pi^*\overline{\eta^{j'}_{k',h'}}\pi^*\overline{\theta^{k'}}\otimes [\pi^*\overline{\theta_{j}}\otimes \pi^*\theta^{h},\pi^*\theta_{j'}\otimes \pi^*\overline{\theta^{h'}}]\\
&=\pi^*(\eta^{j}_{k,h}\theta^{k}\wedge \overline{\eta^{j'}_{k',h'}}\overline{\theta^{k'}})\otimes \frac{1}{r^2}\pi^*(\overline{\theta_{j}}\otimes \theta^{h}(\theta_{j'})\otimes \overline{\theta^{h'}}-\theta_{j'}\otimes \overline{\theta^{h'}}(\overline{\theta_{j}})\otimes \theta^{h})\\
&=\frac{1}{r^2}\pi^*[\eta\wedge \overline{\eta}].
\end{align}
Therefore
\begin{equation}\label{eq:etawedgeeta}
\frac{1}{2}[\widetilde{\eta}\wedge\widetilde{\eta}]
=\frac{|z|^4}{r^2}\pi^*[\eta\wedge\overline{\eta}]
=r^2\pi^*[\eta\wedge\overline{\eta}].
\end{equation}
\begin{rmk}\label{rmk:sezioneGlobaleH}
Note that $[\eta\wedge\overline{\eta}]$ is independent on the local coframe, and if we consider another section such that $s'=sa$ on the intersection of their domains, with $a$ taking values in $S^1$, if $\eta'$ is the deviance corresponding to $s'$, then $[\eta'\wedge \overline{\eta'}]=[\eta a\wedge \overline{\eta}\overline{a}]=|a|^2[\eta \wedge \overline{\eta}]=[\eta \wedge \overline{\eta}]$.
So, there is a globally defined section $M\to S^2(\lie{u}(n))$ mapping $p$ to $[\eta_p\wedge \overline{\eta_p}]$.
\end{rmk}
For a projective special K\"{a}hler manifold $(\pi\colon \widetilde{M}\to M,\nabla)$ of real dimension $2n$, Proposition \ref{prop:SpezzamentoCurvatura}, interpreted in the light of the last observations and the ones made in Section \ref{sec:coframe} (see Remark \ref{rmk:Levi-Civita_CSK}), says that $0=r^2\pi^*(\Omega^{LC}+\Omega_{\mathbb{P}_{\mathbb{C}}^n}+[\eta\wedge \overline{\eta}])$, thus we have the following equation:
\begin{equation}\label{eq:curvatureEquation}
\Omega^{LC}+\Omega_{\mathbb{P}_{\mathbb{C}}^n}+[\eta\wedge \overline{\eta}]=0.
\end{equation}
This is a curvature tensor, so we can compute its Ricci and scalar component.
\begin{prop}
Let $(\pi\colon \widetilde{M}\to M,\nabla)$ be a projective special K\"{a}hler manifold of dimension $2n$, then
\begin{equation}\label{eq:RicciPSK}
\mathbb{R}ic_M(X,Y)+2(n+1)g(X,Y)-\mathbb{R}E(h(\overline{\eta_X},\eta_Y))=0;
\end{equation}
\begin{equation}\label{eq:ScalCurv}
\mathrm{scal}_M+2(n+1)-\frac{2}{n}\norm{\eta}_h^2=0.
\end{equation}
\end{prop}
\begin{proof}
The first summand in \eqref{eq:curvatureEquation} gives the Ricci tensor of $M$, while the second gives the Ricci tensor \eqref{eq:RicciProj} of the projective space.
In order to compute the last term, consider a unitary frame $\theta$; from previous computations,
\begin{align*}
[\eta\wedge \overline{\eta}]
&=(\eta^{j}_{k,h}\theta^{k}\wedge \overline{\eta^{j'}_{k',h'}}\overline{\theta^{k'}})\otimes (\delta^{h}_{j'}\overline{\theta_{j}}\otimes \overline{\theta^{h'}}-\delta^{h'}_{j}\theta_{j'}\otimes \theta^{h})\\
&=\mathbb{R}E\left(\eta^{j}_{k,h}\overline{\eta^{h}_{k',h'}}\theta^{k}\wedge \overline{\theta^{k'}}\otimes \overline{\theta_{j}}\otimes \overline{\theta^{h'}}\right)
\end{align*}
The Ricci component $\mathbb{R}ic([\eta\wedge\overline{\eta}])$, evaluated on $X=\mathbb{R}E(X^k\theta_k)$ and $Y=\mathbb{R}E(Y^k\theta_k)$, is then the trace of $[\eta\wedge\overline{\eta}](\cdot,Y)X$, which is
\begin{align*}
[\eta\wedge&\overline{\eta}](\cdot,Y)X\\
&=\eta^{j}_{k,h}\overline{\eta^{h}_{u,v}}(\theta^{k} \overline{Y^{u}}-Y^k \overline{\theta^{u}})\otimes \overline{\theta_{j}}\otimes \overline{X^{v}}+
\overline{\eta^{j}_{k,h}}\eta^{h}_{u,v}(\overline{\theta^{k}} Y^{u}-\overline{Y^{u}}\theta^{k})\otimes \theta_{j}\otimes X^{v}\\
&=\mathbb{R}E\left(\eta^{j}_{k,h}\overline{\eta^{h}_{u,v}}(\theta^{k} \overline{Y^{u}}-Y^k \overline{\theta^{u}})\otimes \overline{\theta_{j}}\otimes \overline{X^{v}}\right).
\end{align*}
Its trace is therefore
\begin{equation}
-\mathbb{R}E\left(\eta^{j}_{k,h}\overline{\eta^{h}_{j,v}}Y^k \overline{X^{v}}\right)=-\mathbb{R}E\left(\eta^{j}_{k,h}\overline{\eta^{h}_{u,j}}Y^k \overline{X^{u}}\right)=-\mathbb{R}E(h(\overline{\eta_X},\eta_Y)),
\end{equation}
or equivalently, $\mathbb{R}ic([\eta\wedge\overline{\eta}])=-\mathbb{R}E\left(\overline{\eta^{h}_{u,j}}\eta^{j}_{k,h}\overline{\theta^{u}}\theta^k \right)$.
Thus we obtain \eqref{eq:RicciPSK}.
From this tensor we can now obtain \eqref{eq:ScalCurv} by computing the scalar component, that is by taking the trace, raising the indices with $g$ and then dividing it by the dimension of $M$.
Thus the first summand gives $\mathrm{scal}_M$, the second gives $2(n+1)$ and the third
\begin{align}
\frac{1}{2n}\mathrm{tr}\left(-\mathbb{R}E\left(\overline{\eta^{h}_{u,j}}\eta^{j}_{k,h}(\overline{\theta^{u}})_\sharp\theta^k \right) \right)
&=-\frac{1}{2n}\mathrm{tr}\left(\mathbb{R}E\left(\overline{\eta^{h}_{u,j}}\eta^{j}_{k,h}(2\theta_{u})\theta^k \right)\right)\\*
&=-\frac{1}{n}\sum_{j,h,k} \mathbb{R}E\left(\eta^{j}_{k,h}\overline{\eta^{h}_{k,j}}\right)=-\frac{2}{n}\norm{\eta}_h^2.\qedhere
\end{align}
\end{proof}
In particular, since the norm of $\eta$ is nonnegative, we obtain a lower bound for the scalar curvature:
\begin{cor}
Let $(\pi\colon \widetilde{M}\to M,\nabla)$ be a projective special K\"{a}hler manifold, then
\begin{equation}
\mathrm{scal}_M\ge -2(n+1).
\end{equation}
Equality holds at a point if and only if the deviance vanishes at that point.
\end{cor}
\begin{rmk}
The lower bound is reached by projective special K\"{a}hler manifolds with zero deviance; we will see that this condition characterises the complex hyperbolic space (Proposition \ref{prop:uniquenessHyp}).
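Indeed, when the deviance vanishes identically, equation \eqref{eq:curvatureEquation} reduces to
\begin{equation*}
\Omega^{LC}=-\Omega_{\mathbb{P}^n_{\mathbb{C}}},
\end{equation*}
so $M$ has the curvature tensor of a complex hyperbolic space and, by \eqref{eq:ScalCurv}, $\mathrm{scal}_M=-2(n+1)$ everywhere.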
\end{rmk}
We can now state the main result:
\begin{theo}\label{theo:characterisationPSK}
On a $2n$-dimensional K\"{a}hler manifold $(M,g,I,\omega)$, to give a projective special K\"{a}hler structure is equivalent to give an $S^1$-bundle $\pi_S\colon S\to M$ endowed with a connection form $\varphi$ and a bundle map $\gamma\colon S\to \sharp_2 S_{3,0} M$ such that:
\begin{enumerate}
\item\label{theo:charPSKPunto1} $d\varphi=-2\pi_S^*\omega$;
\item\label{theo:charPSKPunto2} $\gamma(u a)=a^2\gamma(u)$ for all $a\in S^1$;
\item\label{theo:charPSKPunto3} for a certain choice of an open covering $\{U_\alpha|\alpha\in\mathcal{A}\}$ of $M$ and a family $\{s_\alpha\colon U_\alpha\to S\}_{\alpha\in\mathcal{A}}$ of sections, denoting by $\eta_\alpha$ the local $1$-form taking values in $T^{0,1}M\otimes T_{1,0}M$ determined by $\gamma\circ s_{\alpha}$, for all $\alpha\in\mathcal{A}$:
\begin{enumerate} [label=\textbf{D\arabic*}]
\item\label{theo:charpsk:CurvCond} $\qquad\Omega^{LC}+\Omega_{\mathbb{P}^n_{\mathbb{C}}}+[\eta_\alpha\wedge\overline{\eta_\alpha}]=0$;
\item\label{theo:charpsk:DiffCond} $\qquad d^{LC}\eta_\alpha=2i s_{\alpha}^*\varphi\wedge\eta_\alpha$.
\end{enumerate}
\end{enumerate}
In this case, \ref{theo:charPSKPunto3} is satisfied by every such family of sections.
\end{theo}
\begin{proof}
Given a projective special K\"{a}hler manifold, we define $S:=r^{-1}(1)\subset \widetilde{M}$ and $\varphi:=-\iota_\xi\omega|_{S}$.
The principal action on $S$ is generated by $I\xi$ which is tangent to $S$ since $T_uS=\ker(dr)$ and $dr(I\xi)=-\frac{1}{r}\xi^\flat (I\xi)=-\frac{\widetilde{g}(\xi,I\xi)}{r}$.
The curvature is then $d\varphi=-2\pi_S^*\omega$ as shown in Remark \ref{rmk:curvatureSbundle}, so the first point is satisfied.
The second condition holds thanks to Proposition \ref{prop:FibratoPrinC}.
For the third point, we get \ref{theo:charpsk:CurvCond} from the arguments leading to equation \eqref{eq:curvatureEquation} and \ref{theo:charpsk:DiffCond} from Proposition \ref{prop:differentialCondition}.
In order to prove the other direction, define $\widetilde{M}:=S\times \mathbb{R}^+$, $\pi:=\pi_S\circ \pi_1\colon \widetilde{M}\to M$, and $t:=\pi_2\in\smooth{\widetilde{M},\mathbb{R}^+}$, where $\pi_1\colon S\times \mathbb{R}^+\to S$ and $\pi_2\colon S\times \mathbb{R}^+\to \mathbb{R}^+$ are the projections.
Let $\widetilde{\varphi}:=\pi_1^*\varphi$, in particular $d\widetilde{\varphi}=\pi_1^*d\varphi=-2\pi^*\omega$ as expected.
Define now
\begin{equation}
\widetilde{g}:=t^2\pi^*g-t^2\widetilde{\varphi}^2-dt^2
\end{equation}
which is non-degenerate: since $t\widetilde{\varphi}$ and $dt$ are linearly independent and transverse to $\pi$, we can complete them to a basis of $1$-forms with respect to which $\widetilde{g}$ has signature $(2n,2)$.
Extend now $I$ to $\widetilde{I}$ so that $\widetilde{I}\cdot(\pi^*\alpha)=\pi^*I\alpha$ for all $\alpha\in T^*M$ and $\widetilde{I}\cdot(dt)=t\widetilde{\varphi}$.
The metric $\widetilde{g}$ is compatible with $\widetilde{I}$ since $\widetilde{I}\cdot \widetilde{g}=t^2\widetilde{I}\cdot\pi^*g-(\widetilde{I}\cdot t\widetilde{\varphi})^2-(\widetilde{I}\cdot dt)^2=t^2\pi^*(I\cdot g)-(-dt)^2-(t\widetilde{\varphi})^2=t^2\pi^*(I\cdot g)-dt^2-t^2\widetilde{\varphi}^2=\widetilde{g}$.
We thus have a K\"{a}hler manifold $(\widetilde{M},\widetilde{g},\widetilde{I},\widetilde{\omega})$, where
\begin{equation}
\widetilde{\omega}:=t^2\pi^*\omega+t\widetilde{\varphi}\wedge dt.
\end{equation}
Let $\xi:=t\partial_t$ where $\partial_t$ is the vector field corresponding to the coordinate derivation on $\mathbb{R}^+$.
Notice that the function $r=\sqrt{-\widetilde{g}(\xi,\xi)}$ coincides with $t$, as $\sqrt{-\widetilde{g}(t\partial_t,t\partial_t)}=\sqrt{-t^2 \widetilde{g}(\partial_t,\partial_t)}=t$.
In particular $\widetilde{g}(\xi,\xi)=-t^2\neq 0$ and $\widetilde{g}(\widetilde{I}\xi,\widetilde{I}\xi)=\widetilde{g}(\xi,\xi)<0$, so $\widetilde{g}$ is negative definite on $\langle\xi,\widetilde{I}\xi\rangle$ and hence positive definite on the orthogonal complement.
Let now $\theta$ be a unitary coframe on an open subset $U\subseteq M$, then we can lift it to a complex coframe $\widetilde{\theta}$ on $\pi^{-1}(U)$ defined as in \eqref{eq:coframe Mtilde}.
It is straightforward to check that $\widetilde{\theta}$ is adapted to the pseudo-K\"{a}hler structure of $\widetilde{M}$.
Notice that the proof of Proposition \ref{prop:conicLC} is still valid in this situation even though we do not know whether $\widetilde{M}\to M$ has a structure of projective special K\"{a}hler manifold; this gives us a description of the Levi-Civita connection form on $\widetilde{M}$ with respect to $\widetilde{\theta}$.
Notice that $\widetilde{\theta}^k(\xi)=0$ for $k\le n$ and $\widetilde{\theta}^{n+1}(\xi)=dt(t\partial_t)+i\widetilde{\varphi}(t\partial_t)=t$ so $\xi=\mathbb{R}E(t\widetilde{\theta}_{n+1})$.
We can thus compute
\begin{align}
\widetilde{\nabla}^{LC}\xi
&=dt\otimes \mathbb{R}E(\widetilde{\theta}_{n+1})+t\widetilde{\nabla}^{LC}\mathbb{R}E(\widetilde{\theta}_{n+1})\\
&=\mathbb{R}E(dt\otimes \widetilde{\theta}_{n+1})+\frac{t}{r}\mathbb{R}E\left(\sum_{k=1}^n \widetilde{\theta}^k\otimes\widetilde{\theta}_k+ i\operatorname{Im}(\widetilde{\theta}^{n+1})\otimes\widetilde{\theta}_{n+1}\right)\\
&=\mathbb{R}E\left(\sum_{k=1}^{n+1} \widetilde{\theta}^k\otimes\widetilde{\theta}_k\right)
=\mathrm{id}.
\end{align}
Each section $s_\alpha$ corresponds to the trivialisation $(\pi|_{\pi^{-1}(U_\alpha)},z_{\alpha})\colon \pi^{-1}(U_\alpha)\to U_\alpha\times \mathbb{C}^*$ in the sense that $s_\alpha(\pi(u))\cdot z_{\alpha}(u)=u$ for all $u\in\pi^{-1}(U_\alpha)$.
For all $\alpha$ on $\pi^{-1}(U_{\alpha})$, define the tensor $\widetilde{\eta}_\alpha:=\mathbb{R}E(z_\alpha^2\pi^*\eta_\alpha)$.
The family $\{\widetilde{\eta}_\alpha\}_{\alpha\in\mathcal{A}}$ is compatible on intersections $U_1\cap U_2$: if $s_1=cs_2$ for some $\mathrm{U}(1)$-valued function $c$, then $z_2=cz_1$ and $\eta_1=\gamma\circ s_1=\gamma\circ (cs_2)=c^2\,\gamma\circ s_2=c^2\eta_2$, so
\begin{equation}
\widetilde{\eta_1}
=\mathbb{R}E(z_1^2\pi^*\eta_1)
=\mathbb{R}E(z_1^2c^2\pi^*\eta_2)
=\mathbb{R}E(z_2^2\pi^*\eta_2)
=\widetilde{\eta_2}.
\end{equation}
Therefore, this family glues to form a tensor $\widetilde{\eta}\in\sharp_2 S^3\widetilde{M}$.
We can build another connection $\nabla:=\widetilde{\nabla}^{LC}+\widetilde{\eta}$.
Notice that $\nabla\xi=\widetilde{\nabla}^{LC}\xi+\widetilde{\eta}(\xi)=\mathrm{id}+\mathbb{R}E(z_{\alpha}^2\pi^*\eta_{\alpha})(\xi)=\mathrm{id}$ because locally $\eta_\alpha$ is horizontal for all $\alpha$.
In order to prove that $\nabla$ is symplectic, since the Levi-Civita connection is symplectic, it is enough to prove that $\widetilde{\omega}(\widetilde{\eta},\cdot)+\widetilde{\omega}(\cdot,\widetilde{\eta})=0$.
Locally, $\widetilde{\omega}=\frac{1}{2i}\sum_{k=1}^{n+1}\overline{\widetilde{\theta}^k}\wedge \widetilde{\theta}^k$ and in fact, for all $X=\mathbb{R}E(X^k\widetilde{\theta}_k)$, $Y=\mathbb{R}E(Y^k\widetilde{\theta}_k)$, $Z=\mathbb{R}E(Z^k\widetilde{\theta}_k)$ vector fields on $\widetilde{M}$:
\begin{align*}
2i(\widetilde{\omega}(\widetilde{\eta}_X Y,Z)+\widetilde{\omega}(Y,\widetilde{\eta}_X Z))
&=\sum_{k=1}^{n+1}\left(\overline{\widetilde{\theta}^k}(\widetilde{\eta}_X Y)\widetilde{\theta}^k(Z)
-\widetilde{\theta}^k(\widetilde{\eta}_X Y)\overline{\widetilde{\theta}^k}(Z)\right.\\
&\quad\left.+\overline{\widetilde{\theta}^k}(Y)\wedge \widetilde{\theta}^k(\widetilde{\eta}_X Z)
-\widetilde{\theta}^k(Y)\wedge \overline{\widetilde{\theta}^k}(\widetilde{\eta}_X Z)\right)\\
&=\sum_{k=1}^{n+1}\left(
z^2 \pi^*\eta^k_{u,v}X^u Y^v Z^k
-\overline{Z^k}\overline{z}^2\overline{\pi^*\eta}^k_{u,v}\overline{X^u} \overline{Y^v} \right.\\
&\quad +\left.\overline{Y}^k\overline{z}^2\overline{\pi^*\eta}^k_{u,v}\overline{X^u} \overline{Z^v}
-z^2\pi^*\eta^k_{u,v}X^u Z^v Y^k
\right)\\
&=\sum_{k=1}^{n+1}\mathbb{R}E\left(
z^2 \pi^*\eta^k_{u,v}X^u Y^v Z^k
-z^2\pi^*\eta^k_{u,v}X^u Z^v Y^k
\right)\\
&=\sum_{k=1}^{n+1}\mathbb{R}E\left(
z^2 \pi^*(\eta^k_{u,v}-\eta^v_{u,k})X^u Y^v Z^k
\right).
\end{align*}
By the symmetry of $\eta$, this quantity vanishes.
Proving that $d^{\nabla}\widetilde{I}=0$ is equivalent to proving that $\nabla \widetilde{I}$ is symmetric in the two covariant indices; moreover, $\nabla \widetilde{I}=\widetilde{\nabla}^{LC}\widetilde{I}+[\widetilde{\eta},\widetilde{I}]=[\widetilde{\eta},\widetilde{I}]$.
Since $\widetilde{I}=\mathbb{R}E(i\widetilde{\theta}_k\widetilde{\theta}^k)$, we have
\begin{align*}
[\widetilde{\eta},\widetilde{I}]
&=iz^2\pi^*\eta^u_{v,w}\widetilde{\theta}^v\otimes\overline{\widetilde{\theta}_u}\otimes\widetilde{\theta}^w
-i\overline{z^2\pi^*\eta^u_{v,w}\widetilde{\theta}^v\otimes\overline{\widetilde{\theta}_u}\otimes\widetilde{\theta}^w}\\
&\quad +iz^2\pi^*\eta^u_{v,w}\widetilde{\theta}^v\otimes\overline{\widetilde{\theta}_u}\otimes\widetilde{\theta}^w
-i\overline{z^2\pi^*\eta^u_{v,w}\widetilde{\theta}^v\otimes\overline{\widetilde{\theta}_u}\otimes\widetilde{\theta}^w}
=\mathbb{R}E\left(2iz^2\pi^*\eta^u_{v,w}\widetilde{\theta}^v\otimes\overline{\widetilde{\theta}_u}\otimes\widetilde{\theta}^w\right)=-2\widetilde{I}\widetilde{\eta},
\end{align*}
which is symmetric in the two covariant indices, proving $d^{\nabla}\widetilde{I}=0$.
For the flatness of $\nabla$, we compute the curvature locally
\begin{align*}
\Omega^\nabla
=d\omega^{\nabla}+\frac{1}{2}[\omega^{\nabla}\wedge\omega^{\nabla}]
=\widetilde{\Omega}^{LC}+\widetilde{d}^{LC}\widetilde{\eta}+\frac{1}{2}[\widetilde{\eta}\wedge\widetilde{\eta}].
\end{align*}
By Proposition \ref{prop:conicLC}, $\widetilde{\Omega}^{LC}=r^2\pi^*(\Omega^{LC}+\Omega_{\mathbb{P}^n_{\mathbb{C}}})$.
By the same reasoning as in the proof of Proposition \ref{prop:differentialCondition}, $\widetilde{d}^{LC}\widetilde{\eta}=0$ if and only if $d^{LC}\eta-2is^*\varphi\wedge \eta=0$, which is guaranteed by \ref{theo:charpsk:DiffCond}.
Finally, the computations leading to equation \eqref{eq:etawedgeeta} still apply and thus we can deduce that
\begin{equation}
\Omega^{\nabla}=r^2\pi^*(\Omega^{LC}+\Omega_{\mathbb{P}^n_\mathbb{C}}+[\eta\wedge\overline{\eta}])=0,
\end{equation}
making the connection $\nabla$ flat.
Notice that $\pi\colon \widetilde{M}\to M$ is a principal $\mathbb{C}^*$-bundle, where for all $l e^{i\theta}\in\mathbb{C}^*$ and $(u,t)\in\widetilde{M}$:
\begin{equation}
(u,t)l e^{i\theta}:=(u e^{i\theta},tl).
\end{equation}
The fundamental vector field corresponding to $1$ at $(u,t_0)$ is $\xi_{(u,t_0)}$, and the one corresponding to $i$ is $X:=\frac{d}{dt}((u,t_0)\exp(it))|_{t=0}=\frac{d}{dt}(ue^{it},t_0)|_{t=0}$, which is vertical and such that $\widetilde{\varphi}(X)=\varphi((\pi_1)_*X)=\varphi(\frac{d}{dt}(ue^{it})|_{t=0})=1$ and $dr(X)=0$.
This means that $X=\widetilde{I}\xi$, since $\widetilde{g}(X,\cdot)=-r^2\widetilde{\varphi}=-r\,\widetilde{I}\cdot dr=\widetilde{I}\cdot\xi^{\flat}$.
We are only left to prove that $M$ is the K\"{a}hler quotient of $\widetilde{M}$ with respect to the $\mathrm{U}(1)$-action; to do so, notice that $\widetilde{\omega}(\widetilde{I}\xi,\cdot)=-\widetilde{g}(\xi,\cdot)=rdr=d\left(\frac{r^2}{2}\right)$, so $\mu:=\frac{r^2}{2}$ is a moment map for $\widetilde{I}\xi$.
Notice that $\mu^{-1}(\frac{1}{2})=S\times\{1\}$ and that $S$ is a principal bundle, so, by definition of $\widetilde{g}$ and $\widetilde{\omega}$, the quotient $S/\mathrm{U}(1)$ is isometric to $M$; this ends the proof.
\end{proof}
\begin{rmk}
Starting from the family $\{\eta_\alpha\}_{\alpha}$, we can build a bundle map $\gamma\colon S\to M$ as long as the $\eta_\alpha$'s are linked by the relation $\eta_\alpha=g_{\alpha,\beta}^2\eta_\beta$ where $g_{\alpha,\beta}$ is a cocycle defining $S$.
\end{rmk}
\begin{rmk}\label{rmk:trivial_cohomology2=trivial_line_bundles}
Let $(M,g,I)$ be a K\"{a}hler manifold, then if $H^2(M,\mathbb{Z})=0$, in particular, every complex line bundle and every circle bundle are trivial.
Moreover, by de Rham's theorem, $H^2_{dR}(M)=H^2(M,\mathbb{R})=H^2(M,\mathbb{Z})\otimes\mathbb{R}=0$, so in particular $\omega=d\lambda$ for some $\lambda\in\Omega^1(M)$.
\end{rmk}
\begin{cor}\label{cor:charPSKesatte}
A K\"{a}hler $2n$-manifold $(M,g,I,\omega)$ such that $H^2(M,\mathbb{Z})=0$, has a projective special K\"{a}hler structure if and only if there exists a section $\eta\colon M\to \sharp_2 S_{3,0}M$ such that
\begin{enumerate} [label=\textbf{D\arabic*\textsuperscript{*}}]
\item\label{eq:condCurvCor} $\qquad \Omega^{LC}+\Omega_{\mathbb{P}^n_{\mathbb{C}}}+[\eta\wedge\overline{\eta}]=0;$
\item\label{eq:condDiffCor} $\qquad d^{LC}\eta=-4i\lambda\wedge \eta$;
\end{enumerate}
for some $\lambda\in\Omega^{1}(M)$ such that $d\lambda=\omega$.
\end{cor}
\begin{proof}
If $M$ has a projective special K\"{a}hler structure, then from Theorem \ref{theo:characterisationPSK} we obtain an $S^1$-bundle $p\colon S\to M$ and the map $\gamma\colon S\to \sharp_2 S_{3,0}M$.
Consider the corresponding line bundle $L=S\times_{\mathrm{U}(1)}\mathbb{C}$.
As noted in Remark \ref{rmk:trivial_cohomology2=trivial_line_bundles}, we can assume $L=M\times \mathbb{C}$ and $S=M\times S^1$.
In particular, there is a global section $s\colon M\to S$ and if we call $\eta=\gamma\circ s\colon M\to \sharp_2S_{3,0}M$, it is a global section satisfying the curvature equation thanks to Theorem \ref{theo:characterisationPSK}.
Defining $\lambda:=-\frac{1}{2}s^*\varphi$, we have $d\lambda=-\frac{1}{2}s^*(-2\pi_S^*\omega)=(\pi_S s)^* \omega=\omega$ and thus also the differential condition is satisfied by Theorem \ref{theo:characterisationPSK}.
Conversely, by de Rham's Theorem, we have $\lambda\in\Omega^1(M)$ such that $d\lambda=\omega$.
We define $\pi_S=\pi_1\colon S=M\times S^1\to M$ and choose as connection the form $\varphi=\pi_2^*d\vartheta-2\pi_S^*\lambda$, where $d\vartheta$ is the fundamental $1$-form on $S^1=\mathrm{U}(1)$.
Then $d\varphi=0-2\pi_S^*d\lambda=-2\pi_S^*\omega$, so $S\to M$ has the desired curvature.
Moreover, it is trivial, so we have a global section $s\colon M\to S$ mapping $p$ to $(p,1)$.
Given $\eta\colon M\to \sharp_2S_{3,0}M$ as in the statement, we define $\gamma\colon S\to \sharp_2 S_{3,0}M$ such that $\gamma(p,a):=a^2\eta(p)$ for all $p\in M$ and $a\in \mathrm{U}(1)$.
Notice that $\gamma\circ s=\gamma(\cdot,1)=\eta$, so the curvature equation of this corollary gives the curvature equation in Theorem \ref{theo:characterisationPSK} and the same is true for the differential condition, since $s^*\varphi=s^*\pi_2^*d\vartheta-2s^*\pi_S^*\lambda=0-2\lambda$.
By Theorem \ref{theo:characterisationPSK}, $M$ is thus projective special K\"{a}hler.
\end{proof}
\begin{rmk}
Instead of requiring a section $\eta$ as in Corollary \ref{cor:charPSKesatte}, we could use a section $\sigma$ of $S_{3,0}M$ such that $\sharp_2\sigma=\eta$.
\end{rmk}
\section{Varying the projective special K\"{a}hler structure by a \texorpdfstring{$\mathrm{U}(1)$}{U(1)}-valued function}
Theorem \ref{theo:characterisationPSK} allows us to find a whole class of projective special K\"{a}hler structures from a given one, as shown in the following proposition.
\begin{prop}\label{prop:PSKcerchioConnessioni}
Let $(\pi\colon \widetilde{M}\to M,\nabla)$ be a projective special K\"{a}hler manifold, let $\gamma\colon S\to \sharp_2 S_{3,0}M$ be its intrinsic deviance and $\varphi\in\Omega^1(S)$ the principal connection form on $\pi_S\colon S\to M$, then for all $\beta\in\smooth{M,\mathrm{U}(1)}$ there is a new projective special K\"{a}hler structure $(\pi\colon \widetilde{M}^{\beta}\to M,\nabla^{\beta})$ with intrinsic deviance $\gamma^\beta=\beta\gamma\colon S\to\sharp_2 S_{3,0}M$ on the same bundle $S$, with principal connection form $\varphi^{\beta}=\pi_S^*\left(\frac{d\beta}{2i\beta}\right)+\varphi$.
\end{prop}
\begin{proof}
We want to use Theorem \ref{theo:characterisationPSK}, so consider the same bundle $\pi_S\colon S\to M$, but with the new connection form $\varphi^\beta$.
Notice that $\varphi^\beta$ is a real form; indeed, since $\overline{\beta}\beta=1$, we have
\begin{equation}
0=\beta d\overline{\beta}+\overline{\beta}d\beta
=\overline{\beta}\beta \left(\frac{d\overline{\beta}}{\overline{\beta}}+\frac{d\beta}{\beta}\right)
=\left(\overline{\left(\frac{d\beta}{\beta}\right)}+\frac{d\beta}{\beta}\right)
=2\mathbb{R}e\left(\frac{d\beta}{\beta}\right),
\end{equation}
and thus $\operatorname{Im}\left(\frac{d\beta}{2i\beta}\right)=-\frac{1}{2}\mathbb{R}e\left(\frac{d\beta}{\beta}\right)=0$.
Moreover, $d\varphi^\beta=-\pi_S^*\left(\frac{d\beta\wedge d\beta}{2i\beta^2}\right)+d\varphi=d\varphi=-2\pi_S^*\omega$, so by condition \ref{theo:charPSKPunto1} of Theorem \ref{theo:characterisationPSK} this is a compatible principal connection form.
The bundle map $\gamma^\beta$ is still homogeneous of degree $2$.
We are only left to prove the two conditions of point \ref{theo:charPSKPunto3}, so consider a family of sections $\{(U_\alpha,s_\alpha)\}_{\alpha\in\mathcal{A}}$ corresponding to a trivialisation of $S$ and let $\eta^\beta_\alpha:=\gamma^\beta\circ s_\alpha=\beta\gamma\circ s_{\alpha}=\beta \eta_\alpha$.
We thus have
\begin{align*}
d^{LC}\eta^\beta_\alpha
&=d^{LC}(\beta\eta_\alpha)
=d\beta\wedge \eta_\alpha+\beta 2i s_\alpha^*\varphi\wedge \eta_\alpha
=2i\left(\frac{d\beta}{2i\beta}+ s_\alpha^*\varphi\right)\wedge \beta\eta_\alpha\\
&=2is_\alpha^*\left(\pi_S^*\left(\frac{d\beta}{2i\beta}\right)+ \varphi\right)\wedge \eta^\beta_\alpha
=2is_\alpha^*\varphi^\beta\wedge \eta^\beta_\alpha.
\end{align*}
As for the curvature condition \ref{theo:charpsk:CurvCond}, it still holds because
\begin{equation*}
[\eta_\alpha^\beta\wedge\overline{\eta_\alpha^\beta}]=[\beta\eta_\alpha\wedge \overline{\beta\eta_\alpha}]=[\eta_\alpha\wedge\overline{\eta_\alpha}].
\qedhere
\end{equation*}
\end{proof}
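The simplest instance of this construction is a constant $\beta\equiv c\in\mathrm{U}(1)$: in this case $d\beta=0$, so
\begin{equation*}
\varphi^{\beta}=\varphi,\qquad\qquad \gamma^{\beta}=c\,\gamma.
\end{equation*}
Since every constant admits a square root in $\mathrm{U}(1)$, Proposition \ref{prop:isomorphism_if_root} below shows that the structure obtained in this way is isomorphic to the original one.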
These modified deviances do not always provide an entirely new projective special K\"{a}hler structure.
Before entering into the details, we recall the following elementary result.
\begin{lemma}\label{lemma:differential_action}
Let $M$ be a smooth manifold and $G$ a Lie group with Lie algebra $\lie{g}$ such that there is a smooth right action
\begin{equation}
r\colon M\times G\longrightarrow M.
\end{equation}
Then, the differential of $r$ at a point $(x,a)$ is
\begin{equation}
r_*(X,A)=(R_a)_*(X)+A^{\circ},
\end{equation}
for all $X\in T_xM$, $A\in\lie{g}$, where $A^{\circ}$ denotes the fundamental vector field associated to $A$.
\end{lemma}
\begin{proof}
See e.g.\ \cite[Ex.\ 27.4, p.\ 326]{TuDG}.
\end{proof}
We now present the following isomorphism result:
\begin{prop}\label{prop:isomorphism_if_root}
In the setting of Proposition \ref{prop:PSKcerchioConnessioni}, if moreover $\beta$ has a square root, meaning that $\beta=b^2$ for some $b\colon M\to \mathrm{U}(1)$, then the map
\begin{align}
m_b\colon S&\longrightarrow S,\qquad
u\longmapsto u.b(\pi_S(u))=R_{b(\pi_S(u))}(u)
\end{align}
induces a bundle isomorphism preserving connection and deviance, that is
\begin{equation}
\varphi^\beta=m_b^*(\varphi),\qquad\qquad \gamma^{\beta}=\gamma\circ m_b.
\end{equation}
In particular, if $\beta^*\colon \mathbb{R}\cong H^1_{dR}(S^1)\to H^1_{dR}(M)$ is the zero map, then $\beta$ has a square root.
\end{prop}
\begin{proof}
The preservation of $\gamma$ follows from its $2$-homogeneity, since for all $u\in S$:
\begin{equation}
\gamma\circ m_b (u)=\gamma(u b(\pi_S(u)))
=b(\pi_S(u))^2\gamma(u)
=(\beta\circ \pi_S)(u)\,\gamma(u)
=\gamma^{\beta}(u).
\end{equation}
For the connection, we first compute the differential of $m_b$.
Let $r\colon S\times \mathrm{U}(1)\to S$ be the principal right action, then we can see $m_b$ as $r\circ (\mathrm{id}_S\times (b\circ\pi_S))$.
The differential of $(\mathrm{id}_S\times (b\circ\pi_S))$ is $\mathrm{id}_{TS}\times \pi_S^*db$, where $db$ has values in $\lie{u}(1)=i\mathbb{R}$.
Lemma \ref{lemma:differential_action} gives us the differential of the action.
We have
\begin{equation}
((m_b)_*)_u=(R_{b\pi_S(u)})_*+(d_{\pi_S(u)}b)^{\circ}.
\end{equation}
Now let us compute the pullback of $\varphi$, using the fact that $\varphi$ is right invariant (as $\mathrm{U}(1)$ is abelian, $R_a^*\varphi=\varphi$) and that $d\beta=db^2=2b\,db$:
\begin{align}
m_b^*(\varphi)
&=\varphi\circ (m_b)_*
=\varphi\circ (R_{b\pi_S(u)})_*+\varphi((d_{\pi_S(u)}b)^{\circ})
=R_{b\pi_S(u)}^*\varphi+\frac{1}{ib}d_{\pi_S(u)}b\\
&=\varphi+\frac{1}{i2b^2}d_{\pi_S(u)}\beta
=\varphi+\frac{1}{i2\beta}d_{\pi_S(u)}\beta
=\varphi^{\beta}.
\end{align}
In order to prove the last statement, let $a\colon\mathrm{U}(1)\to\mathbb{C}$ be the standard identification of $\mathrm{U}(1)$ with the unit circle.
Denote by $\psi$ the fundamental form of $\mathrm{U}(1)$, then we can write
\begin{equation}
\psi=\frac{1}{ia}da.
\end{equation}
Now let $\beta\colon M\to \mathrm{U}(1)$, and consider the pullback
\begin{equation}
\beta^*\psi=\beta^*\bigg(\frac{1}{ia}da\bigg)=\frac{1}{i\beta}d\beta.
\end{equation}
We have $0=\beta^*\colon H^1_{dR}(\mathrm{U}(1))\to H^1_{dR}(M)$, so in particular $\frac{1}{i\beta}d\beta$ is exact.
Let $\lambda\in\smooth{M}$ be such that $d\lambda=\frac{1}{i\beta}d\beta$, then $e^{-i\lambda}\beta$ is a smooth function with image in $\mathrm{U}(1)$ and differential
\begin{equation}
-ie^{-i\lambda}\beta d\lambda+e^{-i\lambda}d\beta
=-\frac{ie^{-i\lambda}\beta}{i\beta} d\beta+e^{-i\lambda}d\beta
=-e^{-i\lambda} d\beta+e^{-i\lambda}d\beta
=0.
\end{equation}
So up to a locally constant function $k$, we have $\beta=ke^{i\lambda}$.
Without loss of generality, we can assume $k=1$ (take $\lambda'=\lambda-i\log(k)$).
Then $b:=e^{\frac{i\lambda}{2}}$ satisfies $b^2=e^{i\lambda}=\beta$.
\end{proof}
\begin{rmk}\label{rmk:dR0_unique_structure}
In the family of projective special K\"{a}hler structures constructed in Proposition \ref{prop:PSKcerchioConnessioni}, if $H^1_{dR}(M)=0$, then there is a unique projective special K\"{a}hler structure on $M$ up to a natural notion of isomorphism.
\end{rmk}
Even when $H^1_{dR}(M)\neq 0$, we can say exactly when a function has a global square root by considering the following functional for all $p\in M$:
\begin{equation}
F_{\beta,p}\colon \pi_1(M,p)\longrightarrow \mathbb{R},\qquad \sigma\longmapsto \frac{1}{2\pi}\int_\sigma \frac{1}{i\beta}d\beta.
\end{equation}
Notice that, in the notation above,
\begin{equation}\label{eq:pullback}
\frac{1}{2\pi}\int_\sigma \frac{1}{i\beta}d\beta=\frac{1}{2\pi}\int_{\beta\circ\sigma} \frac{1}{ia}da=\frac{1}{2\pi}\int_{\beta\circ\sigma} \psi,
\end{equation}
so, since $\beta\circ\sigma$ is a loop in $\mathrm{U}(1)$ and the last integral computes its winding number, $F_{\beta,p}$ has image in $\mathbb{Z}$.
\begin{lemma}\label{lemma:cohomological_condition_square}
Let $M$ be a smooth manifold and $\beta\colon M\to \mathrm{U}(1)$, then there exists $b\colon M\to\mathrm{U}(1)$ such that $\beta=b^2$ if and only if for all $p\in M$, the functional
\begin{align*}
[F_{\beta,p}]\colon \pi_1(M,p)\longrightarrow \mathbb{Z}_2,\qquad \sigma\longmapsto \frac{1}{2\pi}\int_\sigma \frac{1}{i\beta}d\beta\ \textrm{mod}\ 2
\end{align*}
is zero.
Explicitly, given $y_p\in\mathrm{U}(1)$ such that $y_p^2=\beta(p)$, for all $q\in M$ in the same connected component as $p$ we have
\begin{equation}\label{eq:root_beta}
b(q)=y_p \exp\left(\frac{1}{2} \int_\sigma \frac{1}{\beta}d\beta\right)
\end{equation}
for all continuous $\sigma\colon [0,1]\to M$ such that $\sigma(0)=p$ and $\sigma(1)=q$.
\end{lemma}
\begin{proof}
If $\beta=b^2$ for some $b\colon M\to \mathrm{U}(1)$, then for all $p\in M$ and $\sigma\in\pi_1(M,p)$,
\begin{equation}
F_{\beta,p}(\sigma)=\frac{1}{2\pi}\int_\sigma \frac{2}{ib}db=2\left(\frac{1}{2\pi}\int_{b\circ\sigma} \psi\right).
\end{equation}
Since $b\circ \sigma$ is a loop, $F_{\beta,p}(\sigma)$ is even, so $[F_{\beta,p}]=0$.
Conversely, choose a point in every connected component of $M$ and define $b$ by glueing functions defined as in \eqref{eq:root_beta}.
We can verify $\beta=b^2$ on each connected component, so let $p$ be the chosen point in said component.
Connected components on manifolds are also path connected components, so for all $q$ in the same connected component, there exists a smooth $\sigma\colon[0,1]\to M$ such that $\sigma(0)=p$ and $\sigma(1)=q$.
The value $b(q)$ is independent of the chosen path $\sigma$; in fact, if we pick another such $\sigma'\colon [0,1]\to M$, then the composition of paths $(\sigma')^{-1} \ast \sigma\in\pi_1(M,p)$ is a loop, and thus
\begin{align*}
\int_\sigma \frac{1}{\beta}d\beta-\int_{\sigma'} \frac{1}{\beta}d\beta
&=\int_\sigma \frac{1}{\beta}d\beta+\int_{(\sigma')^{-1}} \frac{1}{\beta}d\beta
=2\pi i\bigg(\frac{1}{2\pi}\int_{(\sigma')^{-1}\ast \sigma} \frac{1}{i\beta}d\beta\bigg)
=4\pi i k
\end{align*}
for some $k\in\mathbb{Z}$.
It follows that
\begin{equation}
y_p \exp\left(\frac{1}{2} \int_\sigma \frac{1}{\beta}d\beta\right)
=y_p \exp\left(\frac{1}{2} \int_{\sigma'} \frac{1}{\beta}d\beta+2\pi i k\right)=
y_p \exp\left(\frac{1}{2} \int_{\sigma'} \frac{1}{\beta}d\beta\right)
\end{equation}
We can now compute
\begin{align*}
b^2(q)
&=y_p^2 \left(\exp\left(\frac{1}{2} \int_\sigma \frac{1}{\beta}d\beta\right)\right)^2
=\beta(p) \exp\left(\int_\sigma \frac{1}{\beta}d\beta\right).
\end{align*}
Since locally $\frac{1}{\beta}d\beta=d\log(\beta)$, up to picking a suitable partition of $[0,1]$ we have $\exp\left(\int_\sigma \frac{1}{\beta}d\beta\right)=\beta(q)/\beta(p)$ so $b^2(q)=\beta(q)$.
\end{proof}
We deduce
\begin{prop}
Let $M$ be a smooth manifold and $\beta\colon M\to\mathrm{U}(1)$, then the following are equivalent:
\begin{enumerate}
\item\label{prop:cohomological_char_root:1} there exists $b\colon M\to\mathrm{U}(1) $ such that $\beta=b^2$;
\item\label{prop:cohomological_char_root:2} $[F_{\beta,p}]=0$ for all $p\in M$;
\item\label{prop:cohomological_char_root:3} $[F_{\beta,p_k}](\sigma_k)=0$ for a set of loops $\sigma_k\in \pi_1(M,p_k)$ corresponding to a set of generators of $H_1(M,\mathbb{Z})$;
\item\label{prop:cohomological_char_root:4} $[F_{\beta,p_k}](\sigma_k)=0$ for a set of loops $\sigma_k\in \pi_1(M,p_k)$ corresponding to a set of generators of $H_1(M,\mathbb{Z}_2)=H_1(M,\mathbb{Z})\otimes_{\mathbb{Z}}\mathbb{Z}_2$;
\item\label{prop:cohomological_char_root:5} the pullback $\beta^*\colon \mathbb{Z}_2\cong H^1(\mathrm{U}(1),\mathbb{Z}_2)\to H^1(M,\mathbb{Z}_2)$ is the zero map.
\end{enumerate}
\end{prop}
\begin{proof}
The equivalence \ref{prop:cohomological_char_root:1}$\Leftrightarrow$\ref{prop:cohomological_char_root:2} is Lemma \ref{lemma:cohomological_condition_square}.
For \ref{prop:cohomological_char_root:2}$\Leftrightarrow$\ref{prop:cohomological_char_root:3}, suppose at first that $M$ is connected and let $p\in M$.
The functional $[F_{\beta,p}]\colon \pi_1(M,p)\to \mathbb{Z}_2$ is a group homomorphism and by Hurewicz theorem, $H_1(M,\mathbb{Z})$ is the abelianisation of $\pi_1(M,p)$.
Since $\mathbb{Z}_2$ is an abelian group, there are natural isomorphisms
\begin{equation}
\mathrm{Hom}(\pi_1(M,p),\mathbb{Z}_2)\cong \mathrm{Hom}(H_1(M,\mathbb{Z}),\mathbb{Z}_2)= \mathrm{Hom}_{\mathbb{Z}}(H_1(M,\mathbb{Z}),\mathbb{Z}_2),
\end{equation}
and thus, there is a canonical factorization of $[F_{\beta,p}]$ as an abelian group homomorphism (i.e.\ $\mathbb{Z}$-linear map) $H_1(M,\mathbb{Z})\to\mathbb{Z}_2$ which is the zero map if and only if $[F_{\beta,p}]$ is zero.
In particular this proves \ref{prop:cohomological_char_root:2}$\Leftrightarrow$\ref{prop:cohomological_char_root:3}.
In general, $M=\coprod_{i\in I} M_i$ with $M_i$ connected for all $i\in I$, so $H_1(M,\mathbb{Z})=\bigoplus_{i\in I}H_1(M_i,\mathbb{Z})$ and hence
\begin{equation}
\mathrm{Hom}_{\mathbb{Z}}(H_1(M,\mathbb{Z}),\mathbb{Z}_2)
\cong \prod_{i\in I}\mathrm{Hom}_{\mathbb{Z}}(H_1(M_i,\mathbb{Z}),\mathbb{Z}_2)
\cong \prod_{i\in I}\mathrm{Hom}(\pi_1(M_i,p_i),\mathbb{Z}_2).
\end{equation}
Thus, \ref{prop:cohomological_char_root:2} holds if and only if $[F_{\beta,p_i}]=0$ for all $i\in I$, and by the previous isomorphism, this happens if and only if \ref{prop:cohomological_char_root:3} holds.
\ref{prop:cohomological_char_root:3}$\Leftrightarrow$\ref{prop:cohomological_char_root:4} follows from properties of tensor products and linear maps, in fact, given a $\mathbb{Z}$-module $A$, a $\mathbb{Z}$-linear map $A\to\mathbb{Z}_2$ vanishes on $2A$, and thus factors as a map $A/2A\to\mathbb{Z}_2$.
Moreover, $A/2A\cong A\otimes_{\mathbb{Z}}\mathbb{Z}_2$ (seen by applying the right-exact functor $A\otimes_{\mathbb{Z}}\cdot$ to the short exact sequence $0\rightarrow \mathbb{Z}\xrightarrow{2\cdot} \mathbb{Z}\rightarrow\mathbb{Z}_2\rightarrow 0$).
From these properties along with the homological universal coefficients theorem, we find the following natural isomorphisms
\begin{equation}
\mathrm{Hom}_{\mathbb{Z}}(H_1(M,\mathbb{Z}),\mathbb{Z}_2)\cong \mathrm{Hom}_{\mathbb{Z}_2}(H_1(M,\mathbb{Z})\otimes_{\mathbb{Z}} \mathbb{Z}_2,\mathbb{Z}_2)\cong\mathrm{Hom}_{\mathbb{Z}_2}(H_1(M,\mathbb{Z}_2),\mathbb{Z}_2),
\end{equation}
that show the equivalence \ref{prop:cohomological_char_root:3}$\Leftrightarrow$\ref{prop:cohomological_char_root:4}.
Finally, we prove \ref{prop:cohomological_char_root:3}$\Leftrightarrow$\ref{prop:cohomological_char_root:5} by the cohomological universal coefficient theorem, which gives the natural isomorphism $\mathrm{Hom}(H_1(M,\mathbb{Z}),\mathbb{Z}_2)\cong H^1(M,\mathbb{Z}_2)$.
In particular, the class in $H^1(M,\mathbb{Z}_2)$ corresponding to $[F_{\beta,p}]$ is by construction the pullback along $\beta$ of the fundamental form on $\mathrm{U}(1)$ (see \eqref{eq:pullback}).
Since $H^1(\mathrm{U}(1),\mathbb{Z}_2)$ is generated by the integral functional associated to the fundamental form, this image is zero if and only if the whole $\beta^*$ is the zero map.
\end{proof}
This proposition clarifies when two structures built as in Proposition \ref{prop:PSKcerchioConnessioni} are isomorphic as in Proposition \ref{prop:isomorphism_if_root}.
Since $\beta^*$ always vanishes on torsion elements, $H^1_{dR}(M)=0$ is a sufficient condition not only for $\beta^*\colon H^1_{dR}(\mathrm{U}(1))\to H^1_{dR}(M)$ to be zero, but also for $\beta^*\colon H^1(\mathrm{U}(1),\mathbb{Z}_2)\to H^1(M,\mathbb{Z}_2)$ to be zero.
However, the condition $\beta^*=0$ on the cohomology with coefficients in $\mathbb{Z}_2$ is in general strictly weaker than the same condition in de Rham cohomology.
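As a simple illustration of the criterion, take $M=\mathrm{U}(1)$ and $\beta=\mathrm{id}_{\mathrm{U}(1)}$; for the standard generator $\sigma$ of $\pi_1(\mathrm{U}(1),1)$ we get
\begin{equation*}
F_{\beta,1}(\sigma)=\frac{1}{2\pi}\int_{\sigma}\frac{1}{ia}\,da=1,
\end{equation*}
which is odd, so $[F_{\beta,1}]\neq 0$ and $\beta$ admits no square root, consistently with the fact that $z\mapsto z$ has no continuous square root on the circle.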
\section{Complex hyperbolic \texorpdfstring{$n$}{n}-space}
In this section we are going to describe a special family of projective special K\"{a}hler manifolds, which can be thought of as the simplest possible model in a given dimension.
Let $\mathbb{C}^{n,1}$ be the Hermitian space $\mathbb{C}^{n+1}$ endowed with the Hermitian form
\begin{equation}
\langle z,w\rangle =\overline{z_1} w_1+\dots+\overline{z_n} w_n-\overline{z_{n+1}} w_{n+1}.
\end{equation}
It is a complex vector space, so it makes sense to consider the projective space associated to it, that is $\mathbb{P}(\mathbb{C}^{n,1})=(\mathbb{C}^{n,1}\setminus \{0\})/\mathbb{C}^*$ with the quotient topology and the canonical differentiable structure, where $\mathbb{C}^*$ acts by scalar multiplication.
We will denote the quotient class corresponding to an element $z\in\mathbb{C}^{n,1}$ by $[z]$.
We can define the following open subset:
\begin{equation}
\mathcal{H}_{\mathbb{C}}^n:=\{[v]\in\mathbb{P}(\mathbb{C}^{n,1})|\langle v,v\rangle<0\}.
\end{equation}
Let $v=(v_1,\dots,v_{n+1})\in\mathbb{C}^{n,1}$, notice that if $[v]\in\mathcal{H}_{\mathbb{C}}^n$, then $|v_1|^2+\dots+|v_n|^2-|v_{n+1}|^2<0$ so $|v_{n+1}|^2>|v_1|^2+\dots+|v_n|^2\ge 0$ which implies $v_{n+1}\neq 0$.
We thus have a global differentiable chart $\mathcal{H}_{\mathbb{C}}^n\to\mathbb{C}^n$ by restricting the projective chart $[v]\mapsto \Big(\frac{v_1}{v_{n+1}},\dots,\frac{v_n}{v_{n+1}}\Big)$.
\begin{rmk}\label{rmk:HypSpContractible}
The inverse of this chart $\mathbb{C}^n\to\mathbb{P}(\mathbb{C}^{n,1})$ maps $z=(z_1,\dots,z_n)\in\mathbb{C}^n$ to $[(z_1,\dots,z_n,1)]$, which is in $\mathcal{H}_{\mathbb{C}}^n$ if and only if $\norm{z}^2<1$.
We have proven that $\mathcal{H}_{\mathbb{C}}^n$ is diffeomorphic to the complex unit ball and thus in particular it is contractible.
\end{rmk}
Consider now the Lie group $\mathrm{SU}(n,1)$ of the matrices with determinant $1$ that are unitary with respect to the Hermitian metric on $\mathbb{C}^{n,1}$.
We define a left action of $\mathrm{SU}(n,1)$ on $\mathcal{H}_{\mathbb{C}}^n$ by $A[v]:=[Av]$; it is well defined by linearity and invertibility, and it is smooth.
This action is also transitive; indeed, given $[v],[w]\in\mathcal{H}_{\mathbb{C}}^n$, without loss of generality we can assume that $\langle v,v\rangle=-1=\langle w,w\rangle$.
Because of this, we can always complete $v$ and $w$ to an orthonormal basis with respect to the Hermitian product, obtaining $\{v_1,\dots,v_n,v\}$ and $\{w_1,\dots,w_n,w\}$.
Consider the following block matrices $V=(v_1|\dots|v_n|v)$ and $W=(w_1|\dots|w_n|w)$ which, up to multiplying one of the first $n$ columns by a unit complex number, belong to $\mathrm{SU}(n,1)$.
The matrix $A=WV^{-1}\in\mathrm{SU}(n,1)$ maps $v$ to $w$ and thus $[v]$ to $[w]$.
We shall now compute the stabiliser of $[e_{n+1}]$, the class of the last element of the canonical basis, that is, the set of matrices $A\in\mathrm{SU}(n,1)$ such that $Ae_{n+1}=\lambda e_{n+1}$ for some $\lambda\in\mathbb{C}$.
Observe that $\lambda\in \mathrm{U}(1)$ since
\begin{equation}
-1=\langle e_{n+1}, e_{n+1}\rangle=\langle Ae_{n+1}, Ae_{n+1}\rangle=\langle \lambda e_{n+1}, \lambda e_{n+1}\rangle=-|\lambda|^2.
\end{equation}
Moreover, the last column of $A$ is $A_{n+1}=Ae_{n+1}=\lambda e_{n+1}$; since the remaining columns must be $\langle\cdot,\cdot\rangle$-orthogonal to it, their last entries vanish.
This forces $A$ to assume the form
\begin{equation}
\begin{pmatrix}
B&0\\
0&\lambda
\end{pmatrix}.
\end{equation}
Since $A$ belongs to $\mathrm{SU}(n,1)$, we infer that $B$ belongs to $\mathrm{U}(n)$ and $\lambda=\det(B)^{-1}$.
The stabiliser of $[e_{n+1}]$ is thus $\mathrm{S}(\mathrm{U}(n)\mathrm{U}(1))$, which is isomorphic to $\mathrm{U}(n)$ via $B\mapsto \operatorname{diag}(B,\det(B)^{-1})$.
We deduce that $\mathcal{H}_{\mathbb{C}}^n$ is a symmetric space $\mathrm{SU}(n,1)/\mathrm{S}(\mathrm{U}(n)\mathrm{U}(1))$.
We will adopt the nomenclature of \cite{Goldman} for the following definition.
\begin{defi}
We call the K\"{a}hler manifold $\mathcal{H}_{\mathbb{C}}^n$ of complex dimension $n$ the \emph{complex hyperbolic $n$-space}.
\end{defi}
There is a natural K\"{a}hler structure on $\mathcal{H}_{\mathbb{C}}^n$ coming from its representation as a symmetric space $G/H$.
Let $\lie{g}=\lie{h}+\lie{m}$ be the canonical decomposition, in particular
\begin{equation}
\lie{m}:=\bigg\{\begin{pmatrix}
0&x\\
x^\star&0
\end{pmatrix}\big| x\in\mathbb{C}^n\bigg\}.
\end{equation}
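Explicitly, in this decomposition $\lie{h}$ is the subalgebra of block-diagonal matrices
\begin{equation*}
\lie{h}=\bigg\{\begin{pmatrix}
B&0\\
0&-\mathrm{tr}(B)
\end{pmatrix}\big| B\in\lie{u}(n)\bigg\}\cong\lie{u}(n),
\end{equation*}
the Lie algebra of the stabiliser $\mathrm{S}(\mathrm{U}(n)\mathrm{U}(1))$ computed above.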
On a symmetric space, there is a one-to-one correspondence between Riemannian metrics and $\mathrm{Ad}(H)$-invariant positive definite symmetric bilinear forms on $\lie{m}$ (See \cite[II, Corollary 3.2, p.\ 200]{KN}).
Let $\theta\colon T_{[e_{n+1}]}\mathcal{H}_{\mathbb{C}}^n\cong \lie{m}\to\mathbb{C}^n$ be the identification mapping to $x$ the tangent vector corresponding to $\begin{pmatrix}
0&x\\
x^\star&0
\end{pmatrix}$.
With this identification, for $A\in\mathrm{U}(n)$ we see that the $\mathrm{Ad}(A)$-action on $\lie{m}$ corresponds on $\mathbb{C}^n$ to the map $x\mapsto\det(A)Ax$.
The metric is induced by the Killing form on $\lie{su}(n,1)$ given by (\cite{Helgason})
\begin{equation}
B(X,Y)=2(n+1)\mathrm{tr}(XY),\quad \forall X,Y\in\lie{su}(n,1).
\end{equation}
We restrict the Killing form to $\lie{m}$ in order to define an $\mathrm{Ad}(H)$-invariant bilinear form, that is, given $x,y\in \mathbb{C}^n$, if $X,Y$ are the corresponding tangent vectors,
\begin{align*}
B(X,Y)
&=2(n+1)\mathrm{tr}\left(\begin{pmatrix}
0&x\\
x^\star&0
\end{pmatrix}\begin{pmatrix}
0&y\\
y^\star&0
\end{pmatrix}\right)
=2(n+1)\mathrm{tr}\begin{pmatrix}
x y^\star&0\\
0&x^\star y
\end{pmatrix}\\
&=2(n+1)\mathbb{R}e(x^\star y)
=2(n+1)(\theta^\star \theta) (X,Y).
\end{align*}
We define $g_{[e_{n+1}]}:=\theta^\star \theta$, which is $\mathrm{Ad}(\mathrm{U}(n))$-invariant, so it extends to a global Riemannian metric $g$.
By using the same idea, we can also define an almost complex structure $I$ on $\lie{m}$ as the map corresponding to the scalar multiplication by $i$ on $\mathbb{C}^n$.
This structure is compatible with the metric and it is $\mathrm{Ad}(\mathrm{U}(n))$-invariant, so it defines a K\"{a}hler structure (see \cite[II, Proposition 9.3, p.\ 260]{KN}).
The K\"{a}hler form $\omega$ is then:
\begin{align*}
\omega(X,Y)
=g(IX,Y)
=\mathbb{R}e(x^\star i^\star y)
=\operatorname{Im}(x^\star y)
=\operatorname{Im}(\theta^\star \otimes \theta)(X,Y).
\end{align*}
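For concreteness, writing $x=x'+ix''$ and $y=y'+iy''$ in real components, this is the standard symplectic pairing of $\mathbb{C}^n\cong\mathbb{R}^{2n}$:
\begin{equation*}
\omega(X,Y)=\operatorname{Im}(x^\star y)=\sum_{k=1}^{n}\big(x'_k y''_k-x''_k y'_k\big).
\end{equation*}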
\begin{prop}
The manifold $\mathcal{H}_{\mathbb{C}}^n$ has curvature tensor $-\Omega_{\mathbb{P}_{\mathbb{C}}^n}$ and is projective special K\"{a}hler for all $n\ge 1$ with constant zero deviance.
\end{prop}
\begin{proof}
The computation of the curvature tensor is standard.
By Remark \ref{rmk:HypSpContractible}, we know that $\mathcal{H}_{\mathbb{C}}^n$ is contractible, allowing us to apply Corollary \ref{cor:charPSKesatte}.
If we choose as $\eta$ the zero section of $\sharp_2 S_{3,0}M$, then the differential condition \ref{eq:condDiffCor} is trivially satisfied, while condition \ref{eq:condCurvCor} follows from the computation of the curvature tensor.
\end{proof}
Notice that the deviance measures how far a projective special K\"{a}hler manifold of dimension $2n$ is from being the complex hyperbolic $n$-space.
More precisely, we have
\begin{prop}
Let $M$ be a projective special K\"{a}hler manifold with intrinsic deviance $\gamma\colon S\to \sharp_2 S_{3,0}M$; then the curvature tensor $\Omega_M$ coincides with that of $\mathcal{H}_{\mathbb{C}}^n$ exactly at those points $p$ where $\gamma|_p$ vanishes.
\end{prop}
In particular, for any section of $S$ defined on an open neighbourhood of $p$, the corresponding local deviance vanishes at $p$ whenever the two curvatures coincide.
\begin{proof}
One direction follows from condition \ref{theo:charpsk:CurvCond}.
For the opposite one, if $\Omega_M=\Omega_{\mathcal{H}_{\mathbb{C}}^{n}}=-\Omega_{\mathbb{P}_{\mathbb{C}}^n}$, then $\mathrm{scal}_M=-2(n+1)$ and the intrinsic deviance vanishes as the norm of any local deviance vanishes by \eqref{eq:ScalCurv}.
\end{proof}
We can also prove
\begin{prop}\label{prop:uniquenessHyp}
The only complete connected and simply connected projective special K\"{a}hler manifold of dimension $2n$ with zero deviance is $\mathcal{H}_{\mathbb{C}}^n$.
\end{prop}
\begin{proof}
Let $(\pi\colon \widetilde{M}\to M,\nabla)$ be such a projective special K\"{a}hler manifold.
Consider a point $p\in M$; then $(T_p M,g,I)$ is a complex vector space with a compatible metric and can thus be identified, via a complex linear isometry $F$, with the tangent space at a point of $\mathcal{H}_\mathbb{C}^n$, as both are isomorphic to $\mathbb{C}^n$ with the standard metric.
Being complex manifolds, $\mathcal{H}_\mathbb{C}^n$ and $M$ are analytic, and since the curvature of $M$ is forced to be $-\Omega_{\mathbb{P}^n_{\mathbb{C}}}$, which corresponds to a $\lie{u}(n)$-invariant map from the bundle of unitary frames to $S^2(\lie{u}(n))$, it is also parallel with respect to the Levi-Civita connection.
It follows that the linear isomorphism $F$ preserves the curvature tensors and their covariant derivatives.
Thus, $F$ can be extended to a diffeomorphism $f\colon M\to\mathcal{H}_\mathbb{C}^n$ (see \cite[I, Corollary 7.3, p.\ 261]{KN}) such that $F$ is its differential at $p$.
Since $F$ preserves $I$ and $\omega$, which are parallel, and since $f$ maps parallel tensors to parallel tensors, $f$ is an isomorphism of K\"{a}hler manifolds.
Since the deviance of both manifolds is zero, we also have an isomorphism of projective special K\"{a}hler manifolds.
\end{proof}
\section{Classification of projective special K\"{a}hler Lie groups in dimension 4}
If $M$ is a Lie group, the conditions of Theorem \ref{theo:characterisationPSK} are simpler, because a Lie group is always parallelisable.
As a consequence, the bundle $\sharp_2 S_{3,0}(M)$ is trivial, and in particular we have a global frame in which to write the local deviances.
\begin{defi}\label{def:PSKLg}
A projective special K\"{a}hler Lie group is a Lie group with projective special K\"{a}hler structure such that the K\"{a}hler structure is left-invariant.
\end{defi}
Notice that we do not require the deviance to be left-invariant.
An example is $\mathcal{H}^n_{\mathbb{C}}$, since the Iwasawa decomposition $\mathrm{SU}(n,1)=KAN$ (see \cite[Theorem 1.3, p.\ 403]{Helgason}) gives a left-invariant K\"{a}hler structure on the solvable Lie group $AN$.
We denote by $\mathcal{H}_\lambda$ the hyperbolic plane with curvature $-\lambda^2$, which is actually just a rescaling of $\mathcal{H}_{\mathbb{C}}^1$.
With Definition \ref{def:PSKLg}, we are able to classify $4$-dimensional projective special K\"{a}hler Lie groups; we obtain exactly two, which coincide with the two $4$-dimensional cases appearing in the classification of projective special K\"{a}hler manifolds homogeneous under the action of a semisimple Lie group (\cite{CortesClassHomoSS}).
\begin{theo}\label{theo:classificazionePSK4}
Up to isomorphisms of projective special K\"{a}hler manifolds, there are only two $4$-dimensional connected and simply connected projective special K\"{a}hler Lie groups: $\mathcal{H}_{\sqrt{2}}\times\mathcal{H}_{2}$ and the complex hyperbolic plane.
Up to isomorphisms that also preserve the Lie group structure, there are four families of $4$-dimensional connected and simply connected projective special K\"{a}hler Lie groups, listed in Table \ref{table:PSKLieGroups}.
\begin{table}[!ht]
\centering
\begin{tabular}[c]{|c|l|c|c|}
\hline
PSK manifold &Differentials of the complex unitary coframe $\theta$ &Riemann curvature &$\sigma$\\
\hline
$\mathcal{H}_{\sqrt{2}}\times\mathcal{H}_2$&
\begin{tabular}{@{}l@{}}
$d\theta^1=-\frac{\sqrt{2}}{2}\overline{\theta^1}\wedge\theta^1$\\
$d\theta^2=-\overline{\theta^2}\wedge\theta^2$\\
\end{tabular}
&
\begin{tabular}{@{}l@{}}
$\frac{1}{2}(\overline{\theta^1}\wedge\theta^1)^2$\\
$+(\overline{\theta^2}\wedge\theta^2)^2$
\end{tabular}
&
$\frac{3}{2}(\theta^1)^2\theta^2$\\
\hline
$\mathcal{H}_{\mathbb{C}}^2$&
\begin{tabular}{@{}l@{}}
$d\theta^1=\frac{1}{2}\theta^1\wedge(\theta^2+\overline{\theta^2})$\\
$d\theta^2=-\overline{\theta^1}\wedge\theta^1-\overline{\theta^2}\wedge\theta^2$\\
\end{tabular}
&
$-\Omega_{\mathbb{P}^{2}_{\mathbb{C}}}^{\flat}$
&0\\
\hline
$\mathcal{H}_{\mathbb{C}}^2$&
\begin{tabular}{@{}l@{}}
$d\theta^1=(\frac{1}{2}-\frac{i}{\delta})\theta^1\wedge(\theta^{2}+\overline{\theta^2})$\\
$d\theta^2=-\overline{\theta^1}\wedge\theta^1-\overline{\theta^2}\wedge\theta^2$\\
$\delta>0$\\
\end{tabular}
&
$-\frac{1}{\delta}\Omega_{\mathbb{P}_{\mathbb{C}}^2}^{\flat}$
&0\\
\hline
$\mathcal{H}_{\mathbb{C}}^2$&
\begin{tabular}{@{}l@{}}
$d\theta^1=-(\frac{1}{\delta}+\frac{i}{2})\theta^1\wedge(\theta^{2}-\overline{\theta^2})$\\
$d\theta^2=-i\overline{\theta^1}\wedge\theta^1-\overline{\theta^2}\wedge\theta^2$\\
$\delta>0$\\
\end{tabular}
&
$-\frac{1}{\delta}\Omega_{\mathbb{P}^{2}_{\mathbb{C}}}^{\flat}$
&0\\
\hline
\end{tabular}
\caption{Connected projective special K\"ahler Lie groups.}
\label{table:PSKLieGroups}
\end{table}
\end{theo}
\begin{proof}
We will study K\"ahler Lie groups through K\"ahler Lie algebras, see e.g.\ \cite[\S 1.1, p.\ 26]{DorfmeisterNakajima}.
We will start from the classification of pseudo-K\"{a}hler Lie groups provided by \cite{Ovando2004}.
Table \ref{table:classificationOvando} displays the eighteen families of non-abelian pseudo-K\"{a}hler Lie algebras in dimension $4$.
\begin{table}[!ht]
\centering
\begin{tabular}[c]{|l|l|p{6cm}|}
\hline
$\lie{g}$ & $I$ & $\omega$\\
\hline
$\lie{rh}_3$ &$Ie_1=e_2, Ie_3=e_4$&$a_1 (e^{13}+e^{24})+a_2(e^{14}-e^{23})+a_3 e^{12}$, $a_1^2+a_2^2\neq 0$\\
\hline
$\lie{rr}_{3,0}$ &$Ie_1=e_2, Ie_3=e_4$&$a_1 e^{12}+a_2 e^{34}$, $a_1a_2\neq 0$\\
\hline
$\lie{rr}'_{3,0}$ &$Ie_1=e_4, Ie_2=e_3$&$a_1 e^{14}+a_2 e^{23}$, $a_1a_2\neq 0$\\
\hline
$\lie{r}_2\lie{r}_2$&$Ie_1=e_2, Ie_3=e_4$&$a_1 e^{12}+a_2 e^{34}$, $a_1a_2\neq 0$\\
\hline
$\lie{r}'_2$ &$Ie_1=e_3, Ie_2=e_4$&$a_1 (e^{13}-e^{24})+a_2(e^{14}+e^{23})$, $a_1^2+a_2^2\neq 0$\\
\hline
$\lie{r}'_2$ &$Ie_1=-e_2, Ie_3=e_4$&$a_1 (e^{13}-e^{24})+a_2(e^{14}+e^{23})+a_3 e^{12}$, $a_1^2+a_2^2\neq 0$\\
\hline
$\lie{r}_{4,-1,-1}$ &$Ie_4=e_1, Ie_2=e_3$&$a_1 (e^{12}+e^{34})+a_2(e^{13}-e^{24})+a_3 e^{14}$, $a_1^2+a_2^2\neq 0$\\
\hline
$\lie{r}'_{4,0,\delta}$ &$Ie_4=e_1, Ie_2=e_3$&$a_1 e^{14}+a_2 e^{23}$, $a_1a_2\neq 0$, $\delta>0$\\
\hline
$\lie{r}'_{4,0,\delta}$ &$Ie_4=e_1, Ie_2=-e_3$&$a_1 e^{14}+a_2 e^{23}$, $a_1a_2\neq 0$, $\delta>0$\\
\hline
$\lie{d}_{4,1}$ &$Ie_1=e_4, Ie_2=e_3$&$a_1 (e^{12}-e^{34})+a_2e^{14}$, $a_1\neq 0$\\
\hline
$\lie{d}_{4,2}$ &$Ie_4=-e_2, Ie_1=e_3$&$a_1 (e^{14}+e^{23})+a_2e^{24}$, $a_1\neq 0$\\
\hline
$\lie{d}_{4,2}$ &$Ie_4=-2e_1, Ie_2=e_3$&$a_1 e^{14}+a_2 e^{23}$, $a_1a_2\neq 0$\\
\hline
$\lie{d}_{4,1/2}$ &$Ie_4=e_3, Ie_1=e_2$&$a_1 (e^{12}-e^{34})$, $a_1\neq 0$\\
\hline
$\lie{d}_{4,1/2}$ &$Ie_4=e_3, Ie_1=-e_2$&$a_1 (e^{12}-e^{34})$, $a_1\neq 0$\\
\hline
$\lie{d}'_{4,\delta}$ &$Ie_4=e_3, Ie_1=e_2$&$a_1 (e^{12}-\delta e^{34})$, $a_1\neq 0$, $\delta>0$\\
\hline
$\lie{d}'_{4,\delta}$ &$Ie_4=-e_3, Ie_1=e_2$&$a_1 (e^{12}-\delta e^{34})$, $a_1\neq 0$, $\delta>0$\\
\hline
$\lie{d}'_{4,\delta}$ &$Ie_4=-e_3, Ie_1=-e_2$&$a_1 (e^{12}-\delta e^{34})$, $a_1\neq 0$, $\delta>0$\\
\hline
$\lie{d}'_{4,\delta}$ &$Ie_4=e_3, Ie_1=-e_2$&$a_1 (e^{12}-\delta e^{34})$, $a_1\neq 0$, $\delta>0$\\
\hline
\end{tabular}
\caption[Pseudo-K\"{a}hler Lie algebras]{Classification of $4$-dimensional pseudo-K\"{a}hler non-abelian Lie algebras \cite[Table 5.1, p.\ 63]{Ovando2004}.}
\label{table:classificationOvando}
\end{table}
Among these families, the metric can be positive definite, i.e.\ K\"{a}hler, only for the ones listed in Table \ref{table:KahlerLieAlgebras}.
\begin{table}[!ht]
\centering
\begin{tabular}[c]{|l|l|l|l|l|}
\hline
Case& $\lie{g}$ & $I$ & $\omega$& Conditions\\
\hline
I&$\lie{rr}_{3,0}$ &$Ie_1=e_2, Ie_3=e_4$&$a_1 e^{12}+a_2 e^{34}$& $a_1, a_2>0$\\
\hline
II&$\lie{rr}'_{3,0}$ &$Ie_1=e_4, Ie_2=e_3$&$a_1 e^{14}+a_2 e^{23}$& $a_1, a_2> 0$\\
\hline
III&$\lie{r}_2\lie{r}_2$&$Ie_1=e_2, Ie_3=e_4$&$a_1 e^{12}+a_2 e^{34}$& $a_1, a_2> 0$\\
\hline
IV&$\lie{r}'_{4,0,\delta}$ &$Ie_4=e_1, Ie_2=e_3$&$a_1 e^{14}+a_2 e^{23}$& $a_1<0;a_2,\delta>0$\\
\hline
V&$\lie{r}'_{4,0,\delta}$ &$Ie_4=e_1, Ie_2=-e_3$&$a_1 e^{14}+a_2 e^{23}$& $a_1,a_2<0; \delta>0$\\
\hline
VI&$\lie{d}_{4,2}$ &$Ie_4=-2e_1, Ie_2=e_3$&$a_1 e^{14}+a_2 e^{23}$& $a_1,a_2> 0$\\
\hline
VII&$\lie{d}_{4,1/2}$ &$Ie_4=e_3, Ie_1=e_2$&$a_1 (e^{12}-e^{34})$& $a_1> 0$\\
\hline
VIII&$\lie{d}'_{4,\delta}$ &$Ie_4=e_3, Ie_1=e_2$&$a_1 (e^{12}-\delta e^{34})$& $a_1,\delta>0$\\
\hline
IX&$\lie{d}'_{4,\delta}$ &$Ie_4=-e_3, Ie_1=-e_2$&$a_1 (e^{12}-\delta e^{34})$& $a_1<0;\delta>0$\\
\hline
\end{tabular}
\caption{Non-abelian K\"{a}hler Lie algebras of dimension $4$.}
\label{table:KahlerLieAlgebras}
\end{table}
It is now straightforward to find a unitary frame $u$ for each case, that is, one such that $g=\sum_{k=1}^4 (u^k)^2$, $Iu_1=u_2$, $Iu_3=u_4$ and $\omega=u^{1,2}+u^{3,4}$.
With respect to $u$, we can write the new structure constants and compute the Levi-Civita connection form $\omega^{LC}$ and the corresponding curvature form $\Omega^{LC}$.
We write
\begin{align*}
H_1:=\begin{pmatrix}
&\quad -u^{12}&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
u^{12}&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
\hline
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&
\end{pmatrix},& &
H_2=\begin{pmatrix}
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
\hline
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&-u^{34}\\
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&u^{34}&
\end{pmatrix}.
\end{align*}
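To illustrate how the entries of Table \ref{table:curvatures} are obtained, consider case I: the only non-trivial structure equation is $du^2=-a\,u^{1,2}$, and solving the first structure equation $du^k=-(\omega^{LC})^k_j\wedge u^j$ with $\omega^{LC}$ antisymmetric in the orthonormal frame $u$ gives
\begin{equation*}
(\omega^{LC})^1_2=-(\omega^{LC})^2_1=a\,u^2,
\qquad
\Omega^1_2=d(\omega^{LC})^1_2=a\,du^2=-a^2\,u^{1,2},
\end{equation*}
that is, $\Omega^{LC}=a^2H_1$; here the quadratic term of the curvature vanishes.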
\begin{table}
\centering
\begin{tabular}[c]{|c|c|l|c|}
\hline
Case& $\lie{g}$ &Str.\ constants & $\Omega^{LC}$\\
\hline
I&$\lie{rr}_{3,0}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=a u_2$\\
$a>0$\\
\end{tabular}
&
$a^2 H_1$
\\
\hline
II&$\lie{rr}'_{3,0}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_3]=- u_4$\\
$[u_1,u_4]= u_3$\\
\end{tabular}
&
0
\\
\hline
III&$\lie{r}_2\lie{r}_2$&
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=a u_2$\\
$[u_3,u_4]=b u_4$\\
$a,b>0$\\
\end{tabular}
&
$a^2 H_1+b^2 H_2$
\\
\hline
IV&$\lie{r}'_{4,0,\delta}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=a u_2$\\
$[u_1,u_3]=-\delta a u_4$\\
$[u_1,u_4]=\delta a u_3$\\
$a,\delta>0$\\
\end{tabular}
&
$a^2H_1$
\\
\hline
V&$\lie{r}'_{4,0,\delta}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=a u_2$\\
$[u_1,u_3]=\delta a u_4$\\
$[u_1,u_4]=-\delta a u_3$\\
$a,\delta>0$\\
\end{tabular}
&
$a^2 H_1$
\\
\hline
VI&$\lie{d}_{4,2}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=-2a u_1$\\
$[u_1,u_3]=2a u_4$\\
$[u_2,u_3]=-a u_3$\\
$[u_2,u_4]=a u_4$\\
$a>0$\\
\end{tabular}
&
$-a^2\Omega_{\mathbb{P}^2_{\mathbb{C}}}-6a^2 H_2$
\\
\hline
VII&$\lie{d}_{4,1/2}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=2a u_4$\\
$[u_1,u_3]=-a u_1$\\
$[u_2,u_3]=-a u_2$\\
$[u_3,u_4]=2a u_4$\\
$a>0$\\
\end{tabular}
&
$-a^2\Omega_{\mathbb{P}^2_{\mathbb{C}}}$
\\
\hline
VIII&$\lie{d}'_{4,\delta}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=2a\sqrt{\delta} u_4$\\
$[u_1,u_3]=-a\sqrt{\delta} u_1+\frac{2a}{\sqrt{\delta}} u_2$\\
$[u_2,u_3]=-\frac{2a}{\sqrt{\delta}} u_1-a\sqrt{\delta} u_2$\\
$[u_3,u_4]=2a\sqrt{\delta} u_4$\\
$a,\delta>0$\\
\end{tabular}
&
$-\delta a^2\Omega_{\mathbb{P}^2_{\mathbb{C}}}$
\\
\hline
IX&$\lie{d}'_{4,\delta}$ &
\begin{tabular}{@{}l@{}}
$[u_1,u_2]=-2a\sqrt{\delta} u_3$\\
$[u_1,u_4]=-a\sqrt{\delta} u_1-\frac{2a}{\sqrt{\delta}} u_2$\\
$[u_2,u_4]=+\frac{2a}{\sqrt{\delta}} u_1-a\sqrt{\delta} u_2$\\
$[u_3,u_4]=-2a\sqrt{\delta} u_3$\\
$a,\delta>0$\\
\end{tabular}
&
$-\delta a^2 \Omega_{\mathbb{P}^2_{\mathbb{C}}}$
\\
\hline
\end{tabular}
\caption{Curvature tensors.}
\label{table:curvatures}
\end{table}
From Table \ref{table:curvatures} we notice that the curvature tensors are of two types:
\begin{enumerate}[label=(\roman*)]
\item\label{case:curvi} $a^2 H_1+b^2 H_2$ for $a,b\ge 0$;
\item\label{case:curvii} $-a^2(\Omega_{\mathbb{P}^2_{\mathbb{C}}}+6bH_2)$ for $a>0$ and $b\in\{0,1\}$.
\end{enumerate}
A K\"{a}hler Lie group $M$ of dimension $4$ is always solvable \cite[Theorem 9, p.\ 155]{Chu}.
This implies that $M$ is the product of a torus\index{torus} (a product of circles) and a Euclidean space \cite[Theorem 2\textsuperscript{a}, p.\ 675]{Chevalley}; but $M$ is also simply connected, so it must be a Euclidean space, and thus contractible.
If now $M$ has a projective special K\"{a}hler structure, thanks to Corollary \ref{cor:charPSKesatte}, there is a global section $\eta\colon M\to \sharp_2 S_{3,0}M$ satisfying \ref{eq:condCurvCor} and \ref{eq:condDiffCor}.
Applying $\flat_2$ we obtain a global section $\sigma$ of $S_{3,0}M$ which better displays the symmetry.
Consider the globally defined complex coframe $\theta^1=u^1+iu^2$, $\theta^2=u^3+i u^4$.
We write $\sigma$ in its generic form with respect to $\theta$:
\begin{equation}
\sigma=c_1(\theta^1)^3+c_2(\theta^1)^2\theta^2+c_3\theta^1(\theta^2)^2+c_4(\theta^2)^3
\end{equation}
for some functions $c_1,c_2,c_3,c_4\in\smooth{M,\mathbb{C}}$.
By raising the second index, we obtain $\eta=\sharp_2\sigma$ which is
\begin{align}
\eta
&=2c_1\theta^1\otimes \overline{\theta_1}\otimes \theta^1
+\frac{2c_2}{3}\left(\theta^1\otimes \overline{\theta_1}\otimes\theta^2+\theta^1\otimes \overline{\theta_2}\otimes\theta^1+\theta^2\otimes \overline{\theta_1}\otimes\theta^1\right)\\*
&\quad +\frac{2c_3}{3}\left(\theta^1\otimes \overline{\theta_2}\otimes\theta^2+\theta^2\otimes \overline{\theta_1}\otimes\theta^2+\theta^2\otimes \overline{\theta_2}\otimes\theta^1\right)
+2c_4\theta^2\otimes \overline{\theta_2}\otimes \theta^2.
\end{align}
With respect to this generic section, we can compute $[\eta\wedge\overline{\eta}]$ explicitly:
\allowdisplaybreaks[0]
\begin{align*}
[\eta\wedge\overline{\eta}]
=\frac{4}{9}\mathbb{R}E&\left(\overline{\theta^1}\wedge\theta^1\otimes \begin{pmatrix}
9|c_1|^2+|c_2|^2&3\overline{c_1}c_2+\overline{c_2}c_3\\
3\overline{c_2}c_1+\overline{c_3}c_2&|c_2|^2+|c_3|^2\\
\end{pmatrix}\right.\\
&\quad +\overline{\theta^1}\wedge\theta^2\otimes \begin{pmatrix}
3\overline{c_1}c_2+\overline{c_2}c_3&\overline{c_1}c_3+\overline{c_2}c_4\\
|c_2|^2+|c_3|^2&\overline{c_2}c_3+3\overline{c_3}c_4\\
\end{pmatrix}\\
&\quad +\overline{\theta^2}\wedge\theta^1\otimes \begin{pmatrix}
3\overline{c_2}c_1+\overline{c_3}c_2&|c_2|^2+|c_3|^2\\
\overline{c_3}c_1+\overline{c_4}c_2&\overline{c_3}c_2+3\overline{c_4}c_3\\
\end{pmatrix}\\
&\quad +\left.\overline{\theta^2}\wedge\theta^2\otimes \begin{pmatrix}
|c_2|^2+|c_3|^2&\overline{c_2}c_3+\overline{c_3}c_4\\
\overline{c_3}c_2+3\overline{c_4}c_3&|c_3|^2+9|c_4|^2\\
\end{pmatrix}\right).
\end{align*}
\allowdisplaybreaks
Notice that if we define $v_1,v_2,v_3\in \smooth{M,\mathbb{C}^2}$ such that
\begin{align}\label{eq:defVk}
v_1:=\begin{pmatrix}
2c_1\\
\frac{2c_2}{3}
\end{pmatrix}=\begin{pmatrix}
x\\
y
\end{pmatrix},\quad v_2:=\begin{pmatrix}
\frac{2c_2}{3}\\
\frac{2c_3}{3}
\end{pmatrix}=\begin{pmatrix}
y\\
z
\end{pmatrix},\quad v_3:=\begin{pmatrix}
\frac{2c_3}{3}\\
2c_4
\end{pmatrix}=\begin{pmatrix}
z\\
w
\end{pmatrix},
\end{align}
then we have
\begin{align*}
[\eta\wedge\overline{\eta}]
&=\mathbb{R}E\left(\overline{\theta^1}\wedge\theta^1\otimes \begin{pmatrix}
\norm{v_1}^2&\langle v_1,v_2\rangle\\
\overline{\langle v_1,v_2\rangle}&\norm{v_2}^2\\
\end{pmatrix}
+\overline{\theta^1}\wedge\theta^2\otimes \begin{pmatrix}
\langle v_1,v_2\rangle&\langle v_1,v_3\rangle\\
\norm{v_2}^2&\langle v_2,v_3\rangle\\
\end{pmatrix}\right.\\
&\quad\left.+\overline{\theta^2}\wedge\theta^1\otimes \begin{pmatrix}
\overline{\langle v_1,v_2\rangle}&\norm{v_2}^2\\
\overline{\langle v_1,v_3\rangle}&\overline{\langle v_2,v_3\rangle}\\
\end{pmatrix}
+\overline{\theta^2}\wedge\theta^2\otimes \begin{pmatrix}
\norm{v_2}^2&\langle v_2,v_3\rangle\\
\overline{\langle v_2,v_3\rangle}&\norm{v_3}^2\\
\end{pmatrix}\right).
\end{align*}
In other words, the coefficients of $[\eta\wedge\overline{\eta}]$ are the pairwise Hermitian products of $v_1,v_2,v_3$.
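As a consistency check, the top-left entry of the $\overline{\theta^1}\wedge\theta^1$ block is
\begin{equation*}
\norm{v_1}^2=|x|^2+|y|^2=4|c_1|^2+\frac{4}{9}|c_2|^2=\frac{4}{9}\left(9|c_1|^2+|c_2|^2\right),
\end{equation*}
in agreement with the explicit expression of $[\eta\wedge\overline{\eta}]$ computed above.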
Returning to the classification, if we write $H_1,H_2,\Omega_{\mathbb{P}^2_{\mathbb{C}}}$ with respect to the complex coframe, we notice that the entries corresponding to the mixed Hermitian products always vanish.
\begin{align*}
H_1&=\mathbb{R}E\left(\overline{\theta^1}\wedge\theta^1\otimes\begin{pmatrix}
\frac{1}{2}&0\\
0&0
\end{pmatrix}\right),\qquad
H_2=\mathbb{R}E\left(\overline{\theta^2}\wedge\theta^2\otimes\begin{pmatrix}
0&0\\
0&\frac{1}{2}
\end{pmatrix}\right),\\
\Omega_{\mathbb{P}^2_{\mathbb{C}}}&=\mathbb{R}E\left(\overline{\theta^1}\wedge\theta^1\otimes\begin{pmatrix}
-2&0\\
0&-1\\
\end{pmatrix}
+\overline{\theta^1}\wedge\theta^2\otimes\begin{pmatrix}
0&0\\
-1&0
\end{pmatrix}\right.\\
&\quad \left.+\overline{\theta^2}\wedge\theta^1\otimes\begin{pmatrix}
0&-1\\
0&0\\
\end{pmatrix}+\overline{\theta^2}\wedge\theta^2\otimes\begin{pmatrix}
-1&0\\
0&-2\\
\end{pmatrix}\right).
\end{align*}
As a consequence, in all cases, if \ref{eq:condCurvCor} holds, then $v_1,v_2,v_3$ must be pairwise orthogonal.
We now treat each possible type of curvature tensor separately.
\begin{enumerate}[label=(\roman*)]
\item Let $a,b\ge 0$ and $\Omega^{LC}=a^2 H_1+b^2 H_2$, then
\begin{equation}
\Omega^{LC}=\mathbb{R}E\left(\overline{\theta^1}\wedge\theta^1\otimes\begin{pmatrix}
\frac{a^2}{2}&0\\
0&0\\
\end{pmatrix}+\overline{\theta^2}\wedge\theta^2\otimes\begin{pmatrix}
0&0\\
0&\frac{b^2}{2}\\
\end{pmatrix}\right).
\end{equation}
So, by \ref{eq:condCurvCor}, $[\eta\wedge\overline{\eta}]=-\Omega^{LC}-\Omega_{\mathbb{P}^2_{\mathbb{C}}}$, which implies
\begin{align*}
\norm{v_1}^2=2-\frac{a^2}{2},&&\norm{v_2}^2=1,&&\norm{v_3}^2=2-\frac{b^2}{2}.
\end{align*}
These equalities translate to a linear system in the squared norms of $x,y,z,w$ introduced in \eqref{eq:defVk}, namely
\begin{equation}
\begin{cases}
|x|^2+|y|^2=2-\frac{a^2}{2}\\
|y|^2+|z|^2=1\\
|z|^2+|w|^2=2-\frac{b^2}{2}
\end{cases}.
\end{equation}
Its solutions are
\begin{align}\label{eq:systemHxHnorms}
\begin{cases}
|x|^2=1-\frac{a^2}{2}+s\\
|y|^2=1-s\\
|z|^2=s\\
|w|^2=2-\frac{b^2}{2}-s
\end{cases}&& \textrm{ for } s\in [0,1].
\end{align}
Imposing the orthogonality conditions $\langle v_1,v_2\rangle=\langle v_2,v_3\rangle=\langle v_1,v_3\rangle=0$, we get:
\begin{align}\label{eq:systemHxHprod}
\begin{cases}
\overline{x}y+\overline{y}z=0\\
\overline{y}z+\overline{z}w=0\\
\overline{x}z+\overline{y}w=0\\
\end{cases}.
\end{align}
Notice that because of \eqref{eq:systemHxHnorms}, $y$ and $z$ cannot vanish simultaneously, so we have (at each point) three different cases:
\begin{itemize}
\item
Suppose at first that $z=0$, then $s=0$ and $\norm{y}=1$, so $y\neq 0$ and \eqref{eq:systemHxHprod} becomes
\begin{align}
\begin{cases}
\overline{x}y=0\\
0=0\\
\overline{y}w=0\\
\end{cases}.
\end{align}
This implies $x=w=0$, so the solutions are $(x,y,z,w)=(0,y,0,0)$ for $y\in \smooth{M,\mathrm{U}(1)}$.
Now $M$ is simply connected, so $y=e^{i\alpha}$ for some $\alpha\in\smooth{M}$, as $y$ lifts to the universal cover $\exp\colon i\mathbb{R}\to \mathrm{U}(1)$.
Thus we have $(c_1,c_2,c_3,c_4)=(0,\frac{3}{2}e^{i\alpha},0,0)$ for some $\alpha\in\smooth{M}$.
Finally, \eqref{eq:systemHxHnorms} gives
\begin{equation}
\begin{cases}
1-\frac{a^2}{2}=0\\
2-\frac{b^2}{2}=0
\end{cases}
\end{equation}
and thus $a=\sqrt{2}$ and $b=2$.
\item Suppose now that $z\neq 0$ and $y=0$, then \eqref{eq:systemHxHprod} becomes
\begin{align}
\begin{cases}
0=0\\
\overline{z}w=0\\
\overline{x}z=0\\
\end{cases}
\end{align}
and then $w=x=0$ so, similarly to the previous case, the solutions are $(c_1,c_2,c_3,c_4)=(0,0,\frac{3}{2}e^{i\alpha},0)$ for $\alpha\in\smooth{M}$ and this time, \eqref{eq:systemHxHnorms} implies $a=2$ and $b=\sqrt{2}$.
\item The remaining case has $z\neq 0$ and $y\neq 0$.
In order to solve it, let us call $t:=\overline{y}z\neq 0$, then \eqref{eq:systemHxHnorms} and \eqref{eq:systemHxHprod} give
\begin{align*}
z&=\frac{ty}{|y|^2}=\frac{ty}{1-s},\\
x&=-\frac{\overline{t}y}{|y|^2}=-\frac{\overline{t}y}{1-s},\\
w&=-\frac{tz}{|z|^2}=-\frac{t^2 y}{s(1-s)},\\
0=\overline{x}z+\overline{y}w&=\left(-\frac{t\overline{y}}{1-s}\right)\left(\frac{ty}{1-s}\right)+\overline{y}\left(-\frac{t^2y}{s(1-s)}\right)\\
&=-t^2\left(\frac{1}{1-s}+\frac{1}{s}\right)=-\frac{t^2}{s(1-s)},
\end{align*}
in contradiction with $t\neq 0$.
\end{itemize}
In conclusion, for this class of curvature tensors, the only solutions are for
\begin{align*}
a=\sqrt{2},&& b=2, && \sigma=\frac{3}{2}e^{i\alpha}(\theta^1)^2\theta^2,&& \textrm{for }\alpha\in\smooth{M}\textrm{, or}\\
a=2,&& b=\sqrt{2}, && \sigma=\frac{3}{2}e^{i\alpha}\theta^1(\theta^2)^2,&& \textrm{for }\alpha\in\smooth{M}.
\end{align*}
We deduce that in Table \ref{table:curvatures} there are no solutions for the cases I, II, IV, V, and the only solutions in case III are the ones mentioned before.
Moreover, these solutions are isomorphic to one another and the isomorphism is obtained by swapping $u_1$ with $u_3$ and $u_2$ with $u_4$.
The simply connected Lie group corresponding to this case is $\mathcal{H}_{\sqrt{2}}\times \mathcal{H}_{2}$, since the two factors have constant curvature $-a^2=-2$ and $-b^2=-4$ respectively.
Notice that the unique abelian K\"{a}hler $4$-dimensional Lie algebra is flat, so its curvature is also of type (i), with $a=b=0$; thus it cannot be endowed with a projective special K\"{a}hler structure.
\item Let now $a>0, b\in\{0,1\}$ and $\Omega^{LC}=-a^2(\Omega_{\mathbb{P}^2_{\mathbb{C}}}+6bH_2)$, then
\begin{align*}
&[\eta\wedge\overline{\eta}]
=-\Omega^{LC}-\Omega_{\mathbb{P}^{2}_{\mathbb{C}}}
=(a^2-1)\Omega_{\mathbb{P}^{2}_{\mathbb{C}}}+6a^2bH_2\\
&\quad=\mathbb{R}E\left(\overline{\theta^1}\wedge\theta^1\otimes\begin{pmatrix}
2(1-a^2)&0\\
0&1-a^2\\
\end{pmatrix}
+\overline{\theta^1}\wedge\theta^2\otimes\begin{pmatrix}
0&0\\
1-a^2&0
\end{pmatrix}\right.\\
&\quad\quad\left.+\overline{\theta^2}\wedge\theta^1\otimes\begin{pmatrix}
0&1-a^2\\
0&0\\
\end{pmatrix}+\overline{\theta^2}\wedge\theta^2\otimes\begin{pmatrix}
1-a^2&0\\
0&2-2a^2+3a^2b\\
\end{pmatrix}\right).
\end{align*}
Therefore, we obtain the equations
\begin{align*}
\norm{v_1}^2=2-2a^2,&&\norm{v_2}^2=1-a^2,&&\norm{v_3}^2=2-2a^2+3a^2b.
\end{align*}
These give the conditions
\begin{equation}
\begin{cases}
|x|^2+|y|^2=2-2a^2\\
|y|^2+|z|^2=1-a^2\\
|z|^2+|w|^2=2-2a^2+3a^2b
\end{cases}
\end{equation}
with solutions
\begin{equation}\label{eq:systemHxHnorms2}
\begin{cases}
|x|^2=1-a^2+s\\
|y|^2=1-a^2-s\\
|z|^2=s\\
|w|^2=2-2a^2+3a^2b-s
\end{cases}\qquad\textrm{ for } s\in [0,1-a^2].
\end{equation}
We now impose the vanishing of $\langle v_1,v_2\rangle$, $\langle v_2,v_3\rangle$, $\langle v_1,v_3\rangle$, that is \eqref{eq:systemHxHprod}.
We have four different cases:
\begin{itemize}
\item
Suppose at first that $y=z=0$, then $s=0$ and $a=1$, so \eqref{eq:systemHxHprod} is always satisfied, while \eqref{eq:systemHxHnorms2} becomes
\begin{align}
\begin{cases}
|x|^2=0\\
|y|^2=0\\
|z|^2=0\\
|w|^2=3b
\end{cases}.
\end{align}
It has solutions $(x,y,z,w)=(0,0,0,\sqrt{3b}e^{i\alpha})$ for $\alpha\in\smooth{M}$ and thus $(c_1,c_2,c_3,c_4)=(0,0,0,\frac{\sqrt{3b}}{2}e^{i\alpha})$.
In conclusion, $a=1$ and $\sigma=\frac{\sqrt{3b}}{2}e^{i\alpha}(\theta^2)^3$.
\item Suppose now that $z=0$ but $y\neq 0$, then $s=0$ and $a^2-1\neq 0$.
The system \eqref{eq:systemHxHprod} implies $x=w=0$, but then by \eqref{eq:systemHxHnorms2}, $0=|x|^2=1-a^2\neq 0$, so in this case there are no solutions.
\item Analogously, if $z\neq 0$ but $y=0$, then $s=1-a^2$ and \eqref{eq:systemHxHprod} gives $w=x=0$, so from \eqref{eq:systemHxHnorms2} we get $0=|x|^2=2-2a^2=2|z|^2\neq 0$ leaving no solutions.
\item The remaining case has $z\neq 0$ and $y\neq 0$.
In order to solve it, let us call $t:=\overline{y}z\neq 0$, then \eqref{eq:systemHxHnorms2} and \eqref{eq:systemHxHprod} give
\begin{align}
z&=\frac{ty}{|y|^2}=\frac{ty}{1-a^2-s},\\
x&=-\frac{\overline{t}y}{|y|^2}=-\frac{\overline{t}y}{1-a^2-s},\\
w&=-\frac{tz}{|z|^2}=-\frac{t^2 y}{s(1-a^2-s)},\\
0=\overline{x}z+\overline{y}w
&=\left(\frac{-t\overline{y}}{1-a^2-s}\right)\left(\frac{ty}{1-a^2-s}\right)+\overline{y}\left(\frac{-t^2y}{s(1-a^2-s)}\right)\\
&=-t^2\left(\frac{1}{1-a^2-s}+\frac{1}{s}\right)=-\frac{t^2(1-a^2)}{s(1-a^2-s)}.
\end{align}
The latter implies $a=1$, and from \eqref{eq:systemHxHnorms2}, we deduce a contradiction: $0< |y|^2=-s<0$.
\end{itemize}
In conclusion, the only solutions for this type of curvature tensors are obtained for
\begin{align*}
a=1, &&b=0, && \sigma=0,&&\textrm{or }\\
a=1, &&b=1, && \sigma=\frac{\sqrt{3}}{2}e^{i\alpha}(\theta^2)^3, && \textrm{for }\alpha\in\smooth{M}.
\end{align*}
In Table \ref{table:curvatures}, these results correspond to: VI for $a=1$ and $\sigma=\frac{\sqrt{3}}{2}e^{i\alpha}(\theta^2)^3$ for $\alpha\in\smooth{M}$; VII for $a=1$ and $\sigma=0$; VIII and IX for $a=\frac{1}{\sqrt{\delta}}$, $\delta>0$ and $\sigma=0$.
\end{enumerate}
Table \ref{table:casesLeft} summarises (up to isomorphism) the cases satisfying the curvature condition, showing the non-vanishing differentials of the coframe and the Levi-Civita connection.
\begin{table}[!ht]
\centering
\begin{tabular}[c]{|c|l|c|c|}
\hline
Case &Structure constants & Levi-Civita connection &PSK\\
\hline
III&
\begin{tabular}{@{}l@{}}
$du^2=-\sqrt{2}u^{1,2}$\\
$du^4=-2u^{3,4}$\\
\end{tabular}
&
$\begin{psmallmatrix}
&\sqrt{2}u^2&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
-\sqrt{2}u^2&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&\\
\hline
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&&2 u^4\\
&&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-2 u^4&
\end{psmallmatrix}$
&\checkmark\\
\hline
VI&
\begin{tabular}{@{}l@{}}
$du^1=2u^{1,2}$\\
$du^3=u^{2,3}$\\
$du^4=-2u^{1,3}-u^{2,4}$\\
\end{tabular}
&
$\begin{psmallmatrix}
0&-2u^1&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&u^4&u^3\\
2u^1&0&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-u^3&u^4\\
\hline
-u^4&u^3&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0&-u^1\\
-u^3&-u^4&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&u^1&0
\end{psmallmatrix}$
&\\
\hline
VII&
\begin{tabular}{@{}l@{}}
$du^1=u^{1,3}$\\
$du^2=u^{2,3}$\\
$du^4=-2u^{1,2}-2u^{3,4}$\\
\end{tabular}
&
$\begin{psmallmatrix}
0&u^4&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-u^1&u^2\\
-u^4&0&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-u^2&-u^1\\
\hline
u^1&u^2&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0&2u^4\\
-u^2&u^1&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-2u^4&0
\end{psmallmatrix}$
&\checkmark\\
\hline
VIII&
\begin{tabular}{@{}l@{}}
$du^1=u^{1,3}+\frac{2}{\delta}u^{2,3}$\\
$du^2=-\frac{2}{\delta}u^{1,3}+u^{2,3}$\\
$du^4=-2u^{1,2}-2u^{3,4}$\\
$\delta>0$\\
\end{tabular}
&
$\begin{psmallmatrix}
0&\frac{2}{\delta}u^3+u^4&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-u^1&u^2\\
-\frac{2}{\delta}u^3-u^4&0&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-u^2&-u^1\\
\hline
u^1&u^2&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0&2u^4\\
-u^2&u^1&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-2u^4&0
\end{psmallmatrix}$
&\checkmark\\
\hline
IX&
\begin{tabular}{@{}l@{}}
$du^1=u^{1,4}-\frac{2}{\delta}u^{2,4}$\\
$du^2=\frac{2}{\delta}u^{1,4}+u^{2,4}$\\
$du^3=2u^{1,2}+2u^{3,4}$\\
$\delta>0$\\
\end{tabular}
&
$\begin{psmallmatrix}
0&-\frac{2}{\delta}u^4-u^3&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&-u^2&-u^1\\
\frac{2}{\delta}u^4+u^3&0&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&u^1&-u^2\\
\hline
u^2&-u^1&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&0&-2u^3\\
u^1&u^2&\hspace*{-\arraycolsep}\vline\hspace*{-\arraycolsep}&2u^3&0
\end{psmallmatrix}$
&\checkmark\\
\hline
\end{tabular}
\caption{Cases satisfying the curvature condition.}
\label{table:casesLeft}
\end{table}
Now we must check whether condition \ref{eq:condDiffCor} holds for the cases left.
Notice that for cases III, VII, VIII, IX, the K\"{a}hler form is exact with invariant potentials; respectively $-\frac{1}{\sqrt{2}} u^2-\frac{1}{2}u^4$, $-\frac{1}{2}u^4$, $-\frac{1}{2}u^4$, $\frac{1}{2}u^3$.
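For instance, in case IX the structure equations of Table \ref{table:casesLeft} give
\begin{equation*}
d\left(\frac{1}{2}u^3\right)=u^{1,2}+u^{3,4}=\omega,
\end{equation*}
and the other three potentials can be checked in the same way.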
We can immediately say that cases VII, VIII, IX are all projective special K\"{a}hler because $\sigma=0$, so the differential condition is trivially satisfied.
Concerning case III, we can compute $d^{LC}\sigma$ by understanding how the Levi-Civita connection behaves on the unitary complex coframe $\theta$.
\begin{align*}
\nabla^{LC}\theta^1
&=\nabla^{LC}u^{1}+i\nabla^{LC}u^2
=-(\omega^{LC})^1_k \otimes u^k-i(\omega^{LC})^2_k\otimes u^k\\
&=-\sqrt{2}u^2\otimes u^2+i\sqrt{2}u^2\otimes u^1
=\sqrt{2}i u^2\otimes \theta^1;\\
\nabla^{LC}\theta^2
&=\nabla^{LC}u^{3}+i\nabla^{LC}u^4
=-(\omega^{LC})^3_k \otimes u^k-i(\omega^{LC})^4_k\otimes u^k\\*
&=-2u^4\otimes u^4+i2u^4\otimes u^3
=2i u^4\otimes \theta^2.
\end{align*}
Now we can compute
\begin{align*}
\nabla^{LC}\sigma
&=\nabla^{LC} \left(\frac{3}{2} e^{i\alpha} (\theta^1)^2\theta^2\right)\\*
&=\frac{3}{2} i d\alpha \otimes e^{i\alpha} (\theta^1)^2\theta^2+3 \sqrt{2}iu^2 e^{i\alpha}(\theta^1)^2\theta^2+\frac{3}{2} 2i u^4\otimes e^{i\alpha} (\theta^1)^2\theta^2\\
&=-4i\left(-\frac{1}{4}d\alpha-\frac{1}{\sqrt{2}}u^2-\frac{1}{2}u^4\right)\otimes \sigma.
\end{align*}
If we define $\lambda:=-\frac{1}{4}d\alpha-\frac{1}{\sqrt{2}}u^2-\frac{1}{2}u^4$, we have that $d\lambda=\omega$ and $d^{LC}\sigma=-4i\lambda\wedge\sigma$.
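Indeed, using $du^2=-\sqrt{2}\,u^{1,2}$ and $du^4=-2\,u^{3,4}$ from Table \ref{table:casesLeft},
\begin{equation*}
d\lambda=-\frac{1}{4}\,d(d\alpha)-\frac{1}{\sqrt{2}}\,du^2-\frac{1}{2}\,du^4=u^{1,2}+u^{3,4}=\omega.
\end{equation*}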
Thanks to Corollary \ref{cor:charPSKesatte}, we have proven that also case III has a projective special K\"{a}hler structure for every choice of $\alpha\in\smooth{M}$.
Suppose that VI is projective special K\"{a}hler; then by Theorem \ref{theo:characterisationPSK}, locally we must have the differential condition \ref{theo:charpsk:DiffCond}.
Consider the unitary global complex coframe $\theta$.
\begin{align*}
\nabla^{LC}\theta^2
&=\nabla^{LC}u^{3}+i\nabla^{LC}u^4\\
&=u^4\otimes u^1-u^3\otimes u^2+u^1\otimes u^4+i(u^3\otimes u^1+u^4\otimes u^2-u^1\otimes u^3)\\
&=u^4\otimes \theta^1 +iu^3\otimes \theta^1-iu^1\otimes \theta^2
=i\overline{\theta^2}\otimes \theta^1-iu^1\otimes \theta^2.
\end{align*}
Thus,
\begin{align}
\begin{split}
\nabla^{LC}\sigma
&=\nabla^{LC}\left(\frac{\sqrt{3}}{2}e^{i\alpha}(\theta^2)^3\right)\\
&=id\alpha\otimes \frac{\sqrt{3}}{2}e^{i\alpha}(\theta^2)^3+3\frac{\sqrt{3}}{2}e^{i\alpha}(\nabla^{LC}\theta^2)(\theta^2)^2\\
&=id\alpha\otimes \sigma+3\frac{\sqrt{3}}{2}e^{i\alpha}(i\overline{\theta^2}\otimes \theta^1-iu^1\otimes \theta^2
)(\theta^2)^2\\
&=i(d\alpha-3u^1)\otimes \sigma+3i\overline{\theta^2}\otimes\frac{\sqrt{3}}{2}e^{i\alpha}\theta^1(\theta^2)^2;\\
d^{LC}\sigma
&=i(d\alpha-3u^1)\wedge \sigma+3i\overline{\theta^2}\wedge\frac{\sqrt{3}}{2}e^{i\alpha}\theta^1(\theta^2)^2.
\end{split}
\end{align}
Notice that, for any available choice of $\sigma$, this is never of the form required by condition \ref{theo:charpsk:DiffCond}: evaluating the last component at $\theta_1$, we obtain $i\frac{\sqrt{3}}{2}\overline{\theta^2}\wedge\theta^2\otimes \theta^2$, whereas the same operation applied to a form of type $i\tau\wedge\sigma$ would give zero.
We deduce that VI does not admit a projective special K\"{a}hler structure.
We are now left with cases III, VII, VIII, IX.
At the level of Lie groups, case III corresponds, up to isomorphism, to the connected, simply connected Lie group $\mathcal{H}_{\sqrt{2}}\times\mathcal{H}_2$ with $\sigma=\frac{3}{2}(\theta^1)^2\theta^2$.
The other deviances are obtained from this one by taking $e^{i\alpha}\sigma$, so we are in the situation noted in Remark \ref{rmk:dR0_unique_structure}.
The Lie groups corresponding to cases VII, VIII and IX are in particular homogeneous and all have zero deviance, so by Proposition \ref{prop:uniquenessHyp} they are all isomorphic to $\mathcal{H}_{\mathbb{C}}^2$ as projective special K\"{a}hler manifolds.
\end{proof}
\begin{rmk}
It is striking that in case III, which is obtained via the r-map from the polynomial $x^2y$, the deviance is a global tensor which is a multiple of this polynomial with respect to a K\"{a}hler holomorphic coframe.
\end{rmk}
It turns out that all $4$-dimensional projective special K\"{a}hler Lie groups are simply connected, so this theorem already presents all possible cases.
\begin{prop}\label{prop:universalCover}
Let $(\pi\colon \widetilde{M}\to M,\nabla)$ be a projective special K\"{a}hler manifold; then the universal cover $p\colon U\to M$ admits a projective special K\"{a}hler structure.
In particular, if $\gamma\colon S\to \sharp_2 S_{3,0}M$ is the intrinsic deviance for $M$, then $p^*S\to U$ is an $S^1$-bundle and, denoting by $p'$ the canonical map $p^*S\to S$, the deviance of $U$ is $p^*\circ \gamma\circ p'\colon p^*S\to \sharp_2 S_{3,0}U$.
If $M$ is a projective special K\"{a}hler Lie group, then so is $U$.
\end{prop}
\begin{proof}
Since $p\colon U\to M$ is a cover, we can lift the whole K\"{a}hler structure of $M$ to $U$ by pullback, obtaining $(U,p^*g,p^*I,p^*\omega)$ (the pullback of $I$ makes sense since $p$ is a local diffeomorphism).
We will now use Theorem \ref{theo:characterisationPSK}.
The $S^1$-bundle $S$ lifts to an $S^{1}$-bundle $\pi_{p^*S}\colon p^*S\to U$, where the right action can be defined locally, since $p$ is a local diffeomorphism.
The principal connection $\varphi$ on $S$ lifts to $\varphi'=p'^*\varphi$ and its curvature is, as expected, $d\varphi'=p'^*d\varphi=-2p'^{*}\pi_S^*\omega=-2\pi_{p^*S}^*p^*\omega$.
Let $\gamma'=p^*\circ \gamma\circ p'\colon p^*S\to \sharp_2 S_{3,0}U$, then $\gamma'(ua)=a^2\gamma'(u)$ holds, as the action is defined on the fibres, which are preserved by the pullback.
The remaining properties also follow from the fact that $p$ is a local diffeomorphism.
Finally, if $M$ is a Lie group with left invariant K\"{a}hler structure, then $U$ is a Lie group and its K\"{a}hler structure is also left invariant.
\end{proof}
Given a universal cover $p\colon U\to M$ of a projective special K\"{a}hler Lie group, $\ker(p)$ is a discrete subgroup and, when $M$ is connected, $\ker(p)$ lies in the centre $Z(U)$ of $U$.
From this observation we obtain the following corollary.
\begin{cor}
A connected $4$-dimensional projective special K\"{a}hler Lie group is isomorphic to one of the following:
\begin{itemize}
\item $\mathcal{H}_{\sqrt{2}}\times \mathcal{H}_2$ with deviance $\sharp_2(\frac{3}{2}(\theta^1)^2\theta^2)$ in the standard complex unitary coframe $\theta$;
\item complex hyperbolic $2$-space with zero deviance.
\end{itemize}
\end{cor}
\begin{proof}
The proof follows from Theorem \ref{theo:classificazionePSK4} with Proposition \ref{prop:universalCover}, as a connected group $M$ with universal cover $p\colon U\to M$ is isomorphic to $U/\ker(p)$ and, if $M$ is a projective special K\"{a}hler Lie group, so is $U$ by Proposition \ref{prop:universalCover}.
Since $U$ is also simply connected, Theorem \ref{theo:classificazionePSK4} provides all the possibilities up to isomorphisms preserving the Lie structure.
The statement follows from the fact that these possibilities for $U$ have trivial centre.
\end{proof}
\end{document}
|
\begin{document}
\author{Robert Harron}
\title{On Greenberg's $L$-invariant of the symmetric sixth power of an ordinary cusp form}
\date{\today}
\maketitle
\thispagestyle{plain}
\begin{abstract}
We derive a formula for Greenberg's $L$-invariant of Tate twists of the symmetric sixth power of an ordinary non-CM cuspidal newform of weight $\geq4$, under some technical assumptions. This requires a ``sufficiently rich'' Galois deformation of the symmetric cube which we obtain from the symmetric cube lift to $\GSp(4)_{/\mathbf{Q}}$ of Ramakrishnan--Shahidi and the Hida theory of this group developed by Tilouine--Urban. The $L$-invariant is expressed in terms of derivatives of Frobenius eigenvalues varying in the Hida family. Our result suggests that one could compute Greenberg's $L$-invariant of all symmetric powers by using appropriate functorial transfers and Hida theory on higher rank groups.
\end{abstract}
\tableofcontents
\section*{Introduction}\addcontentsline{toc}{section}{Introduction}
The notion of an $L$-invariant was introduced by Mazur, Tate, and Teitelbaum in their investigations of a $p$-adic analogue of the Birch and Swinnerton-Dyer conjecture in \cite{MTT86}. When considering the $p$-adic $L$-function of an elliptic curve $E$ over $\mathbf{Q}$ with split, multiplicative reduction at $p$, they saw that its $p$-adic $L$-function vanishes even when its usual $L$-function does not (an ``exceptional zero'' or ``trivial zero''). They introduced a $p$-adic invariant, the ``($p$-adic) $L$-invariant'', of $E$ as a fudge factor to recuperate the $p$-adic interpolation property of $L(1,E,\chi)$ using the \textit{derivative} of its $p$-adic $L$-function. Their conjecture appears in \cite[\S\S13--14]{MTT86} and was proved by Greenberg and Stevens in \cite{GS93}. The proof conceptually splits up into two parts. One part relates the $L$-invariant of $E$ to the derivative in the ``weight direction'' of the unit eigenvalue of Frobenius in the Hida family containing $f$ (the modular form corresponding to $E$). The other part uses the functional equation of the two-variable $p$-adic $L$-function to relate the derivative in the weight direction to the derivative of interest, in the ``cyclotomic direction''. In this paper, we provide an analogue of the first part of this proof replacing the $p$-adic Galois representation $\rho_f$ attached to $f$ with Tate twists of $\Sym^6\!\rho_f$. More specifically, we obtain a formula for Greenberg's $L$-invariant (\cite{G94}) of Tate twists of $\Sym^6\!\rho_f$ in terms of derivatives in weight directions of the unit eigenvalues of Frobenius varying in some ordinary Galois deformation of $\Sym^3\!\rho_f$.
Let us describe the previous work in this subject. In his original article, Greenberg (\cite{G94}) computes his $L$-invariant for all symmetric powers of $\rho_f$ when $f$ is associated to an elliptic curve with split, multiplicative reduction at $p$. In this case, the computation is local, and quite simple. In a series of articles, Hida has relaxed the assumption on $f$ allowing higher weights and dealing with Hilbert modular forms (see \cite{Hi07}), but still requiring, for the most part, $\rho_f$ to be (potentially) non-cristalline (though semistable) at $p$ in order to obtain an explicit formula for the $L$-invariant. A notable exception where a formula is known in the cristalline case is the symmetric square, done by Hida in \cite{Hi04} (see also chapter 2 of the author's Ph.D.\ thesis \cite{H-PhD} for a slightly different approach). Another exception comes again from Greenberg's original article (\cite{G94}) where he computes his $L$-invariant when $E$ has good ordinary reduction at $p$ \textit{and has complex multiplication}. In this case, the symmetric powers are reducible and the value of the $L$-invariant comes down to the result of Ferrero--Greenberg (\cite{FeG78}). The general difficulty in the cristalline case is that Greenberg's $L$-invariant is then a \textit{global} invariant and its computation requires the construction of a global Galois cohomology class.
In this article, we attack the cristalline case for the next symmetric power which has an $L$-invariant, namely the sixth power (a symmetric power $n$ has an $L$-invariant in the cristalline case only when $n\equiv2\ (\operatorname{mod} 4)$). In general, one could expect to be able to compute Greenberg's $L$-invariant of $\Sym^n\!\rho_f$ by looking at ordinary Galois deformations of $\Sym^{\frac{n}{2}}\!\rho_f$ (see \S\ref{sec:sympowgeneral}). Unfortunately, when $n>2$ in the cristalline case, the $\Sym^{\frac{n}{2}}$ of the Hida deformation of $\rho_f$ is insufficient. The new ingredient we bring to the table is the idea to use a functorial transfer of $\Sym^{\frac{n}{2}}\!f$ to a higher rank group, use Hida theory there, and hope that the additional variables in the Hida family provide non-trivial Galois cohomology classes. In theorem \ref{thm:theoremA}, we show that this works for $n=6$ using the symmetric cube lift of Ramakrishnan--Shahidi (\cite{RS07}) (under certain technical assumptions). This provides hope that such a strategy would yield formulas for Greenberg's $L$-invariant for all symmetric powers in the cristalline case. The author is currently investigating if the combined use of the potential automorphy results of \cite{BLGHT09}, the functorial descent to a unitary group, and Hida theory on it (\cite{Hi02}) will be of service in this endeavour.
We also address whether the $L$-invariant of the symmetric sixth power equals that of the symmetric square. There is a guess, due to Greenberg, that it does. We fall short of providing a definitive answer, but obtain a relation between the two in theorem \ref{thm:theoremB}.
There are several facets of the symmetric sixth power $L$-invariant which we do not address. We do not discuss the expected non-vanishing of the $L$-invariant nor its expected relation to the size of a Selmer group. Furthermore, we make no attempt to show that Greenberg's $L$-invariant is the \textit{actual} $L$-invariant appearing in an interpolation formula of $L$-values. Aside from the fact that the $p$-adic $L$-function of the symmetric sixth power has not been constructed, a major impediment to proving this identity is that the point at which the $p$-adic $L$-function has an exceptional zero is no longer the centre of the functional equation, and a direct generalization of the second part of the proof of Greenberg--Stevens is therefore not possible. Citro suggests a way for dealing with this latter problem in the symmetric square case in \cite{Ci08}. Finally, we always restrict to the case where $f$ is ordinary at $p$. Recently, in \cite{Be09}, Benois has generalized Greenberg's definition of $L$-invariant to the non-ordinary case, and our results suggest that one could hope to compute his $L$-invariant using the eigenvariety for $\GSp(4)_{/\mathbf{Q}}$.
We remark that the results of this article were obtained in the author's Ph.D.\ thesis (\cite[Chapter 3]{H-PhD}). There, we give a slightly different construction of the global Galois cohomology class, still using the same deformation of the symmetric cube. In particular, we use Ribet's method of constructing a global extension of Galois representations by studying an irreducible, but residually reducible, representation. We refer to \cite{H-PhD} for details.
\section*{Notation and conventions}\addcontentsline{toc}{section}{Notation and conventions}
We fix throughout a prime $p\geq3$ and an isomorphism $\iota_\infty:\ol{\mathbf{Q}}_p\cong\mathbf{C}$. For a field $F$, $G_F$ denotes the absolute Galois group of $F$. We fix embeddings $\iota_\ell$ of $\ol{\mathbf{Q}}$ into $\ol{\mathbf{Q}}_\ell$ for all primes $\ell$. These define primes $\ol{\ell}$ of $\ol{\mathbf{Q}}$ over $\ell$, and we let $G_\ell$ denote the decomposition group of $\ol{\ell}$ in $G_\mathbf{Q}$, which we may thus identify with $G_{\mathbf{Q}_\ell}$. Let $I_\ell$ denote the inertia subgroup of $G_\ell$. Let $\mathbb{A}$ denote the adeles of $\mathbf{Q}$ and let $\mathbb{A}_f$ be the finite adeles.
By a $p$-adic representation (over $K$) of a topological group $G$, we mean a continuous representation $\rho:G\rightarrow\Aut_K(V)$, where $K$ is a finite extension of $\mathbf{Q}_p$ and $V$ is a finite-dimensional $K$-vector space equipped with its $p$-adic topology. Let $\chi_p$ denote the $p$-adic cyclotomic character. We denote the Tate dual $\Hom(V,K(1))$ of $V$ by $V^\ast$. Denote the Galois cohomology of the absolute Galois group of $F$ with coefficients in $M$ by $H^i(F,M)$.
For compatibility with \cite{G94}, we take $\Frob_p$ to be an \textit{arithmetic} Frobenius element at $p$, and we normalize the local reciprocity map $\rec:\mathbf{Q}_p^\times\rightarrow G^{\ab}_{\mathbf{Q}_p}$ so that $\Frob_p$ corresponds to $p$. We normalize the $p$-adic logarithm $\log_p:\overline{\mathbf{Q}}_p^\times\longrightarrow\overline{\mathbf{Q}}_p$ by $\log_p(p)=0$.
\section{Greenberg's theory of trivial zeroes}
In \cite{G94}, Greenberg introduced a theory describing the expected order of the trivial zero, as well as a conjectural value for the $L$-invariant of a $p$-ordinary motive. In this section, we briefly describe this theory, restricting ourselves to the case we will require in the sequel, specifically, we will assume the ``exceptional subquotient'' $W$ is isomorphic to the trivial representation. We end this section by explaining our basic method of computing $L$-invariants of symmetric powers of cusp forms.
\subsection{Ordinarity, exceptionality, and some Selmer groups}
Let $\rho:G_\mathbf{Q}\rightarrow\gl(V)$ be a $p$-adic representation over a field $K$. Recall that $V$ is called \textit{ordinary} if there is a descending filtration $\{F^iV\}_{i\in\mathbf{Z}}$ of $G_p$-stable $K$-subspaces of $V$ such that $I_p$ acts on $\gr^i\!V=F^iV/F^{i+1}V$ via multiplication by $\chi_p^i$ (and $F^iV=V$ (resp.\ $F^iV=0$) for $i$ sufficiently negative (resp.\ sufficiently positive)). Under this assumption, Greenberg (\cite{G89}) has defined what we call the \textit{ordinary Selmer group} for $V$ as
\[ \Sel_\mathbf{Q}(V):=\ker\!\left(\hone{\mathbf{Q}}{V}\longrightarrow\prod_v\hone{\mathbf{Q}_v}{V}/L_v(V)\right)
\]
where the product is over all places $v$ of $\mathbf{Q}$ and the local conditions $L_v(V)$ are given by
\begin{equation}\label{eqn:ordlocalconditions}
L_v(V):=\left\{\begin{array}{ll}
\hone[\nr]{\mathbf{Q}_v}{V}:=\ker\!\left(\hone{\mathbf{Q}_v}{V}\rightarrow\hone{I_v}{V}\right), & v\neq p\\
\hone[\ord]{\mathbf{Q}_p}{V}:=\ker\!\left(\hone{\mathbf{Q}_p}{V}\rightarrow\hone{I_p}{V/F^1V}\right), & v=p.
\end{array}\right.
\end{equation}
This Selmer group is conjecturally related to the $p$-adic $L$-function of $V$ at $s=1$.
To develop the theory of exceptional zeroes following Greenberg (\cite{G94}), we introduce three additional assumptions on $V$ (which will be satisfied by the $V$ in which we are interested). Assume
\begin{itemize}
\item[(C)]\label{item:C} $V$ is \textit{critical} in the sense that $\dim_KV/F^1V=\dim_KV^-$, where $V^-$ is the $(-1)$-eigenspace of complex conjugation,
\item[(U)]\label{item:U} $V$ has no $G_p$ subquotient isomorphic to the cristalline extension of $K$ by $K(1)$,
\item[(S)]\label{item:S} $G_p$ acts semisimply on $\gr^i\!V$ for all $i\in\mathbf{Z}$.
\end{itemize}
If $V$ arises from a motive, condition (\hyperref[item:C]{C}) is equivalent to that motive being critical at $s=1$ in the sense of Deligne \cite{D79} (see \cite[\S6]{G89}). Condition (\hyperref[item:U]{U}) will come up when we want to define the $L$-invariant. Assumption (\hyperref[item:S]{S}) allows us to refine the ordinary filtration and define a $G_p$-subquotient of $V$ that (conjecturally) regulates the behaviour of $V$ with respect to exceptional zeroes.
\begin{definition}\mbox{}
\begin{enumerate}
\item Let $F^{00}V$ be the maximal $G_p$-subspace of $F^0V$ such that $G_p$ acts trivially on $F^{00}V/F^1V$.
\item Let $F^{11}V$ be the minimal $G_p$-subspace of $F^1V$ such that $G_p$ acts on $F^1V/F^{11}V$ via multiplication by $\chi_p$.
\item Define the \textit{exceptional subquotient} $W$ of $V$ as
\[ W:=F^{00}V/F^{11}V.
\]
\item $V$ is called \textit{exceptional} if $W\neq0$.
\end{enumerate}
\end{definition}
Note that $W$ is ordinary with $F^2W=0, F^1W=F^1V/F^{11}V$, and $F^0W=W$. For $?=00,11,$ or $i\in\mathbf{Z}$, we denote
\[ F^?\hone{\mathbf{Q}_p}{V}:=\im\!\left(\hone{\mathbf{Q}_p}{F^?V}\longrightarrow\hone{\mathbf{Q}_p}{V}\right).
\]
For simplicity, we impose the following condition on $V$ which will be sufficient for our later work:
\begin{itemize}
\item[(T$^\prime$)]\label{item:Tp} $W\cong K$, i.e.\ $F^{11}V=F^1V$ and $\dim_KF^{00}V/F^1V=1$.
\end{itemize}
We remark that this is a special case of condition (T) of \cite{G94}.
The ordinarity of $V$ and assumptions (\hyperref[item:C]{C}), (\hyperref[item:U]{U}), (\hyperref[item:S]{S}), and (\hyperref[item:Tp]{T$^\prime$}) allow us to introduce Greenberg's \textit{balanced Selmer group} $\overline{\operatorname{Sel}}_\mathbf{Q}(V)$ of $V$ (terminology due to Hida) as follows. The local conditions $\overline{L}_v(V)$ at $v\neq p$ are simply given by the unramified conditions $L_v(V)$ of \eqref{eqn:ordlocalconditions}. At $p$, $\overline{L}_p(V)$ is characterized by the following two properties:
\begin{itemize}
\item[(Bal1)]\label{item:Bal1} $F^{11}\hone{\mathbf{Q}_p}{V}\subseteq\overline{L}_p(V)\subseteq F^{00}\hone{\mathbf{Q}_p}{V}$,
\item[(Bal2)]\label{item:Bal2} $\im\!\left(\overline{L}_p(V)\rightarrow\hone{\mathbf{Q}_p}{W}\right)=\hone[\nr]{\mathbf{Q}_p}{W}$.
\end{itemize}
The balanced Selmer group of $V$ is
\[ \overline{\operatorname{Sel}}_\mathbf{Q}(V):=\ker\!\left(\hone{\mathbf{Q}}{V}\longrightarrow\prod_v\hone{\mathbf{Q}_v}{V}/\overline{L}_v(V)\right).
\]
The rationale behind the name ``balanced'' is provided by the following basic result of Greenberg's.
\begin{proposition}[Proposition 2 of \cite{G94}]\label{prop:balanced}
The balanced Selmer groups of $V$ and $V^\ast$ have the same dimension.
\end{proposition}
To make the reader feel more familiar with the balanced Selmer group, we offer the following result on its value under our running assumptions.
\begin{proposition}
Let $V$ be an ordinary $p$-adic representation of $G_\mathbf{Q}$. Under assumptions \textup{(\hyperref[item:C]{C})}, \textup{(\hyperref[item:U]{U})}, \textup{(\hyperref[item:S]{S})}, and especially \textup{(\hyperref[item:Tp]{T$^\prime$})}, we have the following equalities
\[ \overline{\operatorname{Sel}}_\mathbf{Q}(V)=\Sel_\mathbf{Q}(V)=\hone[g]{\mathbf{Q}}{V}=\hone[f]{\mathbf{Q}}{V}
\]
where $\hone[g]{\mathbf{Q}}{V}$ and $\hone[f]{\mathbf{Q}}{V}$ are the Bloch--Kato Selmer groups introduced in \textup{\cite{BK90}}.
\end{proposition}
\begin{proof}
The second equality is due to Flach (\cite[Lemma 2]{Fl90}) and the last equality follows from \cite[Corollary 3.8.4]{BK90}. We proceed to prove the first equality. The local conditions at $v\neq p$ are the same for $\overline{\operatorname{Sel}}_\mathbf{Q}(V)$ and $\Sel_\mathbf{Q}(V)$ so we are left to show that $\overline{L}_p(V)=L_p(V)$.
Let $c\in\overline{L}_p(V)$. Condition (\hyperref[item:Bal1]{Bal1}) implies that there is $c^\prime\in\hone{\mathbf{Q}_p}{F^{00}V}$ mapping to $c$. By (\hyperref[item:Bal2]{Bal2}), the image of $c^\prime$ under the map in the bottom row of the commutative diagram
\[
\xymatrix{\hone{\mathbf{Q}_p}{V}\ar@{->}[r] &\hone{I_p}{V/F^1V} \\
\hone{\mathbf{Q}_p}{F^{00}V}\ar@{->}[u] \ar@{->}[r] & \hone{I_p}{W}\ar@{->}[u]
}
\]
is zero. Thus, $c$ is in the kernel of the map in the top row, which is exactly $L_p(V)$.
For the reverse equality, let $c\in L_p(V)$ and consider the commutative diagram
\[\xymatrix{\hone{\mathbf{Q}_p}{V/F^{00}V}\ar@{->}[r]^{f_2} & \hone{I_p}{V/F^{00}V} \\
c\in\hone{\mathbf{Q}_p}{V}\ar@{->}[r]^{f_1} \ar@{->}[u]^{f_3} & \hone{I_p}{V/F^1V} \ar@{->}[u] \\
\hone{\mathbf{Q}_p}{F^{00}V}\ar@{->}[u] \ar@{->}[r] &\hone{I_p}{W}. \ar@{->}[u]^{f_4}
}\]
The local condition $L_p(V)$ satisfies (\hyperref[item:Bal1]{Bal1}) if $c\in\ker f_3$. By definition, $c\in\ker f_1$, so we show that $\ker f_2=0$. By inflation-restriction, $\ker f_2$ is equal to
\[ \im\left(\hone{G_p/I_p}{\left(V/F^{00}V\right)^{I_p}}\longrightarrow\hone{\mathbf{Q}_p}{V/F^{00}V}\right).
\]
Note that $\left(V/F^{00}V\right)^{I_p}=F^0V/F^{00}V$. The pro-cyclic group $G_p/I_p$ has (topological) generator $\Frob_p$, so
\[ \hone{G_p/I_p}{F^0V/F^{00}V}\cong(F^0V/F^{00}V)/\left((\Frob_p-1)(F^0V/F^{00}V)\right)=0
\]
where the last equality is because $F^{00}V$ was defined to be exactly the part of $F^0V$ on which $\Frob_p$ acts trivially (mod $F^1V$). Thus, $L_p(V)$ satisfies (\hyperref[item:Bal1]{Bal1}), so there is a $c^\prime\in\hone{\mathbf{Q}_p}{F^{00}V}$ mapping to $c$. Its image in $\hone{I_p}{V/F^1V}$ is trivial, so it suffices to show that $\ker f_4=0$ to conclude that $L_p(V)$ satisfies (\hyperref[item:Bal2]{Bal2}). By the long exact sequence in cohomology, the exactness (on the right) of
\[ 0\longrightarrow W^{I_p}\longrightarrow(V/F^1V)^{I_p}\longrightarrow(V/F^{00}V)^{I_p}\longrightarrow0
\]
shows that $\ker f_4=0$.
\end{proof}
\begin{remark}
In fact, this result is still valid if (\hyperref[item:Tp]{T$^\prime$}) is relaxed to simply $F^{11}V=F^1V$ (see \cite[Lemma 1.3.4]{H-PhD}).
\end{remark}
\subsection{Greenberg's \texorpdfstring{$L$}{\textit{L}}-invariant}\label{sec:GrLinvar}
We now proceed to define Greenberg's $L$-invariant. To do so, we impose one final condition on $V$, namely
\begin{itemize}
\item[(Z)]\label{item:Z} the balanced Selmer group of $V$ is zero: $\overline{\operatorname{Sel}}_\mathbf{Q}(V)=0$.
\end{itemize}
This will allow us to define a one-dimensional global subspace $H_{\glob}^{\exc}$ in a global Galois cohomology group (via some local conditions) whose image in $\hone{\mathbf{Q}_p}{W}$ will be a line. The slope of this line is the $L$-invariant of $V$.
Let $\Sigma$ denote the set of primes of $\mathbf{Q}$ ramified for $V$, together with $p$ and $\infty$, let $\mathbf{Q}_\Sigma$ denote the maximal extension of $\mathbf{Q}$ unramified outside $\Sigma$, and let $G_\Sigma:=\Gal(\mathbf{Q}_\Sigma/\mathbf{Q})$. By definition, $\overline{\operatorname{Sel}}_\mathbf{Q}(V)~\subseteq~\hone{G_\Sigma}{V}$. The Poitou--Tate exact sequence with local conditions $\overline{L}_v(V)$ yields the exact sequence
\[ 0\longrightarrow\overline{\operatorname{Sel}}_\mathbf{Q}(V)\longrightarrow\hone{G_\Sigma}{V}\longrightarrow\bigoplus_{v\in\Sigma}\hone{\mathbf{Q}_v}{V}/\overline{L}_v(V)\longrightarrow\overline{\operatorname{Sel}}_\mathbf{Q}(V^\ast).
\]
Combining this with assumption (\hyperref[item:Z]{Z}) and proposition \ref{prop:balanced} gives an isomorphism
\begin{equation}\label{eqn:globalisom}
\hone{G_\Sigma}{V}\cong\bigoplus_{v\in\Sigma}\hone{\mathbf{Q}_v}{V}/\overline{L}_v(V).
\end{equation}
\begin{definition}
Let $H_{\glob}^{\exc}$ be the one-dimensional subspace\footnote{This is the subspace denoted $\wt{\mathbf{T}}$ in \cite{G94}. Page 161 of \textit{loc.\ cit.} shows that it is one-dimensional.} of $\hone{G_\Sigma}{V}$ corresponding to the subspace $F^{00}\hone{\mathbf{Q}_p}{V}/\overline{L}_p(V)$ of $\bigoplus_{v\in\Sigma}\hone{\mathbf{Q}_v}{V}/\overline{L}_v(V)$ under the isomorphism in (\ref{eqn:globalisom}).
\end{definition}
By definition of $F^{00}V$, we know that $(V/F^{00}V)^{G_p}=0$. Hence, we have injections
\[ \hone{\mathbf{Q}_p}{F^{00}V}\hookrightarrow\hone{\mathbf{Q}_p}{V}
\]
and
\[ \hone{\mathbf{Q}_p}{W}\hookrightarrow\hone{\mathbf{Q}_p}{V/F^1V}.
\]
\begin{definition}
Let $H_{\loc}^{\exc}\subseteq\hone{\mathbf{Q}_p}{W}$ be the image of $H_{\glob}^{\exc}$ in the bottom right cohomology group in the commutative diagram
\[
\xymatrix{\hone{G_\Sigma}{V}\ar@{->}[r] &\hone{\mathbf{Q}_p}{V}\ar@{->}[r] &\hone{\mathbf{Q}_p}{V/F^1V} \\
H_{\glob}^{\exc}\ar@{->}[r]\ar@{}[u]|<<<{\LARGE \begin{rotate}{90}$\subseteq$\end{rotate}} &F^{00}\hone{\mathbf{Q}_p}{V}\ar@{}[u]|<<<{\LARGE \begin{rotate}{90}$\subseteq$\end{rotate}} \\
& \hone{\mathbf{Q}_p}{F^{00}V}\ar@{->}[r]\ar@{}[u]|<<<{\LARGE \begin{rotate}{90}$\cong$\end{rotate}} &\hone{\mathbf{Q}_p}{W}.\ar@{^(->}[uu]
}
\]
\end{definition}
\begin{lemma}\mbox{}
\begin{enumerate}
\item $\dim_KH_{\loc}^{\exc}=1$,
\item $H_{\loc}^{\exc}\cap\hone[\nr]{\mathbf{Q}_p}{W}=0$.
\end{enumerate}
\end{lemma}
\begin{proof}
This follows immediately from the definitions of $H_{\glob}^{\exc}$ and of $\overline{L}_p(V)$, together with assumption (\hyperref[item:U]{U}).
\end{proof}
There are canonical coordinates on $\hone{\mathbf{Q}_p}{W}\cong\Hom(G_{\mathbf{Q}_p},W)$ given as follows. Every homomorphism $\varphi:G_{\mathbf{Q}_p}\rightarrow W$ factors through the maximal pro-$p$ quotient of $G^{\ab}_{\mathbf{Q}_p}$, which is $\Gal(\mathbf{F}_\infty/\mathbf{Q}_p)$, where $\mathbf{F}_\infty$ is the compositum of two $\mathbf{Z}_p$-extensions of $\mathbf{Q}_p$: the cyclotomic one, $\mathbf{Q}_{p,\infty}$, and the unramified one, $\mathbf{Q}_p^{\nr}$. Let
\[ \Gamma_\infty:=\Gal(\mathbf{Q}_{p,\infty}/\mathbf{Q}_p)\cong\Gal(\mathbf{F}_\infty/\mathbf{Q}_p^{\nr})
\]
and
\[ \Gamma_{\nr}:=\Gal(\mathbf{Q}_p^{\nr}/\mathbf{Q}_p)\cong\Gal(\mathbf{F}_\infty/\mathbf{Q}_{p,\infty}),
\]
then
\[ \Gal(\mathbf{F}_\infty/\mathbf{Q}_p)=\Gamma_\infty\times\Gamma_{\nr}.
\]
Therefore, $\hone{\mathbf{Q}_p}{W}$ breaks up into $\Hom(\Gamma_\infty,W)\times\Hom(\Gamma_{\nr},W)$. We have
\[\Hom(\Gamma_\infty,W)=\Hom(\Gamma_\infty,\mathbf{Q}_p)\otimes W
\]
and
\[\Hom(\Gamma_{\nr},W)=\Hom(\Gamma_{\nr},\mathbf{Q}_p)\otimes W.
\]
Composing the $p$-adic logarithm with the cyclotomic character provides a natural basis of\linebreak
$\Hom(\Gamma_\infty,\mathbf{Q}_p)$, and the function $\ord_p:\Frob_p\mapsto1$ provides a natural basis of $\Hom(\Gamma_{\nr},\mathbf{Q}_p)$. Coordinates are then provided by the isomorphisms
\[ \func{\Hom(\Gamma_\infty,W)}{W}{\log_p\chi_p\otimes w}{w}
\]
and
\[ \func{\Hom(\Gamma_{\nr},W)}{W}{\ord_p\otimes w}{w.}
\]
The $L$-invariant of $V$ is the slope of $H_{\loc}^{\exc}$ with respect to these coordinates.
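Concretely (a quick sanity check under the normalizations above, with $W\cong K$): the coordinates of $\varphi=x\log_p\chi_p+y\,\ord_p$ can be read off by evaluation, namely, for any principal unit $u\in\mathbf{Z}_p^\times$,
\[ -\frac{1}{\log_pu}\varphi(\rec(u))=x \qquad\text{and}\qquad \varphi(\Frob_p)=y,
\]
since $\chi_p(\rec(u))=u^{-1}$ under our normalizations, $\chi_p$ is trivial on $\Gamma_{\nr}$, and $\ord_p(\rec(u))=0$; in particular, the line spanned by such a $\varphi$ with $x\neq0$ has slope $y/x$ in these coordinates.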
Specifically, we will compute the $L$-invariant in section \ref{sec:calcLinvar} by constructing a global class\linebreak
$[c]~\in~\hone{\mathbf{Q}}{V}$ satisfying
\begin{itemize}
\item[(CL1)]\label{item:CL1} $[c_v]\in\hone[\nr]{\mathbf{Q}_v}{V}$ for all $v\in\Sigma\setminus\{p\}$,
\item[(CL2)]\label{item:CL2} $[c_p]\in F^{00}\hone{\mathbf{Q}_p}{V}$,
\item[(CL3)]\label{item:CL3} $[c_p]\not\equiv0\ \mathrm{mod}\ F^1V$.
\end{itemize}
This class then generates $H_{\glob}^{\exc}$, so its image $[\ol{c}_p]\in\hone{\mathbf{Q}_p}{W}$ generates $H_{\loc}^{\exc}$. Let $u\in\mathbf{Z}_p^\times$ be any principal unit, so that under our normalizations, $\chi_p(\rec(u))=u^{-1}$. Then, the coordinates of $[\ol{c}_p]$ are given by
\begin{equation}\label{eqn:coords}
\left(-\frac{1}{\log_pu}\ol{c}_p(\rec(u)),\ol{c}_p(\Frob_p)\right)
\end{equation}
where $\ol{c}_p$ is a cocycle in $[\ol{c}_p]$. Note that these coordinates are independent of the choice of $u$. We then have the following formula for the $L$-invariant of $V$:
\begin{equation}\label{eqn:linvarslop}
\mathcal{L}(V)=\frac{\ol{c}_p(\Frob_p)}{-\frac{1}{\log_pu}\ol{c}_p(\rec(u))}.
\end{equation}
\subsection{Symmetric power \texorpdfstring{$L$}{\textit{L}}-invariants of ordinary cusp forms}\label{sec:sympowgeneral}
Let $f$ be a $p$-ordinary,\footnote{More specifically, $\iota_\infty$-ordinary, in the sense that $\ord_p(\iota_\infty^{-1}(a_p))=0$, where $a_p$ is the $p$th Fourier coefficient of $f$.} holomorphic, cuspidal, normalized newform of weight $k\geq2$, level $\Gamma_1(N)$ (prime to $p$), and trivial character. Let $E=\mathbf{Q}(f)$ be the field generated by the Fourier coefficients of $f$. Let $\mf{p}_0|p$ be the prime of $E$ above $p$ corresponding to the fixed embedding $\iota_p$, and let\linebreak
$\rho_f:G_\mathbf{Q}\rightarrow\gl(V_f)$ be the contragredient of the $\mf{p}_0$-adic Galois representation (occurring in \'etale cohomology) attached to $f$ by Deligne \cite{D71} on the two-dimensional vector space $V_f$ over $K:=E_{\mf{p}_0}$. Let $\alpha_p$ denote the root of $x^2-a_px+p^{k-1}$ which is a $p$-adic unit. The $p$-ordinarity assumption implies that
\[ \rho_f|_{G_p}\sim\mtrx{\chi_p^{k-1}\delta^{-1}}{\varphi}{0}{\delta}
\]
where $\delta$ is the unramified character sending $\Frob_p$ to $\alpha_p$ (\cite[Theorem 2.1.4]{W88}). Thus, $\rho_f$ is ordinary. Note that assumption (\hyperref[item:S]{S}) is automatically satisfied by all (Tate twists of) symmetric powers of $\rho_f$ since all graded pieces of the ordinary filtration are one-dimensional. For condition (\hyperref[item:U]{U}) to be violated, we must have $k=2$ and $\alpha_p=1$, but the Hasse bound shows that this is impossible.
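To spell out the arithmetic behind the last sentence (a quick sketch): if $k=2$, then $\alpha_p$ and $p/\alpha_p$ are the roots of $x^2-a_px+p$, so
\[ \alpha_p=1\ \Longrightarrow\ a_p=\alpha_p+\frac{p}{\alpha_p}=1+p,
\]
which contradicts the Hasse bound $|\iota_\infty(a_p)|\leq2\sqrt{p}$, since $2\sqrt{p}<1+p$ for every prime $p$.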
\begin{lemma}\label{lem:exceptwists}
If $(\Sym^n\!\rho_f)(r)$ is an exceptional, critical Tate twist of $\Sym^n\!\rho_f$, then $n\equiv2\ (\operatorname{mod} 4)$, $r=\frac{n}{2}(1-k)$ or $\frac{n}{2}(1-k)+1$, and $k$ is even. Furthermore, the exceptional subquotient is isomorphic to $K$ or $K(1)$, respectively.
\end{lemma}
\begin{proof}
The critical Tate twists are listed in \cite[Lemma 3.3]{RgS08}. Determining those that are exceptional is a quick computation, noting that $\delta$ is non-trivial.
\end{proof}
For the Tate twist by $\frac{n}{2}(1-k)+1$, the exceptional subquotient is isomorphic to $K(1)$, a case we did not treat in the previous section. However, Greenberg defines the $L$-invariant of such a representation as the $L$-invariant of its Tate dual, whose exceptional subquotient is isomorphic to the trivial representation. In fact, the Tate dual of the twist by $\frac{n}{2}(1-k)+1$ is the twist by $\frac{n}{2}(1-k)$.
Accordingly, let $m$ be a positive odd integer, $n:=2m$, $\rho_n:=(\Sym^n\!\rho_f)(m(1-k))$, and assume $k$ is even. We present a basic setup for computing Greenberg's $L$-invariant $\mathcal{L}(\rho_n)$ using a deformation of $\rho_m:=\Sym^m\!\rho_f$. The main obstacle in carrying out this computation is to find a ``sufficiently rich'' deformation of $\rho_m$ to obtain a non-trivial answer. We do so below in the case $n=6$ for non-CM $f$ (of weight $\geq4$) by transferring $\rho_3$ to $\GSp(4)_{/\mathbf{Q}}$ and using a Hida deformation on this group. The case $n=2$ has been dealt with by Hida in \cite{Hi04} (see also \cite[Chapter 2]{H-PhD}).
We need a lemma from the finite-dimensional representation theory of $\gl(2)$ whose proof we leave to the reader.
\begin{lemma}\label{lem:enddecomp}
Let $\Std$ denote the standard representation of $\gl(2)$. Then, for $m$ an odd positive integer, there is a decomposition
\[ \End\left(\Sym^m\!\Std\right)\cong\bigoplus_{i=0}^m\left(\Sym^{2i}\!\Std\right)\otimes\mathrm{det}^{-i}.
\]
\end{lemma}
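As a quick plausibility check (dimensions only): $\dim\End\left(\Sym^m\!\Std\right)=(m+1)^2$, while
\[ \sum_{i=0}^m\dim\left(\left(\Sym^{2i}\!\Std\right)\otimes\mathrm{det}^{-i}\right)=\sum_{i=0}^m(2i+1)=(m+1)^2.
\]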
Since $\mathrm{det}\rho_f=\chi_p^{k-1}$, this lemma implies that $\rho_n$ occurs as a (global) direct summand in $\End\rho_m$. A deformation of $\rho_m$ provides a class in $\hone{\mathbf{Q}}{\End\rho_m}$. If its projection to $\hone{\mathbf{Q}}{\rho_n}$ is non-trivial (and satisfies conditions (\hyperref[item:CL1]{CL1--3}) of the previous section), then it generates $H_{\glob}^{\exc}$ and can be used to compute $\mathcal{L}(\rho_n)$.
An obvious choice of deformation of $\rho_m$ is the symmetric $m$th power of the Hida deformation of $\rho_f$. The cohomology class of this deformation has a non-trivial projection to $\hone{\mathbf{Q}}{\rho_n}$ only when $m=1$ (i.e.\ $n=2$, the symmetric square). For larger $m$, a ``richer'' deformation is required. The aims of the remaining sections of this paper are to obtain such a deformation in the case $m=3$ ($n=6$) and to use it to find a formula for the $L$-invariant of $\rho_6$ in terms of derivatives of Frobenius eigenvalues varying in the deformation.
\section{Input from \texorpdfstring{$\GSp(4)_{/\mathbf{Q}}$}{GSp(4)/\textbf{Q}}}
We use this section to set up our notations and conventions concerning the group $\GSp(4)_{/\mathbf{Q}}$, its automorphic representations, its Hida theory, and the Ramakrishnan--Shahidi symmetric cube lift from $\gl(2)_{/\mathbf{Q}}$ to it. We only provide what is required for our calculation of the $L$-invariant of $\rho_6$.
\subsection{Notation and conventions}
Let $V$ be a four-dimensional vector space over $\mathbf{Q}$ with basis $\{e_1,\dots,e_4\}$ equipped with the symplectic form given by
\[ J=\left(\begin{array}{cccc}
&&&1\\
&&1\\
&-1\\
-1
\end{array}\right).
\]
Let $\GSp(4)$ be the group of symplectic similitudes of $(V,J)$, i.e.\ $g\in\gl(4)$ such that ${}^tgJg=\nu(g)J$ for some $\nu(g)\in\mathbf{G}_m$. The stabilizer of the isotropic flag $0\subseteq\langle e_1\rangle\subseteq\langle e_1,e_2\rangle$ is the Borel subgroup $B$ of $\GSp(4)$ whose elements are of the form
\newlength{\arrrow}
\settowidth{\arrrow}{$\frac{c}{a}$}
\renewcommand{\arraystretch}{1.2}
\[
\left(\begin{array}{x{\arrrow}x{\arrrow}x{\arrrow}x{\arrrow}}
a&\ast&\ast&\ast\\
&b&\ast&\ast\\
&&\frac{c}{b}&\ast\\
&&&\frac{c}{a}
\end{array}\right).
\]
Writing an element of the maximal torus $T$ as
\settowidth{\arrrow}{$\frac{\nu(t)}{t_2}$}
\renewcommand{\arraystretch}{1.6}
\[t=\left(\begin{array}{x{\arrrow}x{\arrrow}x{\arrrow}x{\arrrow}}
t_1\\
&t_2\\
&&\frac{\nu(t)}{t_2}\\
&&&\frac{\nu(t)}{t_1}
\end{array}\right),
\]
we identify the character group $X^\ast(T)$ with triples $(a,b,c)$ satisfying $a+b\equiv c\ (\operatorname{mod}\ 2)$ so that
\[ t^{(a,b,c)}=t_1^{a}t_2^{b}\nu(t)^{(c-a-b)/2}.
\]
The dominant weights with respect to $B$ are those with $a\geq b\geq0$. If $\Pi$ is an automorphic representation of $\GSp(4,\mathbb{A}A)$ whose infinite component $\Pi_\infty$ is a holomorphic discrete series, we will say $\Pi$ has weight $(a,b)$ if $\Pi_\infty$ has the same infinitesimal character as the algebraic representation of $\GSp(4)$ whose highest weight is $(a,b,c)$ (for some $c$). For example, a classical Siegel modular form of (classical) weight $(k_1,k_2)$ gives rise to an automorphic representation of weight $(k_1-3,k_2-3)$ under our normalizations.
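For instance, in these coordinates the similitude character $\nu$ is $(0,0,2)$ and the determinant is $(0,0,4)$, since on $T$
\[ t^{(0,0,2)}=\nu(t)\qquad\text{and}\qquad \det(t)=t_1t_2\cdot\frac{\nu(t)}{t_2}\cdot\frac{\nu(t)}{t_1}=\nu(t)^2=t^{(0,0,4)};
\]
both triples satisfy the parity condition $a+b\equiv c\ (\operatorname{mod}\ 2)$.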
\subsection{The Ramakrishnan--Shahidi symmetric cube lift}
We wish to move the symmetric cube of a cusp form $f$ to a cuspidal automorphic representation of $\GSp(4,\mathbb{A})$ in order to use the Hida theory on this group to obtain an interesting Galois deformation of the symmetric cube of $\rho_f$. The following functorial transfer due to Ramakrishnan and Shahidi (\cite[Theorem A$^\prime$]{RS07}) allows us to do so in certain circumstances.
\begin{theorem}[Ramakrishnan--Shahidi \cite{RS07}]
Let $\pi$ be the cuspidal automorphic representation of $\gl(2,\mathbb{A})$ defined by a holomorphic, non-CM newform $f$ of even weight $k\geq2$, level $N$, and trivial character. Then, there is an irreducible cuspidal automorphic representation $\Pi$ of $\GSp(4,\mathbb{A})$ with the following properties
\begin{enumerate}
\item $\Pi_\infty$ is in the holomorphic discrete series with its $L$-parameter being the symmetric cube of that of $\pi$,
\item\label{item:RSweight} $\Pi$ has weight $(2(k-2),k-2)$, trivial central character, and is unramified outside of $N$,
\item $\Pi^K\neq0$ for some compact open subgroup $K$ of $\GSp(4,\mathbb{A}_f)$ of level equal to the conductor of $\Sym^3\!\rho_f$,
\item $L(s,\Pi)=L(s,\pi,\Sym^3)$, where $L(s,\Pi)$ is the degree 4 spin $L$-function,
\item\label{item:RSgeneric} $\Pi$ is weakly equivalent\footnote{Recall that ``weakly equivalent'' means that the local components are isomorphic for almost all places.} to a globally generic cuspidal automorphic representation,
\item $\Pi$ is not CAP, nor endoscopic.\footnote{Recall that an irreducible, cuspidal, automorphic representation of $\GSp(4,\mathbb{A})$ is ``CAP'' if it is weakly equivalent to the induction of an automorphic representation on a proper Levi subgroup, and it is ``endoscopic'' if the local $L$-factors of its spin $L$-function are equal, at almost all places, to the product of the local $L$-factors of two cuspidal automorphic representations of $\gl(2,\mathbb{A})$ with equal central characters.}
\end{enumerate}
\end{theorem}
We remark that the weight in part \ref{item:RSweight} can be read off from the $L$-parameter of $\Pi_\infty$ given in \cite[(1.7)]{RS07}. As for part \ref{item:RSgeneric}, note that the construction of $\Pi$ begins by constructing a globally generic representation on the bottom of page 323 of \cite{RS07}, and ends by switching, in the middle of page 326, the infinite component from the generic discrete series element of the archimedean $L$-packet to the holomorphic one. Alternatively, in \cite{Wei08}, Weissauer has shown that any non-CAP non-endoscopic irreducible cuspidal automorphic representation of $\GSp(4,\mathbb{A})$ is weakly equivalent to a globally generic cuspidal automorphic representation.
\subsection{Hida deformation of \texorpdfstring{$\rho_3$}{\textrho\textthreeinferior} on \texorpdfstring{$\GSp(4)_{/\mathbf{Q}}$}{GSp(4)/\textbf{Q}}}\label{sec:rho3t}
Let $f$ be a $p$-ordinary, holomorphic, non-CM, cuspidal, normalized newform of even weight $k\geq4$, level $\Gamma_1(N)$ (prime to $p$), and trivial character. We have added the non-CM hypothesis to be able to use the Ramakrishnan--Shahidi lift.\footnote{This is not really an issue as the CM case is much simpler.} According to lemma \ref{lem:exceptwists}, we only need to consider even weights. The restriction $k\neq2$ is forced by problems with the Hida theory on $\GSp(4)_{/\mathbf{Q}}$ in the weight $(0,0)$.
Tilouine and Urban (\cite{TU99}, \cite{U01}, \cite{U05}), as well as Pilloni (\cite{Pi10}, building on Hida (\cite{Hi02})) have worked on developing Hida theory on $\GSp(4)_{/\mathbf{Q}}$. In this section, we describe the consequences their work has on the deformation theory of $\rho_3=\Sym^3\!\rho_f$ (where $\rho_f$ is as described in section \ref{sec:sympowgeneral}).
We begin by imposing two new assumptions:
\begin{itemize}
\item[(\'Et)]\label{item:et} the universal ordinary $p$-adic Hecke algebra of $\GSp(4)_{/\mathbf{Q}}$ is \'etale over the Iwasawa algebra at the height one prime corresponding to $\Pi$;
\item[(RAI)]\label{item:RAI} the representation $\rho_3$ is residually absolutely irreducible.
\end{itemize}
Considering $\rho_3$ as the $p$-adic Galois representation attached to the Ramakrishnan--Shahidi lift $\Pi$ of $f$, we obtain a ring $\mathcal{A}$ of $p$-adic analytic functions in two variables $(s_1,s_2)$ on some neighbourhood of the point $(a,b)=(2(k-2),k-2)\in\mathbf{Z}_p^2$, a free rank four module $\mathcal{M}$ over $\mathcal{A}$, and a deformation $\wt{\rho}_3:G_\mathbf{Q}\rightarrow\Aut_\mathcal{A}(\mathcal{M})$ of $\rho_3$ such that $\wt{\rho}_3(a,b)=\rho_3$ and
\begin{equation}\label{eqn:rho3t}
\wt{\rho}_3|_{G_p}\sim\left(\begin{array}{cccc}
\theta_1\theta_2\mu_1 & \xi_{12} & \xi_{13} & \xi_{14} \\
&\theta_2\mu_2 & \xi_{23} & \xi_{24} \\
&&\theta_1\mu_2^{-1} & \xi_{34} \\
&&&\mu_1^{-1}
\end{array}\right)
\end{equation}
where the $\mu_i$ are unramified, and
\begin{eqnarray}
\mu_1(a,b)&=&\delta^{-3}\label{eqn:mu1} \\
\mu_2(a,b)&=&\delta^{-1}\label{eqn:mu2} \\
\theta_1(s_1,s_2)&=&\chi_p^{s_2+1}\label{eqn:theta1}\\
\theta_1(a,b)&=&\chi_p^{k-1} \\
\theta_2(s_1,s_2)&=&\chi_p^{s_1+2}\label{eqn:theta2}\\
\theta_2(a,b)&=&\chi_p^{2(k-1)}.
\end{eqnarray}
\begin{remark}
Assumption (\hyperref[item:RAI]{RAI}) allows us to take the integral version of \cite[Theorem 7.1]{TU99} (see the comment of \textit{loc.\ cit.} at the end of \S7) and assumption (\hyperref[item:et]{\'Et}) says that the coefficients are $p$-adic analytic. The shape of $\wt{\rho}_3|_{G_p}$ can be seen as follows. That four distinct Hodge--Tate weights show up can be seen by using \cite[Lemma 3.1]{U01} and the fact that both $\Pi$ and the representation obtained from $\Pi$ by switching the infinite component are automorphic. Applying Theorem 3.4 of \textit{loc.\ cit.} gives part of the general form of $\wt{\rho}_3|_{G_p}$ (taking into account that we work with the contragredient). The form the unramified characters on the diagonal take is due to $\wt{\rho}_3|_{G_p}$ taking values in the Borel subgroup $B$ (this follows from Corollary 3.2 and Proposition 3.4 of \textit{loc.\ cit.}). That the specializations of the $\mu_i$ and $\theta_i$ at $(a,b)$ are what they are is simply because $\wt{\rho}_3$ is a deformation of $\rho_3$.
\end{remark}
We may take advantage of assumption (\hyperref[item:et]{\'Et}) to determine a bit more information about the $\mu_i$. Indeed, let $\wt{\rho}_f$ denote the Hida deformation (on $\gl(2)_{/\mathbf{Q}}$) of $\rho_f$, so that
\[ \wt{\rho}_f|_{G_p}\sim\mtrx{\theta\mu^{-1}}{\xi}{0}{\mu}
\]
where $\theta,\mu,$ and $\xi$ are $p$-adic analytic functions on some neighbourhood of $s=k$, $\theta(s)=\chi_p^{s-1}$, and $\mu(s)$ is the unramified character sending $\Frob_p$ to $\alpha_p(s)$ (where $\alpha_p(s)$ is the $p$-adic analytic function giving the $p$th Fourier coefficients in the Hida family of $f$) (\cite[Theorem 2.2.2]{W88}).
By \cite[Remark 9]{GhVa04}, we know that every arithmetic specialization of $\wt{\rho}_f$ is non-CM. We may thus apply the Ramakrishnan--Shahidi lift to the even weight specializations and conclude that $\Sym^3\!\wt{\rho}_f$ is an ordinary modular deformation of $\rho_3$. Assumption (\hyperref[item:et]{\'Et}) then implies that $\Sym^3\!\wt{\rho}_f$ is a specialization of $\wt{\rho}_3$. Since the weights of the symmetric cube lift of a weight $k^\prime$ cusp form are $(2(k^\prime-2),k^\prime-2)$, we can conclude that $\Sym^3\!\wt{\rho}_f$ is the ``sub-family'' of $\wt{\rho}_3$ where $s_1=2s_2$. Thus,
\begin{eqnarray*}
\mu_1(2s,s)&=&\mu^{-3}(s+2)\\
\mu_2(2s,s)&=&\mu^{-1}(s+2).
\end{eqnarray*}
Applying the chain rule yields
\begin{eqnarray}
2\partial_1\mu_1(a,b)+\partial_2\mu_1(a,b)&=&-\frac{3\mu^\prime(k)}{\delta^4}\label{eqn:mu1relations}\\
2\partial_1\mu_2(a,b)+\partial_2\mu_2(a,b)&=&-\frac{\mu^\prime(k)}{\delta^2}.\label{eqn:mu2relations}
\end{eqnarray}
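In more detail (a sketch of the chain-rule step for the first of these): differentiating $\mu_1(2s,s)=\mu^{-3}(s+2)$ at $s=k-2$, so that $(2s,s)=(a,b)$, and using $\mu(k)=\delta$, we get
\[ 2\partial_1\mu_1(a,b)+\partial_2\mu_1(a,b)=\frac{d}{ds}\Big|_{s=k-2}\mu^{-3}(s+2)=-3\mu(k)^{-4}\mu^\prime(k)=-\frac{3\mu^\prime(k)}{\delta^4};
\]
the second relation follows in the same way from $\mu_2(2s,s)=\mu^{-1}(s+2)$.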
\section{Calculating the \texorpdfstring{$L$}{\textit{L}}-invariant}\label{sec:calcLinvar}
For the remainder of this article, let $f$ be a $p$-ordinary, holomorphic, non-CM, cuspidal, normalized newform of even weight $k\geq4$, level $\Gamma_1(N)$ (prime to $p$), and trivial character. Let $\rho_f$, $\rho_3$, and $\rho_6$ be as in section \ref{sec:sympowgeneral}, and let $W$ denote the exceptional subquotient of $\rho_6$. Furthermore, assume condition (\hyperref[item:Z]{Z}) that $\overline{\operatorname{Sel}}_\mathbf{Q}(\rho_6)=0$. We now put together the ingredients of the previous sections to compute Greenberg's $L$-invariant of $\rho_6$.
\subsection{Constructing the global Galois cohomology class}
Recall that if $\rho_3^\prime$ is an infinitesimal deformation of $\rho_3$ (over $K[\epsilon]:=K[x]/(x^2)$), a corresponding cocycle $c_3^\prime:G_\mathbf{Q}\rightarrow\End\rho_3$ is defined by the equation
\[\rho_3^\prime(g)=\rho_3(g)(1+\epsilon c_3^\prime(g)).
\]
Let $\wt{\rho}_3$ be the deformation of $\rho_3$ constructed in section \ref{sec:rho3t}. Taking a first-order expansion of the entries of $\wt{\rho}_3$ around $(a,b)=(2(k-2),k-2)$ in any given direction yields an infinitesimal deformation of $\rho_3$. We parametrize these as follows. A $p$-adic analytic function $F\in\mathcal{A}$ has a first-order expansion near $(s_1,s_2)=(a,b)$ given by
\[ F(a+\epsilon_1,b+\epsilon_2)\approx F(a,b)+\epsilon_1\partial_1F(a,b)+\epsilon_2\partial_2F(a,b).
\]
We introduce a parameter $\Delta$ such that $\epsilon_1=(1-\Delta)\epsilon$ and $\epsilon_2=\Delta\epsilon$. Let $\wt{\rho}_{3,\Delta}$ denote the infinitesimal deformation of $\rho_3$ obtained by first specializing $\wt{\rho}_3$ along the direction given by $\Delta$, then taking the quotient by the ideal $((s_1,s_2)-(a,b))^2$. Let $c_{6,\Delta}$ denote the projection of its corresponding cocycle to $\rho_6$ (in the decomposition of lemma \ref{lem:enddecomp}).
\subsection{Properties of the global Galois cohomology class}
To use $c_{6,\Delta}$ to compute the $L$-invariant of $\rho_6$, we must show that it satisfies conditions (CL1--3) of section \ref{sec:GrLinvar}. The proofs of lemma 1.2 and 1.3 of \cite{Hi07} apply to the cocycle $c_{6,\Delta}$ to show that it satisfies (\hyperref[item:CL1]{CL1}).\footnote{The deformation $\wt{\rho}_3$ clearly satisfies conditions (K$_3$1--4) of \cite[\S0]{Hi07} and our $c_{6,\Delta}$ is a special case of the cocycles Hida defines in the proof of lemma 1.2 of \textit{loc.\ cit.}}
To verify conditions (\hyperref[item:CL2]{CL2}) and (\hyperref[item:CL3]{CL3}) (and to compute the $L$-invariant of $\rho_6$), we need to find an explicit formula for part of $c_{6,\Delta}|_{G_p}$. We know that
\[ \rho_f|_{G_p}\sim\mtrx{\chi_p^{k-1}\delta^{-1}}{\varphi}{0}{\delta}.
\]
Taking the symmetric cube (considered as a subspace of the third tensor power) yields
\settowidth{\arrrow}{$\chi_p^{3(k-1)}\delta^{-1}$}
\renewcommand{\arraystretch}{2.2}
\[ \rho_3|_{G_p}\sim\left(
\begin{array}{x{\arrrow}x{\arrrow}x{\arrrow}x{\arrrow}}
\chi_p^{3(k-1)}\delta^{-3} & \frac{3\chi_p^{2(k-1)}\varphi}{\delta^2} & \frac{3\chi_p^{k-1}\varphi^2}{\delta} & \varphi^3 \\
& \chi_p^{2(k-1)}\delta^{-1} & 2\chi_p^{k-1}\varphi & \delta\varphi^2 \\
& & \chi_p^{k-1}\delta & \delta^2\varphi \\
& & & \delta^3
\end{array}\right).
\]
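For example (a sketch of how one such entry arises, in a basis compatible with the displayed matrix): writing $e,f$ for a basis of $V_f$ on which $G_p$ acts by $g\cdot e=(\chi_p^{k-1}\delta^{-1})(g)\,e$ and $g\cdot f=\varphi(g)\,e+\delta(g)\,f$, the symmetrized vector $e\otimes e\otimes f+e\otimes f\otimes e+f\otimes e\otimes e$ is sent to
\[ \frac{3\chi_p^{2(k-1)}\varphi}{\delta^2}\,e^{\otimes3}+\chi_p^{2(k-1)}\delta^{-1}\left(e\otimes e\otimes f+e\otimes f\otimes e+f\otimes e\otimes e\right),
\]
which accounts for the $(1,2)$- and $(2,2)$-entries above.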
Taking first-order expansions of the entries of $\wt{\rho}_3\rho_3^{-1}-I_4$, specializing along the direction given by $\Delta$, and projecting yields $c_{6,\Delta}$. However, since we are interested in an explicit formula for $c_{6,\Delta}|_{G_p}$, we need to determine a basis that gives the decomposition of lemma \ref{lem:enddecomp}. This can be done using the theory of raising and lowering operators. We obtain the following result.
\begin{theorem}
With respect to such a basis,
\begin{equation}\label{eqn:c6formula}
c_{6,\Delta}|_{G_p}\sim\left(\begin{array}{c}
\ast\\
(1-\Delta)\left(\frac{\partial_1\theta_2}{\chi_p^{2(k-1)}}-\frac{2\partial_1\theta_1}{\chi_p^{k-1}}-\delta^3\partial_1\mu_1+3\delta\partial_1\mu_2\right) \\
\hspace{0.28in}+\Delta\left(\frac{\partial_2\theta_2}{\chi_p^{2(k-1)}}-\frac{2\partial_2\theta_1}{\chi_p^{k-1}}-\delta^3\partial_2\mu_1+3\delta\partial_2\mu_2\right)\\
0
\end{array}\right)
\end{equation}
where $\ast$ and $0$ are both $3\times1$, and all derivatives are evaluated at $(a,b)$.
\end{theorem}
Since the bottom three coordinates in \eqref{eqn:c6formula} are zero, the image of $c_{6,\Delta}|_{G_p}$ lands in $F^{00}\rho_6$, i.e.\ $c_{6,\Delta}$ satisfies (\hyperref[item:CL2]{CL2}). If we can show that the middle coordinate is non-zero, then $c_{6,\Delta}$ satisfies (\hyperref[item:CL3]{CL3}). In fact, we will show that $c_{6,\Delta}$ satisfies (\hyperref[item:CL3]{CL3}) if, and only if, $\Delta\neq1/3$ (in this latter case, we will show that $[c_{6,1/3}]=0$).
Let $\ol{c}_{6,\Delta}$ denote the image of $c_{6,\Delta}$ in $\hone{\mathbf{Q}_p}{W}$. Let
\[ \alpha_p^{(i,j)}:=\partial_j\mu_i(a,b)(\Frob_p).
\]
\begin{corollary}\label{prop:c6coords}
The coordinates of $\ol{c}_{6,\Delta}$, as in equation \eqref{eqn:coords}, are
\[ \left(1-3\Delta,(1-\Delta)\left(-\alpha_p^3\alpha_p^{(1,1)}+3\alpha_p\alpha_p^{(2,1)}\right)+\Delta\left(-\alpha_p^3\alpha_p^{(1,2)}+3\alpha_p\alpha_p^{(2,2)}\right)\right).
\]
In particular, if $\Delta\neq1/3$, then $c_{6,\Delta}$ satisfies (\hyperref[item:CL3]{CL3}).
\end{corollary}
Before proving this, we state and prove a lemma.
\begin{lemma}
Recall that $\theta(s)=\chi_p^{s-1}$. For any integer $s\geq2$, and any principal unit $u$,
\begin{enumerate}
\item $\theta^\prime(s)(\Frob_p)=0$,
\item $\displaystyle{\frac{\theta^\prime(s)(\rec(u))}{\chi_p^{s-1}(\rec(u))}=-\log_pu}$.
\end{enumerate}
\end{lemma}
\begin{proof}
The first equality is simply because $\chi_p(\Frob_p)=1$. For the second, recall that\linebreak
$\chi_p(\rec(u))=u^{-1}$, so $\theta(s)(\rec(u))=u^{1-s}$. Thus, the logarithmic derivative of $\theta(s)(\rec(u))$ is indeed $-\log_pu$.
\end{proof}
\begin{proof}[Proof of corollary \ref{prop:c6coords}]
The first coordinate is obtained by taking an arbitrary principal unit $u$, evaluating $\ol{c}_{6,\Delta}$ at $\rec(u)$, and dividing by $-\log_pu$. By equations \eqref{eqn:theta1} and \eqref{eqn:theta2}, $\partial_1\theta_1=\partial_2\theta_2=0$. Combining the fact that the $\mu_i$ are unramified with part (b) of the above lemma yields
\[ \frac{\ol{c}_{6,\Delta}(\rec(u))}{-\log_pu}=\frac{(1-\Delta)(-\log_pu)+\Delta(2\log_pu)}{-\log_pu}=1-3\Delta.
\]
If $\Delta\neq1/3$, the first coordinate is non-zero, so $\ol{c}_{6,\Delta}$ itself is non-zero, so $c_{6,\Delta}$ satisfies (\hyperref[item:CL3]{CL3}).
Combining part (a) of the above lemma with equations \eqref{eqn:mu1} and \eqref{eqn:mu2} yields the second coordinate (recall that $\delta(\Frob_p)=\alpha_p$).
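Explicitly, since by part (a) of the lemma the derivatives of $\theta_1$ and $\theta_2$ vanish at $\Frob_p$, the middle entry of \eqref{eqn:c6formula} evaluated at $\Frob_p$ equals
\[ (1-\Delta)\left(-\alpha_p^3\alpha_p^{(1,1)}+3\alpha_p\alpha_p^{(2,1)}\right)+\Delta\left(-\alpha_p^3\alpha_p^{(1,2)}+3\alpha_p\alpha_p^{(2,2)}\right),
\]
which is the stated second coordinate.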
\end{proof}
\begin{remark}
If we take $\Delta=1/3$, the first coordinate of $\ol{c}_{6,1/3}$ vanishes. Hence,\linebreak
$\ol{c}_{6,1/3}~\in~\hone[\nr]{\mathbf{Q}_p}{W}$. Therefore, $[c_{6,1/3}]\in\overline{\operatorname{Sel}}_\mathbf{Q}(\rho_6)=0$ (by assumption (\hyperref[item:Z]{Z})). The direction $\Delta=1/3$ is the one for which $\epsilon_1/\epsilon_2=2$, i.e.\ the direction corresponding to the symmetric cube of the $\gl(2)$ Hida deformation of $\rho_f$. This is an instance of the behaviour mentioned at the end of section \ref{sec:sympowgeneral}.
\end{remark}
\subsection{Formula for the \texorpdfstring{$L$}{\textit{L}}-invariant}
Tying all this together yields the main theorem of this article.
\begin{theoremA}\label{thm:theoremA}
Let $p\geq3$ be a prime. Let $f$ be a $p$-ordinary, holomorphic, non-CM, cuspidal, normalized newform of even weight $k\geq4$, level $\Gamma_1(N)$ (prime to $p$), and trivial character. Let $\rho$ be a critical, exceptional Tate twist of $\Sym^6\!\rho_f$, i.e.\ $\rho=\rho_6=(\Sym^6\!\rho_f)(3(1-k))$ or its Tate dual. Assume conditions \textup{(\hyperref[item:et]{\'Et})}, \textup{(\hyperref[item:RAI]{RAI})}, and \textup{(\hyperref[item:Z]{Z})}. Then,
\begin{equation}\label{eqn:sym6Linvar}
\mathcal{L}(\rho)=-\alpha_p^3\alpha_p^{(1,1)}+3\alpha_p\alpha_p^{(2,1)}.
\end{equation}
\end{theoremA}
\begin{proof}
Pick any $\Delta\neq1/3$. We've shown that $[c_{6,\Delta}]$ satisfies (\hyperref[item:CL1]{CL1--3}) and hence generates $H_{\glob}^{\exc}$. The coordinates of its image in $\hone{\mathbf{Q}_p}{W}$ were obtained in corollary \ref{prop:c6coords}. Therefore, $\mathcal{L}(\rho_6)$ can be computed from equation \eqref{eqn:linvarslop}. Specifically, the result is obtained by solving the system of linear equations in $\mathcal{L}(\rho_6)$ and the $\alpha_p^{(i,j)}$ given by the coordinates of $\ol{c}_{6,\Delta}$ and equations \eqref{eqn:mu1relations} and \eqref{eqn:mu2relations}. The $L$-invariant of $\rho_6^\ast$ is by definition that of $\rho_6$.
\end{proof}
\begin{remark}
We could express this result in terms of other $\alpha_p^{(i,j)}$. For example, picking $\Delta=1$ yields
\[ \mathcal{L}(\rho_6)=\frac{1}{2}\alpha_p^3\alpha_p^{(1,2)}-\frac{3}{2}\alpha_p\alpha_p^{(2,2)}.
\]
\end{remark}
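As a consistency check (a sketch using only \eqref{eqn:mu1relations} and \eqref{eqn:mu2relations} evaluated at $\Frob_p$, which give $2\alpha_p^{(1,1)}+\alpha_p^{(1,2)}=-3\alpha_p^\prime/\alpha_p^4$ and $2\alpha_p^{(2,1)}+\alpha_p^{(2,2)}=-\alpha_p^\prime/\alpha_p^2$ with $\alpha_p^\prime:=\mu^\prime(k)(\Frob_p)$), the two expressions agree:
\[ \frac{1}{2}\alpha_p^3\alpha_p^{(1,2)}-\frac{3}{2}\alpha_p\alpha_p^{(2,2)}=\left(-\frac{3\alpha_p^\prime}{2\alpha_p}-\alpha_p^3\alpha_p^{(1,1)}\right)+\left(\frac{3\alpha_p^\prime}{2\alpha_p}+3\alpha_p\alpha_p^{(2,1)}\right)=-\alpha_p^3\alpha_p^{(1,1)}+3\alpha_p\alpha_p^{(2,1)}.
\]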
\subsection{Relation to Greenberg's \texorpdfstring{$L$}{\textit{L}}-invariant of the symmetric square}
We can carry out the above analysis for the projection to $\rho_2:=(\Sym^2\!\rho_f)(1-k)$ in lemma \ref{lem:enddecomp} and compare the value of $\mathcal{L}(\rho_2)$ obtained with the known value (\cite[Theorem 1.1]{Hi04}, \cite[Theorem~A]{H-PhD})
\[ \mathcal{L}(\rho_2)=-2\frac{\alpha_p^\prime}{\alpha_p}
\]
where $\alpha_p^\prime=\mu^\prime(k)(\Frob_p)=\alpha_p^\prime(k)$, and one assumes that $\overline{\operatorname{Sel}}_\mathbf{Q}(\rho_2)=0$.\footnote{This vanishing is known in many cases due to work of Hida (\cite{Hi04}), Kisin (\cite{Ki04}), and Weston (\cite{Wes04}). See those papers for details or \cite[Theorem 2.1.1]{H-PhD} for a summary.} The restriction of the cocycle $c_{2,\Delta}$ (in an appropriate basis) is
\[ c_{2,\Delta}|_{G_p}\sim\left(\begin{array}{c}
\ast\\
(1-\Delta)\left(-\frac{2\partial_1\theta_2}{\chi_p^{2(k-1)}}-\frac{\partial_1\theta_1}{\chi_p^{k-1}}-3\delta^3\partial_1\mu_1-\delta\partial_1\mu_2\right) \\
\hspace{0.28in}+\Delta\left(-\frac{2\partial_2\theta_2}{\chi_p^{2(k-1)}}-\frac{\partial_2\theta_1}{\chi_p^{k-1}}-3\delta^3\partial_2\mu_1-\delta\partial_2\mu_2\right)\\
0
\end{array}\right)
\]
Accordingly, the coordinates of the class $\ol{c}_{2,\Delta}$ are
\[ \left(\Delta-2,(1-\Delta)\left(-3\alpha_p^3\alpha_p^{(1,1)}-\alpha_p\alpha_p^{(2,1)}\right)+\Delta\left(-3\alpha_p^3\alpha_p^{(1,2)}-\alpha_p\alpha_p^{(2,2)}\right)\right).
\]
The cocycle $c_{2,\Delta}$ can be used to compute $\mathcal{L}(\rho_2)$ when $\Delta\neq2$. When $\Delta=2$, one has, as above, $[c_{2,\Delta}]\in\overline{\operatorname{Sel}}_\mathbf{Q}(\rho_2)$. Taking $\Delta=0$ yields
\begin{equation}\label{eqn:sym2Linvar}
\mathcal{L}(\rho_2)=\frac{3}{2}\alpha_p^3\alpha_p^{(1,1)}+\frac{1}{2}\alpha_p\alpha_p^{(2,1)}.
\end{equation}
Combining equations \eqref{eqn:sym6Linvar} and \eqref{eqn:sym2Linvar} yields the following relation between $L$-invariants.
\begin{theoremA}\label{thm:theoremB}
Assuming \textup{(\hyperref[item:et]{\'Et})}, \textup{(\hyperref[item:RAI]{RAI})}, \textup{(\hyperref[item:Z]{Z})}, and $\overline{\operatorname{Sel}}_\mathbf{Q}(\rho_2)=0$, we have
\[ \mathcal{L}(\rho_6)=-10\alpha_p^3\alpha_p^{(1,1)}+6\mathcal{L}(\rho_2).
\]
\end{theoremA}
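A sketch of the elimination: solving \eqref{eqn:sym2Linvar} for $\alpha_p\alpha_p^{(2,1)}$ gives $\alpha_p\alpha_p^{(2,1)}=2\mathcal{L}(\rho_2)-3\alpha_p^3\alpha_p^{(1,1)}$, and substituting this into \eqref{eqn:sym6Linvar} yields
\[ \mathcal{L}(\rho_6)=-\alpha_p^3\alpha_p^{(1,1)}+3\left(2\mathcal{L}(\rho_2)-3\alpha_p^3\alpha_p^{(1,1)}\right)=-10\alpha_p^3\alpha_p^{(1,1)}+6\mathcal{L}(\rho_2).
\]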
\begin{remark}
There is a guess, suggested by Greenberg \cite[p.~170]{G94}, that the $L$-invariants of all symmetric powers of $\rho_f$ should be equal. This is known in the cases where it is relatively easy to compute the $L$-invariant, namely when $f$ corresponds to an elliptic curve with split, multiplicative reduction at $p$, or when $f$ has CM. In the case at hand, we fall one relation short of showing the equality of $\mathcal{L}(\rho_6)$ and $\mathcal{L}(\rho_2)$. Equality would occur if one knew the relation
\[ \alpha_p^{(1,1)}\overset{?}{=}-\frac{\alpha_p^\prime}{\alpha_p^4}.
\]
\end{remark}
\addcontentsline{toc}{section}{References}
\end{document}
|
\begin{document}
\title[$\ast $-homogeneous ideals]{On $\ast $-homogeneous ideals}
\author{Muhammad Zafrullah}
\address{Department of Mathematics, Idaho State University,\\
Pocatello, Idaho 83209 USA}
\email{[email protected]}
\urladdr{http://www.lohar.com}
\subjclass[2010]{13A15; Secondary 13G05; 06F20}
\date{July 27, 2001}
\keywords{star operation, Monoid, pre-Riesz Monoid, $\ast $-homogeneous $
\ast $-potent domain}
\dedicatory{Dedicated to the memory of True Friendship}
\begin{abstract}
Let $\ast $ be a star operation of finite character. Call a $\ast $-ideal $I$
of finite type a $\ast $-homogeneous ideal if $I$ is contained in a unique
maximal $\ast $-ideal $M=M(I).$ A maximal $\ast $-ideal that contains a $
\ast $-homogeneous ideal is called potent and the same name bears a domain
all of whose maximal $\ast $-ideals are potent. One among the various aims
of this article is to indicate what makes a $\ast $-ideal of finite type a $
\ast $-homogeneous ideal, where and how we can find one, what they can do
and how this notion came to be. We also prove some results of current
interest in ring theory using some ideas from this author's joint work in
\cite{LYZ 2014} on partially ordered monoids. We characterize when a
commutative Riesz monoid generates a Riesz group
\end{abstract}
\maketitle
\section{Introduction}
Let $\ast $ be a finite character star operation defined on an integral
domain $D$ throughout. (A working introduction to the star operations, and
the reason for using them, will follow.) Call a nonzero $\ast $-ideal $I$ of
finite type a $\ast $-homogeneous ideal if $I$ is contained in a unique
maximal $\ast $-ideal. According to proposition $1$ of \cite{AZ 2019},
associated with each $\ast $-homogeneous ideal $I$ is a unique $\ast $
-maximal ideal $M(I)=\{x\in D|$ $(x,I)^{\ast }\neq D\}.$ The notion of a $
\ast $-homogeneous ideal has figured prominently in describing unique
factorization of ideals and elements in \cite{AZ 2019} and it seems
important to indicate some other properties and uses of this notion and
notions related to it. Call a $\ast $-maximal ideal $M$ $\ast $-potent if $M$
contains a $\ast $-homogeneous ideal and call a domain $D$ $\ast $-potent if
each of the $\ast $-maximal ideals of $D$ is $\ast $-potent. The aim of this
article is to study some properties of $\ast $-homogeneous ideals and of $
\ast $-potent domains. We show for instance that while in a $\ast $-potent
domain every proper $\ast $-ideal of finite type is contained in a $\ast $
-homogeneous ideal, the converse may not be true. We shall also indicate how
these concepts can be put to use. Before we elaborate on that, it seems
pertinent to give an idea of our main tool, the star operations. Indeed, the
rest of what we plan to prove will be included in the plan of the paper
after the introduction to star operations.
\subsection{Introduction to star operations}
Let $D$ be an integral domain with quotient field $K$ throughout. Let $F(D)$ be the set of nonzero fractional ideals of $D,$ and let $f(D)=\{A\in F(D)|A$ is finitely generated\}. A star operation $\ast $ on $D$ is a closure operation on $F(D)$ that satisfies $D^{\ast }=D$ and $(xA)^{\ast }=xA^{\ast }$ for $A\in F(D)$ and $x\in K\backslash \{0\}$. With $\ast $
we can associate a new star-operation $\ast _{s}$ given by $A\mapsto A^{\ast
_{s}}=\cup \{B^{\ast }|B\subseteq A,B\in f(D)\}$ for each $A\in F(D).$ We
say that $\ast $ has finite character if $\ast =\ast _{s}$. Three important
star-operations are the $d$-operation $A\mapsto A_{d}=A$, the $v$-operation $
A\mapsto A_{v}=(A^{-1})^{-1}$ $=\cap \{Dx|Dx\supseteq A,x\in K\}$ where $
A^{-1}=\{x\in K:xA\subseteq D\}$ and the $t$-operation $t=v_{s}.$ Here $d$
and $t$ have finite character. A fractional ideal $A$ is a $\ast $-ideal if $
A=A^{\ast }$ and a $\ast $-ideal $A$ is of finite type if $A=B^{\ast }$ for
some $B\in f(D).$ If $\ast $ has finite character and $A^{\ast }$ is of
finite type, then $A^{\ast }=B^{\ast }$ for some $B\in f(D),$ $B\subseteq A.$
A fractional ideal $A\in F(D)$ is $\ast $-invertible if there exists a $B\in
F(D)$ with $(AB)^{\ast }=D$; in this case we can take $B=A^{-1}$. For any $
\ast $-invertible $A\in F(D),A^{\ast }=A_{v}$. If $\ast $ has finite
character and $A$ is $\ast $-invertible, then $A^{\ast }$ is a finite type $
\ast $-ideal and $A^{\ast }=A_{t}$. Given two fractional ideals $A,B\in
F(D),~(AB)^{\ast }$ denotes their $\ast $-product. Note that $(AB)^{\ast
}=(A^{\ast }B)^{\ast }=(A^{\ast }B^{\ast })^{\ast }$. Given two star
operations $\ast _{1}$ and $\ast _{2}$ on $D$, we write $\ast _{1}\leq \ast
_{2}$ if $A^{\ast _{1}}\subseteq A^{\ast _{2}}$ for all $A\in F(D)$. So $
\ast _{1}\leq \ast _{2}\Leftrightarrow (A^{\ast _{1}})^{\ast _{2}}=A^{\ast
_{2}}\Leftrightarrow (A^{\ast _{2}})^{\ast _{1}}=A^{\ast _{2}}$ for all $
A\in F(D).$
Indeed for any finite character star-operation $\ast $ on $D$ we have $d\leq \ast \leq t$. For a quick introduction to star-operations, the reader is referred to \cite[Sections 32, 34]{Gil 1972} or \cite{Zaf 2000}. For a more detailed treatment see Jaffard \cite{Jaf 1962}. A keenly
interested reader may also look up \cite{H-K 1998}. These days star
operations are being used to define analogues of various concepts. The trick
is to take a concept, e.g., a PID and look for what the concept would be if
we require that for every nonzero ideal $I,$ $I^{\ast }$ is principal and
voila! You have several concepts parallel to that of a PID. Of these $t$-PID
turns out to be a UFD. Similarly a $v$-PID is a completely integrally closed
GCD domain of a certain kind. A $t$-Dedekind domain, on the other hand is a
Krull domain and a $v$-Dedekind domain is a domain with the property that
for each nonzero ideal $A$ we have $A_{v}$ invertible. So when we prove a
result about a general star operation $\ast $ the result gets proved for all
the different operations, $d,t,v$ etc. Apart from the above, any terminology
that is not mentioned above will be introduced at the point of entry of the
concept.
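As a quick illustration of how these operations can differ (a standard textbook computation, recorded here only for orientation), take $D=k[X,Y]$, a polynomial ring in two variables over a field $k$, and $A=(X,Y)$. Then $A^{-1}=D,$ so
\[
A_{d}=A=(X,Y),\qquad A_{v}=(A^{-1})^{-1}=D,\qquad A_{t}=A_{v}=D,
\]
the last equality because $A$ is finitely generated. Thus $A$ is a $d$-ideal that is not a $t$-ideal, and $d\leq t$ is strict here. By contrast, in the ring of integers $\mathbb{Z}$ every nonzero ideal is principal, hence divisorial, so $B_{d}=B_{v}=B_{t}$ for every nonzero ideal $B$; for instance $(4,6)=(2)=(4,6)_{v}=(4,6)_{t}$.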
Suppose that $\ast $ is a finite character star-operation on $D$. Then a
proper $\ast $-ideal is contained in a maximal $\ast $-ideal and a maximal $
\ast $-ideal is prime. We denote the set of maximal $\ast $-ideals of $D$ by
$\ast $-$Max(D)$. We have $D=\cap D_{P}$ where $P$ ranges over $\ast $-$
Max(D)$. From this point on we shall use $\ast $ to denote a finite type star
operation. Call $D$ of finite $\ast $-character if for each nonzero non unit
$x$ of $D,$ $x$ belongs to at most a finite number of maximal $\ast $
-ideals. Apart from the introduction there are three sections in this paper.
In section \ref{S1} we talk about $\ast $-homogeneous ideals, and $\ast $
-potent domains. We characterize $\ast $-potent domains in this section,
show that if $D$ is of finite $\ast $-character then $D$ must be potent,
examine an error in a paper of the author, \cite{DZ 2010}, on characterizing domains of finite $\ast $-character, and characterize domains of finite $\ast $-character with a new proof. In section \ref{S2}, we show how creating a suitable definition of a $\ast $-homogeneous ideal will create a theory of unique factorization of ideals. Calling an element $r\in D$ $\ast $-f-rigid ($\ast $-factorial rigid) if $rD$ is a $\ast $-homogeneous ideal such that every proper $\ast $-homogeneous ideal containing $r$ is principal, we call a $\ast $-potent maximal $\ast $-ideal $M$ (resp., domain $D$) $\ast $-f-potent if $M$ (resp., every maximal $\ast $-ideal of $D$) contains a $\ast $-f-rigid element. We show that over a $\ast $-f-potent domain a primitive polynomial $f$ is super primitive, i.e., if $A_{f},$ the content of $f,$ is such that the generators of $f$ have no non unit common factor, then $(A_{f})_{v}=D,$ and we indicate how to construct an atomless non-pre-Schreier domain. In this section we also offer a seamless patch to remove an error in the proof of a result in a paper by Kang \cite{Kan 1989} and show that $D$ is $t$-super potent if and only if $D[X]_{S}$ is $t$-f-potent, where $X$ is an indeterminate and $S=\{f\in D[X]|(A_{f})_{v}=D\}.$ We also show, by
way of constructing more examples, in this section that if $L$ is an
extension of $K$ the quotient field of $D$ and $X$ an indeterminate over $D$
then $D$ is $t$-f-potent if and only if $D+XL[X]$ is. Finally in section \ref{S3} we define a pre-Riesz monoid as a p.o. monoid $M$ such that for any $x_{1},x_{2},...,x_{n}\in M\backslash \{0\}$ either $glb(x_{1},x_{2},...,x_{n})=0$ or there is $r\in M$ with $0<r\leq x_{1},x_{2},...,x_{n},$ and we indicate that the monoid of $\ast $-ideals of finite type is a pre-Riesz monoid and, of course, how to use this information.
\section{\label{S1}$\ast $-potent domains and $\ast $-homogeneous ideals}
Work on this paper started in earnest with the somewhat simple observation
that if $D$ is $\ast $-potent then every nonzero non unit $x\in D$ is
contained in some $\ast $-homogeneous ideal. The proof goes as follows:
Because $x$ is a nonzero non unit, $x$ must be contained in some maximal $
\ast $-ideal $M.$ Now as $D$ is $\ast $-potent $M=M(I)$ for some $\ast $
-homogeneous ideal $I.$ Consider $J=(I,x)^{\ast }$ and note that $J\neq D$ because $I\cup \{x\}\subseteq M;$ moreover, any maximal $\ast $-ideal containing $J$ contains $I$ and hence equals $M(I)=M,$ so $J$ is contained in a unique maximal $\ast $-ideal. This makes $J$ a $\ast $-homogeneous ideal.
This leads to the question: If $D$ is a domain with a finite character star
operation $\ast $ defined on it such that every nonzero non unit $x$ of $D$
is contained in some $\ast $-homogeneous ideal $I$ of $D,$ must $D$ be $\ast
$-potent?
This question came up in a different guise, as: when is a certain type of domain $\ast _{s}$-potent for a general star operation $\ast $, in \cite{YZ 2011}, and was sort of settled in a tentative fashion in Proposition 5.12 of \cite{YZ 2011}, which says, in the general terms being used here, that: Suppose that $
D $ is a domain with a finite character $\ast $-operation defined on it.
Then $D$ is $\ast $-potent provided (1) every nonzero non unit $x$ of $D$ is
contained in some $\ast $-homogeneous ideal $I$ of $D$ and (2) for $M,$ $
M_{\alpha }\in $ $\ast $-$max(D)$, $M\subseteq \cup M_{\alpha }$ implies $M=M_{\alpha }$ for some $\alpha .$
The proof could be something like: By (1) for every nonzero non unit $x$
there is a $\ast $-homogeneous ideal $I_{x}$ containing $x$ and so $x\in
M(I_{x})$. So $M\subseteq \cup M(I_{x})$ and by (2) $M$ must be equal to $
M(I_{x})$ for some $x.$
Thus we have the following statement.
\begin{theorem}
\label{Theorem 1} Let $\ast $ be a finite character star operation defined on $D.$ Then $D$ is $\ast $-potent if $D$ satisfies the following: (1) every nonzero non unit $x$ of $D$ is contained in some $\ast $-homogeneous ideal $I$ of $D$ and (2) for $M,$ $M_{\alpha }\in $ $\ast $-$max(D)$, $M\subseteq \cup M_{\alpha }$ implies $M=M_{\alpha }$ for some $\alpha .$
\end{theorem}
Condition (2) in the statement of Theorem \ref{Theorem 1} has had to face a
lot of doubt from me, in that, is it really necessary or perhaps can it be
relaxed a little?
The following example shows that condition (2) or some form of it is here to
stay.
It is well known that the ring $\mathcal{E}$ of entire functions is a Bezout
domain \cite[Exercise 18, p 147]{Gil 1972}. It is easy to check that a
principal prime in a Bezout domain is maximal. Now we know that a zero of an
entire function determines a principal prime in $\mathcal{E}$ and that the
set of zeros of a nontrivial entire function is discrete and that the multiplicity of a zero of an entire function is a positive integer \cite[Theorem 6]{Hel 1940}. Thus each nonzero non unit of $
\mathcal{E}$ is expressible as a countable product of finite powers of
distinct principal primes. For the identity star operation $d,$ certainly
defined on $\mathcal{E},$ only an ideal $I$ generated by a power of a
principal prime can be $d$-homogeneous. For if $I$ is $d$-homogeneous, then $I=(x_{1},...,x_{n})^{d}=x\mathcal{E},$ a principal ideal, and $x$ is a countable product of finite powers of distinct principal primes. Now $I$ cannot lie in a unique non principal prime, for then $I$ would have to be a countably infinite product of principal primes and so would lie in infinitely many principal prime ideals, which are maximal. So $I$ can only belong to a unique principal prime and must be generated by a finite prime power. To see that $\mathcal{E}$ falls foul of Theorem \ref
{Theorem 1}, let's put $S=\{p|p$ a prime element in $\mathcal{E}\}.$ Then
for each non principal prime $P$ of $\mathcal{E}$ we have $P\subseteq \cup
_{p\in S}p\mathcal{E}$ because each element of $P$ is divisible by some
member(s) of $S.$ (I have corresponded with Prof. Evan Houston about the
above material and I gratefully acknowledge that.)
Once we know more about $\ast $-homogeneous ideals we would know that rings
do not behave in the same manner as groups do. To get an idea of how groups
behave and what is the connection the reader may look up \cite{YZ 2011}.
Briefly, the notion of a $\ast $-homogeneous ideal arose from the notion of
a basic element of a lattice ordered group $G$ (defined as $b>0$ in $G$ such
that $(0,b]$ is a chain). A basis of $G,$ if it exists, is a maximal set of
mutually disjoint strictly positive basic elements of $G$. According to \cite
{Con 1961} a l.o. group $G$ has a basis if and only if every strictly
positive element of $G$ exceeds a basic element. So if we were to treat $D$ being potent as the analogue of $G$ having a basis, then the ring analogue of Conrad's criterion fails: every proper $\ast $-ideal of finite type being contained in a $\ast $-homogeneous ideal does not imply that $D$ is potent.
We next tackle the question of where $\ast $-homogeneous ideals can be
found. Call $D$ of finite $\ast $-character if every nonzero non unit of $D$
is contained in at most a finite number of maximal $\ast $-ideals. Again, a
domain of finite $\ast $-character could be a domain of finite character
(every nonzero non unit belongs to at most a finite number of maximal
ideals) such as an h-local domain or a semilocal domain or a PID or a domain
of finite $t$-character such as a Krull domain.
\begin{proposition}
\label{Proposition X1} A domain $D$ of finite $\ast $-character is $\ast $
-potent.
\end{proposition}
\begin{proof}
Let $M$ be a maximal $\ast $-ideal of $D$ and let $x$ be a nonzero element
of $M.$ If $x$ belongs to no other maximal $\ast $-ideal then $xD$ is $\ast $
-homogeneous and $M$ is potent. So let us assume that $M,$ $
M_{1},M_{2},...,M_{n}$ is the set of all maximal $\ast $-ideals containing $
x.$ Now consider the ideal $A=(x,x_{1},...,x_{n})$ where $x_{i}\in
M\backslash M_{i}$ for $i=1,...,n.$ Obviously $A\subseteq M$ but $
A\nsubseteq M_{i}$ because of $x_{i}.$ Note that $A$ cannot be contained in
any maximal $\ast $-ideal other than $M,$ for if $N$ were any maximal $\ast $
-ideal containing $A$ then $N$ would belong to $\{M,M_{1},M_{2},...,M_{n}\}$
because of $x.$ And $N$ cannot be any of the $M_{i}.$ Thus $A^{\ast }$ is a $
\ast $-homogeneous ideal contained in $M$ and $M$ is potent. Since $M$ was
arbitrary we have the conclusion.
\end{proof}
The above proof is essentially taken from the proof for part (2) of Theorem
1.1 of \cite{ACZ 2013}.
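To see the construction used in this proof in the simplest possible setting, here is a toy illustration in the ring of integers $\mathbb{Z}$ with $\ast =d$ (included only for orientation). Take $x=12$ and $M=(2)$. The maximal ideals containing $x$ are $(2)$ and $(3)$, so with $M_{1}=(3)$ we may pick $x_{1}=4\in M\backslash M_{1}$ and form
\[
A=(x,x_{1})=(12,4)=(4)\subseteq (2),\qquad (4)\nsubseteq (3),
\]
so $A^{\ast }=(4)$ is a homogeneous ideal contained in $M$, exactly as the proof predicts.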
Now how do we get a domain of finite $\ast $-character? The answer is
somewhat longish and interesting. Bazzoni conjectured in \cite{Baz 2000} and
\cite{Baz 2001} that a Prufer domain $D$ is of finite character if every
locally principal ideal of $D$ is invertible. The authors of \cite{HMMT 2008} were the
first to verify the conjecture using partially ordered groups. Almost
simultaneously \cite{H-K 2009} proved the conjecture for $r$-Prufer monoids,
using Clifford semigroups of ideals and soon after I chimed in with a very
short paper \cite{Zaf 2010}. The ring-theoretic techniques used in this
paper not only verified the Bazzoni conjecture but also helped prove
Bazzoni-like statements for other, suitable, domains that were not
necessarily PVMDs. (Recall that $D$ is a PVMD if every $t$-ideal $A$ of
finite type of $D$ is $t$-invertible i.e. $(AA^{-1})_{t}=D$.) In the course
of verification of the conjecture I mentioned a result due to Griffin from
\cite{Gri 1967} that says:
\begin{theorem}
\label{Theorem X2} A PVMD\ $D$ is of finite $t$-character if and only if
each $t$-invertible $t$-ideal of $D$ is contained in at most a finite number
of mutually $t$-comaximal $t$-invertible $t$-ideals of $D$.
\end{theorem}
As indicated in the introduction of \cite{Zaf 2010} the set of $t$
-invertible $t$-ideals of a PVMD is a lattice ordered group under $t$
-multiplication and the order defined by reverse containment of the ideals
involved and that the above result for PVMDs came from the use of Conrad's
F-condition. Stated for lattice ordered groups Conrad's F-condition says:
Every strictly positive element exceeds at most a finite number of mutually
disjoint elements. This and Theorem \ref{Theorem X2}, eventually led the
authors of \cite{DZ 2010}, to the following statement.
\begin{theorem}
\label{Theorem X3}(cf. Theorem 1 of \cite{DZ 2010}) Let $D$ be an integral
domain, $\ast $ a finite character star operation on $D$ and let $\Gamma $
be a set of proper, nonzero, $\ast $-ideals of finite type of $D$ such that
every proper nonzero $\ast $-finite $\ast $-ideal of $D$ is contained in
some member of $\Gamma $. Let $I$ be a nonzero finitely generated ideal of $
D$ with $I^{\ast }\neq D$. Then $I$ is contained in an infinite number of
maximal $\ast $-ideals if and only if there exists an infinite family of
mutually $\ast $-comaximal ideals in $\Gamma $ containing $I$.
\end{theorem}
This theorem was a coup; it sort of catapulted the consideration of
finiteness of character from Prufer-like domains to consideration of
finiteness of $\ast $-character in general domains. But alas, there was an
error in the proof. There was no reason for the error as I had used the
technique, Conrad's F-condition, involved in the proof of Theorem \ref
{Theorem X3} at other places such as \cite{DLMZ 2001}, \cite{MRZ 2008} and,
later, \cite{DZ 2011} but there it was. I realized the error while working
on a paper on p.o. groups, that I eventually published with Y.C. Yang as
\cite{YZ 2011}. I wrote to my coauthor of \cite{DZ 2010}, proposing a
corrigendum. But for one reason or another the corrigendum never got off the
ground. Fortunately Chang and Hamdi have recently published \cite{CH 2019}
including Theorem 1 of \cite{DZ 2010} as Lemma 2.3 with proof exactly the
way I would have liked after the corrigendum was used.
Perhaps as a kind gesture those authors have not pointed out the error in
the proof of \cite[Theorem 1]{DZ 2010}, but a careless use of Zorn's Lemma
must be pointed out so that others do not fall in a similar pit. Now going
over the whole thing anew might be painful, so I reproduce below the
proposed brief corrigendum and point out any other errors made that I could not
see at that time.
\begin{quotation}
\textquotedblleft There is some confusion in lines 8-15 of the proof of
Theorem 1. In the following we offer a fix to clear the confusion and give a
rationale for the fix.
The fix: Read the proof from the sentence that starts from line 8 as
follows: Let $S$ be the family of sets of mutually $\ast $-comaximal
homogeneous members of $\Gamma $ containing $I$. Then $S$ is non empty by $
(\sharp \sharp ).$ Obviously $S$ is partially ordered under inclusion. Let $
A_{n_{1}}\subset A_{n_{2}}\subset ...\subset A_{n_{r}}\subset ...$ be an
ascending chain of sets in $S$. Consider $T=\cup A_{n_{r}}.$ We claim that
the members of $T$ are mutually $\ast $-comaximal. For take $x,y\in T,$ then
$x,y\in A_{n_{i}},$ for some $i,$ and hence are $\ast $-comaximal. Having
established this we note that by $(\sharp ),$ $T$ must be finite and hence
must be equal to one of the $A_{n_{j}}.$ Thus by Zorn's Lemma, $S$ must have
a maximal element $U=\{V_{1},V_{2},...,V_{n}\}.$ Disregard the next two
sentences and read on from: Next let $M_{i}$ be the maximal $\ast $-ideal....
Rationale for the Fix: Using sets of mutually $\ast $-comaximal elements
would entail some unwanted maximal elements as the following example shows:
Let $x=2^{2}5^{2}$ in $Z$ the ring of integers. Then $\mathcal{S}
=\{\{(2^{2}5^{2})\},\{(25^{2})\},\{(2^{2}5)\},\{(2^{2})\},$ $
\{(5^{2})\},\{(2^{2}),(5^{2})\},$ $\{(2)\},\{(5)\},$ $\{(2),(5^{2})\},
\{(2^{2}),(5)\},$ $\{(2),(5)\}\}.$ In this case, while $\mathcal{S}$
includes legitimate maximal elements: $\{(2^{2}),(5^{2})\},$ $
\{(2),(5^{2})\},$ $\{(2^{2}),(5)\},\{(2),(5)\}$ it also includes $
\{(2^{2}5^{2})\},\{(25^{2})\},\{(2^{2}5)\}$ which fit the definition of
maximal elements. The reason why the fix should work is that given any set $
T=\{A_{1},A_{2},...,A_{m}\}$ of mutually $\ast $-comaximal $\ast $-finite
ideals, by $(\sharp \sharp )$ there is a set of mutually $\ast $-comaximal
homogeneous $\ast $-finite ideals $\{H_{1},H_{2},...,H_{n}\}$ in $\Gamma ,$
where $n\geq m$ such that each $H_{j}$ contains some $A_{i}.$ Also as a
homogeneous ideal cannot be contained in two disjoint ideals we do not face
the above indicated problem and Zorn's Lemma gives the required maximal
elements.\textquotedblright\
(To be sure that the above ``proposal'' was not created after seeing the Chang--Hamdi paper, check the image of the E-mail sent to Prof. Dumitrescu and a pdf version of the corrigendum here \cite{Zaf1 2019}, at the end of that document.)
\end{quotation}
The other error was essentially confusing the size of a set with the set, on
my part. I must admit that my coauthor told me to say, after finding that
there was at least one homogeneous ideal containing a given $\ast $-ideal $A$
of finite type, that one can find a largest set of mutually $\ast $
-comaximal homogeneous ideals containing $A.$ But I just don't care about
doing that unless the conclusion is very simple.
It's only fitting that I end this saga with a more satisfying statement
and/or proof of \cite[Theorem 1]{DZ 2010}. Lurking behind the fa\c{c}ade of
the set $\Gamma $ and the other conditions were the following definitions
and statements. Call a $\ast $-ideal $I$ of finite type ($\ast $-)
homogeneous, as we have already done, if $I$ is contained in a unique
maximal $\ast $-ideal $M=M(I)$.
\begin{lemma}
\label{Lemma X4}A $\ast $-ideal $I$ of finite type is $\ast $-homogeneous if
and only if for each pair $X,Y$ of proper $\ast $-ideals of finite type
containing $I$ we have that $(X+Y)^{\ast }$ is proper.
\end{lemma}
\begin{proof}
Let $I$ be $\ast $-homogeneous; then any proper finite type $\ast $-ideals $X,Y$ containing $I$ are $\ast $-homogeneous, contained in $M(I),$ and so $(X+Y)^{\ast }\subseteq M(I).$ Conversely, suppose that the condition holds and that $I$ is contained in two distinct maximal $\ast $-ideals $N_{1},N_{2}$. For $n\in N_{1}\backslash N_{2}$ we have $(n,N_{2})^{\ast }=D,$ so there is a finite set $J\subseteq N_{2}$ such that $(n,J)^{\ast }=D$, because $\ast $ is of finite type. But then $X=(I,n)^{\ast }\subseteq N_{1}$ and $Y=(I,J)^{\ast }\subseteq N_{2}$ both contain $I$ while $(X+Y)^{\ast }=D,$ a contradiction.
\end{proof}
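A minimal sanity check of this criterion in $\mathbb{Z}$ (with $\ast =d=t$; an illustration only): $I=(4)$ is $\ast $-homogeneous, and indeed the proper ideals of finite type containing $(4)$ are just $(2)$ and $(4)$, any two of which sum to the proper ideal $(2)$ or $(4)$. On the other hand $I=(6)$ is not $\ast $-homogeneous, and the criterion detects this:
\[
X=(2)\supseteq (6),\qquad Y=(3)\supseteq (6),\qquad (X+Y)^{\ast }=(2,3)=\mathbb{Z}.
\]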
\begin{remark}
\label{Remark X5}Note that if $A$ and $B$ are proper $\ast $-ideals such
that $(A+B)^{\ast }=D$ and if $C$ is any proper $\ast $-ideal containing $B$
then $(A+C)^{\ast }=D,$ since $(A+C)^{\ast }=(A+B+C)^{\ast }.$
\end{remark}
\begin{theorem}
\label{Theorem X6}Let $\ast $ be a finite type star operation defined on an
integral domain $D.$ Then $D$ is of finite $\ast $-character if and only if
every $\ast $-ideal of finite type of $D$ is contained in at most a finite
number of mutually $\ast $-comaximal $\ast $-ideals of finite type.
\end{theorem}
\begin{proof}
(I) We first show that every $\ast $-ideal of finite type of $D$ is
contained in at least one $\ast $-homogeneous ideal of $D$. For suppose that
there is a $\ast $-ideal $A$ of finite type of $D$ that is not contained in
any $\ast $-homogeneous ideals of $D$. Then obviously $A$ is not $\ast $
-homogeneous. So there are at least two proper $\ast $-ideals $A_{1},B_{1}$
of finite type such that $(A_{1}+B_{1})^{\ast }=D$ and $A\subseteq
A_{1},B_{1}$. Obviously, neither of $A_{1},B_{1}$ is homogeneous. As $B_{1}$
is not $\ast $-homogeneous there are at least two $\ast $-comaximal proper $
\ast $-ideals $B_{11},B_{12}$ of finite type containing $B_{1}$. Now by
Remark \ref{Remark X5} $A_{1},B_{11},B_{12}$ are mutually $\ast $-comaximal
proper $\ast $-ideals containing $A$ and by assumption none of these is $
\ast $-homogeneous. Let $B_{123}$ and $B_{22}$ be two $\ast $-comaximal
proper $\ast $-ideals containing $B_{12}.$ Then by Remark \ref{Remark X5}
and by assumption, $A_{1},B_{11},B_{22}$, $B_{123}$ are proper mutually $
\ast $-comaximal $\ast $-ideals containing $A$ and none of these ideals is
homogeneous, and so on. Thus at stage $n$ we have a collection: $
A_{1},B_{11},B_{22},...,$ $B_{nn},B_{12...n,n+1}$ that are proper mutually $
\ast $-comaximal $\ast $-ideals containing $A$ and none of these ideals is
homogeneous. The process is never ending and has the potential of delivering
an infinite number of mutually $\ast $-comaximal proper $\ast $-ideals of
finite type containing $A,$ contrary to the finiteness condition. Whence the
conclusion.
Call two $\ast $-homogeneous ideals $H_{1},H_{2}$ similar if $(H_{1},H_{2})^{\ast }\neq D,$ that is, if $H_{1}$ and $H_{2}$ belong to the same maximal $\ast $-ideal. The relation $R=$ \textquotedblleft $H_{1}$ is similar to $H_{2}$\textquotedblright\ is
obviously an equivalence relation on the set $S$ of $\ast $-homogeneous
ideals containing $A.$ Form a set $T$ of $\ast $-homogeneous ideals by
selecting one and exactly one $\ast $-homogeneous ideal from each
equivalence class of $R$. Then $T$ is a set of mutually $\ast $-comaximal $
\ast $-homogeneous ideals containing $A$ and so must be finite because of
the finiteness condition. Let $|T|=n$ and claim that $n$ is the largest
number of mutually $\ast $-comaximal $\ast $-ideals of finite type
containing $A.$ For if not then there is say a set $U$ of mutually $\ast $
-comaximal $\ast $-ideals of finite type that contain $A$ and $|U|=r>n.$
Then there is at least one member $B$ of $U$ that is $\ast $-comaximal with
each member of $T.$ (Since no two $\ast $-comaximal $\ast $-ideals share the
same maximal $\ast $-ideal.) But then, by (I), there is a $\ast $
-homogeneous ideal $J$ containing $B.$ By Remark \ref{Remark X5}, $J$ is $
\ast $-comaximal with each member of $T,$ yet by the construction of $T$ a $
\ast $-homogeneous ideal containing $A$ must be similar to a member of $T,$
a contradiction. Finally if $P_{1},P_{2},...,P_{n}$ are maximal $\ast $
-ideals such that each contains a member of $T$ then these are the only
maximal $\ast $-ideals containing $A.$ For if not then there is a maximal $
\ast $-ideal $M\neq P_{i}$ containing $A$ and there is $x\in M\backslash
P_{i},$ $i=1,2,...,n.$ But then $(A,x)^{\ast }$ is a finite type $\ast $
-ideal containing $A$ and $\ast $-comaximal with each member of $T,$ yet by
(I) $(A,x)^{\ast }$ must be contained in a $\ast $-homogeneous ideal that is
$\ast $-comaximal with each member of $T$, a contradiction. For the converse
note that if a nonzero non unit $x\in D$ is contained in infinitely many
mutually $\ast $-comaximal ideals then $D$ cannot be of finite $\ast $
-character, because a maximal $\ast $-ideal cannot contain two or more $\ast
$-comaximal ideals.
\end{proof}
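To make the construction of $T$ in the above proof concrete in the simplest case (again $\mathbb{Z}$ with $\ast =d$, for orientation only), let $A=(12)$. The $\ast $-homogeneous ideals containing $A$ are $(2),(4)$ and $(3)$, the similarity classes are $\{(2),(4)\}$ and $\{(3)\}$, and one admissible choice is
\[
T=\{(4),(3)\},\qquad |T|=2,
\]
which is both the number of maximal ideals containing $(12)$ and the largest number of mutually comaximal proper ideals of finite type containing $(12)$.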
So, if we must construct a $\ast $-homogeneous ideal we know where to go.
Otherwise there are plenty of $\ast $-potent domains, with one kind studied
in \cite{HZ 2019} under the name $\ast $-super potent domains. Let's note
here that there is a slight difference between the definitions. Definition
1.1 of \cite{HZ 2019} calls a finitely generated ideal $I$ $\ast $-rigid if $
I$ is contained in a unique maximal $\ast $-ideal. But it turns out that if $
I$ is $\ast $-rigid, then $I^{\ast }$ is $\ast $-homogeneous and if $J$ is $
\ast $-homogeneous then $J$ contains a finitely generated ideal $K$ such
that $K$ is exactly in the same maximal $\ast $-ideal containing $J,$ making
$K$ $\ast $-rigid; see also \cite{Zaf2 2019}.
\section{\label{S2}What $\ast $-homogeneous ideals can do}
This much about $\ast $-homogeneous ideals and potent domains leads to the
questions: What else can $\ast $-homogeneous ideals do? These ideals arise and figure prominently in the study of finite $\ast $-character of integral domains. The domains of finite $\ast $-character where the $\ast $-homogeneous ideals show their full force are the $\ast $-semi homogeneous ($\ast $-SH) domains.
It turns out, and it is easy to see, that if $I$ and $J$ are two $\ast $
-homogeneous ideals that are similar, i.e. that belong to the same unique
maximal $\ast $-ideal (i.e. $M(I)=M(J)$ in the notation and terminology of
\cite{AZ 2019}) then $(IJ)^{\ast }$ is $\ast $-homogeneous belonging to the
same maximal $\ast $-ideal. With the help of this and some auxiliary results
it can then be shown that if an ideal $K$ is a $\ast $-product of finitely
many $\ast $-homogeneous ideals then $K$ can be uniquely expressed as a $
\ast $-product of mutually $\ast $-comaximal $\ast $-homogeneous ideals.
Based on this a domain $D$ is called a $\ast $-semi homogeneous ($\ast $-SH)
domain if every proper principal ideal of $D$ is expressible as a $\ast $
-product of finitely many $\ast $-homogeneous ideals. It was shown in \cite[
Theorem 4]{AZ 2019} that $D$ is a $\ast $-SHD if and only if $D$ is a $\ast $
-h-local domain ($D$ is a locally finite intersection of localizations at
its maximal $\ast $-ideals and no two maximal $\ast $-ideals of $D$ contain
a common nonzero prime ideal.) Now if we redefine a $\ast $-homogeneous
ideal so that the $\ast $-product of two similar, newly defined, $\ast $
-homogeneous ideals is a $\ast $-homogeneous ideal meeting the requirements
of the new definition, we have a new theory.
To explain the process of getting a new theory of factorization merely by
producing a suitable definition of a $\ast $-homogeneous ideal we give below
one such theory.
Let's recall first that if $A=(a_{1},...,a_{m})$ is a finitely generated
ideal then $A_{r}$ (also written $A_{(r)}$) denotes $(a_{1}^{r},...,a_{m}^{r})$. Let's also recall
that if $A$ is $\ast $-invertible then $(A^{r})^{\ast
}=(a_{1}^{r},...,a_{m}^{r})^{\ast }$ \cite[Lemma 1.14]{AHZ 2019}.
\begin{definition}
\label{Definition X7} Call a $\ast $-homogeneous ideal $I$ $\ast $-almost
factorial general homogeneous ($\ast $-afg homogeneous) if (afg1) $I$ is $
\ast $-invertible, and (afg2) for each finite type $\ast $-homogeneous
ideal $J\subseteq M(I)$ we have for some $r\in N,$ $(I^{r}+J)_{m}^{\ast }$
is principal for some $m\in N,$ $m$ depending upon the choice of generators
of $(I^{r}+J).$
\end{definition}
(You can also redefine it as: Definition A. Call a $\ast $-homogeneous ideal $I$ $\ast $-almost factorial general homogeneous ($\ast $-afg homogeneous) if (afg1) $I$ is $\ast $-invertible and (afg2) for each finitely generated $\ast $-homogeneous ideal $J\subseteq M(I)$ such that $J^{\ast }\supseteq I^{r}$ for some $r\in N$, we have $(J)_{m}^{\ast }$ principal for some $m\in N$; here you may add that $m$ may vary with each choice of generators of $J,$ and redo the following accordingly.)
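As a trivial sanity check of Definition \ref{Definition X7} (carried out in $\mathbb{Z}$ with $\ast =t$; an illustration only, and of course not representative of the interesting cases), take $I=(4)$, so that $M(I)=(2)$. Condition (afg1) holds because every nonzero ideal of $\mathbb{Z}$ is invertible, and for (afg2) any finite type $t$-homogeneous ideal $J\subseteq (2)$ has the form $(2^{k})$ with $k\geq 1$, so already with $r=m=1$
\[
(I+J)^{\ast }=(4,2^{k})=(2^{\min (2,k)})
\]
is principal. In $\mathbb{Z}$ every $\ast $-homogeneous ideal is therefore $\ast $-afg homogeneous; nontrivial examples require domains with nonprincipal $t$-homogeneous ideals.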
\begin{lemma}
\label{Lemma X8} Let $I$ be $\ast $-invertible and let $J$ be any f.g. ideal. Then $((IJ)_{r})^{\ast }=(I^{r}J_{r})^{\ast }.$
\end{lemma}
\begin{proof}
Let $I=(a_{1},...,a_{m}),$ $J=(b_{1},...,b_{n}).$ Then $IJ=(\{a_{i}b_{j}|i=1,...,m$; $j=1,...,n\})$ and $((IJ)_{r})^{\ast }=(\{a_{i}^{r}b_{j}^{r}|i=1,...,m$; $j=1,...,n\})^{\ast }=((a_{1}^{r},...,a_{m}^{r})(b_{1}^{r},...,b_{n}^{r}))^{\ast }=(I_{r}J_{r})^{\ast }=(I^{r}J_{r})^{\ast },$ the last equality because $I$ is $\ast $-invertible.
\end{proof}
Using the above definition, we can be sure of the following.
\begin{proposition}
\label{Proposition X9} The following hold for a $\ast $-afg ideal $I.$ (1) $
(I^{r})^{\ast }$ is principal for some positive integer $r,$ (2) for any
finitely generated ideal $J\subseteq M(I),$ we have $(I^{m})^{\ast
}\subseteq (J_{m})^{\ast }$ or $(J_{m})^{\ast }\subseteq (I^{m})^{\ast }$
for some positive integer $m,$ (3) if $J$ is a $\ast $-invertible $\ast $
-ideal that contains $I,$ then $J$ itself is a $\ast $-afg ideal and (4) if $
J$ is a $\ast $-afg ideal similar to $I$ (i.e., $J\subseteq M(I)),$ then $
(IJ)^{\ast }$ is $\ast $-afg similar to both $I$ and $J.$
\end{proposition}
\begin{proof}
(1) If $I$ is $\ast $-afg, then $(I^{r})^{\ast }=(f)$ for some $r\in N$ and $f\in D$ by definition (apply (afg2) with $J=I$), and we can choose $r$ to be minimum.
(2) By definition, if $I$ is $\ast $-afg, we also have $(I^{m}+J_{m})^{\ast }=(d)$ for each finitely generated ideal $J\subseteq M(I).$ Dividing both sides by $d$ we get $(I^{m}/d+J_{m}/d)^{\ast }=D.$ Now as $I^{m}$ and $J_{m}$ are contained in $M(I),$ and no other maximal $\ast $-ideal, $(I^{m}/d)^{\ast }$ and $(J_{m}/d)^{\ast }$ have no choice but to be in $M(I),$ if non-trivial. So $(I^{m}/d)^{\ast }=D$ or $(J_{m}/d)^{\ast }=D.$ Thus if $(I^{m})^{\ast }=dD,$ then $(I^{m})^{\ast }=(d)\supseteq (J_{m})^{\ast },$ and if $(J_{m})^{\ast }=dD$ then $(I^{m})^{\ast }\subseteq (J_{m})^{\ast }=dD.$ Thus by (afg2) $(I^{m})^{\ast }$ is principal and contains $(J_{m})^{\ast }$ or $(J_{m})^{\ast }$ is principal and contains $(I^{m})^{\ast },$ for some $m\in N.$
(3) Note that as $J\supseteq I$ we have $J^{r}\supseteq I^{r}$ for all positive integers $r.$ Next, for every finitely generated ideal $F$ such that $F^{\ast }\supseteq J^{s}$ for some $s$ we have $F^{\ast }=(J^{s}+G)^{\ast }=(J^{s}+I^{s}+G)^{\ast }=(I^{s}+(J^{s}+G))^{\ast }$ and so $(F_{m})^{\ast }=(d)$ for some positive integer $m$ and for each $\ast $-homogeneous ideal $G.$ (4) If $I,J$ are two similar $\ast $-afg homogeneous ideals, then $(IJ)^{\ast }$ is $\ast $-invertible and $\ast $-homogeneous and of course similar to both $I$ and $J.$ We have to show that for each $\ast $-homogeneous ideal $G,$ for some $r\in N,$ $(I^{r}J^{r}+G)_{m}^{\ast }=(d)$ for some $m.$ Let $F=(I^{r}J^{r}+G)^{\ast }.$ By (2) we know that $I^{m}\supseteq J^{m}$ or $J^{m}\supseteq I^{m},$ say $I^{m}\supseteq J^{m}.$ Now consider $(F_{m})^{\ast }=(I^{mr}J^{mr}+G_{m})^{\ast }\supseteq (J^{2mr}+G_{m})$, or $(F_{m})^{\ast }=(J^{2mr}+H)^{\ast },$ and by definition $(F_{mt})^{\ast }$ is principal for some $t.$
\end{proof}
Now define a $\ast $-afg semi homogeneous domain ($\ast $-afg-SHD) as: $D$
is a $\ast $-afg-SHD if every nonzero non unit of $D$ is expressible as a $
\ast $-product of finitely many $\ast $-afg homogeneous ideals. Indeed a $\ast $-afg-SHD is a $\ast $-SHD whose $\ast $-homogeneous ideals are $
\ast $-afg homogeneous. (S. Xing, a student of Wang Fanggui, is working with
me on this topic. Xing, incidentally, is also at Chengdu University, China.
Now Dan Anderson has also joined in and there's a possibility that the
definition will be completely twisted out of shape.)
Next, each of the definitions of homogeneous elements can actually give rise
to $\ast $-potent domains in the same manner as the $\ast $-super potent
domains of \cite{HZ 2019}. In \cite{HZ 2019}, for a star operation $\ast $
of finite character, a $\ast $-homogeneous ideal is called $\ast $-rigid.
The $\ast $-maximal ideal containing a $\ast $-homogeneous ideal $I$ may be
called a $\ast $-potent maximal $\ast $-ideal, as we have already done. Next
we may call the $\ast $-homogeneous ideal $I$ $\ast $-super-homogeneous if
each $\ast $-homogeneous ideal $J$ containing $I$ is $\ast $-invertible and
we may call a $\ast $-potent domain $D$ $\ast $-super potent if every
maximal $\ast $-ideal of $D$ contains a $\ast $-super homogeneous
ideal. But then one can study $\ast $-A-potent domains where A refers to a $
\ast $-homogeneous ideal that corresponds to a particular definition. For
example a $\ast $-homogeneous ideal $J$ is said to be of type $1$ in \cite
{AZ 2019} if $\sqrt{J}=M(J).$ So we can talk about $\ast $-type $1$ potent
domains as domains each of whose maximal $\ast $-ideals contains a $\ast $
-homogeneous ideal of type $1.$ The point is, to each suitable definition
say A of a $\ast $-homogeneous ideal we can study the $\ast $-A-potent
domains as we studied the $\ast $-super potent domains in \cite{HZ 2019}. Of
course the theory corresponding to definition A would be different from that
of other $\ast $-potent domains. For example, each of the maximal $\ast $-ideals of a $\ast $-type $1$ potent domain would be the radical of a $\ast $-homogeneous ideal, etc. Now, as is usual, we present some of the concepts
that have some direct and obvious applications, stemming from the use of $
\ast $-homogeneous ideals. For this we select the $\ast $-f-potent domains
for a study.
\subsection{$\ast $-f-potent domains}
Let $\ast $ be a finite type star operation defined on an integral domain $D$
. Call a nonzero non unit element $r$ of $D$ $\ast $-factorial rigid ($\ast $-f-rigid) if $r$ belongs to a unique maximal $\ast $-ideal and every finite type $\ast $-homogeneous ideal containing $r$ is principal. Indeed if $r$ is a $\ast $-f-rigid element then $rD$ is a $\ast $-f-homogeneous ideal and
hence a $\ast $-super homogeneous ideal. So the terminology and the theory
developed in \cite{AZ 2019} applies. Note here that every non unit factor $s$
of a $\ast $-f-rigid element $r$ is $\ast $-f-rigid because of the
definition. Note also that if $r,s$ are similar $\ast $-f-rigid elements
(i.e. $rD,$ $sD$ are similar $\ast $-f-homogeneous ideals) then $rs$ is a $
\ast $-f-rigid element similar to $r$ and $s$ and so if $r$ is $\ast $
-f-rigid then $r^{n}$ is $\ast $-f-rigid for any positive integer $n.$
\begin{example}
\label{Example A} Every prime element $p$ is a $t$-f-rigid element: $pD$ is a maximal $t$-ideal \cite[13.5]{H-K 1998}, so any maximal $t$-ideal containing $p$ equals $pD,$ and hence any finite type $t$-homogeneous ideal $J$ containing $p$ satisfies $pD\subseteq J\subseteq M(J)=pD,$ making $J=pD$ principal.
\end{example}
Call a maximal $\ast $-ideal $M$ $\ast $-f-potent if $M$ contains a $\ast $
-f-rigid element and a domain $D$ $\ast $-f-potent if every maximal $\ast $
-ideal of $D$ is $\ast $-f-potent.
\begin{example}
\label{Examples B} UFDs, PIDs, semirigid GCD domains, and prime potent domains
are all $t$-f-potent.
\end{example}
(Domains in which every maximal $t$-ideal contains a prime element may be called prime potent. Indeed a prime element generates a maximal $t$-ideal \cite[13.5]{H-K 1998}, so a domain in which every maximal $t$-ideal contains a prime element is simply a domain in which every maximal $t$-ideal is principal.)
The definition suggests right away that if $r$ is $\ast $-f-rigid and $x$ is any nonzero element of $D,$ then $(r,x)^{\ast }=sD$ for some $s\in D,$ and applying the $v$-operation to both sides we conclude that the GCD of $r$ and $x$ exists (with $(r,x)_{v}=GCD(r,x)D$) for every nonzero element $x$ of $D$ and that for each pair of nonzero factors $u,v$ of $r$ we have $u|v$ or $v|u$; that is, $r$ is a rigid element of $D$ in Cohn's terminology \cite{Coh 1968}. Indeed it is easy to
see, if necessary with help from \cite{AZ 2019}, that a finite product of $
\ast $-f-rigid elements is uniquely expressible as a product of mutually $
\ast $-comaximal $\ast $-f-rigid elements, up to order and associates and
that if every nonzero non unit of $D$ is expressible as a product of $\ast $
-f-rigid elements then $D$ is a semirigid GCD domain of \cite{Zaf 1975}.
Also, as we shall show below, a $t$-f-potent domain of $t$-dimension one
(i.e. every maximal $t$-ideal is of height one) is a GCD domain of finite $t$
-character. But generally a $t$-f-potent domain is far from being a GCD
domain. Before we delve into examples, let's prove a necessary result, by
mimicking Theorem 4.12 of \cite{CMZ 1978} and its proof. (We shall also use
Theorem 4.21 of \cite{CMZ 1978}, in the proofs of results below.)
\begin{proposition}
\label{Proposition B1} Let $D$ be an integral domain and let $L$ be an
extension of the field of fractions $K$ of $D.$ Then each nonzero ideal $I$ of $
R=D+XL[X]$ is of the form $f(X)FR=f(X)(F+XL[X])$, where $F$ is a nonzero $D$
-submodule of $L$ such that $f(0)F\subseteq D$ and $f(X)\in L[X]$. The
finitely generated ideals of $R$ are of the form $f(X)JR$, where $J$ is a
finitely generated $D$-submodule of $L$ and $f(X)\in R$.
\end{proposition}
\begin{proof}
First observe that a subset of $R$ of the form $f(X)FR,$ where $f(0)F\subseteq D,$ is in fact an ideal of $R$. According to \cite[Lemma 1.1]{CMZ 1986}, the following are equivalent for an ideal $I$ of $R$: (1) $I$ is such that $I\cap D\neq 0,$ (2) $I\supseteq XL[X]$ and (3) $IL[X]=L[X]$. Further, if any of these hold, then $I=I\cap D+XL[X]=(I\cap D)R$ and taking $f=1$, $F=I\cap D$ we have the stated form. Let's now consider the case when $IL[X]\neq L[X].$ In this case $IL[X]=f(X)L[X]$ for some polynomial $f(X)$ of $L[X].$ Then there is a nonzero element $\alpha \in L$ such that $\alpha f(X)\in I$. Let $F=\{\alpha \in L$ $|$ $\alpha f(X)\in I\}$. Then $F$ is a $D$-submodule of $L$. Since $F\neq 0$ and $f(X)F\subseteq I,$ we get $I\supseteq f(X)FR=f(X)(F+XL[X]).$ Now if $h(X)\in I$, then $h(X)=f(X)(\alpha _{0}+...+\alpha _{n}X^{n})$, where $\alpha _{0},...,\alpha _{n}\in L$, whence $h(X)=\alpha _{0}f(X)+h^{\prime }(X)$, where $h^{\prime }(X)\in f(X)XL[X]\subseteq I$. Hence $\alpha _{0}\in F$ and $h(X)\in f(X)(F+XL[X])$. Thus $I=f(X)(F+XL[X])=f(X)FR$, from which it also follows that $f(0)F\subseteq D$. Finally, let $I$ be finitely generated; then by the above we have $I=f(X)FR$ where $F$ is a finitely generated $D$-submodule of $L$ and $f(X)\in L[X].$ If $f(0)=0,$ then $f(X)$ is obviously in $R.$ So let's consider $f(0)=h\neq 0$ and $F=(\alpha _{1},\alpha _{2},...,\alpha _{r})D$. Since $f(0)F\subseteq D$ we must have $h\alpha _{i}\in D.$ But then $I=f(X)FR$ can be written as $I=\frac{f(X)}{h}(h\alpha _{1},h\alpha _{2},...,h\alpha _{r})R$ where $\frac{f(X)}{h}\in R.$
\end{proof}
(I was struggling with an earlier version of Proposition \ref{Proposition B1}
and Prof. T. Dumitrescu's suggested improvement for it when I remembered
Theorem 4.12 of \cite{CMZ 1978}. I am thankful for his input.)
\begin{lemma}
\label{Lemma B2} Let $D$ be an integral domain and let $L$ be an extension
field of the field of fractions $K$ of $D.$ Then $d\in D\backslash (0)$ is a
$t$-f-rigid element of $D$ if and only if $d$ is a $t$-f-rigid element of $D+XL[X]$.
\end{lemma}
\begin{proof}
Let's first note that $D+XL[X]$ has the $D+M$ form. Thus if $I$ is a nonzero
ideal of $D$ then $(I+XL[X])_{v}=I_{v}+XL[X]=I_{v}(D+XL[X]),$ by \cite[
Proposition 2.4]{AR 1988} and using this we can also conclude that $
(I+XL[X])_{t}=I_{t}+XL[X]=I_{t}(D+XL[X]).$ Now let $d$ be a $t$
-f-rigid element of $D;$ then $dD$ is a $t$-f-homogeneous ideal, so any
$t$-ideal of finite type, of $D,$ containing $dD$ is principal. Next
consider $d\in D+XL[X].$ Any $t$-ideal of finite type $F$ of $R$ containing $
d$ intersects $D$ and so has the form $(F\cap D)+XL[X]$, according to \cite[
Lemma 1.1]{CMZ 1986}. Consequently $F$ contains $dD+XL[X].$ We show that $F$
is principal. For this let $
F=(a_{1}+Xf_{1}(X),...,a_{n}+Xf_{n}(X))_{t}=((a_{1},...,a_{n})+XL[X])_{v}=((a_{1},...,a_{n})_{v}+XL[X]).$ But $((a_{1},...,a_{n})_{v}+XL[X])=F\supseteq dD+XL[X]$ forces $(a_{1},...,a_{n})_{v}\supseteq dD$. Also, $d$ being $t$-f-rigid, $(a_{1},...,a_{n})_{v}$ must be principal, whence $F$ is principal. Now note that according to \cite{CMZ 1986}, every prime ideal $M$
of $R$ that intersects $D$ is of the form $M\cap D+XL[X]$ and using the
above mentioned result of \cite[Proposition 2.4]{AR 1988} we can show that
every maximal $t$-ideal $M$ that intersects $D$ is of the form $M\cap
D+XL[X] $ where $M\cap D$ is a maximal $t$-ideal of $D$ and that,
conversely, if $m$ is a maximal ideal of $D$ then $m+XL[X]$ is a maximal
ideal of $R.$ Thus, finally, if $m$ is the unique maximal $t$-ideal of $D$
containing $d$ then $m+XL[X]$ is a maximal $t$-ideal of $R$ containing $d$
and if $N$ were another maximal $t$-ideal containing $d$ then $N\cap D$
would be another maximal $t$-ideal of $D$ containing $d,$ a contradiction. Thus $d$ is a $t$-f-rigid element of $R.$
\end{proof}
\begin{proposition}
\label{Proposition B3} Let $D$ be an integral domain and let $L$ be
an extension field of the field of fractions $K$ of $D.$ Then $D$ is $t$
-potent if and only if $R=D+XL[X]$ is.
\end{proposition}
\begin{proof}
Note that, according to \cite[Lemma 1.2]{CMZ 1986}, every prime ideal $P$ of
$R$ that is not comparable with $XL[X]$ contains an element of the form $
1+Xg(X)$, so must contain a prime element of the form $1+Xg(X)$ and so must
be a principal prime. We next show that a finitely generated ideal $
F\nsubseteq XL[X]$ of $R$ is $t$-homogeneous if and only if $F$ is of the
form $A+XL[X],$ where $A$ is a $t$-homogeneous ideal of $D$ or generated by
a prime power of the form $(1+Xh(X))^{n},$ \cite[Theorem 4.21]{CMZ 1978}.
Obviously if $A$ is contained in a unique maximal $t$-ideal $P$ of $D$ then $
A+XL[X]$ is contained in the maximal $t$-ideal $P+XL[X]$ and any maximal $t$
-ideal that contains $A+XL[X]$ also contains $P+XL[X].$ Next, an ideal
generated by a prime power is $t$-homogeneous anyway. Conversely let $F$ be
a finitely generated nonzero ideal of $R.$ Then by Proposition \ref
{Proposition B1}, $F=f(X)(J+XL[X])$ where $f(X)\in L[X];$ as $F$ is not contained in $XL[X],$ we may take $f(0)=1,$ forcing $J$ to be a finitely generated ideal of $D.$ If in addition $F$ has to be $t$-homogeneous then $F$ is either
contained in a prime ideal of the form $P+XL[X]$ or in a prime ideal
incomparable with $XL[X].$ In the first case $F=J+XL[X]$ where $J$ is a
rigid ideal belonging to $P$ and in the second case $F=f(X)R,$ \cite[Theorem
4.21]{CMZ 1978}.
\end{proof}
\begin{corollary}
\label{Corollary B4} Let $D$ be an integral domain and let $L$ be an
extension field of the field of fractions $K$ of $D.$ Then $D$ is $t$
-f-potent if and only if $R=D+XL[X]$ is.
\end{corollary}
\begin{proof}
Suppose that $D$ is $t$-f-potent. As in the proof of Proposition \ref
{Proposition B3} every maximal $t$-ideal $P$ of $R$ that is not comparable
with $XL[X]$ contains an element of the form $1+Xg(X)$, so must contain a
prime element of the form $1+Xg(X)$ and so must be a principal prime. Next
the maximal $t$-ideals comparable with $XL[X]$ are of the form $P+XL[X]$
where $P$ is a maximal $t$-ideal of $D.$ Since $D$ is $t$-f-potent $P$
contains a $t$-f-rigid element, which is also a $t$-f-rigid element of $R,$
by Lemma \ref{Lemma B2}. So $P+XL[X]$ contains a $t$-f-rigid element of $R.$
In sum, every maximal $t$-ideal of $R$ contains a $t$-f-rigid element of $R$
and so $R$ is $t$-f-potent. Conversely suppose that $R$ is $t$-f-potent.
Then as for each maximal $t$-ideal $M$ of $D,$ $M+XL[X]$ is a maximal $t$
-ideal, each $M$ contains a $t$-f-rigid element of $R$ and hence of $D,$ by
Lemma \ref{Lemma B2}. Thus each maximal $t$-ideal of $D$ contains a $t$
-f-rigid element of $D.$
\end{proof}
Recall, from \cite{AAZ 1995}, that a GCD domain of finite $t$-character that
is also of $t$-dimension $1$ is termed a generalized UFD (GUFD).
\begin{example}
\label{Example C} If $D$ is a UFD (GUFD, Semirigid GCD domain) and $L$ an
extension of the quotient field $K$ of $D,$ then the ring $D+XL[X]$ is a $t$
-f-potent domain.
\end{example}
The $t$-f-potent domains and their examples are nice but we must show that
they have some useful properties. We start with the most striking property.
Here let $X$ be an indeterminate over $D.$ A polynomial $f=\sum $ $
a_{i}X^{i}$ is called primitive if its content $A_{f}=(a_{0},a_{1},...,a_{n})$ is contained in no proper principal ideal, i.e., $(a_{0},a_{1},...,a_{n})\subseteq aD$ implies $a$ is a unit, and super primitive if $(A_{f})_{v}=D.$ It is known that while a super primitive polynomial is primitive, a primitive polynomial may not be super primitive, see e.g. Example 3.1 of \cite{AZ 2007}. A domain $D$ is called a PSP domain if each primitive polynomial $f$ over $D$ is super primitive, i.e. if $(A_{f})_{v}=D.$
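Here is a standard concrete instance of this failure (it may or may not be the cited example; we record it only as an illustration). In $D=\mathbb{Z}[\sqrt{-3}]$ take $f=2+(1+\sqrt{-3})X$, so $A_{f}=(2,1+\sqrt{-3})$. A common divisor of $2$ and $1+\sqrt{-3}$ has norm dividing $4$, and a short check shows that no nonunit of $D$ divides both, so $f$ is primitive. On the other hand $\frac{1+\sqrt{-3}}{2}\in A_{f}^{-1}$, since
\[
\frac{1+\sqrt{-3}}{2}\cdot 2=1+\sqrt{-3}\in D,\qquad \frac{1+\sqrt{-3}}{2}\cdot (1+\sqrt{-3})=-1+\sqrt{-3}\in D,
\]
while $1\cdot \frac{1+\sqrt{-3}}{2}\notin D$, so $1\notin (A_{f}^{-1})^{-1}=(A_{f})_{v}$ and $f$ is not super primitive. In particular $\mathbb{Z}[\sqrt{-3}]$ is not a PSP domain and hence, by Proposition \ref{Proposition E} below, not $t$-f-potent.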
\begin{proposition}
\label{Proposition E} A $t$-f-potent domain $D$ has the PSP property.
\end{proposition}
\begin{proof}
Let $f=\sum a_{i}X^{i}$ be primitive, i.e., $(a_{0},a_{1},...,a_{n})\subseteq aD$ implies $a$ is a unit, and consider the finitely generated ideal $(a_{0},a_{1},...,a_{n})$ in a $t$-f-potent domain $D.$ Then $(a_{0},a_{1},...,a_{n})$ is contained in a maximal $t$-ideal $M$ associated with a $t$-f-rigid element $r$ (of course $M=M(rD))$ if and only if $(a_{0},a_{1},...,a_{n},r)_{t}=sD\neq D;$ but then $A_{f}\subseteq sD$ with $s$ a non unit, contradicting the primitivity of $f.$ Since every maximal $t$-ideal of a $t$-f-potent domain is associated with a $t$-f-rigid element, we conclude that in a $t$-f-potent domain $D,$ $f=\sum a_{i}X^{i}$ primitive implies that $A_{f}$ is contained in no maximal $t$-ideal of $D;$ giving $(A_{f})_{v}=D,$ which means that each primitive polynomial $f$ in a $t$-f-potent domain $D$ is actually super primitive.
\end{proof}
Now PSP implies AP i.e. every atom is prime, see e.g. \cite{AZ 2007}. So, in
a $t$-f-potent domain every atom is a prime. If it so happens that a $t$
-f-potent domain has no prime elements then the $t$-f-potent domain in
question is atomless. Recently atomless domains have been in demand. The
atomless domains are also known as antimatter domains. Most of the examples
of atomless domains that were constructed were the so-called pre-Schreier
domains, i.e. domains in which every nonzero non unit $a$ is primal (is such that $a|xy$ implies $a=rs$ where $r|x$ and $s|y$). One example (Example 2.11 of \cite{AZ 2007}) was laboriously constructed in \cite{AZ 2007}, and this example was atomless and not pre-Schreier. As we indicate below, it is easy
to establish a method of telling whether a $t$-f-potent domain is
pre-Schreier or not.
Cohn in \cite{Coh 1968} called an element $c$ in an integral domain $D$
primal if (in $D)$ $c|a_{1}a_{2}$ implies $c=c_{1}c_{2}$ where $c_{i}|a_{i}.$
Cohn \cite{Coh 1968} assumes that $0$ is primal. We deviate slightly from
this definition and call a nonzero element $c$ of an integral domain $D$
primal if $c|a_{1}a_{2},$ for all $a_{1},a_{2}\in D\backslash \{0\},$
implies $c=c_{1}c_{2}$ such that $c_{i}|a_{i}.$ He called an integral domain
$D$ a Schreier domain if (a) every (nonzero) element of $D$ is primal and
(b) $D$ is integrally closed. We have included nonzero in brackets because
while he meant to include zero as a primal element, he mentioned that the
group of divisibility of a Schreier domain is a Riesz group. Now the
definition of the group of divisibility $G(D)(=\{\frac{a}{b}D:$ $a,b\in
D\backslash \{0\}\}$ ordered by reverse containment) of an integral domain $
D $ involves fractions of only nonzero elements of $D$, so it's permissible
to restrict primal elements to be nonzero and to study domains whose nonzero
elements are all primal. This is what McAdam and Rush did in \cite{MR 1978}.
In \cite{Zaf 1987} integral domains whose nonzero elements are primal were
called pre-Schreier. It turned out that pre-Schreier domains possess all the
multiplicative properties of Schreier domains. So let's concentrate on the
terminology introduced by Cohn as if it were actually introduced for
pre-Schreier domains.
Cohn called an element $c$ of a domain $D$ completely primal if every factor
of $c$ is primal and proved, in Lemma 2.5 of \cite{Coh 1968} that the
product of two completely primal elements is completely primal and stated in
Theorem 2.6 a Nagata type result that can be rephrased as: Let $D$ be
integrally closed and let $S$ be a multiplicative set generated by
completely primal elements of $D$. If $D_{S}$ is a Schreier domain then so
is $D.$ This result was analyzed in \cite{AZ 2007} and it was decided that
the following version (\cite[Theorem 4.4]{AZ 2007} of Cohn's Nagata type
theorem works for pre-Schreier domains.
\begin{theorem}
\label{Theorem F} (Cohn's Theorem for pre-Schreier domains). Let $D$ be an
integral domain and $S$ a multiplicative set of $D$. (i) If $D$ is
pre-Schreier, then so is $D_{S}$ . (ii) (Nagata type theorem) If $D_{S}$ is
a pre-Schreier domain and $S$ is the set generated by a set of completely
primal elements of $D$, then $D$ is a pre-Schreier domain.
\end{theorem}
Now we have already established above that if $r$ is a $t$-f-rigid element
then $(r,x)_{v}$ is principal for each $x\in D\backslash \{0\}.$ Now $(r,x)_{v}$ is principal for each $x\in D\backslash \{0\}$ if and only if $(r)\cap (x)$ is principal for each $x\in D\backslash \{0\},$ so $r$ is
what was called in \cite{AZ 1995} an extractor. Indeed it was shown in \cite
{AZ 1995} that an extractor is completely primal. Thus we have the following
statement.
\begin{corollary}
\label{Corollary G} Let $D$ be a $t$-f-potent domain. Then $D$ is
pre-Schreier if and only if $D_{S}$ is pre-Schreier for some multiplicative
set $S$ that is the saturation of a set generated by some $t$-f rigid
elements.
\end{corollary}
(Proof. If $D$ is pre-Schreier then $D_{S}$ is pre-Schreier anyway. If, on the other hand, $D_{S}$ is pre-Schreier and $S$ is the saturation of a set multiplicatively generated by some $t$-f-rigid elements, then by Theorem \ref{Theorem F}, $D$ is pre-Schreier.)
One may note here that if $D_{S}$ is not pre-Schreier for any multiplicative
set $S,$ then $D$ is not pre-Schreier. So the decision making result of Cohn
comes in demand only if $D_{S}$ is pre-Schreier. Of course in the Corollary \ref{Corollary G} situation one can take $S$ to be the saturation of the multiplicative set generated by all the $t$-f-rigid elements of $D,$ leading to: if $D_{S}$ is not pre-Schreier then $D$ is not pre-Schreier for sure, and if $D_{S}$ is pre-Schreier then $D$ cannot escape being a pre-Schreier domain.
\begin{example}
\label{Example H} Let $D=\cap _{i=1}^{i=n}V_{i}$ be a finite intersection of
distinct non-discrete rank one valuation domains with quotient field $
K=qf(D),$ $X$ an indeterminate over $D$ and let $L$ be a proper field
extension of $K.$ Then (a) $D+XL[X]$ is a non-pre-Schreier, $t$-f-potent
domain and (b) $D+XL[X]_{(X)}$ is an atomless non-pre-Schreier, $t$-f-potent
domain.
\end{example}
Illustration: (a) It is well known that $D$ is a Bezout domain with exactly $n$ maximal ideals, $M_{i}$ \cite{Kap 1970}, with $V_{i}=D_{M_{i}}.$ Thus $D=\cap D_{M_{i}}$ and the $M_{i},$ being $t$-ideals, must each contain a $t$-homogeneous ideal by Proposition \ref{Proposition X1}. Since $D$ is Bezout, every $t$-ideal of finite type is principal, so each $M_{i}$ in fact contains a $t$-f-rigid element and $D$ is $t$-f-potent; hence $D+XL[X]$ is $t$-f-potent by Corollary \ref{Corollary B4}.
One more result that can be added needs an introduction to a neat construction, these days called the Nagata ring construction. This is how the construction
goes.
Let $\ast $ be a star operation on a domain $D$, let $X$ be an indeterminate
over $D$ and let $S_{\ast }=\{f\in D[X]|$ $(A_{f})^{\ast }=D\}.$ Then the ring $D[X]_{S_{\ast }}$ is called the Nagata construction from $D$ with reference to $\ast $ and is denoted by $Na(D,\ast ).$ Indeed $Na(D,\ast )=Na(D,\ast _{f}).$
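For a concrete instance (a standard one, recorded only to fix ideas): in $D=\mathbb{Z}$ every nonzero ideal is principal, hence divisorial, so the maximal $t$-ideals are just the maximal ideals $p\mathbb{Z}$, and
\[
S_{v}=\{f\in \mathbb{Z}[X]\ |\ (A_{f})_{v}=\mathbb{Z}\}=\{f\in \mathbb{Z}[X]\ |\ \gcd (a_{0},a_{1},...,a_{n})=1\},
\]
the set of polynomials with coprime coefficients. Thus $Na(\mathbb{Z},v)=Na(\mathbb{Z},d)=\mathbb{Z}[X]_{S_{v}}$ and, by Proposition \ref{Proposition G1} below, its maximal ideals are exactly the $p\mathbb{Z}[X]_{S_{v}}$, $p$ a prime.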
\begin{proposition}
\label{Proposition G1} (\cite{Kan 1989} Proposition 2.1.) Let $\ast $ be a
star operation on $D$. Let $\ast _{f}$ be the finite type star operation induced by $\ast .$ Let $S_{\ast }=\{f\in D[X]|(A_{f})^{\ast }=D\}$. Then (1) $S_{\ast }=D[X]\backslash \cup _{M\in \Gamma }M[X]$ where $\Gamma $ is the set of all maximal $\ast _{f}$-ideals of $D$ (hence $S_{\ast }$ is a saturated multiplicatively closed subset of $D[X]$), and (2) $\{M[X]_{S_{\ast }}|M\in \Gamma \}$ is the set of all maximal ideals of $D[X]_{S_{\ast }}.$
\end{proposition}
As pointed out in \cite{FHP 2019}, the proof of part (1) of the following
proposition has a minor flaw, in that for a general domain it uses a result (
\cite[38.4]{Gil 1972}) that is stated for integrally closed domains. The fix
offered in \cite{FHP 2019} is a new result steeped in semistar
operations. We offer, in the following, a simple change in the proof of \cite
[(1) Proposition 2.2.]{Kan 1989} to correct the flaw indicated above.
\begin{proposition}
\label{Proposition G2} (\cite{Kan 1989} Proposition 2.2.) Let $T$ be a
multiplicatively closed subset of $D[X]$ contained in $S_{v}=\{f\in
D[X]|(A_{f})_{v}=D\}$. Let $I$ be a nonzero fractional ideal of $D$. Then
(1) $(I[X]_{T})^{-1}=I^{-1}[X]_{T}$, (2) $(I[X]_{T})_{v}=I_{v}[X]_{T}$ and
(3) $(I[X]_{T})_{t}$ $=I_{t}[X]_{T}.$
\end{proposition}
\begin{proof}[Proof of (1)]
It is clear that $I^{-1}[X]_{T}\subseteq (I[X]_{T})^{-1}$. Let $u\in (I[X]_{T})^{-1}.$ Since for any $a\in I\backslash \{0\}$ we have $(I[X]_{T})^{-1}\subseteq a^{-1}D[X]_{T}\subseteq K[X]_{T},$ we may assume that $u=f/h$ with $f\in K[X]$ and $h\in T$. Then $f\in (I[X]_{T})^{-1}$. Hence $fI[X]_{T}\subseteq D[X]_{T}.$ Hence $bf\in D[X]_{T}$ for any $b\in I$. Now $bfg\in D[X]$ for some $g\in S_{v}$. So $(A_{bfg})_{v}\subseteq D$. By \cite[Proposition 2.2.]{MNZ 1990}, $(A_{bfg})_{v}=(A_{bf}A_{g})_{v}=(A_{bf})_{v}$, since $(A_{g})_{v}=D$ and hence $v$-invertible. Therefore $bA_{f}\subseteq (bA_{f})_{v}=(A_{bf})_{v}\subseteq D$ for any $b\in I$. Hence $A_{f}\subseteq I^{-1}$. Hence $f\in I^{-1}[X]$ and $f/h\in I^{-1}[X]_{T}.$ Therefore $(I[X]_{T})^{-1}=I^{-1}[X]_{T}.$
\end{proof}
\begin{theorem}
\label{Theorem G3} (\cite{Kan 1989}, Theorem 2.4.) Let $\ast $ be a finite
type star operation on $D$. Let $I$ be a nonzero ideal of $D$.
Then $I$ is $\ast $-invertible if and only if $I[X]_{S_{\ast }}$ is
invertible.
\end{theorem}
\begin{theorem}
\label{Theorem G4}(\cite{Kan 1989}, Proposition 2.14.) Let $\ast $ be a star
operation on $D$. Then any invertible ideal of $D[X]_{S_{\ast }}$ is
principal.
\end{theorem}
Thus we have the following corollary.
\begin{corollary}
\label{Corollary G5} Let $I$ be a $t$-ideal of finite type of $D$. Then $I$
is $t$-invertible if and only if $I[X]_{S_{v}}$ is principal.
\end{corollary}
\begin{proof}
If $I[X]_{S_{v}}$ is principal then $I[X]_{S_{v}}$ is invertible and so $I$
is $t$-invertible by Theorem \ref{Theorem G3}. Conversely let $F$ be a
finitely generated ideal such that $F_{t}=I.$ Then $F$ is $t$-invertible and
so, by Theorem \ref{Theorem G3}, $F[X]_{S_{v}}$ is invertible and hence
principal by Theorem \ref{Theorem G4}. But then $
F[X]_{S_{v}}=(F[X]_{S_{v}})_{t}=I[X]_{S_{v}}.$
\end{proof}
\begin{lemma}
\label{Lemma G6} Let $I$ be a $t$-ideal of finite type of $D.$ Then $
I[X]_{S_{v}}$ is $d$-homogeneous if and only if $I$ is $t$-homogeneous.
Consequently $I[X]_{S_{v}}$ is $t$-f-rigid if and only if $I$ is $t$-super
homogeneous.
\end{lemma}
\begin{proof}
Let $I$ be a $t$-homogeneous ideal of $D.$ That $I[X]_{S_{v}}$ is a
$t$-ideal of finite type is an immediate consequence of Proposition \ref
{Proposition G2}. If $M$ is the unique maximal $t$-ideal containing $I,$
then certainly $M[X]_{S_{v}}\supseteq I[X]_{S_{v}}.$ Suppose that $\mathcal{N}
$ is another maximal ideal of $D[X]_{S_{v}}$ containing $I[X]_{S_{v}}.$ By
Proposition \ref{Proposition G1}, $\mathcal{N}=N[X]_{S_{v}}$ for some
maximal $t$-ideal $N$ of $D.$ But then $N=D\cap N[X]_{S_{v}}\supseteq D\cap
I[X]_{S_{v}}\supseteq I$. This forces $N=M$ and consequently $
N[X]_{S_{v}}=M[X]_{S_{v}}$, making $I[X]_{S_{v}}$ $d$-homogeneous.
Conversely, if $I[X]_{S_{v}}$ is $d$-homogeneous and contained in the unique
maximal ideal $M[X]_{S_{v}},$ suppose that $N$ is another maximal $t$-ideal
containing $I.$ Then again $N[X]_{S_{v}}\supseteq ID[X]_{S_{v}}$, which is
$d$-homogeneous, a contradiction unless $N=M.$
The \textquotedblleft consequently\textquotedblright\ part follows from
Corollary \ref{Corollary G5}.
\end{proof}
Let's call a domain $D$ $\ast $-f-r-potent if every maximal $\ast $-ideal of $D$
contains a $\ast $-f-rigid element.
\begin{proposition}
\label{Proposition G7} Let $D$ be an integral domain with quotient field $K$
, $X$ an indeterminate over $D$ and let $S_{v}=\{f\in D[X]|(A_{f})_{v}=D\}.$
Then (a) $D$ is $t$-potent if and only if $D[X]_{S_{v}}$ is $d$-potent and
(b) $D$ is $t$-super potent if and only if $D[X]_{S_{v}}$ is $d$-f-r-potent.
\end{proposition}
\begin{proof}
(a) Suppose that $D$ is $t$-potent. Let $M[X]_{S_{v}}$ be a maximal ideal of $
D[X]_{S_{v}}$ and let $I$ be a $t$-homogeneous ideal contained in $M.$ By
Lemma \ref{Lemma G6}, $I[X]_{S_{v}}$ is $d$-homogeneous, making $
M[X]_{S_{v}} $ $d$-potent. Conversely suppose that $D[X]_{S_{v}}$ is $d$
-potent and let $M $ be a maximal $t$-ideal of $D.$ Then $M[X]_{S_{v}}$ is a
maximal ideal of $D[X]_{S_{v}}$ and so contains a $d$-homogeneous ideal $
\mathcal{I}$ $=(f_{1},f_{2},...,f_{n})D[X]_{S_{v}}.$ Now let $
I=(f_{1},f_{2},...,f_{n})$. Then $\mathcal{I}$ $=ID[X]_{S_{v}}$ and $
I\subseteq (A_{I})_{t}[X]_{S_{v}}\subseteq M[X]_{S_{v}},$ since $
M[X]_{S_{v}} $ is a $t$-ideal and $f_{i}\in M[X]_{S_{v}}\cap D[X].$ This
gives $\mathcal{I}$ $=ID[X]_{S_{v}}$ $\subseteq
(A_{I})_{t}[X]_{S_{v}}\subseteq M[X]_{S_{v}}$ making $(A_{I})_{t}[X]_{S_{v}}$
another homogeneous ideal, contained in $M[X]_{S_{v}}$ and containing $
\mathcal{I}$. But then $(A_{I})_{t}$ $\subseteq M$ is a $t$-homogeneous
ideal, by Lemma \ref{Lemma G6}. (b) Use part (a) and Corollary \ref
{Corollary G5}.
\end{proof}
The other property that can be mentioned \textquotedblleft off
hand\textquotedblright\ is given in the following statement.
\begin{theorem}
\label{Theorem H1} A $t$-f-potent domain of $t$-dimension one is a GCD
domain of finite $t$-character.
\end{theorem}
A domain of $t$-dimension one that is of finite $t$-character is called a
weakly Krull domain. ($D$ is weakly Krull if $D=\cap D_{P}$ where $P$ ranges
over a family $\mathcal{F}$ of height one prime ideals of $D$ and each
nonzero non unit of $D$ belongs to at most a finite number of members of $
\mathcal{F}$.) A weakly Krull domain $D$ is dubbed in \cite{AZ 2019} a $
\ast $-weakly Krull domain or a type $1$ $\ast $-SH domain. Here a $\ast $
-homogeneous ideal $I$ is said to be of type $1$ if $M(I)=\sqrt{I^{\ast }}$
and $D$ is a type $1$ $\ast $-SH domain if every nonzero non unit of $D$ is
a $\ast $-product of finitely many $\ast $-homogeneous ideals of type $1$.
In the following lemma we set $\ast =t.$
\begin{lemma}
\label{Lemma K} A $t$-f-potent weakly Krull domain is a type $1$ $t$-f-SH
domain.
\end{lemma}
\begin{proof}
A weakly Krull domain is a type $1$ $t$-SH domain. But then for every pair $
I,J$ of similar homogeneous ideals $I^{n}\subseteq J^{\ast }$ and $
J^{m}\subseteq I^{\ast }$ for some positive integers $m,n.$ So $J$ is a $t$
-f-homogeneous ideal if $I$ is, and vice versa. Thus in a $t$-f-potent weakly
Krull domain the $t$-image of every $t$-homogeneous ideal is principal,
whence every nonzero non unit of $D$ is expressible as a product of $t$
-f-homogeneous elements, which makes $D$ a $t$-f-SH domain and hence a GCD
domain.
\end{proof}
\begin{proof}[Proof of Theorem \ref{Theorem H1}]
Use Theorem 5.3 of \cite{HZ 2019} for $\ast =t$
to decide that $D$ is of finite $t$-character and of $t$-dimension one.
Indeed, that makes $D$ a weakly Krull domain that is $t$-f-potent. The proof
would be complete once we apply Lemma \ref{Lemma K} and note that a $t$-f-SH
domain is a GCD domain and of course of finite $t$-character.
\end{proof}
Generally, a $t$-f-potent domain of $t$-dimension $>1$ is not necessarily a
GCD domain, nor is it necessarily of finite $t$-character.
\begin{example}
\label{Example L}$D=Z+XL[[X]]$, where $Z$ is the ring of integers and $L$ is
a proper field extension of $Q$, the field of rational numbers. Indeed $D$ is
prime potent and two-dimensional, but it is neither of finite $t$-character
nor a GCD domain.
\end{example}
There are some special cases in which a $t$-f-potent domain is a GCD domain
of finite $t$-character.

i) If every nonzero prime ideal of $D$ contains a $t$-f-homogeneous ideal.
(Use (4) of Theorem 5 of \cite{AZ 2019} along with the fact that $D$ is a
$t$-f-SH domain if and only if $D$ is a $t$-SH domain with every
$t$-homogeneous ideal $t$-f-homogeneous.) Thus a $t$-f-potent domain of
$t$-dimension $1$ is of finite character.

ii) If $D$ is a $t$-f-potent PVMD of finite $t$-character that contains a
multiplicative set $S$ generated by $t$-f-homogeneous elements of $D$, and if
$D_{S}$ is a GCD domain, then so is $D.$
I would be doing a grave injustice if I did not mention the fact that, long
before there was any modern-day multiplicative ideal theory, there were prime
potent domains such as $Z$, the ring of integers, and the rings of
polynomials over it. It is also worth mentioning that there are three
dimensional prime potent Prufer domains that are not Bezout. The examples
that I have in mind are due to Loper \cite{Lop 1999}. These are non-Bezout
Prufer domains whose maximal ideals are generated by principal primes.
\section{\label{S3}$\ast $-finite ideal monoids}
In \cite{YZ 2011}, we called a directed p.o. group $G$ pre-Riesz if its
positive elements satisfied the following property.
(pR): If $x_{1},x_{2},...,x_{n}$ are strictly positive elements of $G$ for
which there is at least one $g\in G$ with $g\nleq 0$ such that $g\leq
x_{1},x_{2},...,x_{n},$ then there is at least one $r\in G$ such that
$0<r\leq x_{1},x_{2},...,x_{n}.$
By a basic element, in the above paper, we meant a strictly positive element
$c\in G$ such that for every pair of strictly positive elements $c_{1},c_{2}$
preceding $c$ we have $r\in G$ such that $0<r\leq c_{1},c_{2}.$
Note that it is essentially the positive cone $G^{+}=\{g\in G|g\geq 0\}$ of
the pre-Riesz group that satisfies the (pR), but with reference to elements
of its main group. So let's call a commutative p.o. monoid $M=<M,+,0,\leq ~>$
a pre-Riesz monoid if $M$ is upper directed and satisfies (pR'): For any
finite set of strictly positive elements $x_{1},x_{2},...,x_{n}\in M,$
either $glb(x_{1},x_{2},...,x_{n})=0$ or there is $r\in M$ such that $
0<r\leq x_{1},x_{2},...,x_{n}.$ Note that the `$+$' and `$0$' are mainly
symbolic, standing in for the monoid operation and the identity. Note also
that to avoid getting into trivialities we shall only consider non-trivial
pre-Riesz monoids, i.e., ones that are different from $\{0\}.$
Here, of course, we do not require that $a\leq b$ $\Leftrightarrow $ $a+x=b$
for some $x\in M$. The partial order may be pre-assigned, but it must be
compatible with the binary operation of the monoid. Let's call $M$ a
divisibility p.o. monoid if in $M$ we have $a\leq b$ $\Leftrightarrow $
$a+x=b$ for some $x\in M.$
A monoid $M$ is said to have cancellation if $a+b=a+c$ implies $b=c.$
Obviously if in a cancellation monoid with order defined as above we have $
a+b\leq a+c$ then $b\leq c.$
\begin{proposition}
\label{Proposition FX} Let $a,b\in M$ where $M$ is a divisibility pre-Riesz
monoid with cancellation. Then $lub(a,b)=a+b$ if and only if $glb(a,b)=0.$
\end{proposition}
\begin{proof}
Suppose that $lub(a,b)=a+b$ and let there be $r>0$ such that $r\leq a,b.$
Then $a=r+x$ and $b=r+y$ for some $x,y\in M.$ Obviously, as $r>0,$ $x<a$ and
$y<b.$ Thus $r+x+y<a+b$ yet $r+x+y\geq a,b$ contradicting the assumption
that $lub(a,b)=a+b$. Conversely suppose that $glb(a,b)=0$ and let there be,
by way of contradiction, $r$ such that $r\geq a,b$ yet $r<a+b$. Then $
r=a+x=b+y$ and $a+b=r+z.$ Taking $r=a+x$ we have $a+b=a+x+z.$ Cancelling $a$
from both sides we get $b=x+z.$ Similarly substituting for $r=b+y$ and
cancelling $b$ from both sides we get $a=y+z.$ But then $z\leq a,b$ and
hence $z=0$ forcing $a=y$ and $b=x$ and $r=a+b$, a contradiction.
\end{proof}
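For a quick illustration (added here): in the monoid of nonnegative integers
under addition, where the divisibility order coincides with the usual order,
$lub(a,b)=\max (a,b)$ and $glb(a,b)=\min (a,b)$; thus $lub(a,b)=a+b$ precisely
when $glb(a,b)=0$, in agreement with Proposition \ref{Proposition FX}.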
If $glb(a,b)$ (resp., $lub(a,b)$) exists in a monoid $M$, we denote it by
$a\wedge b$ (resp., $a\vee b$).
\begin{example}
\label{Example GX} (1) If $G$ is a Riesz group then as shown in \cite[
Proposition 3.1]{YZ 2011}, $G^{+}$ is a pre-Riesz monoid. (2) Indeed, $G$ is
a pre-Riesz group if and only if $G^{+}$ is a pre-Riesz monoid, so a
pre-Riesz group can be regarded as a pre-Riesz monoid. (3) Let $\ast $ be a
finite character star operation defined on a domain $D$ and let $\Gamma $ be
the set of all proper $\ast $-ideals of finite type of $D$. Then $\Gamma
\cup \{D\}$ is a pre-Riesz monoid under $\ast $-multiplication because $
glb(A_{1},A_{2},...,A_{n})=D$ if and only if $(A_{1},A_{2},...,A_{n})^{\ast
}=D.$ Let's denote this monoid by $<\Gamma \cup \{D\},\ast ,D,\leq $ $>$ and
call it the $\ast $-finite ideals monoid ($\ast $-FIM).
\end{example}
(This is because the $\ast $-product of finitely many members of $\Gamma $
is again of finite type and this $\ast $-product is associative. Here the
partial order is induced by reverse containment i.e. for $A,B\in \Gamma ,$ $
A\leq B$ if and only if $A\supseteq B$ and of course $\ast $-multiplication
is compatible with the order, i.e., for $A,B,C\in \Gamma $ with $A\leq B$
we have $(AC)^{\ast }\leq (BC)^{\ast }$ (since $A\supseteq B$ implies that $
(AC)^{\ast }\supseteq (BC)^{\ast }).)$
Let's call $D$ $\ast $-coherent if for all $A,B\in \Gamma $ we have $A\cap
B\in \Gamma .$
\begin{proposition}
\label{Proposition HX} Let $<\Gamma \cup \{D\},\ast ,D,\leq $ $>$ be a $\ast
$-FIM. (1) For all $H,K\in \Gamma $ we have $H\wedge K\in \Gamma $, namely
$H\wedge K=(H,K)^{\ast }$; (2) if $H\cap K\in \Gamma ,$ then $H\vee K=H\cap K.$
\end{proposition}
\begin{proof}
Indeed, $(H,K)^{\ast }\leq H,K$ (since $(H,K)^{\ast }\supseteq H,K$), and if
$A\leq H,K$ for some $A\in \Gamma $ (i.e., $A\supseteq H,K$), then $A\leq
(H,K)^{\ast }.$ Let's put it this way: $(H,K)^{\ast }$ is standard for $\inf
(H,K)$, and $H\cap K,$ if it exists, is standard for $\sup (H,K)$ in ideal
theory, and so it is here.
\end{proof}
So, a $\ast $-finite monoid is actually a semilattice. Now let $M$ be a
pre-Riesz monoid and $H\in M.$ Call $H$ homogeneous if for all $0<R,S\leq H$
we have a $0<t<R,S.$ Obviously $0<K\in M$ is not homogeneous if and only if
there are $0<R,S<K$ such that $\inf (R,S)=0.$ Let's call $X,Y\in M$ disjoint
if $\inf (X,Y)=0$ and note that if $H$ is homogeneous then $H$ cannot be non
disjoint with two or more disjoint elements. Also if $X,Y$ are disjoint and $
0\,<x<X$ then $x$ and $Y$ are disjoint, for if not then there is $0<r<x,Y,X$
making $X,Y$ non-disjoint.
Call a set $S$ of homogeneous elements of a pre-Riesz monoid $M$ an
independent set if every pair of elements of $S$ is disjoint. In notes from
my work with Yang and a student of his \cite{LYZ 2014}, other, restricted,
versions of the following were proved. As those notes have not been made
public yet and there is a significant difference in the notions involved, I
include below some related results that are relevant to this write-up.
\begin{proposition}
\label{Proposition JX} Let $S$ be an independent set of homogeneous
elements, in a pre-Riesz monoid, satisfying a property $(Q).$ Then $S$ can
be enlarged to a maximal independent set $T$ of homogeneous elements
satisfying $(Q).$
\end{proposition}
\begin{proof}
Let $\Gamma =\{B|B\supseteq S$ is an independent set of homogeneous elements
satisfying $(Q)\}$. Obviously $\Gamma \neq {\Greekmath 011E} $. Now let $\{B_{{\Greekmath 010B} }\}$
be a chain of members of $\Gamma $ and let $C=\cup B_{{\Greekmath 010B} }.$ Then $
C\supseteq S$ and for any pair $x,y\in C$, $x,y$ are in $B_{{\Greekmath 010B} }$ for
some ${\Greekmath 010B} $ so elements of $C$ are homogeneous, satisfy $(Q)$ and are
homogeneous. So, $C\in \Gamma .$ Thus by Zorn's Lemma $\Gamma $ must contain
a maximal element and that is our $T.$
\end{proof}
We shall call a set $S$ of mutually disjoint elements, of a monoid $M,$ a
maximal disjoint set if (as usual) no set $T$ exists of mutually disjoint
elements such that $M\supseteq T\supsetneq S$ and we shall call a set $S$ of
mutually disjoint elements of $M$ order maximal if no element $s$ of $S$ can
be replaced by two distinct predecessors to form a set $(S\backslash
\{s\})\cup \{x,y\}$ of mutually disjoint elements. A maximal set of disjoint
homogeneous elements is obviously order maximal too, but a mere maximal set
of mutually disjoint elements may not be, as we have seen in the case of
ideals in a ring.
An order maximal independent set $B$ of homogeneous elements of a pre-Riesz
monoid $M$ is called a basis if $B$ is also an order maximal set of mutually
disjoint elements.
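To illustrate these notions (an example we add): in the $d$-FIM of $Z$, the
ring of integers, an ideal $nZ$ ($n\geq 2$) is homogeneous precisely when $n$
is a prime power, two ideals $mZ$ and $nZ$ are disjoint precisely when $m$
and $n$ are coprime, and the set $\{pZ|p$ a prime$\}$ is a basis.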
\begin{lemma}
\label{Lemma KX} Let $M$ be a pre-Riesz monoid. Then a non-empty subset $S$
of $M$ is a basis if and only if $S$ is disjoint and ($S\backslash \{s\}$)$
\cup \{x,y\}$ is non-disjoint for any $s\in S$ and for any $\{x,y\}\subseteq
$ $(M\backslash S)\cup \{s\}$, with $x\neq y$.
\end{lemma}
\begin{proof}
Let $S$ be a basis and suppose that for some $s\in S$, $(S\backslash
\{s\})\cup \{x,y\}$ is disjoint for some $\{x,y\}\subseteq $ $(M\backslash
S)\cup \{s\}$, with $x\neq y$. Then $x\wedge s\neq 0$ and $y\wedge s\neq 0$
because $S$ is a maximal set of disjoint elements of $M$. Since $M$ is
pre-Riesz, there is $t\in M$ such that $0<t\leq x,s$ and $u\in M$ such that $
0<u\leq y,s$. Next as $s$ is homogeneous, there is $w\in M$ such that $
0<w\leq t,u,x,y$, a contradiction. Conversely, suppose that $S$ is disjoint
and satisfies the condition in the lemma. If $S$ $\cup \{x\}$ is disjoint
for some $x\in M\backslash S$, then for any $s\in S$, $(S\backslash
\{s\})\cup \{s,x\}$ is disjoint and $s\neq x$, a contradiction. Therefore, $
S$ is a maximal disjoint set. If $s\in S$ and $s$ is not homogeneous, then
there exists at least one pair of elements $0<x,y<s$ such that $x\wedge y=0$
. But then $x,y\notin S,x\neq y$ and $(S\backslash \{s\})\cup \{x,y\}$ is
disjoint, a contradiction. Thus, $S$ is a maximal disjoint set consisting of
homogeneous elements, i.e., $S$ is a basis.
\end{proof}
\begin{theorem}
\label{Theorem LX} (cf \cite[Theorem 9]{LYZ 2014}) A pre-Riesz monoid $M$
has a basis if and only if (P): each $0<x\in M$ exceeds at least one
homogeneous element. Every basis of $M$ is an order maximal independent
subset and every order maximal independent subset of $M$ is a basis provided
$M$ has a basis.
\end{theorem}
\begin{proof}
Let $S$ = $\{0<a_{\gamma }|\gamma \in \Gamma \}$ be a basis for $M$, and
consider $0<x\in M$. There exists $\gamma \in \Gamma $ such that $x\wedge
a_{\gamma }\neq 0$, for otherwise $S$ is not a maximal set of disjoint
elements. This means that there is $0<h\leq x,a_{\gamma }$, and $h$ is
homogeneous because $a_{\gamma }$ is homogeneous and $h\in (0,a_{\gamma }]$.
Thus, $M$ satisfies (P). Conversely, suppose that $M$ satisfies the property
(P). Since $M$ is non-trivial there is at least one homogeneous element and,
by Proposition \ref{Proposition JX}, there exists a maximal independent
subset $T=\{0<a_{\gamma }|\gamma \in \Gamma \}$ of $M$, assuming that $(Q)$
means \textquotedblleft no restriction\textquotedblright . All we need to
show is that $T$ is a maximal set of disjoint elements. Suppose on the
contrary that there is an element $0<x\in M\backslash T$ such that $x\wedge
a_{\gamma }=0$ for all $\gamma \in \Gamma $. But then by the property (P),
$x$ exceeds a homogeneous element $h$, and $h$ is disjoint with $a_{\gamma }$
for all $\gamma \in \Gamma .$ Therefore, $T\cup \{h\}\supsetneq T$ and
$T\cup \{h\}$ is an independent subset of $M$, but this is contrary to our
choice of $T$.
\end{proof}
Conrad's F-condition on a pre-Riesz monoid reads thus: Each strictly
positive element $x$ in a pre-Riesz monoid $M$ is greater than at most a
finite number of (mutually) disjoint positive elements.
\begin{proposition}
\label{Proposition MX} If a pre-Riesz monoid $M$ satisfies Conrad's
F-condition, then $M$ has a basis.
\end{proposition}
\begin{proof}
Suppose that the condition holds but $M$ has no basis. Then by Theorem \ref
{Theorem LX}, there is at least one $0<y\in M$ such that no homogeneous
element is contained in $\{x\in M:0<x\leq y\}$. Then there exist two
disjoint elements $x_{1},y_{1}$ with $0<x_{1},y_{1}<y$ where none of $
x_{1},y_{1}$ exceeds a homogeneous element for otherwise $y$ would. So, say,
$0<x_{2},y_{2}<x_{1}$ with $x_{2}\wedge y_{2}$ $=0$. Since $x_{1}\wedge
y_{1}=0$ and $y_{2}<x_{1}$ we have $y_{1}\wedge y_{2}=0$. Next $
0<x_{3},y_{3}<x_{2}$ with $x_{3}\wedge y_{3}=0$. We can conclude that $
y_{1},y_{2},y_{3}$ are mutually disjoint. Similarly producing $x_{i}$ s, $
y_{i}$ s and using induction we can produce an infinite sequence $\{y_{i}\}$
of mutually disjoint elements less than $y$, contradicting the assumption
that $M$ satisfies Conrad's F-condition.
\end{proof}
\begin{corollary}
\label{Corollary NX} The following are equivalent for a pre-Riesz monoid $M$
: (i) $M$ satisfies Conrad's F-condition, (ii) Every strictly positive
element exceeds at least one and at most a finite number of homogeneous
elements that are mutually disjoint, (iii) $M$ contains a subset $\Gamma $
of strictly positive elements such that every strictly positive element of $
M $ exceeds at least one member of $\Gamma $ and at most a finite number of
mutually disjoint members of $\Gamma .$
\end{corollary}
\begin{proof}
(i) $\Rightarrow $ (ii) Conrad's F-condition, via Proposition \ref
{Proposition MX}, implies that every strictly positive element $x$ exceeds
at least one homogeneous element, say $h.$ The set $\{h\}$ is an independent
set of homogeneous elements preceding $x$ (here \textquotedblleft preceding
$x$\textquotedblright\ plays the role of the property $(Q)$), and by
Proposition \ref{Proposition JX}, $\{h\}$ can be expanded to a maximal
independent set $T$ of homogeneous elements preceding $x.$ But again by
Conrad's F-condition, $T$ must be finite. For (ii) $\Rightarrow $ (i),
suppose that (ii) holds yet $M$ does not satisfy (i). Then there is
$0<x\in M$ that exceeds an infinite sequence $\{x_{i}\}$ of mutually disjoint
strictly positive elements of $M$. Now each $x_{i}$ exceeds at least one
homogeneous element $h_{i}$. Since the $\{x_{i}\}$ are mutually disjoint, the
$\{h_{i}\}$ are mutually disjoint, which causes a contradiction. Whence, we
have the conclusion. (ii) $\Rightarrow $ (iii) Take $\Gamma =\{h|h$ is a
homogeneous element of $M\}$; then every strictly positive element exceeds at
least one member of $\Gamma $ and at most a finite number of mutually
disjoint members of $\Gamma .$ (iii) $\Rightarrow $ (i) Suppose that the
given condition holds but Conrad's F-condition does not. That means there is
some element $y>0$ such that $y$ is greater than an infinite number of
mutually disjoint elements $\{y_{\alpha }\}$ of $M.$ By (iii) each
$y_{\alpha }$ exceeds a member $z_{\alpha }$ of $\Gamma .$ As the
$y_{\alpha }$ are mutually disjoint, so are the $z_{\alpha }$, making $y$
exceed an infinite number of mutually disjoint members of $\Gamma ,$ a
contradiction.
\end{proof}
\begin{corollary}
\label{Corollary PX} (Corollary to Corollary \ref{Corollary NX}) Let $D$ be
an integral domain, $\ast $ a finite character star operation on $D$ and let
$\Gamma $ be a set of proper, nonzero, $\ast $-ideals of finite type of $D$
such that every proper nonzero finite type $\ast $-ideal of $D$ is contained
in some member of $\Gamma $ . Then $D$ is of finite $\ast $-character if and
only if every nonzero finitely generated ideal $I$ of $D$ with $I^{\ast
}\neq D$ is contained in at least one and at most a finite number of
mutually $\ast $-comaximal members of $\Gamma .$
\end{corollary}
\begin{proof}
We know that $M=\{A^{\ast }|$ $A^{\ast }\neq D$ is a $\ast $-ideal of finite
type of $D\}\cup \{D\}$ is a pre-Riesz monoid under $\ast $-multiplication,
that the set $\Gamma $ can be regarded as a subset of $M$, and that the
stated condition requires every strictly positive member of $M$ to exceed at
least one member of $\Gamma $ and at most a finite number of mutually
disjoint members of $\Gamma .$ This means, according to Corollary
\ref{Corollary NX}, that every element $A^{\ast }$ exceeds at least one basic
element and at most a finite number of basic elements of $M.$ Now take an
element $A^{\ast }$ in $M$ and let $h$ be a basic element of $\Gamma $
containing $A^{\ast }.$ Then, by Proposition \ref{Proposition JX}, there is
at least one maximal set $S$ of mutually disjoint basic elements containing
$A^{\ast }$, and each $h\in S$ exceeds some member of $\Gamma $, giving a
maximal set $T$ of basic elements in $\Gamma $ containing $A^{\ast }.$ This
translates to: if the condition is satisfied, then for every $\ast $-ideal of
finite type $A$ there is a maximal set $T$ of homogeneous $\ast $-ideals
containing $A$, and by the condition, $T$ is finite. Now let $|T|=n$ and
recall that if $T=\{H_{1},...,H_{n}\}$ then each of the $H_{i}$ determines a
unique maximal $\ast $-ideal $M(H_{i}).$ To show that $T^{\prime
}=\{M(H_{1}),...,M(H_{n})\}$ contains all the maximal $\ast $-ideals
containing $A^{\ast }$, assume that there is a maximal $\ast $-ideal $N\notin
T^{\prime }$ containing $A^{\ast }.$ Then there is $x\in N\backslash
(\cup M(H_{i})).$ But then $xD$ is $\ast $-comaximal with $H_{i}$ for each
$i$, and hence $(x,A)^{\ast }\subseteq N$ is $\ast $-comaximal with each
$H_{i}$, which translates to: $(x,A)^{\ast }$ is disjoint with each basic
element $H_{i}.$ But then $(x,A)^{\ast }$ exceeds a basic element $K$ which
must be disjoint with each of the $H_{i}$, killing the maximality of $T.$ The
converse is obvious, because if a finitely generated ideal $I$ with $I^{\ast
}\neq D$ were contained in an infinite number of mutually $\ast $-comaximal
members of $\Gamma $, then $D$ could not be of finite $\ast $-character,
because a maximal $\ast $-ideal cannot contain a pair of mutually $\ast
$-comaximal ideals.
\end{proof}
Finally, it's important to mention that not all p.o. monoids are pre-Riesz
monoids. According to Proposition 4.2 of \cite{YZ 2011}, the group of
divisibility $G(D)$ of a domain $D$ is pre-Riesz if and only if (P): for all
$x_{1},x_{2},...,x_{n}\in D\backslash \{0\},$ $(x_{1},x_{2},...,x_{n})_{v}=D$
or $(x_{1},x_{2},...,x_{n})\subseteq rD$ for some non unit $r\in D.$ As we
can readily see, a domain satisfying (P) above is a domain satisfying the
PSP property and in a PSP domain every atom is a prime. Thus an atomic
domain (every nonzero non unit is expressible as a product of atoms) with
PSP property is a UFD. Thus, say, if $D$ is a non UFD Noetherian domain then
$G(D)$ is not pre-Riesz. It may be noted that the set of principal ideals
under multiplication is a submonoid of $\Gamma \cup \{D\}.$
\subsection{Riesz monoids}
First off, let's note that when we say \textquotedblleft monoid\textquotedblright\ we mean a commutative monoid.
Now call a directed p.o. monoid $M=<M,+,0,\leq $ $>$ a sub-Riesz monoid if
every element $x$ of $M$ is primal, i.e., for $y_{1},y_{2}\in M,$ $x\leq
y_{1}+y_{2}$ $\Rightarrow x=x_{1}+x_{2}$ with $x_{i}\leq y_{i}$, and a Riesz
monoid if $M$ is, in addition, a divisibility monoid and cancellative.
One may ask whether Riesz monoids satisfy the Riesz interpolation property,
as do Riesz groups. The answer is yes, and it can be readily checked as we
show below. Note that by $M^{+}$ we mean the set $\{x\in M|x\geq 0\}.$
\begin{theorem}
\label{Theorem QX}TFAE for a commutative cancellation divisibility
monoid $M$: (1) Every $0\leq x\in M$ is primal, (2) For all $a,b,x,y\in M^{+}$
with $a,b\leq $ $x,y$ there is $z$ such that $a,b\leq z\leq x,y,$ (3) For
all $a,b,x_{1},x_{2},...,x_{n}$ $\in M^{+}$ with $a,b\leq $ $
x_{1},x_{2},...,x_{n}$ there exists $z$ such that $a,b\leq z\leq
x_{1},x_{2},...,x_{n}$, (4) For all $
a_{1},a_{2},...,a_{n},b_{1},b_{2},...,b_{m}\in M^{+}$ with $
a_{1},a_{2},...,a_{n}\leq $ $b_{1},b_{2},...,b_{m}$ there exists $d$ such
that $a_{1},a_{2},...,a_{n}\leq d\leq b_{1},b_{2},...,b_{m}.$
\end{theorem}
\begin{proof}
(1) $\Rightarrow $ (2) Let every positive element of $M$ be primal.
Let $a,b\leq x,y.$ Then $x=x_{1}+a=x_{2}+b$ and $y=y_{1}+a=y_{2}+b......(1)$
Since $x_{1}+a=x_{2}+b,$ $b\leq x_{1}+a$ and since $b$ is primal $
b=b_{1}+b_{2}$ where $b_{1}\leq x_{1}$ and $b_{2}\leq a.$ (2)
Let $x_{1}=x_{1}^{\prime }+b_{1}$ and $a=a_{1}+b_{2}.$ Then $x_{1}+a=x_{2}+b$
can be written as $x_{1}^{\prime }+b_{1}+a_{1}+b_{2}=x_{2}+b$, or $
x_{1}^{\prime }+a_{1}+b_{1}+b_{2}=x_{2}+b.$ Noting that $b=b_{1}+b_{2}$ and
cancelling $b$ from both sides we get $x_{1}^{\prime }+a_{1}=x_{2}.$
......(3)
Since $a_{1}+b_{2}=a$ we have $a,b\leq a_{1}+b........(4)$
Using the value of $x_{2}$ we have $a_{1}+b\leq x.$ (Note: $
x=x_{2}+b=(x_{1}^{\prime }+a_{1})+b)$ ... (5)
Now consider $y_{1}+a=y_{2}+b.$ Using $a=a_{1}+b_{2}$ and $b=b_{1}+b_{2}$ we
have $y_{1}+a_{1}+b_{2}=y_{2}+b_{1}+b_{2}.$ Cancelling $b_{2}$ from both
sides we get $y_{1}+a_{1}=y_{2}+b_{1}.$ So that $b_{1}\leq y_{1}+a_{1}$ and
as $b_{1}$ is primal we have $b_{1}=b_{3}+b_{4}$ where $b_{3}\leq y_{1}$ and
$b_{4}\leq a_{1}.$ Writing $y_{1}=y_{1}^{\prime }+b_{3}$ and $
a_{1}=a_{1}^{\prime }+b_{4}$ we can express $y_{1}+a_{1}=y_{2}+b_{1}$ as $
y_{1}^{\prime }+b_{3}+a_{1}^{\prime }+b_{4}=y_{2}+b_{1}.$ Cancelling $
b_{1}=b_{3}+b_{4}$ from both sides we get $y_{2}=y_{1}^{\prime
}+a_{1}^{\prime }.$ This gives $y=y_{2}+b=y_{1}^{\prime }+a_{1}^{\prime
}+b=y_{1}+a.$ Now as $y_{1}^{\prime }\leq y_{1}$ we get $y_{1}=y_{4}+y_{1}^{
\prime }$ which on substituting in $y_{1}^{\prime }+a_{1}^{\prime
}+b=y_{1}+a $ gives $y_{1}^{\prime }+a_{1}^{\prime }+b=y_{4}+y_{1}^{\prime
}+a$ and cancelling $y_{1}^{\prime }$ we get $y_{4}+a=a_{1}^{\prime }+b$ and
so $a\leq a_{1}^{\prime }+b.$ That is $a,b\leq a_{1}^{\prime }+b$ and $
a_{1}^{\prime }+b\leq y.$ But as $a_{1}^{\prime }\leq a_{1}$ and $
x_{2}=x_{1}^{\prime }+a_{1}$ we have $a_{1}^{\prime }+b\leq x_{2}+b=x.$ So
we have $z=a_{1}^{\prime }+b$ such that $a,b\leq z\leq x,y.$
(2) $\Rightarrow $ (1). Let $a\leq b+c$.
Then as $a,b\leq b+c,$ $a+b$ there is $x$ such that $a,b\leq x\leq b+c,$ $
a+b $ ..........(i)
Now as $a\leq x$ we have $x=x_{1}+a$ ..........(ii)
and as $b\leq x$ we have $x=x_{2}+b.......$...(iii)
Using (i) and (iii) $x_{2}\leq a$ and $x_{2}\leq c.$ Now as $x_{2}\leq a,$
setting $a=x_{3}+x_{2}$ we have from $x_{1}+a=x_{2}+b$, the equation $
b=x_{1}+x_{3}.$ So $a\leq b+c$ implies that $a=x_{2}+x_{3},$ with $
x_{2},x_{3}\in M^{+}$ such that $x_{3}\leq b$ and $x_{2}\leq c.$
(2) $\Rightarrow $ (3). Let $a,b\leq x_{1},x_{2},...,x_{n}.$ If $n=2$ we
have the result by (2). So suppose that $n>2$ and suppose that for all $
x_{1},x_{2},...,x_{n-1}$ the statement is true. Then for $a,b\leq
x_{1},x_{2},...,x_{n-1}$ there is a $d_{1}$ such that $a,b\leq d_{1}\leq
x_{1},x_{2},...,x_{n-1}.$ But then for $d_{1},x_{n}$ there is $d$ with $
a,b\leq d\leq d_{1},x_{n}.$ But this $d$ satisfies $a,b\leq d\leq
x_{1},x_{2},...,x_{n}.$
(3) $\Rightarrow $ (4). Let $a_{1},a_{2},...,a_{n}\leq
b_{1},b_{2},...,b_{m}. $ Then $a_{1},a_{2}\leq b_{1},b_{2},...,b_{m}$ and so
there is a $d_{1}$ such that $a_{1},a_{2}\leq d_{1}\leq
b_{1},b_{2},...,b_{m}.$ Now $d_{1},a_{3},...,a_{n}\leq b_{1},b_{2},...,b_{m}$
and induction on $n$ completes the job. (4) $\Rightarrow $ (2). Obvious
because (2) is a special case of (4).
\end{proof}
Part (2) of Theorem \ref{Theorem QX} is also called the $(2,2)$ Riesz
interpolation property and (4) is the $(n,m)$ interpolation property for
positive integers $n$ and $m$.
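For instance (an illustration we add), in the divisibility monoid of positive
integers under multiplication every element is primal: if $a|bc$, then
$a=a_{1}a_{2}$ with $a_{1}=\gcd (a,b)$ dividing $b$ and $a_{2}=a/a_{1}$
dividing $c$. Accordingly, for $a,b\leq x,y$ the $(2,2)$ interpolant may be
taken to be the least common multiple of $a$ and $b$.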
Call a subset $S$ of a monoid $M$ conic if $x+y=0$ implies $x=0=y,$ for all $
x,y\in S.$ In a p.o. group $G$ the sets $G^{+}$ and $-G^{+}$ are conic. If $
D $ is an integral domain then the set $m(D)$ of nonzero principal ideals of
$D $ is a monoid under multiplication, with identity $D,$ ordered by $aD\leq
bD$ $\Leftrightarrow $ there is $c\in D$ such that $bD=acD\Leftrightarrow
aD\supseteq bD.$ The monoid $m(D)$ is cancellative too, and in $m(D)$ we have
$xD\,yD=D\Rightarrow xD=yD=D.$ So, $m(D)$ is a divisibility cancellative conic
monoid. The monoid $m(D)$ is of interest because of the manner in which it
generates a group. We know how the field of quotients of a domain is formed
as a set of ordered pairs, each pair representing an equivalence class with
$(a,b)=(c,d)$ $\Leftrightarrow da=bc$, and then we represent the pair $(a,b),$
$b\in D\backslash \{0\}$, by $\frac{a}{b}=ab^{-1}.$ Now the group generated
by $m(D)$ gets the form $G(D)=\{\frac{a}{b}D|\frac{a}{b}\in qf(D)\backslash
\{0\}\},$ ordered by $\frac{a}{b}D\leq \frac{c}{d}D$ $\Leftrightarrow \frac{a}{b}
D\supseteq \frac{c}{d}D\Leftrightarrow $ there is $hD\in m(D)$ such that $
\frac{a}{b}DhD=\frac{c}{d}D,$ so that $m(D)$ is the positive cone of $G(D).$
The group $G(D)$ gets the name group of divisibility of $D$ (actually of
$m(D)$). Now any divisibility monoid $M$ that is also cancellative and conic,
with least element $0$, can be put through a similar process of forming
equivalence classes of ordered pairs to get a group-of-divisibility-like
group $G(M)=\{a-b|a,b\in M\}$ with $x\leq y$ in $G(M)$ $\Leftrightarrow
x+h=y$ for some $h\in M.$
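For example (added here): for $D=Z$, $m(D)=\{nZ|n$ a positive integer$\}$ may
be identified with the multiplicative monoid of positive integers ordered by
divisibility, and the construction above yields $G(D)=\{\frac{a}{b}Z|\frac{a}{b}
\in Q\backslash \{0\}\}$, which may be identified with the group of positive
rationals under multiplication, ordered by $\frac{a}{b}Z\leq \frac{c}{d}Z$
$\Leftrightarrow $ $\frac{c}{d}\big/\frac{a}{b}$ is an integer, with positive
cone $m(Z).$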
\begin{corollary}
\label{Corollary RX}A Riesz Monoid $M$ has the pre-Riesz property. Also $
M^{+}$ is conic for a Riesz monoid $M.$
\end{corollary}
\begin{proof}
Let $0\leq x,y$ in $M$ and suppose that there is $g\in M$ such that $g$ is
not greater than or equal to $0$ yet $g\leq x,y,$ that is $0,g\leq x,y.$
Then by the $(2,2)$ interpolation property there is $r\in M$ such that $
0,g\leq r\leq x,y.$ But then $r>0,$ as $r\geq 0$ and $r\neq 0$ because $
r\geq g.$ Next suppose $x,y\geq 0$. If $x+y=0$ and, say, $x\neq 0,$ then we
have $0,x\leq x,x+y$ and by the $(2,2)$ interpolation there is $r$ such that
$0,x\leq r\leq x,x+y.$ Then $0\leq r\leq x+y=0$ forces $r=0,$ and so $0\leq
x\leq r=0$ forces $x=0,$ contradicting the fact that $x\neq 0.$
\end{proof}
Well a p.o. monoid $M$ is a p.o. group if every element of $M$ has an
inverse and obviously if a p.o. monoid is a Riesz monoid and a group it is a
Riesz group. This brings up the question: Let $M$ be a Riesz monoid and $
M^{+}$ the positive cone of it, will $M^{+}$ generate a Riesz group? As we
shall be mostly concerned with monoids $M$ with $0$ the least element, i.e. $
M=M^{+}$ we remodel the question as: Let $M$ be a Riesz monoid with $M^{+}=M$
the positive cone of it, will $M$ generate a Riesz group? The following
result, whose proof was indicated to me by G.M. Bergman in an email,
provides the answer.
\begin{theorem}
\label{Theorem SX}Suppose $M$ is a cancellative abelian monoid, which is
"conical", i.e., no two nonidentity elements sum to $0$, and which we
partially order by divisibility; and suppose every element of $M$ is primal,
namely, that with respect to the divisibility order, (1) $x\leq a+b\Rightarrow x=u+v$
such that $u\leq a$ and $v\leq b$. Then the group generated by $M$ is a
Riesz group.
\end{theorem}
\begin{proof}
Let us rewrite (1) by translating all the inequalities into their
divisibility statements; so that $x\leq a+b$ becomes $x+y=a+b$ for some $y$
and $u\leq a$ becomes $a=u+u\prime $, and similarly for the last inequality;
and finally, let us rename the elements more systematically; in particular,
using $a,b,c,d$ for the above$x,y,a,b$. Then we find that (1) becomes $
a+b=c+d\Rightarrow a=a\prime +a",c=a\prime +b\prime ,d=a"+b"$ for some $
a\prime ,a",b\prime ,b"\in M.$ Now if we substitute the three equations to
the right of the "$\Rightarrow $" into the equation before the "$\Rightarrow
$", and use cancellativity, we find that $b=b\prime +b"$; so the full
statement is (2) $a+b=c+d\Rightarrow a=a\prime +a",~b=b\prime +b",~c=a\prime
+b\prime ,~d=a"+b",$ for some $a\prime ,a",b\prime ,b"\in M$. Now let $G$ be
the group generated by $M$, ordered so that $M$ is the positive cone. We
want to show $G$ has the Riesz Interpolation property. So suppose that in $G$
we have $p,q\leq r,s$. We can write these inequalities as (3) $
r=p+a,s=p+c,r=q+d,s=q+b$ where $a,b,c,d\in M$. Now the sum of the first and
last equations gives a formula for $r+s$, and so does the sum of the second
and third equations. Equating the results, and cancelling the summands $p+q$
on each side, we get an equation in $M:a+b=c+d$. Hence we can apply (2) to
get decompositions of $a,b,c,d$, and substitute these into (3), getting (4) $
r=p+a\prime +a",s=p+a\prime +b\prime ,r=q+a"+b",s=q+b\prime +b"$. Equating
the first and third equations (or if we prefer, the second and fourth) and
cancelling the common term $a"$ (respectively, the common term $b\prime $),
we get (whichever choice we have made) (5) $p+a\prime =q+b"$. The element
given by (5) is clearly $\geq p,q$, while from (4) (using whichever of the
equations for $r$ we prefer and whichever of the equations for $s$ we
prefer), we see that it is $\leq r,s$. So this is the element whose
existence is required for the ($(2,2)$) Riesz interpolation property for $G$.
\end{proof}
A fractional ideal $I$ is called $\ast $-invertible if $(II^{-1})^{\ast }=D.$
It is well known that if $I$ is $\ast $-invertible for a finite character
star operation $\ast $ then $I^{\ast }$ and $I^{-1}$ are of finite type.
Denote the set of all $\ast $-invertible fractional $\ast $-ideals of $D$ by
$Inv_{\ast }(D)$ and note that a given integral ideal $I$ cannot always be
expressed as a product of integral ideals. So when we talk about an integral
$\ast $-invertible $\ast $-ideal we are talking about the end result and not
how it is expressed. Let $
\mathcal{I}_{\ast }(D)$ be the set of integral $\ast $-invertible $\ast $
-ideals and note that $\mathcal{I}_{\ast }(D)$ is a monoid under $\ast $
-multiplication. Note that $\mathcal{I}_{\ast }(D)$ can be partially ordered
by $I\leq J$ if and only if $I\supseteq J$. Indeed $J\subseteq I$ if and
only if $(JI^{-1})^{\ast }=H\subseteq D$, if and only if $J=(IH)^{\ast },$
and as $J,$ $I$ are $\ast $-invertible, $H$ is $\ast $-invertible and
integral. Thus in $\mathcal{I}_{\ast }(D)$, $I\leq J$ $\Leftrightarrow
J=(IH)^{\ast }$ for some $H\in \mathcal{I}_{\ast }(D).$ In other words $
\mathcal{I}_{\ast }(D)$ is a divisibility p.o. monoid. Because $\mathcal{I}
_{\ast }(D)$ involves only $\ast $-invertible $\ast $-ideals, it is
cancellative too. Finally $\mathcal{I}_{\ast }(D)$ is directed because of
the definition of order. That $Inv_{\ast }(D)$ is generated by $\mathcal{I}
_{\ast }(D)$ follows from the fact that every fractionary ideal of $D$ can
be written in the form $A/d$ where $A\in F(D)$ and $d\in D\backslash \{0\}.$
Finally, the partial order in $Inv_{\ast }(D)$ gets induced by $\mathcal{I}
_{\ast }(D)$ in that for $I,J\in $ $Inv_{\ast }(D)$ we have $I\leq J$ $
\Leftrightarrow J\subseteq I\Leftrightarrow (JI^{-1})^{\ast }\in $ $\mathcal{
I}_{\ast }(D).$ Call $I\in \mathcal{I}_{\ast }(D)$ $\ast $-primal if for all
$J,K\in \mathcal{I}_{\ast }(D)$ with $I\leq (JK)^{\ast }$ we have $
I=(I_{1}I_{2})^{\ast }$ where $I_{1}^{\ast }\leq J$ and $I_{2}^{\ast }\leq
K.$ Call $D$ $\ast $-Schreier, for a star operation $\ast $ of finite
character, if every integral $\ast $-invertible $\ast $-ideal of $D$ is
$\ast $-primal.
\begin{proposition}
\label{Proposition TX}Let $\ast $ be a finite character star operation
defined on $D.$ Then $D$ is a $\ast $-Schreier domain if and only if $
Inv_{\ast }(D)$ is a Riesz group under $\ast $-multiplication and order
defined by $A\leq B\Leftrightarrow A\supseteq B.$
\end{proposition}
\begin{proof}
Suppose that $D$ is $\ast $-Schreier, as defined above, that is, each $I\in
\mathcal{I}_{\ast }(D)$ is primal. The notion of $\ast $-Schreier suggests
that we define $\leq $ by $A\leq B$ $\Leftrightarrow A\supseteq B.$ Then, as
for each pair of integral ideals $I,J$ we have $(IJ)^{\ast }=D$ $\Rightarrow
J^{\ast }=I^{\ast }=D,$ the same holds for members of $\mathcal{I}_{\ast
}(D)$, which are all $\ast $-ideals. So $(IJ)^{\ast }=D\Rightarrow I=J=D$,
and so $\mathcal{I}_{\ast }(D)$ is conic. Of course $\mathcal{I}_{\ast }(D)$
is cancellative by the choice of ideals, and by the definition of order
$\mathcal{I}_{\ast }(D)$ is a divisibility monoid. So by Theorem \ref{Theorem SX} $
\mathcal{I}_{\ast }(D)$ generates a Riesz group and by the above
considerations $Inv_{\ast }(D)$ is generated by $\mathcal{I}_{\ast }(D).$
Consequently $Inv_{\ast }(D)$ is a Riesz group. Conversely if $Inv_{\ast
}(D) $ is a Riesz group, with that order defined on it, then $\mathcal{I}
_{\ast }(D)$ is the positive cone of the Riesz group $Inv_{\ast }(D)$ and so
each element of $\mathcal{I}_{\ast }(D)$ must be primal.
\end{proof}
Proposition \ref{Proposition TX} brings together a number of notions studied
at different times. The first was quasi-Schreier, a study started in \cite{DM
2003} and completed in \cite{ADZ 2007}. The target in these papers was the
study of $\mathcal{I}_{d}(D),$ i.e. the monoid of invertible integral ideals
of $D,$ when $Inv_{d}(D)$ is a Riesz group. Another study, targeting $
\mathcal{I}_{t}(D),$ i.e. the monoid of $t$-invertible integral $t$-ideals
of $D,$ along the same lines as above appeared in \cite{DZ 2011}.
Now let's step back and require that every $\ast $-invertible $\ast $-ideal
of $D$ be principal. Then in Proposition \ref{Proposition TX}, $\mathcal{I}
_{\ast }(D)$ is the monoid of principal ideals, each of which is primal, and
the Riesz group $Inv_{\ast }(D)$ consists just of principal fractional
ideals of $D$, and hence is the group of divisibility of $D$. It is well known
that if $\ast $ is of finite type each $\ast $-invertible $\ast $-ideal is a
$t$-invertible $t$-ideal (\cite{Zaf 2000}) and that in a pre-Schreier domain
each $t$-invertible $t$-ideal is principal (\cite[Theorem 3.6]{Zaf 1987}).
So we have the following corollary.
\begin{corollary}
\label{Corollary VX}Let $D$ be $\ast $-Schreier for any star operation $\ast
$ of finite character. Then $D$ is pre-Schreier if and only if each element
of $\mathcal{I}_{\ast }(D)$ is principal.
\end{corollary}
\begin{proof}
Suppose that each member of $\mathcal{I}_{\ast }(D)$ is principal. Then for
$a,b,c\in D\backslash \{0\}$ we have $aD,bD,cD\in \mathcal{I}_{\ast }(D)$,
and $a|bc$ in $D$ gives $aD\leq bDcD$; so in $\mathcal{I}_{\ast }(D)$ we must
have $aD=(I_{1}I_{2})^{\ast }$ where $I_{1}\leq bD$ and $I_{2}\leq cD.$ But
$I_{i}$, being in $\mathcal{I}_{\ast }(D)$, must be principal, say
$I_{i}=a_{i}D$. This gives $a=a_{1}a_{2}$, and $a_{1}D\leq bD,a_{2}D\leq cD$
gives $a_{1}|b,a_{2}|c.$ In sum, for all $a,b,c\in D\backslash \{0\}$,
$a|bc$ $\Rightarrow a=a_{1}a_{2}$ where $a_{1}|b$ and $a_{2}|c$, which is a
way of saying that every nonzero element of $D$ is primal. Conversely, as
indicated earlier, $D$ being pre-Schreier makes each $\ast $-invertible
$\ast $-ideal of $D$ principal and consequently all members of
$\mathcal{I}_{\ast }(D)$ principal.
\end{proof}
This brings us to the last item on the \textquotedblleft
agenda\textquotedblright . In 1998, Professor Halter-Koch wrote a book \cite
{H-K 1998} restating all the then known concepts of multiplicative ideal
theory for monoids in terms of ideal systems, except for one: he did not
include a Schreier monoid nor a pre-Schreier monoid. Provided below is one
of the missing definitions.
\begin{definition}
\label{Definition WX} A conic, cancellative divisibility monoid $<M,\bullet
,1,\leq >$ is a pre-Schreier or a Riesz monoid if every element of $M$ is
primal.
\end{definition}
To end it all, let's note, as Professor Halter-Koch would have, that an
integral domain $D$, all nonzero elements of whose multiplicative monoid are
primal, is pre-Schreier if $\leq $ is replaced by $|$.
\end{document}
\begin{document}
\title{Supplementary Material : A Primary Radiation Standard Based on Quantum Nonlinear Optics }
\author{Samuel~Lemieux}
\email[]{[email protected]}
\affiliation{Department of Physics and Max Planck Centre for Extreme and Quantum Photonics, University of Ottawa, 25 Templeton Street, Ottawa, Ontario K1N 6N5, Canada}
\author{Enno~Giese}
\affiliation{Institut für Quantenphysik and Center for Integrated Quantum Science and Technology $\left(\text{IQ}^{\text{ST}}\right)$, Universität Ulm, Albert-Einstein-Allee 11, D-89081, Germany}
\author{Robert~Fickler}
\affiliation{Institute for Quantum Optics and Quantum Information (IQOQI), Austrian Academy of Sciences, Boltzmanngasse 3, 1090 Vienna, Austria}
\author{Maria~V.~Chekhova}
\affiliation{Max Planck Institute for the Science of Light, G.-Scharowsky Str.1/Bau 24, 91058 Erlangen, Germany}
\affiliation{Physics Department, Lomonosov Moscow State University, Moscow 119991, Russia}
\affiliation{University of Erlangen-Nuremberg, Staudtstrasse 7/B2, 91058 Erlangen, Germany}
\author{Robert~W.~Boyd}
\affiliation{Department of Physics and Max Planck Centre for Extreme and Quantum Photonics, University of Ottawa, 25 Templeton Street, Ottawa, Ontario K1N 6N5, Canada}
\affiliation{Institute of Optics, University of Rochester, Rochester, New York 14627, USA}
\date{\today}
\begin{abstract}
We discuss several relevant quantities for radiometry in a general manner, in particular the connection of the photon statistics of a quantized mode to the number of photons detected by a detector.
Further, we investigate the angular dependence of the intensity of down-converted light, the approximation used for angular mode selection by a pinhole, and the wavelength dependence of the gain.
Also, we describe the experimental setup in detail and discuss details of the data analysis for both the spontaneous and the high-gain regime of parametric down-conversion.
We finally prove that the low-gain experiments have been performed in the spontaneous regime.
\end{abstract}
\maketitle
\normalsize
\section{Radiometry}
Since the quantization of the electric field is usually performed in plane-wave modes denoted by a wave vector $\vec{k}$, we express general radiometric quantities through the photon number per plane wave mode $\mathcal{N}(\vec{k})$ of the field under consideration.
A detector cannot detect all of these modes, and hence the detected photon-number density in the quantization volume can be written as
\begin{equation}
\varrho= \frac{1}{(2\pi)^3} \smashoperator{\int_{\text{detector}}} \textrm{d}^3 k\, \mathcal{N}(\vec{k})= \smashoperator{\int_{\Delta \lambda}} \textrm{d}\lambda \smashoperator{\int_{\Delta \Omega}} \textrm{d}\Omega \, \frac{1}{\lambda^4}\, \mathcal{N}(\vec{k}),
\end{equation}
where we used $\textrm{d}^3 k = k^2 \textrm{d}k \textrm{d}\Omega = (2\pi)^3\lambda^{-4} \textrm{d}\lambda \textrm{d}\Omega$ in the last step.
We neglect here the index of refraction of air and assume that the detector has a bandwidth of $\Delta \lambda$ and collects light from a solid angle $\Delta \Omega$.
In the following we introduce for a more convenient notation the Jacobian $\mathcal{D}(\lambda)= (2\pi)^3 \lambda^{-4}$, which is proportional to the mode density.
For a sufficiently small detector bandwidth around the wavelength $\lambda$ and a small solid angle around $\Omega$, we can perform the integration and find
\begin{equation}\label{e_rho_approx}
\varrho(\lambda, \Omega)\cong \frac{1}{(2\pi)^3}[\Delta \lambda \Delta \Omega]\mathcal{D}(\lambda) \mathcal{N}(\vec{k}),
\end{equation}
where $\mathcal{N}(\vec{k})$ implicitly depends on $\lambda$ and $\Omega$ through the wave vector $\vec{k}$.
This quantity is closely related to the spectral radiance $\hbar \omega (2\pi)^{-3}c \mathcal{D}(\lambda) \mathcal{N}$, which is
the energy per unit time, source area, solid angle, and bandwidth (in wavelength) of the detector~\cite{datla20051}.
To calculate the total number of photons that are detected, the density from eq.~\eqref{e_rho_approx} has to be integrated over the volume of the source,
\begin{equation}
\label{eq:N_lambda}
N(\lambda,\Omega)= \smashoperator{\int_{\text{source}}} \textrm{d}^3 r\, \varrho(\lambda, \Omega) \cong (2\pi)^{-3} [A_s c\tau_s][\Delta \lambda \Delta \Omega]\mathcal{D}\,\mathcal{N}
\end{equation}
where in the last step we assumed that the source has a surface area of $A_s$ and emits light for a time duration $\tau_s$.
We have not yet specified the photon distribution per plane wave mode $\mathcal{N}$.
We do that in the next section and show that the assumption of a small solid angle as well as a small bandwidth of the detector is justified.
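As a purely illustrative aid (added here; all numerical values in the sketch below are hypothetical placeholders, not our experimental parameters), eq.~\eqref{eq:N_lambda} can be evaluated in a few lines of code.
\begin{verbatim}
# Minimal sketch of the bookkeeping in N(lambda, Omega): detected photon
# number from the photon number per plane-wave mode.  All numbers used in
# the example call are hypothetical placeholders.
import numpy as np

def detected_photons(N_mode, lam, d_lam, d_omega, A_s, tau_s):
    c = 2.99792458e8                   # speed of light (m/s)
    D = (2.0 * np.pi)**3 / lam**4      # Jacobian D(lambda) ~ mode density
    return (2.0 * np.pi)**-3 * (A_s * c * tau_s) * (d_lam * d_omega) * D * N_mode

# hypothetical example call
print(detected_photons(N_mode=1e-7, lam=710e-9, d_lam=0.063e-9,
                       d_omega=1e-6, A_s=np.pi * (0.3e-3)**2, tau_s=29.4e-12))
\end{verbatim}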
\section{Angular distribution of spontaneous PDC}
The photon statistics per plane wave mode $\mathcal{N}$ for spontaneous PDC of a bulk crystal of length $L$ with a nonlinearity $\chi^{(2)}$ and illuminated by a plane wave pump with a field amplitude $E_p$ is
\begin{equation}\label{e_statistics}
\mathcal{N} = c^{-2}\,\left(L \chi^{(2)} E_p\right)^2\frac{\omega_s}{n_s(\omega_s)} \frac{\omega_i}{n_i(\omega_i)}\, \textrm{sinc}^2 ( \Delta \kappa L/2 ),
\end{equation}
where $\omega_s$ and $\omega_i$ are the frequencies of the signal and idler photons, and $n_s$ and $n_i$ are their respective indices of refraction.
The longitudinal wave vector mismatch $\Delta \kappa = k_p-\kappa_s-\kappa_i$ is the difference of the wave vector $k_p$ of the pump and the longitudinal wave vectors $\kappa_{s,i}\equiv \sqrt{k_{s,i}^2 - \vec{q}_{s,i}^{\;2}}$.
Here, the signal and idler photons have the wave vectors $k_s$ and $k_i$ and the transverse wave vectors $\vec{q}_s$ and $\vec{q}_i$.
Note that $k_j = \omega_j n_j / c$, with $c$ the speed of light and $\omega_j$ the frequency of the signal, idler and pump fields with $j=s,i,p$.
With this notation, we find the expression
\begin{equation}
\Delta \kappa = k_p - k_s \left(\kappa_s/k_s + \sqrt{(k_i/k_s)^2 - (\vec{q}_i/k_s)^2} \right),
\end{equation}
for the longitudinal wave vector mismatch.
Since we assume in \eqref{e_statistics} a plane wave and monochromatic pump, we have due to energy conservation $\omega_i=\omega_p-\omega_s$ and due to momentum conservation $\vec{q}_s = -\vec{q}_i$.
Hence, our expression depends only on $\omega_s$ and $\vec{q}_s$, which we can link to quantities of the detected field, which are written without a subscript.
We find the connection $\omega_s = 2\pi c/\lambda$ when we express every quantity by the detected wavelength $\lambda$.
Moreover, introducing spherical coordinates, we can define the polar angle $\theta$ of the detected field and have $\cos \theta = \kappa_s/k_s$ and $\sin \theta = |\vec{q}_s|/k_s$.
The longitudinal wave vector mismatch
\begin{equation}
\Delta \kappa = k_p - k_s \left(\cos \theta + \sqrt{(k_i/k_s)^2 - \sin^2 \theta} \right)
\end{equation}
therefore depends only on $\lambda$ and $\theta$, as does equation~\eqref{e_statistics}.
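To make this dependence explicit in a numerical evaluation, the following sketch (our addition) computes $\Delta \kappa$ and the resulting $\textrm{sinc}^2$ factor; the refractive-index functions \texttt{n\_s}, \texttt{n\_i} and \texttt{n\_p} are placeholders standing in for the Sellmeier equations of the crystal and must be supplied by the user.
\begin{verbatim}
# Sketch: longitudinal phase mismatch and sinc^2 factor for a plane-wave,
# monochromatic pump.  n_s, n_i, n_p are user-supplied refractive-index
# functions of the wavelength (placeholders here).
import numpy as np

def delta_kappa(lam, theta, lam_p, n_s, n_i, n_p):
    lam_i = 1.0 / (1.0 / lam_p - 1.0 / lam)   # energy conservation
    k_p = 2 * np.pi * n_p(lam_p) / lam_p
    k_s = 2 * np.pi * n_s(lam) / lam
    k_i = 2 * np.pi * n_i(lam_i) / lam_i
    return k_p - k_s * (np.cos(theta)
                        + np.sqrt((k_i / k_s)**2 - np.sin(theta)**2))

def sinc2(lam, theta, L, lam_p, n_s, n_i, n_p):
    x = delta_kappa(lam, theta, lam_p, n_s, n_i, n_p) * L / 2
    return np.sinc(x / np.pi)**2              # np.sinc(x) = sin(pi x)/(pi x)
\end{verbatim}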
In eq.~\eqref{e_rho_approx} we approximated the integral of $\mathcal{N}$ over $\text{d}\lambda$ and $\text{d}\Omega = \sin \theta\text{d}\theta \text{d}\phi$ by just multiplying the integration intervals.
This is of course only valid if $\mathcal{N}$ depends weakly on both $\lambda$ and $\theta$ over the range of interest.
\begin{figure}
\caption{Numerically generated spectrum of spontaneous PDC, plotting the product $\mathcal{D}(\lambda)\,\mathcal{N}$ as a function of the polar angle $\theta$ and the wavelength $\lambda$.}
\label{fig:xcurve}
\end{figure}
In the experiment we place a pinhole in the far field of the spontaneous PDC light to filter a small range of angles.
We show in the density plot of Fig.~\ref{fig:xcurve} the product $\mathcal{D}(\lambda)\mathcal{N}$ as a function of $\theta$ and $\lambda$ and mark the size of our pinhole by a semi-transparent white strip.
This numerical result is based on the Sellmeier equations of the three fields for BBO~\cite{eimerl1987optical}.
We further assume that $\mathcal{G}$ is constant in the wavelength range of interest, and we justify this assumption in the next section.
We work close to collinear propagation, with $\theta \approx 0$, where the function $\mathcal{D}(\lambda)\mathcal{N}$ does not vary significantly across the pinhole area so that we can perform the integration by just multiplying with the solid angle.
Similarly, the size of a pixel corresponds roughly to a bandwidth of $0.063$\,nm.
On this scale, the function $\mathcal{N}$ does not change significantly.
Hence, our approximation in eq.~\eqref{e_rho_approx} is valid for our setup.
Of course, an integration of the pinhole angle can be performed as well to obtain an even more accurate result, but at some point the contribution of other crystal properties such as its length $L$ as well as the dispersion relations of all the light fields will dominate.
In the spirit of an easy-to-implement calibration technique, we refrained from this more complex analysis but emphasize that it is possible.
In a similar manner, one could include both the frequency as well as the angular profile of the pump in eq.~\eqref{e_statistics}.
However, on axis this would not lead to a different result and our plane wave and monochromatic assumption is well-justified for our laser system.
\section{Wavelength dependence of gain}
In the main body of our article, we assumed that the wavelength dependence of the gain function
\begin{equation}\label{eq:gain}
\mathcal{G} = c^{-1} L \chi^{(2)} E_p / \sqrt{n_s n_i}
\end{equation}
can be neglected.
In this section, we investigate different effects that could contribute to the wavelength dependence in our experiment and demonstrate that they do not vary much across the spectral region of interest.
In addition to the linear dispersion ($n_s(\lambda)$ and $n_i(\lambda)$) and the nonlinear dispersion $\chi^{(2)}(\omega_p, \omega_s,\omega_i)$, both obvious from eq.~(\ref{eq:gain}), other contributions arise from tilting the crystal to scan different phase-matching conditions.
When the crystal is tilted, the Fresnel coefficients (for the pump and for the down-converted light) vary, and the effective length $L$ of the nonlinear crystal (defined as the length of propagation of the pump inside the crystal) changes.
The different Fresnel coefficients change the intensity of the pump inside the crystal, as well as how much of the down-converted light couples out of the crystal.
Using the Sellmeier equations for BBO~\cite{eimerl1987optical} and Miller's rule~\cite{BOYD} (relating the first-order and second-order susceptibilities), we estimate the impact of those contributions and show our results in Fig.~\ref{fig:cont}.
The largest deviations are attributed to the dispersion in the nonlinear susceptibility $\chi^{(2)}$ and to the change in the effective length of the nonlinear crystal upon tilting it.
However, over a spectral range of 300\,nm around degeneracy, the gain function $\mathcal{G}$ does not vary by more than 1\%.
\begin{figure}
\caption{Wavelength dependence of $\mathcal{G}$: estimated contributions to the variation of the gain function across the spectral range of interest.}
\label{fig:cont}
\end{figure}
\begin{figure}
\caption{Experimental setup. The down-converted light from the nonlinear crystal $\chi^{(2)}$ is filtered, angularly selected by a pinhole, and imaged onto the entrance slit of the spectrometer (see text for details).}
\label{fig:setup}
\end{figure}
\section{Experimental setup}
The third harmonic~($355\,$nm wavelength, $29.4\,$ps pulse duration, $50\,$Hz repetition rate) of a pulsed Nd:YAG laser is prepared to serve as the pump for PDC: a pair of dispersive prisms suppresses the spurious frequencies from the laser; a half-wave plate and $\alpha$-BBO Glan-Laser polarizer set the polarization; a pair of lenses (focal lengths $f_1 = 300\,\textrm{mm}$ and $f_2 = 100\,\textrm{mm}$, separated by the distance $f_1 + f_2$) bring the diameter of the laser beam down to approximately $0.6\,$mm; a pinhole of size 100\,\textmu m is introduced between the lenses at the beam focus (distance $f_1$ from the first lens) to spatially filter the beam; and another $\alpha$-BBO Glan-Laser polarizer confirms the polarization of the beam.
The remainder of the experimental setup is shown in Fig.~\ref{fig:setup}.
Parametric down-conversion is generated from the interaction of the pump beam with a nonlinear crystal $\chi^{(2)}$ ($\beta$-BBO, 3-mm thickness, type-I phase-matching, uncoated, cut for degenerate PDC with a 355-nm pump) on a motorized rotation mount.
The wavelengths that satisfy the phase-matching condition are tuned by varying the angle between the optic axis of the crystal and the wavevector of the pump.
Two dichroic mirrors (DM) suppress the pump after the crystal and reflect the pump light onto a photodiode (PD) to monitor its intensity.
A concave mirror (M$_1$) of focal length $200\,\textrm{mm}$ is used to bring the down-converted light to the far field, where a pinhole $\Delta \Omega$ ($0.5\,\textrm{mm}$ diameter) selects a small solid angle.
A broadband polarizing beam splitter (PBS) placed before the iris is set to transmit the polarization of the down-converted light.
A pair of lenses (L$_1$) and (L$_2$) of focal lengths $200\,\textrm{mm}$ and $150\,\textrm{mm}$ is used to image the iris onto the entrance slit (1-$\textrm{mm}$ wide) of the spectrometer, with a magnification of $4/3$.
The spectrometer is an imaging spectrograph (Acton SP-2558) with a CCD camera (PIXIS:100BR\_eXcelon, $1340 \times 100$ pixels of size 20\,\textmu m$ \times $20\,\textmu m).
The integration time for each spectrum is $500\, \textrm{ms}$.
Transverse hardware binning~(summing the photoelectron count for the 100 transverse pixels) is enabled.
Each spectrum spans the range from $450\, \textrm{nm}$ to $900\, \textrm{nm}$.
To cover this range, we need to repeat the acquisition for different angular positions of the grating (600 grooves per mm, 500-nm blaze).
The experiment is automated: after each acquisition by the spectrometer, the motorized holder rotates the crystal through an angle of about $0.01\degree$, up to a total change of approximately 8\degree.
The pump energy measured at the photo-diode is recorded for each position of the crystal.
The wavelength axis of the spectrometer is calibrated using a neon-argon lamp along with the Princeton Instruments Intellical system.
After the experiment a reference lamp is introduced at the crystal plane.
Its spectrum is acquired using the same experimental settings.
The reference response function that we use to verify our method is obtained by comparing the measured spectrum of a calibration lamp (LED-stack with a diffuser for relative intensity calibration) and a reference spectrum provided by Princeton Instruments.
\section{Details on the data analysis}
Our calibration method relies on the comparison of the measured phase-matched number of counts $M(\lambda_\textrm{PM})$ to the expected number of phase-matched photons $N(\lambda_\textrm{PM})$.
We therefore acquire a large number of spectra $M_j$ corresponding to different phase-matching conditions over a broad spectral range.
However, the peak number of counts in a measured spectrum does not correspond, in general, to $M(\lambda_\textrm{PM})$.
Instead, we can extract the response function from the properties of $\mathcal{N}$.
From the main text, we know that
\begin{equation}
\mathcal{N} \sim \omega(\omega_p - \omega) \, \mathrm{sinc}^2 ( \Delta \kappa L/2 ) \leq \omega (\omega_p - \omega),
\end{equation}
where the inequality becomes equality only for phase matching $\Delta \kappa = 0$.
We denote the wavelength of phase matching with $\lambda_\text{PM}$.
With eq.~(5) from the main text we find the inequality
\begin{equation}
\label{eq:Rgreater}
R(\lambda)\geq R(\lambda) \operatorname{sinc}^2 \frac{\Delta \kappa L}{2} \propto \frac{M_j(\lambda)}{\mathcal{D}(\lambda) \omega (\omega_p-\omega)}
\end{equation}
with an equality sign for $\lambda=\lambda_\text{PM}$.
If we approximate the phase matching function by a Gaussian, i.e.,
\begin{equation}
\textrm{sinc}^2(\Delta \kappa L /2 ) \propto \textrm{exp}[ -(\lambda - \lambda_{\textrm{PM}})^2/(2\sigma_\lambda ^2)],
\end{equation}
it is easy to show that the peak of the product $R(\lambda)\operatorname{sinc}^2 (\Delta \kappa L/2)$ shifts to the wavelength
\begin{equation}
\tilde{\lambda} = \lambda_{\textrm{PM}} + \frac{1 }{R}\frac{\textrm{d}R }{\textrm{d}\lambda}\bigg\rvert_{\tilde{\lambda}} \sigma_\lambda^2.
\label{eq:shift}
\end{equation}
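Indeed (a short check we add for completeness), setting the derivative of the Gaussian-approximated product to zero at $\lambda = \tilde{\lambda}$ gives
\begin{equation*}
\frac{\textrm{d}}{\textrm{d}\lambda}\left[ R(\lambda)\, \textrm{e}^{ -(\lambda - \lambda_{\textrm{PM}})^2/(2\sigma_\lambda ^2)}\right]_{\tilde{\lambda}} = 0
\quad\Rightarrow\quad
\frac{\textrm{d}R }{\textrm{d}\lambda}\bigg\rvert_{\tilde{\lambda}} = R(\tilde{\lambda})\, \frac{\tilde{\lambda}-\lambda_{\textrm{PM}}}{\sigma_\lambda^{2}},
\end{equation*}
which rearranges to eq.~\eqref{eq:shift}.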
Hence, the shift between the phase-matched wavelength and the peak increases with the steepness of the slope of the response function and with the width of the peak.
Since the response function is not known but is the result of the calibration procedure, eq.~\eqref{eq:shift} cannot be used to determine the phase-matching wavelength.
However, eq.~\eqref{eq:Rgreater} directly gives a method to determine the response function despite the shift:
when we acquire a large number of spectra $M_j$, each with a slightly varying $\lambda_\text{PM}$, the amplitude of $M_j/[\mathcal{D}\omega (\omega_p-\omega)]$ at one particular wavelength is the largest if the wavelength corresponds to $\lambda_\text{PM}$.
Hence, we obtain the response function from
\begin{equation}
R(\lambda) = \underset{j}{\max} \left[ \frac{M_j(\lambda)}{\mathcal{D}(\lambda) \omega (\omega_p -\omega)} \right] \bigg/ \underset{j}{\max} \left[ \frac{4M_j(2 \lambda_p)}{\mathcal{D}(2 \lambda_p) \, \omega_p^2} \right],
\label{eq:max}
\end{equation}
where we normalize the response function to unity at the degenerate wavelength $\lambda = 2 \lambda_p$.
To reduce errors in the analysis according to eq.~\eqref{eq:max}, we suppress the high-frequency content of each spectrum $M_j(\lambda)$ by filtering it out with a fast-Fourier-transform procedure.
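To illustrate this step, the following minimal Python sketch (our own; all array and function names are hypothetical, and the spectra are assumed to be stored on a common wavelength grid) applies a crude FFT low-pass filter to each corrected spectrum and then takes the pointwise maximum, normalized at the degenerate wavelength as in eq.~\eqref{eq:max}.
\begin{verbatim}
import numpy as np

def response_function(wavelengths, spectra, D, lambda_p, keep_fraction=0.05):
    """Relative response R(lambda): pointwise maximum over corrected spectra.

    wavelengths : (L,) wavelength grid in nm
    spectra     : (J, L) measured spectra M_j(lambda)
    D           : (L,) conversion factor D(lambda)
    lambda_p    : pump wavelength in nm (degenerate wavelength is 2*lambda_p)
    """
    c = 299792458.0
    omega = 2 * np.pi * c / (wavelengths * 1e-9)
    omega_p = 2 * np.pi * c / (lambda_p * 1e-9)

    corrected = spectra / (D * omega * (omega_p - omega))  # M_j/[D w (w_p - w)]

    # suppress high-frequency noise with a simple FFT low-pass filter
    F = np.fft.rfft(corrected, axis=1)
    F[:, int(keep_fraction * F.shape[1]):] = 0.0
    smoothed = np.fft.irfft(F, n=corrected.shape[1], axis=1)

    R = smoothed.max(axis=0)                               # envelope over all j
    i_deg = np.argmin(np.abs(wavelengths - 2 * lambda_p))
    return R / R[i_deg]                                    # R(2*lambda_p) = 1
\end{verbatim}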
A similar idea can be used for absolute calibration.
For an arbitrary $\mathcal{G}$, the photon distribution per plane-wave mode assuming a monochromatic plane wave pump can be written as~\cite{klyshko1989photons}
\begin{equation}\label{eq:N_HG}
\mathcal{N}^\text{(HG)}=\frac{\mathcal{G}^2 \mathcal{Q}^2}{\mathcal{G}^2 \mathcal{Q}^2-(\Delta \kappa L/2)^2} \sinh^2 \sqrt{\mathcal{G}^2 \mathcal{Q}^2-(\Delta \kappa L/2)^2},
\end{equation}
where $\mathcal{Q}^2\equiv \omega (\omega_p-\omega)$, and the superscript $\text{(HG)}$ highlights that we are using this equation to describe the high-gain regime of PDC.
Since the maximum of this function occurs for phase matching ($\Delta \kappa = 0$), we find
\begin{equation}
\label{eq:N_PM}
\mathcal{N}^\text{(HG)} \leq \sinh^2 (\mathcal{G}\mathcal{Q}) \equiv \mathcal{N}^\text{(HG)}_\text{PM}
\end{equation}
where we defined the phase-matched photon distribution $\mathcal{N}^\text{(HG)}_\text{PM}$ that has the well-known hyperbolic form of parametric amplification and is used in the main body of our article.
Note further that for $\mathcal{GQ}\ll 1$ we recover the low-gain result.
The quantum efficiency at the degenerate wavelength $\alpha = \eta (2\lambda_p)$ is
\begin{equation}
\alpha = M_j(\lambda)/[R(\lambda)N(\lambda)]
\end{equation}
with the definitions from the main body of the article.
With that, we find from eq.~\eqref{eq:N_PM} and with the help of eq.~\eqref{eq:N_lambda} the inequality
\begin{equation}
\alpha \sinh^2 \mathcal{G}\mathcal{Q} \geq M_j(\lambda)/[R(\lambda)\mathcal{D}(\lambda)\Delta \Omega\Delta \lambda A_s c \tau_s],
\end{equation}
where again the equal sign is valid for $\lambda = \lambda_\text{PM}$.
Hence, we find, similarly to the low-gain method,
\begin{equation}
\alpha \sinh^2 \mathcal{G}\mathcal{Q}= \underset{j}{\max}\left[ \frac{M_j(\lambda)}{R(\lambda)\mathcal{D}(\lambda)\Delta \Omega\Delta \lambda A_s c \tau_s}\right]
\end{equation}
as an exact equality if the spectra are sufficiently dense.
Taking the maximum of all recorded spectra, each one of them divided by $R(\lambda)\mathcal{D}(\lambda)$ and a numerical factor that depends on laboratory parameters (spatial dimensions and bandwidths), we can fit the data to the function $\alpha \sinh^2 \mathcal{G}\mathcal{Q}$ with two fitting parameters $\alpha$ and $\mathcal{G}$.
Note that we do not need to measure the exponential increase of the generated photons with increasing pump intensity, but determine both parameters from the distortion of the \emph{spectral shape} of the maximum of all spectra.
With this fitting procedure, one can not only determine the quantum efficiency $\eta(\lambda) = \alpha R(\lambda)$, but also the gain $\mathcal{G}$.
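As a sketch of how such a two-parameter fit could be implemented (our own illustration; variable names are hypothetical and the corrected envelope is assumed to be precomputed as described above), one may use a standard least-squares routine:
\begin{verbatim}
import numpy as np
from scipy.optimize import curve_fit

def fit_alpha_and_gain(wavelengths, corrected_max, lambda_p):
    """Fit alpha*sinh^2(G*Q) to the maximum of the corrected high-gain spectra.

    corrected_max : max over j of M_j/[R D dOmega dlambda A_s c tau_s]
    Returns (alpha, G): quantum efficiency at 2*lambda_p and the gain.
    """
    c = 299792458.0
    omega = 2 * np.pi * c / (wavelengths * 1e-9)
    omega_p = 2 * np.pi * c / (lambda_p * 1e-9)
    Q = np.sqrt(omega * (omega_p - omega))     # Q = sqrt(w (w_p - w))

    def model(q, alpha, gain):
        return alpha * np.sinh(gain * q) ** 2

    p0 = (0.3, 1.0 / Q.mean())                 # rough starting values
    (alpha, gain), _ = curve_fit(model, Q, corrected_max, p0=p0)
    return alpha, gain
\end{verbatim}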
Even though we do not use the exponential increase with the pump power for our calibration method, we still record the intensity while scanning different phase matching functions.
We do this to correct for drifts and fluctuations during the course of one measurement.
We are then able to perform the fitting procedure using $\mathcal{G}/E_j$, where $E_j$ is the pump field amplitude during the measurement corresponding to the $j$th phase-matching condition.
The $\alpha$ obtained using our method for absolute calibration is compared to an estimated quantum efficiency based on the properties of each optical component in the experimental setup, listed in table~\ref{table:1}.
The efficiency of uncoated components is estimated from the Fresnel coefficients, while the efficiency of coated components is taken from the manufacturers.
\begin{table} [htb]
\caption{Contribution of each optical component to the total quantum efficiency of the experimental setup.
The parentheses denote the number of components.
The total efficiency is obtained by multiplying all the contributions and propagating the uncertainties accordingly.
}
\centering
\begin{tabular}{lr}
\hline
Optical component \quad & Efficiency \\
\hline
Crystal output facet (1) & $0.94 \pm 0.01$ \\
Dichroic mirror (2) & $0.95 \pm 0.01$ \\
Dielectric mirrors (6) \quad \quad & $0.99 \pm 0.01$ \\
Polarizing beam splitter (1) \quad \quad & $0.98 \pm 0.01$ \\
Uncoated lens (2) & $0.92 \pm 0.01$ \\
Diffraction grating (1) & $0.60 \pm 0.02$ \\
Spectrometer camera (1) & $0.95 \pm 0.02$ \\
\hline
Total & $0.38 \pm 0.07$ \\
\hline
\end{tabular}
\label{table:1}
\end{table}
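A minimal sketch of this budget (values copied from table~\ref{table:1}; the worst-case linear propagation of the relative uncertainties is our assumption, chosen because it approximately reproduces the quoted total, whereas adding the relative uncertainties in quadrature would give a smaller error bar):
\begin{verbatim}
import math

# (efficiency, absolute uncertainty, multiplicity) per optical component
components = [
    (0.94, 0.01, 1),  # crystal output facet
    (0.95, 0.01, 2),  # dichroic mirrors
    (0.99, 0.01, 6),  # dielectric mirrors
    (0.98, 0.01, 1),  # polarizing beam splitter
    (0.92, 0.01, 2),  # uncoated lenses
    (0.60, 0.02, 1),  # diffraction grating
    (0.95, 0.02, 1),  # spectrometer camera
]

total = math.prod(eff ** n for eff, _, n in components)
rel_err = sum(n * err / eff for eff, err, n in components)  # worst-case sum
print(f"total = {total:.2f} +/- {total * rel_err:.2f}")     # ~ 0.38 +/- 0.07
\end{verbatim}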
\section{Spontaneous regime of parametric down-conversion}
\begin{figure}
\caption{PDC spectral density as a function of the pump energy per pulse. The number of counts was extracted at the phase-matched wavelength $\lambda = 690\,\textrm{nm}$.}
\label{fig:lin}
\end{figure}
As shown in the section above in eq.~\eqref{eq:N_HG}, the photon-number distribution grows exponentially with the intensity of the pump.
In the low-gain regime, where the photon pairs are generated spontaneously, the number of photons grows linearly with the intensity, as can be seen from the expansion
\begin{equation}
\mathcal{N}_\text{PM}^\text{(HG)} = \sinh^2 \mathcal{GQ} \cong \mathcal{G}^2\mathcal{Q}^2 = \mathcal{G}^2 \omega(\omega_p-\omega) = \mathcal{N}_\text{PM} ,
\end{equation}
where $\mathcal{N}_\text{PM}$ is the low-gain photon distribution for phase matching.
To obtain the response function $R(\lambda)$, we do not need to know the exact value of $\mathcal{G}$ but rely on the fact that the first-order expansion above is valid.
Note that $\mathcal{G}^2$ is proportional to the intensity of the pump $I_p$~\cite{klyshko1989photons}.
To verify that we work in the spontaneous regime of PDC, we measure the number of counts for a single wavelength and increase the pump intensity.
The results are shown in Fig.~\ref{fig:lin}.
We see that we are well within the linear regime up to roughly 150\,\textmu J.
We performed the relative calibration experiment at a pump energy of 100\,\textmu J per pulse, while the high-gain part of the experiment used a more intense pump, around 200\,\textmu J and higher.
\end{document}
|
\begin{document}
\begin{abstract}
Lehmer's totient problem asks whether there exist composite integers $n$ satisfying the condition $\varphi(n)|(n-1)$ (where $\varphi$ is the Euler phi function), while Carmichael numbers satisfy the weaker condition $\lambda(n)|(n-1)$ (where $\lambda$ is the Carmichael universal exponent function). We weaken the condition further, looking at those composite $n$ where each prime divisor of $\varphi(n)$ also divides $n-1$ (so that $\rad{\varphi(n)}|(n-1)$). While these numbers appear to be far more numerous than the Carmichael
numbers, we show that their distribution has the same rough upper bound as that of the Carmichael numbers, a bound which is heuristically tight.
\end{abstract}
\title{RADICALLY WEAKENING THE LEHMER AND CARMICHAEL CONDITIONS}
\section{Introduction}
Let $\varphi(n)$ denote the Euler totient function of $n$. Lehmer \cite{Lehmer} asked whether there exist composite positive integers $n$ such that $\varphi(n)|n-1$. Integers which satisfy this \lq\lq Lehmer Condition'' are sometimes referred to as Lehmer numbers; however, no examples are known. Cohen and Hagis \cite{Cohen} have shown that any Lehmer numbers would necessarily have at least 14 prime factors, and computations by Pinch \cite{Pinch_Lehmer} show that any examples must be greater than $10^{30}$. Further, Luca and Pomerance \cite{Luca} have shown that if $\mathcal{L}(x)$ is the number of Lehmer numbers up to $x$ then, as $x \to \infty$,
\[\mathcal{L}(x) \leq \frac{x^{1/2}}{(\log x)^{1/2+o(1)}}.\]
Carmichael numbers are the composite integers $n$ which satisfy the congruence $a^n \equiv a \pmod{n}$ for every integer $a$. (Fermat's little theorem guarantees that any prime number $n$ satisfies this congruence.) Carmichael numbers were first characterized by Korselt \cite{Korselt} in 1899:
\begin{korselt}A composite number $n$ is a Carmichael number if and only if $n$ is square-free, and for each prime $p$ which divides $n$, $p - 1$ divides $n - 1$.
\end{korselt}
Korselt did not find any Carmichael numbers, however. The smallest, 561, was found by Carmichael in 1910 \cite{Carmichael}. Carmichael also gave a new characterization of these numbers as those composite $n$ which satisfy $\lambda(n)|n-1$, where $\lambda(n)$, the Carmichael lambda function, denotes the size of the largest cyclic subgroup of $(\mathbb{Z}/n\mathbb{Z})^\times$.
Since $\lambda(n)|\varphi(n)$ for every integer $n$, the Carmichael property can be viewed as a weakening of the Lehmer property. Every Lehmer number would also be a Carmichael number. In contrast to the Lehmer numbers, it is known, due to Alford, Granville and Pomerance \cite{Infinite}, that there are infinitely many Carmichael numbers. Pomerance \cite{Pomerance} also proves an upper bound for the number $C(x)$ of Carmichael numbers up to $x$, namely as $x \to \infty$,
\begin{equation}C(x) \leq x^{1-\{1+o(1)\}\log\log\log x /\log\log x}, \label{car}
\end{equation}
and presents a heuristic argument that this is the true size of $C(x)$.
Grau and Oller-Marc{\'e}n \cite{Grau} present other possible weakenings of the Lehmer property: looking at the sets of those $n$ such that $\varphi(n)|(n-1)^k$ for a fixed value of $k$, as well as the set of those $n$ for which $\varphi(n)|(n-1)^k$ for some $k$, that is, all of the primes dividing $\varphi(n)$ also divide $n-1$. Note that this last set is a weakening of both the Lehmer and Carmichael properties, since $\lambda(n)$ and $\varphi(n)$ have the same prime divisors. Our results resolve several conjectures that Grau and Oller-Marc{\'e}n made in their paper.
We focus primarily on this final set. Let $\kappa(n) = \rad{\varphi(n)}$ denote the product of the primes which divide the value $\varphi(n)$. (Note that $\kappa(n) = \rad{\varphi(n)} = \rad{\lambda(n)}$.) Let $\mathbb{K}(x) $ be the set of composite numbers $n \leq x$ which satisfy $\kappa(n)| n-1$, and let $K(x) = |\mathbb{K}(x)|$. (Observe that every prime number $p$ trivially satisfies $\kappa(p)|p-1$.)
We prove that the upper bound \eqref{car} for $C(x)$ also applies for $K(x)$. We also present upper bounds for the number of $n \in \mathbb{K}(x)$ which are the product of a fixed number of primes, as well as several related conjectures and computations.
\section{The Upper Bound}
The condition for $n$ to be a member of $\mathbb{K}(x)$ is substantially weaker than that required for $n$ to be a Carmichael number, and computations (see Section \ref{sec:computations}) show that $K(x)$ appears to be substantially greater than $C(x)$. It is therefore somewhat surprising to find that these two functions have the same rough upper bound. Our proof of this fact is similar to the one for $C(x)$ in \cite{Pomerance}.
\begin{theorem} Define $L(x) = \exp(\log x \frac{\log\log\log x}{\log\log x})$. Then as $x \rightarrow \infty$, \label{Main}
\[K(x) \leq \frac{x}{L(x)^{1 + o(1)}}.\]
\begin{proof} We consider first those integers $n \leq x$ which have a large prime divisor. Specifically, let $P(n)$ denote the largest prime divisor of $n$, and write $n = mp$ where $p = P(n)$. We restrict our attention to those $n$ with $P(n) > L(x)^2$, and let $K'(x) = \#\{n \in \mathbb{K}(x) \mid P(n) > L(x)^2\}$.
If $n=mp$ is to satisfy $\kappa(n) | n-1$, then we must have $m \leq \frac{x}{p} $, and $m$ must be congruent to 1 $\pmod{ \rad{p-1}}$. Thus, for any fixed $p$ there are at most $1 + \lfloor\frac{x}{p\cdot\rad{p-1}}\rfloor$ possibilities for $m$. Requiring $n$ to be composite (thus $m \neq 1$) leaves us with at most $\frac{x}{p\cdot\rad{p-1}}$ possibilities.
Thus we see that
\begin{align} K'(x) &= \sum_{\substack{n = mp\leq x \\ p > L(x)^2 \\ \kappa(n)|n-1} } 1 \leq \sum_{\substack{p > L(x)^2} } \frac{x}{p \hspace{1mm} \rad{p-1} } \notag \\
&\leq \sum_{\substack{p > L(x)^2} } \frac{x}{(p-1) \hspace{1mm} \rad{p-1}}. \label{squarefree}
\end{align}
Now, we observe that for each prime $p$, the denominator in \eqref{squarefree} is a squarefull number, and that any squarefull number can be represented uniquely as $d \hspace{1mm} \rad{d}$ for some integer $d$. We can therefore replace this sum with a sum over all squarefull numbers:
\begin{align*}
\sum_{\substack{p > L(x)^2} } \frac{x}{(p-1) \hspace{1mm} \rad{p-1} } & \leq \sum_{\substack{d > L(x)^2 \\ d \text{ squarefull}} } \frac{x}{d} .
\end{align*}
Using partial summation and the fact that
\[\sum_{\substack{n \leq x \\ n \text{ squarefull}} } 1 = \frac{\zeta(3/2)}{\zeta(3)} x^{1/2} + O(x^{1/3}),\]
we see that
\[K'(x) \leq \sum_{\substack{d > L(x)^2 \\ d \text{ squarefull}} } \frac{x}{d} \ll \frac{x}{L(x)}.\]
We may assume that $n>\frac{x}{L(x)}$, so to prove the theorem, it suffices to count those $n$ with $\frac{x}{L(x)} < n \leq x$ and $P(n) \leq L(x)^2$. We denote this count by $K''(x)$. Observe that every such $n$ has a divisor $d$ satisfying \begin{equation}\label{div} \frac{x}{L(x)^3} < d \leq \frac{x}{L(x)}.\end{equation}
Write $n =md$, so $m\leq \frac{x}{d}$. Now, if $n = md$ is to satisfy $\kappa(md)|md-1$, we have $m \equiv 1 \pmod{\kappa(d)}$, and since $(n,\kappa(n)) = 1$ and $\kappa(d)|\kappa(n)$ we know $(d,\kappa(d))=1$. Thus the Chinese remainder theorem implies that there are at most $1 + \lfloor\frac{x}{d\kappa(d)}\rfloor$ possibilities for $m$. Thus
\[K''(x) \leq \sideset{}{'}\sum \left( 1 + \left\lfloor \frac{x}{d\kappa(d)} \right\rfloor \right) \leq \frac{x}{L(x)} + \sideset{}{'}\sum \left\lfloor \frac{x}{d\kappa(d)} \right\rfloor ,\]
where $\sideset{}{'}\sum$ denotes a sum over $d$ satisfying \eqref{div}. If $d\kappa(d) \leq x$ and $d$ satisfies \eqref{div}, then $\kappa(d) < L(x)^3$, so that
\begin{align}K''(x) &\leq \frac{x}{L(x)} + \sideset{}{'}\sum \left\lfloor \frac{x}{d\kappa(d)} \right\rfloor \notag \\
&\leq \frac{x}{L(x)} + x \sum_{c \leq L(x)^3} \frac{1}{c} \sideset{}{'}\sum_{\kappa(d) = c}\frac{1}{d}. \label{doublesum}
\end{align}
We treat the inner sum in \eqref{doublesum} by partial summation:
\begin{align} \sideset{}{'}\sum_{\kappa(d) = c}\frac{1}{d} = \frac{L(x)}{x} \sideset{}{'}\sum_{\kappa(d) = c} 1 + \int_{\frac{x}{L(x)^3}}^{\frac{x}{L(x)}}\hspace{2mm}\frac{1}{t^2} \hspace{2mm} \sideset{}{'}\sum_{\substack{\kappa(d) = c \\ d< t}} 1\hspace{2mm} dt. \label{parsum}
\end{align}
We are thus interested in obtaining an upper bound for $\mathcal{K}(t,c)$, the number of $d \leq t$ with $\kappa(d) = c$.
\begin{lemma} \label{uniform} As $t \rightarrow \infty$, $\mathcal{K}(t,c) \leq \frac{t}{L(t)^{1+o(1)}}$ uniformly for all $c$.
\end{lemma}
Before proving the lemma, we see that using this upper bound in \eqref{parsum} gives us
\begin{align*} \sideset{}{'}\sum_{\kappa(d) = c}\frac{1}{d} &\leq \frac{L(x)}{x} \mathcal{K}(\tfrac{x}{L(x)},c) + \int_{\frac{x}{L(x)^3}}^{\frac{x}{L(x)}}\hspace{2mm}\frac{1}{t^2} \hspace{2mm} \mathcal{K}(t,c)\hspace{2mm} dt \\
&\leq {L(\tfrac{x}{L(x)})^{-1+o(1)}} + \int_{\frac{x}{L(x)^3}}^{\frac{x}{L(x)}}\hspace{2mm}\frac{1}{tL(t)^{1+o(1)}} \hspace{2mm} dt \\
&= L(x)^{-1+o(1)}
\end{align*}
as $x \to \infty$. This can be used in \eqref{doublesum} to see that $K''(x) \leq \frac{x}{L(x)^{1+o(1)}}$. The theorem then follows immediately from our estimates of $K'(x)$ and $K''(x)$.
It thus remains to prove Lemma \ref{uniform}. We may assume that $c \leq t$, otherwise $\mathcal{K}(t,c) = 0$. Then, for any $r > 0$ we can write:
\begin{align*}
\mathcal{K}(t,c) &= \sum_{\substack{d \leq t\\ \kappa(d) = c}} 1 \leq t^r \sum_{\substack{\kappa(d) = c}} d^{-r}\\
& \leq t^r \sum_{\substack{p|d \Rightarrow \rad{p-1}|c}} d^{-r} = t^r \prod_{\substack{\rad{p-1}|c}}\tfrac{1}{1-p^{-r}}.
\end{align*}
Assuming $r \geq 1/2 + \epsilon$ then
\begin{align*} \prod_{\substack{\rad{p-1}|c}}\tfrac{1}{1-p^{-r}} &= \exp\left(\sum_{\substack{\rad{p-1}|c}}-\log({1-p^{-r})}\right) = \exp\left(\sum_{\substack{\rad{p-1}|c}}\hspace{3mm}\sum_{n=1}^\infty \frac{p^{-nr}}{n}\right) \\
&= \exp\left(\left(\sum_{\substack{\rad{p-1}|c}}p^{-r}\right) +
O_\epsilon(1)\right).
\end{align*}
So we have
\begin{align*} \mathcal{K}(t,c) &\ll_{\epsilon} t^r \exp \left(\sum_{\text{rad}(p-1) | c} p^{-r} \right) \leq t^r \exp \left(\sum_{\text{rad}(l) | c} l^{-r} \right) \\
& = t^r \exp \left(\prod_{p|c} (1-p^{-r})^{-1} \right) \leq t^r \exp\exp \left(\sum_{p|c}p^{-r} + O_\epsilon(1) \right)
\end{align*}
by applying this trick a second time.
Now, $\sum_{p|c}p^{-r}$ is maximized when $c$ is the largest primorial up to $t$, in other words $c = p_1p_2 \cdots p_k < t$, where $p_i$ is the $i$th prime. Further, if $t$ is sufficiently large, then the prime number theorem implies that $p_k \leq 2\log(t)$ and thus
\[ \sum_{p|c}p^{-r} \leq \sum_{p<2\log(t)}p^{-r} \]
Choose $r = 1-(\log\log\log t)/(\log\log t)$. Thus for large $t$, we may choose $\epsilon = 1/4$. Then we have $t^r = \frac{t}{L(t)}$ and \[\sum_{p<2\log(t)}p^{-r} = O(\log\log t/\log\log\log t).\] Thus
\begin{align*}\mathcal{K}(t,c) &\leq t^r \exp\exp \left(\sum_{p|c}p^{-r} + O_\epsilon(1) \right) \\
&= \frac{t}{L(t)}\exp\exp(O(\log\log t /\log\log\log t)) = \frac{t}{L(t)^{1 + o(1)}},
\end{align*}
as $t \to \infty$, which completes the proof of the lemma.
\end{proof}
\end{theorem}
\section{Bounds for integers in $\mathbb{K}(x)$ with $d$ prime factors}
Since the integers satisfying our condition have asymptotic behavior similar to that of the Carmichael numbers, it is natural to wonder whether those with a fixed number of prime factors behave similarly as well. Granville and Pomerance \cite{Granville} conjecture that the number, $C_d(x)$, of Carmichael numbers with exactly $d$ prime factors is $x^{1/d + o(1)}$ as $x \to \infty$, for each fixed $d\geq 3$. This has not been proven for any $d$. However, Heath-Brown \cite{Heath} has shown that $C_3(x) \ll_\epsilon x^{7/20+\epsilon}$. Note that there are no Carmichael numbers with 2 prime factors.
Let $K_d(x) = \#\{n \in \mathbb{K}(x), \omega(n)=d\}$ count the integers satisfying our condition up to $x$ with exactly $d$ prime factors. Using the same method as the first part of Theorem \ref{Main} we can prove
\begin{theorem} Uniformly for $d\geq 2$ we have the bound $K_d(x) \ll x^{1-\frac{1}{2d}}$. \label{kd}
\end{theorem}
\begin{proof} Consider first those $n > x/2$. Since $n$ has $d$ prime factors, the largest prime factor must then satisfy $P(n) > (x/2)^{1/d}$. Applying the same argument used for integers $n$ with a large prime factor in Theorem \ref{Main}, we find that the total contribution of such integers is at most $O(x^{1-\frac{1}{2d}})$. Hence, $K_d(x) - K_d(x/2) \ll x^{1-\frac{1}{2d}}$.
Now summing dyadically we have
\[K_d(x) = \sum_{i=0}^\infty \left[K_d(2^{-i}x)-K_d(2^{-i-1}x)\right] \ll \sum_{i \geq 0} \left(\frac{x}{2^i}\right)^{1-\frac{1}{2d}} \ll x^{1-\frac{1}{2d}}. \]
\end{proof}
In contrast to the situation for Carmichael numbers, there do exist numbers satisfying our condition with two prime factors, and we can prove a substantially better bound than that of Theorem \ref{kd} in this case. As a matter of fact, their behavior appears to be like that conjectured for Carmichael numbers with a given number of prime factors.
\begin{theorem} The numbers in $\mathbb{K}(x)$ with exactly two prime factors satisfy the bound $K_2(x) \leq x^{1/2}\exp\left(\frac{2(2\log x)^{1/2}}{\log\log x}\left(1+O\left(\tfrac{1}{\log\log x}\right)\right)\right)$.
\end{theorem}
\begin{proof}Write $n = pq \leq x$. Since $\kappa(pq) = \rad{(p-1)(q-1)}$ and $pq-1 = (p-1)(q-1)+(p-1)+(q-1)$ we have that $\kappa(pq)|pq-1$ if and only if $\rad{p-1}=\rad{q-1}$. Thus
\begin{align*}
K_2(x) &= \sum_{\substack{pq\leq x\\ \kappa(pq)|pq-1}} 1
\hspace{2mm}= \sum_{\substack{pq\leq x\\ \rad{p-1}=\rad{q-1}}} \hspace{-5mm} 1 \hspace{4mm} \leq \sum_{\substack{(m+1)(n+1) \leq x\\ \rad{m}=\rad{n}}} 1 \\ &\leq \sum_{\substack{mn \leq x\\ \rad{m}=\rad{n}}} 1 \hspace{2mm} \leq x^r \sum_{\substack{mn\leq x\\ \rad{m}=\rad{n}}} \frac{1}{(mn)^r}
\end{align*}
for any $r \geq 0$. We can rewrite this as a double sum:
\begin{align*}
x^r \hspace{-7mm} \sum_{\substack{mn\leq x\\ \rad{m}=\rad{n}}} \frac{1}{(mn)^r} &=x^r \sum_{m\leq x} \frac{1}{m^r}\sum_{\substack{n\leq x/m\\ p|m \text{ iff } p|n}} \frac{1}{n^r} \leq x^r \sum_{m\leq x} \frac{1}{m^r}\prod_{\substack{p|m}} \frac{\frac{1}{p^r}}{1-\frac{1}{p^r}} \\
&= x^r \sum_{m\leq x} \frac{1}{m^r\rad{m}^r}\prod_{\substack{p|m}} \frac{1}{1-p^{-r}}\\
&= x^r \sum_{m\leq x} \frac{1}{m^r\rad{m}^r}\exp\left(\sum_{\substack{p|m}}-\log\left(1-p^{-r}\right)\right)\\
& = x^r\sum_{m\leq x} \frac{1}{m^r\rad{m}^r}\exp\left(\sum_{\substack{p|m}} \sum_{j=1}^\infty \frac{p^{-jr}}{j}\right).
\end{align*}
As in the proof of Lemma \ref{uniform}, we can replace the condition $p|m$ above with $p\leq 2 \log x$, and $m\hspace{1mm}\rad{m}$ by a squarefull integer $d$. We also set $r = 1/2$. Thus:
\begin{align*}
x^{1/2}\sum_{m\leq x} &\frac{1}{m^{1/2}\rad{m}^{1/2}}\exp\left(\sum_{\substack{p|m}} \sum_{j=1}^\infty \frac{p^{-j/2}}{j}\right)
\\& \leq x^{1/2}\exp\left(\sum_{\substack{p\leq 2\log x}}\left( p^{-1/2} + \frac{1}{2p} + \sum_{j=3}^\infty \frac{p^{-j/2}}{j}\right)\right)\sum_{\substack{d\leq x^2 \\ d \text{ squarefull}}} \frac{1}{d^{1/2}}.
\end{align*}
By the prime number theorem we have \[\sum_{\substack{p\leq 2\log x}} p^{-1/2} = \text{li}\left(\left(2\log x\right)^{1/2}\right)\left(1+O\left(\frac{1}{\log\log x}\right)\right).\]
So we can rewrite the expression above as
\begin{align*}
x^{1/2}\exp & \left( \text{li} \left((2\log x)^{1/2}\right)\left(1+O\left(\tfrac{1}{\log\log x}\right)\right ) + \tfrac{1}{2}\log\log\log x + O(1) \right) \hspace{-4mm}\sum_{\substack{d\leq x^2 \\ d \text{ squarefull}}}\hspace{-4mm}\frac{1}{d^{1/2}} \\
&= x^{1/2}\exp\left(\frac{2(2\log x)^{1/2}}{\log\log x}\left(1+O\left(\tfrac{1}{\log\log x}\right)\right)\right) \hspace{-4mm}\sum_{\substack{d\leq x^2 \\ d \text{ squarefull}}}\hspace{-4mm}\frac{1}{d^{1/2}}.
\end{align*}
By partial summation, we see that \[\sum_{\substack{d\leq x^2 \\ d \text{ squarefull}}}\hspace{-4mm}\frac{1}{d^{1/2}} = O(\log x),\]
which can be absorbed into the existing error term in our equation, proving the theorem.
\end{proof}
Note that if we assume a strong form of the prime $k$-tuples conjecture, due to Hardy and
Littlewood, we can show that this is fairly close to the actual size of $K_2(x)$. Their conjecture implies that the number of integers $m$ up to $x^{1/2}$ with both $m+1$ and
$2m+1$ prime is asymptotically $cx^{1/2}/(\log x)^{2}$. Now, whenever both are prime, (and $m \neq 1$) we see that
$\kappa((m+1)(2m+1)) = \rad{2m^2} = \rad{m}$, (since $m$ is necessarily even) and ${\rad{m}|(m+1)(2m+1)-1}$. Thus $K_2(x)$ would be at least of order $x^{1/2}/(\log x)^{2}$.
\section{$k$-Lehmer Numbers}
Grau and Oller-Marc{\'e}n \cite{Grau} define a $k$-Lehmer number to be an integer $n$ satisfying the condition $\varphi(n)|(n-1)^k$. (Note that they do not require $n$ to be composite, as we have in our definitions.) In their paper they make several conjectures about the counts of these $k$-Lehmer numbers. Our Theorem \ref{Main}, which shows in particular that $K(x) = O(\pi(x))$ (where $\pi(x)$ is the prime counting function) resolves four of these conjectures, Conjectures 8 (i)-(iv). Namely, this result proves Conjectures 8 (i),(ii) and (iv), while disproving (iii). Our methods, combined with the methods used in \cite{Lehmer23} to obtain a bound on the Lehmer numbers, can also be used to bound the counts of the $k$-Lehmer numbers.
We let $\mathbb{L}_k(x)$ be the set of composite $n$ up to $x$ which satisfy ${\varphi(n)|(n-1)^k}$, and $L_k(x) = |\mathbb{L}_k(x)|$. (So Grau and Oller-Marc{\'e}n's function $C_k(x)=L_k(x)+\pi(x) +1$.)
\begin{theorem} For $k \geq 2$ we have $L_k(x)\ll_k x^{1- \frac{1}{4k-1}}$.
\end{theorem}
\begin{proof} We consider three cases, based on the size of the largest prime divisor. First, consider those $n$ with $x^{1-\frac{1}{4k-1}}< n \leq x$ and $P(n)<x^{\frac{k}{4k-1}}$. Any such $n$ has a divisor $d$ in the range $(x^\frac{k}{4k-1},x^\frac{2k}{4k-1})$. Write $n = md$, so $m \leq x/d$, and since $\varphi(md)|(md-1)^k$, we see that $(md-1)^{k} \equiv 0 \pmod{\varphi(d)}$.
Now, for any positive integer $N$, the number of residue classes $r \pmod{N}$ with $r^k \equiv 0 \pmod{N}$ is at most $N^\frac{k-1}{k}$. Thus, for any fixed $d$, using the fact that $(d,\varphi(d))=1$, we see that $m$ must be in one of at most $\varphi(d)^{\frac{k-1}{k}}$ residue classes mod $\varphi(d)$, giving us at most \[\varphi(d)^{\frac{k-1}{k}}\left\lceil\frac{x}{d\varphi(d)}\right\rceil \leq \varphi(d)^{\frac{k-1}{k}}\left(1+\frac{x}{d\varphi(d)}\right)\]
choices for $m$.
Summing over all $d$ in the range $I = (x^\frac{k}{4k-1},x^\frac{2k}{4k-1})$, we get
\begin{align*}
\sum_{d \in I} \varphi(d)^{\frac{k-1}{k}}\left(1+\frac{x}{d\varphi(d)}\right) &\leq \sum_{d \in I} d^\frac{k-1}{k} + \frac{x}{d^{1+\frac{1}{k}}}\left(\frac{d}{\varphi(d)}\right)^{\frac{1}{k}} \\
& \leq \sum_{d \in I} d^\frac{k-1}{k} + \sum_{d \in I} \frac{x}{d^{1+\frac{1}{k}}}\left(\frac{d}{\varphi(d)}\right). \\
\end{align*}
The first sum is $\ll x^{1-\frac{1}{4k-1}}$.
Now, using partial summation on the second sum and the fact that $\sum_{t \leq x} \frac{t}{\varphi(t)} = O(x)$, we get
\begin{align*}
\sum_{d \in I} \frac{x}{d^{1+\frac{1}{k}}}\left(\frac{d}{\varphi(d)}\right) &\ll \frac{x}{x^{(\frac{2k}{4k-1})({1+\frac{1}{k}})}}\sum_{d\leq x^\frac{2k}{4k-1}}\frac{d}{\varphi(d)}+ x\int_{x^\frac{k}{4k-1}}^{x^\frac{2k}{4k-1}}\frac{1}{t^{2+\frac{1}{k}}}\sum_{d\leq t}\frac{d}{\varphi(d)}\,dt\\
&\ll \frac{x}{x^{(\frac{2k}{4k-1})({1+\frac{1}{k}})}}\left(x^\frac{2k}{4k-1}\right) + x\int_{x^\frac{k}{4k-1}}^{x^\frac{2k}{4k-1}}\frac{1}{t^{1+\frac{1}{k}}}dt\\
&\ll_k x^{1-\frac{2}{4k-1}} + \frac{x}{x^{(\frac{k}{4k-1})({\frac{1}{k}})}} \ll x^{1-\frac{1}{4k-1}}.
\end{align*}
In the second case we consider those $n$ with $x^{\frac{k}{4k-1}}<P(n) \leq x^{\frac{2k}{4k-1}}$. In this case $n$ again has a divisor in the range $(x^\frac{k}{4k-1},x^\frac{2k}{4k-1})$, namely $p$, and the above argument applies verbatim.
Finally, we are reduced to the case $P(n)>x^{\frac{2k}{4k-1}}$. The argument used for integers with a large prime factor in our main theorem shows that the number of $n\leq x$ with $\kappa(n)|n-1$ and $P(n)> x^{\frac{2k}{4k-1}}$ is $\ll x^{1-\frac{k}{4k-1}}$; since every $n\in\mathbb{L}_k(x)$ satisfies $\kappa(n)|n-1$, the same bound holds for those $n$ in $\mathbb{L}_k(x)$, and our result follows.
\end{proof}
We note that it may be possible to improve upon this bound by using techniques developed in more recent papers to obtain better bounds on the Lehmer numbers.
\section{Computations and Conjectures}
\label{sec:computations}
Table \ref{table} shows the values of $K(x)$ we computed for increasing powers of 10, compared with the values of $C(x)$ computed by Richard Pinch \cite{Pinch}. Our computations were done using trial division, in which a candidate number $n$ was rejected as soon as it was found not to be squarefree, or to have a prime divisor $p$ which failed to satisfy $\rad{p-1}|n-1$.
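For concreteness, here is a minimal, self-contained Python sketch of this search (our own illustration; the function names are ours). For instance, it finds the four members $15$, $51$, $85$ and $91$ below $10^2$, in agreement with the entry $K(10^2)=4$ in Table \ref{table}.
\begin{verbatim}
def rad(n):
    """Product of the distinct primes dividing n."""
    r, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            r *= p
            while n % p == 0:
                n //= p
        p += 1
    return r * n if n > 1 else r

def in_K(n):
    """True if n is composite and rad(phi(n)) divides n - 1."""
    m, p, factors = n, 2, 0
    while p * p <= m:
        if m % p == 0:
            factors += 1
            m //= p
            if m % p == 0:                 # not squarefree: reject
                return False
            if (n - 1) % rad(p - 1) != 0:  # rad(p-1) must divide n-1
                return False
        p += 1
    if m > 1:                              # remaining prime factor
        factors += 1
        if (n - 1) % rad(m - 1) != 0:
            return False
    return factors >= 2                    # composite and squarefree

print([n for n in range(2, 101) if in_K(n)])   # [15, 51, 85, 91]
\end{verbatim}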
\begin{table}[ht]
\caption{Values of $C(x)$ and $K(x)$ to $10^{11}$.}
\label{table}
\begin{tabular}{|l|l|l|}
\hline
$n$ & $C(10^n)$ & $K(10^n)$ \\ \hline
2 & 0 & 4 \\
3 & 1 & 19 \\
4 & 7 & 103 \\
5 & 16 & 422 \\
6 & 43 & 1559 \\
7 & 105 & 5645 \\
8 & 255 & 19329 \\
9 & 646 & 64040 \\
10 & 1547 & 205355 \\
11 & 3605 & 631949 \\
\hline
\end{tabular}
\end{table}
Despite the similar asymptotic bounds that we have for $C(x)$ and $K(x)$, it is clear that $K(x)$ is growing substantially faster, which leads to the conjecture:
\begin{conjecture} $\lim_{x \to \infty} K(x)/C(x) = \infty$.
\end{conjecture}
At the moment, however, we are unable to prove even the much weaker conjecture:
\begin{conjecture} $\lim_{x \to \infty} K(x) - C(x) = \infty$.
\end{conjecture}
\section*{Acknowledgments}
I would like to thank my advisor, Carl Pomerance, for suggesting the problem and for his invaluable guidance and encouragement throughout the development of this paper.
\end{document}
|
\begin{document}
\begin{abstract}
We classify up to isomorphism all gradings by an arbitrary group $G$ on the Lie algebras of zero-trace upper block-triangular matrices over an algebraically closed field of characteristic $0$. It turns out that the support of such a grading always generates an abelian subgroup of $G$.
Assuming that $G$ is abelian, our technique also works to obtain the classification of $G$-gradings on the upper block-triangular matrices as an associative algebra, over any algebraically closed field. These gradings were originally described by A. Valenti and M. Zaicev in 2012 (assuming characteristic $0$ and $G$ finite abelian) and classified up to isomorphism by A. Borges et al. in 2018.
Finally, still assuming that $G$ is abelian, we classify $G$-gradings on the upper block-triangular matrices as a Jordan algebra, over an algebraically closed field of characteristic $0$. It turns out that, under these assumptions, the Jordan case is equivalent to the Lie case.
\end{abstract}
\maketitle
\section{Introduction}
The algebras of upper block-triangular matrices are an essential example of non-simple algebras. Moreover, viewed as Lie algebras, they are an example of the so-called parabolic subalgebras of simple Lie algebras. Group gradings on the upper triangular matrices (a Borel subalgebra) were investigated in \cite{pkfy2017}.
In this paper, we classify gradings by any abelian group $G$ on the upper block-triangular matrices, viewed as an associative, Lie or Jordan algebra, over an algebra\-ical\-ly closed field $\mathbb{F}$, which is assumed to have characteristic $0$ in the Lie and Jordan cases. The basic idea is to show that every $G$-grading on the upper block-triangular matrices (of trace zero in the Lie case) can be extended uniquely to a grading on the full matrix algebra. However, not every $G$-grading on the full matrix algebra restricts to a grading on the upper block-triangular matrices, which leads us to consider an additional $\mathbb{Z}$-grading. In the associative case, this approach to the classification of gradings is different from the one of A. Valenti and M. Zaicev, who investigated upper triangular matrices in \cite{VaZa2007} and upper block-triangular matrices in \cite{VaZa2012} (under more restrictive assumptions than here). The Lie and Jordan cases are new. It turns out that the automorphism group of the upper block-triangular matrices, viewed as a Jordan algebra, is the same as the automorphism group of the upper block-triangular matrices of trace zero, viewed as a Lie algebra. Hence, the classifications of abelian group gradings in both cases are equivalent. The Jordan algebra of upper triangular matrices was investigated in \cite{pkfy2017Jord}.
Moreover, we prove that, in the Lie case, there is no loss of generality in assuming $G$ abelian, because the support of any group grading on the zero-trace upper block-triangular matrices generates an abelian subgroup.
The paper is structured as follows. After a brief review of terminology and relevant results on gradings in Section \ref{prelim}, we obtain a classification of gradings by abelian groups on the associative algebra of upper block-triangular matrices in Section \ref{assoc_case} (see Theorem \ref{th:main_assoc} and Corollary \ref{cor:main assoc}). In Section \ref{Lie_case}, we classify gradings on the Lie algebra of upper block-triangular matrices (Theorem \ref{th:main_Lie} and Corollary \ref{cor:main_Lie}). The center of this algebra is spanned by the identity matrix, and we actually classify gradings on the quotient modulo the center. The effect that this transition has on the classification of gradings is discussed in Section \ref{practical_iso}, the main results of which (Theorem \ref{th:main_practical} and Corollary \ref{cor:main_practical}) are quite general and may be of independent interest. Our approach to classification in Section \ref{Lie_case} follows the same lines as in the associative case. However, the Lie case is substantially more difficult, and some technical aspect is postponed until Section \ref{commut_supp}, where we also prove the commutativity of support (Theorem \ref{supp_commutativity}). Finally, the Jordan case is briefly discussed in Section \ref{Jord_case}.
\section{Preliminaries on group gradings}\label{prelim}
Let $A$ be an arbitrary algebra over a field $\mathbb{F}$ and let $G$ be a group. We say that $A$ is \emph{$G$-graded} if $A$ is endowed with a fixed vector space decomposition,
\[
\Gamma:A=\bigoplus_{g\in G}A_g,
\]
such that $A_gA_h\subset A_{gh}$, for all $g,h\in G$. The subspace $A_g$ is called the \emph{homogeneous component of degree $g$}, and the non-zero elements $x\in A_g$ are said to be homogeneous of degree $g$. We write $\deg x=g$ for these elements. The \emph{support} of $A$ (or of $\Gamma$) is the set $\mathrm{Supp}\,A=\{g\in G\mid A_g\ne 0\}$.
A subspace $I\subset A$ is called \emph{graded} if $I=\bigoplus_{g\in G}I\cap A_g$. If $I$ is a \emph{graded ideal} {(that is, it is simultaneously an ideal and a graded subspace)}, then the quotient algebra $A/I$ inherits a natural $G$-grading. $A$ is said to be \emph{graded-simple} if $A^2\ne 0$ and $A$ does not have nonzero proper graded ideals.
If $A$ is an associative or Lie algebra, then a \emph{graded $A$-module} is an $A$-module $V$ with a fixed vector space decomposition $V=\bigoplus_{g\in G}V_g$ such that $A_g\cdot V_h\subset V_{gh}$, for all $g,h\in G$. A nonzero graded $A$-module is said to be \emph{graded-simple} if it does not have nonzero proper graded submodules. {(A \emph{graded submodule} is a submodule that is also a graded subspace.)}
Let $H$ be any group and let $\alpha:G\to H$ be a homomorphism of groups. Then $\alpha$ induces an $H$-grading, say $A=\bigoplus_{h\in H} A_h'$, on the $G$-graded algebra $A$ if we define
\[
A_h'=\bigoplus_{g\in\alpha^{-1}(h)}A_g.
\]
The $H$-grading is called the \emph{coarsening} of $\Gamma$ induced by the homomorphism $\alpha$.
Let $B=\bigoplus_{g\in G}B_g$ be another $G$-graded algebra. A map $f:A\to B$ is called a \emph{homomorphism of $G$-graded algebras} if $f$ is a homomorphism of algebras and $f(A_g)\subset B_g$, for all $g\in G$. If, moreover, $f$ is an isomorphism, we call $f$ a \emph{$G$-graded isomorphism} (or an isomorphism of graded algebras), and we say that $A$ and $B$ are \emph{$G$-graded isomorphic} (or isomorphic as graded algebras). Two $G$-gradings, $\Gamma$ and $\Gamma'$, on the same algebra $A$ are \emph{isomorphic} if $(A,\Gamma)$ and $(A,\Gamma')$ are isomorphic as graded algebras.
Let $T$ be a finite abelian group and let $\sigma:T\times T\to\mathbb{F}^\times$ be a map, where $R^\times$ denotes the group of invertible elements in a ring $R$. We say that $\sigma$ is a \emph{2-cocycle} if
\[
\sigma(u,v)\sigma(uv,w)=\sigma(u,vw)\sigma(v,w),\quad\forall u,v,w\in T.
\]
The twisted group algebra $\mathbb{F}^\sigma T$ is constructed as follows: it has $\{X_t\mid t\in T\}$ as an $\mathbb{F}$-vector space basis, and multiplication is given by $X_uX_v=\sigma(u,v)X_{uv}$. It is readily seen that $\mathbb{F}^\sigma T$ is an associative algebra if and only if $\sigma$ is a 2-cocycle, which we will assume from now on. Note that $A=\mathbb{F}^\sigma T$ has a natural $T$-grading, where each homogeneous component has dimension 1, namely $A_t=\mathbb{F}X_t$, for each $t\in T$. This is an example of the so-called \emph{division grading}. A graded algebra $D$ is a \emph{graded division algebra} (or $D$ has a division grading) if every non-zero homogeneous element of $D$ is invertible.
Define $\beta:T\times T\to\mathbb{F}^\times$ by $\beta(u,v)=\sigma(u,v)\sigma(v,u)^{-1}$. Then we have
\[
X_uX_v=\beta(u,v)X_vX_u,\quad\forall u,v\in T,
\]
and $\beta$ is an alternating bicharacter of $T$, that is, $\beta$ is multiplicative in each variable and $\beta(u,u)=1$ for all $u\in T$. If $\mathrm{char}\,\mathbb{F}$ does not divide $|T|$, then $\mathbb{F}^\sigma T$ is semisimple as an (ungraded) algebra. It follows that $\mathbb{F}^\sigma T$ is a simple algebra if and only if $\beta$ is non-degenerate. In particular, the non-degeneracy of $\beta$ implies that $|T|=\dim\mathbb{F}^\sigma T$ is a perfect square. It is known that, if $\mathbb{F}$ is algebraically closed, the isomorphism classes of matrix algebras endowed with a division grading by an abelian group $G$ are in bijection with the pairs $(T,\beta)$, where $T$ is a finite subgroup of $G$ (namely, the support of the grading) and $\beta:T\times T\to\mathbb{F}^\times$ is a non-degenerate alternating bicharacter (see e.g. \cite[Theorem 2.15]{EK2013}).
For each $n$-tuple $(g_1,\ldots,g_n)$ of elements of $G$, we can define a $G$-grading on $M_n=M_n(\mathbb{F})$ by declaring that the matrix unit $E_{ij}$ is homogeneous of degree $g_ig_j^{-1}$, for all $i$ and $j$. A grading on $M_n$ is called \emph{elementary} if it is isomorphic to one of this form. For any $g\in G$ and any permutation $\sigma\in S_n$, the $n$-tuple $(g_{\sigma(1)}g,\ldots,g_{\sigma(n)}g)$ defines an isomorphic elementary $G$-grading. Hence, an isomorphism class of elementary gradings is described by a function $\kappa:G\to\mathbb{Z}_{\ge0}$, where $g\in G$ appears exactly $\kappa(g)$ times in the $n$-tuple. Moreover, $G$ acts on these functions by translation: given $g\in G$, one defines the function $g\kappa:G\to\mathbb{Z}_{\ge0}$ by $g\kappa(x)=\kappa(g^{-1}x)$. For any $\kappa:G\to\mathbb{Z}_{\ge0}$ with finite support, we denote $|\kappa|:=\sum_{x\in G}\kappa(x)$.
If $\mathbb{F}$ is algebraically closed, then, for a fixed abelian group $G$, the isomorphism classes of $G$-gradings on $M_n$ are parametrized by the triples $(T,\beta,\kappa)$, where $T$ is a finite subgroup of $G$, $\beta:T\times T\to\mathbb{F}^\times$ is a non-degenerate alternating bicharacter, and $\kappa:G/T\to\mathbb{Z}_{\ge0}$ is a function with finite support such that $|\kappa|\sqrt{|T|}=n$. A grading in the isomorphism class corresponding to $(T,\beta,\kappa)$ can be explicitly constructed by making the following two choices:
(i) a $k$-tuple $\gamma=(g_1,\ldots,g_k)$ of elements in $G$ such that each element $x\in G/T$ occurs in the $k$-tuple $(g_1T,\ldots,g_kT)$
exactly $\kappa(x)$ times (hence $k=|\kappa|$) and (ii) a division grading on $M_\ell$ with support $T$ and bicharacter $\beta$ (hence $|T|=\ell^2$). Since $n=k\ell$, we identify $M_n$ with $M_k\otimes M_\ell$ via Kronecker product and define a $G$-grading on $M_n$ by declaring the matrix $E_{ij}\otimes d$, with $1\le i,j\le k$, and $d$ a nonzero homogeneous element of $M_\ell$, to be of degree $g_i \deg(d) g_j^{-1}$.
Finally, two triples $(T,\beta,\kappa)$ and $(T',\beta',\kappa')$ determine the same isomorphism class if and only if $T'=T$, $\beta'=\beta$, and there exists $g\in G$ such that $\kappa'=g\kappa$ (see e.g. \cite[Theorem 2.27]{EK2013}).
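To fix ideas, here is the smallest standard example of these data, over a field of characteristic different from $2$: take $G=T=\mathbb{Z}_2\times\mathbb{Z}_2$ and, on $M_2$, set
\begin{equation*}
X_{(1,0)}=\begin{pmatrix}1&0\\0&-1\end{pmatrix},\qquad
X_{(0,1)}=\begin{pmatrix}0&1\\1&0\end{pmatrix},\qquad
X_{(1,1)}=X_{(1,0)}X_{(0,1)}=\begin{pmatrix}0&1\\-1&0\end{pmatrix}.
\end{equation*}
Together with $X_{(0,0)}=1$, these invertible matrices span the homogeneous components of a division grading on $M_2$ with support $T$; the relation $X_{(1,0)}X_{(0,1)}=-X_{(0,1)}X_{(1,0)}$ gives $\beta((1,0),(0,1))=-1$, so $\beta$ is non-degenerate. The corresponding triple is $(T,\beta,\kappa)$, where $\kappa$ takes the value $1$ on the unique coset in $G/T$, in accordance with $|\kappa|\sqrt{|T|}=1\cdot 2=2=n$.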
\section{Associative case}\label{assoc_case}
Let $\mathbb{F}$ be {a field} and let $V$ be a finite-dimensional $\mathbb{F}$-vector space. Denote by $\mathscr{F}$ a flag of subspaces in $V$, that is
\begin{align*}
0=V_0\subsetneq V_1\subsetneq\ldots\subsetneq V_s=V.
\end{align*}
Let $n=\dim V$ and $n_i=\dim V_i/V_{i-1}$, for $i=1,2,\ldots,s$. We denote by $U(\mathscr{F})$ the set of endomorphisms of $V$ preserving the flag $\mathscr{F}$, which coincides with the upper block-triangular matrices $UT(n_1,\ldots,n_s)$ after a choice of basis of $V$ respecting the flag $\mathscr{F}$. We fix such a basis and identify $U(\mathscr{F})=UT(n_1,\ldots,n_s)\subset M_n$.
For each $m\in\mathbb{Z}$, if $|m|<s$, let $J_m\subset M_n$ denote the $m$-th block-diagonal of matrices. Formally,
\begin{align*}
J_m=\mathrm{Span}\{&E_{ij}\in M_n\mid\text{there exists $q\in\mathbb{Z}_{\ge0}$ such that}\\
&n_1+\dots+n_q<i\le n_1+\dots+n_{q+1},\text{ and }\\
&n_1+\dots+n_{q+m}<j\le n_1+\dots+n_{q+m+1}\}.
\end{align*}
Setting $J_m=0$ for $|m|\ge s$, we obtain a $\mathbb{Z}$-grading $M_n=\bigoplus_{m\in\mathbb{Z}}J_m$, which is the elementary grading defined by the $n$-tuple
\[
(\underbrace{-1,\ldots,-1}_{n_1 \text{ times}},\underbrace{-2,\ldots,-2}_{n_2\text{ times}},\ldots,\underbrace{-s,\ldots,-s}_{n_s\text{ times}}).
\]
This grading restricts to $U(\mathscr{F})$, and we will refer to the resulting grading $U(\mathscr{F})=\bigoplus_{m\ge 0}J_m$ as the \emph{natural $\mathbb{Z}$-grading} of $U(\mathscr{F})$. The associated filtration consists of the powers of the Jacobson radical $J$ of $U(\mathscr{F})$, that is, {$\bigoplus_{i\ge m}J_i=J^m$} for all $m\ge 0$.
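For instance, for $s=2$, $n_1=1$ and $n_2=2$, so that $U(\mathscr{F})=UT(1,2)\subset M_3$, the defining $3$-tuple is $(-1,-2,-2)$ and
\begin{align*}
J_0&=\mathrm{Span}\{E_{11},E_{22},E_{23},E_{32},E_{33}\},\\
J_1&=\mathrm{Span}\{E_{12},E_{13}\},\qquad J_{-1}=\mathrm{Span}\{E_{21},E_{31}\},
\end{align*}
so $U(\mathscr{F})=J_0\oplus J_1$, with $J=J_1$ being its Jacobson radical, while $J_{-1}$ lies outside $U(\mathscr{F})$.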
Let $G$ be any abelian group and denote $G^\#=\mathbb{Z}\times G$. We identify $G$ with the subset $\{0\}\times G\subset G^\#$ and $\mathbb{Z}$ with $\mathbb{Z}\times\{1_G\}\subset G^\#$. We want to find a relation between $G^\#$-gradings on $M_n$ and $G$-gradings on $U(\mathscr{F})$.
First, we note that, given any $G^\#$-grading on $M_n$, we obtain a $\mathbb{Z}$-grading on $M_n$ if we consider the coarsening induced by the projection onto the first component $G^\#\to\mathbb{Z}$.
\begin{Def}
A $G^\#$-grading on $M_n$ is said to be \emph{admissible} if $U(\mathscr{F})$ with its natural $\mathbb{Z}$-grading is a graded subalgebra of $M_n$, where $M_n$ is viewed as a $\mathbb{Z}$-graded algebra induced by the projection $G^\#\to\mathbb{Z}$.
We call an isomorphism class of $G^\#$-grading on $M_n$ \emph{admissible} if it contains an admissible grading.
\end{Def}
\begin{Lemma}\label{ind_admissible}
For any admissible $G^\#$-grading on $M_n$, {the $\mathbb{Z}$-grading induced by the projection $G^\#\to\mathbb{Z}$} has $J_m$ as its homogeneous component of degree $m$.
\end{Lemma}
\begin{proof}
From the definition of admissible grading, we know that, for any $m\ge 0$, $J_m$ is contained in the homogeneous component of degree $m$ in the induced $\mathbb{Z}$-grading on $M_n$. In particular, each $E_{ii}$ is homogeneous of degree $0$. It follows that $E_{ii}M_nE_{jj}=\mathbb{F}E_{ij}$ is a graded subspace. Hence, all $E_{ij}$ are homogeneous. Moreover, if $E_{ij}\in J_{-m}$, then $E_{ji}\in J_m$ has degree $m$, so $E_{ij}$ must have degree $-m$, since $E_{ii}=E_{ij}E_{ji}$. The result follows.
\end{proof}
Recall from Section \ref{prelim} that, {over an algebraically closed field,} any isomorphism class of $G^\#$-gradings on $M_n$ is given by a finite subgroup $T$ of $G^\#$ (hence, in fact, $T\subset G$), a non-degenerate bicharacter $\beta:T\times T\to\mathbb{F}^\times$ and a function $\kappa:G^\#/T\to\mathbb{Z}_{\ge0}$ with finite support, where $n=k\ell$, $k=|\kappa|$ and $\ell=\sqrt{|T|}$.
\begin{Lemma}\label{lem1}
Consider a $G^\#$-grading on $M_n$ with parameters $(T,\beta,\kappa)$ and let
\[
\gamma=\big((a_1,g_1),(a_2,g_2),\ldots,(a_k,g_k)\big)
\]
be a $k$-tuple of elements of $G^\#$ associated to $\kappa$.
Then the $\mathbb{Z}$-grading on $M_n$ induced by the projection $G^\#\to\mathbb{Z}$ is an elementary grading
defined by the $n$-tuple
\[
(\underbrace{a_1,\ldots,a_1}_{\ell\text{ times}},\underbrace{a_2,\ldots,a_2}_{\ell\text{ times}},\ldots,\underbrace{a_k,\ldots,a_k}_{\ell\text{ times}}).
\]
\end{Lemma}
\begin{proof}
We have a $G^\#$-graded isomorphism $M_n\simeq M_k\otimes M_\ell$, where $M_k$ has an elementary grading defined by $\gamma$ and $M_\ell$ has a division grading with support $T$. Since $T$ is contained in the kernel of the projection $G^\#\to\mathbb{Z}$, the factor $M_\ell$ will get the trivial induced $\mathbb{Z}$-grading. The result follows.
\end{proof}
By the previous two lemmas, the isomorphism class of $G^\#$-gradings on $M_n$ with parameters $(T,\beta,\kappa)$ is admissible if and only if $\gamma$ has the following form, up to permutation and translation by an integer:
\[
\gamma=\big((-1,g_{11}),\ldots,(-1,g_{1k_1}),(-2,g_{21}),\ldots,(-2,g_{2k_2})\ldots,(-s,g_{s1}),\ldots,(-s,g_{sk_s})\big),
\]
where $n_i=k_i\ell$ for all $i=1,2,\ldots,s$. Equivalently, this condition can be restated directly in terms of $\kappa$, regarded as a function $\mathbb{Z}\times G/T\to\mathbb{Z}_{\ge 0}$, as follows: there exist $a\in\mathbb{Z}$ and $\kappa_1,\ldots,\kappa_s:G/T\to\mathbb{Z}_{\ge0}$ with $|\kappa_i|\sqrt{|T|}=n_i$ such that
\[
\kappa(a-i,x)=\kappa_i(x),\quad \forall i\in\{1,2,\ldots,s\},\, x\in G/T,
\]
and $\kappa(a-i,x)=0$ if $i\notin\{1,2,\ldots,s\}$.
By Lemma \ref{ind_admissible}, every admissible $G^\#$-grading
$
M_n=\bigoplus_{(m,g)\in G^\#}A_{(m,g)}
$
restricts to a $G^\#$-grading on $U(\mathscr{F})$, hence the projection onto the second component $G^\#\to G$ induces a $G$-grading on $U(\mathscr{F})$, namely, $U(\mathscr{F})=\bigoplus_{g\in G}B_g$ where $B_g=\bigoplus_{m\ge 0}A_{(m,g)}$.
\begin{Lemma}\label{lem2}
If two admissible $G^\#$-gradings on $M_n$ are isomorphic then they induce isomorphic $G$-gradings on $U(\mathscr{F})$.
\end{Lemma}
\begin{proof}
Assume that $\psi$ is an isomorphism between two admissible $G^\#$-gradings on $M_n$. Since $\psi$ preserves degrees in $G^\#$, and hence the induced $\mathbb{Z}$-degrees, it maps each $J_m$ onto itself by Lemma \ref{ind_admissible}; in particular, it leaves $U(\mathscr{F})=\bigoplus_{m\ge0}J_m$ invariant and therefore restricts to an automorphism of $U(\mathscr{F})$. This restriction is an isomorphism between the induced $G$-gradings on $U(\mathscr{F})$.
\end{proof}
Now we want to go back from $G$-gradings on $U(\mathscr{F})$ to $G^\#$-gradings on $M_n$. First note that the $G$-gradings on $U(\mathscr{F})$ obtained as above are not arbitrary, but satisfy the following:
\begin{Def}
We say that a $G$-grading on $U(\mathscr{F})$ is \emph{in canonical form} if, for each $m\in\{0,1,\ldots,s-1\}$, the subspace $J_m$ is $G$-graded.
\end{Def}
In other words, a $G$-grading $\Gamma:U(\mathscr{F})=\bigoplus_{g\in G}B_g$ is in canonical form if and only if it is compatible with the natural $\mathbb{Z}$-grading on $U(\mathscr{F})$. If this is the case, we obtain a $G^\#$-grading on $U(\mathscr{F})$ by taking $J_m\cap B_g$ as the homogeneous component of degree $(m,g)$. We want to show that this $G^\#$-grading uniquely extends to $M_n$.
To this end, let us look more closely at the automorphism group of $U(\mathscr{F})$.
We denote by $\mathrm{Int}(x)$ the inner automorphism $y\mapsto xyx^{-1}$ determined by an invertible element $x$.
\begin{Lemma}\label{aut}
$\mathrm{Aut}(U(\mathscr{F}))\simeq\left\{\psi\in\mathrm{Aut}(M_n)\mid\psi(U(\mathscr{F}))=U(\mathscr{F})\right\}$.
\end{Lemma}
\begin{proof}
It is proved in \cite[Corollary 5.4.10]{Cheung} that
\begin{align*}
\mathrm{Aut}(U(\mathscr{F}))=\{\mathrm{Int}(x)\mid x\in U(\mathscr{F})^\times\}.
\end{align*}
On the other hand, every automorphism of the matrix algebra is inner, so let $y\in M_n^\times$ and assume $yU(\mathscr{F})y^{-1}=U(\mathscr{F})$. Then, by the description of $\mathrm{Aut}(U(\mathscr{F}))$ above, we can find $x\in U(\mathscr{F})^\times$ such that
\begin{align*}
\mathrm{Int}(x)\mid_{U(\mathscr{F})}=\mathrm{Int}(y)\mid_{U(\mathscr{F})}.
\end{align*}
It follows that $y^{-1}x$ commutes with all elements of $U(\mathscr{F})$. Hence $y^{-1}x=\lambda\,{1}$, for some $\lambda\in\mathbb{F}^\times$, and $y=\lambda^{-1}x\in U(\mathscr{F})^\times$.
\end{proof}
Assume for a moment that {$\mathbb{F}$ is algebraically closed and $\mathrm{char}\,\mathbb{F}=0$. Since $G$ is abelian,} it is well known that $G$-gradings on a finite-dimensional algebra $A$ are equivalent to actions of the algebraic group $\widehat{G}:=\mathrm{Hom}_\mathbb{Z}(G,\mathbb{F}^\times)$ by automorphisms of $A$, that is, homomorphisms of algebraic groups $\widehat{G}\to\mathrm{Aut}(A)$ (see, for example, \cite[\S 1.4]{EK2013}). The homomorphism $\eta_\Gamma:\widehat{G}\to\mathrm{Aut}(A)$ corresponding to a grading $\Gamma:A=\bigoplus_{g\in G}A_g$ is defined by $\eta_\Gamma(\chi)(x)=\chi(g)x$ for all $\chi\in\widehat{G}$, $g\in G$ and $x\in A_g$.
By Lemma \ref{aut}, we have
\begin{align*}
\mathrm{Aut}\left(U(\mathscr{F})\right)\simeq\mathrm{Stab}_{\mathrm{Aut}(M_n)}(U(\mathscr{F}))\subset\mathrm{Aut}(M_n),
\end{align*}
hence, if {$\mathbb{F}$ is algebraically closed and} $\mathrm{char}\,\mathbb{F}=0$, we obtain the desired unique extension of gradings from $U(\mathscr{F})$ to $M_n$. To generalize this result to {arbitrary $\mathbb{F}$}, we can use group schemes instead of groups. Recall that an \emph{affine group scheme} over a field $\mathbb{F}$ is a representable functor from the category $\mathrm{Alg}_\mathbb{F}$ of unital commutative associative $\mathbb{F}$-algebras to the category of groups (see e.g. \cite{Waterhouse} or \cite[Appendix A]{EK2013}). For example, the \emph{automorphism group scheme} of a finite-dimensional algebra $A$ is defined by
\[
\mathbf{Aut}(A)(R):=\mathrm{Aut}_R(A\otimes R),\quad\forall R\in\mathrm{Alg}_\mathbb{F}.
\]
Another example of relevance to us is $\mathbf{GL}_1(A)$, for a finite-dimensional associative algebra $A$, defined by $\mathbf{GL}_1(A)(R):=(A\otimes R)^\times$. (In particular, $\mathbf{GL}_1(M_n)=\mathbf{GL}_n$.) Note that we have a homomorphism $\mathrm{Int}:\mathbf{GL}_1(A)\to\mathbf{Aut}(A)$.
If $G$ is an abelian group, then the group algebra $\mathbb{F}G$ is a commutative Hopf algebra, so it represents an affine group scheme, which is the scheme version of the character group $\widehat{G}$. It is denoted by $G^D$ and given by $G^D(R)=\mathrm{Hom}_{\mathbb{Z}}(G,R^\times)$. In particular, $G^D(\mathbb{F})=\widehat{G}$. If we have a $G$-grading $\Gamma$ on $A$, then we can define a homomorphism of group schemes $\eta_\Gamma:G^D\to\mathbf{Aut}(A)$ by generalizing the formula in the case of $\widehat{G}$: $(\eta_\Gamma)_R(\chi)(x\otimes r)=x\otimes \chi(g)r$ for all $R\in\mathrm{Alg}_\mathbb{F}$, $\chi\in G^D(R)$, $r\in R$, $g\in G$ and $x\in A_g$. In this way, over an arbitrary field, $G$-gradings on $A$ are equivalent to homomorphisms of group schemes $G^D\to\mathbf{Aut}(A)$.
\begin{Lemma}\label{aut_scheme}
Over an arbitrary field, $\mathbf{Aut}(U(\mathscr{F}))$ is a quotient of $\mathbf{GL}_1(U(\mathscr{F}))$, and $\mathbf{Aut}\left(U(\mathscr{F})\right)\simeq\mathbf{Stab}_{\mathbf{Aut}(M_n)}(U(\mathscr{F}))$ via the restriction map.
\end{Lemma}
\begin{proof}
We claim that the homomorphism $\mathrm{Int}:\mathbf{GL}_1(U(\mathscr{F}))\to\mathbf{Aut}(U(\mathscr{F}))$ is a quotient map {(in the sense of affine group schemes, see e.g. \cite[Chapter 15]{Waterhouse} or \cite[\S A.2]{EK2013})}. Since $\mathbf{GL}_1(U(\mathscr{F}))$ is smooth, it is sufficient to verify that (i) the group homomorphism $\mathrm{Int}:(U(\mathscr{F})\otimes\overline{\mathbb{F}})^\times\to\mathrm{Aut}_{\overline{\mathbb{F}}}(U(\mathscr{F})\otimes\overline{\mathbb{F}})$ is surjective, where $\overline{\mathbb{F}}$ is the algebraic closure of $\mathbb{F}$, and (ii) the Lie homomorphism $\mathrm{ad}:U(\mathscr{F})\to\mathrm{Der}(U(\mathscr{F}))$ is surjective (see e.g. \cite[Corollary A.49]{EK2013}). But (i) is satisfied by Corollary 5.4.10 in \cite{Cheung}, mentioned above, and (ii) is satisfied by Theorem 2.4.2 in the same work.
Since the homomorphism $\mathrm{Int}:\mathbf{GL}_1(U(\mathscr{F}))\to\mathbf{Aut}(U(\mathscr{F}))$ factors through the restriction map $\mathbf{Stab}_{\mathbf{Aut}(M_n)}(U(\mathscr{F}))\to\mathbf{Aut}\left(U(\mathscr{F})\right)$, it follows that this latter is also a quotient map. But its kernel is trivial, because the corresponding restriction maps for the group $\mathrm{Stab}_{\mathrm{Aut}_{\overline{\mathbb{F}}}(M_n(\overline{\mathbb{F}}))}(U(\mathscr{F})\otimes\overline{\mathbb{F}})$ and Lie algebra $\mathrm{Stab}_{\mathrm{Der}(M_n)}(U(\mathscr{F}))$ are injective (see e.g. \cite[Theorem A.46]{EK2013}).
\end{proof}
Coming back to a $G$-grading $\Gamma$ on $U(\mathscr{F})$ in canonical form, we conclude by Lemma \ref{aut_scheme} that the corresponding $G^\#$-grading on $U(\mathscr{F})$ extends to a unique $G^\#$-grading $\Gamma^\#$ on $M_n$. By construction, $\Gamma^\#$ is admissible and induces the original grading $\Gamma$ on $U(\mathscr{F})$. It is also clear that $\Gamma^\#$ is uniquely determined by these properties. Thus, we have a bijection between admissible $G^\#$-gradings on $M_n$ and $G$-gradings on $U(\mathscr{F})$ in canonical form.
\begin{Lemma}\label{can_assoc}
For any $G$-grading on $U(\mathscr{F})$, there exists an isomorphic $G$-grading in canonical form.
\end{Lemma}
\begin{proof}
It follows from Lemma \ref{aut_scheme} that the Jacobson radical $J=\bigoplus_{m>0}J_m$ of $U(\mathscr{F})$ is stabilized by $\mathbf{Aut}(U(\mathscr{F}))$. Hence, $J$ is a $G$-graded ideal, so the proof of \cite[Lemma 1]{y2018} shows that, in fact, there exists an isomorphic grading such that each block is a graded subspace.
\end{proof}
\begin{Lemma}\label{iso_grad1}
If two $G$-gradings, $\Gamma_1$ and $\Gamma_2$, on $U(\mathscr{F})$ are in canonical form and isomorphic to one another, then there exists a block-diagonal matrix $x\in U(\mathscr{F})^\times$ such that $\psi_0=\mathrm{Int}(x)$ is an isomorphism between $\Gamma_1$ and $\Gamma_2$.
\end{Lemma}
\begin{proof}
Let $\psi=\mathrm{Int}(y)$ be an isomorphism between $\Gamma_1$ and $\Gamma_2$. Write $y=(y_{ij})_{1\le i\le j\le s}$ in blocks and let $x=\mathrm{diag}(y_{11},\ldots,y_{ss})$. Then $x$ is invertible, so let $\psi_0=\mathrm{Int}(x)$.
Fix $m\in\{0,1,\ldots,s-1\}$ and let $a\in J_m$ be $G$-homogeneous with respect to $\Gamma_1$. Since $J^m=J_m\oplus J^{m+1}$, we can uniquely write $\psi(a)=b+c$, where $b\in J_m$ and $c\in J^{m+1}$. Since $\Gamma_2$ is in canonical form, $J_m$ and $J^{m+1}$ are $G$-graded subspaces with respect to $\Gamma_2$. Since $\psi$ preserves $G$-degree, it follows that $b$ and $c$ are $G$-homogeneous elements with respect to $\Gamma_2$ of the same $G$-degree as $a$ with respect to $\Gamma_1$. Finally, note that $\psi_0(a)=b$. Since $m$ and $a$ were arbitrary, we have shown that $\psi_0$ is an isomorphism between $\Gamma_1$ and $\Gamma_2$.
\end{proof}
Now we can prove the converse of Lemma \ref{lem2}.
\begin{Lemma}\label{iso_grad2}
If two admissible $G^\#$-gradings on $M_n$ induce isomorphic $G$-gradings on $U(\mathscr{F})$, then they are isomorphic.
\end{Lemma}
\begin{proof}
Let $\Gamma_1$ and $\Gamma_2$ be two isomorphic $G$-gradings on $U(\mathscr{F})$ obtained from two $G^\#$-gradings on $M_n$, $\Gamma_1^\#$ and $\Gamma_2^\#$, respectively. For $i=1,2$, let $\eta_i:(G^\#)^D\to\mathbf{Aut}(M_n)$ be the action corresponding to $\Gamma_i^\#$. Consider also the restriction $\Gamma_i'$ of $\Gamma_i^\#$ to $U(\mathscr{F})$ and the corresponding action $\eta_i':(G^\#)^D\to\mathbf{Aut}(U(\mathscr{F}))$. By Lemma \ref{iso_grad1}, we can find an isomorphism $\psi_0=\mathrm{Int}(x)$ between $\Gamma_1$ and $\Gamma_2$, where $x$ is block-diagonal. Such $\psi_0$ preserves the natural $\mathbb{Z}$-grading, so it is actually an isomorphism between the $G^\#$-gradings $\Gamma_1'$ and $\Gamma_2'$. Hence, $\psi_0\eta_1'(\chi)=\eta_2'(\chi)\psi_0$ for all $\chi\in(G^\#)^D(R)$ and all $R\in\mathrm{Alg}_\mathbb{F}$. By Lemma \ref{aut_scheme}, this implies $\psi_0\eta_1(\chi)=\eta_2(\chi)\psi_0$ for all $\chi\in(G^\#)^D(R)$, which means $\psi_0$ is an isomorphism between $\Gamma_1^\#$ and $\Gamma_2^\#$.
\end{proof}
We summarize the results of this section:
\begin{Thm}\label{th:main_assoc}
{Over an arbitrary field,} the mapping of an admissible $G^\#$-grading on $M_n$ to a $G$-grading on $U(\mathscr{F})$, given by restriction and coarsening, yields a bijection between the admissible isomorphism classes of $G^\#$-gradings on $M_n$ and the isomorphism classes of $G$-gradings on $U(\mathscr{F})$.\qed
\end{Thm}
{If $\mathbb{F}$ is algebraically closed, then the} admissible isomorphism classes of $G^\#$-gradings on $M_n$ can be parametrized by the triples $(T,\beta,(\kappa_1,\ldots,\kappa_s))$, where $T\subset G$ is a finite subgroup, $\beta:T\times T\to\mathbb{F}^\times$ is a non-degenerate alternating bicharacter and $\kappa_i:G/T\to\mathbb{Z}_{\ge0}$ are functions with finite support such that $|\kappa_i|\sqrt{|T|}=n_i$, for each $i=1,2,\ldots,s$. Hence, isomorphism classes of $G$-gradings on $U(\mathscr{F})$ are parametrized by the same triples.
Choosing, for each $\kappa_i$, a $k_i$-tuple $\gamma_i$ of elements of $G$, where $k_i=|\kappa_i|$, we reproduce the description of $G$-gradings on $U(\mathscr{F})$ originally obtained in \cite{VaZa2012}. Note, however, that we do not need to assume that $G$ is finite, nor that $\mathrm{char}\,\mathbb{F}=0$. Also note that we have a description not only of $G$-gradings but of their isomorphism classes, which gives an alternative proof of the following result first established in \cite[Corollary 4]{BFD2018}:
\begin{Cor}\label{cor:main assoc}
Two $G$-gradings on $U(\mathscr{F})$, determined by $(T,\beta,(\kappa_1,\ldots,\kappa_s))$ and $(T',\beta',(\kappa_1',\ldots,\kappa_s'))$, are isomorphic if and only if $T'=T$, $\beta'=\beta$ and there exists $g\in G$ such that $\kappa_i'=g\kappa_i$, for all $i=1,2,\ldots,s$.\qed
\end{Cor}
\section{Lie case}\label{Lie_case}
Now we turn our attention to $U(\mathscr{F})^{(-)}$, that is, $U(\mathscr{F})$ viewed as a Lie algebra with respect to the commutator $[x,y]=xy-yx$. {Since we will be working with Lie and associative products at the same time, we will always indicate the former by brackets and keep using juxtaposition for the latter.} We assume that the grading group $G$ is abelian and the ground field $\mathbb{F}$ is algebraically closed of characteristic $0$, and follow the same approach as in the associative case.
Denote by $\tau$ the flip along the secondary diagonal on $M_n${, that is, $\tau(E_{ij})=E_{n-j+1,n-i+1}$, for all matrix units $E_{ij}\in M_n$}. Note that $U(\mathscr{F})^\tau=U(\mathscr{F})$ if and only if $n_i=n_{s-i+1}$ for all $i=1,2,\ldots,\lfloor\frac{s}2\rfloor$. Let
\[
U(\mathscr{F})_0=\{x\in U(\mathscr{F})\mid\mathrm{tr}(x)=0\},
\]
which is a Lie subalgebra of $U(\mathscr{F})^{(-)}$. Moreover, $U(\mathscr{F})^{(-)}=U(\mathscr{F})_0\oplus\mathbb{F}{1}$, where ${1}\in U(\mathscr{F})$ is the identity matrix.
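For concreteness, the following small Python sketch (ours, not part of the paper) checks numerically that the flip $\tau$ defined above can be realized as $X\mapsto JX^{\mathrm{t}}J$, where $J$ is the permutation matrix with $1$'s on the secondary diagonal, that it sends the matrix unit $E_{ij}$ to $E_{n-j+1,n-i+1}$, and that it reverses products.
\begin{verbatim}
import numpy as np

n = 4
J = np.fliplr(np.eye(n))            # 1's on the secondary diagonal

def tau(X):
    # flip along the secondary diagonal: tau(X) = J X^t J
    return J @ X.T @ J

def E(i, j):                        # matrix unit E_{ij} (0-based indices)
    M = np.zeros((n, n)); M[i, j] = 1.0
    return M

for i in range(n):
    for j in range(n):
        assert np.array_equal(tau(E(i, j)), E(n - 1 - j, n - 1 - i))

X, Y = np.random.rand(n, n), np.random.rand(n, n)
assert np.allclose(tau(X @ Y), tau(Y) @ tau(X))   # tau is an anti-automorphism
print("tau(E_ij) = E_{n-j+1,n-i+1} and tau(XY) = tau(Y)tau(X) verified")
\end{verbatim}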
The center $\mathfrak{z}(U(\mathscr{F})^{(-)})=\mathbb{F}{1}$ is always graded, so ${1}$ is a homogeneous element. If we change its degree arbitrarily, we obtain a new well-defined grading, which is not isomorphic to the original one, but will induce the same grading on $U(\mathscr{F})^{(-)}/\mathbb{F}{1}\simeq U(\mathscr{F})_0$ (compare with \cite[Definition 6]{pkfy2017}). It turns out that, up to isomorphism, a $G$-grading on $U(\mathscr{F})^{(-)}$ is determined by the induced $G$-grading on $U(\mathscr{F})_0$ and the degree it assigns to the identity matrix (see Corollary \ref{reduction_to_trace_0} in Section \ref{practical_iso}). Conversely, any $G$-grading on $U(\mathscr{F})_0$ extends to $U(\mathscr{F})^{(-)}=U(\mathscr{F})_0\oplus\mathbb{F}{1}$ by defining the degree of ${1}$ arbitrarily. Thus, we have a bijection between the isomorphism classes of $G$-gradings on $U(\mathscr{F})^{(-)}$ and the pairs consisting of an isomorphism class of $G$-gradings on $U(\mathscr{F})_0$ and an element of $G$.
We start by computing the automorphism group of $U(\mathscr{F})_0$. To this end, we will use the following description of the automorphisms of $U(\mathscr{F})^{(-)}$, which was proved in \cite{MaSo1999} for the field of complex numbers.
\begin{Thm}[{\cite[Theorem 4.1.1]{Cecil}}]\label{aut_cecil}
Let $\phi$ be an automorphism of $U(\mathscr{F})^{(-)}$, and assume $\mathrm{char}\,\mathbb{F}=0$ or $\mathrm{char}\,\mathbb{F}>3$. Then there exist $p,d\in U(\mathscr{F})$, with $p$ invertible and $d$ block-diagonal, such that one of the following holds:
\begin{enumerate}
\item $\phi(x)=pxp^{-1}+\mathrm{tr}(xd){1}$, for all $x\in U(\mathscr{F})$, or
\item $\phi(x)=-px^\tau p^{-1}+\mathrm{tr}(xd){1}$, for all $x\in U(\mathscr{F})$.\qed
\end{enumerate}
\end{Thm}
\begin{remark}\label{antiaut}
Case (2) in the previous theorem occurs if and only if $U(\mathscr{F})$ is invariant under $\tau$, that is, $n_i=n_{s-i+1}$ for all $i$. It follows that $U(\mathscr{F})$ admits an anti-automorphism only under this condition. Indeed, if $\psi$ is an anti-automorphism of $U(\mathscr{F})$, then $-\psi$ is a Lie automorphism of $U(\mathscr{F})$. Hence, by Theorem \ref{aut_cecil}, we have $-\psi(x)=pxp^{-1}+\mathrm{tr}(xd){1}$ for all $x\in U(\mathscr{F})$ or $n_i=n_{s-i+1}$ for all $i$. However, the first possibility cannot occur if $n>2$, since it would imply that the composition $\psi\,\mathrm{Int}(p^{-1})$, which maps $x\mapsto-x+\mathrm{tr}(xd'){1}$ where $d'$ is the block-diagonal part of $-pdp^{-1}$, is an anti-automorphism of $U(\mathscr{F})$, but this is easily seen not to be the case. (Of course, if $n=2$ then we have $n_i=n_{s-i+1}$ for all $i$.)
\end{remark}
As a consequence, we obtain the following analog of Lemma \ref{aut}. {(As usual, the symbol $\rtimes$ denotes a semidirect product in which the second factor acts on the first.)}
\begin{Lemma}\label{aut_Lie}
If $n>2$ and $n_i=n_{s-i+1}$ for all $i$, then
\[
\mathrm{Aut}(U(\mathscr{F})_0)\simeq\{\mathrm{Int}(x)\mid x\in U(\mathscr{F})^\times\}\rtimes\langle-\tau\rangle;
\]
otherwise, $\mathrm{Aut}(U(\mathscr{F})_0)\simeq\{\mathrm{Int}(x)\mid x\in U(\mathscr{F})^\times\}$. In both cases,
\[
\mathrm{Aut}(U(\mathscr{F})_0)\simeq\mathrm{Stab}_{\mathrm{Aut}(\mathfrak{sl}_n)}(U(\mathscr{F})_0).
\]
\end{Lemma}
\begin{proof}
Let $\psi\in\mathrm{Aut}(U(\mathscr{F})_0)$. We extend $\psi$ to an automorphism $\phi$ of $U(\mathscr{F})^{(-)}$ by setting
$\phi({1})={1}$. By the previous result, $\phi$ must have one of two possible forms. Assume it is the first one:
\[
\phi(x)=pxp^{-1}+\mathrm{tr}(xd){1},\quad\forall x\in U(\mathscr{F}).
\]
But as $U(\mathscr{F})_0$ is an invariant subspace for $\phi$, we see that, for all $x\in U(\mathscr{F})_0$,
\[
0=\mathrm{tr}(\phi(x))=\mathrm{tr}(pxp^{-1}+\mathrm{tr}(xd){1})=n\,\mathrm{tr}(xd).
\]
Therefore, $\mathrm{tr}(xd)=0$ and hence $\psi(x)=\phi(x)=pxp^{-1}$, for all $x\in U(\mathscr{F})_0$, so $\psi=\mathrm{Int}(p)$. The same argument applies if $\phi$ has the second form.
Note that, for $n=2$, the second form reduces to the first on $UT(1,1)_0$, since $-\tau$ coincides with $\mathrm{Int}(p)$ on $\mathfrak{sl}_2$, where $p=\mathrm{diag}(1,-1)$.
On the other hand, for $n>2$, the two forms do not overlap, since already on the set of zero-trace diagonal matrices the action of $-\tau$ differs from the action of any inner automorphism.
We conclude the proof in the same way as for Lemma \ref{aut}.
\end{proof}
Let $G$ be an abelian group and define $G^\#=\mathbb{Z}\times G$.
Similarly to the associative case, we want to relate $G$-gradings on $U(\mathscr{F})_0$ and $G^\#$-gradings on $\mathfrak{sl}_n$, since for the latter a classification of group gradings is known \cite{BK2010} (see also \cite[Chapter 3]{EK2013}).
Recall that $J_m$ stands for the $m$-th block-diagonal of matrices. We consider again the \emph{natural $\mathbb{Z}$-grading} on $U(\mathscr{F})_0$: its homogeneous component of degree $m\in\mathbb{Z}$ is $J_m\cap U(\mathscr{F})_0$ if $0\le m<s$ and $0$ otherwise. We say that a $G$-grading on $U(\mathscr{F})_0$ is in \emph{canonical form} if, for each $m\in\{0,\ldots,s-1\}$, the subspace $J_m\cap U(\mathscr{F})_0$ is $G$-graded. A $G^\#$-grading on $\mathfrak{sl}_n$ is said to be \emph{admissible} if the coarsening induced by the projection $G^\#\to\mathbb{Z}$ has $U(\mathscr{F})_0$, with its natural $\mathbb{Z}$-grading, as a graded subalgebra. An isomorphism class of $G^\#$-grading on $\mathfrak{sl}_n$ is called \emph{admissible} if it contains an admissible grading.
Since any $\mathbb{Z}$-grading on $\mathfrak{sl}_n$ is the restriction of a unique $\mathbb{Z}$-grading on the associative algebra $M_n$, Lemma \ref{ind_admissible} still holds if we replace $M_n$ by $\mathfrak{sl}_n$. Therefore, every admissible $G^\#$-grading on $\mathfrak{sl}_n$ restricts to $U(\mathscr{F})_0$ and, by means of the projection $G^\#\to G$, yields a $G$-grading on $U(\mathscr{F})_0$, which is clearly in canonical form.
Conversely, thanks to Lemma \ref{aut_Lie}, if a $G$-grading on $U(\mathscr{F})_0$ is in canonical form then it comes from a unique admissible $G^\#$-grading on $\mathfrak{sl}_n$ in this way. Therefore, similarly to the associative case, we obtain a bijection between admissible $G^\#$-gradings on $\mathfrak{sl}_n$ and $G$-gradings on $U(\mathscr{F})_0$ in canonical form.
The following result is technical and will be proved in Section \ref{commut_supp}:
\begin{Lemma}\label{can_Lie}
For any $G$-grading on $U(\mathscr{F})_0$, there exists an isomorphic $G$-grading in canonical form.
\end{Lemma}
Clearly, as in Lemma \ref{lem2}, if two admissible $G^\#$-gradings on $\mathfrak{sl}_n$ are isomorphic then they induce isomorphic $G$-gradings on $U(\mathscr{F})_0$. The converse is established by the same argument as Lemma \ref{iso_grad2}, using the following analog of Lemma \ref{iso_grad1}:
\begin{Lemma}
If two $G$-gradings, $\Gamma_1$ and $\Gamma_2$, on $U(\mathscr{F})_0$ are in canonical form and isomorphic to one another, then there exists an isomorphism $\psi_0$ between $\Gamma_1$ and $\Gamma_2$ of the form $\psi_0=\mathrm{Int}(x)$ or $\psi_0=-\mathrm{Int}(x)\tau$ where the matrix $x\in U(\mathscr{F})^\times$ is block-diagonal.
\end{Lemma}
\begin{proof}
Let $\psi$ be an isomorphism between $\Gamma_1$ and $\Gamma_2$. If $\psi=\mathrm{Int}(y)$ then we are in the situation of the proof of Lemma \ref{iso_grad1}. If $\psi=-\mathrm{Int}(y)\tau$ then the same proof still works because all subspaces $J_m$ are invariant under $\tau$.
\end{proof}
In summary:
\begin{Thm}\label{th:main_Lie}
The mapping of an admissible $G^\#$-grading on $\mathfrak{sl}_n$ to a $G$-grading on $U(\mathscr{F})_0$, given by restriction and coarsening, yields a bijection between the admissible isomorphism classes of $G^\#$-gradings on $\mathfrak{sl}_n$ and the isomorphism classes of $G$-gradings on $U(\mathscr{F})_0$.\qed
\end{Thm}
There are two families of gradings on $\mathfrak{sl}_n$, $n>2$, namely, Type I and Type II. (Only Type I exists for $n=2$.) Their isomorphism classes are described in Theorem 3.53 of \cite{EK2013}, but we will use Theorem 45 of \cite{BKE2018}, which is equivalent but uses more convenient parameters.
By definition, a $G^\#$-grading of Type I is a restriction of a $G^\#$-grading on the associative algebra $M_n$, so it is parametrized by $(T,\beta,\kappa)$, where, as in Section \ref{assoc_case}, $T\subset G$ is a finite group, $\beta:T\times T\to\mathbb{F}^\times$ is a non-degenerate alternating bicharacter and $\kappa:\mathbb{Z}\times G/T\to\mathbb{Z}_{\ge0}$ is a function with finite support satisfying $|\kappa|\sqrt{|T|}=n$.
For a Type II grading, there is a unique element $f\in G^\#$ of order $2$ (hence, in fact, $f\in G$), called the \emph{distinguished element}, such that the coarsening induced by the natural homomorphism $G^\#\to G^\#/\langle f\rangle$ is a Type I grading. The parametrization of Type II gradings depends on the choice of character $\chi$ of $G^\#$ satisfying $\chi(f)=-1$. So, we fix $\chi\in\widehat{G}$ with $\chi(f)=-1$ and extend it trivially to the factor $\mathbb{Z}$. Then, the parameters of a Type II grading are a finite subgroup $T\subset G^\#$ (hence $T\subset G$) containing $f$, an alternating bicharacter $\beta:T\times T\to\mathbb{F}^\times$ with radical $\langle f\rangle$ (so, $\beta$ determines the distinguished element $f$), an element $g^\#_0\in G^\#$, and a function $\kappa:\mathbb{Z}\times G/T\to\mathbb{Z}_{\ge0}$ with finite support satisfying $|\kappa|\displaystyle\sqrt{|T|/2}=n$. These parameters are required to satisfy some additional conditions, as follows.
To begin with, for a Type II grading, $T$ must be $2$-elementary. Its Type I coarsening is a grading by $G^\#/\langle f\rangle\simeq\mathbb{Z}\times\overline{G}$ with parameters $(\overline{T},\bar{\beta},\kappa)$, where $\overline{T}:=T/\langle f\rangle$ is a subgroup of $\overline{G}:=G/\langle f\rangle$, $\bar{\beta}:\overline{T}\times\overline{T}\to\mathbb{F}^\times$ is the non-degenerate bicharacter induced by $\beta$, and $\kappa$ is now regarded as a function on $\mathbb{Z}\times\overline{G}/\overline{T}\simeq\mathbb{Z}\times G/T$.
Since $T$ is $2$-elementary, $\beta$ can only take values $\pm 1$ and $\ell:=\sqrt{|T|/2}$ is a power of $2$. If one uses Kronecker products of Pauli matrices (of order $2$) to construct a division grading on $M_\ell$ with support $\overline{T}$ and bicharacter $\bar{\beta}$, then the transposition will preserve degree and thus become an involution on the resulting graded division algebra $D$. The choice of such an involution is arbitrary, and it will be convenient for our purposes to use $\tau$, which also preserves degree. Since all homogeneous components of $D$ are $1$-dimensional, we have
\[
(X_{\bar{t}})^\tau=\bar{\eta}(\bar{t})X_{\bar{t}},\quad\forall\bar{t}\in\overline{T},\,X_{\bar{t}}\in D_{\bar{t}},
\]
where $\bar{\eta}:\overline{T}\to\{\pm 1\}$ satisfies $\bar{\eta}(\bar{u}\bar{v})=\bar{\beta}(\bar{u},\bar{v})\bar{\eta}(\bar{u})\bar{\eta}(\bar{v})$ for all $\bar{u},\bar{v}\in\overline{T}$. If we regard $\bar{\eta}$ and $\bar{\beta}$ as maps of vector spaces over the field of two elements, this equation means that $\bar{\eta}$ is a quadratic form with polarization $\bar{\beta}$. {Define a quadratic form $\eta:T\to\{\pm 1\}$ with polarization $\beta$ by $\eta(t)=\chi(t)\bar{\eta}(\bar{t})$, where $\bar{t}$ denotes the image of $t\in T$ in the quotient group $\overline{T}$.}
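The smallest case may help fix ideas. In the following Python sketch (ours; the matrices $A$, $B$ and the dictionary names are ad hoc choices, not notation from the paper) we take $\ell=2$ and $\overline{T}\simeq\mathbb{Z}_2\times\mathbb{Z}_2$, and verify numerically that the flip $\tau$ preserves degree on the Pauli-type graded division algebra $D=M_2$, that the commutation factor of the homogeneous basis is the expected non-degenerate alternating bicharacter $\bar{\beta}$, and that the resulting signs $\bar{\eta}$ satisfy the quadratic-form identity above.
\begin{verbatim}
import numpy as np

I2 = np.eye(2)
A = np.array([[0., 1.], [1., 0.]])     # order-2 "Pauli" generators
B = np.array([[1., 0.], [0., -1.]])
basis = {(0, 0): I2, (1, 0): A, (0, 1): B, (1, 1): A @ B}  # degree -> matrix

J = np.fliplr(np.eye(2))
tau = lambda X: J @ X.T @ J            # flip along the secondary diagonal

def beta(u, v):                        # commutation factor on Z_2 x Z_2
    return (-1) ** ((u[0] * v[1] - u[1] * v[0]) % 2)

eta = {}
for t, X in basis.items():
    # tau preserves degree: tau(X_t) is a scalar multiple of X_t; read off the sign
    ratio = tau(X)[np.nonzero(X)][0] / X[np.nonzero(X)][0]
    assert np.allclose(tau(X), ratio * X)
    eta[t] = int(round(ratio))

for u, Xu in basis.items():
    for v, Xv in basis.items():
        uv = ((u[0] + v[0]) % 2, (u[1] + v[1]) % 2)
        assert np.allclose(Xu @ Xv, beta(u, v) * (Xv @ Xu))  # beta-bar
        assert eta[uv] == beta(u, v) * eta[u] * eta[v]       # eta-bar is quadratic
print("eta-bar on T-bar:", eta)   # {(0,0): 1, (1,0): 1, (0,1): -1, (1,1): 1}
\end{verbatim}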
Recall that a concrete $G^\#/\langle f\rangle$-grading with parameters $(\overline{T},\bar{\beta},\kappa)$ is constructed by selecting a $k$-tuple of elements of $G^\#/\langle f\rangle$, as directed by $\kappa$, to get an elementary grading on $M_k$, where $k=|\kappa|$, and identifying $M_n\simeq M_k\otimes D$ via Kronecker product. The remaining parameter $g^\#_0$ can then be used, together with the chosen involution $\tau$ on $D$, to define an anti-automorphism $\varphi$ on $M_n$ by the formula
\[
\varphi(X)=\Phi^{-1}X^\tau\Phi,\quad\forall X\in M_n,
\]
where the matrix $\Phi\in M_k\otimes D\simeq M_k(D)$ is constructed in such a way that $\varphi^2$ acts on $M_n$ in exactly the same way as $\chi^2$, which acts on $M_n$ because it can be regarded as a character on $G^\#/\langle f\rangle$ (since $\chi^2(f)=1$) and $M_n$ is a $G^\#/\langle f\rangle$-graded algebra. As a result, we can split each homogeneous component of the $G^\#/\langle f\rangle$-grading on $M_n$ into (at most $2$) eigenspaces of $\varphi$ so that the action of $\chi$ on the resulting $G^\#$-graded algebra $M_n^{(-)}$ coincides with the automorphism $-\varphi$. Finally, the restriction of this $G^\#$-grading to $\mathfrak{sl}_n$ is a $G^\#$-grading of Type II with parameters $(T,\beta,g^\#_0,\kappa)$.
In order to construct $\Phi$, two conditions must be met:
\begin{enumerate}
\item[(i)] $\kappa$ is \emph{$g^\#_0$-balanced} in the sense that $\kappa(x)=\kappa((g_0^{\#})^{-1}x^{-1})$ for all $x\in\mathbb{Z}\times G/T$ (where the inverse in $\mathbb{Z}$ is understood with respect to addition);
\item[(ii)] $\kappa(g^\#T)$ is even whenever $g_0^\#(g^\#)^2\in T$ and $\eta(g^\#_0(g^\#)^2)=-1$ for some $g^\#\in G^\#$.
\end{enumerate}
Such a matrix $\Phi\in M_k(D)$ is given explicitly by Equations (3.29) and (3.30) in \cite{EK2013}, but in relation to the usual transposition. Since we are using $\tau$, the order of the $k$ rows has to be reversed and the entries in $D$ chosen in accordance with the above quadratic form $\bar{\eta}$ rather than the quadratic form in \cite{EK2013}. It will also be convenient in our situation to order the $k$-tuple associated to $\kappa$ in a different way, as will be described below.
We are only interested in admissible isomorphism classes of $G^\#$-gradings on $\mathfrak{sl}_n$. If $n=2$, the isomorphism condition for (Type I) gradings is the same as in the associative case: all translations of $\kappa$ determine isomorphic gradings. If $n>2$, however, one isomorphism class of Type I gradings on $\mathfrak{sl}_n$ can consist of one or two isomorphism classes of gradings on $M_n$, because $(T,\beta,\kappa)$ and $(T,\beta^{-1},\bar{\kappa})$ determine isomorphic gradings on $\mathfrak{sl}_n$, where the function $\bar{\kappa}:\mathbb{Z}\times G/T\to\mathbb{Z}_{\ge0}$ is defined by $\bar{\kappa}(i,x):=\kappa(-i,x^{-1})$. Hence, the isomorphism class of $G^\#$-gradings of Type I with parameters $(T,\beta,\kappa)$ is admissible if and only if at least one of the functions $\kappa$ and $\bar{\kappa}$ has the form described after Lemma \ref{lem1}. Assuming it is $\kappa$, there must exist $a\in\mathbb{Z}$ and functions $\kappa_1,\ldots,\kappa_s:G/T\to\mathbb{Z}_{\ge0}$ with $|\kappa_i|\sqrt{|T|}=n_i$, such that
\begin{equation}\label{ref_kappa_seq}
\kappa(a-i,x)=\kappa_i(x),\quad\forall i\in\{1,2,\ldots,s\},\,x\in G/T,
\end{equation}
and $\kappa(a-i,x)=0$ if $i\not\in\{1,2,\ldots,s\}$. Then $\bar{\kappa}$ can be expressed in the same form, but with the function $\bar{\kappa}_i(x):=\kappa_i(x^{-1})$ playing the role of $\kappa_{s-i+1}$ for each $i$. Thus, the isomorphism classes of $G$-gradings of Type I on $U(\mathscr{F})_0$ are parametrized by $(T,\beta,(\kappa_1,\ldots,\kappa_s))$, and, if $n_i=n_{s-i+1}$ for all $i$, then $(T,\beta,(\kappa_1,\ldots,\kappa_s))$ and $(T,\beta^{-1},(\bar{\kappa}_s,\ldots,\bar{\kappa}_1))$ determine isomorphic $G$-gradings on $U(\mathscr{F})_0$.
Now consider the isomorphism class of Type II gradings on $\mathfrak{sl}_n$ ($n>2$) with parameters $(T,\beta,g^\#_0,\kappa)$. Admissibility is a condition on the $\mathbb{Z}$-grading induced by the projection $G^\#\to\mathbb{Z}$, which factors through the natural homomorphism $G^\#\to G^\#/\langle f\rangle$. So, for this isomorphism class to be admissible, it is necessary and sufficient for $\kappa$ to have the form given by Equation \eqref{ref_kappa_seq}, but with $|\kappa_i|\sqrt{|T|/2}=n_i$.
\begin{Lemma}\label{balance_kappa_i}
If $g^\#_0=(a_0,g_0)$ and $\kappa$ is given by Equation \eqref{ref_kappa_seq}, then $\kappa$ is $g^\#_0$-balanced if and only if $a_0=s+1-2a$ and $\kappa_i(x)=\kappa_{s-i+1}(g_0^{-1}x^{-1})$ for all $x\in G/T$ and all $i$.
\end{Lemma}
\begin{proof}
Consider the function $\kappa_\mathbb{Z}:\mathbb{Z}\to\mathbb{Z}_{\ge 0}$ given by $\kappa_\mathbb{Z}(m)=\sum_{g\in G/T}\kappa(m,g)$. Then the support of $\kappa_\mathbb{Z}$ is $\{a-s,\ldots,a-1\}$. On the other hand, if $\kappa$ is $g^\#_0$-balanced, then $\kappa_\mathbb{Z}$ is $a_0$-balanced{, that is, $\kappa_\mathbb{Z}(i)=\kappa_\mathbb{Z}(-a_0-i)$, for all $i\in\mathbb{Z}$}, which implies $-a_0-(a-s)=a-1$. The result follows.
\end{proof}
Therefore, we can replace the parameters $g^\#_0$ and $\kappa$ by $g_0$ and $(\kappa_1,\ldots,\kappa_s)$. Also, since $g_0^\#(g^\#)^2\notin T$ for any $g^\#=(a-i,g)$ with $s+1\ne 2i$, condition (ii) is automatically satisfied if $s$ is even, and affects only $\kappa_{\frac{s+1}{2}}$ if $s$ is odd. Hence, we can restate conditions (i) and (ii) in terms of $\kappa_1,\ldots,\kappa_s$ as follows:
\begin{enumerate}
\item[(i')] $\kappa_i(x)=\kappa_{s-i+1}(g_0^{-1}x^{-1})$ for all $x\in G/T$ and all $i$;
\item[(ii')] either $s$ is even or $s$ is odd and $\kappa_{\frac{s+1}{2}}(gT)$ is even whenever $g_0g^2\in T$ and $\eta(g_0g^2)=-1$ for some $g\in G$.
\end{enumerate}
Note that condition (i') implies that $n_i=|\kappa_i|\ell=|\kappa_{s-i+1}|\ell=n_{s-i+1}$, so Type II gradings on $U(\mathscr{F})_0$ can exist only if $n_i=n_{s-i+1}$ for all $i$, as expected from the structure of the automorphism group (see Lemma \ref{aut_Lie}).
Let us describe explicitly a Type II grading on $U(\mathscr{F})_0$ in the isomorphism class parametrized by $(T,\beta,g_0,(\kappa_1,\ldots,\kappa_s))$. For each $1\le i<\frac{s+1}{2}$, we fill two $|\kappa_i|$-tuples, $\gamma_i$ and $\gamma_{s-i+1}$, simultaneously as follows, going from left to right in $\gamma_i$ and from right to left in $\gamma_{s-i+1}$. For each coset $x\in G/T$ that lies in the support of $\kappa_i$, we choose an element $g\in x$ and place $\kappa_i(x)$ copies of $g$ into $\gamma_i$ and as many copies of $g_0^{-1}g^{-1}$ into $\gamma_{s-i+1}$. If $s$ is odd, we fill the middle $|\kappa_i|$-tuple $\gamma_i$, with $i=\frac{s+1}{2}$, in the following manner: $\gamma_i$ will be the concatenation of (possibly empty) tuples $\gamma^\triangleleft$, $\gamma^+$, $\gamma^0$, $\gamma^-$ and $\gamma^\triangleright$ (in this order), where $\gamma^\triangleleft$ and $\gamma^+$ are to be filled from left to right, $\gamma^-$ and $\gamma^\triangleright$ from right to left, and $\gamma^0$ in any order. For each $x$ in the support of $\kappa_i$, we choose an element $g\in x$. If $g_0g^2\notin T$, we place $\kappa_i(x)$ copies of $g$ into $\gamma^\triangleleft$ and as many copies of $g_0^{-1}g^{-1}$ into $\gamma^\triangleright$. If $g_0g^2\in T$ and $\eta(g_0g^2)=-1$, we place $\frac12\kappa_i(x)$ copies of $g$ in each of $\gamma^+$ and $\gamma^-$. Finally, if $g_0g^2\in T$ and $\eta(g_0g^2)=1$, we place $\kappa_i(x)$ copies of $g$ into $\gamma^0$. Concatenating these $\gamma_1,\ldots,\gamma_s$ results in a $k$-tuple $\gamma=(g_1,\ldots,g_k)$ of elements of $G$. Taking them modulo $\langle f\rangle$, we define a $\overline{G}$-grading on $M_k$ and, consequently, on $M_n\simeq M_k\otimes D$, so $M_n=\bigoplus_{\bar{g}\in\overline{G}}R_{\bar{g}}$. Then we construct a matrix $\Phi\in M_k(D)\simeq M_k\otimes D$ as follows:
\begin{equation}\label{Phi}
\begin{split}
\Phi&=\mathrm{diag}(\chi(g_1^{-1})I_\ell,\ldots,\chi(g_p^{-1})I_\ell)\oplus\mathrm{diag}(X_{\bar{g}_0\bar{g}_{p+1}^2},\ldots,
X_{\bar{g}_0\bar{g}_{p+q}^2})\\
&\oplus\widetilde{\mathrm{diag}}(X_{\bar{g}_0\bar{g}_{p+q+1}^2},\ldots,X_{\bar{g}_0\bar{g}_{k-p-q}^2})\\
&\oplus\mathrm{diag}(-X_{\bar{g}_0\bar{g}_{k-p-q+1}^2},\ldots,-X_{\bar{g}_0\bar{g}_{k-p}^2})\oplus\mathrm{diag}(\chi(g_{k-p+1}^{-1})I_\ell,\ldots,\chi(g_k^{-1})I_\ell),
\end{split}
\end{equation}
where $p$ is the sum of the lengths of $\gamma_1,\ldots,\gamma_{\lfloor\frac{s}2\rfloor}$ and $\gamma^\triangleleft$, $q$ is the length of $\gamma^+$, and $\widetilde{\mathrm{diag}}$ denotes the arrangement of entries along the secondary diagonal (from left to right).
Finally, we use $\Phi$ to define a $G$-grading on $M_n^{(-)}$:
\begin{equation}\label{Type2Lie}
M_n^{(-)}=\bigoplus_{g\in G}R_g\text{ where }R_g=\{X\in R_{\bar{g}}\mid\Phi^{-1}X^\tau\Phi=-\chi(g)X\},
\end{equation}
which restricts to the desired grading on $U(\mathscr{F})_0$.
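The tuple-filling recipe above is mechanical, and the following Python sketch (ours, purely illustrative) records the bookkeeping. It models $G$ additively as $\mathbb{Z}_M$, so that $g^{-1}$ becomes $-g$ and $g_0g^2$ becomes $g_0+2g$, and it takes the subgroup $T$, the quadratic form $\eta$ and the chosen coset representatives as given data; it does not attempt to reproduce the construction of $\Phi$.
\begin{verbatim}
# Schematic sketch of the tuple-filling step (illustration only).
# Assumptions: G = Z_M written additively; T is a subgroup of Z_M given as a set;
# eta is a dict on T with values +1/-1; kappas[i] maps a chosen coset
# representative g to the multiplicity kappa_{i+1}(g + T).
def fill_gammas(kappas, g0, M, T, eta):
    s = len(kappas)
    gamma = [[] for _ in range(s)]
    for i in range(s // 2):               # the pairs (gamma_{i+1}, gamma_{s-i})
        for g, mult in kappas[i].items():
            gamma[i] += [g] * mult                                        # left to right
            gamma[s - 1 - i] = [(-g0 - g) % M] * mult + gamma[s - 1 - i]  # right to left
    if s % 2 == 1:                        # the middle tuple, 1-based index (s+1)/2
        left, plus, zero, minus, right = [], [], [], [], []
        for g, mult in kappas[s // 2].items():
            t = (g0 + 2 * g) % M          # g_0 g^2 in additive notation
            if t not in T:
                left += [g] * mult
                right = [(-g0 - g) % M] * mult + right
            elif eta[t] == -1:            # mult is even by condition (ii')
                plus += [g] * (mult // 2)
                minus += [g] * (mult // 2)
            else:
                zero += [g] * mult
        gamma[s // 2] = left + plus + zero + minus + right
    return gamma

# Toy run: G = Z_4, T = {0, 2}, g0 = 0, s = 3.
print(fill_gammas([{1: 1}, {0: 2, 1: 2}, {3: 1}], g0=0, M=4, T={0, 2},
                  eta={0: 1, 2: -1}))
\end{verbatim}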
Thus we obtain the following classification of $G$-gradings on $U(\mathscr{F})_0$ from our Theorem \ref{th:main_Lie} and the known classification for $\mathfrak{sl}_n$ (as stated in \cite[Theorem 45]{BKE2018} and \cite[Theorem 3.53]{EK2013}).
\begin{Cor}\label{cor:main_Lie}
Every grading on $U(\mathscr{F})_0$ by an abelian group $G$ is isomorphic either to a Type I grading with parameters $(T,\beta,(\kappa_1,\ldots,\kappa_s))$, where $|\kappa_i|\sqrt{|T|}=n_i$, or to a Type II grading with parameters $(T,\beta,g_0,(\kappa_1,\ldots,\kappa_s))$, where $|\kappa_i|\sqrt{|T|/2}=n_{i}$ and $T$ is $2$-elementary. Type II gradings can occur only if $n>2$ and $n_i=n_{s-i+1}$ for all $i$, and their parameters are subject to the conditions (i') and (ii') above. Moreover, gradings of Type I are not isomorphic to gradings of Type II, and within each type we have the following:
\begin{enumerate}
\item[(I)] $(T,\beta,(\kappa_1,\ldots,\kappa_s))$ and $(T',\beta',(\kappa_1',\ldots,\kappa_s'))$ determine the same isomorphism class if and only if $T'=T$ and there exists $g\in G$ such that either $\beta'=\beta$ and $\kappa_i'=g\kappa_i$ for all $i$, or $n>2$, $\beta'=\beta^{-1}$ and $\kappa_i'=g\bar{\kappa}_{s-i+1}$ for all $i$, where $\bar{\kappa}(x):=\kappa(x^{-1})$ for all $x\in G/T$.
\item[(II)] $(T,\beta,g_0,(\kappa_1,\ldots,\kappa_s))$ and $(T',\beta',g_0',(\kappa_1',\ldots,\kappa_s'))$ determine the same isomorphism class if and only if $T'=T$, $\beta'=\beta$, and there exists $g\in G$ such that $g'_0=g^{-2}g_0$ and $\kappa'_i=g\kappa_i$ for all $i$.\qed
\end{enumerate}
\end{Cor}
\section{Commutativity of the grading group}\label{commut_supp}
Our immediate goal is to prove Lemma \ref{can_Lie}. The arguments will work without assuming a priori that the grading group is abelian, and, in fact, our second goal will be to prove that the elements of the support of any group grading on $U(\mathscr{F})_0$ must commute with each other. It will be more convenient to make computations in $U(\mathscr{F})^{(-)}$. So, suppose $U(\mathscr{F})^{(-)}$ is graded by an arbitrary group $G$. We still assume that $\mathrm{char}\,\mathbb{F}=0$, but $\mathbb{F}$ need not be algebraically closed.
Write $U(\mathscr{F})=\bigoplus_{1\le i\le j\le s}B_{ij}$, where each $B_{ij}$ is the set of matrices with non-zero entries only in the $(i,j)$-th block. Thus, $J_m=B_{1,m+1}\oplus B_{2,m+2}\oplus\cdots\oplus B_{s-m,s}$ for all $m\in\{0,1,\ldots,s-1\}$. It is important to note that $[J_1,J_{m}]=J_{m+1}$ and hence the Lie powers of the Jacobson radical $J=\bigoplus_{m>0}J_m$ coincide with its associative powers.
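This identity is easy to confirm numerically on a small example; the following Python sketch (ours; the block sizes $(2,1,2)$ are an arbitrary choice) checks that the span of the commutators $[J_1,J_m]$ coincides with $J_{m+1}$ in $UT(2,1,2)$.
\begin{verbatim}
import numpy as np
from itertools import product

sizes = [2, 1, 2]                  # a hypothetical small example: UT(2,1,2), s = 3
s, n = len(sizes), sum(sizes)
starts = np.cumsum([0] + sizes)

def block_of(r):                   # index of the diagonal block containing row/column r
    return max(b for b in range(s) if starts[b] <= r)

def J_basis(m):                    # elementary-matrix basis of J_m (m-th block-diagonal)
    basis = []
    for r, c in product(range(n), repeat=2):
        if block_of(c) - block_of(r) == m:
            E = np.zeros((n, n)); E[r, c] = 1.0
            basis.append(E)
    return basis

def span_dim(mats):
    return 0 if not mats else np.linalg.matrix_rank(np.array([M.flatten() for M in mats]))

for m in range(1, s):
    comms = [X @ Y - Y @ X for X in J_basis(1) for Y in J_basis(m)]
    target = J_basis(m + 1)
    assert span_dim(comms) == len(target)            # [J_1, J_m] has the right dimension...
    assert span_dim(comms + target) == len(target)   # ...and lies inside J_{m+1}
print("[J_1, J_m] = J_{m+1} verified for UT(2,1,2)")
\end{verbatim}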
Let $e_i\in B_{ii}$ be the identity matrix of each diagonal block and let
\[
\mathfrak{d}=\mathrm{Span}\{e_1,e_2,\ldots,e_s\}.
\]
We can write $B_{ii}=\mathfrak{s}_i\oplus \mathbb{F}e_i$, where $\mathfrak{s}_i=[B_{ii},B_{ii}]\simeq\mathfrak{sl}_{n_i}$. Let $S=\bigoplus_{i=1}^s\mathfrak{s}_{i}$ and $R=\mathfrak{d}\oplus J$. Then $U(\mathscr{F})^{(-)}=S\oplus R$ is a Levi decomposition.
We will need the following graded version of Levi decomposition, which was established in \cite{PRZ2013} and then improved in \cite{Gord2016} by weakening the conditions on the ground field:
\begin{Thm}[{\cite[Corollaries 4.2 and 4.3]{Gord2016}}]\label{thm_gord}
Let $L$ be a finite-dimensional Lie algebra over a field $\mathbb{F}$ of characteristic $0$, graded by an arbitrary group $G$.
Then the radical $R$ of $L$ is graded and there exists a maximal semisimple subalgebra $B$ such that $L=B\oplus R$ (direct sum of graded subspaces).\qed
\end{Thm}
\begin{Cor}\label{Levi}
Consider any $G$-grading on $U(\mathscr{F})^{(-)}$. Then the ideal $R$ is graded. Moreover, there exists an isomorphic $G$-grading on $U(\mathscr{F})^{(-)}$ such that $S$ is also graded.
\end{Cor}
\begin{proof}
By Theorem \ref{thm_gord}, there exists a graded Levi decomposition $U(\mathscr{F})^{(-)}=B\oplus R$. But $U(\mathscr{F})^{(-)}=S\oplus R$ is another Levi decomposition, so, by Malcev's Theorem (see e.g. \cite[Corollary 2 on p.~93]{Jac1979}), there exists an (inner) automorphism $\psi$ of $U(\mathscr{F})^{(-)}$ such that $\psi(B)=S$. Applying $\psi$ to the given $G$-grading on $U(\mathscr{F})^{(-)}$, we obtain a new $G$-grading on $U(\mathscr{F})^{(-)}$ with respect to which $S$ is graded.
\end{proof}
\begin{Lemma}\label{part_diag}
For any $G$-grading on $U(\mathscr{F})^{(-)}$, there exists an isomorphic $G$-grading such that the subalgebras $\mathfrak{d}$ and $S$ are graded.
\end{Lemma}
\begin{proof}
We partition $\{1,\ldots,s\}=\{i_1,\ldots,i_r\}\cup\{j_1,\ldots,j_{s-r}\}$ so that $n_{i_k}=1$ and $n_{j_k}>1$. Denote $e_{\triangle}=\sum_{k=1}^r e_{i_k}$; then $e_{\triangle}U(\mathscr{F})e_{\triangle}\simeq UT_r$, the algebra of $r\times r$ upper triangular matrices (if $r>0$).
By Corollary \ref{Levi}, we may assume that $S$ is graded. Then its centralizer in $R$, $N:=\mathrm{C}_R(S)$, is a graded subalgebra. It coincides with $\mathrm{Span}\{e_{j_1},\ldots,e_{j_{s-r}}\}\oplus e_{\triangle}U(\mathscr{F})e_{\triangle}$, and its center (which is also graded) coincides with $\mathrm{Span}\{e_{j_1},\ldots,e_{j_{s-r}},e_{\triangle}\}$.
If $r=0$, then $N=\mathfrak{d}$ and we are done. Assume $r>0$. Then we obtain a $G$-grading on $N/\mathfrak{z}(N)\simeq UT_r^{(-)}/\mathbb{F} {1}\simeq (UT_r)_0$.
These gradings were classified in \cite{pkfy2017}, where it was shown that, after applying an automorphism of $UT_r^{(-)}$, the subalgebra of diagonal matrices in $UT_r^{(-)}$ is graded. Since $-\tau$ preserves this subalgebra, we may assume that the automorphism in question is inner. But an inner automorphism of $e_{\triangle}U(\mathscr{F})e_{\triangle}$ can be extended to an inner automorphism of $U(\mathscr{F})$. {Indeed, let $y$ be an invertible element of $e_{\triangle}U(\mathscr{F})e_{\triangle}$.} Then $x=\sum_{k=1}^{s-r} e_{j_k}+y\in U(\mathscr{F})^\times$ and $\mathrm{Int}(x)$ extends $\mathrm{Int}(y)$. Moreover, $\mathrm{Int}(x)$ preserves $S$. Therefore, we may assume that the subalgebra of diagonal matrices in $N/\mathfrak{z}(N)$ is graded. But the inverse image of this subalgebra in $N$ is precisely $\mathfrak{d}$, so $\mathfrak{d}$ is graded.
\end{proof}
It will be convenient to use the following technical concept:
\begin{Def}
Let $L$ be a $G$-graded Lie algebra. We call $x\in L$ \textit{semihomogeneous} if $x=x_h+x_z$, with $x_h$ homogeneous and $x_z\in\mathfrak{z}(L)$. If $x_h\notin\mathfrak{z}(L)$, we define the \emph{degree} of $x$ as $\deg x_h$ and denote it by $\deg x$.
\end{Def}
An important observation is that if $x$ and $y$ are semihomogeneous and $[x,y]\ne 0$, then $[x,y]$ is homogeneous of degree $\deg x\deg y$ (as $[x,y]$ will coincide with $[x_h,y_h]$).
\begin{Prop}\label{diagonal}
For any $G$-grading on $U(\mathscr{F})^{(-)}$, there exists an isomorphic $G$-grading with the following properties:
\begin{enumerate}
\item[(i)] the subalgebras $\mathfrak{s}_{k}+\mathfrak{s}_{s-k+1}$ are graded,
\item[(ii)] the elements $e_k-e_{s-k+1}$ ($k\ne\frac{s+1}{2}$) are semihomogeneous of degree $1_G$, and
\item[(iii)] the elements $e_k+e_{s-k+1}$ are semihomogeneous of degree $f$ (if $s>2$), where $f\in G$ is an element of order at most $2$.
\end{enumerate}
\end{Prop}
\begin{proof}
By Lemma \ref{part_diag}, we may assume that $S$ and $\mathfrak{d}$ are graded subalgebras. Also note that $J=[R,R]$ and all of its powers are graded ideals. We proceed by induction on $s$. If $s=1$, then $\mathfrak{s}_1=S$ is graded and there is nothing more to prove. If $s=2$, then $\mathfrak{s}_{1}\oplus\mathfrak{s}_{2}=S$ is graded. Also, $\mathrm{Span}\{e_1,e_2\}=\mathfrak{d}$ and $e_1+e_2={1}$ is central, so $e_1-e_2$ is a semihomogeneous element. Its degree must be equal to $1_G$, because $[e_1-e_2,x]=2x$ for any $x\in J=B_{12}$. Now assume $s>2$.
\textbf{Claim 1}: $N:=B_{11}\oplus B_{ss}\oplus\mathbb{F}{1}\oplus J$ is graded.
First suppose $s\ge 4$. Consider $J^{s-2}=J_{s-2}\oplus J_{s-1}$ (the three blocks in the top right corner) and the graded ideal $C:=\mathrm{C}_R(J^{s-2})=R\cap\mathrm{C}_{U(\mathscr{F})^{(-)}}(J^{s-2})$. It is easy to see that
\[
C=\mathrm{Span}\{e_2,\ldots,e_{s-1}\}\oplus\mathbb{F}{1}\oplus B_{23}\oplus\cdots\oplus B_{s-2,s-1}\oplus J^2.
\]
Now, the adjoint action induces on $C/J^2$ a natural structure of a graded $U(\mathscr{F})^{(-)}$-module, and one checks that $N=\mathrm{Ann}_{U(\mathscr{F})^{(-)}}(C/J^2)+J$, so $N$ is graded.
If $s=3$, then consider $J^2=J_2=B_{13}$ and the graded ideal $\tilde{C}:=\mathrm{C}_{U(\mathscr{F})^{(-)}}(J^2)$. One checks that
\[
\tilde{C}=B_{22}\oplus\mathbb{F}{1}\oplus J,
\]
and hence $N=\mathrm{Ann}_{U(\mathscr{F})^{(-)}}(\tilde{C}/J)$. This completes the proof of Claim 1.
It follows that $S\cap N=\mathfrak{s}_{1}\oplus\mathfrak{s}_{s}$ is a graded subalgebra, and
\[
I_1:=\mathfrak{d}\cap N=\mathrm{Span}\{e_1,e_s,{1}\}
\]
is graded as well. Hence, $\mathrm{C}_{I_1}(J^{s-1})=\mathrm{Span}\{e_1+e_s,{1}\}$ is graded, so we conclude that $e_1+e_s$ is semihomogeneous. Denote its degree by $f$.
\textbf{Claim 2}: $f^2=1_G$ and $e_1-e_s$ is semihomogeneous of degree $1_G$.
Since $I_1/\mathbb{F}{1}$ is spanned by the images of $e_1$ and $e_s$, there must exist a semihomogeneous linear combination $\tilde{e}$ of $e_1$ and $e_s$ that is not a scalar multiple of $e_1+e_s$. Consider the graded $I_1$-module $J^{s-2}/J^{s-1}$. As a module, it is isomorphic to $B_{1,s-1}\oplus B_{2,s}$, where ${1}$ acts as $0$, $e_1$ as the identity on the first summand and $0$ on the second, and $e_s$ as $0$ on the first and the negative identity on the second. Using this isomorphism, we will write the elements $x\in J^{s-2}/J^{s-1}$ as $x=x_1+x_2$ with $x_1\in B_{1,s-1}$ and $x_2\in B_{2,s}$. Since the situation is symmetric in $e_1$ and $e_s$, we may assume without loss of generality that $\tilde{e}=e_1+\alpha e_s$, $\alpha\ne 1$. Pick a homogeneous element $x=x_1+x_2$ with $x_1\ne 0$. First, we observe that $(e_1+e_s)\cdot((e_1+e_s)\cdot x)=x$, which implies $f^2=1_G$. If $x_2=0$, then $\tilde{e}\cdot x=(e_1+e_s)\cdot x=x$, and this implies that the semihomogeneous elements $\tilde{e}$ and $e_1+e_s$ both have degree $1_G$, which proves the claim. If $\alpha=0$, then $\tilde{e}\cdot x=x_1-\alpha x_2=x_1$ is homogeneous and we can apply the previous argument. So, we may assume that $\alpha\ne 0$.
Suppose for a moment that we have $\deg\tilde{e}=1_G$. If $\alpha=-1$, we are done. Otherwise, we can consider the homogeneous element $0\ne x+\alpha^{-1}\tilde{e}\cdot x\in B_{1,s-1}$ and apply the previous argument again.
It remains to prove that $\deg\tilde{e}=1_G$. Denote this degree by $g$ and assume $g\ne 1_G$. Considering
\[
D:=\mathrm{Span}\{x,\tilde{e}\cdot x,\tilde{e}\cdot(\tilde{e}\cdot x),\ldots\},
\]
we see, on the one hand, that $\dim D\le2$, because $D\subset\mathrm{Span}\{x_1,x_2\}$. On the other hand, non-zero homogeneous elements of distinct degrees are linearly independent, so the order of $g$ does not exceed $2$. By our assumption, it must be equal to $2$. Then $x$ and $\tilde{e}\cdot x$ form a basis of $D$ and $y:=\tilde{e}\cdot(\tilde{e}\cdot x)$ has the same degree as $x$. Therefore, $y=\lambda x$ for some $\lambda\ne0$. On the other hand, $y=x_1+\alpha^2 x_2$, hence $\alpha=\pm1$. The case $\alpha=1$ is excluded, whereas $\alpha=-1$ implies $\tilde{e}\cdot x=x$, which contradicts $g\ne 1_G$. The proof of Claim 2 is complete.
We have established all assertions of the proposition for $k=1$. We are going to use the induction hypothesis for $k>1$. To this end, let $e:={1}-(e_1+e_s)$ and consider $eU(\mathscr{F})e\simeq UT(n_2,\ldots,n_{s-1})$. Observe that the operator $\mathrm{ad}(e_1-e_s)$ on $U(\mathscr{F})^{(-)}$ preserves degree and acts as $0$ on $B_{11}\oplus eU(\mathscr{F})e\oplus B_{ss}$, as the identity on the blocks $B_{12},\ldots,B_{1,s-1}$ and $B_{2s},\ldots,B_{s-1,s}$ and as $2$ times the identity on $B_{1s}$. It follows that
\begin{align*}
T_1:&=\Big(\mathrm{id}-\frac12\mathrm{ad}(e_1-e_s)\Big)\Big(\mathrm{id}-\mathrm{ad}(e_1-e_s)\Big)U(\mathscr{F})^{(-)}\\
&=B_{11}\oplus eU(\mathscr{F})e\oplus B_{ss},
\end{align*}
is a graded subspace.
Hence, $L_1:=\mathrm{C}_{T_1}(J^{s-1})=\mathbb{F}(e_1+e_s)\oplus eU(\mathscr{F})e$ is graded and we can apply the induction hypothesis to $L_1/\mathbb{F}(e_1+e_s)\simeq UT(n_2,\ldots,n_{s-1})$. Therefore, for $1<k\le\frac{s+1}{2}$, the subalgebras $\mathbb{F}(e_1+e_s)\oplus(\mathfrak{s}_k+\mathfrak{s}_{s-k+1})\subset L_1$ are graded, the elements $e_k+e_{s-k+1}$ are semi\-homo\-ge\-neous of degree $f'$ in $L_1$ (if $s>4$), and the elements $e_k-e_{s-k+1}$ ($k\ne\frac{s+1}{2}$) are semihomogeneous of degree $1_G$ in $L_1$. {For the subalgebras, we can get rid of the unwanted term $\mathbb{F}(e_1+e_s)$ by passing to the derived algebra, so we conclude that $\mathfrak{s}_k+\mathfrak{s}_{s-k+1}$ are graded. For the elements, since $\mathfrak{z}(L_1)=\mathbb{F}(e_1+e_s)\oplus\mathbb{F}1$, we also have to get rid of $\mathbb{F}(e_1+e_s)$ before we can conclude that they are semihomogeneous in $U(\mathscr{F})^{(-)}$.}
\textbf{Claim 3}: $e_k+e_{s-k+1}$ are semihomogeneous of degree $f$ in $U(\mathscr{F})^{(-)}$.
If $s=3$, then $e_2={1}-(e_1+e_3)$ is semihomogeneous of degree $f$. If $s=4$, then $e_2+e_{s-1}={1}-(e_1+e_s)$ is semihomogeneous of degree $f$. So, assume $s>4$. By the above paragraph, we know there exist $\alpha_k$ such that $\alpha_k(e_1+e_s)+e_k+e_{s-k+1}$ are semihomogeneous of degree $f'$ in $U(\mathscr{F})^{(-)}$. If $\alpha_2=0$, then pick a non-zero homogeneous element $x\in J^{s-2}/J^{s-1}$. Since $(e_1+e_s)\cdot x=-(e_2+e_{s-1})\cdot x\ne 0$, we conclude that $f=f'$ and the claim follows, because we can subtract the scalar multiples of $e_1+e_s$ from the elements $\alpha_k(e_1+e_s)+e_k+e_{s-k+1}$. If $\alpha_2\ne0$, consider instead the graded $U(\mathscr{F})^{(-)}$-module $([e_1-e_s,J^2]+J^3)/J^3$. As a module, it is isomorphic to $B_{13}\oplus B_{s-2,s}$, so $e_2+e_{s-1}$ annihilates it. Picking a non-zero homogeneous element $x$, we get
\[
(\alpha_2(e_1+e_s)+e_2+e_{s-1})\cdot x=\alpha_2(e_1+e_s)\cdot x\ne0,
\]
so again $f=f'$ and the claim follows.
\textbf{Claim 4}: $e_k-e_{s-k+1}$ are semihomogeneous of degree $1_G$ in $U(\mathscr{F})^{(-)}$.
We know there exist $\alpha'_k$ such that $\alpha'_k(e_1+e_s)+e_k-e_{s-k+1}$ are semihomogeneous of degree $1_G$ in $U(\mathscr{F})^{(-)}$. If $f=1_G$, then we can subtract the scalar multiples of $e_1+e_s$, so we are done. If $f\ne 1_G$, we want to prove that $\alpha'_k=0$. By way of contradiction, assume $\alpha'_k\ne 0$. If $k<\frac{s}{2}$, then $e_k-e_{s-k+1}$ annihilates the graded module $([e_1-e_s,J^k]+J^{k+1})/J^{k+1}$, so, using the argument in the proof of Claim~3, we conclude that $\deg(e_1+e_s)=1_G$, a contradiction. It remains to consider the case $s=2k$. If $s>4$, then $e_{s/2}-e_{s/2+1}$ annihilates the graded module $([e_1-e_s,J]+J^2)/J^2$, which is isomorphic to $B_{12}\oplus B_{s-1,s}$, so the same argument works. If $s=4$, then $e_2-e_3$ does not annihilate this module, but acts on it as the negative identity. Picking a non-zero homogeneous element $x$, we get
\[
x+(\alpha'_2(e_1+e_s)+e_2-e_3)\cdot x=\alpha'_2(e_1+e_s)\cdot x\ne 0,
\]
so again $\deg(e_1+e_s)=1_G$, a contradiction.
The proof of the proposition is complete.
\end{proof}
\begin{proof}[Proof of Lemma \ref{can_Lie}]
We extend a given $G$-grading on $U(\mathscr{F})_0$ to $U(\mathscr{F})^{(-)}$ by defining the degree of ${1}$ arbitrarily. Then $U(\mathscr{F})_0\simeq U(\mathscr{F})^{(-)}/\mathbb{F}{1}$ as a graded algebra.
By Lemma \ref{part_diag}, we may assume that $\mathfrak{d}$ and $S$ are graded, hence the subalgebra $J_0=\mathfrak{d}\oplus S$ and its homomorphic image $J_0/\mathbb{F}{1}\simeq J_0\cap U(\mathscr{F})_0$ in $U(\mathscr{F})_0$ are graded. (In fact, by Proposition \ref{diagonal}, we can say more: every subalgebra $B_{ii}+B_{s-i+1,s-i+1}+\mathbb{F}{1}$ is graded.) To deal with $J_m$ for $m>0$, we will use the semihomogeneous elements $d_i:=e_i-e_{s-i+1}$ of degree $1_G$ ($i\ne \frac{s+1}{2}$). Fix $i<j$. If $i+j\ne s+1$, then
\[
B_{ij}\oplus B_{s-j+1,s-i+1}=\mathrm{ad}(d_i-d_j)\mathrm{ad}(d_i)\mathrm{ad}(d_j)U(\mathscr{F})^{(-)},
\]
which is a graded subspace. If $i+j=s+1$, then
\[
B_{ij}=(\mathrm{id}-\mathrm{ad}(d_i))\mathrm{ad}(d_i)J^{s-i+1}
\]
is graded. Thus, $B_{ij}+B_{s-j+1,s-i+1}$ is graded for all $i<j$, hence so is $J_m$.
\end{proof}
Now, we proceed to prove that the support of any $G$-grading on $U(\mathscr{F})_0$ is a \emph{commutative subset} of $G$ in the sense that its elements commute with each other. The key observation is that, if $x$ and $y$ are homogeneous elements in any $G$-graded Lie algebra and $[x,y]\ne 0$, then $\deg x$ must commute with $\deg y$. By induction, one can generalize this as follows: if $x_1,\ldots,x_k$ are homogeneous and $[\ldots[x_1,x_2],\ldots,x_k]\ne 0$ then the degrees of $x_i$ must commute pair-wise. This fact was used to show that the support of any graded-simple Lie algebra is commutative (see e.g. \cite[Proposition 2.3]{PRZ2013} or the proof of Proposition 1.12 in \cite{EK2013}). We will need the following two lemmas.
\begin{Lemma}\label{cyclic}
Suppose a semidirect product of Lie algebras $V\rtimes L$ is graded by a group $G$ in such a way that both the ideal $V$ and the subalgebra $L$ are graded. Assume that the support of $L$ is commutative and, as an $L$-module, $V$ is faithful and generated by a single homogeneous element. Then the support of $V\rtimes L$ is commutative.
\end{Lemma}
\begin{proof}
Let $v$ be a homogeneous generator of $V$ as an $L$-module and let $g=\deg v$. Denote by $H$ the abelian subgroup generated by $\mathrm{Supp}\,L$. Then $\mathrm{Supp}\,V$ is contained in the coset $Hg$. In particular, the subgroup generated by $\mathrm{Supp}\,(V\rtimes L)$ is also generated by $H$ and $g$, so it is sufficient to prove that $g$ commutes with all elements of $\mathrm{Supp}\,L$. Let $a\ne 0$ be a homogeneous element of $L$. Since $V$ is faithful, there exists a homogeneous element $w\in V$ such that $a\cdot w\ne 0$. But, in the semidirect product, $a\cdot w=[a,w]$, hence $\deg a$ and $\deg w$ commute. Since $\deg a\in H$, $\deg w\in Hg$, and $H$ is abelian, we conclude that $\deg a$ commutes with $g$.
\end{proof}
{
\begin{Lemma}\label{cyclic2}
Let $L=L_1\times\cdots\times L_k$ and suppose the semidirect product $V\rtimes L$ is graded by a group $G$ in such a way that $V$ and each subalgebra $L_i$ are graded. Assume that $V$ is graded-simple as an $L$-module and, for each $i$, $\mathrm{Supp}\,L_i$ is commutative and $V$ is faithful as an $L_i$-module. Then the support of $V\rtimes L$ is commutative.
\end{Lemma}
\begin{proof}
One checks that, if we redefine the bracket on the ideal $V$ to be zero while keeping the same bracket on the subalgebra $L$ and the same $L$-module structure on $V$, the resulting semidirect product is still $G$-graded, so we may suppose $[V,V]=0$.
Let $v$ be any non-zero homogeneous element of $V$ (hence a generator of $V$ as an $L$-module). Let $W_i$ be the $L_i$-submodule generated by $v$. Since the actions of $L_i$ and $L_j$ on $V$ commute with each other for all $j\ne i$, $W_i$ must be a faithful $L_i$-module, so we can apply Lemma \ref{cyclic} to the graded subalgebra $W_i\rtimes L_i$ and conclude that $\deg v$ commutes with the elements of $\mathrm{Supp}\,L_i$ for each $i$. It remains to prove that the elements of $\mathrm{Supp}\,L_i$ commute with the elements of $\mathrm{Supp}\,L_j$ for $j\ne i$. Let $a\ne 0$ be a homogeneous element of $L_i$. Pick a homogeneous $v\in V$ such that $v':=a\cdot v\ne0$ and denote $g=\deg v$ and $g'=\deg v'$. By the previous argument, both $g$ and $g'$ commute with every element of $\mathrm{Supp}\,L_j$. But this implies that $\deg a$ commutes with every element of $\mathrm{Supp}\,L_j$.
\end{proof}
}
\begin{Thm}\label{supp_commutativity}
The support of any group grading on $U(\mathscr{F})_0$ over a field of charac\-te\-ristic $0$ generates an abelian subgroup.
\end{Thm}
\begin{proof}
The result is known for simple Lie algebras, so we assume $s>1$.
We extend the grading to $U(\mathscr{F})^{(-)}$ and bring it to the form described in Proposition \ref{diagonal}. Then, as in the proof of Lemma \ref{can_Lie} just above, we can break $J$ into the direct sum of graded subspaces of the form $B_{ij}\oplus B_{s-j+1,s-i+1}$ ($i+j\ne s+1$) or $B_{ij}$ ($i+j=s+1$), for all $1\le i<j\le s$. Also, $\tilde{\mathfrak{s}}_i:=\mathfrak{s}_i+\mathfrak{s}_{s-i+1}$ are graded subalgebras (possibly zero). Note that any non-zero $\tilde{\mathfrak{s}}_i$ is graded-simple and, therefore, its support is commutative, except in the following situation: $i\ne\frac{s+1}{2}$ and one of the ideals $\mathfrak{s}_i$ and $\mathfrak{s}_{s-i+1}$ is graded. In this case, the other ideal is graded, too, being the centralizer of the first in $\tilde{\mathfrak{s}}_i$, and we can apply Lemma \ref{cyclic2} to the graded algebra $B_{i,s-i+1}\oplus\tilde{\mathfrak{s}}_i\simeq B_{i,s-i+1}\rtimes(\mathfrak{s}_i\times\mathfrak{s}_{s-i+1})$ to conclude that the support of $\tilde{\mathfrak{s}}_i$ is still commutative. Moreover, its elements commute with those of $\mathrm{Supp}\,B_{i,s-i+1}$, so we are done in the case $s=2$. From now on, assume $s>2$. {Let $f$ be the element of $G$ as in Proposition \ref{diagonal}.}
\textbf{Case 1}: $f=1_G$.
Here each block $B_{ij}$ and each subalgebra $\mathfrak{s}_i$ is graded. Indeed, each element $e_i$ is semihomogeneous of degree $1_G$. If $i+j=s+1$, then we already know that $B_{ij}$ is graded, and otherwise $B_{ij}=\mathrm{ad}(e_i)(B_{ij}\oplus B_{s-j+1,s-i+1})$, so it is still graded. For $\tilde{\mathfrak{s}}_i$, it is sufficient to consider $i\le\frac{s+1}{2}$. If $i=\frac{s+1}{2}$, then we already know that $\mathfrak{s}_i$ is graded, and otherwise we can find $j>i$ such that $j\ne s-i+1$, which implies that $\mathfrak{s}_i=\mathrm{C}_{\tilde{\mathfrak{s}}_i}(B_{ij})$ is still graded.
Applying Lemma \ref{cyclic2} to $B_{ij}\rtimes (\mathfrak{s}_i\times\mathfrak{s}_j)$, we conclude that the supports of non-zero $\mathfrak{s}_i$ and $\mathfrak{s}_j$ commute element-wise with one another and also with $\mathrm{Supp}\,B_{ij}$. (This works even if one of $\mathfrak{s}_i$ and $\mathfrak{s}_j$ is zero.) It follows that $\mathrm{Supp}\,S$ generates an abelian subgroup $H$ in $G$. It also commutes element-wise with $\mathrm{Supp}\,J$. Indeed, since $\mathrm{Supp}\,B_{ij}$ is contained in a coset of $H$, it is sufficient to prove that the degree of one non-zero homogeneous element of $B_{ij}$ commutes with the elements of $\mathrm{Supp}\,\mathfrak{s}_k$. We already know this if $k=i$ or $k=j$. Otherwise, we will have $k<i<j$, $i<k<j$ or $i<j<k$. In the last case, we have $[B_{ij},B_{jk}]=B_{ik}$, so we can find homogeneous elements $x\in B_{ij}$ and $y\in B_{jk}$ such that $0\ne [x,y]\in B_{ik}$. Since the elements of $\mathrm{Supp}\,\mathfrak{s}_k$ commute with $\deg y$ and with $\deg x\deg y$, they must commute with $\deg x$ as well. The other two cases are treated similarly.
It remains to prove that $\mathrm{Supp}\,J$ is commutative. Since $J_1$ generates $J$ as a Lie algebra, it is sufficient to prove that, for any $1\le i\le j\le s-1$, the sets $\mathrm{Supp}\,B_{i,i+1}$ and $\mathrm{Supp}\,B_{j,j+1}$ commute with one another element-wise. But we can find homogeneous elements $x_1\in B_{12},\,x_2\in B_{23},\ldots,x_{s-1}\in B_{s-1,s}$ such that $[\ldots[x_1,x_2],\ldots,x_{s-1}]\ne 0$, so the degrees of $x_1,x_2,\ldots,x_{s-1}$ must commute pair-wise. The coset argument completes the proof of Case~1.
\textbf{Case 2}: $f\ne 1_G$.
Here we work with $\tilde{B}_{ij}:=B_{ij}+B_{s-j+1,s-i+1}$. If $\tilde{\mathfrak{s}}_i$ and $\tilde{\mathfrak{s}}_j$ are distinct (that is, $i+j\ne s+1$) and non-zero, then $\tilde{B}_{ij}$ is a direct sum of two non-isomorphic simple $(\tilde{\mathfrak{s}}_i\times\tilde{\mathfrak{s}}_j)$-submodules. We claim that it is a graded-simple $(\tilde{\mathfrak{s}}_i\times\tilde{\mathfrak{s}}_j)$-module. Indeed, otherwise one of the submodules $B_{ij}$ and $B_{s-j+1,s-i+1}$ would be graded. But there exist scalars $\lambda_i$ such that $\tilde{e}_i:=e_i+e_{s-i+1}+\lambda_i {1}$ are homogeneous of degree $f$, and $\mathrm{ad}(\tilde{e}_i)$ acts as the identity on $B_{ij}$ and the negative identity on $B_{s-j+1,s-i+1}$, which forces $f=1_G$, a contradiction.
Therefore, we can apply Lemma \ref{cyclic2} to $\tilde{B}_{ij}\rtimes (\tilde{\mathfrak{s}}_i\times\tilde{\mathfrak{s}}_j)$ and conclude that the supports of non-zero $\tilde{\mathfrak{s}}_i$ and $\tilde{\mathfrak{s}}_j$ commute element-wise with one another, hence $\mathrm{Supp}\,S$ is commutative.
Now consider $\tilde{B}_{ij}$, with $i+j\ne s+1$, as an $((\tilde{\mathfrak{s}}_i\times\tilde{\mathfrak{s}}_j)\times\mathbb{F}\tilde{e}_i)$-module, where one of $\tilde{\mathfrak{s}}_i$ and $\tilde{\mathfrak{s}}_j$ is allowed to be zero. The simple submodules $B_{ij}$ and $B_{s-j+1,s-i+1}$ are non-isomorphic, because they are distinguished by the action of $\tilde{e}_i$. Hence, our argument in the first paragraph shows that $\tilde{B}_{ij}$ is a graded-simple module, so we can apply Lemma \ref{cyclic2} to $\tilde{B}_{ij}\rtimes((\tilde{\mathfrak{s}}_i\times\tilde{\mathfrak{s}}_j)\times\mathbb{F}\tilde{e}_i)$ and conclude that the supports of $\tilde{\mathfrak{s}}_i$ and $\tilde{\mathfrak{s}}_j$ commute element-wise with $f$ and also with $\mathrm{Supp}\,\tilde{B}_{ij}$. Moreover, $f$ commutes with $\mathrm{Supp}\,\tilde{B}_{ij}$. If $i+j=s+1$, then $\tilde{B}_{ij}=B_{ij}$ and we can apply Lemma \ref{cyclic} to $B_{ij}\rtimes\tilde{\mathfrak{s}}_i$.
Therefore, the elements of $\mathrm{Supp}\,S$ commute with $f$ and together generate an abelian subgroup $H$ in $G$. Then, by the same argument as in Case~1 (but using $\tilde{B}_{ij}$ instead of $B_{ij}$), we show that $\mathrm{Supp}\,S$ commutes element-wise with $\mathrm{Supp}\,J$.
In order to prove that $f$ commutes with $\mathrm{Supp}\,J$, it is sufficient to consider $J_1$. As we have seen, $f$ commutes with $\mathrm{Supp}\,\tilde{B}_{ij}$ where $i+j\ne s+1$. The only case that is not covered in $J_1$ is $\tilde{B}_{s/2,s/2+1}=B_{s/2,s/2+1}$ for even $s$. Since $s>2$, we have $[\tilde{B}_{s/2-1,s/2},B_{s/2,s/2+1}]=\tilde{B}_{s/2-1,s/2+1}$. Since $f$ commutes with $\mathrm{Supp}\,\tilde{B}_{s/2-1,s/2}$ and with $\mathrm{Supp}\,\tilde{B}_{s/2-1,s/2+1}$, we conclude that $f$ commutes with $\mathrm{Supp}\,B_{s/2,s/2+1}$ as well. The commutativity of $\mathrm{Supp}\,J$ is proved by the same argument as in Case~1.
\end{proof}
\section{Jordan case}\label{Jord_case}
Every Jordan isomorphism from the algebra $U(\mathscr{F})$, $s>1$, to an arbitrary associative algebra $R$ is either an associative isomorphism or anti-isomorphism \cite[Corollary 3.3]{BDW2016}. By the remark after Theorem \ref{aut_cecil}, $U(\mathscr{F})$ admits an anti-automorphism if and only if $n_{i}=n_{s-i+1}$ for all $i$. So, taking into account the structure of the automorphism group of $U(\mathscr{F})$ (see Lemma \ref{aut}), we obtain that the automorphism group of $U(\mathscr{F})^{(+)}$, that is, the algebra $U(\mathscr{F})$ viewed as a Jordan algebra with respect to the symmetrized product $x\circ y=xy+yx$, is either $\{\mathrm{Int}(x)\mid x\in U(\mathscr{F})^\times\}$ or $\{\mathrm{Int}(x)\mid x\in U(\mathscr{F})^\times\}\rtimes\langle\tau\rangle$. In both cases, the following holds:
\begin{Lemma}
{If $n>2$,} $\mathrm{Aut}(U(\mathscr{F})^{(+)})\simeq\mathrm{Aut}(U(\mathscr{F})_0)$.\qed
\end{Lemma}
Hence, if $\mathbb{F}$ is algebraically closed of characteristic $0$ and the grading group $G$ is abelian, then the classification of $G$-gradings on the Jordan algebra $U(\mathscr{F})^{(+)}$ is equivalent to the classification of $G$-gradings on the Lie algebra $U(\mathscr{F})_0$ (see also \cite[\S 5.6]{EK2013} for the simple case, $s=1$). Thus, we get the same parametrization of the isomorphism classes of gradings as in Corollary \ref{cor:main_Lie}. The only difference is the sign in the construction of Type II gradings on $M_n^{(+)}$ (compare with Equation \eqref{Type2Lie} and recall that $\Phi$ is given by Equation \eqref{Phi}):
\begin{equation*}
M_n^{(+)}=\bigoplus_{g\in G}R_g\text{ where }R_g=\{X\in R_{\bar{g}}\mid\Phi^{-1}X^\tau\Phi=\chi(g)X\},
\end{equation*}
which are then restricted to $U(\mathscr{F})^{(+)}$. {Hence, for $n>2$, an explicit bijection between the $G$-gradings (or their isomorphism classes) on $U(\mathscr{F})^{(+)}$ and those on $U(\mathscr{F})_0$ is the following: restriction for Type I gradings and restriction with shift by the distinguished element $f$ for Type II gradings (which occur on $U(\mathscr{F})^{(+)}$ even for $n=2$, but in this case restrict to Type I gradings on $U(\mathscr{F})_0$).}
We note, however, that this result does not exclude the existence of group gradings on $U(\mathscr{F})^{(+)}$ with non-commutative support. In view of Theorem \ref{supp_commutativity}, these gradings, if they exist, are not analogous to gradings on $U(\mathscr{F})_0$.
\section{Isomorphism and practical isomorphism of graded Lie algebras}\label{practical_iso}
We use the main result of this section to obtain a classification of group gradings for $U(\mathscr{F})^{(-)}$
from the classification for $U(\mathscr{F})_0$, but it is completely general and may be of independent interest.
Let $G$ be a group and let $L_1$ and $L_2$ be two $G$-graded Lie algebras over an arbitrary field $\mathbb{F}$.
\begin{Def}[{\cite[Definition 7]{pkfy2017}}]
$L_1$ and $L_2$ are said to be \emph{practically $G$-graded isomorphic} if there exists an isomorphism of (ungraded) algebras $\psi:L_1\to L_2$ that induces a $G$-graded isomorphism $L_1/\mathfrak{z}(L_1)\to L_2/\mathfrak{z}(L_2)$.
\end{Def}
Note that, in this case, for every homogeneous non-central $x\in L_1$, we can find $z\in\mathfrak{z}(L_1)$ such that $y=\psi(x+z)$ is homogeneous in $L_2$ and $\deg x=\deg y$.
Clearly, if $L_1$ and $L_2$ are $G$-graded isomorphic then they are practically $G$-graded isomorphic.
The converse does not hold, but if $L_1$ and $L_2$ are practically $G$-graded isomorphic
then the derived algebras $L_1'$ and $L_2'$ are $G$-graded isomorphic. More precisely:
\begin{Lemma}\label{iso_derived}
Assume $\psi:L_1\to L_2$ is an isomorphism of algebras that induces a $G$-graded isomorphism
$L_1/\mathfrak{z}(L_1)\to L_2/\mathfrak{z}(L_2)$.
Then $\psi$ restricts to a $G$-graded isomorphism $L_1'\to L_2'$.
\end{Lemma}
\begin{proof}
Let $0\ne x\in L_1'$ be homogeneous of degree $g\in G$. Then there exist in $L_1$ nonzero homogeneous $x'_i$ of degree $g'_i$ and $x''_i$ of degree $g''_i$, $i=1,\ldots,m$, such that $x=\sum_{i=1}^m[x'_i,x''_i]$ and $g'_ig''_i=g$ for all $i$.
Also, there exist $z'_i,z''_i\in\mathfrak{z}(L_1)$ such that $\psi(x'_i+z'_i)$ is homogeneous of degree $g'_i$ and $\psi(x''_i+z''_i)$ is homogeneous of degree $g''_i$, for all $i$. Hence,
\[
\psi(x)=\psi\left(\sum_{i=1}^m[x'_i+z'_i,x''_i+z''_i]\right)=\sum_{i=1}^m[\psi(x'_i+z'_i),\psi(x''_i+z''_i)]
\]
is homogeneous in $L_2$ of degree $g$, as desired.
\end{proof}
Now we will see what happens if we strengthen the hypothesis on $\psi$ by assuming, in addition, that it restricts to a $G$-graded isomorphism $\mathfrak{z}(L_1)\to\mathfrak{z}(L_2)$. This does not yet imply that $\psi$ itself is a $G$-graded isomorphism, but we have the following:
\begin{Thm}\label{th:main_practical}
Let $L_1$ and $L_2$ be $G$-graded Lie algebras, and assume that there exists an isomorphism of (ungraded) algebras $\psi:L_1\to L_2$ such that both the induced map $L_1/\mathfrak{z}(L_1)\to L_2/\mathfrak{z}(L_2)$ and the restriction $\mathfrak{z}(L_1)\to\mathfrak{z}(L_2)$ are $G$-graded isomorphisms. Then $L_1$ and $L_2$ are isomorphic as $G$-graded algebras.
\end{Thm}
\begin{proof}
Let $N_1\subset\mathfrak{z}(L_1)$ be a graded subspace such that
\[
\mathfrak{z}(L_1)=N_1\oplus(\mathfrak{z}(L_1)\cap L_1').
\]
By our hypothesis, $N_2:=\psi(N_1)$ is a graded subspace of $\mathfrak{z}(L_2)$. Since $L_1'\oplus N_1$ is a graded subspace of $L_1$, there exists a linearly independent set $\mathcal{B}_1=\{u_i\}_{i\in\mathscr{I}}$ of homogeneous elements of $L_1$ satisfying
\[
L_1=L_1'\oplus N_1\oplus\mathrm{Span}\,\mathcal{B}_1.
\]
By our hypothesis, we can find $z_i\in\mathfrak{z}(L_1)$ such that $\psi(u_i+z_i)$ is a homogeneous element of $L_2$ that has the same degree as $u_i$. Since $\mathfrak{z}(L_1)\subset L_1'\oplus N_1$, the set $\mathcal{B}_2:=\{\psi(u_i+z_i)\}_{i\in\mathscr{I}}$ is linearly independent and satisfies
\[
L_2=L_2'\oplus N_2\oplus\mathrm{Span}\,\mathcal{B}_2.
\]
Now define a linear map $\theta:L_1\to L_2$ by setting $\theta|_{L_1'\oplus N_1}=0$ and $\theta(u_i)=\psi(z_i)$ for all $i\in\mathscr{I}$. This is a ``trace-like map'' in the sense that its image is contained in $\mathfrak{z}(L_2)$ and its kernel contains $L_1'$. It follows that $\tilde\psi:=\psi+\theta$ is an isomorphism of algebras $L_1\to L_2$. Applying Lemma \ref{iso_derived}, we see that $\psi$, and hence $\tilde{\psi}$, restricts to a $G$-graded isomorphism $L_1'\oplus N_1\to L_2'\oplus N_2$. By construction, $\tilde\psi(u_i)=\psi(u_i+z_i)$. It follows that $\tilde\psi$ is an isomorphism of $G$-graded algebras.
\end{proof}
\begin{Cor}\label{cor:main_practical}
Let $\Gamma_1$ and $\Gamma_2$ be two $G$-gradings on a Lie algebra $L$ and consider the $G$-graded algebras
$L_1=(L,\Gamma_1)$ and $L_2=(L,\Gamma_2)$. If $L_1/\mathfrak{z}(L_1)=L_2/\mathfrak{z}(L_2)$ and $\mathfrak{z}(L_1)=\mathfrak{z}(L_2)$ as $G$-graded algebras, then $L_1\simeq L_2$ as $G$-graded algebras.
\end{Cor}
\begin{proof}
Apply the previous theorem with $\psi$ being the identity map.
\end{proof}
\begin{Cor}\label{reduction_to_trace_0}
Let $\Gamma_1$ and $\Gamma_2$ be two $G$-gradings on $U(\mathscr{F})^{(-)}$ {and assume $\mathrm{char}\,\mathbb{F}\nmid n$.} Then $\Gamma_1$ and $\Gamma_2$ are isomorphic if and only if they assign the same degree to the identity matrix ${1}$ and induce isomorphic gradings on $U(\mathscr{F})^{(-)}/\mathbb{F} {1}\simeq U(\mathscr{F})_0$.
\end{Cor}
\begin{proof}
The ``only if'' part is clear. For the ``if'' part, take an automorphism $\psi_0$ of $U(\mathscr{F})_0$ that sends
the grading induced by $\Gamma_1$ to the one induced by $\Gamma_2$, extend $\psi_0$ to an automorphism $\psi$ of $U(\mathscr{F})^{(-)}=U(\mathscr{F})_0\oplus\mathbb{F} {1}$ by setting $\psi({1})={1}$, and apply the theorem.
\end{proof}
\end{document}
\begin{document}
\title{Polymorphisms and Circuit Complexity}
\begin{abstract}
We present a framework for studying circuit complexity that is inspired by techniques that are used for analyzing the complexity of CSPs. We prove that the circuit complexity of a Boolean function $f$ can be characterized by the partial polymorphisms of $f$'s truth table. Moreover, the non-deterministic circuit complexity of $f$ can be characterized by the polymorphisms of $f$'s truth table.
\end{abstract}
\section{Introduction}
\label{section:introduction}
It is well known that almost all Boolean functions require circuits of exponential size, but so far we have not been able to pinpoint a single explicit function requiring circuits larger than $5n$.
The basic idea of our approach is to make use of techniques and results for analyzing the complexity of SAT problems to get a better understanding of circuit complexity.
Let SAT($S$) denote the SAT problem restricted to instances that are conjunctions of constraints built over the relations in $S$.
The complexity of SAT($S$) is characterized (up to polynomial-time reducibility) by the polymorphisms of $S$, denoted $Pol(S)$~\cite{TCS98}. For now, think of polymorphisms of $S$ as a generalized form of automorphisms, i.e., operations preserving the structure $S$. The richer the polymorphisms of a structure $S$ are, the \enquote{simpler} the structure is. Indeed, SAT($S$) is in P if $S$ has a non-trivial polymorphism, and NP-complete otherwise. It was observed in~\cite{JLNZ03} that the partial polymorphisms of $S$ (i.e., polymorphisms that may be undefined on some inputs), denoted $pPol(S)$,
paint a more fine-grained picture of the complexity of SAT($S$). For example, if $pPol(S) \subseteq pPol(S')$ and SAT($S$) is solvable in $O(c^n)$ time, then SAT($S'$) is solvable in the same time $O(c^n)$ (here $n$ denotes the number of variables).
Let $B_{n}$ denote the set of all Boolean functions with $n$ inputs and $1$ output.
Given $f \in B_{n}$, let $f^{\bullet}$ denote the truth table of $f$, i.e., the $(n+1) \times 2^n$ matrix where the first $n$ columns represent the inputs to $f$, the last column represents the output of $f$, and the rows of $f^{\bullet}$ are sorted in lexicographic order.
Our first observation is that if $f^{\bullet}$ is preserved by a non-trivial polymorphism (i.e., one that is not essentially unary or constant), then $f$ has a circuit of size $O(n)$ (Section~\ref{sec:upper}). Hence, it seems that our intuition from SAT($S$) carries over: if the polymorphisms of $f^{\bullet}$ are \enquote{rich}, then $f$ is simple (i.e., has low circuit complexity). To argue in the other direction, namely, that if the polymorphisms of $f^{\bullet}$ are not rich, then $f$ has high circuit complexity, we first give some general intuition.
To compute a Boolean function $f$, we need to avoid all potential errors, i.e., not output a $0$ on some input for which $f$ is $1$ or vice versa. In many computational models (e.g., Turing Machines, NFAs, or circuits), a computation consists of a composition of primitive computation steps/transitions/gates.
\textbf{Any function $w$ that is not a polymorphism of $f^{\bullet}$ represents a potential error. For each such $w$, there must be at least one primitive (step/transition/gate) that catches/covers this error by not being preserved by $w$. Otherwise, $w$ is a polymorphism of the computation, and the error represented by $w$ manifests itself.}
The smallest number of individual steps/transitions/gates that cover all the potential errors of $f$ (i.e., all $w$ that are not polymorphisms of $f^{\bullet}$) is a lower bound on the complexity of $f$. Thus, the \enquote{poorer} the polymorphisms of $f^{\bullet}$ are, the larger the complexity of $f$ is.
This line of thinking is inspired by the method of approximation that was introduced by Razborov in his celebrated monotone circuit lower bound results~\cite{Razborov85b, Razborov85a} and further extended in~\cite{Razborov89}. The method of approximation was put in a different framework by Karchmer~\cite{Karchmer93} and the method presented in this framework was coined the fusion method by Wigderson in his survey of the topic~\cite{Wigderson93}. For those familiar with this previous line of work, we remark that the notion of a \enquote{fusion functional} (as used by Karchmer and Wigderson) corresponds to the functions $w$ that are not polymorphisms of $f^{\bullet}$.
Our first main result (Section~\ref{sec:partial}) is that, given $f \in B_n$, the smallest number of gates that cover all (witnesses of) \emph{partial} functions $w$, such that $w\notin pPol(f^{\bullet})$, is exactly the number of gates in an optimal circuit for $f$. Our second result (Section~\ref{sec:total}) is that the smallest number of gates that cover all (witnesses of) \emph{total} functions $w$, such that $w \notin Pol(f^{\bullet})$, equals the \emph{non-deterministic} circuit size of $f$, up to a constant factor.
\section{Preliminaries}
\subsection{Function algebra}
Any operation on $\{0,1\}$ can be extended in a standard way to an operation on tuples
over $\{0,1\}$, by applying the operation componentwise as follows.
\begin{definition}
Let $w \in B_k$ and let $R$ be an $n$-ary relation over $\{0,1\}$. For any collection of $k$ tuples,
$t_1,t_2, \dots, t_k \in R$, the $n$-tuple $w(t_1,t_2, \dots ,t_k)$ is defined as follows:
$w(t_1,t_2, \dots ,t_k) = (w(t_1[1],t_2[1], \dots, t_k[1]),$ $w(t_1[2],t_2[2], \dots, t_k[2]),\dots,$ $w(t_1[n],t_2[n], \dots, t_k[n]))$,
where $t_j[i]$ is the $i$th component in tuple $t_j$.
\end{definition}
\begin{definition}
If $w$ is an operation such that for all $t_1,t_2, \dots, t_k \in R$
$w(t_1,t_2, \dots ,t_k) \in R$, then $R$ is closed under $w$. An operation $w$ such that $R$ is closed under $w$ is called a polymorphism of $R$. The set of all polymorphisms of $R$ is denoted $Pol(R)$.
\end{definition}
Consider the following two binary functions $f$ and $g$, whose truth tables $f^{\bullet}$ and $g^{\bullet}$ are given below.
\begin{table}[h]
\begin{center}
\begin{tabular}{cc|c}
$x_1$&$x_2$&$f$\\
\hline
$0$&$0$&$0$\\
$0$&$1$&$0$\\
$1$&$0$&$0$\\
$1$&$1$&$1$\\
\end{tabular}
\quad
\begin{tabular}{cc|c}
$x_1$&$x_2$&$g$\\
\hline
$0$&$0$&$1$\\
$0$&$1$&$0$\\
$1$&$0$&$0$\\
$1$&$1$&$1$\\
\end{tabular}
\caption{Truth tables $f^{\bullet}$ and $g^{\bullet}$}
\label{tab:ExampleFunctions}
\end{center}
\end{table}
Note that neither $f^{\bullet}$ nor $g^{\bullet}$ is closed under the ternary majority operation $maj(x,x,y)=maj(x,y,x)=maj(y,x,x) = x$. For $f^{\bullet}$, there is only one \enquote{witness} that $maj$ is not a polymorphism of $f^{\bullet}$: applying the $maj$ operation to the last 3 tuples results in the tuple $(1,1,0)$, which is not in $f^{\bullet}$. The application of $maj$ to any other combination of three tuples in $f^{\bullet}$ results in a tuple in $f^{\bullet}$. If we consider $g^{\bullet}$ instead, applying $maj$ to \emph{any} 3 distinct tuples results in a tuple which is not in $g^{\bullet}$. Hence, it seems natural to consider $g$ as being further away from being closed under $maj$ than $f$. Furthermore, every witness of the fact that an operation $w$ is not a polymorphism of the truth table of a function constitutes a potential error that any circuit computing the function must catch. Hence, not only do we need to keep track of operations $w$ that are not polymorphisms of the truth table of the function, but also of the set of all witnesses of this (i.e., all combinations of tuples from the truth table for which applying $w$ results in a tuple which is not in the truth table).
\begin{definition}
For $f \in B_n$, let $\overline{Pol}(f^{\bullet})$ denote the set of all functions $w \in B_{2^n}$ such that $w$ applied to the tuples in $f^{\bullet}$ (sorted in lexicographic order) results in a tuple that is not in $f^{\bullet}$. Hence, each $w \in \overline{Pol}(f^{\bullet})$ represents a witness that some function is not a polymorphism of $f^{\bullet}$. We sometimes refer to functions $w \in \overline{Pol}(f^{\bullet})$ as anti-polymorphisms.
\end{definition}
Reconsidering the truth tables of the functions $f$ and $g$ above, consider:
\begin{center}
$w_1(x_1,x_2,x_3,x_4) = maj(x_1,x_2,x_3)$, \\
$w_2(x_1,x_2,x_3,x_4) = maj(x_1,x_2,x_4)$, \\
$w_3(x_1,x_2,x_3,x_4) = maj(x_1,x_3,x_4)$, \\
$w_4(x_1,x_2,x_3,x_4) = maj(x_2,x_3,x_4)$.
\end{center}
We have $w_1,\dots,w_4 \in \overline{Pol}(g^{\bullet})$ but only $w_4 \in \overline{Pol}(f^{\bullet})$.
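For concreteness, the following short Python sketch (ours, not part of the original development; rows are indexed from $0$) verifies these claims by applying $maj$ to the relevant $3$-element subsets of the rows of $f^{\bullet}$ and $g^{\bullet}$ and testing whether the resulting tuple belongs to the truth table:
\begin{verbatim}
# Verification of the maj example (rows indexed from 0).
f_tt = [(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 1)]   # truth table of f (AND)
g_tt = [(0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 1)]   # truth table of g

def maj(x, y, z):
    return 1 if x + y + z >= 2 else 0

def is_witness(row_indices, table):
    """Apply maj componentwise to the chosen rows; the application is a
    witness if the resulting row does not belong to the table."""
    rows = [table[i] for i in row_indices]
    new_row = tuple(maj(*col) for col in zip(*rows))
    return new_row not in table

# w_1, ..., w_4 amount to applying maj to these subsets of rows.
subsets = [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
print([is_witness(s, f_tt) for s in subsets])  # only the last one (w_4)
print([is_witness(s, g_tt) for s in subsets])  # all four
\end{verbatim}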
Let $P_{n}$ denote the set of all partial Boolean operations/functions with $n$ inputs and $1$ output (i.e., operations that may be undefined for some inputs).
The concept of polymorphisms has a natural extension to partial operations.
\begin{definition}
Let $w \in P_k$ and let $R$ be an $n$-ary relation. Then $R$ is closed under $w$ if for all $t_1,t_2, \dots, t_k \in R$ either $w(t_1,t_2, \dots ,t_k) \in R$ or at least one of $w(t_1[1],t_2[1], \dots, t_k[1]),$ $w(t_1[2],t_2[2], \dots, t_k[2]),\dots,$ $w(t_1[n],t_2[n], \dots, t_k[n])$ is undefined. A $w \in P_k$ such that $R$ is closed under $w$ is called a partial polymorphism of $R$. The set of all partial polymorphisms of $R$ is denoted $pPol(R)$. Note that $Pol(R) \subseteq pPol(R)$.
\end{definition}
\begin{definition}
For $f \in B_n$, let $\overline{pPol}(f^{\bullet})$ denote the set of all functions $w \in P_{2^n}$ such that $w$ applied to the tuples in $f^{\bullet}$ (sorted in lexicographic order) results in a tuple that is not in $f^{\bullet}$. Hence, each $w \in \overline{pPol}(f^{\bullet})$ represents a witness that some (partial) function is not a partial polymorphism of $f^{\bullet}$.
\end{definition}
\subsection{Circuits}
A Boolean circuit is a directed acyclic graph with three types of labeled vertices: sources (in-degree $0$) labeled $x_1,\dots,x_n$, a sink (the output), and internal vertices of in-degree $k > 0$, which are gates labeled by Boolean functions on $k$ inputs. Unless otherwise specified, we assume the gates of the circuit to be fan-in two $\land$ and $\lor$ gates together with $\neg$ gates.
A non-deterministic circuit has, in addition to the ordinary inputs $x = (x_1,\dots,x_n)$, a set of \enquote{non-deterministic} inputs $y=(y_1,\dots,y_m)$. A non-deterministic circuit $C$ accepts input $x$ if there exists $y$ such that the circuit outputs $1$ on $(x,y)$. A co-non-deterministic circuit $C$ rejects an input $x$ if there exists $y$ such that $C$ outputs $0$ on $(x,y)$. Let $|C|$ denote the number of gates of a circuit $C$.
A family of non-deterministic circuits $\{C_n\}_{n \geq 0}$, with $C_n$ having $n$ (ordinary) input gates, decides a language $L$ if each $C_n$ decides $L_n$ (i.e., $C_n$ accepts $x$ if and only if $|x| = n$ and $x \in L$).
The class $NP/poly$ is defined as the class of languages decidable by non-deterministic circuit families $\{C_n\}$, with $|C_n| \leq poly(n)$. Recall that $P/poly$ is the class of languages decidable by (deterministic) circuit families $\{C_n\}$, with $|C_n| \leq poly(n)$.
Similarly, $coNP/poly$ is the class of languages decidable by polynomial size co-non-deterministic circuit families.
\subsection{Covers}
To be able to investigate $Pol(f^{\bullet})$ in relation to the circuit complexity of $f \in B_n$, we need to introduce the computational model that we use. We define a $\land$ gate to be any $3 \times 2^n$ Boolean matrix where the third column is the $\land$ of the first two columns. A $\lor$ gate is defined analogously, and a $\neg$ gate is any $2 \times 2^n$ Boolean matrix where the second column is the complement of the first. An input (gate) is any $1 \times 2^n$ matrix that is one of the first $n$ column vectors in $f^{\bullet}$ (i.e., the inputs to $f$).
Given a gate $g_i$ we denote its input columns (in case they exist) by $g_{i_1}$ and $g_{i_2}$ (a gate may have just one input ($\neg$), or no inputs (i.e., an input gate)), and its output column (the last column in its matrix) by $g_i$ (all gates have an output).
Denote the function of the gate $g_i$ by $\circ_i$, e.g., $\circ_i \in \{\land,\lor,\neg\}$.
A circuit (or straight line program) is a sequence of gates $P = (g_1,g_2,\dots,g_t)$ (sometimes viewed as a $t \times 2^n$ matrix) such that the first $n$ gates $g_1,\dots,g_n$ are the input gates (i.e., the first $n$ columns of $f^{\bullet}$), and for every $i>n$, the inputs of $g_i$, i.e.,
$g_{i_1}$ and $g_{i_2}$, satisfy $i_1,i_2 < i$. That is, the inputs of $g_i$ must be the outputs of a gate preceding it in the sequence. The computation of $P$ on input $x \in \{0,1\}^n$ is defined as $P(x) = g_1(x) \cdots g_t(x) = u \in \{0,1\}^t$, where $u$ consists of the outputs of all gates in $P$ when propagating the input $x$ through the circuit (i.e., $u$ is the row of the matrix $P = (g_1,g_2,\dots,g_t)$ corresponding to the input $x$). For example,
$u_i = u_{i_1} \circ_i u_{i_2}$. We say that $P$ computes $f \in B_{n}$ if $g_t(x) = f(x)$ for all $x \in \{0,1\}^n$.
The key for obtaining a lower bound is the observation that if some $P$ of length $t$ computes $f$ (i.e., $f$ has a circuit of size $t-n$) then every $w \in \overline{Pol}(f^{\bullet})$ must fail to be consistent with $P$, i.e.,
for some $1 \leq i \leq t$, $w(g_{i_1}) \circ_i w(g_{i_2}) \neq w(g_i)$.
\begin{proposition}
\label{prop:consistent}
If $w \in \overline{Pol}(f^{\bullet})$ is consistent with the program $P = (g_1,g_2,\dots,g_t)$, then $P$ does not compute $f$.
\end{proposition}
\begin{proof}
Assume that $P = (g_1,g_2,\dots,g_t)$ is a circuit computing $f$. Let $x_1,\dots,x_{n+1}$ be the columns of $f^{\bullet}$ and $z = (w(x_1),w(x_2),\dots,w(x_n))$. Since $w \in \overline{Pol}(f^{\bullet})$ we know that $f(z) \neq w(x_{n+1})$.
Applying $w$ to the columns of the $t \times 2^n$ matrix $P$ results in a $t$-tuple $u = u_1 \dots u_t$ possibly corresponding to a correct computation of $z$ by the circuit.
We know that $w$ is consistent with $P$, i.e.,
$w(g_{i_1}) \circ_i w(g_{i_2}) = w(g_i)$
for all gates $g_i$, and hence,
$u_i = u_{i_1} \circ_i u_{i_2}$, and $u$ represents a correct computation of $P$ on input $z$. This leads to a contradiction: since $P$ computes $f$, the output gate gives $u_t = f(z)$, whereas by construction $u_t = w(x_{n+1})$, and $f(z) \neq w(x_{n+1})$.
\end{proof}
A gate $g_i$ is said to cover $w \in \overline{Pol}(f^{\bullet})$ if $w(g_{i_1}) \circ_i w(g_{i_2}) \neq w(g_i)$.
\begin{definition}
\label{def:cover2}
A collection of gates $\mathcal{T}$ that cover all $w \in \overline{Pol}(f^{\bullet})$
is said to be a $Pol$ cover for $f$. A minimal $Pol$ cover (in terms of number of gates) for $f$ is denoted $\mathcal{T}(f)$, and its size (i.e., the number of gates) is denoted $|\mathcal{T}(f)|$.
\end{definition}
A cover for $f$ can be seen as an (unsorted) collection of gates that together catch all the potential errors that a circuit for $f$ must deal with. The idea is that since a cover is a simpler object than a circuit, it might be easier to prove lower bounds on the size of a cover for $f$ than the size of a circuit for $f$.
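To illustrate the covering condition, the following Python sketch (our own simplified representation, in which every gate stores its input and output columns directly as bit tuples) searches a straight-line program for a gate that is not preserved by a given $w$; by Proposition~\ref{prop:consistent}, such a gate must exist whenever the program computes $f$ and $w \in \overline{Pol}(f^{\bullet})$:
\begin{verbatim}
import operator

# A gate is stored as (op, input_columns, output_column); every column is a
# tuple of 2^n bits, and w is applied to whole columns.
def find_covering_gate(program, w):
    """Return the index of a gate of `program` that is not preserved by w,
    or None if w is consistent with every gate."""
    ops = {'and': operator.and_, 'or': operator.or_}
    for i, (op, in_cols, out_col) in enumerate(program):
        if op == 'input':
            continue
        vals = [w(col) for col in in_cols]
        expected = (1 - vals[0]) if op == 'not' else ops[op](vals[0], vals[1])
        if expected != w(out_col):
            return i
    return None

# Example: the obvious circuit for f(x1, x2) = x1 AND x2, given by the
# columns of f's truth table, and the anti-polymorphism w_4 from above.
x1, x2, out = (0, 0, 1, 1), (0, 1, 0, 1), (0, 0, 0, 1)
program = [('input', [], x1), ('input', [], x2), ('and', [x1, x2], out)]
w4 = lambda col: 1 if col[1] + col[2] + col[3] >= 2 else 0
print(find_covering_gate(program, w4))   # 2: the AND gate covers w_4
\end{verbatim}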
A gate $g_i$ is said to cover $w \in \overline{pPol}(f^{\bullet})$ if $w(g_{i_1})$ and $w(g_{i_2})$ are defined and
$w(g_{i_1}) \circ_i w(g_{i_2}) \neq w(g_i)$
(i.e., if $w$ is defined on the inputs to $g_i$ but $w$ is undefined on the output of $g_i$ or not consistent with $g_i$).
A collection of gates $\mathcal{P}$ cover $\overline{pPol}(f^{\bullet})$ if each $w \in \overline{pPol}(f^{\bullet})$ is covered by at least one gate $g_i \in \mathcal{P}$.
\begin{definition}
\label{def:cover}
A collection of gates $\mathcal{P}$ that cover $\overline{pPol}(f^{\bullet})$ such that: (1) no two gates in $\mathcal{P}$ output the same result, (2) the result column of $f^{\bullet}$ is not an input of any gate in $\mathcal{P}$, and (3) none of the input columns of $f^{\bullet}$ is an output of a gate in $\mathcal{P}$, is said to be a $pPol$ cover for $f$. A minimal $pPol$ cover (in terms of number of gates) for $f$ is denoted $\mathcal{P}(f)$, and its size (i.e., the number of gates) is denoted $|\mathcal{P}(f)|$.
\end{definition}
We remark that conditions (1)-(3) in Definition~\ref{def:cover} are used to avoid cycles when converting a $pPol$ cover to a circuit later on. They can be replaced by requiring $pPol$ covers to be acyclic.
\section{Non-trivial polymorphisms imply trivial circuits}
\label{sec:upper}
In this section we note that if $Pol(f^{\bullet})$ contains a non-trivial polymorphism, then the circuit complexity of $f \in B_{n}$ is at most $O(n)$. By a non-trivial polymorphism we mean any polymorphism which is not a constant function, a projection, or the negation of a projection.
\begin{theorem}
Given $f \in B_{n}$, if $Pol(f^{\bullet})$ contains a non-trivial polymorphism, then $f$ has a circuit of size $O(n)$.
\end{theorem}
\begin{proof}
By inspection of Post's lattice of Boolean clones~\cite{post}, we know that if $Pol(f^{\bullet})$ contains a non-trivial polymorphism, then it must contain at least one of the following four polymorphisms:
\begin{enumerate}
\item the majority operation $maj(x,x,y)=maj(x,y,x)=maj(y,x,x) = x$
\item the affine operation $aff(x,y,z) = x \oplus y \oplus z$ (where $\oplus$ is addition modulo $2$)
\item the and operation $and(x,y) = x \land y$
\item the or operation $or(x,y) = x \lor y$
\end{enumerate}
Given $f \in B_{n}$, in order to design our circuit $C$ we first pre-compute $f$ on the all $0$ input, the all $1$ input, the $n$ inputs having exactly one $1$, and the $n$ inputs having exactly one $0$. More formally, $t_{i}$ ($1 \leq i \leq n$) is the output of $f$ on the input that has a unique $1$ in position $i$, $t_{n+i}$ ($1 \leq i \leq n$) is the output of $f$ on the input that has a unique $0$ in position $i$, $t_{2n+1}$ is the output of $f(0,0,\dots,0)$, and $t_{2n+2}$ is the output of $f(1,1,\dots,1)$. We hard-wire these $(2n+2)$ bits of information $t_{i}$, $1 \leq i \leq 2n+2$ in our circuit.
The task of the circuit $C$ on input $x = (x_1,x_2,\dots,x_n)$ is to repeatedly apply the non-trivial polymorphism of $f^{\bullet}$ to these $(2n+2)$ bits $t_{i}$ until we arrive at the output of $f(x)$.
In the case where $Pol(f^{\bullet})$ contains the $or$ operation: \\
01: $r := t_{2n+1}$;\\
02: for $1 \leq i \leq n$ \{ \\
03: \quad if $x_i = 1$ \{ \\
04: \quad \quad $r := r \lor t_{i}$; \\
05: \quad \} \\
06: \} \\
07: return $r$; \\
In the case where $Pol(f^{\bullet})$ contains the $and$ operation: \\
01: $r := t_{2n+2}$;\\
02: for $1 \leq i \leq n$ \{ \\
03: \quad if $x_i = 0$ \{ \\
04: \quad \quad $r := r \land t_{n+i}$; \\
05: \quad \} \\
06: \} \\
07: return $r$; \\
In the case where $Pol(f^{\bullet})$ contains the $aff$ operation: \\
01: $r := t_{2n+1}$;\\
02: for $1 \leq i \leq n$ \{ \\
03: \quad if $x_i = 1$ \{ \\
04: \quad \quad $r := t_{2n+1} \oplus r \oplus t_{i}$; \\
05: \quad \} \\
06: \} \\
07: return $r$; \\
In the case where $Pol(f^{\bullet})$ contains the $maj$ operation: \\
01: $r := t_{2n+2}$;\\
02: for $1 \leq i \leq n$ \{ \\
03: \quad if $x_i = 0$ \{ \\
04: \quad \quad $r := maj(t_{2n+1}, r, t_{n+i})$; \\
05: \quad \} \\
06: \} \\
07: return $r$; \\
To see that the circuit $C$ on input $x = (x_1,x_2,\dots,x_n)$ outputs $r = f(x)$, we consider the case
where $or \in Pol(f^{\bullet})$ (the arguments in the other cases are very similar). In line 01 we initialize $r$
to be the output of $f(0,0,\dots,0)$. Then (in lines 02-04) we take the $\lor$ of all $f(0,\dots,0,x_i,0,\dots,0)$ for which $x_i = 1$. This is the final output $r$. The fact that $r = f(x)$ follows from $or \in Pol(f^{\bullet})$ since if we take the $\lor$ of all the inputs $(0,\dots,0,x_i,0,\dots,0)$ for which $x_i = 1$ (i.e., all the inputs corresponding to the outputs we took $\lor$ of), we arrive at the original input vector $x = (x_1,x_2,\dots,x_n)$.
Note that the circuit $C$ has size $O(n)$ as the number of bits that we hard wire is $O(n)$, and in each of the $n$ iterations of the for loop we carry out a constant number of operations.
\end{proof}
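As an illustration of the construction, here is a Python rendering of the $or$-case pseudocode above (a sketch only; the function and variable names are ours, and the loop plays the role of the $n$ hard-wired $\lor$ gates):
\begin{verbatim}
# Python rendering of the or-case: hard-wire the bits t_i, then assemble
# the answer with (at most) n OR operations.
def build_or_case_evaluator(f, n):
    t = {}
    for i in range(1, n + 1):                      # t_i = f(e_i)
        e_i = tuple(1 if j == i - 1 else 0 for j in range(n))
        t[i] = f(e_i)
    t[2 * n + 1] = f(tuple(0 for _ in range(n)))   # t_{2n+1} = f(0,...,0)

    def evaluate(x):
        r = t[2 * n + 1]
        for i in range(1, n + 1):
            if x[i - 1] == 1:
                r |= t[i]
        return r
    return evaluate

# Example: f = OR of 3 variables; its truth table is closed under or.
f = lambda x: int(any(x))
C = build_or_case_evaluator(f, 3)
inputs = [(a, b, c) for a in (0, 1) for b in (0, 1) for c in (0, 1)]
print(all(C(x) == f(x) for x in inputs))           # True
\end{verbatim}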
Note that the construction above is easy to extend to multi-output functions. Given a Boolean function $f$ with $n$ inputs and $m$ outputs, such that $Pol(f^{\bullet})$ (where $f^{\bullet}$ is now an $(n+m) \times 2^n$ matrix) contains a non-trivial polymorphism, the construction results in a circuit of size $O(nm)$.
Also note that it is easy to extend this upper bound to functions $f \in B_{n}$ for which $Pol(f^{\bullet})$ is \enquote{close} to containing a non-trivial polymorphism. For example, if we can modify at most $n^k$ outputs of $f$ such that the truth table $g^{\bullet}$ of the resulting function $g$ is closed under a non-trivial polymorphism, then $f$ has circuits of size $O(n^k)$. This is because we can hard wire in our circuit $C$ the correct outputs corresponding to the outputs that were modified. The circuit $C$ is then designed as before for computing $g$ instead. On input $x$ the circuit first checks whether $x$ corresponds to a modified output, and if this is the case, it looks up the correct output $f(x)$. Otherwise $g(x)$ is computed, as before.
\section{Partial polymorphisms and deterministic circuits}
\label{sec:partial}
In this section we prove that the circuit complexity of $f$ can be characterized by the partial polymorphisms of $f^{\bullet}$. More precisely, we prove that a collection of gates is a minimal $pPol$ cover for $f$ if and only if the collection forms an optimal circuit for $f$.
\begin{proposition}
$|\mathcal{P}(f)|$ is a lower bound on the circuit complexity of $f$.
\end{proposition}
\begin{proof}
Let $\mathcal{P}$ be the set of gates of an optimal circuit $C$ for $f$. Note that conditions (1)-(3) in the definition of a $pPol$ cover (Definition~\ref{def:cover}) are satisfied by the gates of any optimal circuit for $f$. Assume there is a $w \in \overline{pPol}(f^{\bullet})$ that is not covered by $\mathcal{P}$; hence, for every gate $g$ in $\mathcal{P}$, either $w$ is undefined on an input to $g$, or $w$ is defined on the inputs of $g$ and consistent with $g$. Since $w \in \overline{pPol}(f^{\bullet})$, $w$ is defined on all columns of $f^{\bullet}$, and in particular on all input gates of $C$. Suppose $w$ is undefined on the output of some gate, and let $g$ be the first such gate in a topological order of $C$. Then $w$ is defined on all inputs of $g$ (each is a column of $f^{\bullet}$ or the output of an earlier gate) but undefined on the output of $g$, so $g$ covers $w$, contradicting our assumption. Thus, $w$ is defined on, and consistent with, all the gates in $\mathcal{P}$. By the same reasoning as in Proposition~\ref{prop:consistent}, this is impossible because $w \in \overline{pPol}(f^{\bullet})$. We conclude that $\mathcal{P}$ is a $pPol$ cover for $f$, and hence $|\mathcal{P}(f)| \leq |\mathcal{P}|$, the number of gates in an optimal circuit for $f$.
\end{proof}
\begin{proposition}
$|\mathcal{P}(f)|$ is an upper bound on the circuit complexity of $f$.
\end{proposition}
\begin{proof}
Given an optimal cover $\mathcal{P}(f)$, unless $f$ is a projection of one of its inputs (in which case $\mathcal{P}(f)$ is empty), we note that
the result column of $f^{\bullet}$ (i.e., the last column of $f^{\bullet}$), which we denote by $r$, must be a column of one of the gates in $\mathcal{P}(f)$. If not, consider the $w \in P_{2^n}$ that is a projection onto its $i$th coordinate for all inputs except $r$, for which $w$ is the negation of its $i$th coordinate. Then $w \in \overline{pPol}(f^{\bullet})$, and $w$ is consistent with all gates in $\mathcal{P}(f)$, which is a contradiction.
Assume there is an input to a gate $g \in \mathcal{P}(f)$ that is not an input to $f$ and that is not an output of a gate in $\mathcal{P}(f)$. Since $\mathcal{P}(f)$ is minimal, there is a $w \in \overline{pPol}(f^{\bullet})$ that is covered only by $g$ (and by no other gate in $\mathcal{P}(f)$). Let $w'$ be undefined on the input to $g$ assumed above, but otherwise identical to $w$. Then $w'$ is not covered by $\mathcal{P}(f)$ and $w' \in \overline{pPol}(f^{\bullet})$, which contradicts the fact that $\mathcal{P}(f)$ is a cover.
Thus, the gates in $\mathcal{P}(f)$ would form an optimal circuit computing $f$, should no cycles be present. Utilizing conditions (1)-(3) in the definition of a $pPol$ cover (Definition~\ref{def:cover}), we can show that cycles are impossible. Assume that $\mathcal{P}(f)$ contains a cycle and pick an arbitrary gate $g$ on the cycle. Again, since $\mathcal{P}(f)$ is minimal there is a $w \in \overline{pPol}(f^{\bullet})$ that is covered only by $g$. Let $w'$ be identical to $w$ except that $w'$ is undefined on every output (and hence at least one input) of all the gates in the cycle that $g$ belongs to. By conditions (2) and (3), none of the columns in $f^{\bullet}$ can be part of a cycle, and hence $w' \in \overline{pPol}(f^{\bullet})$. To see that $w'$ is not covered by $\mathcal{P}(f)$, note that $g$ does not cover $w'$ as $w'$ is undefined on an input to $g$. If another gate $g'$ in $\mathcal{P}(f)$ covers $w'$, it must be because $w'$ (as opposed to $w$) is undefined on the output of $g'$, implying that $g'$ has the same output as a gate on the cycle, which is impossible by condition (1).
\end{proof}
\begin{corollary}
$P/poly$ is the class of languages defined by functions having polynomial $pPol$ covers, i.e., $\{f_n \in B_n\}_{n \geq 0}$ with $|\mathcal{P}(f_n)| \leq poly(n)$.
\end{corollary}
\section{Polymorphisms and non-deterministic circuits}
\label{sec:total}
We first introduce a special type of non-deterministic circuits called total single-valued non-deterministic circuits (TSVND circuits). These circuits have appeared previously in the literature mainly in relation to derandomization of Arthur-Merlin games, see for example~\cite{GST03,MV05}.
\begin{definition}\cite{GST03}
A TSVND circuit is a non-deterministic circuit $C(x,y)$ with three possible outputs $0,1$ and $quit$, such that for each $x \in \{0,1\}^n$, either $\forall y\; C(x,y) \in \{0,quit\}$ or $\forall y\; C(x,y) \in \{1,quit\}$. That is, there can be no $y,y'$ such that $C(x,y) = 1$ and $C(x,y') = 0$, and we define $C(x) = b \in \{0,1\}$ if there exists $y$ such that $C(x,y) = b$, and $C(x) = quit$ if there is no such $y$. Finally, we require $C$ to define a total function on $\{0,1\}^n$, i.e., for each $x \in \{0,1\}^n$, $C(x) \neq quit$.
\end{definition}
The following fact about TSVND circuit complexity is easy to realize.
\begin{proposition}
$f \in B_n$ has TSVND circuit complexity $O(s(n))$ if and only if $f$ has non-deterministic circuit complexity $O(s(n))$ and co-non-deterministic circuit complexity $O(s(n))$.
\end{proposition}
\begin{proof}
Given a non-deterministic circuit $C_1$ for $f$ (with non-deterministic inputs $y_1$) and a co-non-deterministic circuit $C_2$ for $f$ (with non-deterministic inputs $y_2$) we construct a TSVND circuit $C$ for $f$ (with non-deterministic inputs $y_1,y_2$) by using $C_1$ and $C_2$ as sub circuits. Let $C(x,y_1,y_2) = 1$ if $C_1(x,y_1) = 1$, $C(x,y_1,y_2) = 0$ if $C_2(x,y_2) = 0$, and $C(x,y_1,y_2) = quit$ otherwise.
Given a TSVND circuit $C(x,y)$ for $f$ we construct a non-deterministic circuit $C_1(x,y)$ for $f$ by changing all $quit$ outputs in $C$ to $0$. Similarly, we construct a co-non-deterministic circuit $C_2(x,y)$ for $f$ by changing all $quit$ outputs in $C$ to $1$.
\end{proof}
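A minimal sketch of this composition, modelling circuits simply as Python functions (a simplification of the gate-level construction; the names are ours):
\begin{verbatim}
def tsvnd_from_nd_pair(C1, C2):
    """Combine a non-deterministic circuit C1 for f and a
    co-non-deterministic circuit C2 for f into a TSVND circuit."""
    def C(x, y1, y2):
        if C1(x, y1) == 1:     # C1 accepts, so f(x) = 1
            return 1
        if C2(x, y2) == 0:     # C2 rejects, so f(x) = 0
            return 0
        return 'quit'
    return C

def nd_pair_from_tsvnd(C):
    """The other direction: replace 'quit' by 0 (resp. 1)."""
    C1 = lambda x, y: 0 if C(x, y) == 'quit' else C(x, y)
    C2 = lambda x, y: 1 if C(x, y) == 'quit' else C(x, y)
    return C1, C2
\end{verbatim}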
\begin{proposition}
$|\mathcal{T}(f)|$ is a lower bound on the TSVND circuit complexity of $f$.
\end{proposition}
\begin{proof}
Given an optimal TSVND circuit $C(x,y)$ for $f$ (with $n$ (ordinary) inputs $x$ and $m$ non-deterministic inputs $y$), we construct a cover $\mathcal{T}$ by fixing, for each $x$, a witness $y$ such that $C(x,y) = b \in \{0,1\}$.
Denote by $fY$ the $(n+m)\times 2^n$ matrix resulting from appending to each input $x$ the corresponding witness $y$ and sorting the rows in lexicographic order. Each gate $g$ of $C$ (which is a $2^{n+m}$-gate) is transformed into a $2^n$-gate $g'$.
For each $1 \leq i \leq 2^n$ the $i$th row of $g'$ is the input(s) and output of $g$ when $C$ is passed the inputs $(x,y)$ where $(x,y)$ is the $i$th row of $fY$. Let $\mathcal{T}$ denote the resulting collection of $2^n$-gates. Note that the number of $2^n$-gates in $\mathcal{T}$ is $|C|$.
To prove that $\mathcal{T}$ is a $Pol$ cover for $f$, assume to the contrary that there is some $w \in \overline{Pol}(f^{\bullet})$ that is not covered by any gate in $\mathcal{T}$.
Order the gates of $C$ such that no gate has an output which is the input of a gate earlier in the order, with the last gate in the order being the output gate. Order the result columns of all the gates in $\mathcal{T}$ in the exact same order and append them to $fY$. Denote the resulting $(n+m+|C|)\times 2^n$ matrix by $fY\mathcal{T}$ and let $v$ be the vector (of length $(n+m+|C|)$) resulting from applying $w$ to the columns of $fY\mathcal{T}$.
We claim that $v$ represents a correct computation of $(w(x),w(y))$ in $C$. If not, then there is a gate $g_i$
with inputs $v_{i_1}$ and $v_{i_2}$ such that $v_{i_1} \circ_i v_{i_2} \neq v_i$ ($v_i$ is the output of $g_i$). But this is impossible since applying $w$ to the gate $g'_i$ (i.e., the gate in $\mathcal{T}$ corresponding to $g_i$) results in $(v_{i_1}, v_{i_2}, v_i)$,
and $v_{i_1} \circ_i v_{i_2} = v_i$ since $w$ by assumption is not covered by any gate in $\mathcal{T}$. Hence, $v$ represents a correct computation of $C$ on input $(w(x),w(y))$. The last element of $v$ (i.e., $v_r$ with $r=n+m+|C|$) is $w$ applied to the last column of $fY\mathcal{T}$, which is the output column of the output gate and hence equals the result column of $f^{\bullet}$; that is, $v_r = w(x_{n+1})$. On the other hand, since $v$ is a correct computation and $C$ is a TSVND circuit for $f$, it never outputs $1 - f(w(x))$, so $v_r = f(w(x))$.
But $w \in \overline{Pol}(f^{\bullet})$ means precisely that $f(w(x)) \neq w(x_{n+1}) = v_r$, a contradiction.
Hence, $\mathcal{T}$ is a $Pol$ cover for $f$, and $|\mathcal{T}(f)|$ is a lower bound on the TSVND circuit complexity of $f$.
\end{proof}
\begin{proposition}
$f$ has TSVND circuit complexity $O(|\mathcal{T}(f)|)$.
\end{proposition}
\begin{proof}
Given a $Pol$ cover $\mathcal{T}$ for $f$, we show how to construct a TSVND circuit $C$ for $f$ of size $O(|\mathcal{T}|)$.
First note that the result column of $f^{\bullet}$ (i.e., the last column of $f^{\bullet}$), which we denote by $r$, must be a column of one of the matrices in $\mathcal{T}(f)$. If not, consider the $w \in B_{2^n}$ that is a projection onto its $i$th coordinate for all inputs except $r$, for which $w$ is the negation of its $i$th coordinate. Then $w \in \overline{Pol}(f^{\bullet})$, and $w$ is consistent with all gates in $\mathcal{T}$, which contradicts the definition of a cover.
Name the columns of $f^{\bullet}$ $x_1,\dots,x_n,x_{n+1}$ (note that $x_{n+1} = r$). Name each column of $\mathcal{T}$ by the corresponding $x_i$, in case it appears in $f^{\bullet}$, otherwise name it $y_i$ such that identical columns get the same name and no two different columns get the same name.
The columns $x_1,\dots,x_n$ are the deterministic inputs to $C$, while $x_{n+1}$ and the $y_i$'s are treated as non-deterministic inputs. We hard code each gate from $\mathcal{T}$ in the circuit $C$ with the names given, i.e., if the gate is a $\land$ gate with the first two columns being $x_2$, $y_5$ and the last being $x_1$, we store it as $x_2 \land y_5 = x_1$.
On input $(x,y) = (x_1,\dots,x_n,x_{n+1},y_1,\dots,y_m)$, $C$ outputs $quit$ if $(x,y)$ is not a consistent assignment to the variables in the stored gates, and outputs $x_{n+1} = r$ otherwise. First note that for all $x$ there is a $y$ such that $C(x,y) \in \{0,1\}$, namely, let $y$ be the assignment resulting from taking the row identified by $x$ in $\mathcal{T}(f)$. Secondly, for each $x$ there can be no $y$ and $y'$ such that
$C(x,y) = 1$ and $C(x,y') = 0$, since then one of $(x,y)$ or $(x,y')$ would correspond to a $w \in \overline{Pol}(f^{\bullet})$ that is not covered by $\mathcal{T}$. Hence, $C$ is a TSVND circuit computing $f$.
Note that as the amount of information that we need to hard code in $C$ is at most a constant times $|\mathcal{T}|$, and the operation of the circuit is a simple evaluation, $C$ has size $O(|\mathcal{T}|)$.
\end{proof}
\begin{corollary}
$NP/poly \cap coNP/poly$ is the class of languages defined by functions having polynomial $Pol$ covers, i.e., $\{f_n \in B_n\}_{n \geq 0}$ with $|\mathcal{T}(f_n)| \leq poly(n)$.
\end{corollary}
\end{document}
\begin{document}
\title{Two-step orthogonal-state-based protocol of quantum
secure direct communication with the help of order-rearrangement
technique} \author{Preeti Yadav}
\email{[email protected]} \affiliation{Dept. of Physics,
Indian Institute of Technology Kanpur, Kanpur- 208016, India.} \author{R. Srikanth}
\email{[email protected]} \affiliation{Poornaprajna Institute of
Scientific Research, Sadashivnagar, Bengaluru- 560080, India}
\affiliation{Raman Research Institute, Sadashivnagar, Bengaluru-
560060, India.} \author{Anirban Pathak}
\email{[email protected]} \affiliation{Jaypee Institute of
Information Technology, A-10, Sector-62, Noida, UP-201307, India}
\affiliation{RCPTM, Joint Laboratory of Optics of Palacky University
and \\ Institute of Physics of Academy of Science of the Czech
Republic, Faculty of Science, Palacky University, 17. listopadu
12, 77146 Olomouc, Czech Republic.}
\begin{abstract}
The Goldenberg-Vaidman (GV) protocol for quantum key distribution
(QKD) uses orthogonal encoding states of a particle. Its security
arises because operations accessible to Eve are insufficient to
distinguish the two states encoding the secret bit. We propose a
two-particle cryptographic protocol for quantum secure
direct communication, wherein orthogonal states encode
the secret, and security arises from restricting Eve from accessing
any two-particle operations. However, there is a non-trivial
difference between the two cases. While the encoding states are
perfectly indistinguishable in GV, they are partially distinguishable
in the bi-partite case, leading to a qualitatively different kind of
information-vs-disturbance trade-off, and also different options for Eve, in the
two cases.
\end{abstract}
\pacs{03.67.Dd,03.67.Hk, 03.65.Ud}
\keywords{quantum cryptography, communication security}
\maketitle
\section{\label{sec:Introduction}Introduction}
Recent advances in device-independent quantum cryptography
\cite{mpa11} have brought to the fore the relevance of multi-particle
systems in quantum cryptography, following a line of thought first
initiated by Ekert \cite{E91}. In response to this work, Ref.
\cite{bbm92} proposed a quantum key distribution (QKD) scheme based on
Einstein-Podolsky-Rosen (EPR) correlations, which was shown to be equivalent to the original Bennett-Brassard 1984 (BB84) \cite{bb84} protocol for QKD, a protocol that uses separable particles instead of entangled ones. The
argument of Ref. \cite{bbm92} would suggest that the security
features of EPR were reflected in BB84. A similar relation exists
arguably between the Ping-pong \cite{ping-pong} on the one hand, and
DL04-QSDC \cite{DL04} or Lucamarini-Mancini 2005 (LM05) \cite{lm05}
protocols, on the other, in the sense that the former may be
considered as the entangled version of the latter.
All the separable-state protocols discussed above, BB84, DL04-QSDC and
LM05, employ non-orthogonal states, whose perfect indistinguishability
lies at the heart of their security. Further, perfect
indistinguishability of non-orthogonal states also provides security
to many other protocols of QKD, such as B92 \cite{b92} and DL04-QKD
\cite{Deng-Long04} protocols. By contrast, Goldenberg and Vaidman
\cite{GV95} proposed a protocol (GV), demonstrating that secure
cryptography can be accomplished even with orthogonal states. The key
point was that they were superpositions of geographically separated
wave packets. Secrecy in this case arises because the set of operations Eve can apply is restricted by quantum mechanics.
Most of the early protocols \cite{bb84,b92,E91} of quantum
cryptography were limited to QKD. Specifically, these quantum
cryptographic protocols are designed to generate an unconditionally
secure random key by quantum means and subsequently
classical cryptographic procedures are used to encode the message
using the key generated by these protocols. Interestingly, later
protocols for secure communication \cite{banerjee2012,long2007review} were proposed that allow one either to generate a \textit{deterministic} key or to circumvent the prior generation of a key. These protocols of
secure direct quantum communication can broadly be divided into three
sub-classes: (i) Deterministic QKD protocols \cite{GV95, ping-pong,
lm05}; (ii) protocols for deterministic secure quantum communication
(DSQC) \cite{banerjee2012, xiu2009DSQC, long2007review}; and finally,
(iii) protocols for quantum secure direct communication (QSDC)
\cite{DL04}.
In deterministic QKD and DSQC, there is some information leakage of
classical data prior to detection of eavesdropping by Eve.
Deterministic QKD solves this problem by transmitting a random key,
rather than the secret message. In DSQC, the receiver (Bob) can
decode the message only after receipt of an encoding key, which is
some additional classical information (at least one bit for each qubit
transmitted by the sender (Alice)). Thus in the event of leakage
detection, the encoding key is not published, in order to protect the
message.
In contrast to DSQC, when no such additional classical information is
required, a direct secure quantum communication of the message can be
achieved, which happens in a QSDC protocol. Protocols of DSQC and
QSDC are interesting for various reasons. Firstly, a conventional
QKD-based quantum communication protocol uses a classical intermediate
step to transmit the message, but no such classical intermediary is
required in DSQC and QSDC. Further, a QSDC or DSQC protocol can
always be turned into a protocol of QKD as the sender who is capable
of communicating a meaningful message can also choose to communicate a
random string of bits to convert the protocol into a protocol of QKD.
However, the converse is not true (i.e., a QKD protocol cannot be used
as a protocol of QSDC or DSQC).
In this work, we consider an orthogonal-state based quantum
cryptography protocol that uses two-particle entanglement. By
transmitting the two particles separately, we obtain security because
the set of states distinguishable via the accessible operations to Eve
fail to distinguish the encoding states. As in GV, our protocol
requires delayed measurement on the first particle in order to work.
(Therefore, from a practical perspective, quantum memory, an expensive
resource, is required). By contrast, for non-orthogonal-state based
protocols, delayed measurement is replaced by random measurement
choice, but can be used to improve efficiency \cite{DLW+04}. An
important difference with the single-particle case is the degree of
distinguishability, thus making the proof of security quite different
in the two-particle case. Further, our use of block
transmission and an order-rearrangement technique makes the protocol
suitable for QSDC, while GV is a protocol for QKD.
In GV, both the encoding states as well as error-checking states
involve only orthogonal states, while in BB84, both types of states
are non-orthogonal. More generally, one may consider cryptography
protocols that involve orthogonal-state encoding but allow conjugate
coding for error-checking \cite{LL02, BGL+04, WDL+05, WDL05,
LDZ+08}. Using the strategy adopted for eavesdropping checking in
the protocol proposed in the present paper it is possible to modify
these protocols \cite{LL02, BGL+04, WDL+05, WDL05, LDZ+08} into
equivalent completely orthogonal-state-based protocols.
\section{\label{sec:GV}The GV protocol and its security}
We briefly review GV. Alice and Bob are located at two ends of a large
Mach-Zehnder interferometer. Let $|U(t)\rangle$ and $|L(t)\rangle$ be
two localized wave packets of Alice's particle $S$, travelling by the
upper and lower arm of the interferometer, respectively. Classical
bit $j (= 0,1)$ is encoded as:
\begin{equation}
|\Psi_{j}\rangle = \frac{1}{\sqrt{2}}\left(|U(t_s)\rangle +
(-1)^j|L(t_s)\rangle\right),
\label{eq:psij}
\end{equation}
where it is assumed that there is no overlap between the supports of
$\{|U(t)\rangle\}$ and $\{|L(t)\rangle\}$. Alice sends Bob either
$|\Psi_{0}\rangle$ or $|\Psi_{1}\rangle$ by delaying packet $L$ by
time $\Delta$ to ensure that $|U\rangle$ and $|L\rangle$ are
\textit{not} present in the channel at the same time. In his station,
Bob receives $|U(t_s+\tau)\rangle$, where $\tau$ is the travel time of
the pulse from Alice's to Bob's station. Bob puts the pulse on hold
for time $\Delta$ (where $\tau < \Delta$), before combining it with
$|L(t_s+\Delta+\tau)\rangle$, to recreate the superposition state
$|\Psi_j^\prime\rangle$, which is the same as $|\Psi_{j}\rangle$,
apart from an inconsequential global phase. Bob then decodes bit
$j$ deterministically from his interferometric output.
Alice and Bob perform the following two tests to detect Eve's possible
malicious eavesdropping: (1) They compare the sending time $t_{s}$
with the receiving time $t_{r}$ for each wave packet. We must have
$t_{r}=t_{s}+\tau+\Delta.$ This ensures that Eve cannot delay the
upper packet until she also has the lower packet, which
would allow her to decode the states. Even so, she may replace both
wave packets with a corresponding dummy. To avoid such an attack, the
timing of transmission of particles is kept random. To facilitate
this, Alice and Bob discretize their sending times into a sequence of
time bins. (2) Alice selects a fraction of particles and announces
their time coordinates. Bob announces his measurement outcomes on
them. Alice ensures that his received bits are consistent with her
transmitted bits.
The security of GV can be understood in terms of an extended
no-cloning theorem applicable to orthogonal states, when Eve's
operations are restricted by the fact that she can physically access
only one of the pieces $|U(t)\rangle$ and $|L(t)\rangle$ at a given
time \cite{M98}. We present a slightly different version, amenable to
subsequent generalization.
The simplest operation accessible is a projective measurement onto the
basis $\{|U(t)\rangle,|L(t)\rangle\}$, where we may ignore the
time-dependence for convenience. If Eve measures projectively in this
basis, she merely disrupts the coherence between the wave packets and
is detected, but obtains no information about the secret bit $j$.
More generally, Eve can introduce a probe $P$ that interacts with
Alice's particle according to:
\begin{equation}
\mathcal{U} \equiv |U\rangle\langle U| \otimes C_U + |L\rangle\langle
L|\otimes C_L,
\label{eq:mez}
\end{equation}
where $C_U$ and $C_L$ are unitaries acting on the ancilla alone.
Because the two packets are never together on the channel, causality
demands that Eve's attack cannot unitarily mix the $U$ and $L$
pieces. An implication is that no attack by Eve, which is confined to
the form (\ref{eq:mez}), can extract secret bit $j$, because this is
stored as the phase information between the two wave packets, and
cannot be accessed \textit{even probabilistically}. We prove this
below.
Let $|R\rangle$ be the initial `ready' state of the probe. Acting on
the particle-probe system, Eq. (\ref{eq:mez}) transforms an initial
state $|\Psi_j\rangle \otimes |R\rangle$ into
$\frac{1}{\sqrt{2}}\left(|U\rangle\otimes|u\rangle + (-1)^j|L\rangle\otimes|d\rangle\right)$,
where $|u\rangle \equiv C_U|R\rangle$ and $|d\rangle \equiv C_L|R\rangle$.
After the wave packets are recombined by Bob, the joint particle-probe
state, written in the $\{|U\rangle,|L\rangle\}$ basis, is
\begin{equation}
\rho_{SP} = \frac{1}{2}\left(
\begin{array}{cc}
|u\rangle\langle u| & (-1)^j |u\rangle\langle d| \\
~ & ~ \\
(-1)^j|d\rangle\langle u| &
|d\rangle\langle d|
\end{array}\right).
\end{equation}
The probe is now left in the state:
\begin{equation}
\rho_P^\prime = \textrm{Tr}_S(\rho_{SP}) =
\frac{1}{2}(|d\rangle\langle d| + |u\rangle\langle u|),
\label{eq:detector}
\end{equation}
which, as with the case of projective measurements, yields no
information to Eve about the secret bit $j$. In other words, Eve
gains nothing by attacking in the case of individual attacks. It is
not difficult to see that this is also true for Eve's collective and
joint attacks.
\begin{figure}
\caption{Bob's information ($I_B$, falling dashed curve) and Eve's
information ($I_E$, rising line) as a function of the observed error
$e$; for $e \geq e_{\rm max}$, $I_E$ exceeds $I_B$.}
\label{fig:info_dist_GV}
\end{figure}
Assuming ideal single-photon sources and detectors with Alice and Bob,
the only way for Eve to attack the GV protocol is to substitute
dummies, blocking a fraction $f$ of the genuine particles. Suppose that
Alice and Bob agree to discretize the random sending time. In each
sequential block of $\gamma$ (an integer) time steps, one particle is
transmitted by Alice in a randomly
chosen time cell within the block. Eve's strategy would be to fully
blockade a fraction $f$ of randomly chosen blocks, and transmit a
dummy prepared by her in a randomly chosen time within the block. The
probability that she gets a match with Alice's transmission cell is
$1/\gamma$.
To calculate the error rate Eve generates, we note that
even when Eve gets the timing right,
she will be wrong half the time about the encoded state. Thus Eve
generates error rate
\begin{equation}
e = f \times
\left[\frac{1}{2}\frac{1}{\gamma} + \left(1 - \frac{1}{\gamma}\right)\right]
= f \times \left(1-\frac{1}{2\gamma}\right),
\label{eq:wr}
\end{equation}
where $\gamma$ is a publicly known number. Bob's average information
on the sifted bits is given by $I_B \equiv I(A:B) = 1 - h(e)$, where
$h(\cdot)$ is the binary Shannon information. On the dummies whose
timing is right, Eve has full information, i.e.,
\begin{equation}
I(A:E) = I(B:E) \equiv I_E= \frac{f}{\gamma}
= \frac{2e}{2\gamma-1},
\label{eq:Evef}
\end{equation}
where the last equation follows from Eq. (\ref{eq:wr}) and
$I(A:E)$ and $I(B:E)$ denote Eve's mutual information with Alice and
with Bob, respectively. She knows when she got the timing right once Alice and Bob
perform the equivalent of basis reconciliation for the sending times.
The corresponding data is plotted in Figure \ref{fig:info_dist_GV}.
The requirement for positive secret key rate is determined by
\cite{CK78}
\begin{equation}
K \equiv I_B - \textrm{min}(I(A:E),I(A:B)) = I_B-I_E,
\label{eq:key}
\end{equation}
from which the maximum tolerable error is found to be $e_{\rm max}
\approx 0.26$.
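A minimal numerical sketch of this threshold computation is given below; the choice $\gamma = 2$ is an illustrative assumption (the threshold depends on $\gamma$), and with it the bisection reproduces $e_{\rm max} \approx 0.26$:
\begin{verbatim}
import numpy as np

def h(e):                       # binary Shannon entropy
    return -e * np.log2(e) - (1 - e) * np.log2(1 - e)

gamma = 2                       # illustrative number of time cells per block

def key_rate(e):
    I_B = 1 - h(e)                      # Bob's information
    I_E = 2 * e / (2 * gamma - 1)       # Eve's information, cf. Eq. (eq:Evef)
    return I_B - I_E

# Bisection for the zero of the (strictly decreasing) key rate on (0, 1/2).
lo, hi = 1e-6, 0.5 - 1e-6
for _ in range(60):
    mid = (lo + hi) / 2
    lo, hi = (mid, hi) if key_rate(mid) > 0 else (lo, mid)
print((lo + hi) / 2)            # ~0.26 for gamma = 2
\end{verbatim}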
\section{Towards a two-particle orthogonal-state based
protocol \label{sec:2multi}}
In seeking a protocol that extends GV to a two-particle (or
multi-particle) scenario, we are naturally led to consider
a cryptographic adaptation of quantum dense coding (cf.
the protocol of Ref. \cite{Deng} for dense coding based secure direct
communication). By analogy with GV, one might expect that Alice
should transmit the two entangled particles one after another, at
random timings, such that both are never present on the open channel at the same time.
Surprisingly, this can be completely insecure against Eve, whose
strategy would be as follows. When the first particle comes, she
holds it, and transmits her own half of a Bell state towards Bob. She
can in principle find out the position of the randomly sent second
particle, measure it jointly with Alice's first particle, determine
their joint state, and then transmit a dummy particle appropriately
entangled with her first dummy particle. Here we have assumed that
Eve's measurements take negligible time.
To avoid this attack, such bi-partite cryptographic protocols may add
multi-partite non-orthogonal states either to the coding or to the
checking step (as in BB84). However, if we remain restricted to
orthogonal states, then the order of particles needs to be scrambled,
via the permutation of particles (PoP) action, an idea first introduced
by Deng and Long in 2003 in a pioneering work \cite{DL03} on the
``controlled order rearrangement encryption'' (CORE) QKD protocol.
In the present work, a two-particle QSDC protocol inspired by GV,
which is referred to as 2GV, is presented along these lines in the
next section.
Now suppose Eve does \textit{not} launch the dummy particle
attack. Assuming ideal sources and detectors, GV is secure.
Interestingly, a bipartite generalization of GV (without PoP) is not.
The reason is interesting and highlights a difference between single-
and bi-partite nonlocality: while Eve gets no information on the
encoded bits when the two packets are de-synchronized, in the
bipartite case, partial information can be obtained, as detailed
below.
Alice and Bob employ a key distribution protocol where the key is
shared via a dense coding strategy, and must test for Eve after the
transmissions are completed. Alice and Bob need to model Eve's attack
strategy, and estimate whether Eve's information on their secret
bits, as a function of observed noise, is too high to be eliminated by
subsequent classical post-processing. If it is, only then do they
abort the protocol run. We furnish a security proof of the protocol,
assuming individual attacks by Eve on each of the two coding
particles. From this we extract an information-vs-disturbance
trade-off, and hence determine the largest tolerable error rate.
As a specific example of the attack employed by Eve, we consider a
model given in Ref. \cite{zbi}, which is based on one proposed by Niu
and Griffiths \cite{NiuGri}. Eve attaches a probe to each of the two
transmitted qubits, each qubit--probe pair being subjected to the
interaction: \begin{eqnarray} |0\rangle|E\rangle & \rightarrow &
\sqrt{\frac{1+\cos\theta}{2}}|0\rangle|\epsilon_{0}\rangle+
\sqrt{\frac{1-\cos\theta}{2}} |1\rangle|E_{0}\rangle\nonumber
\\ |1\rangle|E\rangle & \rightarrow &
\sqrt{\frac{1+\cos\theta}{2}}|1\rangle|\epsilon_{1}\rangle+
\sqrt{\frac{1-\cos\theta}{2}}|0\rangle|E_{1}\rangle,
\label{eq:niugri}\end{eqnarray}
where, furthermore $\langle\epsilon_0|\epsilon_1\rangle = \langle
E_0|E_1\rangle = \cos\theta$ by virtue of symmetry in the attack
strategy. For simplicity, the same attack parameter $\theta$ is
assumed to characterize the attack on both particles. This results in
the initial state $\rho_{AB}$, which is a Bell state in 2GV, evolving
into a joint state of the particles and probes,
$\rho_{ABE_1E_2}^{\prime\prime}$.
After some straightforward calculation, the above attack can be shown
to produce the reduced density operator
\begin{equation}
\rho_{AB}^{\prime\prime} = \textrm{Tr}_{E_1E_2}
\left(\rho_{ABE_1E_2}^{\prime\prime}\right) =
\left( \begin{array}{cccc} \frac{1}{4}(1 + \cos^2\theta) & 0 & 0 &
\frac{1}{4}(1 + \cos^2\theta)\cos^2\theta \\ 0 &
\frac{1}{4}\sin^2\theta & \frac{1}{4}\sin^2\theta\cos^2\theta & 0
\\ 0 & \frac{1}{4}\sin^2\theta\cos^2\theta & \frac{1}{4}\sin^2\theta
& 0 \\ \frac{1}{4}(1 + \cos^2\theta)\cos^2\theta & 0 & 0 &
\frac{1}{4}(1 + \cos^2\theta)
\end{array}\right).
\end{equation}
The quality of state received by Bob can be quantified by the fidelity
$\langle\Phi^+|\rho_{AB}^{\prime\prime}|\Phi^+\rangle = \frac{1}{4}(1 +
\cos^2\theta)^2$ (where we assume $\rho_{AB} =
|\Phi^+\rangle\langle\Phi^+|$). It follows that in order to produce
no errors, Eve must ensure that $\theta=0$, which by virtue of
Eq. (\ref{eq:niugri}), implies that no entanglement is generated, and
in fact $|\epsilon_0\rangle = |\epsilon_1\rangle$, implying a trivial
interaction of the probe with Alice's qubit. Thus, if no errors are
generated, then Eve gains no information. More generally, suppose
finite errors are observed.
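As a sanity check on the fidelity quoted above, the following numpy sketch (ours, not part of the original analysis) implements the attack (\ref{eq:niugri}) with an explicit choice of probe states obeying $\langle\epsilon_0|\epsilon_1\rangle=\langle E_0|E_1\rangle=\cos\theta$ and, as is standard for this attack model, $\langle\epsilon_i|E_j\rangle=0$, applies it to both halves of $|\Phi^+\rangle$, and recovers $\frac{1}{4}(1+\cos^2\theta)^2$ numerically:
\begin{verbatim}
import numpy as np

theta = 0.7                                  # arbitrary attack angle
c = np.cos(theta)
a, b = np.sqrt((1 + c) / 2), np.sqrt((1 - c) / 2)
half = theta / 2

# Probe states in a 4-dimensional probe space with <eps0|eps1> = <E0|E1> =
# cos(theta) and <eps_i|E_j> = 0.
eps0 = np.array([np.cos(half),  np.sin(half), 0, 0])
eps1 = np.array([np.cos(half), -np.sin(half), 0, 0])
E0   = np.array([0, 0, np.cos(half),  np.sin(half)])
E1   = np.array([0, 0, np.cos(half), -np.sin(half)])
ket0, ket1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])

def attack(q):
    """Isometry of Eq. (eq:niugri): qubit -> qubit (x) probe."""
    return (q[0] * (a * np.kron(ket0, eps0) + b * np.kron(ket1, E0)) +
            q[1] * (a * np.kron(ket1, eps1) + b * np.kron(ket0, E1)))

# |Phi+> = (|00> + |11>)/sqrt(2), attacked qubit by qubit.
psi = (np.kron(attack(ket0), attack(ket0)) +
       np.kron(attack(ket1), attack(ket1))) / np.sqrt(2)
psi = psi.reshape(2, 4, 2, 4)                # indices: A, probe1, B, probe2

# Fidelity <Phi+| rho''_AB |Phi+> = ||(<Phi+| (x) 1_probes)|psi>||^2.
proj = (psi[0, :, 0, :] + psi[1, :, 1, :]) / np.sqrt(2)
print(np.sum(proj ** 2), (1 + c ** 2) ** 2 / 4)   # the two values agree
\end{verbatim}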
\begin{figure}
\caption{(A) Bob's information $I_B$ and (B) the Holevo bound on Eve's
information, as functions of the eavesdropping parameters $\theta$
(overlap angle, defined by Eq. (\ref{eq:niugri})) and $\lambda$ (the
attacked fraction).}
\label{fig:BobEvesInfo}
\end{figure}
The error rate observed by Alice and Bob is given by:
\begin{equation}
e=1-\langle\Phi|\rho^{\prime}_{AB}|\Phi\rangle
\label{eq:error}
\end{equation}
where $|\Phi\rangle\in\{|\Phi^{\pm}\rangle,|\Psi^{\pm}\rangle\}$ and
\begin{equation}
\rho^{\prime}_{AB}(\theta,\lambda) = (1-\lambda)|\Phi\rangle\langle\Phi|
+ \lambda \rho^{\prime\prime}_{AB}
\label{eq:rhopab}
\end{equation}
is the corresponding two-particle state obtained assuming Eve attacks
fraction $\lambda$ of the incoming particle pairs with eavesdropping
parameter $\theta$ as defined in Eq. (\ref{eq:niugri}). Bob's
information $I_{B}$ is quantified as the Alice-Bob mutual information
$I_B \equiv I(A:B)$ when Bob measures the incoming states in the Bell
basis. As a function of $\theta, \lambda$, it is:
\begin{equation}
I_B(\theta,\lambda) = H(A) - H(A^\prime(\theta,\lambda)|B = \Phi),
\label{eq:IBE}
\end{equation}
where $H(A)$ is Alice's preparation entropy and $
H(A^\prime(\theta,\lambda)|B = \Phi)$ is the conditional entropy of
$\rho^\prime_{AB}(\theta,\lambda)$ when Bob measures in the Bell
basis. The quantity is presented in Fig. \ref{fig:BobEvesInfo}A as a
function of Eve's attack parameters.
Eve's information $I_E \equiv I(A:E) = I(B:E)$ is upper-bounded by the
Holevo bound $\chi$ of the reduced density operator of the two probes:
\begin{equation}
\chi = S\left(\sum_j p_j \rho^{\prime(j)}_{E_1E_2}\right)
- \sum_j p_j S\left(\rho^{\prime(j)}_{E_1E_2}\right) \ge I_E(\theta,\lambda),
\label{eq:holevo}
\end{equation}
where $\rho^{\prime(j)}_{E_1E_2}$ ($j=0,1,2,3$) is the noisy version
of the density operator corresponding to the four Bell states
$|\Phi^\pm\rangle, |\Psi^\pm\rangle$, respectively, being sent by
Alice. This bound on Eve's information is depicted in Fig.
\ref{fig:BobEvesInfo}B. Using the following notation: $c \equiv
\cos(\theta), s \equiv \sin(\theta), K \equiv \frac{1}{2}(1 + c)$, $A
\equiv K^2c^2s^2$, $B \equiv K^2c^3s, C \equiv K^2s^3c$, $D \equiv
\frac{1}{4}s^3c^3$, $E \equiv \frac{1}{4}s^4c^2$, $F \equiv
\frac{1}{4}s^3c^2$, $H \equiv K^2(1 + c^4)$, $I \equiv
\frac{1}{4}(1+c^2)s^2c$, $J \equiv \frac{1}{4}s^4c$, $L \equiv
\frac{1}{4}s^2(1 + c^4)$, $M \equiv \frac{1}{4}s^3$, $N \equiv
\frac{1}{4}s^5c$, $P \equiv (1 - K)^2cs$, $Q \equiv (1 - K)^2s^2$, $R
\equiv 2(1 - K)^2c^2$, we find that if Alice transmits states
$|\Phi^\pm\rangle$, then the corresponding probe states of Eve are:
\begin{equation}
\rho^\pm_{E_1E_2} =
\left(\begin{array}{cccccccccccccccc}
H & B & 0 & 0 & B & A & 0 & 0 & 0 & 0 & \pm I & \pm M & 0 & 0 & \pm F & 0 \\
B & A & 0 & 0 & A & C & 0 & 0 & 0 & 0 & \pm F & 0 & 0 & 0 & \pm J & 0 \\
0 & 0 & L & D & 0 & 0 & D & E & \pm I & \pm M & 0 & 0 & \pm F & 0 & 0 & 0 \\
0 & 0 & D & E & 0 & 0 & E & N & \pm F & 0 & 0 & 0 & \pm J & 0 & 0 & 0 \\
B & A & 0 & 0 & A & C & 0 & 0 & 0 & 0 & \pm F & 0 & 0 & 0 & \pm J & 0 \\
A & C & 0 & 0 & C & K^2s^4 & 0 & 0 & 0 & 0 & \pm J & 0 & 0 & 0 & \pm\frac{1}{4}s^5 & 0 \\
0 & 0 & D & E & 0 & 0 & E & N & \pm F & 0 & 0 & 0 & \pm J & 0 & 0 & 0 \\
0 & 0 &E & N & 0 & 0 & N & \frac{1}{4}s^6 & \pm J & 0 & 0 & 0 & \pm\frac{1}{4}s^5 & 0 & 0 & 0 \\
0 & 0 & \pm I & \pm F & 0 & 0 & \pm F & \pm J & \frac{1}{2}s^2c^2 & Mc & 0 & 0 & Mc & 0 & 0 & 0 \\
0 & 0 & \pm M & 0 & 0 & 0 & 0 & 0 & Mc & \frac{1}{4}s^4 & 0 & 0 & 0 & 0 & 0 & 0 \\
\pm I & \pm F & 0 & 0 & \pm F & \pm J & 0 & 0 & 0 & 0 & R & P & 0 & 0 & P & 0 \\
\pm M & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & P & Q & 0 & 0 & 0 & 0 \\
0 & 0 & \pm F & \pm J &0&0& \pm J & \pm\frac{1}{4}s^5 & Mc & 0 & 0 & 0 & \frac{1}{4}s^4 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
\pm F & \pm J &0&0&\pm J & \pm\frac{1}{4}s^5 & 0 & 0 & 0 & 0 & P & 0 & 0 & 0 & Q & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0
\end{array}\right).
\end{equation}
It is immediately seen that $\rho^+_{E_1E_2}$ and $\rho^-_{E_1E_2}$
are not identical, implying that Eve can gain some information about
Alice's transmission by distinguishing $\rho^{\pm}_{E_1E_2}$. This
encoding-dependence of Eve's probe state in 2GV is in stark contrast
to the general probe state (\ref{eq:detector}) obtained when Eve
attacks GV. Thus Eve's attack in 2GV can obtain partial information
about the encoding, whereas she obtains none in the case of GV, even
when no dummy states are used. Therefore, unlike with GV, in the case
of 2GV, there is an information-vs-disturbance trade-off even when Eve
employs no dummy particles.
The tolerable error rate is computed as:
\begin{equation}
e_{0}=\min_{I_{B}-\chi=0}e,
\label{eq:cliff}
\end{equation}
the smallest error for which $\chi$ just exceeds $I_B$. It may be
considered as the problem of minimizing $e$ subject to the constraint
that Eve's information has zero excess over Bob's. Numerically,
applying the criterion (\ref{eq:cliff}) to the
information-vs-disturbance trade-off data in Figure
\ref{fig:BobEvesInfo}, we found the tolerable error rate
$e_{0}=26.7\%$, as plotted in Figure \ref{fig:errorrate}. This rather
large tolerance can be attributed to the limited power of Eve's attack
here.
\begin{figure}
\caption{Plot of the error (\ref{eq:error}); the tolerable error rate
is $e_{0}=26.7\%$.}
\label{fig:errorrate}
\end{figure}
\section{Two-particle orthogonal-state based protocol \label{sec:2GV}}
Instead of random transmission, Alice transmits multiple halves of
Bell states, and scrambles the order of the second halves. In
realistic protocols, there will be inevitable noise. As is usually
done, any error observed by Alice and Bob is attributed to a putative
Eve's intervention, though errors can also arise due to channel noise.
The scheme enumerated below uses this idea of
re-ordering or permutation of particle order. An illustration of the
protocol is given in Figure \ref{fig:2GV_protocol}.
\begin{figure}
\caption{Illustration of the quantum information processing steps of
the new protocol, indicating the block transmission of qubits
(cf. Ref. \cite{LL02}).}
\label{fig:2GV_protocol}
\end{figure}
\begin{enumerate}
\item Alice prepares the state $|\Psi^{+}\rangle^{\otimes 3n}$ where
$|\Psi^{+}\rangle = \frac{|00\rangle + |11\rangle} {\sqrt{2}}$.
She divides them into two sets: set $S_1$ of $n$ pairs and set $S_2$
of $2n$ pairs. Let $S_j^{(a)}$ denote the first half ($jn$ qubits)
of set $S_j$, and $S_j^{(b)}$ denote the second half of set $S_j$.
She keeps $S_2^{(a)}$ with herself ($2n$ qubits). On the remaining
$4n$ qubits of $S_1 \cup S^{(b)}_2$, she applies a random
permutation operation $\Pi_{4n}$ and transmits them to Bob; $2n$ of
the transmitted qubits are Bell pairs (the members of $S_1$) while
the remaining $2n$ (the members of $S^{(b)}_2$) are the entangled
partners of the particles remaining with Alice.
\item After receiving Bob's authenticated acknowledgment, Alice
classically announces the coordinates of the $2n$ members of $S_1$
among the transmitted particles. Bob measures them in the Bell
basis to determine if they are each in the state $|\Psi^+\rangle$.
If the error detected by Bob is within a tolerable limit, they
continue to the next step. Otherwise, they discard the protocol and
restart from Step 1.
\item Alice randomly chooses a sequence of $n$ qubits from the set
$S_2^{(a)}$ in her possession to form the verification string
$\Sigma_2^{(a|V)}$ for the next round of communication, and encodes
her key in the remaining $n$ qubits of $S_2^{(a)}$ to form the code
string $\Sigma_2^{(a|C)}$. To encode a 2-bit message or key, Alice
applies one of the 4 Pauli (dense coding) operations $I, X, iY, Z$
on her qubit. After the encoding operation, Alice sends all qubits
in her possession (i.e., $S_2^{(a)}$) to Bob.
\item Alice discloses the coordinates of the verification qubits
($\Sigma_2^{(a|V)}$) and their partner particles after receiving
authenticated acknowledgement of receipt of all the qubits from Bob.
Bob performs Bell measurement on the verification qubits and their
partner particles and computes the error rate as in Step 2.
\item If the error rate is tolerably low, then Alice announces the
coordinates of the partner particles of $\Sigma_2^{(a|C)}$ and Bob
uses that information to decode the encoded message or key via a
Bell-state measurement on the remaining Bell pairs, and classical
post-processing.
\end{enumerate}
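A minimal sketch of the index bookkeeping behind the re-ordering step,
assuming nothing beyond the description above (it only tracks which
transmitted slots carry $S_1$ members and which carry the partners of
Alice's retained qubits), is:
\begin{verbatim}
import numpy as np

# Minimal index-bookkeeping sketch of the re-ordering step: Alice transmits
# the 2n qubits of S_1 and the 2n qubits of S_2^(b) in a randomly permuted
# order known only to her.
rng = np.random.default_rng(1)
n = 4
labels = ["S1"] * (2 * n) + ["S2b"] * (2 * n)   # logical roles of the 4n qubits
perm = rng.permutation(4 * n)                   # Alice's secret permutation
received = [labels[i] for i in perm]            # order in which Bob receives them

# Step 2: Alice announces which received slots hold S_1 members; Bob measures
# exactly those slots in the Bell basis for error checking.
s1_slots = [pos for pos, lab in enumerate(received) if lab == "S1"]
print("slots Bob should Bell-measure now:", s1_slots)

# The remaining slots hold the partners of Alice's retained S_2^(a) qubits;
# their coordinates are disclosed only in the later steps.
s2b_slots = [pos for pos, lab in enumerate(received) if lab == "S2b"]
print("slots reserved for verification/encoding:", s2b_slots)
\end{verbatim}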
2GV may be considered as the bi-partite generalization of GV because
the encoding is via orthogonal states, and security arises because the
encoding states cannot be distinguished by the restricted operations
available to Eve. However, there are three important differences.
The first is, as noted above, that randomizing the transmission schedule
of Alice's particles does not help. Second, and more importantly, whereas
geographic separation forbids Eve's attack in GV from unitarily mixing
the states $|U\rangle$ and $|L\rangle$, in 2GV, where the encoding
states are based on \textit{internal} degrees of freedom, the attack
can mix encoding states. Thus, there is no bar on Eve's accessing the
coherence between the particles to gain partial information about the
Bell state being sent, even when she is restricted to attacking single
particles. Hence, unlike in GV, there is an information versus
disturbance trade-off even when Eve does not use dummy particles,
which we discuss below.
Lastly, our protocol satisfies the stronger QSDC security
requirement, while GV in its original form is a protocol for
deterministic QKD which cannot be used for QSDC, but can be used for
DSQC \cite{pathak-book}. This can be understood clearly by
considering that Alice sends a meaningful message to Bob by
transmitting a sequence of $|\Psi_{0}\rangle$ and $|\Psi_{1}\rangle$
using the original GV protocol. In this situation, when Alice sends
$|U\rangle$ then Eve can keep it with her and substitute it by a fake
$|U\rangle$ and send that to Bob without causing any delay. Later,
when $|L\rangle$ is sent by Alice then also Eve will keep that with
her and send a fake $|L\rangle$ to Bob. Eve can now appropriately
superpose $|U\rangle$ and $|L\rangle$ and obtain the meaningful
information (message) encoded by Alice. To prevent this, Alice
randomizes her transmission schedule. Eve can still block particles
and decode the random bits, but she will be eventually caught when
Alice and Bob compare the sending and receiving times. The point is
that GV works by \textit{streaming} qubits. Thus by the time she is
caught, the encoded information will have already been leaked. This
leakage is not a problem with the GV protocol (QKD), because if Eve's
interference is too high, Alice and Bob will not use that key for any
future encryption. In contrast to GV, our protocol uses particle
order arrangement in place of time schedule randomization, and
further, we use \textit{block transmission} \cite{LL02} in place of
stream transmission. As a result, eavesdropping does not reveal
information as the coordinates of the partner particles of the
information encoded qubits are announced only at the last step of the
protocol, i.e., after confirming that no eavesdropping has happened in
the second step of communication when $\Sigma_2^{(a|V)}$ and
$\Sigma_2^{(a|C)}$ are communicated. Clearly the proposed
cryptographic protocol is suitable for DSQC.
Assuming ideal sources and detectors with Alice and Bob, the PoP
device makes the protocol exponentially sensitive to Eve's
intervention. Suppose Eve chooses to attack fraction $f$ of $n$ pairs
of particles transmitted, i.e., $2m$ particles, where $m \equiv \lfloor
nf\rfloor$ is an integer. The probability that the $2m$ attacked
particles are pair-wise closed
(i.e., every particle's twin is within the attacked group) is $p_{\rm
closed} \equiv \left( \begin{array}{c} n \\ m \end{array}\right)
\left( \begin{array}{c} 2n \\ 2m \end{array}\right)^{-1}$, while the
probability that the particles in the closed group are all correctly
paired by Eve is $p_{\rm pair} =
\frac{1}{2m-1}\frac{1}{2m-3}\cdots\frac{1}{3}$. Thus the probability
Eve's attack produces no error is $p_{\rm closed}p_{\rm pair}$, which
is exponentially small.
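A minimal numerical sketch of these two factors, evaluated for a few block
sizes, makes the exponential sensitivity concrete:
\begin{verbatim}
from math import comb

# Minimal sketch of the no-detection probability of a reordering attack:
# p_closed = C(n,m)/C(2n,2m) is the chance that the 2m attacked qubits form
# m complete pairs, and p_pair = 1/(2m-1)!! is the chance that Eve then
# pairs them up correctly.
def odd_double_factorial(k):            # (2m-1)!! = (2m-1)(2m-3)...3*1
    out = 1
    while k > 1:
        out *= k
        k -= 2
    return out

def p_no_detection(n, f):
    m = int(n * f)                      # number of attacked pairs
    p_closed = comb(n, m) / comb(2 * n, 2 * m)
    p_pair = 1.0 / odd_double_factorial(2 * m - 1)
    return p_closed * p_pair

for n in (20, 50, 100):
    print(n, p_no_detection(n, 0.1))
\end{verbatim}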
In our protocol, the efficiency of $\frac{1}{3}$ can be improved
upon in practice if the observed noise level remains stable over
sufficiently many runs, and thus fewer quantum resources need to be
sacrificed to determine it. Our protocol as stated makes no such
assumption about the noise, and thus considers the worst case
scenario. Consequently, in every transmission step, we have used half
of the transmission qubits for error checking. The statistics of
random sampling then guarantees that the probability that the fraction
of errors observed in the check bits deviates significantly from the error
fraction in the code bits is exponentially low \cite{NC00}.
\section{\label{sec:Conclusions} Conclusions and Discussions}
A two-particle QSDC protocol has been proposed, with the
motivation of understanding the similarity and difference between the
origins of security in GV and a multi-particle orthogonal-state based
cryptography scheme. It may be noted that 2GV is technically similar
to CORE QKD protocol \cite{DL03}-- with added ideas (block
transmission technique) from Ref. \cite{LL02}-- rather than to GV. A
non-trivial difference between the two situations was noted. 2GV uses
internal degrees of freedom, while GV uses the spatial degree of
freedom, as a result of which the nature of the
information-vs-disturbance trade-off and the options available to Eve
are quite different, apart from the obvious difference due to
employing different numbers of particles. The PoP technique is
crucial to 2GV, while it can optionally be used to enhance security of
GV. However, for GV, it suffices to increase the parameter $\gamma$,
which is experimentally easy to implement.
\section{Acknowledgments}
PY thanks the Raman Research Institute, Bangalore, India for a student
fellowship, during which most of this work was completed. AP and RS
thank Department of Science and Technology (DST), India for support
provided through projects SR/S2/LOP-0012/2010 and SR/S2/LOP-02/2012,
respectively. AP also thanks the Operational Program Education for
Competitiveness - European Social Fund project CZ.1.07/2.3.00/20.0017
of the Ministry of Education, Youth and Sports of the Czech Republic.
\end{document}
|
\begin{document}
\title{Understanding over-parameterized deep networks by geometrization}
\author{\IEEEauthorblockN{Xiao Dong, Ling Zhou}
\IEEEauthorblockA{Faculty of Computer Science and Engineering, Southeast University, Nanjing, China}}
\IEEEtitleabstractindextext{
\begin{abstract}
A complete understanding of the widely used over-parameterized deep networks is a key step for AI. In this work we try to give a geometric picture of over-parameterized deep networks using our geometrization scheme. We show that the Riemannian geometry of network complexity plays a key role in understanding the basic properties of over-parameterized deep networks, including generalization, convergence and parameter sensitivity. We also point out that deep networks share many similarities with quantum computation systems. This can be regarded as strong support for our proposal that geometrization is not only the bible of physics but also the key idea for understanding deep learning systems.
\end{abstract}
\begin{IEEEkeywords}
over-parameterization, deep networks, geometrization, physics, quantum computation, Riemannian geometry
\end{IEEEkeywords}}
\maketitle
\IEEEdisplaynontitleabstractindextext
\IEEEpeerreviewmaketitle
\section{Motivation}
\emph{Are all layers created equal?} is a recent work that addressed the question of how sensitive the parameters of an over-parameterized deep network are \cite{Bengio2019layer}. Its experiments show heterogeneous characteristics across layers, with bottom layers having a higher sensitivity than top layers. This is an exciting observation since it is \emph{exactly} what the geometry of quantum computation told us about deep networks one decade ago!
In our former work \cite{Dong2019geo}, inspired by the facts that deep networks are effective descriptors of our physical world and that they share geometric structures with physical systems such as geometric mechanics, quantum computation, quantum many-body systems and even general relativity, we proposed a geometrization scheme to interpret deep networks and deep learning systems. The observation of \cite{Bengio2019layer} encouraged us to apply this scheme to over-parameterized deep networks in order to give a geometric description of such networks.
In the following parts of this paper, we will explore the similarities between deep networks and quantum computation systems. We will transfer the rich geometric structure of quantum mechanics and quantum computation systems to deep networks so that we have an intuitive geometric understanding of the basic properties of over-parameterized deep networks, including network complexity, generalization, convergence and the geometry formed by deep networks.
\section{Geometrization}
Geometrization of physics is the greatest and most successful idea in human history for understanding the rules of our physical world. But why can our world be geometrized? In the last decade, we saw a new trend of combining geometrization and quantum information processing to draw a completely new picture of our world. Basically this is to regard our world, including spacetime, matter and the interactions among them, as emergent from a complex quantum deep network. From this point of view, our world is built from deep networks and the geometric structure of the physical world emerges from the geometric structure of the underlying deep networks. So the geometrization of physics \emph{is} essentially the geometrization of the underlying quantum deep networks. The success of the geometrization of physics indicates that geometrization is also the key to understanding deep networks.
The similarities between deep networks and physical systems, including both classical geometric mechanics and quantum computation systems, have been addressed in our former works\cite{Dong_deep}\cite{Dong2019geo}. Here for simplicity we only give a brief recap of key points we have learned from the geometrization of quantum information processing that will be involved in this paper.
\subsection{Geometry of quantum information processing}
It's well known that quantum mechanics has a rich geometric structure so that we believe quantum mechanics is the ultimate rule of our world. Quantum information processing or quantum computation, which explores the complex structure of both quantum states and quantum state evolutions, is the ultimate tool to describe our world and the rules of quantum information processing systems can be applied to all physical systems, including deep networks. So what do we know already about quantum information processing systems?
\textbf{\emph{Gigantic quantum state space and the corner of physical states}} For simplicity we use the most popular model of quantum information processing, i.e., a quantum state is described by an n-qubit system and quantum information processing is described by the quantum circuit model. The quantum state space is huge since the dimension of an n-qubit pure state system is $2^n$ and the number of (distinguishable) states is $O(2^{2^n})$. Among all these states, only a tiny zero measure subset, the corner of physical states, is physically realizable, since the states in this subset can be generated with a polynomial complexity from a simple initial state such as the product state $|00...0\rangle$.
\textbf{\emph{Quantum computational complexity}} The concept of quantum computational complexity plays a key role not only in quantum computation but also in quantum gravity, the black hole information problem and quantum phase transitions\cite{Ge2018Quantum}\cite{Heydari_dynamicdiatance}\cite{Matsueda2013Emergent}\cite{Nielsen_geometry2}\cite{Susskind2016The}\cite{Susskind_ER_bridge}\cite{Susskind_ER_bridge_nowhere}. Basically a quantum algorithm on an n-qubit system is a unitary transformation $U\in \mathbf{U}(2^n)$ and its computational complexity $C(U)$ is given by the geodesic distance between the identity operation $I$ and $U$, where the geodesic is defined on the Riemannian manifold of $\mathbf{U}(2^n)$. For more details on the geometry of quantum computation, please refer to \cite{Nielsen_geometry}\cite{Nielsen_geometry2}. Accordingly the state complexity of an n-qubit quantum state $|\psi\rangle$ is defined as the minimal complexity over all the quantum algorithms that can generate $|\psi\rangle$ from $|00...0\rangle$, i.e. $C(|\psi\rangle)=\min(C(U), |\psi\rangle=U|00...0\rangle)$. Since the number of degrees of freedom of a general n-qubit transformation $U \in \mathbf{U}(2^n)$ is exponential in $n$, its computational complexity is generically exponential as well. This is to say, a general n-qubit algorithm can only be achieved by a quantum circuit with exponentially many quantum gates, which is regarded as non-realizable. What we are interested in are the polynomial complexity algorithms, which can be used to prepare the corner of physical states from the product state $|00...0\rangle$.
\textbf{\emph{Quantum computational complexity and geometry}} Quantum computational complexity has a rich geometrical structure. Firstly, quantum complexity is defined via a Riemannian structure on the manifold $\mathbf{U}(2^n)$. A natural question is then: what is the curvature of the Riemannian manifold of quantum computation? It has been shown that this manifold may have non-positive curvature everywhere\cite{Nielsen_geometry}\cite{Nielsen_geometry2}. This is to say, geodesics on this manifold are not stable and are sensitive to their initial momentum. Keen readers can immediately see that we now have a connection between quantum computation and the observation of \cite{Bengio2019layer}. Secondly, the concept of quantum computational complexity builds a correspondence, or duality, between quantum states and quantum algorithms. That is to say, given a quantum state $|\psi\rangle$, we have a corresponding optimal quantum algorithm $U(|\psi\rangle)$ to prepare it from an initial product state. If we take the quantum circuit of the algorithm $U(|\psi\rangle)$ as a network of quantum operations, then we have a duality between quantum states and quantum deep networks. This duality may play a key role in understanding the geometry of spacetime\cite{Swingle2009Entanglement}\cite{Swingle2012Constructing}\cite{Dong2018GR}. In fact the geometry of spacetime is just the geometry of the quantum deep network. The take-home message is: \emph{the dual quantum deep network of a quantum state is determined by a Riemannian geometry on the space of quantum transformations, and a quantum deep network also generates a Riemannian geometry}. So do we have two Riemannian structures? There are signs that, if we use the Fisher-Rao metric of the deep network, they can be united and general relativity can be deduced from them\cite{Matsueda2014Derivation}\cite{Dong2019geo}.
\textbf{\emph{Quantum mechanics and geometry}} Finally, even if we consider standard quantum mechanics without the fancy concept of quantum complexity, we can still learn something that can be applied to understanding deep networks. The first observation concerns the geometry of the quantum state space. It is well known that quantum mechanics is probabilistic: in a projective measurement, the probability that the state falls into an eigenstate of the observable is determined by the distance between the initial state and the final state. Geometrically this means the probabilistic property of quantum mechanics is determined by the Riemannian structure of quantum mechanics. The second observation concerns the geometry of quantum evolution. A general quantum state evolution of an n-qubit system can be written as a sequence of unitary transformations $U_nU_{n-1}...U_1$ with $U_i \in \mathbf{U}(2^n)$. Obviously this can be regarded as a linear deep network. How about the stability of this system? It has been shown that this system exhibits a chaotic property, which means a tiny perturbation of the first operation $U_1$ can lead to a huge change of the composite operation $U_nU_{n-1}...U_1$.
We will see that all the aforementioned observations can help us to understand over-parameterized deep networks.
\section{Geometrization of over-parameterized deep networks}
\subsection{Over-parameterized deep networks}
We first give a brief summary of the known facts and arguments about over-parameterized deep networks.
\textbf{\emph{Over-parameterization}}
By over-parameterized deep networks, we usually mean that the number of network parameters is much larger than the number of training data. The over-parameterization is in both the width and the depth of deep networks. Existing works show that over-parameterization plays a key role in network capacity, convergence, generalization and even the acceleration of optimization. But exactly how over-parameterization affects the performance of deep networks is still not completely clear.
\textbf{\emph{Local minima and convergence}}
It is obvious that over-parameterized networks have a large number of local minima. In \cite{Soudry2016No} it is shown that for over-parameterized deep networks, with high probability, all the local minima are also global minima as long as the data are not degenerate. A similar argument in \cite{Du2018Gradient}\cite{Allen2018A} tells us that for sufficiently over-parameterized deep networks, gradient descent reaches local minima with high probability from any initialization point of the network. Of course this is because over-parameterization reshapes the loss landscape of deep networks. Can we have an intuitive geometric picture of this point?
\textbf{\emph{Network complexity and generalization}}
Although all the local minima fit the training data well, we know they are not equal, since they have different generalization capabilities, and we prefer to find a configuration with good generalization performance. Generally the generalization of a network is related to the network complexity\cite{Liang2017Fisher}, and a lower network complexity means a better generalization performance. In \cite{Lei2017Towards} it is shown that the minima that generalize well have basins of attraction with a larger volume, so that they dominate over the poor ones. This is an interesting observation; we will show that it is essentially an analogue of the probabilistic characteristics of quantum mechanics and that it has a geometrical origin.
\textbf{\emph{Loss landscape}}
Over-parameterization changes the loss landscape. \cite{Cooper2018Landscape} claimed that the locus of global minima is usually not discrete but rather a continuous high-dimensional submanifold of the parameter space. But how the structure of this submanifold changes with the number of parameters is still an open problem.
\textbf{\emph{Implicit acceleration by over-parameterization}}
In \cite{Arora2018acceleration} it is claimed that over-parameterization, especially in the depth direction, works as an acceleration mechanism for the optimization of deep networks, and that this acceleration cannot be achieved by regularization. We will argue that this may be a misunderstanding of the role of over-parameterization.
\textbf{\emph{Layers are not created equal}}
For a multilayer deep network, it is natural to ask whether all the layers are equal. The recent work \cite{Bengio2019layer} showed that layers have different sensitivities, for fully connected networks, convolutional networks and residual networks alike. What is the geometry behind this observation? We will try to understand this point as an analogue of quantum information processing systems.
\subsection{Geometric picture of over-parameterized deep networks}
The geometrization of deep networks has been explained in \cite{Dong_deep}\cite{Dong2019geo}, where we showed that deep networks share the same geometric structure of geometric mechanics and quantum computation systems. The key observation is that deep networks are curves to connect the identity transformation and the target transformation on the Riemannian manifold of data transformations. We will now see how over-parameterized deep networks can be understood in this geometrization framework.
\textbf{\emph{Over-parameterization}}
What is the role of over-parameterization in deep networks? How can we determine if a network is properly over-parameterized? In fact we can understand over-parameterization by comparing it with quantum computation systems. In quantum computation we have a gigantic state space and only a zero measure subset, the corner of physical states, is physically realizable. The duality between quantum states and quantum algorithms shows that this is also true for quantum algorithms. Similarly the space of possible functions between the input and output data of deep networks is also huge, and only a small subset of it is physically interesting for us, namely the subset of functions that have a polynomial computational complexity. So essentially approximating a function by deep networks is to explore this subset. Compared with quantum computation systems, a universal shallow network is just a general unitary transformation $U \in \mathbf{U}(2^n)$, which needs an exponential complexity to describe a transformation of the data state space. A polynomial deep network is just a polynomial quantum circuit that only generates the corner of physical states. From this complexity point of view, deep networks are not really \emph{universal} since they only explore a subset of all possible transformations. In over-parameterized deep networks, increasing the width and depth of the networks can be understood as increasing the number of qubits and the length of the quantum circuit used to achieve a quantum algorithm. A key point is that, in order to achieve a quantum algorithm $U$, the complexity of the quantum circuit, which is roughly proportional to the depth of the quantum circuit, has to exceed the quantum complexity of $U$.
\textbf{\emph{Local minima and convergence}}
How over-parameterization changes the distribution of local minima and the convergence is not yet very clear. If we compare deep networks with quantum mechanics, we can only say that the cost function of deep networks can be regarded as a frustration-free Hamiltonian and the global minima are ground states of this frustration-free Hamiltonian. This observation is closely related to the concepts of parent Hamiltonian and uncle Hamiltonian. But whether there is an exact correspondence between them is still under investigation.
\textbf{\emph{Network complexity and generalization}}
The relationship between network complexity and generalization capability is straightforward. In our former work comparing deep networks with the image registration problem, we indicated that the network complexity can be understood as the deformation energy of a diffeomorphic image transformation. So a lower network complexity means a smooth, low-energy deformation. Obviously a smooth image transformation has a better generalization performance. The observation of \cite{Lei2017Towards}, that a solution with a better generalization has a higher probability of being found during optimization from a random initialization, then has an exact correspondence in quantum mechanics. As mentioned in the first section, in a projective measurement the probability that a final quantum state $|\psi_f\rangle$ appears is related to its distance from the initial quantum state $|\psi_i\rangle$. That is to say, the probability $p(|\psi_i\rangle,|\psi_f\rangle)$ is determined by the complexity $C(U(|\psi_i\rangle,|\psi_f\rangle))$ of the quantum transformation $U$ that transforms the initial state into the final state, so that $p(|\psi_i\rangle,|\psi_f\rangle)\sim e^{-C(U(|\psi_i\rangle,|\psi_f\rangle))}$. We see this is exactly what happens in over-parameterized deep networks. Here a better generalization means a lower network complexity and a higher probability that this network configuration is found during optimization. Obviously we also have a relationship $p\sim e^{-C}$ between the probability and the complexity. So we can claim that the probability that a deep network configuration is found by optimization is determined by the network complexity, which is geometrically the Riemannian distance between the transformation achieved by this network and the identity transformation $I$. It is very interesting to see classical deep networks exhibit the same probabilistic property as quantum mechanics. For us it is even more interesting to check whether this observation can be used to understand quantum mechanics from a deep network point of view, because the measurement problem of quantum mechanics is still not fully understood. Can the commonly used decoherence picture of quantum measurement be formulated as a training process of deep networks?
\textbf{\emph{Loss landscape}}
It is straightforward to see that an over-parameterized deep network has a locus of global minima that forms a high-dimensional submanifold of the parameter space.
But we are not clear about the exact structure of this submanifold and how it changes as the number of network parameters grows. For example, we have no idea whether this high-dimensional submanifold is connected or disconnected, or even has a fractal-like structure. We strongly suspect that the locus of global minima has a fractal structure, since the network is nonlinear and the sensitivities of different layers are different, as will be further addressed in the following discussions.
\textbf{\emph{Implicit acceleration by over-parameterization}}
Can the over-parameterization provide an implicit acceleration of the optimization as claimed in \cite{Arora2018acceleration}? To clarify this, we first restate the argument of \cite{Arora2018acceleration}, in which a linear neural network is considered as follows:
$\mathcal{X}:=\mathbb{R}^d$ and $\mathcal{Y}:=\mathbb{R}^k$ are the input and output data spaces. An $N$-layer linear network is used to fit a training set $\{(x_i,y_i)\}_{i=1}^m\subset \mathcal{X} \times \mathcal{Y}$ and the $\ell_2$ loss function $\sum_{i=1}^m(\hat{y}_i-y_i)^2$ is used, where $\hat{y}_i$ is the output of the network given the input $x_i$. The parameters of the depth-$N$ linear network are $\{W_1,W_2,\ldots,W_N\}$ and the end-to-end weight matrix is given by $W_e=W_NW_{N-1}\cdots W_1$, so that $L^N(W_1,W_2,\ldots,W_N)=L^1(W_e)$. The gradient descent based optimization of $W_e$ can then be written as
\begin{equation}\label{eq-0}
\begin{split}
W_e^{(k+1)}\Leftarrow & (1-\eta\lambda N)W_e^{(k)}-\eta\sum_{j=1}^N[W_e^{(k)}(W_e^{(k)})^\top]^{\frac{j-1}{N}}\\
\cdot &\frac{dL^1(W_e^{(k)})}{dW}\cdot[(W_e^{(k)})^\top W_e^{(k)}]^{\frac{N-j}{N}}
\end{split}
\end{equation}
where they assume that $W_{j+1}^{\top}(k)W_{j+1}(k)=W_{j}(k)W_{j}(k)^{\top}$, $j=1,2,\ldots,N-1$, is fulfilled for the network. \cite{Arora2018acceleration} argued that the difference between the $N$-layer deep network and a 1-layer network is that the gradient $\frac{dL^1(W_e^{(k)})}{dW}$ is transformed by the two factors $[W_e^{(k)}(W_e^{(k)})^\top]^{\frac{j-1}{N}}$ and $[(W_e^{(k)})^\top W_e^{(k)}]^{\frac{N-j}{N}}$. They interpreted the effect of over-parameterization (replacing the classic linear model by depth-$N$ linear networks) on gradient descent as follows: the deep network structure reshapes the gradient $\frac{dL^1(W_e^{(k)})}{dW}$ by changing both its amplitude and its direction, which can be understood as introducing some form of momentum and adaptive learning rate. They also claimed that this over-parameterization effect cannot be obtained by regularization.
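A minimal numerical sketch of this setting (not the experiments of \cite{Arora2018acceleration}; it simply runs gradient descent on a single weight matrix and on the factors of a depth-$N$ linear network for the same $\ell_2$ regression, without imposing the balancedness condition) is:
\begin{verbatim}
import numpy as np

# Minimal sketch: gradient descent on a 1-layer linear model versus gradient
# descent on the factors of a depth-N linear network with end-to-end matrix
# W_e = W_1 W_2 ... W_N (row-vector convention).  All sizes, initializations
# and the learning rate are arbitrary illustrative choices.
rng = np.random.default_rng(0)
d, k, m, N, eta, steps = 5, 3, 50, 4, 1e-2, 3000
X = rng.normal(size=(m, d))
Y = X @ rng.normal(size=(d, k))                      # noiseless targets

def prod(mats, dim):                                 # ordered matrix product
    out = np.eye(dim)
    for M in mats:
        out = out @ M
    return out

def loss(W_e):
    return 0.5 * np.sum((X @ W_e - Y) ** 2) / m

W = np.zeros((d, k))                                 # (a) classic linear model
for _ in range(steps):
    W -= eta * X.T @ (X @ W - Y) / m

Ws = [np.eye(d) + 0.01 * rng.normal(size=(d, d)) for _ in range(N - 1)]
Ws.append(0.01 * rng.normal(size=(d, k)))            # (b) depth-N factors
for _ in range(steps):
    G = X.T @ (X @ prod(Ws, d) - Y) / m              # dL/dW_e
    A = [prod(Ws[:j], d) for j in range(N)]          # product of factors before j
    B = [prod(Ws[j + 1:], d) if j < N - 1 else np.eye(k) for j in range(N)]
    Ws = [Ws[j] - eta * A[j].T @ G @ B[j].T for j in range(N)]   # chain rule

print("1-layer loss :", loss(W))
print("depth-N loss :", loss(prod(Ws, d)))
\end{verbatim}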
Do we have a geometric description of this observation in our geometrization scheme? In fact this can be directly observed by comparing deep networks with diffeomorphic image registration problem as in \cite{Dong_deep}\cite{Dong2019geo}. What's more, we can directly generalize the conclusion of \cite{Arora2018acceleration} to a general nonlinear deep network without any further assumptions on the network.
Diffeomorphic image registration can be abstracted as a map $G\times V\rightarrow V$, where $G$ is the group of image transformations and $V$ is the vector space of images. Large deformation diffeomorphic metric mapping (LDDMM)\cite{Beg2004Computing} generates a deformation $\varphi$ as a flow $\varphi^u_t$ of a time-dependent vector field $u_t \in T_e(G)=\mathbf{g}$ so that
\begin{equation}\label{eq1}
\dot{\varphi}^u_t=u_t\circ\varphi^u_{t}, \varphi^u_{0}=Id, \varphi^u_{1}=\varphi
\end{equation}
The diffeomorphic matching of two images $I_0$ and $I_1$ with LDDMM is to find a vector field $u_t, t\in[0,1]$ to minimize the cost function
\begin{equation}\label{eq2}
\begin{split}
E(u_t)=&E_K(u_t)+E_C(u_t)\\
=&\int_0^1\frac{1}{2}|u_t|^2dt+\beta|I_1-I_0\circ\varphi^u_{1}|^2
\end{split}
\end{equation}
Here the regularity on $u_t$ is a kinetic energy term $E_K(u_t)=\frac{1}{2}\int_0^1|u_t|^2dt$ with $|u_t|$ a norm on the vector field defined as $|u_t|^2=\langle Lu_t,u_t\rangle_{L^2}$. The operator $L$ is a positive self-adjoint differential operator. Obviously the norm $|u_t|^2=\langle Lu_t,u_t\rangle_{L^2}$ defines a Riemannian metric on the manifold of the diffeomorphic transformation group $Diff(R^n)$. The second term $E_C(u_t)=\beta|I_1-I_0\circ\varphi^u_{1}|^2$ measures the difference between the transformed image $I_0\circ\varphi^u_{1}$ and the target image $I_1$.
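A minimal 1-D toy sketch of the two energy terms in (\ref{eq2}), assuming $L$ to be the identity operator and performing no optimization, is:
\begin{verbatim}
import numpy as np

# Toy 1-D sketch of the LDDMM energy E(u) = E_K(u) + E_C(u):
# E_K = 1/2 int_0^1 |u_t|^2 dt with L = identity (an assumption made here),
# E_C = beta |I_1 - I_0 o phi_1|^2, where dphi/dt = u_t(phi_t), phi_0 = Id.
n_x, n_t, beta = 200, 20, 10.0
x = np.linspace(0.0, 1.0, n_x)
dx, dt = x[1] - x[0], 1.0 / n_t

I0 = np.exp(-((x - 0.40) / 0.08) ** 2)           # source image
I1 = np.exp(-((x - 0.55) / 0.08) ** 2)           # target image

# an arbitrary candidate velocity field u_t(x), not the minimizer
u = np.array([0.15 * np.sin(np.pi * x) * (1.0 - 0.3 * t / n_t)
              for t in range(n_t)])

phi = x.copy()                                   # explicit Euler flow integration
for t in range(n_t):
    phi = phi + dt * np.interp(phi, x, u[t])

E_kin = 0.5 * dt * dx * np.sum(u ** 2)           # discretized 1/2 int |u_t|^2 dt
E_match = beta * dx * np.sum((I1 - np.interp(phi, x, I0)) ** 2)
print("kinetic:", E_kin, " matching:", E_match, " total:", E_kin + E_match)
\end{verbatim}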
A necessary condition $DE(u_t)=0$ to minimize the cost function is that the vector field $u_t$ should satisfy the Euler-Poincar\'{e} (E-P) equation
\begin{equation}\label{eq3}
Lu_t=-\varphi^u_{0,t}I_0\diamond \varphi^u_{0,t}\varphi^u_{1,0}\pi
\end{equation}
where $\varphi^u_{s,t}=\varphi^u_t\circ(\varphi^u_{s})^{-1}$, $\pi:=\beta(\varphi^u_{0,t}I_0-I_1)^\flat \in V^*$. The $\flat$ operator is defined as $\flat:V\rightarrow V^*,\langle u^{\flat},v\rangle_{V^*\times V}=\langle u,v\rangle$ and $\diamond:TV^*\rightarrow \mathbf{g}^*,\langle I\diamond \pi,u\rangle_{\mathbf{g}^*\times \mathbf{g}}=\langle\pi,\zeta_u(I)\rangle_{V^*\times V}$ is the momentum map.
In LDDMM framework, the curve satisfying the E-P equation is found by a gradient descent algorithm, while the gradient is given by $u_t+K\varphi^u_{0,t}I_0\diamond \varphi^u_{0,t}\varphi^u_{1,0}\pi$ with $K=L^{-1}$. A direct calculation in the LDDMM framework following \cite{Beg2004Computing} shows that the update of $\varphi^u_1$ is given by
\begin{equation}\label{eq-4}
\begin{split}
\varphi^{u,(k+1)}_1 \Leftarrow & (1-\eta)\varphi^{u,(k)}_1\\
-&\eta K\star\int_0^1 D\varphi^u_{t,1}\cdot D\varphi^u_{t,1} \cdot \frac{dE_C(u_t)}{d\varphi_1}\varphi^u_{0,t}\varphi^u_{0,t}dt
\end{split}
\end{equation}
We can directly see this is almost the same as the update rule of $W_e$ given by (\ref{eq-0}). But here we are working with a nonlinear deep network so that we have a generalization of the linear network of \cite{Arora2018acceleration}. In fact the result of \cite{Arora2018acceleration} can be regarded as a special case of LDDMM called static vector flow (SVF), which is formulated on a Lie group instead of on a Riemannian manifold and the items $[W_e^{(k)}(W_e^{(k)})^\top]^{\frac{j-1}{N}}$, $[W_e^{(k)}(W_e^{(k)})^\top]^{\frac{N-j}{N}}$ can be understood as an analogue of the Lie exponential used in SVF framework.
LDDMM has a beautiful geometric picture which is the same as that of geometric mechanics\cite{Bruveris2011The}\cite{Bruveris2013Geometry}\cite{Holm2009Geometric}. How should we understand the effect of over-parameterization in this LDDMM framework? LDDMM formulates a smooth image transformation by a constrained curve described by (\ref{eq1}). The gradient descent based update of the curve is essentially a constrained optimal control problem, as shown in \cite{Hart2013An}. So when we try to approximate a function by deep networks, the structure of the over-parameterized deep network essentially sets constraints on the possible solution space. The so-called acceleration effect of over-parameterization in \cite{Arora2018acceleration} is nothing but a natural result of the constrained optimal control formulation. Their conclusion that this acceleration cannot be obtained by regularization is also not exact, since the constraints in optimal control can themselves be regarded as a kind of regularization in optimization problems\cite{Holm2009Euler}. The only difference is that the regularization is imposed through the structure of the network.
\textbf{\emph{Layers are not created equal}}
We have seen that in quantum computation, for both the general sequential unitary quantum evolution and the quantum circuit model, we observe the same initial value sensitivity property. This is to say, quantum information processing systems operate on Riemannian manifolds with negative curvature. If we compare these with the observation of \cite{Bengio2019layer}, we find that the general quantum evolution system corresponds to fully connected networks and the quantum circuit model corresponds to convolutional networks. So we can say the observed non-equality of layers in \cite{Bengio2019layer} is just a direct consequence of the principles of quantum computation systems. But one thing is still missing: the residual network. It is observed in \cite{Bengio2019layer} that residual networks also show a non-equality of layers, but the pattern is different from that of fully connected and convolutional networks. Can we also find the correspondence of residual networks in quantum computation systems? Yes: since residual networks are just differential equations, they correspond to the fundamental rule of quantum mechanics, the Schr\"odinger equation. Since the finite time discretization of the Schr\"odinger equation is just the general sequential unitary quantum evolution, we believe the Schr\"odinger equation should have the same initial value sensitivity pattern. This means residual networks should have a similar pattern to fully connected and convolutional networks. This is different from the observed pattern of residual networks\cite{Bengio2019layer}. How can we resolve this contradiction? If we believe that quantum mechanics is the ultimate rule of the world and the main advantage of residual networks is to build a smoother manifold of transformations to approximate functions, then residual networks should be related to a smooth geometry and there is no reason that some layers of residual networks are more critical than others, as observed in \cite{Bengio2019layer}. We assume this is due to artifacts of the non-uniform discretization used in residual networks and to noise during optimization. From another aspect, the redistribution of the sensitivity pattern of residual networks also indicates that the strong background negative curvature geometry of general deep networks is weakened in residual networks, so that random perturbation effects survive. This is in fact evidence that residual networks are building and working on a flatter manifold than fully connected and convolutional networks.
Another problem is related to the spacetime structure. There is evidence that the geometry of spacetime is emergent from quantum information processing networks. Also, in \cite{Dong2019geo} we indicated that in deep networks, if the Fisher-Rao metric is used to measure the network complexity, then the interaction between data and network structure is an analogue of the interaction between matter and spacetime geometry, i.e., general relativity. But if a general quantum deep network has negative curvature, how can our universe have a spacetime that is flat on large scales? Is the existence of our flat universe evidence that there exists a subset of deep networks that can form a flat Euclidean geometry? If such a corner of Euclidean deep networks exists, then all the layers will be created equal in such networks. Can this help us find better network structures? In random matrix based analyses of deep networks, a special type of network configuration with the dynamical isometry property seems to fall in this subset. It has been shown that such networks hold some advantages over normal networks, such as a smooth information flow in both the forward and backward directions. In fact, geometrically the smooth information flow is just inertial movement in a flat spacetime, i.e., Newton's first law. Of course, just as for the corner of physical states in quantum mechanics, the corner of Euclidean deep networks is also a zero measure set. So we assume this subset may not form a universal data processing system, just as our universe may be a very special case of the so-called multiverse picture.
Finally, the negative curvature will influence the loss landscape of deep networks. If a network configuration has a higher sensitivity at the bottom layers, it is easy to see that the loss landscape is more sensitive to the bottom layers and more robust to the top layers. Accordingly the locus of the global minima will have more valleys along the bottom layers, and the locus may develop a fractal-like complex pattern under stronger over-parameterization. Exactly how over-parameterization changes the loss landscape is still open.
\section{Conclusions}
Geometrization is not only the key idea of physics, it is also a framework for understanding deep networks. In this work we try to understand over-parameterized deep networks by geometrization. By establishing analogies between properties of over-parameterized deep networks and quantum information processing/diffeomorphic image registration systems, we found that they share similar geometric structures. Our key observations are: (1) Polynomial complexity over-parameterized deep networks only explore a corner of polynomial complexity functions, just as quantum computation systems only explore the corner of physical states in the gigantic quantum state space. The network structure sets constraints on the submanifold of functions that can be approximated by the network. (2) Over-parameterized deep networks may have a complex loss landscape, and local minima have different generalization capabilities. The generalization capability is determined by the network complexity, which is computed as the geodesic distance on a Riemannian manifold between the transformation represented by the network and the identity transformation. The probability that a certain configuration is obtained is determined by the complexity of the network. This is an analogue of the measurement problem in quantum mechanics, where the probability of the final state is determined by the distance between the initial state and the final state. (3) Over-parameterized deep networks have a geometry with negative curvature, just as quantum computation systems have a Riemannian geometry with negative curvature. All these observations suggest that deep networks are closely related to physics and that geometrization may provide a proper roadmap for interpreting deep networks.
In this work we mainly explore the Riemannian structure of deep networks, for example the network complexity as the geodesic distance and the sensitivity of network parameters as Riemannian curvature. A natural question is, can other geometrical structures in physics help to understand over-parameterized deep networks? For example the symplectic structure of geometric mechanics plays a key role in the dynamics of classical mechanics. Can the dynamics of deep networks also be understood in a similar way? Fibre bundle structure is another key structure to understand interactions in physics, also it plays a key role in the geometry of quantum information processing such as the geometry of mixed state and quantum entanglement. Can it be used to understand interactions between subnetworks in a composite system with multiple subnetworks? In \cite{Dong2019geo} we have mentioned that fibre bundles may be related with important network structures such as attention mechanism, Turing neural machines and differential neural computers. There are signs that fibre bundles are also related with capsule networks and the recent quaternion neural networks. To explore the possibility to understand deep networks based on bundles will be our future work.
\end{document}
|
\begin{document}
\begin{doublespace}
\newtheorem{thm}{Theorem}[section]
\newtheorem{lemma}[thm]{Lemma}
\newtheorem{defn}[thm]{Definition}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{corollary}[thm]{Corollary}
\newtheorem{remark}[thm]{Remark}
\newtheorem{example}[thm]{Example}
\numberwithin{equation}{section}
\title{\Large \bf
Potential Theory of Subordinate Brownian Motions Revisited
}
\author{{\bf Panki Kim}\thanks{This work was supported by Basic
Science Research Program through the National Research Foundation of
Korea(NRF) grant funded by the Korea
government(MEST)(2010-0001984).} \quad {\bf Renming Song} \quad and
\quad {\bf Zoran Vondra{\v{c}}ek}\thanks{Supported in part by the
MZOS grant 037-0372790-2801.} }
\date{ }
\maketitle
\begin{abstract}
The paper discusses and surveys some aspects of the potential theory of subordinate Brownian motion under the assumption that the Laplace exponent of the corresponding subordinator is comparable to a regularly varying function at infinity. This extends some results previously obtained under stronger conditions.
\end{abstract}
\noindent {\bf AMS
2010 Mathematics Subject Classification}: Primary 60J45;
Secondary 60G51, 60J35, 60J75
\noindent {\bf Keywords and phrases}: Subordinator, subordinate
Brownian motion, potential theory, Green function, L\'evy density,
Harnack inequality, boundary Harnack principle
\section{Introduction}\label{ksv-sec-intro}
An ${\mathbb R}^d$-valued process $X=(X_t:\, t\ge 0)$ is called a L\'evy
process in ${\mathbb R}^d$ if it is a right continuous process with left
limits and if, for every $s, t\ge 0$, $X_{t+s}-X_s$ is independent
of $\{X_r, r\in [0, s]\}$ and has the same distribution as
$X_s-X_0$. A L\'evy process is completely characterized by its
L\'evy exponent $\Phi$ via
$$
{\mathbb E}[\exp\{i\langle \xi, X_t-X_0 \rangle\}]=\exp\{-t\Phi(\xi)\}, \quad
t\ge 0, \xi\in {\mathbb R}^d.
$$
The L\'evy exponent $\Phi$ of a L\'evy process is given by the
L\'evy-Khintchine formula
$$
\Phi(\xi)=i\langle
l, \xi \rangle + \frac12 \langle \xi, A\xi^T
\rangle + \int_{{\mathbb R}^d}\left(1-e^{i\langle \xi, x \rangle}+i\langle
\xi, x \rangle{\bf 1}_{\{|x|<1\}}\right)\Pi(dx), \quad \xi\in {\mathbb R}^d\, ,
$$
where $ l\in {\mathbb R}^d$, $A$ is a nonnegative definite $d\times d$
matrix, and $\Pi$ is a measure on ${\mathbb R}^d\setminus\{0\}$ satisfying
$\int (1\wedge |x|^2)\, \Pi(dx) <\infty$. $A$ is called the
diffusion matrix, $\Pi$ the L\'evy measure, and
$(l, A, \Pi)$ the generating triplet of the process.
Nowadays L\'evy processes are widely used in various fields, such as
mathematical finance, actuarial mathematics
and mathematical physics.
However, general L\'evy processes are not very easy to deal with.
A subordinate Brownian motion in ${\mathbb R}^d$ is a L\'evy process which
can be obtained by replacing the time of Brownian motion in ${\mathbb R}^d$ by
an independent subordinator (i.e., an increasing L\'evy process starting from 0).
More precisely, let $B=(B_t:\, t\ge 0)$ be a Brownian motion in
${\mathbb R}^d$ and $S=(S_t:\, t\ge 0)$ be a subordinator independent of $B$.
The process $X=(X_t:\, t\ge 0)$ defined by $X_t=B_{S_t}$ is a
rotationally invariant L\'evy process in ${\mathbb R}^d$ and is called a
subordinate Brownian motion.
The subordinator $S$ used to define the subordinate Brownian motion
$X$ can be interpreted as ``operational'' time or ``intrinsic''
time. For this reason, subordinate Brownian motions have been used
in mathematical finance and other applied fields. Subordinate
Brownian motions form a very large class of L\'evy processes.
Nonetheless, compared with general L\'evy processes, subordinate
Brownian motions are much more tractable. If we take the Brownian
motion $B$ as given, then $X$ is completely determined by the
subordinator $S$. Hence, one can deduce properties of $X$ from
properties of the subordinator $S$. On the analytic level this
translates to the following: Let $\phi$ denote the Laplace exponent
of the subordinator $S$, that is, ${\mathbb E}[\exp\{-\lambda S_t\}]
=\exp\{-t\phi(\lambda)\}$, $\lambda >0$. Then the characteristic
exponent $\Phi$ of the subordinate Brownian motion $X$ takes on the
very simple form $\Phi(x)=\phi(|x|^2)$ (our Brownian motion $B$ runs
at twice the usual speed). Therefore, properties of $X$ should follow
from properties of the Laplace exponent $\phi$.
The Laplace exponent $\phi$ of a subordinator $S$ is a Bernstein
function, hence it has a representation of the form
$$
\phi(\lambda)=b\lambda +\int_{(0,\infty)}(1-e^{-\lambda t})\, \mu(dt)
$$
where $b\ge 0$ and $\mu$ is a measure on $(0,\infty)$ satisfying
$\int_{(0,\infty)}(1\wedge t)\, \mu(dt)<\infty$. If $\mu$ has a
completely monotone density, the function $\phi$ is called a
complete Bernstein function. The purpose of this work is to study
the potential theory of subordinate Brownian motion under the
assumption that the Laplace exponent $\phi$ of the subordinator is a
complete Bernstein function comparable to a regularly varying
function at infinity.
More precisely, we will assume that there exist $\alpha\in (0,2)$
and a function $\ell$ slowly varying at infinity such that
$$
\phi(\lambda)\asymp \lambda^{\alpha/2}\ell(\lambda)\, , \quad
\lambda \to \infty\, .
$$
Here and later, for two functions $f$ and $g$ we write
$f(\lambda)\asymp g(\lambda)$ as $\lambda \to \infty$ if the
quotient $f(\lambda)/g(\lambda)$ stays bounded between two positive
constants as $\lambda \to \infty$.
A lot of progress has been made in recent years in the study of the
potential theory of subordinate Brownian motions, see, for instance
\cite{CKSV, CKSV2, KSV1, KSV2, KSV3, RSV, SiSV} and \cite[Chapter
5]{BBKRSV}. In particular, an extensive survey of results obtained
before 2007 is given in \cite[Chapter 5]{BBKRSV}. At that
time, the focus was on the potential theory of the process $X$ in
the whole ${\mathbb R}^d$, the results for (killed) subordinate
Brownian motions in an open subset still being out of reach. In the
last few years significant progress has been made in studying the
potential theory of subordinate Brownian motions killed upon exiting
an open subset of ${\mathbb R}^d$. The main results include the boundary
Harnack principle and sharp Green function estimates. For
processes having a continuous component see \cite{KSV2} (for the
one-dimensional case) and \cite{CKS5, CKSV, CKSV2} (for
multi-dimensional case). For purely discontinuous processes, the
boundary Harnack principle was obtained in \cite{KSV1} and sharp
Green function estimates were discussed in the recent preprint
\cite{KSV3}. The main assumption in \cite{CKSV, CKSV2, KSV1} and
\cite[Chapter 5]{BBKRSV} is that the Laplace exponent of the
subordinator is regularly varying at infinity. The results were
established under different assumptions, some of which turned out to
be too strong and some even redundant. The time is now ripe to put some
of the recent progress under one unified setup and to give a survey
of some of these results. The survey builds upon the work done in
\cite[Chapter 5]{BBKRSV} and \cite{KSV1}. The setup we are going to
assume is more general than those of all the previous papers, so in
this sense, most of the results contained in this paper are
extensions of the existing ones.
In Section \ref{ksv-sec-subordinators} we first recall some basic facts
about subordinators, Bernstein functions and complete Bernstein
functions. Then we establish asymptotic behaviors, near the origin,
of the potential density and L\'evy density of subordinators.
In Section \ref{ksv-sec-sbm} we establish the asymptotic behaviors, near
the origin, of the Green function and the L\'evy density of
our subordinate Brownian motion. These results follow from the
asymptotic behaviors, near the origin, of the potential density and
L\'evy density of the subordinator.
In Section \ref{ksv-sec-hibhp} we prove that the Harnack inequality and
the boundary Harnack principle hold for our subordinate Brownian
motions.
The materials covered in this paper by no means include all that can
be said about the potential theory of subordinate Brownian motions.
One of the omissions is the sharp Green function estimates of
(killed)
subordinate Brownian motions in bounded $C^{1, 1}$ open sets
obtained in the recent preprint \cite{KSV3}. The present paper
builds up the framework for \cite{KSV3} and can be regarded as a
preparation for \cite{KSV3} in this sense.
Another omission is the Dirichlet heat kernel estimates of
subordinate Brownian motions in smooth open sets recently
established in \cite{CKS1, CKS2, CKS3, CKS4}. One of the reasons we
do not include these recent results in this paper is that all these
heat kernel estimates are for particular subordinate Brownian
motions only and are not yet established in the general case.
A third notable omission is the spectral theory for killed
subordinate Brownian motions developed in \cite{CS05, CS06a, CS06c}.
Some of these results have been summarized in \cite[Section
12.3]{SSV}.
A fourth notable omission is the potential theory of subordinate
killed Brownian motions developed in \cite{GPRSSV, GRSS, Son04,
SV03, SV04b, SV06}. Some of these results have been summarized in
\cite[Section 5.5]{BBKRSV} and \cite[Chapter 13]{SSV}. In this paper
we concentrate on subordinate Brownian motions without diffusion
components and therefore this paper does not include results
from \cite{CKSV, CKSV2, KSV2}. One of the reasons for this is that
subordinate Brownian motions with diffusion components require a
different treatment.
We end this introduction with a few words on notation. For
functions $f$ and $g$ we write $f(t)\sim g(t)$ as $t\to 0+$ (resp.
$t\to \infty$) if the quotient $f(t)/g(t)$ converges to 1 as $t\to
0+$ (resp. $t\to \infty$), and $f(t) \asymp g(t)$ as $t\to 0+$
(resp. $t\to \infty$) if the quotient $f(t)/g(t)$ stays bounded
between two positive constants as $t\to 0+$ (resp. $t\to \infty$).
\section{Subordinators}\label{ksv-sec-subordinators}\label{ksv-sec-sub}
\subsection{Subordinators and Bernstein functions}\label{ksv-ss:bf}
Let $S=(S_t:\, t\ge 0)$ be a subordinator, that is, an increasing
L\'evy process taking values in $[0,\infty)$ with $S_0=0$. A
subordinator $S$ is completely characterized by its Laplace exponent
$\phi$ via
$$
{\mathbb E}[\exp(-\lambda S_t)]=\exp(-t \phi(\lambda))\, ,\quad \lambda > 0.
$$
The Laplace exponent $\phi$ can be written in the form (cf. \cite[p.
72]{Ber})
$$
\phi(\lambda)=b\lambda +\int_0^{\infty}(1-e^{-\lambda t})\,
\mu(dt)\, .
$$
Here $b \ge 0$, and $\mu$ is a $\sigma$-finite measure on
$(0,\infty)$ satisfying
$$
\int_0^{\infty} (t\wedge 1)\, \mu(dt)< \infty\, .
$$
The constant $b$ is called the drift, and $\mu$ the L\'evy measure
of the subordinator $S$.
A $C^{\infty}$ function $\phi:(0,\infty)\to [0,\infty)$ is called a
Bernstein function if $(-1)^n D^n \phi\le 0$ for every positive
integer $n$. Every Bernstein function has a representation (cf.
\cite[Theorem 3.2]{SSV})
$$
\phi(\lambda)=a+b\lambda +\int_{(0,\infty)}(1-e^{-\lambda t})\,
\mu(dt)
$$
where $a,b\ge 0$ and $\mu$ is a measure on $(0,\infty)$ satisfying
$\int_{(0,\infty)}(1\wedge t)\, \mu(dt)<\infty$. $a$ is called the
killing coefficient, $b$ the drift and $\mu$ the L\'evy measure of
the Bernstein function. Thus a nonnegative function $\phi$ on $(0,
\infty)$ is the Laplace exponent of a subordinator if and only if it
is a Bernstein function with $\phi(0+)=0$.
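As a minimal numerical illustration, the sign pattern $(-1)^n D^n \phi\le 0$ can
be checked for low orders by finite differences; the sketch below does this for
$\phi(\lambda)=\lambda^{1/2}$, which is a Bernstein function, and for
$\phi(\lambda)=\lambda^{2}$, which is not:
\begin{verbatim}
import numpy as np

# Minimal numerical sketch: check the Bernstein sign pattern
# (-1)^n D^n phi <= 0 for n = 1,...,4 by repeated forward differences.
# phi(s) = sqrt(s) should pass, while phi(s) = s**2 should fail at n = 2.
def bernstein_signs(phi, a=1.0, b=5.0, h=1e-2, n_max=4, tol=1e-6):
    vals = phi(np.arange(a, b, h))
    results = {}
    for n in range(1, n_max + 1):
        vals = np.diff(vals)               # n-th forward difference
        results[n] = bool(np.all((-1) ** n * vals / h ** n <= tol))
    return results

print("sqrt  :", bernstein_signs(np.sqrt))
print("square:", bernstein_signs(lambda s: s ** 2))
\end{verbatim}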
Sometimes we need to deal with killed subordinators, that is,
subordinators killed at independent exponential times.
Let $e_a$ be an exponential random variable with parameter
$a\ge0$, i.e., ${\mathbb P}( e_a >t)=e^{-at}, \, t>0$.
We allow $a=0$ in which case $e_a=\infty$. Assume that $S$ is
a subordinator with Laplace exponent $\phi$ and
$e_a$ is independent of $S$.
We define a process $\widehat S$ by
$$
\widehat S_t= \begin{cases}
S_t , & t < e_a\\
\infty, & t \ge e_a
\end{cases}.
$$
The process $\widehat S$ is the subordinator $S$ killed at an independent exponential time. We call $\widehat S$
a killed subordinator.
The
corresponding Laplace exponent $\widehat \phi$ is related to $\phi$ as
$$
\widehat \phi(\lambda)=a+\phi(\lambda), \qquad \lambda>0.
$$
In fact,
$$
{\mathbb E}[e^{-\lambda \widehat S_t}]= {\mathbb E}[e^{-\lambda S_t} {\bf 1}_{\{ t < e_a \}}]={\mathbb E}[e^{-\lambda S_t}]{\mathbb P}( t < e_a )=e^{-t \phi(\lambda)}
e^{-at}=e^{-t(a+\phi(\lambda))},$$
with the convention $e^{-\lambda\cdot\infty}=0$.
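A minimal Monte Carlo sketch confirming this relation for a gamma
subordinator, whose Laplace exponent is $\phi(\lambda)=c\log(1+\lambda/\theta)$,
is:
\begin{verbatim}
import numpy as np

# Minimal Monte Carlo sketch of E[exp(-lambda * \hat S_t)]
# = exp(-t(a + phi(lambda))) for a killed subordinator, using a gamma
# subordinator S_t ~ Gamma(shape = c*t, rate = theta), for which
# phi(lambda) = c * log(1 + lambda/theta).
rng = np.random.default_rng(0)
c, theta, a, t, lam, n = 2.0, 1.5, 0.7, 1.3, 0.9, 200000

S = rng.gamma(shape=c * t, scale=1.0 / theta, size=n)   # samples of S_t
e_a = rng.exponential(scale=1.0 / a, size=n)            # independent killing times

estimate = np.mean(np.exp(-lam * S) * (t < e_a))        # E[e^{-lam S_t} 1_{t < e_a}]
phi = c * np.log(1.0 + lam / theta)
print("Monte Carlo:", estimate, " exact:", np.exp(-t * (a + phi)))
\end{verbatim}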
A function $\phi: (0, \infty)\to [0, \infty)$ is the Laplace
exponent of a killed subordinator if and only if $\phi$ is a
Bernstein function. For this reason, we will sometimes work with killed subordinators.
A Bernstein function $\phi$ is called a complete Bernstein function
if the L\'evy measure $\mu$ has a completely monotone density
$\mu(t)$, i.e., $(-1)^n D^n \mu\ge 0$ for every non-negative integer
$n$. Here and below, by abuse of notation we will denote the L\'evy
density by $\mu(t)$. Complete Bernstein functions form a large
subclass of Bernstein functions. Most of the familiar Bernstein
functions are complete Bernstein functions. See \cite[Chapter
15]{SSV} for an extensive table of complete Bernstein functions.
Here are some examples of complete Bernstein functions:
\begin{description}
\item{(i)} $\phi(\lambda)=\lambda^{\alpha/2}$, $\alpha\in (0, 2]$;
\item{(ii)} $\phi(\lambda)=(\lambda+m^{2/\alpha})^{\alpha/2}-m$, $\alpha\in (0, 2), m\ge
0$;
\item{(iii)} $\phi(\lambda)=\lambda^{\alpha/2}+ \lambda^{\beta/2}$, $0\le
\beta<\alpha\in (0, 2]$;
\item{(iv)} $\phi(\lambda)=\lambda^{\alpha/2}(\log(1+\lambda))^{\gamma/2}$, $\alpha\in (0, 2),
\gamma\in (0, 2-\alpha)$;
\item{(v)} $\phi(\lambda)=\lambda^{\alpha/2}(\log(1+\lambda))^{-\beta/2}$, $0\le
\beta<\alpha\in (0, 2]$.
\end{description}
An example of a Bernstein function which is not a complete Bernstein
function is $1-e^{-\lambda}$.
It is known (cf. \cite[Proposition 7.1]{SSV}) that $\phi$ is a
complete Bernstein function if and only if the function
$\lambda/\phi(\lambda)$ is a complete Bernstein function. For other
properties of complete Bernstein functions we refer the readers to
\cite{SSV}.
The following result, which will play an important role later, says
that the L\'evy density of a complete Bernstein function cannot
decrease too fast in the following sense.
\begin{lemma}[{\cite[Lemma 2.1]{KSV3}}]\label{ksv-l:H2-valid}
Suppose that $\phi$ is a complete Bernstein function with L\'evy
density $\mu$. Then there exists $C_1>0$ such that $\mu(t)\le C_1
\mu(t+1)$ for every $t>1$.
\end{lemma}
\noindent{\bf Proof.} Since $\mu$ is a completely monotone function, by Bernstein's
theorem (cf. \cite[Theorem 1.4]{SSV}), there exists a measure $m$ on
$[0,\infty)$ such that $\mu(t)=\int_{[0,\infty)}e^{-tx} m(dx).$
Choose $r>0$ such that $\int_{[0, r]}e^{-x}\, m(dx)\ge \int_{(r,
\infty)}e^{-x}\, m(dx).$ Then, for any $t>1$, we have
\begin{eqnarray*}
\int_{[0, r]}e^{-t x}\, m(dx)&\ge&e^{-(t -1)r}\int_{[0, r]}e^{-x}\, m(dx)\\
&\ge &e^{-(t -1)r}\int_{(r, \infty)}e^{-x}\, m(dx)\,\ge \, \int_{(r,
\infty)}e^{-t x}\, m(dx).
\end{eqnarray*}
Therefore, for any $t>1$,
\begin{eqnarray*}
&&\mu(t+1)\ge \int_{[0, r]}e^{-(t+1) x}\, m(dx)\ge e^{-r}\int_{[0,
r]}e^{- t x}\, m(dx) \\
&&\ge \frac12\, e^{-r}\int_{[0, \infty)}e^{-t
x}\, m(dx)=\frac12\, e^{-r}\mu(t).
\end{eqnarray*}
{
$\Box$
}
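For comparison (an observation not needed in the sequel), the complete Bernstein assumption cannot simply be dropped from the lemma above: the Bernstein function $\phi(\lambda)=\int_0^{\infty}(1-e^{-\lambda t})e^{-t^2}\, dt$ has L\'evy density $\mu(t)=e^{-t^2}$, which is not completely monotone, and
$$
\frac{\mu(t)}{\mu(t+1)}=e^{2t+1}\longrightarrow \infty\, ,\qquad t\to\infty\, ,
$$
so no constant $C_1$ as in Lemma \ref{ksv-l:H2-valid} can exist for this $\phi$.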
The potential measure of
the (possibly killed) subordinator
$S$ is defined by
\begin{equation}\label{ksv-potential measure}
U(A)={\mathbb E} \int_0^{\infty}
{\bf 1}_{\{S_t\in A\}}
\, dt, \quad A\subset [0,
\infty).
\end{equation}
Note that $U(A)$ is the expected time the subordinator $S$ spends in the set $A$.
The Laplace transform of the measure $U$ is given by
\begin{equation}\label{ksv-lt potential measure}
{\mathcal L} U(\lambda)=\int_0^{\infty}e^{-\lambda t}\, dU(t)=
{\mathbb E}\int_0^{\infty} \exp(-\lambda S_t)\, dt
=\frac{1}{\phi(\lambda)}\, .
\end{equation}
We call a subordinator $S$ a complete subordinator if its Laplace
exponent $\phi$ is a complete Bernstein function. The following
characterization of complete subordinators is due to \cite[Remark
2.2]{SV06} (see also \cite[Corollary 5.3]{BBKRSV}).
\begin{prop}\label{ksv-p:2.2}
Let $S$ be a subordinator with Laplace exponent $\phi$ and
potential measure $U$. Then $\phi$ is a complete Bernstein function
if and only if
$$
U(dt)=c\delta_0(dt)+u(t)dt
$$
for some $c\ge 0$ and completely monotone function $u$.
\end{prop}
In case the constant $c$ in the proposition above is equal to zero,
we will call $u$ the potential density of the subordinator $S$.
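As a simple illustration, for the stable exponent $\phi(\lambda)=\lambda^{\alpha/2}$, $\alpha\in (0,2)$, one has $c=0$ and
$$
u(t)=\frac{t^{\alpha/2-1}}{\Gamma(\alpha/2)}\, ,
$$
since $\int_0^{\infty}e^{-\lambda t}t^{\alpha/2-1}\, dt=\Gamma(\alpha/2)\lambda^{-\alpha/2}=\Gamma(\alpha/2)/\phi(\lambda)$; this $u$ is clearly completely monotone.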
An inspection of the argument, given in \cite[Chapter 5]{BBKRSV} or
\cite{SV06}, leading to the proposition above yields the following
two results (cf. \cite[Corollary 5.4 and Corollary 5.5]{BBKRSV} or
\cite[Corollary 2.3 and Corollary 2.4]{SV06}).
\begin{corollary}\label{ksv-c2.3}
Suppose that $S=(S_t:\, t\ge 0)$ is a subordinator whose Laplace exponent
$$
\phi(\lambda)=b\lambda +\int_0^{\infty} (1-e^{-\lambda t})\, \mu(dt)
$$
is a complete Bernstein function with $b>0$ or $\mu(0,
\infty)=\infty$. Then the potential measure $U$ of $S$ has a
completely monotone density $u$.
\end{corollary}
\noindent{\bf Proof.}
By
\cite[Corollary 5.4]{BBKRSV} or \cite[Corollary 2.3]{SV06},
if the drift of the complete subordinator $S$ is positive or the L\'evy measure $\mu$
has infinite mass, then the constant $c$ in Proposition \ref{ksv-p:2.2} is equal to zero,
so the potential measure $U$ of $S$ has a density $u$. The complete monotonicity
of the density follows directly from Proposition \ref{ksv-p:2.2}.
{
$\Box$
}
\begin{corollary}\label{ksv-c2.4}
Let $S$ be a complete subordinator with Laplace exponent $\phi(\lambda)=\int_0^{\infty} (1-e^{-\lambda t})\mu(dt)$.
Suppose that the L\'evy measure $\mu$
has infinite mass.
Then the potential measure of a (killed) subordinator with
Laplace exponent $\psi(\lambda):=\lambda/\phi(\lambda)$ has a
completely monotone density $v$ given by
$$
v(t)=\mu(t, \infty).
$$
\end{corollary}
\noindent{\bf Proof.}
Since the drift of $S$ is zero and the L\'evy measure $\mu$
has infinite mass, by
\cite[Corollary 5.5]{BBKRSV} or \cite[Corollary 2.4]{SV06}, we have that
$$
\psi(\lambda)=a
+ \int_0^{\infty} (1-e^{-\lambda t})\, \nu(dt)
$$
where $a=\left(\int_0^{\infty} t\mu(t)dt\right)^{-1}$, the L\'evy measure $\nu$ of $\psi$
has infinite mass and the potential measure of a possibly killed (i.e., $a>0$) subordinator with
Laplace exponent $\psi$ has a
density $v$ given by
$
v(t)=\mu(t, \infty).
$
The complete monotonicity
of the density follows from \cite[Corollary 5.3]{BBKRSV}, which also applies to killed subordinators.
{
$\Box$
}
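In the stable case $\phi(\lambda)=\lambda^{\alpha/2}$, $\alpha\in (0,2)$, the corollary above can be checked by hand. Here $\mu(t)=\frac{\alpha/2}{\Gamma(1-\alpha/2)}t^{-1-\alpha/2}$, $\psi(\lambda)=\lambda/\phi(\lambda)=\lambda^{1-\alpha/2}$, the constant $a$ vanishes because $\int_0^{\infty}t\mu(t)\, dt=\infty$, and
$$
v(t)=\mu(t,\infty)=\frac{t^{-\alpha/2}}{\Gamma(1-\alpha/2)}\, ,\qquad
\int_0^{\infty}e^{-\lambda t}v(t)\, dt=\lambda^{\alpha/2-1}=\frac{1}{\psi(\lambda)}\, .
$$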
\subsection{Asymptotic behavior of the potential and L\'evy
densities}\label{ksv-ss:asympumu}
From now on we will always assume that $S$ is a complete
subordinator without drift and that the Laplace exponent $\phi$ of
$S$ satisfies $\lim_{\lambda\to\infty}\phi(\lambda)=\infty$ (or
equivalently, the L\'evy measure of $S$ has infinite mass). Under
this assumption, the potential measure $U$ of $S$ has a completely
monotone density $u$ (cf. Corollary \ref{ksv-c2.3}). The main purpose of
this subsection is to determine the asymptotic behaviors of $u$ and
$\mu$ near the origin. For this purpose, we will need the following
result due to Z\"ahle (cf. \cite[Theorem 7]{Z}).
\begin{prop}\label{ksv-p:zahle}
Suppose that $w$ is a
completely monotone function given by
$$
w(t)=\int^\infty_0e^{-st} f(s)\, ds,
$$
where $f$ is a nonnegative decreasing function. Then
$$
f(s)\le \left(1-e^{-1}\right)^{-1} s^{-1}w(s^{-1}), \quad s>0.
$$
If, furthermore, there exist $\delta\in (0, 1)$ and $a, s_0>0$ such
that
\begin{equation}\label{ksv-e:zahle}
w(\lambda t)\le a \lambda^{-\delta} w(t), \quad \lambda\ge 1, t\ge
1/s_0,
\end{equation}
then there exists $C_2=C_2(w,f,a,s_0, \delta)>0$ such that
$$
f(s)\ge C_2 s^{-1}w(s^{-1}), \quad s\le s_0.
$$
\end{prop}
\noindent{\bf Proof.}
Using the assumption that $f$ is a nonnegative decreasing function,
we get that, for any $r>0$,
\begin{eqnarray*}
w(t)&=&\frac1t\int^\infty_0e^{-s}f\left(\frac{s}{t}\right)ds\\
&\ge&\frac1t\int^r_0e^{-s}f\left(\frac{s}{t}\right)ds
\,\ge\,\frac1tf\left(\frac{r}{t}\right)\left(1-e^{-r}\right).
\end{eqnarray*}
Thus
$$
f\left(\frac{r}{t}\right)\le\frac{tw(t)}{1-e^{-r}}, \quad t>0, r>0.
$$
In particular, we have
$$
f(s)\le \left(1-e^{-1}\right)^{-1}s^{-1}w(s^{-1}), \quad s>0,
$$
and
\begin{equation}\label{ksv-e:zahle1}
f\left(\frac{s}{t}\right)\le
\left(1-e^{-1}\right)^{-1}\frac{t}{s}w\left(\frac{t}{s}\right),
\quad s>0, t>0.
\end{equation}
On the other hand, for $r\in (0, 1]$, we have
\begin{eqnarray*}
tw(t)&=&\int^r_0e^{-s}f\left(\frac{s}{t}\right)ds+\int_r^\infty
e^{-s}f\left(\frac{s}{t}\right)ds\\
&\le&\int^r_0e^{-s}f\left(\frac{s}{t}\right)ds +
f\left(\frac{r}{t}\right)e^{-r}\\
&\le&\left(1-e^{-1}\right)^{-1}t\int^r_0e^{-s}\frac1s\, w\left(\frac{t}{s}\right)ds
+ f\left(\frac{r}{t}\right)e^{-r},
\end{eqnarray*}
where in the last line we used \eqref{ksv-e:zahle1}.
Assuming \eqref{ksv-e:zahle}, we get that
$$
w\left(\frac{t}{s}\right)\le a s^{\delta}w(t), \quad t\ge 1/s_0,
s<r.
$$
Thus, for $r\in (0, 1]$, we have,
$$
tw(t)\le
a\left(1-e^{-1}\right)^{-1}tw(t)\int^r_0e^{-s}s^{\delta-1}ds +
f\left(\frac{r}{t}\right)e^{-r}.
$$
Choosing $r\in(0, 1]$ small enough so that
$$
a\left(1-e^{-1}\right)^{-1}\int^r_0e^{-s}s^{\delta-1}ds\le \frac12,
$$
we conclude that for this choice of $r$, we have
$$
f\left(\frac{r}{t}\right)\ge c_1 tw(t), \quad t\ge 1/s_0
$$
for some constant $c_1>0$. Since $w$ is decreasing, we have
$$
f(s)\ge c_1\frac{r}{s}w\left( \frac{r}{s}\right) \ge
c_2 s^{-1}w(s^{-1}), \quad s\le rs_0,
$$
where $c_2=c_1r$. From this we immediately get that there exists
$c_3>0$ such that
$$
f(s)\ge c_3 s^{-1}w(s^{-1}), \quad s\le s_0.
$$
{
$\Box$
}
\begin{corollary}
The potential density $u$ of $S$ satisfies
\begin{equation}\label{ksv-e:u-upper-bound}
u(t)\le C_3 t^{-1}\phi(t^{-1})^{-1}\, ,\quad t>0\, .
\end{equation}
\end{corollary}
\noindent{\bf Proof.}
Apply the first part of Proposition \ref{ksv-p:zahle} to the
function
$$
w(t):=\int_0^{\infty}e^{-s t}u(s)\, ds =\frac{1}{\phi(t)}.
$$
{
$\Box$
}
We introduce now the main assumption on our Laplace exponent $\phi$
of the complete subordinator $S$ that we will use throughout the
rest of the paper. Recall that a function $\ell:(0,\infty)\to
(0,\infty)$ is slowly varying at infinity if
$$
\lim_{t\to \infty}\frac{\ell(\lambda t)}{\ell(t)}=1\, ,\quad \textrm{for every }\lambda >0\, .
$$
\noindent {\bf Assumption (H):} There exist $\alpha\in (0,2)$ and a
function $\ell:(0,\infty)\to (0,\infty)$ which is measurable,
locally bounded above and below by positive constants,
and slowly varying at infinity such that
\begin{equation}\label{ksv-e:reg-var}
\phi(\lambda) \asymp \lambda^{\alpha/2}\ell(\lambda)\, ,\quad \lambda \to \infty\, .
\end{equation}
\begin{remark}\label{ksv-r-interpretation-H}{\rm
The precise interpretation of \eqref{ksv-e:reg-var} will be as follows:
There exists a positive constant $c>1$ such that
$$
c^{-1}\le \frac{\phi(\lambda)}{\lambda^{\alpha/2}\ell(\lambda)} \le
c \qquad \textrm{for all }\lambda \in [1,\infty)\, .
$$
The choice of the interval $[1,\infty)$ is, of course, arbitrary.
Any interval $[a,\infty)$ would do, but with a different constant.
This follows from the
continuity of $\phi$ and the assumption that $\ell$ is
locally bounded above and below by positive constants.
Moreover, by choosing $a>0$ large enough, we could dispense with the
local boundedness assumption. Indeed, by \cite[Lemma 1.3.2]{BGT},
every slowly varying function at infinity is locally bounded on
$[a,\infty)$ for $a$ large enough.
Although the choice of interval $[1,\infty)$ is arbitrary, it will
have as a consequence the fact that all relations of the type
$f(t)\asymp g(t)$ as $t \to \infty$ (respectively $t\to 0+$)
following from \eqref{ksv-e:reg-var} will be interpreted as
$\tilde{c}^{-1} \le f(t)/g(t) \le \tilde{c}$ for $t\ge 1$
(respectively $0<t\le 1$). }
\end{remark}
The assumption \eqref{ksv-e:reg-var} is a very weak assumption on the
asymptotic behavior of $\phi$ at infinity. All the examples listed above
Lemma \ref{ksv-l:H2-valid} (in (i), (iii) and (v) one needs to take $\alpha<2$)
satisfy this assumption. In fact, they satisfy the following stronger
assumption
\begin{equation}\label{ksv-e:reg-var2}
\phi(\lambda)= \lambda^{\alpha/2}\ell(\lambda)\, ,
\end{equation}
where $\ell$ is a function slowly varying at infinity. By inspecting
the table in \cite[Chapter 15]{SSV}, one can come up with a lot more
examples of complete Bernstein functions satisfying this stronger
assumption.
In the next example we construct a complete Bernstein function
satisfying \eqref{ksv-e:reg-var}, but not the stronger
\eqref{ksv-e:reg-var2}.
\begin{example}{\rm
Suppose that $\alpha\in (0, 2)$. Let $F$ be a function on $[0,
\infty)$ defined by $F(x)=0$ on $0 \le x<1$
and
$$
F(x)=2^n\, , \quad 2^{2(n-1)/\alpha} \le x < 2^{2n/\alpha},\ n=1, 2,
\dots.
$$
Then clearly $F$ is non-decreasing and $x^{\alpha/2} \le F(x) \le 2
x^{\alpha/2}$ for all
$x \ge 1$.
This implies that for all $t>0$,
$$
\frac{t^{\alpha/2}}{2} \le \liminf_{x\to \infty} \frac{F(tx)}{F(x)} \le
\limsup_{x\to \infty} \frac{F(tx)}{F(x)} \le 2 t^{\alpha/2}.
$$
If $F$ were regularly varying, the above inequality would imply that
the index was $\alpha/2$, hence the limit of $F(tx)/F(x)$ as
$x\to \infty$ would be equal to $c t^{\alpha/2}$ for some positive
constant $c$. But this does not happen because of the following.
Take $t=2^{2/\alpha}$ and a subsequence $x_n=2^{2n/\alpha}$. Then $t
x_n= 2^{2(n+1)/\alpha}$ and therefore
$$
F(t x_n)/F(x_n)=2^{n+2} / 2^{n+1}=2
$$
which should be equal to $c t^{\alpha/2}=c
(2^{2/\alpha})^{\alpha/2}=2c$, implying $c=1$. On the other hand,
take any $t\in (1, 2^{2/\alpha})$ and the same subsequence
$x_n=2^{2n/\alpha}$. Then $t x_n\in [2^{2n/\alpha},
2^{2(n+1)/\alpha} )$, implying $F(t x_n)=F(x_n)$. Thus the quotient
$F(t x_n)/F(x_n)=1$, which should be equal to $c
t^{\alpha/2}=t^{\alpha/2}>1$ for all $t\in (1,2^{2/\alpha})$. Clearly this is
impossible, so $F$ is not regularly varying. This also shows that
$F(x)$ is not asymptotically equivalent to $c x^{\alpha/2}$ for any $c>0$ as $x\to \infty$.
Let $\sigma$ be the measure corresponding to the nondecreasing function $F$ (in the sense that $\sigma(dt)=F(dt)$):
$$
\sigma:=\sum^{\infty}_{n=1} 2^n\delta_{2^{2n/\alpha}}\, .
$$
Since $\int_{(0,\infty)}(1+t)^{-1}\, \sigma(dt)<\infty$, $\sigma$ is a Stieltjes measure. Let
$$
g(\lambda):=\int_{(0,\infty)}\frac{1}{\lambda+t}\, \sigma(dt)=\sum_{n=1}^{\infty} \frac{2^n}{\lambda +2^{2n/\alpha}}
$$
be the corresponding Stieltjes function.
It follows from \cite[Theorem 1.7.4]{BGT} or \cite[Lemma 6.2]{WYY}
that $g$ is not regularly varying at infinity.
Moreover, since $F(x)\asymp x^{\alpha/2}$,
$x \to \infty$,
it follows from \cite[Lemma 6.3]{WYY} that $g(\lambda)\asymp \lambda^{\alpha/2-1}$, $\lambda \to \infty$.
Therefore, the function $f(\lambda):=1/ g(\lambda)$ is a
complete Bernstein function which is not regularly varying at infinity,
but satisfies $f(\lambda)\asymp \lambda^{1-\alpha/2}$, $\lambda \to \infty$.
}
\end{example}
Now we are going to establish the asymptotic behaviors of $u$ and
$\mu$ under the assumption {\bf (H)}.
First we claim that under the assumption \eqref{ksv-e:reg-var}, there exist
$\delta\in (0, 1)$ and $a, s_0>0$ such that
\begin{equation}\label{ksv-e:zahle3}
\phi(\lambda t)\ge a\lambda^{\delta}\phi(t), \quad \lambda\ge 1,
t\ge 1/s_0.
\end{equation}
Indeed, by Potter's theorem (cf. \cite[Theorem 1.5.6]{BGT}), for
$0<\varepsilon<\alpha/2$ there exists $t_1$ such that
$$
\frac{\ell( t)}{\ell(\lambda t)}\le 2 \max\left(\left(\frac{
t}{\lambda t}\right)^{\varepsilon}, \left(\frac{\lambda
t}{t}\right)^{\varepsilon}\right)=2\lambda^{\varepsilon}\, ,\quad \lambda \ge 1,
t\ge t_1\, .
$$
Hence,
$$
\phi(\lambda t)\ge c_2 (\lambda t)^{\alpha/2}\ell(\lambda t) = c_2
t^{\alpha/2}\ell(t) \lambda^{\alpha/2} \frac{\ell(\lambda
t)}{\ell(t)}\ge c_3 \phi(t)\lambda^{\alpha/2-\varepsilon}\, ,\quad
\lambda\ge 1, t\ge t_1.
$$
By taking $\delta:=\alpha/2 -\varepsilon\in (0,1)$, $a=c_3$, and
$s_0=1/t_1$ we arrive at \eqref{ksv-e:zahle3}.
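In the model case $\phi(\lambda)=\lambda^{\alpha/2}$ (so that $\ell\equiv 1$) no appeal to Potter's theorem is needed:
$$
\phi(\lambda t)=\lambda^{\alpha/2}\phi(t)\, ,\qquad \lambda, t>0\, ,
$$
so \eqref{ksv-e:zahle3} holds with $a=1$, $\delta=\alpha/2$ and arbitrary $s_0>0$.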
\begin{thm}\label{ksv-t:behofu}
Let $S$ be a
complete (possibly killed) subordinator
with Laplace exponent $\phi$ satisfying
{\bf (H)}. Then the potential density $u$ of $S$ satisfies
\begin{equation}\label{ksv-e:behofu}
u(t)\asymp
t^{-1}\phi(t^{-1})^{-1}\asymp\frac{t^{\alpha/2-1}}{\ell(t^{-1})}\, ,
\quad t \to 0+\,.
\end{equation}
\end{thm}
\noindent{\bf Proof.}
Put
$$
w(t):=\int_0^{\infty}e^{-s t}u(s)\, ds =\frac{1}{\phi(t)},
$$
then by \eqref{ksv-e:zahle3} we have
$$
w(\lambda t)\le a^{-1}\lambda^{-\delta}w(t), \quad \lambda\ge 1, t\ge
1/s_0.
$$
Applying the second part of Proposition \ref{ksv-p:zahle} we see that
there is a constant $c>0$ such that
$$
u(t)\ge c t^{-1}w(t^{-1}),
$$
for small $t>0$. Combining this inequality with
\eqref{ksv-e:u-upper-bound} we arrive at \eqref{ksv-e:behofu}.
{
$\Box$
}
\begin{thm}\label{ksv-t:behofmu}
Let $S$ be a complete subordinator with Laplace exponent $\phi$
with zero killing coefficient
satisfying {\bf (H)}.
Then the L\'evy density $\mu$ of $S$ satisfies
\begin{equation}\label{ksv-e:behofmu}
\mu(t)\asymp t^{-1}\phi(t^{-1})\asymp t^{-\alpha/2-1}\ell(t^{-1})\,
, \quad t\to 0+\,.
\end{equation}
\end{thm}
\noindent{\bf Proof.} Since $\phi$ is a complete Bernstein function, the function
$\psi(\lambda):=\lambda/\phi(\lambda)$ is also a complete Bernstein
function and satisfies
$$
\psi(\lambda)\asymp \frac{\lambda^{1-\alpha/2}}{\ell(\lambda)}\,
,\quad \lambda \to \infty,
$$
where $\alpha\in (0,2)$ and $\ell$ are the same as in
\eqref{ksv-e:reg-var}. It follows from Corollary \ref{ksv-c2.4} that the
potential measure of
a killed subordinator
with Laplace exponent $\psi$ has
a completely monotone density $v$ given by
$$
v(t)=\mu(t, \infty)=\int^\infty_t\mu(s)ds.
$$
Applying Theorem \ref{ksv-t:behofu} to $\psi$ and $v$ we get
\begin{equation}\label{ksv-e:v-psi}
\mu(t,\infty)=v(t)\asymp t^{-1}\psi(t^{-1})^{-1}=\phi(t^{-1})\,
,\quad t \to 0\, .
\end{equation}
By using the elementary inequality $1-e^{-c y}\le c(1-e^{-y})$ valid
for all $c \ge 1$ and all $y>0$, we get that $\phi(c\lambda)\le
c\phi(\lambda)$ for all $c\ge 1$ and all $\lambda >0$. Hence
$\phi(s^{-1})=\phi(2 (2s)^{-1})\le 2\phi((2s)^{-1})$ for all $s>0$.
Therefore, by \eqref{ksv-e:v-psi}, for all $s \in (0, 1/2)$
$$
v(s)\le c_1 \phi(s^{-1})\le 2c_1 \phi((2s)^{-1}) \le c_2 v(2s)
$$
for some constants $c_1, c_2>0$. Since
$$
v(t/2)\ge v(t/2)-v(t)=\int_{t/2}^{t}\mu(s)\, ds \ge (t/2)\mu(t)\, ,
$$
we have for all
$t \in (0,1)$,
$$
\mu(t)\le 2 t^{-1}v(t/2)\le 2c_2\, t^{-1} v(t)\le c_3
t^{-1}\phi(t^{-1})\, ,
$$
for some constant $c_3>0$.
Using \eqref{ksv-e:zahle3}
we get that for every $\lambda \ge 1$
$$
\phi(s^{-1})=\phi(\lambda(\lambda s)^{-1})\ge
a\lambda^{\delta}\phi((\lambda s)^{-1})\, ,\quad s\le
\frac{s_0}{\lambda}\, .
$$
It follows from \eqref{ksv-e:v-psi} that there
exists a constant $c_4>0$ such that
$$
c_4^{-1}\phi(s^{-1})\le v(s) \le c_4 \phi(s^{-1})\, , \quad s< 1\,.
$$
Fix $\lambda:=2^{1/\delta}((c_4^2 a^{-1})\vee 1)^{1/\delta}\ge1$.
Then for $s\le (s_0\wedge 1)/\lambda$,
$$
v(\lambda s)\le c_4\phi((\lambda s)^{-1})\le c_4 a^{-1}
\lambda^{-\delta}\phi(s^{-1})\le c_4^2a^{-1}\lambda^{-\delta}
v(s)
\le \frac12 v(s)
$$
by our choice of $\lambda$. Further,
$$
(\lambda-1)s\mu(s)\ge \int_s^{\lambda s}\mu(t)\, dt=v(s)-v(\lambda
s)\ge v(s)-\frac12 v(s)=\frac12 v(s)\, .
$$
This implies that for all small $t$
$$
\mu(t)\ge \frac{1}{2(\lambda-1)}t^{-1} v(t)= c_5 t^{-1}v(t)\ge c_6
t^{-1}\phi(t^{-1})
$$
for some constants $c_5, c_6>0$. The proof is now complete.
{
$\Box$
}
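In the stable case $\phi(\lambda)=\lambda^{\alpha/2}$ the last two theorems can be verified directly: by the explicit formulas recalled above,
$$
u(t)=\frac{t^{\alpha/2-1}}{\Gamma(\alpha/2)}=\frac{1}{\Gamma(\alpha/2)}\,\frac{1}{t\,\phi(t^{-1})}
\qquad\hbox{and}\qquad
\mu(t)=\frac{\alpha/2}{\Gamma(1-\alpha/2)}\, t^{-\alpha/2-1}=\frac{\alpha/2}{\Gamma(1-\alpha/2)}\,\frac{\phi(t^{-1})}{t}\, ,
$$
so \eqref{ksv-e:behofu} and \eqref{ksv-e:behofmu} hold with $\ell\equiv 1$ for all $t>0$, not only asymptotically.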
\section{Subordinate Brownian motion}\label{ksv-sec-sbm}
\subsection{Definitions and technical lemma}\label{ksv-ss:sbm}
Let $B=(B_t,
{\mathbb P}_x)$ be a Brownian motion in ${\mathbb R}^d$ with transition
density $p(t,x,y)=p(t,y-x)$ given by
$$
p(t,x)=(4\pi t)^{-d/2}\exp\left(-\frac{|x|^2}{4t}\right), \quad t>0,\,
x,y\in {\mathbb R}^d \, .
$$
The semigroup $(P_t:\, t\ge 0)$ of $B$ is defined by $P_tf(x)=
{\mathbb E}_x[f(B_t)]=\int_{{\mathbb R}^d}p(t,x,y)f(y)\, dy$, where $f$ is a
nonnegative Borel function on ${\mathbb R}^d$. Recall that if $d\ge 3$, the
Green function $G^{(2)}(x,y)=G^{(2)}(x-y)$, $x,y\in {\mathbb R}^d$, of $B$ is
well defined and is equal to
$$
G^{(2)}(x)=\int_0^{\infty}p(t,x)\, dt =
\frac{\Gamma(d/2-1)}{4\pi^{d/2}}\, |x|^{-d+2}\, .
$$
Let $S=(S_t:\, t\ge 0)$ be a complete subordinator independent of $B$, with
Laplace exponent $\phi(\lambda)$, L\'evy measure $\mu$ and potential
measure $U$. In the rest of the paper, we will always assume that
$S$ is a complete subordinator with zero killing coefficient, independent of $B$, satisfying ({\bf H}).
Hence $\lim_{\lambda\to \infty}\phi(\lambda)=\infty$,
and thus $S$ has a completely monotone
potential density $u$.
We define a new process $X=(X_t:\, t\ge 0)$ by $X_t:=B_{S_t}$. Then
$X$ is a L\'evy process with characteristic exponent
$\Phi(x)=\phi(|x|^2)$ (see e.g.~\cite[pp.~197--198]{Sat}), called a
subordinate Brownian motion. The semigroup $(Q_t:\, t\ge 0)$
of the process $X$ is given by
$$
Q_t f(x)= {\mathbb E}_x[f(X_t)]={\mathbb E}_x[f(
B_{S_t})]=\int_0^{\infty} P_s f(x)\,
{\mathbb P}(S_t\in ds)\, .
$$
The semigroup $Q_t$ has a
density $q(t,x,y)=q(t,x-y)$ given by $q(t,x)=\int_0^{\infty}p(s,x)\,
{\mathbb P}(S_t\in ds)$.
Recall that, according to the criterion of Chung-Fuchs type (see \cite{PS71}
or \cite[pp. 252--253]{Sat}), $X$ is transient if and only if for some small $r>0$,
$\int_{|x|<r} \frac{1}{\Phi(x)}\, dx <\infty$. Since
$\Phi(x)=\phi(|x|^2)$, it follows that $X$ is transient if and only
if
\begin{equation}\label{ksv-transience}
\int_{0+}\frac{\lambda^{d/2-1}}{\phi(\lambda)}\, d\lambda <\infty\, .
\end{equation}
This is always true if $d\ge 3$, and, depending on the subordinator,
may be true for $d=1$ or $d=2$. In the case $d\le 2$, if there
exists $\gamma\in [0, d/2)$ such that
\begin{equation}\label{ksv-e:ass4trans}
\liminf_{\lambda \to 0}\frac{\phi(\lambda)}{\lambda^{\gamma}}>0,
\end{equation}
then \eqref{ksv-transience} holds.
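For orientation, consider again $\phi(\lambda)=\lambda^{\alpha/2}$, so that $\Phi(x)=|x|^{\alpha}$ and $X$ is the rotationally symmetric $\alpha$-stable process (with the normalization of $B$ used here). Then \eqref{ksv-transience} reads
$$
\int_{0+}\lambda^{(d-\alpha)/2-1}\, d\lambda <\infty\, ,
$$
i.e.\ $X$ is transient exactly when $d>\alpha$; for $d\le 2$ and $\alpha<d$ this is also covered by \eqref{ksv-e:ass4trans} with $\gamma=\alpha/2\in [0,d/2)$.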
For $x\in {\mathbb R}^d$
and a Borel subset $A$ of ${\mathbb R}^d$, the potential
measure of $X$ is given by
\begin{eqnarray*}
G(x,A)&=&
{\mathbb E}_x\int_0^{\infty}
{\bf 1}_{\{X_t\in A\}}
dt=
\int_0^{\infty}Q_t{\bf 1}_A(x)\, dt
=\int_0^{\infty}\int_0^{\infty}P_s {\bf 1}_A(x){\mathbb P}(S_t\in ds)\, dt\\
&=&\int_0^{\infty}P_s {\bf 1}_A\, u(s)\,ds =\int_A
\int_0^{\infty}p(s,x,y)\, u(s)\, ds\, dy \, ,
\end{eqnarray*}
where the second line follows from (\ref{ksv-potential measure}).
If $X$ is transient and $A$ is bounded, then
$G(x,A)<\infty$ for every
$x\in {\mathbb R}^d$.
In this case
we denote by $G(x,y)$ the density of the potential
measure $G(x,\cdot)$. Clearly, $G(x,y)=G(y-x)$ where
\begin{equation}\label{ksv-green function}
G(x)=\int_0^{\infty} p(t,x)\, U(dt)=\int_0^{\infty} p(t,x) u(t)\,
dt\, .
\end{equation}
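In the stable case $\phi(\lambda)=\lambda^{\alpha/2}$ with $d>\alpha$, where $u(t)=t^{\alpha/2-1}/\Gamma(\alpha/2)$, the integral in \eqref{ksv-green function} can be evaluated explicitly (substitute $s=|x|^2/(4t)$) and yields the Riesz kernel
$$
G(x)=\frac{\Gamma((d-\alpha)/2)}{4^{\alpha/2}\pi^{d/2}\Gamma(\alpha/2)}\, |x|^{\alpha-d}\, .
$$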
The L\'evy measure $\Pi$ of $X$ is given by
(see e.g.~\cite[pp. 197--198]{Sat})
$$
\Pi(A)=\int_A \int_0^{\infty}p(t,x)\, \mu(dt)\, dx =\int_A J(x)\,
dx\, ,\quad A\subset {\mathbb R}^d\, ,
$$
where
\begin{equation}\label{ksv-jumping function}
J(x):= \int_0^{\infty}p(t,x)\, \mu(dt)=\int_0^{\infty}p(t,x)\mu(t)dt
\end{equation}
is the L\'evy density of $X$. Define the function $j:(0,\infty)\to
(0,\infty)$ by
\begin{equation}\label{ksv-function j measure}
j(r):= \int_0^{\infty} (4\pi)^{-d/2} t^{-d/2} \exp\left(-\frac{r^2}{4t}\right)\,
\mu(dt)\, , \quad r>0\, ,
\end{equation}
and note that by (\ref{ksv-jumping function}), $J(x)=j(|x|)$, $x\in
{\mathbb R}^d\setminus \{0\}$.
Since $x\mapsto p(t,x)$ is continuous and radially decreasing, we conclude that both $G$ and
$J$ are continuous on ${\mathbb R}^d\setminus \{0\}$ and radially decreasing.
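For example, in the stable case $\phi(\lambda)=\lambda^{\alpha/2}$, where $\mu(t)=\frac{\alpha/2}{\Gamma(1-\alpha/2)}t^{-1-\alpha/2}$, the same substitution $s=|x|^2/(4t)$ in \eqref{ksv-jumping function} gives
$$
J(x)=\frac{\alpha\, 2^{\alpha-1}\Gamma((d+\alpha)/2)}{\pi^{d/2}\Gamma(1-\alpha/2)}\, |x|^{-d-\alpha}\, ,
$$
the familiar jumping kernel of the rotationally symmetric $\alpha$-stable process.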
The following technical lemma will play a key role in establishing
the asymptotic behaviors of the Green function $G$ and the
L\'evy density $J$ of the subordinate Brownian motion $X$ in the next
subsection.
\begin{lemma}\label{ksv-key technical}
Suppose that $w:(0,\infty)\to (0,\infty)$ is a decreasing function,
$\ell:(0,\infty)\to (0,\infty)$ a measurable
function which is
locally bounded above and below by positive constants and is
slowly varying at $\infty$, and $\beta\in [0,2]$,
$\beta>1-d/2$. If $d=1$ or $d=2$, we additionally assume that there
exist constants $c>0$ and $\gamma <d/2$ such that
\begin{equation}\label{ksv-asymp v2}
w(t)\le ct^{\gamma-1}\, , \quad \forall \, t \ge 1\, .
\end{equation}
Let
$$
I(x):=\int_0^{\infty}(4\pi t)^{-d/2}e^{-\frac{|x|^2}{4t}}w(t)\, dt\, .
$$
\begin{itemize}
\item[(a)] If
\begin{equation}\label{ksv-asymp v}
w(t)\asymp \frac{1}{t^{\beta}\ell(1/t)}\, , \quad t\to 0\, ,
\end{equation}
then
$$
I(x)\asymp \frac{1}{|x|^{d+2\beta-2}\, \ell \big(\frac{1}{|x|^2}\big)} \asymp\frac{w(|x|^2)}{|x|^{d-2}}\, , \quad |x|\to 0 \, .
$$
\item[(b)] If
\begin{equation}\label{ksv-asymp v-sim}
w(t)\sim \frac{1}{t^{\beta}\ell(1/t)}\, , \quad t\to 0\, ,
\end{equation}
then
$$
I(x)\sim \frac{\Gamma(d/2+\beta-1)}{4^{1-\beta}\pi^{d/2}}\, \frac{1}{|x|^{d+2\beta-2}\ell\big(\frac{1}{|x|^2}\big)}\, , \quad |x|\to 0\, .
$$
\end{itemize}
\end{lemma}
\noindent{\bf Proof.} (a) Let us first note that the assumptions of the lemma guarantee
that $I(x)<\infty $ for every $x\neq 0$. Now, let $\xi\ge 1/4$ be a constant to be
chosen later. By a change of variable we get
\begin{eqnarray}
\int_0^{\infty}(4\pi t)^{-d/2}e^{-\frac{|x|^2}{4t}}w(t)\, dt
&=& \frac{1}{4\pi^{d/2}}\left(|x|^{-d+2}\int_0^{\xi |x|^2} t^{d/2-2}
e^{-t} w\left(\frac{|x|^2}{4t}\right)\, dt \right.\nonumber\\
&&\qquad \left. +
|x|^{-d+2}\int_{\xi|x|^2}^{\infty} t^{d/2-2} e^{-t}
w\left(\frac{|x|^2}{4t}\right)\, dt\right)\nonumber\\
&=:&
\frac{1}{4\pi^{d/2}}\left(|x|^{-d+2}I_1(x)+|x|^{-d+2}I_2(x)\right)\, .\label{ksv-e:Ione+Itwo}
\end{eqnarray}
We first consider $I_1(x)$ for the case $d=1$ or $d=2$. It follows
from the assumptions that there exists a positive constant $c_1$
such that $w(s)\le c_1 s^{\gamma-1}$ for all $s\ge 1/(4\xi)$. Thus
\begin{eqnarray*}
I_1(x)\le \int_0^{\xi |x|^2}
t^{d/2-2}e^{-t}c_1\left(\frac{|x|^2}{4t}\right)^{\gamma-1}\, dt \le
c_2 |x|^{2\gamma-2}\int_0^{\xi|x|^2}t^{d/2-\gamma-1}\, dt = c_3
|x|^{d-2}\, .
\end{eqnarray*}
It follows that
\begin{equation}\label{ksv-Ione}
\lim_{|x|\to 0} |x|^{-d+2}I_1(x) \left(|x|^{d-2+2\beta}\,
\ell\left(\frac{1}{|x|^2}\right) \right) =0\, .
\end{equation}
In the case $d\ge 3$, we proceed similarly, using the bound $w(s)\le
w(1/(4\xi))$ for $s\ge 1/(4\xi)$.
Now we consider $I_2(x)$:
\begin{eqnarray*}
&&|x|^{-d+2}I_2(x)\,=\,\frac{1}{|x|^{d-2}}\int_{\xi |x|^2}^{\infty}
t^{d/2-2}e^{-t}w\left(\frac{|x|^2}{4t}\right)\, dt\\
&&=\frac{4^\beta}{|x|^{d+2\beta-2}\, \ell(\frac{1}{|x|^2})}\int_{\xi
|x|^2}^{\infty} t^{d/2-2+\beta}e^{-t} w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)\, \frac{\ell(\frac{1}{|x|^2})}{\ell
(\frac{4t}{|x|^2})}\, dt\, .
\end{eqnarray*}
Using the assumption (\ref{ksv-asymp v}), we can see that there is a
constant $c_1>1$ such that
$$
c_1^{-1} \le w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right) <c_1\, ,
$$
for all $t$ and $x$ satisfying $|x|^2/(4t)\le 1/(4\xi)$.
Now choose a $\delta\in (0,d/2-1+\beta)$ (note that by assumption,
$d/2-1+\beta>0$). By Potter's theorem (cf. \cite[Theorem 1.5.6
(i)]{BGT}), there exists $\rho=\rho(\delta)\ge1$ such that
\begin{equation}\label{ksv-e:potter1}
\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})}\le
2\left(\left(\frac{1/|x|^2}{4t/|x|^2}\right)^{\delta}\vee
\left(\frac{1/|x|^2}{4t/|x|^2}\right)^{-\delta}\right)=2\left((4t)^{\delta}\vee
(4t)^{-\delta}\right)\le c_2(t^{\delta}\vee t^{-\delta})
\end{equation}
whenever $\frac{1}{|x|^2}>\rho$ and $\frac{4t}{|x|^2} >\rho$.
By reversing the roles of $1/|x|^2$ and $4t/|x|^2$ we also get that
\begin{equation}\label{ksv-e:potter2}
\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})}\ge c_2^{-1}(t^{\delta}\wedge t^{-\delta})\
\end{equation}
for $\frac{1}{|x|^2}>\rho$ and $\frac{4t}{|x|^2} >\rho$.
Now we define $\xi:=\frac{\rho}{4}$ so that for all $x\neq 0$
with $|x|^2 \le \frac{1}{4\xi}$ and $t>\xi|x|^2$ we have that
\begin{eqnarray}
c_1^{-1} c_2^{-1} \, t^{d/2-2+\beta}e^{-t}
(t^{\delta}\wedge t^{-\delta}) &\le & t^{d/2-2+\beta}e^{-t} w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)
\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})} \,
\nonumber \\
& \le & c_1 c_2 \, t^{d/2-2+\beta}e^{-t}
(t^{\delta}\vee t^{-\delta})\, .\label{ksv-e:key-lemma-1}
\end{eqnarray}
Let
\begin{eqnarray*}
c_3&:=&c_1^{-1} c_2^{-1}\int_0^{\infty}t^{d/2-2+\beta}e^{-t}
(t^{\delta}\wedge t^{-\delta})
dt <\infty\, ,\\
c_4&:=& c_1 c_2 \int_0^{\infty} t^{d/2-2+\beta}e^{-t}
(t^{\delta}\vee t^{-\delta})
dt <\infty\, .
\end{eqnarray*}
The integrals are finite because of assumption $d/2-2+\beta-\delta>-1$.
It follows from \eqref{ksv-e:key-lemma-1} that
\begin{eqnarray*}
c_3&\le & \liminf_{|x|\to 0}\int_0^{\infty} t^{d/2-2+\beta}e^{-t}
w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)
\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})} \,
{\bf 1}_{(\xi|x|^2,\infty)}(t)
dt\\
&\le & \limsup_{|x|\to 0}\int_0^{\infty} t^{d/2-2+\beta}e^{-t} w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)
\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})} \,
{\bf 1}_{(\xi|x|^2,\infty)}(t)
dt \le c_4\, .
\end{eqnarray*}
This means that
\begin{eqnarray}
\lefteqn{|x|^{-d+2}I_2(x) \left(|x|^{d-2+2\beta}\, \ell(\frac{1}{|x|^2}) \right)}\nonumber \\
&=&4^{\beta}\int_{\xi
|x|^2}^{\infty} t^{d/2-2+\beta}e^{-t} w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)\, \frac{\ell(\frac{1}{|x|^2})}{\ell
(\frac{4t}{|x|^2})}\, dt \asymp 1\, .\label{ksv-Itwo}
\end{eqnarray}
Combining \eqref{ksv-Ione} and \eqref{ksv-Itwo} we have proved the first part of the lemma.
\noindent
(b) The proof is almost the same with a small difference at the very end. Since $\ell$ is slowly varying at $\infty$, we have that
$$
\lim_{|x|\to
0}\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})}=1\, .
$$
This implies that
\begin{eqnarray*}
& &\lim_{|x|\to 0} t^{d/2-2+\beta}e^{-t} w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)
\frac{\ell(\frac{1}{|x|^2})}{\ell(\frac{4t}{|x|^2})} \,
{\bf 1}_{(\xi|x|^2,\infty)}(t)\\
& &\quad = t^{d/2-2+\beta}e^{-t} {\bf 1}_{(0,\infty)}(t) \, .
\end{eqnarray*}
By the right-hand side inequality in \eqref{ksv-e:key-lemma-1}, we can apply the dominated convergence theorem to conclude that
\begin{eqnarray*}
\lefteqn{\lim_{|x|\to 0}|x|^{-d+2}I_2(x) \left(|x|^{d-2+2\beta}\, \ell(\frac{1}{|x|^2}) \right)}\\
&=&\lim_{|x|\to 0} 4^{\beta}\int_{0}^{\infty} t^{d/2-2+\beta}e^{-t} w\left(\frac{|x|^2}{4t}\right)
\left(\frac{|x|^2}{4t}\right)^{\beta}\, \ell\,
\left(\frac{4t}{|x|^2}\right)\, \frac{\ell(\frac{1}{|x|^2})}{\ell
(\frac{4t}{|x|^2})}{\bf 1}_{(\xi|x|^2,\infty)}(t)\, dt \\
&=&4^{\beta}\Gamma(d/2-1+\beta)\, .
\end{eqnarray*}
Together with \eqref{ksv-e:Ione+Itwo} and \eqref{ksv-Ione} this proves the second part of the lemma.
{
$\Box$
}
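As a check on the constant in part (b), note that if $\ell\equiv 1$ and $w(t)=t^{-\beta}$ exactly, then the substitution $s=|x|^2/(4t)$ used above evaluates $I$ in closed form:
$$
I(x)=\frac{\Gamma(d/2+\beta-1)}{4^{1-\beta}\pi^{d/2}}\, |x|^{-(d+2\beta-2)}\, ,\qquad x\neq 0\, ,
$$
provided $\beta>1-d/2$.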
\subsection{Asymptotic behavior of the Green function and L\'evy
density}\label{ksv-ss:asympgj}
The goal of this subsection is to establish the asymptotic behaviors
of the Green function $G(x)$ and L\'evy density $J(x)$ of the
subordinate process
$X$ under certain assumptions on the Laplace
exponent $\phi$ of the subordinator $S$. We start with the Green
function.
\begin{thm}\label{ksv-t:Gorigin}
Suppose that the Laplace exponent $\phi$ is a complete Bernstein function satisfying the assumption
{\bf (H)} and that $\alpha\in (0,2\wedge d)$. In the case $d\le 2$,
we further assume \eqref{ksv-e:ass4trans}. Then
$$
G(x)\asymp \frac1{|x|^{d}\phi(|x|^{-2})}\asymp
\frac1{|x|^{d-\alpha}\ell(|x|^{-2})}, \qquad |x|\to 0.
$$
\end{thm}
\noindent{\bf Proof.} It follows from Theorem
\ref{ksv-t:behofu}
that the potential
density $u$ of $S$ satisfies
$$
u(t)\asymp
t^{-1}\phi(t^{-1})^{-1}\asymp\frac{t^{\alpha/2-1}}{\ell(t^{-1})}\, ,
\quad t \to 0+\,.
$$
Using \eqref{ksv-e:u-upper-bound} and \eqref{ksv-e:ass4trans}
we conclude that in case $d\le 2$ there exists $c>0$ such that
$$
u(t)\le ct^{\gamma-1}, \quad t\ge 1\, .
$$
We can now apply Lemma \ref{ksv-key technical} with $w(t)=u(t)$,
$\beta=1-\alpha/2$ to obtain the required asymptotic behavior. {
$\Box$
}
\begin{remark}\label{ksv-cond&zahle}{\rm (i) Since $\alpha$ is always assumed to be in $(0, 2)$,
the assumption $\alpha\in (0, 2\wedge d)$ in the theorem above makes a difference only
in the case $d=1$.
\noindent
(ii) In case $d\ge 3$, the conclusion of the theorem above is proved in \cite[Theorem 1 (ii)--(iii)]{Z}
under weaker assumptions. The statement of \cite[Theorem 1 (ii)]{Z} in case $d\le 2$ is incorrect and the proof has an error.
}
\end{remark}
The asymptotic behavior near the origin of $J(x)$ is contained in
the following result.
\begin{thm}\label{ksv-t:Jorigin}
Suppose that the Laplace exponent $\phi$ is a complete Bernstein
function satisfying the assumption {\bf (H)}.
Then
$$
J(x)\asymp \frac{\phi(|x|^{-2})}{|x|^{d}}\asymp
\frac{\ell(|x|^{-2})}{|x|^{d+\alpha}}, \qquad |x|\to 0.
$$
\end{thm}
\noindent{\bf Proof.} It follows from Theorem \ref{ksv-t:behofmu} that the
L\'evy density $\mu$ of $S$ satisfies
$$
\mu(t)\asymp t^{-1}\phi(t^{-1})\asymp t^{-\alpha/2-1}\ell(t^{-1})\,
, \quad t \to 0+\,.
$$
Since $\mu(t)$ is decreasing and integrable at infinity,
one can easily show that there exists $c>0$ such that
$$
\mu(t)\le ct^{-1}, \quad t\ge 1.
$$
In fact, if the claim above were not valid, we could find an increasing sequence
$\{t_n\}$ such that $t_1>1, t_n\uparrow\infty, t_n-t_{n-1}\ge t_n/2$ and that
$\mu(t_n)\ge n t_n^{-1}$. Then we would have
$$
\int^\infty_1\mu(t)dt=\int^{t_1}_1\mu(t)dt+\sum^\infty_{n=2}\int^{t_n}_{t_{n-1}}\mu(t)dt
\ge \frac{t_1-1}{t_1}+\sum^\infty_{n=2}\frac{n}2=\infty,
$$
contradicting the integrability of $\mu$ at infinity. Therefore the claim above is valid.
We can now apply Lemma \ref{ksv-key technical} with $w(t)=\mu(t)$,
$\beta=1+\alpha/2$ and $\gamma=0$ to obtain the required asymptotic
behavior. {
$\Box$
}
\begin{prop}\label{ksv-properties of j}
Suppose that the Laplace exponent $\phi$ is a complete Bernstein function satisfying the assumption
{\bf (H)}. Then the following assertions hold.
\begin{description}
\item{(a)} For any $K>0$, there exists $C_4=C_4(K)
>1$ such that
\begin{equation}\label{ksv-H:1}
j(r)\le C_4\, j(2r), \qquad \forall r\in (0, K).
\end{equation}
\item{(b)} There exists $C_5
>1$ such that
\begin{equation}\label{ksv-H:2}
j(r)\le C_5\, j(r+1), \qquad \forall r>1.
\end{equation}
\end{description}
\end{prop}
\noindent{\bf Proof.}
\eqref{ksv-H:1} follows immediately from Theorem \ref{ksv-t:Jorigin}.
However, we give below a proof of both \eqref{ksv-H:1} and \eqref{ksv-H:2}
using only \eqref{ksv-nu 0}--\eqref{ksv-nu infty}.
For simplicity we redefine in this proof the function $j$ by
dropping the factor $(4\pi)^{-d/2}$ from its definition. This does
not effect \eqref{ksv-H:1} and \eqref{ksv-H:2}. It follows from Lemma
\ref{ksv-l:H2-valid} and Theorem \ref{ksv-t:behofmu} that
\begin{description}
\item{(a)} For any $K>0$, there exists $c_1=c_1(K)
>1$ such that
\begin{equation}\label{ksv-nu 0}
\mu(r)\le c_1\, \mu(2r), \qquad \forall r\in (0, K).
\end{equation}
\item{(b)} There exists $c_2
>1$ such that
\begin{equation}\label{ksv-nu infty}
\mu(r)\le c_2\, \mu(r+1), \qquad \forall r>1.
\end{equation}
\end{description}
Let $0< r < K$. We have
\begin{eqnarray*}
\lefteqn{j(2r) = \int_0^{\infty} t^{-d/2} \exp(- r^2/t)\mu(t)\, dt} \\
& \ge & \frac12 \left(\int_{K/2}^{\infty}t^{-d/2} \exp(-
r^2/t)\mu(t)\, dt
+\int_0^{2K} t^{-d/2} \exp(- r^2/t)\mu(t)\, dt\right)\\
&=& \frac12 (I_1 + I_2).
\end{eqnarray*}
Now,
\begin{eqnarray*}
I_1 &=& \int_{K/2}^{\infty}t^{-d/2} \exp(- \frac{r^2}{t})\mu(t)\, dt
= \int_{K/2}^{\infty}t^{-d/2} \exp(-\frac{ r^2}{4t})
\exp(-\frac{3 r^2}{4t})\mu(t)\, dt \\
&\ge &\int_{K/2}^{\infty}t^{-d/2} \exp(-\frac{ r^2}{4t})
\exp(-\frac{3 r^2}{2K})\mu(t)\, dt
\ge e^{-3K/2}\int_{K/2}^{\infty}t^{-d/2} \exp(-\frac{ r^2}{4t})
\mu(t)\, dt\, ,\\
& & \\
I_2 &=& \int_0^{2K} t^{-d/2}\exp(- \frac{r^2}{t})\mu(t)\, dt
= 4^{-d/2+1} \int_0^{K/2} s^{-d/2}\exp(-\frac{ r^2}{4s})\mu(4s)\, ds \\
&\ge & c_1^{-2} 4^{-d/2+1}\int_0^{K/2} s^{-d/2}
\exp(-\frac{ r^2}{4s})\mu(s)\,ds. \\
\end{eqnarray*}
Combining the three displays above we get that $j(2r)\ge c_3\, j(r)$
for all $ r\in (0, K)$.
To prove \eqref{ksv-H:2} we first note that for all $t\ge 2$ and all
$r\ge 1$ it holds that
$$
\frac{(r+1)^2}{t}-\frac{r^2}{t-1}\le 1\, .
$$
This implies that
\begin{equation}\label{ksv-a}
\exp\left(-\frac{(r+1)^2}{4t}\right) \ge e^{-1/4} \exp\left(-\frac{ r^2}{4(t-1)}\right), \quad
\mbox{ for all }r>1, t>2\, .
\end{equation}
Now we have
\begin{eqnarray*}
\lefteqn{j(r+1)= \int_0^{\infty} t^{-d/2}\exp(-\frac{(r+1)^2}{4t}) \mu(t)\, dt }\\
&\ge & \frac12 \left( \int_0^8 t^{-d/2}\exp(-\frac{(r+1)^2}{4t})
\mu(t)\, dt +\int_3^{\infty}t^{-d/2}\exp(-\frac{(r+1)^2}{4t})
\mu(t)\, dt \right)\\
&=&\frac12(I_3+I_4).
\end{eqnarray*}
For $I_3$ note that $(r+1)^2\le 4r^2$ for all $r>1$. Thus
\begin{eqnarray*}
I_3 &=& \int_0^8 t^{-d/2}\exp(-\frac{(r+1)^2}{4t}) \mu(t)\, dt
\ge \int_0^8 t^{-d/2}\exp(- r^2/t) \mu(t)\, dt \\
&=& 4^{-d/2+1}\int_0^2 s^{-d/2}\exp(-\frac{ r^2}{4s}) \mu(4s)\, ds \\
& \ge & c_1^{-2} 4^{-d/2+1}\int_0^2 s^{-d/2}\exp(-\frac{ r^2}{4s}) \mu(s)\, ds\, ,\\
& &\\
I_4&=& \int_3^{\infty}t^{-d/2}\exp(-\frac{(r+1)^2}{4t}) \mu(t)\, dt\\
& \ge & \int_3^{\infty}t^{-d/2}\exp\{-1/4\} \exp(-\frac{ r^2}{4(t-1)})\, \mu(t)\, dt\\
&=&e^{-1/4} \int_2^{\infty} (s+1)^{-d/2}\exp(-\frac{ r^2}{4s})
\, \mu(s+1)\, ds\\
&\ge & 2^{-d/2}\, c_2^{-1}\, e^{-1/4} \int_2^{\infty} s^{-d/2}\exp(-\frac{ r^2}{4s})
\mu(s)\, ds\, ,
\end{eqnarray*}
where in the last step we used $(s+1)^{-d/2}\ge 2^{-d/2}s^{-d/2}$ for $s\ge 2$ and \eqref{ksv-nu infty}.
Combining the three displays above we get that
$j(r+1)\ge c_4\, j(r)$ for all $ r>1$.
{
$\Box$
}
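In the stable case, where $j(r)=c\, r^{-d-\alpha}$ for some $c>0$, both estimates are immediate and hold for all $r>0$:
$$
\frac{j(r)}{j(2r)}=2^{d+\alpha}
\qquad\hbox{and}\qquad
\frac{j(r)}{j(r+1)}=\Big(1+\frac1r\Big)^{d+\alpha}\le 2^{d+\alpha}\, ,\quad r\ge 1\, .
$$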
\subsection{Some results on subordinate Brownian motion in
${\mathbb R}$}\label{ksv-ss:1dsbm}
In this
subsection
we assume $d=1$. We will consider subordinate
Brownian motions in ${\mathbb R}$. Let $B=(B_t:\, t\ge 0)$ be a Brownian
motion in ${\mathbb R}$, independent of $S$, with
$$
{\mathbb E}\left[e^{i\theta(B_t-B_0)}\right] =e^{-t\theta^2}, \qquad\,
\forall \theta\in {\mathbb R}, t>0.
$$
The subordinate Brownian motion $X=(X_t:t\ge 0)$ in ${\mathbb R}$ defined by
$X_t=B_{S_t}$ is a symmetric L\'evy process with the characteristic
exponent $\Phi(\theta)=\phi(\theta^2)$ for all $\theta\in {\mathbb R}.$
In the first part of this
subsection,
up to Corollary \ref{ksv-c:vsm}, we do
not need to assume that $\phi$ satisfies the assumption ({\bf H}).
Let $\overline{X}_t:=\sup\{0\vee X_s:0\le s\le t\}$ and let
$L=(L_t:\, t\ge 0)$ be a local time of $\overline{X}-X$ at $0$. $L$
is also called a local time of the process $X$ reflected at the
supremum. Then the right continuous inverse $L^{-1}_t$ of $L$ is a
subordinator and is called the ladder time process of $X$. The
process $\overline{X}_{L^{-1}_t}$ is also a subordinator and is
called the ladder height process of $X$. (For the basic properties
of the ladder time and ladder height processes, we refer our readers
to \cite[Chapter 6]{Ber}.) Let $\chi$ be the Laplace exponent of the
ladder height process of $X$. It follows from \cite[Corollary
9.7]{Fris} that
\begin{equation}\label{ksv-e:formula4leoflh}
\chi(\lambda)=
\exp\left(\frac1\pi\int^{\infty}_0\frac{\log(\Phi(\lambda\theta))}
{1+\theta^2}d\theta \right)
=\exp\left(\frac1\pi\int^{\infty}_0\frac{\log(
\phi(\lambda^2\theta^2))}{1+\theta^2}d\theta
\right), \quad \forall \lambda>0.
\end{equation}
The next result, first proved independently in \cite{KSV3} and
\cite{Kw}, tells us that $\chi$ is also a complete Bernstein function.
The proof presented below is taken from \cite{KSV3}.
\begin{prop}\label{ksv-p:chi is cbf}
Suppose $\phi$, the Laplace exponent of the subordinator $S$, is a
complete Bernstein function. Then the Laplace exponent $\chi$ of the
ladder height process of the subordinate Brownian motion
$X_t=B_{S_t}$ is also a complete Bernstein function.
\end{prop}
\noindent{\bf Proof.}
It follows from \cite[Theorem 6.10]{SSV} that $\phi$ has
the following representation:
\begin{equation}\label{ksv-e:exp-repr}
\log \phi(\lambda)=\gamma
+\int^\infty_0\left(\frac{t}{1+t^2}-\frac1{\lambda+t}\right)\eta(t)dt,
\end{equation}
where $\eta$ is a function such that $0\le \eta(t)\le 1$ for all
$t>0$.
By \eqref{ksv-e:exp-repr} and \eqref{ksv-e:formula4leoflh}, we have
\begin{eqnarray*}
\log \chi(\lambda)
=\frac{\gamma}2+\frac1{\pi}\int^\infty_0\int^\infty_0
\left(\frac{t}{1+t^2}-\frac1{\lambda^2\theta^2+t}\right)\eta(t)dt
\frac{d\theta}{1+\theta^2}.
\end{eqnarray*}
By using $0\le \eta(t)\le 1$, we have
\begin{eqnarray*}
\eta(t)\left|\frac{t}{1+t^2}-\frac1{\lambda^2\theta^2+t}\right|
\frac{1}{1+\theta^2}&\le& \frac{1}{1+t^2} \frac{1}{1+\theta^2}\left(\frac1{\lambda^2
\theta^2+t }+ \frac{\lambda^2 \theta^2t}{\lambda^2 \theta^2+t
}\right)\\
& \le& \frac{1}{1+t^2} \left(\frac1{\lambda^2
\theta^2+t }+ \frac{\lambda^2 t}{\lambda^2 \theta^2+t
}\right).
\end{eqnarray*}
Since
\begin{eqnarray*}
\int^\infty_0\frac1{\lambda^2 \theta^2+t }\, d\theta
=\frac1t\int^\infty_0\frac1{\frac{\lambda^2\theta^2}t+1}\, d\theta=\frac1t\frac{\sqrt{t}}{\lambda}\int^\infty_0\frac1{u^2+1}\, du
= \frac{\pi}{2\lambda\sqrt{t}},
\end{eqnarray*}
we can use Fubini's theorem to get
\begin{eqnarray}
\log \chi(\lambda)
&=&\frac{\gamma}2+\int^\infty_0\left(\frac{t}{2(1+t^2)}-
\frac1{2\sqrt{t}(\lambda+\sqrt{t})}\right)\eta(t)dt\label{ksv-e:logexp4chi}\\
&=&\frac{\gamma}2+\int^\infty_0\left(\frac{t}{2(1+t^2)}-
\frac1{2(1+t)}\right)\eta(t)dt
\nonumber\\
& & +\int^\infty_0
\left(\frac1{2(1+t)}-\frac1{2\sqrt{t}(\lambda+\sqrt{t})}\right)\eta(t)dt\nonumber\\
&=&\gamma_1+\int^\infty_0\left(\frac{s}{1+s^2}-\frac1{\lambda+s}
\right)\eta(s^2)ds.\nonumber
\end{eqnarray}
Applying \cite[Theorem 6.10]{SSV} we get that $\chi$ is a complete
Bernstein function. {
$\Box$
}
The potential measure of the ladder height process of $X$ is denoted
by $V$ and its density by $v$. We will also use $V$ to denote the
renewal function of $X$: $V(t):=V((0,t))=\int_0^t v(s)\, ds$.
The following result was first proved in \cite{KSV3}.
\begin{prop}\label{ksv-p:chiphi}
$\chi$ is related to $\phi$ by the following relation
$$
e^{-\pi/2}\sqrt{\phi(\lambda^2)}\le \chi(\lambda)\le e^{\pi/2}\sqrt{\phi(\lambda^2)}\, ,\qquad
\textrm{for all }\lambda>0\, .$$
\end{prop}
\noindent{\bf Proof.}
According to \eqref{ksv-e:logexp4chi}, we have
$$
\log \chi(\lambda)
=\frac{\gamma}2+\frac12\int^\infty_0\left(\frac{t}{1+t^2}-
\frac1{\sqrt{t}(\lambda+\sqrt{t})}\right)\eta(t)dt\, .
$$
Together with representation \eqref{ksv-e:exp-repr} we get that for all $\lambda >0$
\begin{eqnarray*}
\lefteqn{\left|\, \log \chi(\lambda)-\frac12 \log \phi(\lambda^2)\, \right| }\\
&=&
\frac12\left|\int_0^{\infty}\left(\Big(\frac{t}{1+t^2}- \frac1{\sqrt{t}
(\lambda+\sqrt{t})}\Big) -\Big(\frac{t}{1+t^2}-\frac1{\lambda^2+t}
\Big)\right)\eta(t)\, dt\right|\\
&\le &\frac12\int_0^{\infty}\frac{\lambda(\sqrt{t}+\lambda)}{(\lambda^2+t)
\sqrt{t}(\lambda+\sqrt{t})}\, dt=\frac12\int_0^{\infty}\frac{\lambda}{(\lambda^2+t)\sqrt{t}}\, dt =\frac{\pi}{2}\,
.
\end{eqnarray*}
This implies that
$$
-\pi/2 \le \log \chi(\lambda)-\frac12 \log \phi(\lambda^2) \le \pi/2\,
,\qquad \textrm{for all }\lambda>0\, ,
$$
i.e.,
$$
e^{-\pi/2}\le \chi(\lambda)\phi(\lambda^2)^{-1/2}\le e^{\pi/2}\, ,\qquad
\textrm{for all }\lambda>0\, .
$$
{
$\Box$
}
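In the stable case $\phi(\lambda)=\lambda^{\alpha/2}$ the formula \eqref{ksv-e:formula4leoflh} can be evaluated exactly: using $\int_0^{\infty}\frac{d\theta}{1+\theta^2}=\frac{\pi}{2}$ and $\int_0^{\infty}\frac{\log\theta}{1+\theta^2}\, d\theta=0$,
$$
\chi(\lambda)=\exp\left(\frac{\alpha}{\pi}\int_0^{\infty}\frac{\log(\lambda\theta)}{1+\theta^2}\, d\theta\right)=\lambda^{\alpha/2}=\sqrt{\phi(\lambda^2)}\, ,
$$
so in this case the two-sided bound of Proposition \ref{ksv-p:chiphi} is an identity, without the factors $e^{\pm\pi/2}$.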
Combining the above two propositions with Corollary \ref{ksv-c2.3}, we obtain
\begin{corollary}\label{ksv-c:vsm}
Suppose $\phi$, the Laplace exponent of the subordinator $S$, is a
complete Bernstein function satisfying $\lim_{\lambda\to\infty}\phi(\lambda)=\infty$.
Then the potential measure of the
ladder height process of the subordinate Brownian motion
$X_t=B_{S_t}$ has a completely monotone density $v$. In particular,
$v$ and the renewal function $V$ are $C^{\infty}$ functions.
\end{corollary}
In the remainder of this paper we will always assume that $\phi$ satisfies
the assumption {\bf(H)}.
We will not explicitly mention this assumption anymore.
Since $\phi(\lambda)\asymp\lambda^{\alpha/2}\ell(\lambda)$ as $\lambda \to
\infty$, Proposition \ref{ksv-p:chiphi} implies that
\begin{equation}\label{ksv-e:abofkappaatinfty}
\chi(\lambda)\asymp\lambda^{\alpha/2}(\ell(\lambda^2))^{1/2}, \qquad
\lambda\to \infty.
\end{equation}
It follows from \eqref{ksv-e:abofkappaatinfty} that $\lim_{\lambda\to
\infty} \chi(\lambda)/\lambda=0$, hence the ladder height process
does not have a drift. Recall that $V(t)=V((0,t))=\int_0^t v(s)ds$
is the renewal function of the ladder height process of $X$. In
light of \eqref{ksv-e:abofkappaatinfty}, we have, as a consequence of
Theorem \ref{ksv-t:behofu}, the following result.
\begin{prop}\label{ksv-p:abofgf4lhpat0}
As $t\to 0$, we have
$$
V(t)\,\asymp
\phi(t^{-2})^{-1/2}\asymp\,\frac{t^{\alpha/2}}{(\ell(t^{-2}))^{1/2}}
$$
and
$$
v(t)\,\asymp
t^{-1}\phi(t^{-2})^{-1/2}\asymp\,\frac{t^{\alpha/2-1}}{ (\ell(t^{-2}))^{1/2}}\, .
$$
\end{prop}
\begin{remark}\label{ksv-r:abofgf4lhpat0}{\rm
It follows immediately from the proposition above that there exists
a positive constant $c>0$ such that $V(2t)\le c V(t)$ for all $t\in
(0,2)$. }
\end{remark}
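For instance, in the stable case $\phi(\lambda)=\lambda^{\alpha/2}$ one has $\chi(\lambda)=\lambda^{\alpha/2}$, hence
$$
v(t)=\frac{t^{\alpha/2-1}}{\Gamma(\alpha/2)}
\qquad\hbox{and}\qquad
V(t)=\frac{t^{\alpha/2}}{\Gamma(1+\alpha/2)}\, ,
$$
which matches Proposition \ref{ksv-p:abofgf4lhpat0} (with $\ell\equiv 1$) for all $t>0$.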
It follows from \eqref{ksv-e:abofkappaatinfty} above and
\cite[Lemma 7.10]{Ky} that the process $X$ does not creep upwards.
Since $X$ is symmetric, we know that $X$ also does not creep
downwards. Thus if, for any $a\in {\mathbb R}$, we define
$$
\tau_a=\inf\{t>0: X_t<a\}, \quad \sigma_a=\inf\{t>0: X_t\le a\},
$$
then we have
\begin{equation}\label{ksv-e:firstexittime}
{\mathbb P}_x(\tau_a=\sigma_a)=1, \quad x>a.
\end{equation}
Let $G_{(0, \infty)}(x, y)$ be the Green function of
$X$ in $(0, \infty)$.
Then we have the following result.
\begin{prop}\label{ksv-p:Greenf4kpXonhalfline} For any $x, y>0$ we have
$$
G_{(0, \infty)}(x, y)=\left\{\begin{array}{ll}
\int^x_0v(z)v(y+z-x)dz, & x\le y,\\
\int^x_{x-y}v(z)v(y+z-x)dz, & x>y.
\end{array}\right.
$$
\end{prop}
\noindent{\bf Proof.}
Let $X^{(0,\infty)}$ be the process obtained by killing $X$ upon exiting from
$(0, \infty)$.
By using (\ref{ksv-e:firstexittime}) above and \cite[Theorem 20,
p.~176]{Ber} we get that for any nonnegative function $f$ on $(0,
\infty)$,
\begin{equation}\label{ksv-e:e1inpfoformforgf}
{\mathbb E}_x\left[ \int_0^{\infty} f(X^{(0, \infty)}_t)\, dt\right]=k
\int^{\infty}_0 \int^x_0v(z)f(x+y-z)v(y)\, dz\, dy\, ,
\end{equation}
where $k$ is the constant depending on the normalization of the
local time of the process $X$ reflected at its supremum. We choose
$k=1$. Then
\begin{eqnarray}\label{ksv-e:e2inpfoformforgf}
&&{\mathbb E}_x\left[ \int_0^{\infty} f(X^{(0, \infty)}_t)\, dt\right]
\,=\,\int_0^{\infty} \, v(y)\int_0^x \, v(z)f(x+y-z) dz dy\nonumber\\
&&=\int_0^x \, v(z)\int_0^{\infty} v(y)f(x+y-z) dy dz
\,=\,\int_0^x \, v(z)\int_{x-z}^{\infty}\, v(w+z-x)f(w) dw dz\nonumber\\
&&=\int_0^x f(w) \int_{x-w}^x \, v(z)v(w+z-x) dzdw+ \int_x^{\infty}
f(w) \int_0^x \, v(z)v(w+z-x) dzdw\, .\nonumber \\
&&
\end{eqnarray}
On the other hand,
\begin{equation}\label{ksv-e:e3inpfoformforgf}
{\mathbb E}_x\left[\int_0^{\infty} f(X^{(0, \infty)}_t)\, dt\right]
=\int_0^{\infty}G_{(0, \infty)}(x,w)f(w)\, dw.
\end{equation}
By comparing (\ref{ksv-e:e2inpfoformforgf}) and
(\ref{ksv-e:e3inpfoformforgf}) we arrive at our desired conclusion. {
$\Box$
}
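As an illustration, take $\phi(\lambda)=\lambda^{1/2}$ (so $\alpha=1$ and $v(z)=(\pi z)^{-1/2}$). Then, for $0<x\le y$, the proposition gives
$$
G_{(0, \infty)}(x, y)=\frac1\pi\int_0^{x}\frac{dz}{\sqrt{z(y-x+z)}}
=\frac1\pi\log\frac{(\sqrt{x}+\sqrt{y})^2}{y-x}\, ,
$$
and the case $x>y$ follows by symmetry.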
For any $r>0$, let $G_{(0, r)}$ be the Green function
of $X$ in $(0,
r)$.
Then we have the following result.
\begin{prop}\label{ksv-p:upbdongfofkpinfiniteinterval}
For all $r>0$ and all $x\in (0,r)$
$$
\int_0^r G_{(0,r)}(x,y)\, dy \le 2 V(x) V(r)\, .
$$
In particular, for any $R>0$, there exists $C_6=C_6(R)>0$ such that
for all $r\in (0,R)$ and all $x\in (0,r)$,
$$
\int^r_0 G_{(0, r)}(x, y)dy \le C_6 (\phi(r^{-2})\phi(x^{-2}))^{-1/2}
\asymp\frac{r^{\alpha/2}}{(\ell(r^{-2}))^{1/2}}\, \frac{x^{\alpha/2}}{(\ell(x^{-2}))^{1/2}}\, .
$$
\end{prop}
\noindent{\bf Proof.} For any $x\in (0, r)$, we have
\begin{eqnarray*}
&&\int^r_0G_{(0, r)}(x, y)dy
\le \int^r_0G_{(0, \infty)}(x, y)dy\\
&&=\int^x_0\int^x_{x-y}v(z)v(y+z-x)dzdy+
\int^r_x\int^x_0v(z)v(y+z-x)dzdy\\
&&=\int^x_0v(z)\int^x_{x-z}v(y+z-x)dydz
+\int^x_0v(z)\int^r_xv(y+z-x)dydz\, \le\, 2\,V(r)\,V(x).
\end{eqnarray*}
Now the desired conclusion follows easily from Proposition
\ref{ksv-p:abofgf4lhpat0}.
{
$\Box$
}
As a consequence of the result above, we immediately get the
following.
\begin{corollary}\label{ksv-p:upbdongfofkpinfiniteinterval2}
For all $r>0$ and all $x\in (0,r)$
$$
\int_0^r G_{(0,r)}(x,y)\, dy \le 2 V(r)\big(V(x) \wedge V(r-x)\big)\, .
$$
In particular, for any $R>0$, there exists $C_7=C_7(R)>0$ such that for all $x\in (0,
r)$, and $r\in (0, R)$,
\begin{eqnarray*}
\int^r_0G_{(0, r)}(x, y)dy&\le&
C_7 (\phi(r^{-2}))^{-1/2}\left((\phi(x^{-2}))^{-1/2}\wedge
(\phi((r-x)^{-2}))^{-1/2}\right) \\
&\asymp&\frac{r^{\alpha/2}}{(\ell(r^{-2}))^{1/2}}
\left(\frac{x^{\alpha/2}}{(\ell(x^{-2}))^{1/2}}\wedge
\frac{(r-x)^{\alpha/2}}{(\ell((r-x)^{-2}))^{1/2}}\right)\, .
\end{eqnarray*}
\end{corollary}
\noindent{\bf Proof.} The first inequality is a consequence of the identity $\int^r_0
G_{(0, r)}(x, y)dy=\int^r_0 G_{(0, r)}(r-x, y)dy$ which is true by
symmetry of the process $X$. The second one now follows exactly as
in the proof of Proposition \ref{ksv-p:upbdongfofkpinfiniteinterval}.
{
$\Box$
}
\begin{remark}\label{ksv-r:upbdongfofkpinfiniteinterval2}{\rm
With self-explanatory notation, an immediate consequence of the
above corollary is the following estimate
\begin{equation}\label{ksv-e:upbdongfofkpinfiniteinterval2}
\int_{-r}^r G_{(-r,r)}(x,y)\, dy \le 2 V(2r)\big(V(r+x) \wedge V(r-x)\big)\, .
\end{equation}
}
\end{remark}
\section{Harnack inequality and Boundary Harnack principle}\label{ksv-sec-hibhp}
From now on we will always assume that $X$ is a subordinate Brownian
motion in ${\mathbb R}^d$. Recall that {\bf (H)} is the standing assumption
on the Laplace exponent $\phi$.
The goal of this section is to show that
the Harnack inequality and the boundary Harnack principle hold for
$X$. The infinitesimal generator ${\bf L}$ of the corresponding semigroup
is given by
\begin{equation}\label{ksv-3.1}
{\bf L} f(x)=\int_{{\mathbb R}^d}\left( f(x+y)-f(x)-y\cdot \nabla f(x)
{\bf 1}_{\{|y|\le1\}}
\right)\, J(y)dy
\end{equation}
for $f\in C_b^2({\mathbb R}^d)$. Moreover, for every $f\in C_b^2({\mathbb R}^d)$
$$
f(X_t)-f(X_0)-\int_0^t {\bf L} f(X_s)\, ds
$$
is a ${\mathbb P}_x$-martingale for every $x\in {\mathbb R}^d$. We recall the L\'evy
system formula for $X$ which describes the jumps of the process $X$:
for any non-negative measurable function $f$ on ${\mathbb R}_+ \times
{\mathbb R}^d\times {\mathbb R}^d$ with $f(s, y, y)=0$ for all $y\in {\mathbb R}^d$, any
stopping time $T$ (with respect to the filtration of
$X$)
and any
$x\in {\mathbb R}^d$,
\begin{equation}\label{ksv-e:levy}
{\mathbb E}_x \left[\sum_{s\le T} f(s,X_{s-}, X_s) \right]= {\mathbb E}_x \left[
\int_0^T \left( \int_{{\mathbb R}^d} f(s,X_s, y) J(X_s,y) dy \right) ds
\right].
\end{equation}
Here $J(x,y):=J(y-x)=j(|y-x|)$. (See, for example, \cite[Proof of Lemma 4.7]{CK1} and \cite[Appendix A]{CK2}.)
\subsection{Harnack inequality}\label{ksv-ss:hi}
It follows from Theorem \ref{ksv-t:Jorigin} and the 0-version of
\cite[Propositions 1.5.8 and 1.5.10]{BGT} that
\begin{equation}\label{ksv-e:svphi1}
r^{-2}\int_0^r s^{d+1} j(s)
ds\asymp\frac{\ell(r^{-2})}{r^\alpha}\asymp \phi(r^{-2}), \qquad
r\to 0
\end{equation}
and
\begin{equation}\label{ksv-e:svphi2}
\int_r^{\infty} s^{d-1} j(s) ds
\asymp\frac{\ell(r^{-2})}{r^\alpha}\asymp \phi(r^{-2}), \qquad r\to
0.
\end{equation}
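In the stable case $j(s)=c\, s^{-d-\alpha}$ both relations can be seen directly:
$$
r^{-2}\int_0^r s^{d+1} j(s)\, ds=\frac{c}{2-\alpha}\, r^{-\alpha}
\qquad\hbox{and}\qquad
\int_r^{\infty}s^{d-1}j(s)\, ds=\frac{c}{\alpha}\, r^{-\alpha}\, ,
$$
while $\phi(r^{-2})=r^{-\alpha}$.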
For any open set $D$, we use $\tau_D$ to denote the first exit time
from $D$, i.e., $\tau_D=\inf\{t>0: \, X_t\notin D\}$.
\begin{lemma}\label{ksv-L3.1} There exists a constant $C_8>0$ such that for
every $r\in (0,1)$ and every $t>0$,
$$
{\mathbb P}_x\left(\sup_{s\le t} |X_s-X_0|>r\right) \le C_8 \phi(r^{-2}) t\, .
$$
\end{lemma}
\noindent{\bf Proof.} It suffices to prove the lemma for $x=0$. Let $f\in
C^2_b({\mathbb R}^d)$, $0\leq f \leq 1$, $f(0)=0$, and $f(y)=1$ for all
$|y|\ge 1$. Let $c_1=\sup_{y}\sum_{j,k} |(\partial^2/\partial
y_j\partial y_k) f(y)|$. Then $|f(z+y)-f(z) -y\cdot \nabla f(z)|\le
\frac{c_1}{2} |y|^2$. For $r\in (0,1)$, let $f_r(y)=f(y/r)$. Then the
following estimate is valid:
\begin{eqnarray*}
|f_r(z+y)-f_r(z) -y\cdot \nabla f_r(z){\bf 1}_{\{|y|\le r\}}| &\le&
\frac{c_1}{2}
\frac{|y|^2}{r^2}{\bf 1}_{\{|y|\le r\}} + {\bf 1}_{\{|y|\ge r\}}\\
&\le& c_2({\bf 1}_{\{|y|\le r\}}
\frac{|y|^2}{r^2}+{\bf 1}_{\{|y|\ge r\}})\, .
\end{eqnarray*}
By using \eqref{ksv-e:svphi1} and \eqref{ksv-e:svphi2}, we get the following
estimate:
\begin{eqnarray}\label{ksv-referee0}
|{\bf L} f_r(z)| &\le & \int_{{\mathbb R}^d}
|f_r(z+y)-f_r(z) -y\cdot \nabla f_r(z){\bf 1}_{(|y|\le r)}| \, J(y)dy \nonumber \\
& \le & c_2\int_{{\mathbb R}^d} \left({\bf 1}_{\{|y|\le r\}}
\frac{|y|^2}{r^2}+{\bf 1}_{\{|y|\ge r\}}\right)\, J(y)dy\nonumber \\
& \le &
C_8\phi(r^{-2}) \, ,
\end{eqnarray}
where the constant $C_8$ is independent of $r$. Further, by the
martingale property,
\begin{equation}\label{ksv-referee}
{\mathbb E}_0 f_r(
X_{\tau_{B(0,r)}\wedge t}
) - f_r(0)= {\mathbb E}_0
\int_0^{\tau_{B(0,r)}\wedge t} {\bf L} f_r(X_s)\, ds
\end{equation}
implying the estimate
$$
{\mathbb E}_0 f_{r}(
X_{\tau_{B(0,r)}\wedge t}) \leq
C_8\phi(r^{-2}) t\, .
$$
If $X$ exits $B(0,r)$ before time $t$, then
$f_{r}(
X_{\tau_{B(0,r)}\wedge t})=1$, so the left hand side is larger
than ${\mathbb P}_0(\tau_{B(0,r)} \le t)$. {
$\Box$
}
\begin{lemma}\label{ksv-L3.2}
For every $r\in (0,1)$, and every $x\in {\mathbb R}^d$,
$$
\inf_{z\in B(x,r/2)} {\mathbb E}_z \left[\tau_{B(x,r)} \right] \geq
\frac{1}{C_8 \phi((r/2)^{-2})}\, ,
$$
where $C_8$ is the constant from Lemma \ref{ksv-L3.1}.
\end{lemma}
\noindent{\bf Proof.}
Using \eqref{ksv-referee0} and \eqref{ksv-referee}
we get that for any $t>0$ and $z\in B(x,r/2)$,
\begin{eqnarray*}
{\mathbb P}_0(\tau_{B(0,r/2)} \le t)&\le&
C_8\phi((r/2)^{-2}){\mathbb E}_0
\left[\tau_{B(0,r/2)}\wedge t\right]\\
&=&
C_8\phi((r/2)^{-2}){\mathbb E}_{z}
\left[\tau_{B(z,r/2)}\wedge t\right]\\
&\le &
C_8\phi((r/2)^{-2}){\mathbb E}_{z}
\left[\tau_{B(x,r)}\wedge t\right].
\end{eqnarray*}
Letting $t\to\infty$, we immediately
get the desired conclusion.
{
$\Box$
}
\begin{lemma}\label{ksv-L3.3}
There exists a constant $C_9>0$ such that for every $r\in (0,1)$ and
every $x\in {\mathbb R}^d$,
$$
\sup_{z\in B(x,r)}
{\mathbb E}_z \left[{\tau}_{B(x,r)}\right] \leq
\frac{C_9}{\phi(r^{-2})}\, .
$$
\end{lemma}
\noindent{\bf Proof.}
Let $r\in (0,1)$, and let $x\in {\mathbb R}^d$. Using the L\'evy system
formula \eqref{ksv-e:levy}, we get
\begin{eqnarray*}
1 & \geq & {\mathbb P}_z (|
X_{{\tau}_{B(x,r)}}-x|>r ) \\
& = & \int_{B(x,r)}G_{B(x,r)}(z,y) \int_{
\overline{B(x,r)}^c
} j(|u-y|)\, du \, dy \, ,
\end{eqnarray*}
where $G_{B(x,r)}$ denotes the Green function of the process
$X$ in $B(x,r)$.
Now we estimate the inner integral.
Let $y\in B(x,r)$, $u\in
\overline{B(x,r)}^c$. If $u\in B(x,2)$, then $|u-y|\le
2|u-x|$, while for $u\notin B(x,2)$ we use $|u-y|\le |u-x|+1$. Then
\begin{eqnarray*}
\lefteqn{\int_{
\overline{B(x,r)}^c} j(|u-y|)\, du}\\
& = & \int_{
\overline{B(x,r)}^c\cap B(x,2)}
j(|u-y|)\, du+
\int_{
\overline{B(x,r)}^c\cap B(x,2)^c} j(|u-y|)\, du \\
& \ge &\int_{
\overline{B(x,r)}^c\cap B(x,2)} j(2|u-x|)\, du+
\int_{
\overline{B(x,r)}^c\cap B(x,2)^c} j(|u-x|+1)\, du \\
& \ge & \int_{
\overline{B(x,r)}^c\cap B(x,2)}c^{-1} j(|u-x|)\, du+
\int_{
\overline{B(x,r)}^c\cap B(x,2)^c} c^{-1}j(|u-x|)\, du \\
&=& \int_{
\overline{B(x,r)}^c}c^{-1}j(|u-x|)\, du\, ,
\end{eqnarray*}
where in the next to last line we used \eqref{ksv-H:1} and \eqref{ksv-H:2}.
Now, it follows from \eqref{ksv-e:svphi2} that
\begin{eqnarray*}
1 &\ge & \int_{B(x,r)}G_{B(x,r)}(z,y)\, dy
\int_{
\overline{B(x,r)}^c}c^{-1}j(|u-x|)\, du \\
& = &
{\mathbb E}_z \left[\tau_{B(x,r)} \right]
c^{-1}\, c_1\int_r^{\infty} s^{d-1} j(s)\, ds \\
& \ge & c_2\phi(r^{-2})
{\mathbb E}_z \left[\tau_{B(x,r)} \right]
\end{eqnarray*}
which implies the lemma. {
$\Box$
}
An improved version of the above lemma will be given in Proposition \ref{ksv-l:tau} later
on.
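To fix ideas, in the stable case $\phi(r^{-2})=r^{-\alpha}$, so Lemmas \ref{ksv-L3.1}--\ref{ksv-L3.3} take the familiar form
$$
{\mathbb P}_x\Big(\sup_{s\le t} |X_s-X_0|>r\Big) \le C_8\, t\, r^{-\alpha}
\qquad\hbox{and}\qquad
{\mathbb E}_z \left[\tau_{B(x,r)} \right]\asymp r^{\alpha}\, ,\quad z\in B(x,r/2)\, ,
$$
for $r\in (0,1)$.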
\begin{lemma}\label{ksv-L3.4}
There exists a constant $C_{10}>0$ such that for every $r\in (0,1)$,
every $x\in {\mathbb R}^d$, and any $A\subset B(x,r)$
$$
{\mathbb P}_y\left(T_A < {\tau}_{B(x,3r)}\right) \geq C_{10} \frac{|A|}{|B(x, r)|}, \qquad
\textrm{for all }y\in B(x,2r)\, ,
$$
where $T_A:=\inf\{t>0:\, X_t\in A\}$ denotes the first hitting time of $A$.
\end{lemma}
\noindent{\bf Proof.} Without loss of generality we may assume that ${\mathbb P}_y(T_A < {\tau}_{B(x,3r)})<1/4$; otherwise the claim holds trivially with $C_{10}=1/4$, since $|A|\le |B(x,r)|$.
Set $\tau={\tau}_{B(x,3r)}$. By Lemma \ref{ksv-L3.1},
${\mathbb P}_y(\tau\leq t) \leq {\mathbb P}_y(\tau_{B(y,r)}\leq t) \leq c_1
\phi(r^{-2}) t$. Choose $t_0= 1/(4c_1 \phi(r^{-2}))$, so that
${\mathbb P}_y(\tau\leq t_0) \leq 1/4$. Further, if $z\in B(x, 3r)$ and $u\in
A \subset B(x,r)$, then $|u-z| \leq 4r$.
Since $j$ is decreasing,
$j(|u-z|) \geq j(4r)$. Thus,
\begin{eqnarray*}
{\mathbb P}_y (T_A < \tau) & \geq & {\mathbb E}_y \sum_{s\leq T_A \wedge \tau \wedge
t_0}
{\bf 1}_{\{X_{s-}\neq X_s, X_s\in A\}} \\
& = & {\mathbb E}_y \int_0^{T_A \wedge \tau \wedge t_0}
\int_A j(|u-X_s|)\, du \, ds \\
& \geq & {\mathbb E}_y \int_0^{T_A \wedge \tau \wedge t_0} \int_A j(4r)\,
du \, ds \\
& = & j(4r) |A| {\mathbb E}_y[T_A \wedge \tau \wedge t_0] \, ,
\end{eqnarray*}
where in the second line we used properties of the L\'evy system.
Next,
\begin{eqnarray*}
{\mathbb E}_y[T_A\wedge \tau \wedge t_0] & \ge & {\mathbb E}_y[t_0; \, T_A\geq \tau \geq t_0] \\
& = & t_0 {\mathbb P}_y(T_A \ge \tau \ge t_0) \\
& \ge & t_0[1-{\mathbb P}_y(T_A < \tau)-{\mathbb P}_y(\tau <t_0)] \\
& \ge & \frac{t_0}{2} = \frac{1}{8 c_1 \phi(r^{-2})}\, .
\end{eqnarray*}
The last two displays give that
$$
{\mathbb P}_y (T_A < \tau) \geq j(4r) |A| \frac{1}{8 c_1 \phi(r^{-2})} =
\frac{1}{8 c_1} |A| \frac{j(4r)}{\phi(r^{-2})}.
$$
The claim now follows immediately from \eqref{ksv-e:reg-var} and
Theorem \ref{ksv-t:Jorigin}. {
$\Box$
}
\begin{lemma}\label{ksv-L3.5}
There exist positive constants $C_{11}$ and $C_{12}$ such that if
$r\in (0,1)$, $x\in {\mathbb R}^d$, $z\in B(x,r)$, and $H$ is a bounded
nonnegative function with support in $B(x,2r)^c$, then
$$
{\mathbb E}_z H(
X_{{\tau}_{B(x,r)}}) \leq C_{11} {\mathbb E}_z [{\tau}_{B(x,r)}] \int H(y)
j(|y-x|) \, dy \, ,
$$
and
$$
{\mathbb E}_z H(
X_{{\tau}_{B(x,r)}}) \geq C_{12} {\mathbb E}_z [{\tau}_{B(x,r)}] \int H(y)
j(|y-x|) \, dy \, .
$$
\end{lemma}
\noindent{\bf Proof.} Let $y\in B(x,r)$ and $u\in B(x,2r)^c$. If $u\in B(x,2)$ we use
the estimates
\begin{equation}\label{ksv-3.7}
2^{-1}|u-x|\le |u-y| \le 2|u-x|,
\end{equation}
while if $u\notin B(x,2)$ we use
\begin{equation}\label{ksv-3.8}
|u-x|-1\le |u-y|\le |u-x|+1.
\end{equation}
Let $B\subset B(x,2r)^c$. Then using the L\'evy system we get
$$
{\mathbb E}_z \left[ {\bf 1}_B(X_{\tau_{B(x,r)}}) \right] = {\mathbb E}_z \int_0^{\tau_{B(x,r)}} \int_B
j(|u-X_s|)\, du\, ds\, .
$$
By use of \eqref{ksv-H:1}, \eqref{ksv-H:2}, \eqref{ksv-3.7}, and \eqref{ksv-3.8},
the inner integral is estimated as follows:
\begin{eqnarray*}
\int_B j(|u-X_s|)\, du &=& \int_{B\cap B(x,2)} j(|u-X_s|)\, du +
\int_{B\cap B(x,2)^c} j(|u-X_s|)\, du \\
& \le & \int_{B\cap B(x,2)} j(2^{-1}|u-x|)\, du +
\int_{B\cap B(x,2)^c} j(|u-x|-1)\, du \\
& \le &\int_{B\cap B(x,2)} c j(|u-x|)\, du +
\int_{B\cap B(x,2)^c} c j(|u-x|)\, du \\
& = & c \int_B j(|u-x|)\, du.
\end{eqnarray*}
Therefore
\begin{eqnarray*}
{\mathbb E}_z \left[ {\bf 1}_B(X_{\tau_{B(x,r)}}) \right] & \le & {\mathbb E}_z \int_0^{\tau_{B(x,r)}}
c \int_B j(|u-x|)\, du \, ds \\
& = & c\, {\mathbb E}_z (\tau_{B(x,r)}) \int {\bf 1}_B(u) j(|u-x|)\, du\, .
\end{eqnarray*}
Using linearity we get the above inequality when ${\bf 1}_B$ is replaced
by a simple function. Approximating $H$ by simple functions and
taking limits we have the first inequality in the statement of the
lemma.
The second inequality is proved in the same way. {
$\Box$
}
\begin{defn}\label{ksv-def:har1}
Let $D$ be an open subset of ${\mathbb R}^d$. A function $u$ defined on
${\mathbb R}^d$ is said to be
\begin{description}
\item{(1)} harmonic in $D$ with respect to $X$ if
$$
{\mathbb E}_x\left[|u(X_{\tau_{B}})|\right] <\infty \quad \hbox{ and } \quad
u(x)= {\mathbb E}_x\left[u(X_{\tau_{B}})\right], \qquad x\in B,
$$
for every open set $B$ whose closure is a compact subset of $D$;
\item{(2)}
regular harmonic in $D$ with respect to $X$
if
it is harmonic in $D$ with respect to $X$ and
for each $x \in D$,
$u(x)= {\mathbb E}_x\left[u(X_{\tau_{D}})\right].$
\end{description}
\end{defn}
Now we give the proof of the Harnack inequality.
The proof below is basically the one given in \cite{SV04},
which is an adaptation of the proof given in \cite{BL02a};
however, it corrects some typos in the proof given in \cite{SV04}.
\begin{thm}\label{ksv-T:Har}
There exists $C_{13}>0$ such that, for any $r\in (0, 1/4)$,
$x_0\in{\mathbb R}^d$, and any function $u$ which is nonnegative on ${\mathbb R}^d$
and harmonic with respect to $X$ in $B(x_0,
17r)$, we have
$$
u(x)\le C_{13} u(y), \quad \textrm{for all }x, y\in B(x_0, r).
$$
\end{thm}
\noindent{\bf Proof.}
Without loss of generality we may assume that $u$ is strictly
positive in $B(x_0, 16r)$. Indeed, if $u(x)=0$ for some $x\in B(x_0,
16r)$, then by harmonicity $ 0=u(x)={\mathbb E}_x [u(X_{\tau_B})] $,
where $B=B(x,\varepsilon) \subset B(x_0, 16r)$. This and the fact that the
L\'evy measure of $X$ is supported on all of ${\mathbb R}^d$ and has a density
imply that $u=0$ a.e. with respect to Lebesgue measure. Moreover,
by the harmonicity, for every $y \in B(x_0, 16r)$,
$u(y)={\mathbb E}_y[u(X_{\tau_B} )]=0$ where $B=B(y,\delta)\subset B(x_0,
16r)$. Therefore, if $u(x)=0$ for some $x$, then $u$ is identically
zero in $B(x_0, 16 r)$ and there is nothing to prove.
We first assume $u$ is bounded on ${\mathbb R}^d$.
Using the harmonicity of $u$ and Lemma
\ref{ksv-L3.4}, one can show that $u$ is bounded from below on $B(x_0,
r)$ by a positive number. To see this, let $\varepsilon>0$ be such that
$F=\{x\in B(x_0, 3r)\setminus B(x_0, 2r): u(x)>\varepsilon\}$ has
positive Lebesgue measure. Take a compact subset $K$ of $F$ so that
it has positive Lebesgue measure. Then by Lemma \ref{ksv-L3.4}, for
$x\in B(x_0, r)$, we have
$$
u(x)\,=\,{\mathbb E}_x\left[u(
X_{T_K\wedge \tau_{B(x_0,9r)}}
)
\right]
\,>\,c\,\varepsilon\,\frac{|K|}{|B(x_0, 3r)|},
$$
for some $c>0$. By taking a constant multiple of $u$ we may assume
that $\inf_{B(x_0, r)}u =1/2$. Choose $z_0\in B(x_0, r)$ such that
$u(z_0)\le 1$. We want to show that $u$ is bounded above in $B(x_0,
r)$ by a positive constant independent of $u$ and $r\in (0, 1/4)$.
We will establish this by contradiction: If there exists a point
$x\in B(x_0, r)$ with $
u(x)=K$ where $K$ is too large, we can obtain
a sequence of points in $B(x_0, 2r)$ along which $u$ is unbounded.
Using
Lemmas
\ref{ksv-L3.2}, \ref{ksv-L3.3} and \ref{ksv-L3.5}, one can see that
there exists $c_1>0$ such that if $x\in {\mathbb R}^d$, $s\in (0, 1)$ and $H$
is a nonnegative bounded function with support in $B(x, 2s)^c$, then
for any $y, z\in B(x, s/2)$,
\begin{equation}\label{ksv-e:2.1}
{\mathbb E}_z H(
X_{\tau_{B(x, s)}}
)\,\le \,c_1\,{\mathbb E}_y H(
X_{\tau_{B(x, s)}}
).
\end{equation}
By Lemma \ref{ksv-L3.4}, there exists $c_2>0$ such that if $A\subset
B(x_0, 4r)$ then
\begin{equation}\label{ksv-e:2.2}
{\mathbb P}_y\left(T_A<\tau_{B(x_0, 16r)}\right)\,\ge\, c_2\,\frac{|A|}{|B(x_0, 4r)|}, \quad
\forall y\in B(x_0, 8r).
\end{equation}
Again by Lemma \ref{ksv-L3.4}, there exists $c_3>0$ such that if
$x\in{\mathbb R}^d$, $s\in (0, 1)$ and $F\subset B(x, s/3)$ with $|F|/|B(x,
s/3)|\ge 1/3$, then
\begin{equation}\label{ksv-e:2.3}
{\mathbb P}_x\left(T_F<\tau_{B(x, s)}\right)\,\ge\, c_3.
\end{equation}
Let
\begin{equation}\label{ksv-e:2.4}
\eta=\frac{c_3}3,\,\,\,\,\,\,\,\,\,\,\,
\zeta=(\frac13\wedge\frac1{c_1})\eta.
\end{equation}
Now suppose there exists $x\in B(x_0, r)$ with $u(x)=K$ for
$K>K_0:=\frac{2|B(x_0, 1)|}{c_2\zeta}\vee\frac{2(12)^d}{c_2\zeta}$. Let
$s$ be chosen so that
\begin{equation}\label{ksv-e:2.5}
|B(x, \frac{s}3)|=\frac{2|B(x_0, 4r)|}{c_2\zeta K}<1.
\end{equation}
Note that this implies
\begin{equation}\label{ksv-e:2.6}
s=12\left(\frac2{c_2\zeta}\right)^{1/d}rK^{-1/d}<r.
\end{equation}
Let us write $B_s$ for $B(x, s)$, $\tau_s$ for $\tau_{B(x, s)}$, and
similarly for $B_{2s}$ and $\tau_{2s}$. Let $A$ be a compact subset
of
$$
A'=\{y\in B(x, \frac{s}3): u(y)\ge \zeta K\}.
$$
It is well known that $u(X_t)$ is right continuous in
$[0,\tau_{B(x_0, 16r)})$. Since $z_0\in B(x_0, r)$ and $A'\subset B(x,
\frac{s}3)\subset B(x_0, 2r)$, we can apply (\ref{ksv-e:2.2}) to get
\begin{eqnarray*}
1&\ge&u(z_0)\ge {\mathbb E}_{z_0}[u(
X_{T_A\wedge\tau_{B(x_0, 16r)}}
){\bf 1}_{\{T_A<
\tau_{B(x_0, 16r)}\}}]\\
&\ge&\zeta K{\mathbb P}_{z_0}(T_A<\tau_{B(x_0, 16r)})\\
&\ge&c_2\zeta K\frac{|A|}{|B(x_0, 4r)|}.
\end{eqnarray*}
Hence
$$
\frac{|A|}{|B(x, \frac{s}3)|}\le\frac{|B(x_0, 4r)|} {c_2\zeta K
|B(x, \frac{s}3)|}=\frac12.
$$
This implies that $|A'|/|B(x, s/3)|\le 1/2$. Let $F$ be a compact
subset of $B(x, s/3)\setminus A'$ such that
\begin{equation}\label{ksv-e:2.7}
\frac{|F|}{|B(x, \frac{s}3)|}\ge \frac13.
\end{equation}
Let $H=u\cdot{\bf 1}_{B_{2s}^c}$. We claim that
$$
{\mathbb E}_x[u(
X_{\tau_s}
);
X_{\tau_s}
\notin B_{2s}]\le\eta K.
$$
If not, ${\mathbb E}_x H(
X_{\tau_s})>\eta K$, and by (\ref{ksv-e:2.1}), for all
$y\in B(x, s/3)$, we have
\begin{eqnarray*}
u(y)&=&{\mathbb E}_y u(
X_{\tau_s})\ge {\mathbb E}_y[u(
X_{\tau_s});
X_{\tau_s}\notin B_{2s}]\\
&\ge& c_1^{-1}{\mathbb E}_x H(
X_{\tau_s})\ge c_1^{-1}\eta K\ge \zeta K,
\end{eqnarray*}
contradicting (\ref{ksv-e:2.7}) and the definition of $A'$.
Let $M=\sup_{B_{2s}}u$. We then have
\begin{eqnarray*}
K&=&u(x)=
{\mathbb E}_x [u(
X_{\tau_s \wedge T_F}
)]\\
&=&{\mathbb E}_x[u(
X_{T_F}
); T_F<\tau_s]+
{\mathbb E}_x[u(
X_{\tau_s}); \tau_s<T_F,
X_{\tau_s}\in B_{2s}]\\
&&\,\,\,+
{\mathbb E}_x[u(
X_{\tau_s}); \tau_s<T_F,
X_{\tau_s}\notin B_{2s}]\\
&\le& \zeta K{\mathbb P}_x(T_F<\tau_s)+M{\mathbb P}_x(\tau_s<T_F)+\eta K\\
&=&\zeta K{\mathbb P}_x(T_F<\tau_s)+M(1-{\mathbb P}_x(T_F<\tau_s))+\eta K,
\end{eqnarray*}
or equivalently
$$
\frac{M}{K}\ge\frac{1-\eta-\zeta}{1-{\mathbb P}_x(T_F<\tau_s)} +\zeta .
$$
Using (\ref{ksv-e:2.3}) and (\ref{ksv-e:2.4}) we see that there exists
$\beta>0$ such that $M\ge K(1+2\beta)$. Therefore there exists
$x'\in B(x, 2s)$ with $u(x')\ge K(1+\beta)$.
Now suppose there exists $x_1\in B(x_0, r)$ with $u(x_1)=K_1>K_0$.
Define $s_1$ in terms of $K_1$ analogously to (\ref{ksv-e:2.5}). Using
the above argument (with $x_1$ replacing $x$ and $x_2$ replacing
$x'$), there exists $x_2\in B(x_1, 2s_1)$ with $u(x_2)=K_2\ge
(1+\beta)K_1$. We continue and obtain $s_2$ and then $x_3$, $K_3$,
$s_3$, etc. Note that $x_{i+1}\in B(x_i, 2s_i)$ and $K_i\ge
(1+\beta)^{i-1}K_1$. In view of (\ref{ksv-e:2.6}),
\begin{eqnarray*}\sum_{i=0}^{\infty}
|x_{i+1}-x_i|&\le& r+ 24 \left(\frac2{c_2\zeta}\right)^{1/d}r
\sum_{i=1}^{\infty}K_i^{-1/d}\\
& \le & r + 24 \left(\frac2{c_2\zeta}\right)^{1/d} K_1^{-1/d}r
\sum_{i=1}^{\infty} (1+\beta)^{-(i-1)/d}\\
&=& r + 24 \left(\frac2{c_2\zeta}\right)^{1/d} K_1^{-1/d}\,r\sum^\infty_{i=0}(1+\beta)^{-i/d}\\
&=& r+ c_4rK^{-1/d}_1
\end{eqnarray*}
where $c_4:=24 (\frac2{c_2\zeta})^{1/d}\sum^\infty_{i=0}(1+\beta)^{-i/d}$.
So if $K_1>c^d_4 \vee K_0$ then we have a sequence $x_1, x_2, \dots$
contained in $B(x_0, 2r)$ with $u(x_i)\ge
(1+\beta)^{i-1}K_1\rightarrow\infty$, a contradiction to $u$ being
bounded. Therefore we cannot take $K_1$ larger than $c^d_4\vee
K_0$, and thus $\sup_{y\in B(x_0, r)}u(y)\le c^d_4\vee K_0$, which
is what we set out to prove.
In the case that $u$ is unbounded, one can follow the simple limit
argument in the proof of \cite[Theorem 2.4]{SV04} to finish the
proof. {
$\Box$
}
By using the standard chain argument one can derive the following
form of the Harnack inequality.
\begin{corollary}\label{ksv-c:hi}
For every $a \in (0,1)$, there exists $C_{14}=C_{14}(a)>0$ such that
for every $r \in (0, 1/4)$, $x_0 \in {\mathbb R}^d$, and any function $u$ which is nonnegative on ${\mathbb R}^d$
and harmonic with respect to $X$ in $B(x_0, r)$, we have
$$
u(x)\le C_{14} u(y), \quad \textrm{for all }x, y\in B(x_0, ar)\, .
$$
\end{corollary}
\subsection{Some estimates for the Poisson kernel}
Recall that for any open set $D$ in ${\mathbb R}^d$,
$\tau_D$
is the first exit time of $X$ from
$D$.
We recall from Subsection \ref{ksv-ss:sbm} that $X$ has a transition
density $q(t, x, y)$, which is jointly continuous. Using this and the strong Markov property, one can easily check that
$$
q_D(t, x, y):=q(t, x, y)-{\mathbb E}_x[q(t-\tau_D, X_{\tau_D}, y); t>\tau_D], \quad x, y \in D
$$
is continuous and the transition density of $X^D$.
For any bounded open set $D\subset {\mathbb R}^d$, we
will use $G_D$ to denote the Green function of $X^D$, i.e.,
$$
G_D(x, y):=\int^\infty_0 q_D(t, x, y)dt, \quad x, y\in D.
$$
Note that $G_D(x,y)$ is continuous on $(D\times D)\setminus\{(x, x): x\in D\}$.
We will frequently use the well-known fact that
$G_D(\cdot, y)$ is harmonic in $D\setminus\{y\}$, and regular
harmonic in $D\setminus \overline{B(y,\varepsilon)}$ for every
$\varepsilon >0$.
Using the L\'{e}vy system for $X$, we know that for every bounded
open subset $D$, every $f \ge 0$ and all $x \in D$,
\begin{equation}\label{ksv-newls}
{\mathbb E}_x\left[f(X_{\tau_D});\,X_{\tau_D-} \not= X_{\tau_D} \right] =
\int_{\overline{D}^c} \int_{D} G_D(x,z) J(z-y) dz f(y)dy.
\end{equation}
For notational convenience, we define
\begin{equation}\label{ksv-PK}
K_D(x,y)\,:= \int_{D} G_D(x,z) J(z-y) dz, \qquad (x,y) \in D \times
\overline{D}^c.
\end{equation}
Thus \eqref{ksv-newls} can be simply written as
\begin{equation}\label{ksv-newls-2}
{\mathbb E}_x\left[f(X_{\tau_D});\,X_{\tau_D-} \not= X_{\tau_D} \right]
=\int_{\overline{D}^c} K_D(x,y)f(y)dy\, ,
\end{equation}
revealing $K_D(x,y)$ as a density of the exit distribution of $X$
from $D$. The function $K_D(x,y)$ is called
the Poisson kernel of $X$. Using the continuity of $G_D$ and $J$, one can
easily check that $K_D$ is continuous on $D \times \overline{D}^c$.
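For orientation, we remark that in the special case $\phi(\lambda)=\lambda^{\alpha/2}$ (i.e.\ $\ell\equiv 1$, the rotationally symmetric $\alpha$-stable process) the Poisson kernel of a ball is given by the classical formula of Blumenthal, Getoor and Ray,
$$
K_{B(x_0,r)}(x,y)\,=\,C(d,\alpha)\left(\frac{r^2-|x-x_0|^2}{|y-x_0|^2-r^2}\right)^{\alpha/2}\frac{1}{|x-y|^{d}},
\qquad x\in B(x_0,r),\ y\in \overline{B(x_0,r)}^c,
$$
for an explicit constant $C(d,\alpha)>0$; the Poisson kernel estimates obtained in this subsection can be viewed as one-sided analogues of this formula in the general subordinate case.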
The following proposition is an improvement of Lemma \ref{ksv-L3.3}. The
idea of the proof comes from \cite{Sz2}.
\begin{prop}\label{ksv-l:tau}
For all $r>0$ and all $x_0\in {\mathbb R}^d$,
$$
{\mathbb E}_x[\tau_{B(x_0,r)}]\,\le\, 2V(2r) V(r-|x-x_0|)\, ,\qquad x\in B(x_0, r)\, .
$$
In particular, for any $R>0$, $r\in (0, R)$ and $x_0 \in {\mathbb R}^d$,
\begin{eqnarray*}
{\mathbb E}_x[\tau_{B(x_0,r)}]&\le & C_{7}\, (\phi(r^{-2})\phi((r-|x-x_0|)^{-2}))^{-1/2}\\
&\asymp &\frac{r^{\alpha/2}}{(\ell(r^{-2}))^{1/2}}\frac{(r-|x-x_0|)^{\alpha/2}}{(\ell((r-|x-x_0|)^{-2}))^{1/2}},
\qquad x\in B(x_0, r)\, ,
\end{eqnarray*}
where $C_7=C_7(R)$ is the constant from Proposition
\ref{ksv-p:upbdongfofkpinfiniteinterval2}.
\end{prop}
\noindent{\bf Proof.} Without loss of generality, we may assume that $x_0=0$. For
$x\neq 0$, put $Z_t=\frac{X_t\cdot x}{|x|}$. Then $Z_t$ is a L\'evy
process on ${\mathbb R}$ with
$$
{\mathbb E}(e^{i\theta Z_t})={\mathbb E}(e^{i\theta\frac{x}{|x|}\cdot X_t})
=e^{-t \phi(|\theta\frac{x}{|x|}|^2)}=e^{-t \phi(\theta^2)}
\qquad \theta\in {\mathbb R}.
$$
Thus $Z_t$ is a one-dimensional subordinate Brownian motion of the type
studied in Section \ref{ksv-ss:1dsbm}. It is easy to see that, if
$X_t\in B(0, r)$, then $|Z_t|<r$, hence
$$
{\mathbb E}_x[\tau_{B(0, r)}]\le {\mathbb E}_{|x|}[\tilde \tau],
$$
where $\tilde \tau=\inf\{t>0: |Z_t|\ge r\}$. Now the desired
conclusion follows easily from Proposition
\ref{ksv-p:upbdongfofkpinfiniteinterval2} (more precisely, from
\eqref{ksv-e:upbdongfofkpinfiniteinterval2}). {
$\Box$
}
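To illustrate the form of this bound: in the special case $\phi(\lambda)=\lambda^{\alpha/2}$, $\ell\equiv 1$ (the rotationally symmetric $\alpha$-stable process), the classical exact formula ${\mathbb E}_x[\tau_{B(x_0,r)}]=c_{d,\alpha}(r^2-|x-x_0|^2)^{\alpha/2}$ is indeed dominated by a constant multiple of $r^{\alpha/2}(r-|x-x_0|)^{\alpha/2}$, since $r^2-|x-x_0|^2\le 2r(r-|x-x_0|)$. The following minimal Monte Carlo sketch (an illustration only, not part of the argument; the function and its parameters are ours) checks the resulting $r^{\alpha}$ scaling of ${\mathbb E}_0[\tau_{(-r,r)}]$ for a one-dimensional symmetric $\alpha$-stable process:
\begin{verbatim}
import numpy as np
from scipy.stats import levy_stable

def mean_exit_time(alpha, r, n_paths=400, dt=1e-3, t_max=20.0, seed=0):
    """Crude Monte Carlo estimate of E_0[tau_{(-r,r)}] for a standard
    symmetric alpha-stable process on R; paths still inside (-r, r)
    at t_max are truncated there (a small downward bias)."""
    rng = np.random.default_rng(seed)
    n_steps = int(t_max / dt)
    taus = np.empty(n_paths)
    for k in range(n_paths):
        # exact stable increments over time steps of length dt
        incs = dt ** (1.0 / alpha) * levy_stable.rvs(
            alpha, 0.0, size=n_steps, random_state=rng)
        path = np.cumsum(incs)
        hits = np.flatnonzero(np.abs(path) >= r)
        taus[k] = (hits[0] + 1) * dt if hits.size else t_max
    return taus.mean()

alpha = 1.5
for r in (0.25, 0.5, 1.0):
    est = mean_exit_time(alpha, r)
    print(r, est, est / r ** alpha)  # last column roughly constant in r
\end{verbatim}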
As a consequence of Lemma \ref{ksv-L3.2}, Proposition \ref{ksv-l:tau} and
\eqref{ksv-PK}, we get the following result.
\begin{prop}\label{ksv-p:Poisson1}
There exist $C_{15}, C_{16}>0$ such that for every $r \in (0, 1)$ and
$x_0 \in {\mathbb R}^d$,
\begin{eqnarray}\label{ksv-P1}
K_{B(x_0,r)}(x,y) &\le & C_{15} \, j(|y-x_0|-r) \big(\phi(r^{-2})\phi((r-|x-x_0|)^{-2})\big)^{-1/2}\\
&\asymp & j(|y-x_0|-r) \frac{r^{\alpha/2}}
{(\ell(r^{-2}))^{1/2}}\frac{(r-|x-x_0|)^{\alpha/2}}
{(\ell((r-|x-x_0|)^{-2}))^{1/2}}\, ,\nonumber
\end{eqnarray}
for all $(x,y) \in B(x_0,r)\times \overline{B(x_0,r)}^c$ and
\begin{equation}\label{ksv-P2}
K_{B(x_0,r)}(x_0,y) \,\ge\, C_{16}\,\frac{j(|y-x_0|)}{\phi((r/2)^{-2})}\asymp
j(|y-x_0|)\frac{r^\alpha}{\ell(r^{-2})}
\end{equation}
for all $y \in\overline{B(x_0,r)}^c$.
\end{prop}
\noindent{\bf Proof.} Without loss of generality, we assume $x_0=0$. For $z \in B(0,
r)$ and $r<|y|<2$
$$
|y|-r \le |y|-|z| \le |z-y| \le |z|+|y| \le r +|y| \le 2|y|
,
$$
and for $z \in B(0, r)$ and $y \in B(0, 2)^c$,
$$
|y|-r \le |y|-|z| \le |z-y| \le |z|+|y| \le r +|y|\le |y|+1.
$$
Thus by the monotonicity of $j$, \eqref{ksv-H:1} and \eqref{ksv-H:2}, there
exists a constant $c>0$ such that
$$
c j(|y|) \,\le\, j(|z-y|) \, \le \, j(|y|-r)\, , \qquad (z,y) \in
B(0,r) \times \overline{B(0,r)}^c.
$$
Applying the above inequality, Lemma \ref{ksv-L3.2} and Proposition
\ref{ksv-l:tau} to \eqref{ksv-PK}, we obtain the proposition. {
$\Box$
}
\begin{prop}\label{ksv-p:Poisson2}
For every $a \in (0,1)$, $r \in (0, 1/4)$, $x_0 \in {\mathbb R}^d$ and $x_1, x_2 \in B(x_0, ar)$,
$$
K_{B(x_0,r)}(x_1,y) \,\le\, C_{14} K_{B(x_0,r)}(x_2,y), \qquad y \in
\overline{B(x_0,r)}^{\, c}\, ,
$$
where $C_{14}=C_{14}(a)$ is the constant from Corollary \ref{ksv-c:hi}.
\end{prop}
\noindent{\bf Proof.} Let $a\in (0,1)$, $r\in (0,1/4)$ and $x_0\in {\mathbb R}^d$ be fixed.
For every Borel set $A\subset \overline{B(x_0,r)}^{\, c}$,
the function $x\mapsto {\mathbb P}_x(X_{\tau_{B(x_0,r)}}\in A)$ is
harmonic in $B(x_0,r)$. By Corollary \ref{ksv-c:hi} and \eqref{ksv-newls-2},
we have for all $x_1, x_2 \in B(x_0, ar)$,
\begin{eqnarray*}
\int_A K_{B(x_0,r)}(x_1,y)\, dy&=& {\mathbb P}_{x_1}(X_{\tau_{B(x_0,r)}}\in A)\\
&\le & C_{14} {\mathbb P}_{x_2}(X_{\tau_{B(x_0,r)}}\in A)=\int_A K_{B(x_0,r)}(x_2,y)\, dy\, .
\end{eqnarray*}
This implies that $K_{B(x_0,r)}(x_1,y)\le C_{14} K_{B(x_0,r)}(x_2,y)$
for a.e.~$y\in \overline{B(x_0,r)}^{\, c}$, and hence by
the continuity of $ K_{B(x_0,r)}(x,\cdot)$ for every
$y\in \overline{B(x_0,r)}^{\, c}$.{
$\Box$
}
The next inequalities will be used several times in the remainder of
this paper.
\begin{lemma}\label{ksv-l:l}
There exists $C>0$
such that
\begin{equation}\label{ksv-el1}
\frac{s^{\alpha/2}}{\left(\ell(s^{-2})\right)^{1/2}} \,\le \, C \,
\frac{r^{\alpha/2}}{\left(\ell(r^{-2})\right)^{1/2}}, \qquad
0<s<r\le 4,
\end{equation}
\begin{equation}\label{ksv-el2}
\frac{s^{1-\alpha/2}}{\left(\ell(s^{-2})\right)^{1/2}} \,\le \, C \,
\frac{r^{1-\alpha/2}}{\left(\ell(r^{-2})\right)^{1/2}}, \qquad
0<s<r\le 4,
\end{equation}
\begin{equation}\label{ksv-el7}
s^{1-\alpha/2} \,{\left(\ell(s^{-2})\right)^{1/2}} \,\le \, C \,
r^{1-\alpha/2}\,{\left(\ell(r^{-2})\right)^{1/2}}, \qquad
0<s<r\le 4,
\end{equation}
\begin{equation}\label{ksv-el3}
\int^{\infty}_r
\frac{\left(\ell(s^{-2})\right)^{1/2}}{s^{1+\alpha/2}}ds
\,\le \, C \,
\frac{\left(\ell(r^{-2})\right)^{1/2}}{r^{\alpha/2}}, \qquad
0<r\le 4,
\end{equation}
\begin{equation}\label{ksv-el6}
\int^{r}_0 \frac{\left(\ell(s^{-2})\right)^{1/2}}{s^{\alpha/2}}ds
\,\le \, C \,
\frac{\left(\ell(r^{-2})\right)^{1/2}}{r^{\alpha/2-1}}, \qquad
0<r\le 4,
\end{equation}
\begin{equation}\label{ksv-el4}
\int^{\infty}_r \frac{\ell(s^{-2})}{s^{1+\alpha}}ds
\,\le \, C \,
\frac{\ell(r^{-2})}{r^{\alpha}}, \qquad 0<r\le 4,
\end{equation}
\begin{equation}\label{ksv-el5}
\int_{0}^r \frac{\ell(s^{-2})}{s^{\alpha-1}}ds
\,\le \, C \,
\frac{\ell(r^{-2})}{r^{\alpha-2}}, \qquad 0<r\le 4,
\end{equation}
and
\begin{equation}\label{ksv-el8}
\int_{0}^r \frac{s^{\alpha-1}}{\ell(s^{-2})}ds
\,\le \, C \,
\frac{r^{\alpha}}{\ell(r^{-2})}, \qquad \ 0<r\le 4.
\end{equation}
\end{lemma}
\noindent{\bf Proof.} The first three inequalities follow easily from \cite[Theorem
1.5.3]{BGT}, while the last five follow from the 0-version of
\cite[1.5.11]{BGT}. {
$\Box$
}
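For orientation, note that in the simplest case $\ell\equiv 1$ (the rotationally symmetric $\alpha$-stable case) all of these bounds reduce to elementary calculus; for instance, \eqref{ksv-el3} and \eqref{ksv-el5} then follow, with explicit constants, from
$$
\int^{\infty}_r s^{-1-\alpha/2}\, ds\,=\,\frac{2}{\alpha}\, r^{-\alpha/2}
\qquad \text{and} \qquad
\int_0^r s^{1-\alpha}\, ds\,=\,\frac{r^{2-\alpha}}{2-\alpha}\, , \quad \alpha<2 .
$$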
\begin{prop}\label{ksv-p:Poisson3}
For every $a \in (0,1)$, there exists $C_{17}=C_{17}(a)>0$ such that for every
$r \in (0, 1)$ and $x_0 \in {\mathbb R}^d$,
\begin{eqnarray*}
K_{B(x_0,r)}(x,y) \,&\le &\, C_{17}\,\frac{r^{\alpha/2-d}}{(\ell(r^{-2}))^{1/2}}
\frac{(\ell((|y-x_0|-r)^{-2}))^{1/2}} {(
|y-x_0|-r)^{\alpha/2}}\, ,\\
& & \qquad \qquad \qquad \forall x\in B(x_0, ar),\, y \in
\{r<|x_0-y| \le 2r\}\, .
\end{eqnarray*}
\end{prop}
\noindent{\bf Proof.} By Proposition \ref{ksv-p:Poisson2},
$$
K_{B(x_0,r)}(x,y) \le \frac{c_1}{r^d} \int_{B(x_0, a r)}
K_{B(x_0,r)}(w,y) dw
$$
for some constant $c_1=c_1(a)>0$. Thus from Proposition \ref{ksv-l:tau}, (\ref{ksv-P1}) and Remark \ref{ksv-r:abofgf4lhpat0} we have that
\begin{eqnarray*}
K_{B(x_0,r)}(x,y)&\le& \frac{
c_1}{r^d}\int_{B(x_0, r)}\int_{B(x_0,r)} G_{B(x_0,r)}(w,z)J(z-y) dz dw \\
&=& \frac{c_1}{r^d}\int_{B(x_0, r)} {\mathbb E}_z[\tau_{B(x_0,r)}]J(z-y) dz\\
& \le& \frac{c_2}{r^d} \frac{r^{\alpha/2}}{(\ell(r^{-2}))^{1/2}}
\int_{B(x_0, r)}\frac{(r-|z-x_0|)^{\alpha/2}}{(\ell((r-|z-x_0|)^{-2}))^{1/2}} J(z-y)dz
\end{eqnarray*}
for some constant $c_2=c_2(a)>0$.
Now applying Theorem \ref{ksv-t:Jorigin}, we get
$$
K_{B(x_0,r)}(x,y) \, \le\,
\frac{c_3 r^{\alpha/2-d}}{(\ell(r^{-2}))^{1/2}} \int_{B(x_0,
r)}\frac{(r-|z-x_0|)^{\alpha/2}} {(\ell((r-|z-x_0|)^{-2}))^{1/2}}
\frac{\ell(|z-y|^{-2})} {|z-y|^{d+\alpha}}dz
$$
for some constant $c_3=c_3(a)>0$. Since $r-|z-x_0| \le |y-z| \le 3r
\le
3$, from \eqref{ksv-el1} we see that
$$
\frac{(r-|z-x_0|)^{\alpha/2}} {(\ell((r-|z-x_0|)^{-2}))^{1/2}} \,
\le\, c_4
\frac{(|y-z|)^{\alpha/2}}
{(\ell(|y-z|^{-2}))^{1/2}}
$$
for some constant $c_4>0$. Thus we have
\begin{eqnarray*}
K_{B(x_0,r)}(x,y) & \le&
\frac{c_5 r^{\alpha/2-d}}{(\ell(r^{-2}))^{1/2}} \int_{B(x_0, r)}
\frac{(\ell(|z-y|^{-2}))^{1/2}}{|z-y|^{d+\alpha/2}}dz\\
& \le& \frac{c_5 r^{\alpha/2-d}}{(\ell(r^{-2}))^{1/2}} \int_{B(y,
|y-x_0|-r)^c}
\frac{(\ell(|z-y|^{-2}))^{1/2}}{|z-y|^{d+\alpha/2}}dz\\
&\le & \frac{c_6 r^{\alpha/2-d}}{(\ell(r^{-2}))^{1/2}}
\int_{|y-x_0|-r}^{\infty} \frac{\left(\ell(s^{-2})\right)^{1/2}}
{s^{1+\alpha/2}}ds
\end{eqnarray*}
for some constants $c_5=c_5(a)>0$ and $c_6=c_6(a)>0$. Using
\eqref{ksv-el3} in the above equation, we conclude that
$$
K_{B(x_0,r)}(x,y) \,\le \,
\frac{c_7 r^{\alpha/2-d}}{(\ell(r^{-2}))^{1/2}}
\frac{(\ell((|y-x_0|-r)^{-2}))^{1/2}} {( |y-x_0|-r)^{\alpha/2}}
$$
for some constant $c_7=c_7(a)>0$. {
$\Box$
}
\begin{remark}\label{ksv-r:Poisson3}{\rm
Note that the right-hand side of the estimate can be replaced by $\frac{V(r)}{r^d V(|y-x_0|-r)}$.
}
\end{remark}
\subsection{Boundary Harnack principle}\label{ksv-ss:bhp}
In this subsection, we additionally assume that $\alpha\in (0, 2\wedge d)$ and
in the case $d\le 2$, we further assume \eqref{ksv-e:ass4trans}.
The proof of the boundary Harnack principle is basically
the proof given in \cite{KSV1}, which is adapted from \cite{Bog97, SW99}.
The following result is a generalization of \cite[Lemma 3.3]{SW99}.
\begin{lemma}\label{ksv-l2.1}
For every $a \in (0, 1)$, there exists a positive constant $C_{19}=C_{19}(a)>0$
such that for any $r\in (0, 1)$ and any open
set $D$ with $D\subset B(0, r)$ we have
$$
{\mathbb P}_x\left(X_{\tau_D} \in B(0, r)^c\right) \,\le\, C_{19}\,r^{-\alpha}\,
\ell(r^{-2})\int_D G_D(x,y)dy, \qquad x \in D\cap B(0,
ar).
$$
\end{lemma}
\noindent{\bf Proof.}
We will use
$C^{\infty}_c({\mathbb R}^d)$ to denote
the space of infinitely differentiable functions with compact support.
Recall that ${\bf L}$ is the $L_2$-generator of $X$ in \eqref{ksv-3.1} and
that $G(x,y)$ and $G_D(x,y)$ are the Green
functions of $X$
in ${\mathbb R}^d$ and $D$
respectively. We have ${\bf L} \,
G(x,y)=-\delta_x(y)$ in the weak sense. Since $ G_D(x,y)=G(x,y)
-{\mathbb E}_x[G(X_{\tau_D},y)] $,
we have, by the symmetry of ${\bf L}$, for any $x\in D$ and any
nonnegative $\phi \in C^{\infty}_c({\mathbb R}^d)$,
\begin{eqnarray*}
&&\int_D G_D(x,y) {\bf L} \phi(y)dy
=\int_{{\mathbb R}^d} G_D(x,y) {\bf L} \phi(y)dy\\
&&=
\int_{{\mathbb R}^d} G(x,y) {\bf L} \phi(y)dy-
\int_{{\mathbb R}^d} {\mathbb E}_x[G(X_{\tau_D},y)]
{\bf L} \phi(y)dy\\
&&=
\int_{{\mathbb R}^d} G(x,y) {\bf L} \phi(y)dy- \int_{D^c}
\int_{{\mathbb R}^d} G(z,y) {\bf L} \phi(y)dy
{\mathbb P}_x(X_{\tau_D} \in dz)\\
&&=-\phi(x)+ \int_{D^c}
\phi(z){\mathbb P}_x(X_{\tau_D} \in dz)
\,=\,-\phi(x)+{\mathbb E}_x[\phi(X_{\tau_D})].
\end{eqnarray*}
In particular, if $\phi=0$ on $D$, we have
\begin{equation}\label{ksv-har_gen}
{\mathbb E}_x\left[ \phi(X_{\tau_D})\right] = \int_D
G_D(x,y) {\bf L} \phi(y)dy.
\end{equation}
For fixed $a \in (0,1)$, take a
sequence of radial functions $\phi_m$ in $C^{\infty}_c({\mathbb R}^d)$ such
that $0\le \phi_m\le 1$,
\[
\phi_m(y)=\left\{
\begin{array}{lll}
0, & |y|< a\\
1, & 1\le |y|\le m+1\\
0, & |y|>m+2,
\end{array}
\right.
\]
and that $\sum_{i, j}|\frac{\partial^2}{\partial y_i\partial y_j}
\phi_m|$ is uniformly bounded.
Define $\phi_{m, r}(y)=\phi_m(\frac{y}{r})$ so that
$0\le \phi_{m, r}\le 1$,
\begin{equation}\label{ksv-e:2.11}\phi_{m, r}(y)=
\begin{cases}
0, & |y|<ar\\
1, & r\le |y|\le r(m+1)\\
0, & |y|>r(m+2),
\end{cases}
\quad \text{and} \quad \sup_{y\in {\mathbb R}^d} \sum_{i,
j}\left|\frac{\partial^2}{\partial y_i\partial y_j} \phi_{m,
r}(y)\right| \,<\, c_0\, r^{-2}.
\end{equation}
We claim that there exists a constant $c_1=c_1(a)>0$ such that for
all $r\in (0, 1)$,
\begin{equation}\label{ksv-e2.1}
\sup_{m \ge 1} \sup_{y\in {\mathbb R}^d} |{\bf L}\phi_{m,r}(y)|\,\le\,
c_1 r^{-\alpha} \, \ell(r^{-2}).
\end{equation}
In fact, by Theorem \ref{ksv-t:Jorigin} we have
\begin{eqnarray*}
&& \left|\int_{{\mathbb R}^d} (\phi_{m,r}(x+y)-\phi_{m,r}(x)-(\nabla
\phi_{m,r}(x) \cdot y)1_{B(0, r)}(y))J(y)
dy \right|\\
&&\le \left|\int_{\{|y|\le r\}}
(\phi_{m,r}(x+y)-\phi_{m,r}(x)-(\nabla
\phi_{m,r}(x) \cdot y)1_{B(0, r)}(y))J(y) dy\right|\\
&& \quad +2\int_{\{r<|y|\}}J(y) dy
\\
&&\le \frac{c_2}{r^2}\int_{\{|y|\le r \}}
|y|^2 J(y)dy
+2\int_{\{r<|y|\}}J(y) dy \\
&&\le \frac{c_3}{r^2}\int_{\{|y|\le r \}}
\frac1{|y|^{d+\alpha-2}} \ell(|y|^{-2})dy
\,+\,c_3\int_{\{r<|y|\}} \frac1{|y|^{d+\alpha}} \ell(|y|^{-2}) dy
\\
&& \le
\frac{c_4}{r^2}
\int_{0}^r \frac{\ell(s^{-2})}{s^{\alpha-1}}ds\,+\,
c_4\int^{\infty}_r \frac{\ell(s^{-2})}{s^{1+\alpha}}ds.
\end{eqnarray*}
Applying \eqref{ksv-el4}-\eqref{ksv-el5} to the above equation, we get
$$
\left|\int_{{\mathbb R}^d} (\phi_{m,r}(x+y)-\phi_{m,r}(x)-(\nabla
\phi_{m,r}(x) \cdot y)1_{B(0, r)}(y))J(y)
dy \right| \,\le \,
c_5\, r^{-\alpha}\, \ell(r^{-2}),$$
for some constant
$c_5=c_5(d, \alpha, \ell)>0$.
So the claim follows.
Let $A(x, a,b):=\{ y \in {\mathbb R}^d: a \le |y-x| <b \}.$
When $D \subset B(0,r)$ for some $r\in (0, 1)$, we get,
by combining (\ref{ksv-har_gen})
and (\ref{ksv-e2.1}), that for any $x\in D\cap B(0, ar)$,
\begin{eqnarray*}
{\mathbb P}_x\left(X_{\tau_D} \in B(0, r)^c\right)\,&=&\, \lim_{m\to
\infty}{\mathbb P}_x\left(X_{\tau_D} \in A(0, r, (m+1)r)\right)\\
\,&\le &\,
C\,r^{-\alpha} \, \ell(r^{-2})\int_D G_D(x,y)dy.
\end{eqnarray*}
{
$\Box$
}
\begin{lemma}\label{ksv-l2.1_1}
There exists $C_{20}>0$ such that for any open set $D$ with $B(A, \kappa r)\subset D\subset B(0, r)$
for some $r\in (0, 1)$ and
$\kappa\in (0, 1)$, we have that for every $x \in D \setminus B(A, \kappa r)$,
\begin{eqnarray*}
\lefteqn{\int_{D} G_D(x,y) dy }\\
& \le & C_{20}\, r^{\alpha}
\,\kappa^{-d-\alpha/2}\, \frac1{\ell((4r)^{-2})}\left(1+
\frac{\ell((\frac{\kappa r}{2})^{-2})}{\ell((4r)^{-2})}\right)
{\mathbb P}_x\left(X_{\tau_{D\setminus B(A, \kappa r)}} \in B(A, \kappa
r)\right).
\end{eqnarray*}
\end{lemma}
\noindent{\bf Proof.} Fix a point $x\in D\setminus B(A, \kappa r)$ and let $B:=B(A,
\frac{\kappa r}2)$. Note that, by the harmonicity of
$G_D(x,\,\cdot\,)$ in $D\setminus \{x\}$ with respect to $X$, we
have
\[
G_D(x,A)\,\ge\,\int_{D\cap \overline{B}^c}K_B(A, y)G_D(x,y)dy
\,\ge\,\int_{D\cap B(A, \frac{3\kappa r}4)^c}K_B(A, y)G_D(x,y)dy.
\]
Since $\frac{3\kappa r}4\le |y-A|\le 2r$ for $y\in B(A,
\frac{3\kappa r}4)^c\cap D$ and $j$ is a decreasing function, it
follows from \eqref{ksv-P2} in Proposition \ref{ksv-p:Poisson1} and Theorem \ref{ksv-t:Jorigin} that
\begin{eqnarray*}
G_D(x,A) &\ge& c_1\, \frac{(\frac{\kappa r}{2})^\alpha}{\ell\left((
\frac{\kappa r}{2})^{-2}\right)}\int_{D \cap B(A, \frac{3\kappa
r}4)^c}G_D(x,y)J(y-A) dy\\
&\ge& c_1\, j(2r)\, \frac{(\frac{\kappa r}{2})^\alpha}{\ell\left((
\frac{\kappa r}{2})^{-2}\right)}\int_{D \cap B(A, \frac{3\kappa
r}4)^c}G_D(x,y) dy\\
&\ge& c_2\, \kappa^\alpha \,r^{-d}\, \frac{\ell((2r)^{-2})}{\ell((
\frac{\kappa r}{2})^{-2})}\int_{D \cap B(A, \frac{3\kappa
r}4)^c}G_D(x,y) dy,
\end{eqnarray*}
for some positive constants $c_1$ and $c_2$. On the other hand,
applying Theorem \ref{ksv-T:Har} we get
\[
\int_{B(A, \frac{3\kappa r}4)} G_D(x,y) dy\le c_3 \int_{B(A,
\frac{3\kappa r}4)} G_D(x,A)dy \,\le\,c_4\,r^{d}\,\kappa^d
G_D(x,A),
\]
for some positive constants $c_3$ and $c_4$. Combining these two
estimates we get that
\begin{equation}\label{ksv-efe1}
\int_{D} G_D(x,y) dy \,\le\, c_5\,\left(r^{d}\kappa^d+r^{d}
\kappa^{-\alpha}\frac{\ell((\frac{\kappa r}{2})^{-2})}
{\ell((2r)^{-2})}\right)\, G_D(x,A)
\end{equation}
for some constant $c_5>0$.
Let $\Omega=D\setminus \overline{B(A, \frac{\kappa r}2)}$. Note that for any
$z\in B(A, \frac{\kappa r}4)$ and $y\in \Omega$, $ 2^{-1}|y-z|\le|y-A|\le 2|y-z|$.
Thus we get from (\ref{ksv-PK}) and \eqref{ksv-H:1} that for $z\in B(A,\frac{\kappa r}4)$,
\begin{equation}\label{ksv-e:KK1}
c_6^{-1}K_{\Omega}(x, A) \,\le \,K_{\Omega}(x, z) \,\le\,
c_6K_{\Omega}(x, A)
\end{equation}
for some $c_6>1$. Using the harmonicity of $G_D(\cdot, A)$ in
$D\setminus\{A\}$ with respect to $X$, we can split $G_D(\cdot, A)$
into two parts:
\begin{eqnarray*}
\lefteqn{G_D(x, A)
={\mathbb E}_x \left[G_D(X_{\tau_{\Omega}},A)\right]}\\
&=&{\mathbb E}_x \left[G_D(X_{\tau_{\Omega}},A):\,X_{\tau_{\Omega}} \in B(A,
\frac{\kappa r}4) \right]\\
& & +
{\mathbb E}_x\left[G_D(X_{\tau_{\Omega}},A):\,X_{\tau_{\Omega}}
\in \{\frac{\kappa r}4\le |y-A|\le \frac{\kappa r}2\}\right]\\
& := &I_1+I_2.
\end{eqnarray*}
Since $G_D(y,A)\le G(y,A)$, by using (\ref{ksv-e:KK1}) and Theorem \ref{ksv-t:Gorigin}, we have
\begin{eqnarray*}
I_1 &\le &
c_6\,K_{\Omega}(x,A) \int_{B(A, \frac{\kappa r}4)}G_D(y,
A)dy \\
& \le & c_7 \,K_{\Omega}(x,A) \int_{B(A, \frac{\kappa r}4)}
\frac{1}{|y-A|^{d-\alpha}\ell(|y-A|^{-2})}\, dy\, ,
\end{eqnarray*}
for some constant $c_7>0$. Since $|y-A|\le 4r \le 4 $, by
\eqref{ksv-el1},
\begin{equation}\label{ksv-efe}
\frac{|y-A|^{\alpha/2}}{\ell(|y-A|^{-2})} \,\le\, c_8 \,
\frac{(4r)^{\alpha/2}}{\ell((4r)^{-2})}
\end{equation}
for some constant $c_8>0$. Thus
\begin{eqnarray*}
I_1 &\le & c_7\, c_8\,K_{\Omega}(x,A) \int_{B(A, \frac{\kappa
r}4)}\frac{1}{|y-A|^{d-\alpha/2}}
\frac{(4r)^{\alpha/2}}{\ell((4r)^{-2})}dy\\
& \le &
c_9\kappa^{\alpha/2}r^{\alpha}\frac1{\ell((4r)^{-2})}K_{\Omega}(x, A)
\end{eqnarray*}
for some constant $c_9>0$. Now using (\ref{ksv-e:KK1}) again, we get
\begin{eqnarray*}
I_1 &\le &
c_{10}\kappa^{\alpha/2-d}r^{\alpha-d}\frac1{\ell((4r)^{-2})}\int_{B(A, \frac{\kappa r}4)} K_{\Omega}(x, z)dz\\
&=&c_{10}\kappa^{\alpha/2-d}r^{\alpha-d}\frac1{\ell((4r)^{-2})}{\mathbb P}_x\left(X_{\tau_{\Omega}}\in B(A, \frac{\kappa r}{4})\right)
\end{eqnarray*}
for some constant $c_{10}>0$. On the other hand, again by Theorem \ref{ksv-t:Gorigin} and \eqref{ksv-efe},
\begin{eqnarray*}
I_2 &=& \int_{\{\frac{\kappa r}4\le |y-A|\le \frac{\kappa r}2\}}
G_{D}(y,A) {\mathbb P}_x(X_{\tau_{\Omega}} \in dy) \\
&\le &
c_{11}\int_{\{\frac{\kappa r}4\le |y-A|\le \frac{\kappa r}2\}}
\frac1{|y-A|^{d-\alpha}} \,\frac{1}{\ell(|y-A|^{-2})}
{\mathbb P}_x(X_{\tau_{\Omega}} \in dy)\\
&\le & c_{12}
\kappa^{\alpha/2-d}\,r^{\alpha-d} \, \frac1{\ell((4r)^{-2})}{\mathbb P}_x
\left(X_{\tau_{\Omega}} \in \{\frac{\kappa r}4\le |y-A|\le
\frac{\kappa r}2\}\right),
\end{eqnarray*}
for some constants $c_{11}>0$ and $c_{12}>0$.
Therefore
$$
G_D(x, A) \,\le\, c_{13}\,
\kappa^{\alpha/2-d}\,r^{\alpha-d}\frac1{\ell((4r)^{-2})}\,
{\mathbb P}_x\left(X_{\tau_{\Omega}} \in B(A, \frac{\kappa r}2)\right)
$$
for some constant $c_{13}>0$. Combining the above with \eqref{ksv-efe1},
we get
\begin{eqnarray*}
\lefteqn{\int_{D} G_D(x,y) dy}\\
&\le & c_{14}\, r^{\alpha} \,\kappa^{-d-\alpha/2}\,
\frac1{\ell((4r)^{-2})}\left(1+ \frac{\ell((\frac{\kappa
r}{2})^{-2})}{\ell((2r)^{-2})}\right){\mathbb P}_x \left(X_{\tau_{D\setminus
B(A, \frac{\kappa r}2)}} \in B(A, \frac{\kappa r}2)\right),
\end{eqnarray*}
for some constant $c_{14}>0$. It follows immediately that
\begin{eqnarray*}
\lefteqn{\int_{D} G_D(x,y) dy }\\
& \le & c_{14}\, r^{\alpha} \,
\kappa^{-d-\alpha/2}\,\frac1{\ell((4r)^{-2})}\left(1+
\frac{\ell((\frac{\kappa r}{2})^{-2})} {\ell((2r)^{-2})} \right)
{\mathbb P}_x\left(X_{\tau_{D\setminus B(A, \kappa r)}} \in B(A, \kappa
r)\right).
\end{eqnarray*}
{
$\Box$
}
Combining Lemmas \ref{ksv-l2.1}-\ref{ksv-l2.1_1} and using translation
invariance, we have the following.
\begin{lemma}\label{ksv-l2.3}
There exists $C_{21}>0$ such that for any open set $D$ with $B(A, \kappa r)\subset D\subset B(Q, r)$
for some $r\in(0, 1)$ and
$\kappa\in (0, 1)$, we have that for every $ x\in D\cap B(Q, \frac{r}2)$,
\begin{eqnarray*}
\lefteqn{{\mathbb P}_x\left(X_{\tau_{D}} \in B(Q, r)^c\right)}\\
& \le &
C_{21}\,\kappa^{-d-\alpha/2 }\, \frac{\ell(r^{-2})}{ \ell((4r)^{-2})}\,
\left(1+\frac{\ell((\frac{\kappa r}{2})^{-2})}{\ell((2r)^{-2})}
\right) {\mathbb P}_x\left(X_{\tau_{D\setminus B(A, \kappa r) }} \in B(A,
\kappa r) \right).
\end{eqnarray*}
\end{lemma}
Recall that $A(x, a,b):=\{ y \in {\mathbb R}^d: a \le |y-x| <b \}.$
\begin{lemma}\label{ksv-l2.U}
Let $D$ be an open set and $r\in (0,1/2)$. For every $Q \in {\mathbb R}^d$ and
any positive function $u$ vanishing on $D^c \cap B(Q,
\frac{11}6r)$,
there is a $\sigma\in
(\frac{10}{6}r, \frac{11}{6}r)$ such that for any $ x \in D
\cap B(Q, \frac{3}{2} r)$,
\begin{equation}\label{ksv-e:l2.U}
{\mathbb E}_x\left[u(X_{\tau_{D \cap B(Q, \sigma)}}); X_{\tau_{D \cap B(Q,
\sigma)}} \in B(Q, \sigma)^c\right]
\le C_{22}\,\frac{r^{\alpha}}{\ell((2r)^{-2})}
\int_{B(Q, \frac{10r}6)^c} J(y-Q)u(y)dy
\end{equation}
for some constant $C_{22}>0$ independent of $Q$ and $u$.
\end{lemma}
\noindent{\bf Proof.} Without loss of generality, we may assume that $Q=0$. Note that
by \eqref{ksv-el6}
\begin{eqnarray*}
&&\int^{\frac{11}{6}r}_{\frac{10}{6}r}\int_{A(0, \sigma, 2r)}
\ell((|y|- \sigma)^{-2})^{1/2} (|y|-\sigma)^{-{\alpha}/2}
u(y)\, dy \, d\sigma\\
&&=\int_{A(0, \frac{10}{6}r , 2r)}
\int^{ |y| \wedge \frac{11}{6}r}_{\frac{10}{6}r}\ell((|y|-
\sigma)^{-2})^{1/2}
(|y|-\sigma)^{-{\alpha}/2}\, d\sigma\, u(y )\,dy \\
&& \le \int_{A(0, \frac{10}{6}r , 2r)} \left(\int^{ |y| -
\frac{10}{6}r}_{0}\ell(s^{-2})^{1/2}
s^{-{\alpha}/2}ds \right)u(y)dy \\
&&\le c_1 \int_{A(0, \frac{10r}6, 2r)} \ell\left(\left(|y|- \frac{10r}6
\right)^{-2}\right)^{1/2} \left(|y|- \frac{10r}6\right)^{1-{\alpha}/2} u(y)dy
\end{eqnarray*}
for some positive constant $c_1$.
Using
\eqref{ksv-el2} and \eqref{ksv-el7},
we get that there is a constant $c_3>0$ such that
\begin{eqnarray*}
\lefteqn{\int_{A(0, \frac{10r}6, 2r)}\ell\left(\left(|y|- \frac{10r}6 \right)^{-2}\right)^{1/2}
\left(|y|- \frac{10r}6\right)^{1-{\alpha}/2} u(y)dy }\\
&\le & c_3 \int_{A(0,
\frac{10r}6, 2r)} \ell(|y|^{-2})^{1/2} |y|^{1-{\alpha}/2} u(y)dy\\
&\le & c_3 \frac{r^{1-\alpha/2}}{\ell((2r)^{-2})^{1/2}} \int_{A(0,
\frac{10r}6, 2r)} \ell(|y|^{-2}) u(y)dy\, .
\end{eqnarray*}
Thus, by taking $c_4>6 c_1 c_3$,
we can conclude that there is a $\sigma\in (\frac{10}{6}r,
\frac{11}{6}r)$ such that
\begin{eqnarray}\label{ksv-e:int}
\lefteqn{\int_{A(0, \sigma, 2r)}\ell((|y|- \sigma)^{-2})^{1/2}\,
(|y|-\sigma)^{-{\alpha}/2}u(y)dy}\nonumber\\
&\le &
c_4\,\frac{r^{-\alpha/2}}{\ell((2r)^{-2})^{1/2}} \int_{A(0,
\frac{10r}6, 2r)} \ell(|y|^{-2}) u(y)dy.
\end{eqnarray}
Let $x \in D \cap B(0, \frac{3}{2} r)$. Note that, since $X$
satisfies the hypothesis ${\bf H}$ in \cite{Sz1}, by Theorem 1 in
\cite{Sz1} we have
\begin{eqnarray*}
&& {\mathbb E}_x\left[u(X_{\tau_{D \cap B(0, \sigma)}}); X_{\tau_{D \cap B(0,
\sigma)}} \in
B(0, \sigma)^c \right]\\
&&= {\mathbb E}_x\left[u(X_{\tau_{D \cap B(0, \sigma)}}); X_{\tau_{D \cap
B(0, \sigma)}} \in B(0, \sigma)^c, \, \tau_{D \cap
B(0, \sigma)} =\tau_{B(0, \sigma)} \right]\\
&&= {\mathbb E}_x\left[u(X_{\tau_{ B(0, \sigma)}}); X_{ \tau_{B(0, \sigma)}}
\in
B(0, \sigma)^c, \, \tau_{D \cap B(0, \sigma)} =
\tau_{B(0, \sigma)} \right]\\
&&\le {\mathbb E}_x\left[u(X_{\tau_{ B(0, \sigma)}}); X_{\tau_{B(0, \sigma)}}
\in B(0, \sigma)^c \right] \,=\,\int_{B(0, \sigma)^c}K_{B(0,
\sigma)}(x, y)u(y)dy.
\end{eqnarray*}
Since $ \sigma <2r < 1$, from
\eqref{ksv-P1} in Proposition \ref{ksv-p:Poisson1} and Proposition \ref{ksv-p:Poisson3} we have
\begin{eqnarray*}
\lefteqn{ {\mathbb E}_x\left[u(X_{\tau_{D \cap B(0, \sigma)}}); X_{\tau_{D \cap B(0,
\sigma)}} \in B(0, \sigma)^c \right]
\,\le\,
\int_{ B(0, \sigma)^c }
K_{B(0, \sigma)}(x, y)u(y)dy}\\
&\le &\, c_5 \int_{A(0, \sigma, 2r)}
\frac{\sigma^{\alpha/2-d}}{\left(\ell(\sigma^{-2})\right)^{1/2}}
\frac{(\ell((|y|-\sigma)^{-2}))^{1/2}} {( |y|-\sigma)^{\alpha/2}}
u(y)dy\\
&& + c_5 \int_{B(0, 2r)^c}j(|y|-\sigma)
\frac{\sigma^{\alpha/2}}
{(\ell(\sigma^{-2}))^{1/2}}\frac{(\sigma-|x|)^{\alpha/2}}
{(\ell((\sigma-|x|)^{-2}))^{1/2}}
u(y)dy
\end{eqnarray*}
for some constant $c_5>0$.
When $y \in A(0, 2r , 4)$ we have $\frac1{12}|y|\le |y|-\sigma $, while when $|y|\ge 4$ we have
$|y|-\sigma\ge |y|-1$. Since $ \sigma-|x|\le\sigma \le {2r}$, we
have by \eqref{ksv-el1} and the monotonicity of $j$,
$$
j(|y|-\sigma) \frac{\sigma^{\alpha/2}}{(\ell(\sigma^{-2}))^{1/2}}
\frac{(\sigma-|x|)^{\alpha/2}} {(\ell((\sigma-|x|)^{-2}))^{1/2}}
\,\le\, c_6 j\left(\frac{|y|}{12}\right)
\frac{r^{\alpha}}{\ell((2r)^{-2})} , \quad y \in A(0, 2r , 4)
$$
and
$$
j(|y|-\sigma) \frac{\sigma^{\alpha/2}}{(\ell(\sigma^{-2}))^{1/2}}
\frac{(\sigma-|x|)^{\alpha/2}} {(\ell((\sigma-|x|)^{-2}))^{1/2}}
\,\le\, c_6 j(|y|-1)
\frac{r^{\alpha}}{\ell((2r)^{-2})} , \quad |y|\ge 4
$$
for some constant $c_6>0$. Thus by applying \eqref{ksv-H:1} and
\eqref{ksv-H:2}, we get
$$
j(|y|-\sigma) \frac{\sigma^{\alpha/2}}{(\ell(\sigma^{-2}))^{1/2}}
\frac{(\sigma-|x|)^{\alpha/2}} {(\ell((\sigma-|x|)^{-2}))^{1/2}}
\,\le\, c_7 j(|y|)
\frac{r^{\alpha}}{\ell((2r)^{-2})}
$$
for some constant $c_7>0$.
Therefore,
\begin{eqnarray*}
\lefteqn{\int_{B(0, 2r)^c}j(|y|-\sigma)\frac{\sigma^{\alpha/2}} {(\ell(\sigma^{-2}))^{1/2}}\frac{(\sigma-|x|)^{\alpha/2}}
{(\ell((\sigma-|x|)^{-2}))^{1/2}}
u(y)dy}\\
& \le & c_5 c_7 \frac{r^{\alpha}}{\ell((2r)^{-2})} \int_{B(0, 2r)^c} J(y) u(y)\, dy\, .
\end{eqnarray*}
On the other hand, by \eqref{ksv-el1}, \eqref{ksv-e:int} and Theorem \ref{ksv-t:Jorigin}, we have that
\begin{eqnarray*}
\lefteqn{\int_{A(0, \sigma, 2r)}
\frac{\sigma^{\alpha/2-d}}{\left(\ell(\sigma^{-2})\right)^{1/2}}
\frac{(\ell((|y|-\sigma)^{-2}))^{1/2}}
{( |y|-\sigma)^{\alpha/2}} u(y)dy}\\
&\le& \left(\frac{10r}{6}\right)^{-d}\frac{\sigma^{\alpha/2}}
{\left(\ell(\sigma^{-2})\right)^{1/2}} \int_{A(0, \sigma, 2r)}
\frac{(\ell((|y|-\sigma)^{-2}))^{1/2}}
{( |y|-\sigma)^{\alpha/2}} u(y)dy\\
&\le & c_8
r^{-d}\frac{(2r)^{\alpha/2}}{\left(\ell((2r)^{-2})\right)^{1/2}}
\,\frac{r^{-\alpha/2}}{\left(\ell((2r)^{-2})\right)^{1/2}}
\int_{A(0, \frac{10r}6, 2r)} \ell(|y|^{-2}) u(y)dy \\
&\le & c_{9} \frac{r^{\alpha}}{\ell((2r)^{-2})} \int_{A(0,
\frac{10r}6, 2r)}\ell(|y|^{-2}) |y|^{-d-\alpha} u(y) dy\\
&\le & c_{10} \frac{r^{\alpha}}{\ell((2r)^{-2})}
\int_{A(0, \frac{10r}6, 2r)}J(y) u(y) dy
\end{eqnarray*}
for some positive constants $c_8$, $c_9$ and $c_{10}$.
Hence, by combining the last two displays we arrive at
$$
{\mathbb E}_x\left[u(X_{\tau_{D \cap B(0, \sigma)}}); X_{\tau_{D \cap B(0,
\sigma)}} \in
B(0, \sigma)^c \right]\\
\,\le\,
c_{11}\,\frac{r^{\alpha}}{\ell((2r)^{-2})}
\int_{B(0, \frac{10r}6)^c}J(y)u(y)dy
$$
for some constant $c_{11}>0$.
{
$\Box$
}
\begin{lemma}\label{ksv-l2.2}
Let $D$ be an open set and $r\in (0,1/2)$. Assume that $B(A, \kappa r)\subset D\cap
B(Q, r)$ for $\kappa\in (0, 1/2 ]$.
Suppose that $u\ge0$ is regular harmonic in $D\cap B(Q, 2r)$ with respect to
$X$ and $u=0$ in $D^c\cap B(Q, 2r)$. If $w$ is a regular harmonic
function with respect to $X$ in $D\cap B(Q, r)$ such that
$$
w(x)=\left\{
\begin{array}{ll}
u(x), & x\in B(Q, \frac{3r}2)^c\cup (D^c\cap B(Q, r)),\\
0, & x \in A(Q, r, \frac{3r}2),
\end{array}\right.
$$
then
$$
u(A) \ge w(A) \ge C_{23}\,\kappa^{\alpha} \frac{\ell((2r)^{-2})}
{\ell((\kappa r)^{-2})} \,u(x), \quad x \in D \cap B(Q,\frac32 r)
$$
for some constant $C_{23}>0$.
\end{lemma}
\noindent{\bf Proof.}
Without loss of generality we may assume $Q=0$. Let $x \in D \cap B(0,\frac32 r)$.
The left-hand side inequality in the conclusion of the lemma is clear from the fact that $u$ dominates $w$ on $(D\cap B(0,r))^c$ and that both functions are regular harmonic in $D\cap B(0,r)$.
Thus we only need to prove the right-hand side inequality.
By Lemma \ref{ksv-l2.U} there exists $\sigma\in (\frac{10r}6, \frac{11r}6)$ such that \eqref{ksv-e:l2.U} holds.
Since $u$ is regular harmonic in $D\cap B(0, 2r)$ with respect to $X$ and equal to zero on $D^c\cap B(0,2r)$,
it follows that
\begin{equation}\label{ksv-e:l2.2}
u(x)= {\mathbb E}_x\left[u(X_{\tau_{D \cap B(0, \sigma)}}); \,X_{\tau_{D \cap
B(0, \sigma)}} \in B(0, \sigma)^c \right] \le
c_1\frac{r^{\alpha}}{\ell((2r)^{-2})} \int_{B(0, \frac{10r}6)^c} J(y)u(y)dy
\end{equation}
for some constant $c_1>0$.
On the other hand, by \eqref{ksv-P2} in Proposition \ref{ksv-p:Poisson1},
we have that
\begin{eqnarray*}
w(A)&=& \int_{B(0, \frac{3r}2)^c}
K_{D\cap B(0, r)}(A, y)u(y)dy
\ge \int_{B(0, \frac{3r}2)^c}
K_{B(A, \kappa r)}(A, y)u(y)dy\\
&\ge & c_2 \int_{B(0, \frac{3r}2)^c} J(A-y) \frac{(\kappa
r)^\alpha}{\ell((\kappa r)^{-2})} u(y)dy
\end{eqnarray*}
for some constant $c_2>0$. Note that $|y-A|\le 2|y|$ in $
A(0,\frac{3r}2, 4)$ and that $|y-A|\le |y|+1$ for $|y|\ge 4$. Hence
by the monotonicity of $j$, \eqref{ksv-H:1} and \eqref{ksv-H:2},
$$
w(A)\,\ge\, c_3\,\frac{(\kappa r)^\alpha}{\ell((\kappa r)^{-2})}
\int_{B(0, \frac{3r}2)^c} J( y)
u(y) dy
$$
for some constant $c_3>0$. Therefore, by \eqref{ksv-e:l2.2}
$$
w(A)\ge c_3 c_1^{-1} \,\kappa^{\alpha} \frac{\ell((2r)^{-2})}
{\ell((\kappa r)^{-2})} \,u(x)\, .
$$
{
$\Box$
}
\begin{defn}\label{ksv-fat}
Let $\kappa \in (0,1/2]$. We say that an open set $D$ in ${\mathbb R}^d$ is
$\kappa$-fat if there exists $R>0$ such that for each $Q \in
\partial D$ and $r \in (0, R)$, $D \cap B(Q,r)$ contains a ball
$B(A_r(Q),\kappa r)$. The pair $(R, \kappa)$ is called the
characteristics of the $\kappa$-fat open set $D$.
\end{defn}
Note that all Lipschitz domains and all non-tangentially accessible
domains (see \cite{JK} for the definition) are $\kappa$-fat. The
boundary of a $\kappa$-fat open set can be highly nonrectifiable
and, in general, no regularity of its boundary can be inferred. A
bounded $\kappa$-fat open set may be disconnected.
Since $\ell$ is slowly varying at $\infty$, we get the following Carleson
estimate from Lemma \ref{ksv-l2.2}.
\begin{corollary}\label{ksv-c:Carl}
Suppose that $D$ is a $\kappa$-fat open set with the
characteristics $(R, \kappa)$. There exists a constant $C_{24}$
depending on the characteristics $(R,\kappa)$ such that if $r \le
R\wedge \frac12$, $Q\in \partial D$, $u\ge0$ is regular harmonic in
$D\cap B(Q, 2r)$ with respect to $X$ and $u=0$ in $D^c\cap B(Q,
2r)$, then
$$
u\left(A_r(Q)\right) \,\ge C_{24}\, u(x)\, , \quad \forall
x \in D \cap B(Q,\frac32 r)\, .
$$
\end{corollary}
The next theorem is a boundary Harnack principle for (possibly
unbounded) $\kappa$-fat open sets, and it is the main result of this
subsection.
\begin{thm}\label{ksv-BHP}
Suppose that $D$ is a $\kappa$-fat open set with the
characteristics $(R, \kappa)$. There exists a constant $C_{25}>1$
depending on the characteristics $(R,\kappa)$ such that if $r \le
R\wedge \frac14$ and $Q\in\partial D$, then for any nonnegative
functions $u, v$ in ${\mathbb R}^d$ which are regular harmonic in $D\cap B(Q,
2r)$ with respect to $X$ and vanish in $D^c \cap B(Q, 2r)$, we have
$$
C_{25}^{-1}\,\frac{u(A_r(Q))}{v(A_r(Q))}\,\le\, \frac{u(x)}{v(x)}\,\le
C_{25}\,\frac{u(A_r(Q))}{v(A_r(Q))}, \qquad x\in D\cap B(Q, \frac{r}2)\, .
$$
\end{thm}
\noindent{\bf Proof.}
Since $\ell$ is slowly varying at $\infty$ and
locally bounded above and below by positive constants,
there exists a
constant $c>0$ such that for every $r\in (0,1/4)$,
\begin{equation}\label{ksv-lll}
\max \left(\frac{\ell(r^{-2})}{ \ell((\kappa r)^{-2}) },\,
\frac{\ell((2r)^{-2})}{ \ell((4r)^{-2})},\, \frac{\ell((\frac{\kappa
r}{2})^{-2})}{\ell((4r)^{-2})},\, \frac{\ell((\kappa
r)^{-2})}{\ell((2r)^{-2})} \right) \,\le\, c\, .
\end{equation}
Fix $r\in (0, R\wedge \frac14)$ throughout this proof. Without loss of generality
we may assume that $Q=0$ and $u(A_r(0))=v(A_r(0))$. For simplicity,
we will write $A_r(0)$ as $A$ in the remainder of this proof. Define
$u_1$ and $u_2$ to be regular harmonic functions in $D\cap B(0, r)$
with respect to $X$ such that
$$
u_1(x)=\left\{
\begin{array}{ll}
u(x), & x\in A(0,r, \frac{3r}{2}),\\
0, & x\in B(0, \frac{3r}2)^c\cup(D^c\cap B(0, r))
\end{array}
\right.
$$
and
$$
u_2(x)=\left\{
\begin{array}{ll}
0, & x\in A(0,r, \frac{3r}{2}), \\
u(x), & x\in B(0, \frac{3r}2)^c\cup(D^c\cap B(0, r)).
\end{array}
\right.
$$
Note that $u=u_1+u_2$. If $D\cap A(0,r,\frac{3r}{2})=\emptyset$,
then $u_1=0$ and the inequality (\ref{ksv-e2.6}) below holds
trivially. So we assume that $D\cap A(0,r,\frac{3r}{2})$ is not empty.
Then by Lemma \ref{ksv-l2.2},
$$
u(y)\le c_1 \kappa^{-\alpha} \frac{\ell((\kappa
r)^{-2})}{\ell((2r)^{-2})}\, u(A), \qquad y\in D\cap B(0, \frac{3r}2),
$$
for some constant $c_1>0$.
For $x\in D\cap B(0, \frac{r}2)$, we have
\begin{eqnarray*}
u_1(x)&=& {\mathbb E}_x\left[u(X_{\tau_{D\cap B(0, r)}}): X_{\tau_{D\cap B(0,
r)}}\in
D\cap A(0,r,\frac{3r}{2})\right]\\
&\le&\left(\sup_{D\cap A(0,r,\frac{3r}{2})}u(y)\right)
{\mathbb P}_x\left( X_{\tau_{D\cap B(0, r)}}\in
D\cap A(0,r,\frac{3r}{2})\right) \\
&\le&\left(\sup_{D\cap A(0,r,\frac{3r}{2})}u(y)\right)
{\mathbb P}_x\left( X_{\tau_{D\cap B(0, r)}}\in
B(0,r)^c \right) \\
&\le&c_1\,\kappa^{-\alpha} \frac{\ell((\kappa
r)^{-2})}{\ell((2r)^{-2})} \,u(A) \,{\mathbb P}_x\left( X_{\tau_{D\cap B(0,
r)}}\in B(0,r)^c \right).
\end{eqnarray*}
Now using Lemma \ref{ksv-l2.3} (with $D$ replaced by $D\cap B(0,r)$) and \eqref{ksv-lll}, we have that for $ x\in D\cap B(0, \frac{r}2)$,
\begin{eqnarray}
&&u_1(x)\nonumber\\
&&\le\, c_2\,\kappa^{-d-\frac32\alpha }\, \frac {\ell((\kappa
r)^{-2})}{\ell((2r)^{-2})}\frac{\ell(r^{-2})}{\ell((4r)^{-2})}\,
\left(1+\frac{\ell((\frac{\kappa r}{2})^{-2})}{\ell((4r)^{-2})}
\right)\,u(A)\times \nonumber\\
&&\quad \times\ {\mathbb P}_x\left( X_{\tau_{(D\cap B(0,r))\setminus
B(A, \frac{\kappa r}2)}} \in B(A, \frac{\kappa r}2)\right) \nonumber\\
&&\le\,c_3 \,u(A)\,{\mathbb P}_x\left( X_{\tau_{(D\cap B(0,r))\setminus B(A,
\frac{\kappa r}2)}} \in B(A, \frac{\kappa r}2)\right) \label{ksv-e2.3}
\end{eqnarray}
for some positive constants $c_2$ and $c_3=c_3(\kappa)$.
Since $r <1/4$, Theorem \ref{ksv-T:Har} implies that
$$
u(y)\,\ge\, c_4\,u(A), \qquad y\in B(A, \frac{\kappa r}2)
$$
for some constant $c_4>0$. Therefore for $x\in D\cap B(0,
\frac{r}2)$
\begin{equation}\label{ksv-e2.4}
u(x) \,=\, {\mathbb E}_x\left[u(X_{\tau_{(D\cap B(0, r))\setminus B(A,
\frac{\kappa r}2)}}) \right] \,\ge\, c_4\,u(A)\,
{\mathbb P}_x\left(X_{\tau_{(D\cap B(0,r))\setminus B(A, \frac{\kappa r}2)}}
\in B(A, \frac{\kappa r}2)\right).
\end{equation}
Using (\ref{ksv-e2.3}), the analogue of (\ref{ksv-e2.4}) for $v$, and the
assumption that $u(A)=v(A)$, we get that for $x\in D\cap B(0,
\frac{r}2)$,
\begin{equation}\label{ksv-e2.6}
u_1(x)\,\le \,c_3\,v(A)\, {\mathbb P}_x\left(X_{\tau_{(D\cap B(0, r))
\setminus B(A, \frac{\kappa r}2)}} \in B(A, \frac{\kappa
r}2)\right)\,\le \,c_5\,v(x)
\end{equation}
for some constant $c_5=c_5(\kappa)>0.$ For $x\in D\cap B(0, r)$, we
have
\begin{eqnarray*}
u_2(x)&=& \int_{B(0, \frac{3r}2)^c}K_{D\cap B(0, r)}
(x, z)u(z)dz\\
&=& \int_{B(0, \frac{3r}2)^c}
\int_{D\cap B(0, r)} G_{D\cap B(0, r)}(x, y) J(y-z)dy\, u(z)\, dz.
\end{eqnarray*}
Let
$$
s(x):=\int_{D\cap B(0, r)}G_{D\cap B(0, r)}(x, y)dy.
$$
Note that for every $y \in B(0,r)$ and $z \in B(0, \frac{3r}2)^c$,
$$
\frac13|z| \,\le\, |z|-r \,\le\, |z|-|y| \, \le\, |y-z|\, \le \,
|y|+|z|\, \le \, r+|z| \le 2 |z|\, ,
$$
and that for every $y \in B(0,r)$ and $z \in B(0, 12)^c$,
$$
|z|-1\, \le \, |y-z|\, \le \,|z|+1.
$$
So by the monotonicity of $j$, for every $y \in B(0,r)$ and $z \in
A(0, \frac{3r}2, 12)$,
$$
j(12|z|) \, \le \,j(2|z|) \, \le \,J(y-z) \, \le \, j\left(\frac{|z|}{3}\right) \,
\le \,j\left(\frac{|z|}{12}\right)\, ,
$$
and for every $y \in B(0,r)$ and every $z \in B(0,12)^c$,
$$
j(|z|-1)\, \le \,J(y-z) \, \le \, j(|z|+1).
$$
Using \eqref{ksv-H:1} and \eqref{ksv-H:2}, we have that, for every $y \in
B(0,r)$ and $z \in B(0, \frac{3r}2)^c$,
$$ c_6^{-1} j(|z|) \, \le \,J(y-z) \, \le \,c_6\,j(|z|)
$$
for some constant $c_6>0$. Thus we have
\begin{equation}\label{ksv-e2.5}
c_7^{-1}\le \left(\frac{u_2(x)}{u_2(A)}\right)\left(\frac{s(x)}{s(A)}\right)^{-1}\le c_7,
\end{equation}
for some constant $c_7>1$.
Applying (\ref{ksv-e2.5}) to $u$ and $v$, and Lemma \ref{ksv-l2.2} to $v$ and
$v_2$, we obtain for $x\in D\cap B(0, \frac{r}2)$,
\begin{eqnarray}\label{ksv-e2.7}
u_2(x) &\le & c_7\,u_2(A)\,\frac{s(x)}{s(A)}\,\le\, c_{7}^2\,
\frac{u_2(A)}{v_2(A)}\,v_2(x)\, \le\, c_{8}\, \kappa^{-\alpha}
\frac{\ell((\kappa r)^{-2})}
{\ell((2r)^{-2})}\frac{u(A)}{v(A)}\,v_2(x)\nonumber \\
& = &
c_{8}\,\kappa^{-\alpha} \frac{\ell((\kappa
r)^{-2})}{\ell((2r)^{-2})}\,v_2(x),
\end{eqnarray}
for some constant $c_8>0.$
Combining (\ref{ksv-e2.6}) and (\ref{ksv-e2.7}) and applying \eqref{ksv-lll}, we
have
$$
u(x)\,\le\, c_{9} \,v(x), \qquad x\in D\cap B(0, \frac{r}2),
$$
for some constant $c_{9}=c_{9}(\kappa)>0.$ {
$\Box$
}
\noindent {\bf Acknowledgment:} We thank Qiang Zeng for his comments
on the first version of this paper.
We also thank the referee for helpful comments.
\end{doublespace}
\end{document}
{\bf Panki Kim}
Department of Mathematical Sciences and Research Institute of
Mathematics,
Seoul National University, San56-1 Shinrim-dong Kwanak-gu, Seoul
151-747, Republic of Korea
E-mail: \texttt{[email protected]}
{\bf Renming Song}
Department of Mathematics, University of Illinois, Urbana, IL 61801,
USA
E-mail: \texttt{[email protected]}
{\bf Zoran Vondra{\v{c}}ek}
Department of Mathematics, University of Zagreb, Zagreb, Croatia
Email: \texttt{[email protected]}
\begin{document}
\title{Anomaly detection in laser-guided vehicles' batteries: a case study}
\begin{abstract}
Detecting anomalous data within time series is a very relevant task in pattern recognition and machine learning, with many possible applications, ranging from disease prevention in medicine (e.g., detecting early alterations of the health status before they can clearly be labeled as ``illness'') to the monitoring of industrial plants. Regarding the latter application, detecting anomalies in an industrial plant's status firstly prevents serious damage that would require a long interruption of the production process. Secondly, it permits optimal scheduling of maintenance interventions by limiting them to urgent situations, whereas maintenance otherwise typically follows a fixed prudential schedule according to which components are substituted well before the end of their expected lifetime.
This paper describes a case study regarding the monitoring of the status of the batteries of Laser-Guided Vehicles (LGVs), on which we worked as our contribution to the SUPER project (Supercomputing Unified Platform, Emilia Romagna), aimed at establishing and demonstrating a regional High-Performance Computing platform that is going to represent the main Italian supercomputing environment in terms of both computing power and data volume.
\end{abstract}
\section{Introduction}
The term {\em anomaly detection}, with regard to time series analysis, has long been used in statistics, even if not so frequently, as a possible alternative to the more common term {\em outlier detection} \cite{kozma1994anomaly}.\\
Recently, such a term has more and more frequently referred to the analysis of time series coming from sensors describing the health status of industrial plants. Within this context, it has rapidly gained relevance as a research and real-life topic. This is partially due to the actions, taken within the Industry 4.0 framework, that have encouraged industrial plant connectivity and the use of sensor networks and Internet of Things (IoT) devices for real-time monitoring of properties describing a plant's status. \\
In a production process, the first concern is indeed to guarantee that plants are always in a correct working status, since the cost of systematic maintenance, even when performed according to a non-optimal, over-frequent schedule, is lower than the cost of suspending production following an unexpected (and possibly catastrophic) fault.
Therefore, accurately monitoring their plants' status allows companies to define models that may then be applied to plants' maintenance from two different viewpoints:
\begin{itemize}
\item {\em anomaly detection}, i.e., a timely detection of a possible sudden malfunctioning before it produces an outburst of problems that may damage the plant and cause an expensive long-term suspension of the production process;
\item {\em remaining lifetime estimation}, i.e., an estimate of the remaining time before the probability of a fault occurring becomes higher than a preset threshold.
\end{itemize}
Monitoring data from the above two viewpoints is the basis for the optimization of a plant's maintenance schedule, usually termed {\em predictive maintenance} (PM).
In the absence of long-term dense monitoring data, PM can only be based on empirical knowledge that derives from a macroscopic and generally incomplete plant description. Indeed, defining a robust model of the plant's behavior requires a long time; in the meantime, maintenance has to be cautiously performed at more or less prefixed intervals that are much shorter than the expected lifetime of a plant's critical components.
Even if the term {\em predictive maintenance} has been used for more than twenty years \cite{levitt2003complete,mobley2002introduction}, the models to which such a term refers nowadays are much different than the ones that were used originally. This equally depends on the different quality and amount of data that can be acquired from plants and on the recent developments of machine-learning models in terms of quality, intrinsic power, and capability of dealing with huge amounts of complex data. In fact, the vast majority of PM applications nowadays rely on machine-learning methods \cite{carvalho2019machine,ccinar2020machine, mo2021evolutionary}, even if one should not be surprised to notice that, in spite of very different limitations, \cite{kozma1994anomaly} already dealt with neural network-based models.
However, in most cases, the huge amount of data currently available is very far from ideal from a machine-learning (ML) perspective, especially when dealing with binary classification problems like the one that we will consider in the case study described in the following sections.\\
From this viewpoint, theoretical and practical rules suggest that, in solving a binary classification
problem, an optimal training data set for an ML application should include
roughly as many positive as negative samples, i.e., it should be what is usually
termed a {\em balanced} data set. To obtain data satisfying such a requirement, it would obviously be too costly and impractical to replicate large, complex industrial plants in a laboratory to study their behavior under extreme stress conditions up to the occurrence of faults that often imply the destruction of some of their components. Because of this, the only data available for building machine-learning models are those acquired from the plants during the actual production cycle. During the latter, before any {\em a posteriori} optimization of the maintenance schedule can be applied, the plant is kept under a prudential (and costly) maintenance schedule, which relies on a strong underestimation of the remaining lifetime of components to avoid virtually any anomalous event.
Thus, even in the presence of an infrastructure that would allow data about faults to be acquired, the data acquired from the sensors almost exclusively relate to healthy plants, which makes it almost impossible to characterize possible
anomalies in the plant based on classical supervised-learning approaches.\\
The most common way for ML methods to deal with this problem is therefore to build a model of the healthy plant (usually referred to as a {\em digital twin} of the plant) and compare the real data coming from the plant's sensor network to the corresponding data produced by the digital twin over a certain time interval. The interpretation of the results of such a comparison assumes that a small divergence between the two time series corresponds to a healthy status of the plant, while the occurrence of larger discrepancies may suggest that something is probably going wrong with it.
Within a larger regional project on Big Data analysis (SUPER -- ``Supercomputing Unified Platform, Emilia Romagna''), we investigated the effectiveness of these machine learning-based analysis methodologies by studying, applying, and validating some state-of-the-art Deep Learning techniques \cite{goodfellow2016deep} for the automatic analysis of large amounts of data from an industrial plant.
\section{A real-world test case}
Aiming at the aforementioned goals, we analyzed the data provided by Elettric80 spa, a partner of the project. The dataset they provided is the result of a data ingestion process conducted by the company in order to record and monitor various functional parameters of their LGVs (Laser Guided Vehicles) that operate in a water-bottling plant.
The available data relate to 25 LGVs and were collected on a daily basis in the period between January 2020 and March 2021 for about ten hours each day. In particular, for each robot, the data refer to:
\begin{itemize}
\item Vehicle system variables (status, current operation)
\item Battery: temperatures, state of charge, charge cycles, amperage and voltage
\item Vehicle navigation: LGV position relative to environmental markers
\item Mast of the automatic forklift: weight on board, height reached, estimate of the chain elongation
\item Tilt: lifting cylinder pressure, working time, etc.
\item Hydraulic pumps for lifting: inverter temperature, speed setpoint, torque feedback, etc.
\item Steering: inverter temperature, motor speed, etc.
\end{itemize}
The dataset allows one to deal with different forecasting tasks; we focused on identifying anomalies in the temperature of lithium batteries, since this study was of particular interest to the company. In fact, these batteries may malfunction due to wear over time, but also due to overheating if the air filters of the LGV are clogged by the dust that typically affects industrial plants. As a result, a model has been developed that can automatically detect anomalous temperature behaviors and act as an alarm urging maintenance and extraordinary cleaning.
\section{Automatic detection of anomalies in the temperature of lithium batteries}
The dataset does not contain additional ``external'' information that labels the data acquired within the temporal windows into which the dataset was divided as normal or anomalous. In fact, we consider the data sequence acquired within a time window of $250~s$ as a sample to be classified.
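To make the windowing concrete, the following minimal sketch (illustrative code, not the project's actual implementation; the array name is hypothetical) splits a temperature signal into fixed-length windows; the window length of $1000$ samples (i.e., $250~s$ at the $250~ms$ sampling period) and the $50\%$ overlap anticipate the values reported in the processing pipeline described below.
\begin{verbatim}
import numpy as np

def sliding_windows(series, length=1000, overlap=0.5):
    """Split a 1-D signal into fixed-length windows with the given overlap.
    At a 250 ms sampling period, length=1000 corresponds to 250 s windows."""
    step = max(1, int(length * (1.0 - overlap)))
    starts = range(0, len(series) - length + 1, step)
    return np.stack([series[s:s + length] for s in starts])

# e.g., temperatures recorded during one recharge phase (hypothetical array):
# windows = sliding_windows(recharge_temperatures)
\end{verbatim}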
According to this organization, we designed an architecture for data analysis within which a neural network-based autoencoder was trained to generate a model of the normal data. Such a model is used as a reference to identify anomalous data (i.e., those data that cannot be correctly replicated by the model) in a totally unsupervised way.
The autoencoder receives a sequence/sample as input and compresses it by encoding the useful information as the output of a hidden layer much smaller than the input layer, performing what can be termed {\em unsupervised feature learning}. The self-supervised autoencoder architecture is completed by adding a further hidden layer, symmetric to the one used in the encoder part of the network, and an output layer having the same size as the input layer. The full network is then trained to reproduce the input signal (i.e., the data acquired from the plant) in the output layer (signal reconstruction).
If this type of architecture is trained on examples of data acquired under normal working conditions of the plant, it will likely make only a small error in replicating such signals, while failing to reconstruct anomalous signals, or at least reconstructing them less precisely. This behavior can be explained by the fact that the network has learned the probability distribution of 'normal' data and therefore has no knowledge of 'anomalous' samples drawn from a different distribution, to which it reacts in an unpredictable way.
Two different neural-network architectures were tested to implement the autoencoder: a Multi-Layer Perceptron (MLP) and a GRU (Gated Recurrent Unit) recurrent neural network. This design choice is justified by the state-of-the-art results achieved by these neural models \cite{Liufaultrecurrent, Loyseqgru,Provotarlstmautoenc}. In particular, recurrent networks such as the GRU and LSTM (Long Short-Term Memory) networks were chosen to exploit their memory and their ability to embed time dependencies. We also tested LSTMs, but they confirmed some documented adverse computational effects, such as gradient explosion/vanishing \cite{hochreitervanishing98,explovanishribeiro20}: with the data we considered, overflows of the network weights did occur. Although these could be mitigated using gradient-clipping techniques, we could not achieve the satisfactory results obtained with the GRUs, which in turn come at the cost of higher computational requirements.
For computational efficiency, we also considered a more traditional MLP-based approach \cite{haykinneuralnets09}, whose lower computational burden when processing big data makes it attractive in view of a subsequent use on a much greater amount of data than was available in our experimentation.\\
{\noindent \em Operational pipeline \\}
{\noindent The} investigation activity with both architectures was conducted according to the following pipeline:
\begin{enumerate}
\item {\em Data cleaning}. We identified and corrected data inconsistencies and dealt with missing data.
\item {\em Identification of the operating scenario}. A common operating scenario was identified for all LGVs in order to exclude factors that may alter the temperature, such as transporting particularly heavy or bulky packages or making long journeys. The best scenario was identified in the battery recharging phases, since all LGVs are frequently in this phase, in similar conditions and positions within the working environment. All the batteries are recharged when their remaining charge drops to between $70\%$ and $80\%$, so each robot is recharged several times a day.
\item {\em Data analysis}. The batteries are recharged until $90\%$ of the full charge is reached; depending on the starting conditions, this takes a time on the order of minutes.
\item {\em Time series processing}. Since the data is sampled every $250~ms$, each recharge phase has been divided into time windows $1000$ samples long with an overlap of $50\%$ (a windowing sketch is given after this list). The size of this window was identified through a parameter optimization process based on a grid search.
\item {\em Dataset creation}. The available data have been divided into a training and a test set on a time basis: the training set comprises the data from $2020$, while the test set comprises those acquired during $2021$.
\item {\em Parameter optimization and choice}. We set aside a validation set from the training data and used it to choose the parameters that define an anomaly (i.e., the reconstruction-error threshold).
\item {\em Analysis of the anomalies detected}.
\end{enumerate}
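As an illustration of the windowing step (item 4 of the pipeline), the following Python sketch splits a recharge-phase signal sampled every $250~ms$ into $1000$-sample windows with $50\%$ overlap; the synthetic signal and the helper name are illustrative.
\begin{verbatim}
import numpy as np

def sliding_windows(series, length=1000, overlap=0.5):
    """Split a 1-D signal into fixed-length windows with fractional overlap."""
    step = int(length * (1 - overlap))
    n_win = 1 + max(0, (len(series) - length) // step)
    return np.stack([series[i * step: i * step + length] for i in range(n_win)])

# A recharge phase of about 10 minutes sampled every 250 ms -> 2400 samples.
recharge = np.random.normal(30.0, 0.2, 2400)
windows = sliding_windows(recharge, length=1000, overlap=0.5)
print(windows.shape)    # (3, 1000): 250 s windows with 50% overlap
\end{verbatim}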
\section{Data Processing with GRU networks}
GRU-type recurrent networks have the main advantage of exhibiting memory properties; they can therefore operate directly on time series, processing one sample at a time based on the current input value as well as on the previously processed portion of the sequence. In our use case, the input time series is represented by the sampling of the temperature of the LGV batteries being recharged within a pre-defined time window. Each instance is made up of $1000$ consecutive samples of the variable acquired within a $250~s$ long time window.
The autoencoder we designed to create the data model uses two separate GRU networks: one for the encoding (learning of a representation for the time series) and one for the decoding (reconstruction of the input signal as network output) operations. In the design phase, recurrent networks of different lengths in terms of memory cells were evaluated to study the model dependence on this parameter, i.e., on a lower or higher compression of the information in the encoding phase. Specifically, networks with $128, 256, 512,$ and $1024$ GRU cells were tested.
\begin{figure}
\caption{GRU (Gated Recurrent Unit).}
\label{fig:Figure1}
\end{figure}
Each GRU unit corresponds to a single time step: it receives the corresponding input sample along with the output of the previous unit (or the initialization vector $h$ for the first unit). This limits the network depth, which could theoretically be unlimited, and thus regulates the "forgetting" of the effects of the partial time sequence processed up to that point.
One unit of this model implements an addition operation ($+$), a sigmoid function ($\sigma$), a Hadamard (element-wise) product ($\times$), and a {\em tanh} function. In practice, GRUs are capable of storing and filtering information using their update and reset gates. This mitigates the vanishing gradient problem, i.e., the fading of the gradient propagated back during training as the network depth increases, since the model retains within its state a memory of the previously analyzed data and transmits it to the unit corresponding to the next time step.
In order to identify the best architecture capable of minimizing the signal reconstruction error using GRU networks, we trained different models in which the main training parameters were kept the same, except for the number of memory cells (a Keras sketch of this setup follows the list):
\begin{itemize}
\item Learning rate = $0.001$
\item Batch size = $32$
\item Number of epochs = $32$
\end{itemize}
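The following Keras sketch combines the architecture described above with the hyperparameters listed in this section: a sequence-to-sequence GRU autoencoder trained to reconstruct its input with the MSE loss. The synthetic data, the Adam optimizer, and the use of the smallest tested cell count are assumptions made for illustration; the framework actually used in the project is not specified here.
\begin{verbatim}
import numpy as np
from tensorflow.keras import layers, models, optimizers

# Toy stand-in for the windowed temperature data: (windows, 1000 samples, 1 feature).
x_train = np.random.normal(30.0, 0.2, size=(64, 1000, 1)).astype("float32")
x_val = np.random.normal(30.0, 0.2, size=(16, 1000, 1)).astype("float32")

n_units = 128                                   # smallest of the tested cell counts
inputs = layers.Input(shape=(1000, 1))
code = layers.GRU(n_units)(inputs)              # encoder: final state = compressed code
repeated = layers.RepeatVector(1000)(code)      # feed the code to every decoder step
decoded = layers.GRU(n_units, return_sequences=True)(repeated)
outputs = layers.TimeDistributed(layers.Dense(1))(decoded)
autoencoder = models.Model(inputs, outputs)

# Hyperparameters as listed above; the Adam optimizer is an assumption.
autoencoder.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss="mse")
autoencoder.fit(x_train, x_train,               # reconstruction target = input
                validation_data=(x_val, x_val),
                batch_size=32, epochs=32)
\end{verbatim}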
The networks were compared on a validation set using the same loss function used for training, i.e., the Mean Squared Error (MSE) between the input time sequence and the reconstructed (output) sequence (Figure 2).
\begin{figure}
\caption{Comparison of the results on the training set and on the validation set.}
\label{fig:Figure2}
\end{figure}
Subsequently, this result was used to analyze the distribution of the error over the samples of the validation set and of the test set, in order to determine the error threshold beyond which a temporal sequence is identified as anomalous based on its reconstruction error (Figure 3).
\begin{figure}
\caption{Analysis of the distribution of the reconstruction error (MSE) with different numbers of GRU cells. The average error on the training set is marked in black, while the possible thresholds, identified as multiples of the standard deviation of the data, are marked in red.}
\label{fig:Figure3}
\end{figure}
The best results were obtained with the model with $1024$ GRU units. With an MSE threshold equal to $0.0007$, it identifies as anomalous about $9.66\%$ of the samples in the validation set and about $10.19\%$ of those in the test set.
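A minimal sketch of this thresholding step is given below: the per-window reconstruction MSE is computed, and a candidate threshold is taken as the mean validation error plus a multiple of its standard deviation, mirroring the candidates of Figure 3. The function names are illustrative, and the commented usage assumes the autoencoder and window arrays of the previous sketch.
\begin{verbatim}
import numpy as np

def reconstruction_mse(model, windows):
    """Per-window reconstruction error (MSE) of a trained autoencoder."""
    recon = model.predict(windows, verbose=0)
    return np.mean((windows - recon) ** 2, axis=tuple(range(1, windows.ndim)))

def mse_threshold(val_errors, k=3.0):
    """Candidate threshold: mean validation error plus k standard deviations."""
    return float(np.mean(val_errors) + k * np.std(val_errors))

# Usage (with `autoencoder`, `x_val` and a test array from the previous sketch):
#   val_err = reconstruction_mse(autoencoder, x_val)
#   thr = mse_threshold(val_err)     # the text reports thr = 0.0007 for 1024 units
#   is_anomalous = reconstruction_mse(autoencoder, x_test) > thr
\end{verbatim}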
\section{Processing data with a Multi-Layer Perceptron}
Recurrent networks have proven to be excellent tools for identifying anomalies, while still showing some computational efficiency problems with large datasets. For this reason, we have also tested an architecture based on Multi-Layer Perceptrons in order to address larger training sets with a limited computational burden.
Differently from recurrent networks, MLPs do not process time series with architectures forming a chain of units, each corresponding to a different acquisition time. Instead, the entire input sequence is fed into the input layer as a single instance (a one-dimensional vector), ignoring the reproduction of the sequence dynamics. The network design was therefore constrained by the size of the time sequences ($1000$ samples). The structure of the autoencoder comprises an input layer of $1000$ neurons and a hidden layer of $200$ neurons in the encoding section; a layer of $40$ neurons as the internal representation (embedding) of the input data; and, finally, a hidden layer of $200$ neurons and an output layer of $1000$ neurons in the decoding section.
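A Keras sketch of this MLP autoencoder, with the layer sizes stated above, is given below; the activation functions and the optimizer are assumptions not specified in the text.
\begin{verbatim}
from tensorflow.keras import layers, models

# Layer sizes follow the text (1000-200-40-200-1000); activations are assumptions.
inputs = layers.Input(shape=(1000,))            # flattened window as a single vector
encoded = layers.Dense(200, activation="relu")(inputs)
code = layers.Dense(40, activation="relu")(encoded)      # internal embedding
decoded = layers.Dense(200, activation="relu")(code)
outputs = layers.Dense(1000, activation="linear")(decoded)
mlp_autoencoder = models.Model(inputs, outputs)
mlp_autoencoder.compile(optimizer="adam", loss="mse")
mlp_autoencoder.summary()
\end{verbatim}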
The reconstruction error was measured as the mean squared error between the vector of input samples and the reconstructed output vector, averaged over all the time sequences in the validation set and, subsequently, in the test set, in order to determine the optimal error threshold for identifying anomalies.
The reconstruction error is distributed according to a Gaussian with mean value $0.176$ and variance $0.02$. The threshold that determines the maximum reconstruction error before considering a sample an anomaly was set to $0.432$, the average value of the reconstruction error in validation plus three times the standard deviation. This choice is based on the confidence intervals of the Gaussian: with this choice over $99\%$ of the distribution centered on the average value is preserved, and only the most 'critical' instances are identified as anomalies.
\begin{figure}
\caption{Reconstruction error on the test set, overlaid with the Gaussian distribution fitted on the validation set (in red) and the corresponding threshold (red bar).}
\label{fig:Figure4}
\end{figure}
Using this threshold, we could identify $548$ anomalies in the test set comprising all data acquired in 2021. We analyzed these data using K-means clustering \cite{macqueen1967kmeans} with Dynamic Time Warping (DTW) \cite{muller2007dynamic} in place of the plain Euclidean distance, for a more robust comparison between the samples and the corresponding centroids. The number of clusters was set to $3$. Finally, these clusters were analyzed using Principal Component Analysis (PCA) \cite{abdi2010principal} and t-distributed Stochastic Neighbor Embedding (t-SNE) \cite{van2008tsne}. Figure 5 displays the clustering results.
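The following sketch reproduces this clustering step with one possible implementation, based on the tslearn library for DTW K-means and scikit-learn for the PCA and t-SNE projections; the toy data, the shortened window length, and the t-SNE perplexity are illustrative choices.
\begin{verbatim}
import numpy as np
from tslearn.clustering import TimeSeriesKMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

# Toy stand-in for the anomalous windows (rows = windows, columns = samples);
# a short window length keeps DTW affordable in this sketch.
anomalies = np.random.normal(30.0, 1.0, size=(60, 200))

# K-means with DTW as the time-warping-aware distance, k = 3 clusters.
km = TimeSeriesKMeans(n_clusters=3, metric="dtw", random_state=0)
labels = km.fit_predict(anomalies)

# Two-dimensional views of the clusters for visual inspection (cf. Figure 5).
pca_2d = PCA(n_components=2).fit_transform(anomalies)
tsne_2d = TSNE(n_components=2, perplexity=10, random_state=0).fit_transform(anomalies)
print(np.bincount(labels))
\end{verbatim}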
\begin{figure}
\caption{Clustering of anomalies.}
\label{fig:Figure5}
\end{figure}
We analyzed the three clusters by observing the corresponding anomalies in the time domain, appreciating the good separation obtained by our unsupervised approach and hypothesizing the possible causes of such a cluster distribution. This analysis made it possible to formulate a hypothesis for each cluster:
\begin{itemize}
\item Cluster 0: These are all the time windows within which one can observe a sudden drop in temperature, of about 5 degrees Celsius on average. In correspondence with these events, the battery stops absorbing current for a few seconds (the value of the current going from negative to zero in Figure 3). The battery power suddenly drops by an average of 10 percent, and then the battery starts recharging again.
\item Cluster 1: The samples appear to be very similar to those of cluster 0. However, the "anomalous" time windows seem to correspond to the end of the recharge of the LGV batteries.
\item Cluster 2: All these time windows show an abnormal and sudden rise in temperature, at the end of which the battery begins to absorb current and the remaining charge begins to increase. The anomalous condition seems to affect the LGV batteries in the initial moments of recharging, as shown in Figure 4.
\end{itemize}
\begin{figure}
\caption{A sample from cluster 0 (left) and a sample from cluster 2 (right)}
\label{fig:Figure67}
\end{figure}
In conclusion, using the MLP-based model with the identified threshold, it was possible to detect some anomalies in the behavior of the batteries. However, further investigations need to be carried out directly on the LGVs in their working environment to correctly identify the causes that lead to such situations.
\section{Conclusions and future developments}
The investigation carried out so far has led to promising results which, however, require further study. The unavailability of labeled data permits only a limited validation of the model. Nevertheless, we could observe that the anomalous time windows reported by the system actually correspond to clearly identifiable situations, so the anomaly detection system we designed accomplishes its task.
A future step could involve the collection of data from an LGV, in its operating environment, in which the clogging of the filters is artificially caused. The resulting temperature data could provide further validation data with which a causal explanation of the anomalies might be established. In particular, in a scenario in which the LGVs are in regular operation, the same 'anomalous' behaviors could be observed not only in the charging phase but also in the robots' braking phase. This situation therefore appears to be worth further investigation, as braking is an opportunity for recovering energy and recharging the batteries on board.
Much larger quantities of data will become available in the future, when the acquisition system we considered is extended to all the plants where the LGVs are in operation and, in any case, new data are acquired from the plants already under monitoring. This will allow further analyses, the most important of which could be the estimation of the remaining useful life (RUL) of the lithium batteries, a parameter that is crucial in view of current and future applications of these batteries in other domains.
\section*{Acknowledgments}
Gianfranco Lombardo and Stefano Cavalli were supported by a grant from Regione Emilia Romagna (SUPER -- "Supercomputing Unified Platform, Emilia Romagna", Azione 1.5.1, POR-FESR 2014-2020 Emilia Romagna).
\end{document}
\begin{document}
\title{Debiased Lasso After Sample Splitting for Estimation and Inference in High Dimensional Generalized Linear Models}
\begin{abstract}
We consider random sample splitting for estimation and inference in high dimensional generalized linear models, where we first apply the lasso to select a submodel using one subsample and then apply the debiased lasso to fit the selected model using the remaining subsample.
We show that, whether or not a prespecified subset of regression coefficients is included, the debiased lasso estimator of the selected submodel after a single split is asymptotically normal. Furthermore, for a set of prespecified regression coefficients, we show that a multiple splitting procedure based on the debiased lasso can address the loss of efficiency associated with sample splitting and produce asymptotically normal estimates under mild conditions.
Our simulation results indicate that using the debiased lasso instead of the standard maximum likelihood estimator in the estimation stage can vastly reduce the bias and variance of the resulting estimates. We illustrate the proposed multiple splitting debiased lasso method with an analysis of the smoking data of the Mid-South Tobacco Case-Control Study.
\end{abstract}
\noindent
{\it Keywords: Asymptotic normality, genetic marker, high-dimensional inference, single nucleotide polymorphism (SNP), sparse regression.}
\spacingset{1.1}
\section{Introduction}
\label{s:intro}
Recent technological advances have allowed biomedical researchers to collect an extraordinary amount of genetic data from patients.
Studies that seek to characterize associations between these genetic factors and medical outcomes face numerous challenges.
Firstly, the standard maximum likelihood estimator (MLE) is not well-defined for classical statistical models such as linear and generalized linear models (GLMs) when the covariates are high dimensional, in particular, $p\gg n$.
Several variable selection and estimation procedures are available for sparse models with high dimensional covariates.
The commonly-used lasso estimator \citep{tibshirani1996regression} introduces a penalty that shrinks many coefficient estimates to exactly zero, thus performing simultaneous variable selection and coefficient estimation.
Similar approaches include the adaptive lasso \citep{zou2006adaptive}, the smoothly clipped absolute deviation (SCAD) estimator \citep{fan2001variable} and elastic net regularization \citep{zou2005regularization}.
Another common approach, which only performs variable selection, is sure independence screening \citep{fan2008sure,fan2010sure}.
Secondly, although penalized regression methods can produce coefficient estimates for GLMs, they typically have substantial biases and do not quantify the uncertainty around these estimates.
Thus they cannot be used to directly construct confidence intervals or to do hypothesis testing in the high dimensional setting.
More recent progress on statistical inference for high dimensional data includes debiased lasso estimators \citep{zhang2014confidence,van2014asymptotically}, MLE-based sample splitting approaches \citep{fei2021estimation,fei2019drawing}, and the decorrelated score test \citep{ning2017general}.
Other work on hypothesis testing without coefficient estimation includes the sample splitting approach for p-values by \citet{meinshausen2009p} and the test developed by \citet{zhu2018linear} for linear models without imposing sparsity assumptions on the regression parameters.
Lastly, we note that some post-model selection inference procedures, such as the simultaneous inference method of \cite{kuchibhotla2020valid} and the sample splitting and bootstrap method of \cite{rinaldo2019bootstrapping}, are applicable to high dimensional covariates, although these in particular only address linear models.
The goal of such methods is distinct from ours.
They are designed to be valid for arbitrary model selection procedures and under potential model misspecification.
This robustness comes at a cost of conservative inference results when a model selection procedure, such as the lasso estimator, can select a superset of the correct model with high probability, which is the setting that we consider below.
Our proposed method is based on sample splitting, a two-stage approach where, first, one part of the data is used to select the covariates to be included in the model, and then the other part is used to estimate the now lower-dimensional model ($p<n$ but $p$ may grow with $n$).
Similar methods have been recently developed for linear models \citep{fei2019drawing,wang2020debiased} and generalized linear models \citep{fei2021estimation} based on the MLE, as well as for Cox proportional hazards models \citep{zhang2022projection} based on the decorrelated score function.
Compared to \cite{fei2021estimation}, we use the lasso instead of sure independence screening for model selection, which allows for more mild theoretical assumptions, as well as the debiased lasso in place of the MLE, which can substantially improve the bias, variance, and confidence interval coverage of the resulting estimators, as shown in our simulations below.
Note that debiased lasso estimation would be equivalent to using the MLE in sample splitting approaches for linear models.
Our contributions are as follows.
For the model selection component, following \cite{huang2012estimation} we show that, under mild sufficient conditions, the lasso screens out a large portion of the noise variables and selects a model containing the true signal variables with high probability in the case of random design.
Existing work on the selected model size and estimation error of the lasso method for linear regression includes \citet{bickel}, \citet{zhang2008sparsity}, \citet{meinshausen2009lasso}, and \citet{zhao2006model}, under differing assumptions on the covariates.
Similar results were shown for generalized linear models by \citet{van2008high} and \cite{huang2012estimation}, only the latter of which addressed model selection consistency and the number of selected noise variables.
Their results imply that the lasso selected model size is of the same order as the number of nonzero regression coefficients, with high probability.
This is essential because if the selected model size is too large then its design matrix may not have full rank, even asymptotically, and thus the regression coefficients will still not be estimable via MLE or even the refined debiased lasso of \citet{xia2021debiased}.
For the lower-dimensional model estimation stage, while a naive approach would be to use the standard maximum likelihood estimator with the selected covariates, which is commonly used in sample splitting literature, we instead apply the recently developed refined debiased lasso approach of \citet{xia2021debiased}.
This method is better suited for models that contain a substantial number of noise variables, as is typically expected after variable selection.
This is a more desirable approach because the conditions for the GLM lasso to not select any noise variables, for example, are known to be quite stringent \citep{huang2012estimation}.
We illustrate the potentially large difference in performance through simulations, where the MLE exhibits a strong bias that inflates the size of estimated coefficients, and this bias increases with the true signal strength, in contrast to the approximately unbiased estimates from the debiased lasso.
Such tendencies are discussed further by \citet{xia2021debiased} in the lower-dimensional setting.
For a set of prespecified coefficients, since their estimators based on a single sample split suffer from a loss of efficiency due to only using part of the data for estimation, we further investigate the idea of averaging estimates across multiple sample splits.
\cite{fei2019drawing} proposed a multiple splitting procedure for linear models, where they required model selection consistency for their asymptotic theory.
\cite{wang2020debiased} discussed multiple splitting for a single treatment variable in linear models, under much milder assumptions.
Recently, \cite{fei2021estimation} proposed a multiple splitting procedure for generalized linear models based on the MLE under a partial orthogonality condition, which requires that the signal variables be independent of the noise variables.
For our proposed multiple splitting procedure, we apply the debiased lasso estimator instead of the MLE in the estimation stage, and show through simulations that our procedure results in approximately unbiased estimates and substantially reduced variability compared to the MLE-based multiple splitting that is often biased. For the theoretical development, we adapt the mild conditions of \cite{wang2020debiased} to GLMs and show the asymptotic normality of our proposed approach.
As evidenced by simulations, our multiple splitting estimator can produce confidence intervals with the nominal coverage.
\section{Methods}
\label{s:method}
We first provide brief overviews of lasso and debiased lasso methods, then introduce our proposed sample splitting debiased lasso methods for GLMs.
\subsection{Notation}
\label{s:notation}
For a positive integer $p$, we denote the size of any index set $S\subseteq\{1,\ldots,p\}$ by $|S|$.
For a $p$-dimensional vector $\boldsymbol{b}$ and a $p\times p$ dimensional matrix $\boldsymbol{B}$, let $\boldsymbol{b}_S$ denote the $|S|$-dimensional subvector with entries indexed by $S$, and similarly $\boldsymbol{B}_S$ denote the $|S|\times |S|$ submatrix with rows and columns indexed by $S$.
The $\ell_q$ norm is denoted as $\norm{\boldsymbol{b}}{q}$ for $q\geq 1$.
For positive sequences $a_n$ and $b_n$, we write $a_n=\bigO{b_n}$ if there exists a constant $c>0$ and $N>0$ such that $a_n/b_n<c$ for all $n>N$, we write $a_n=\littleO{b_n}$ if $a_n/b_n\rightarrow 0$ as $n\rightarrow\infty$, and we write $a_n\sim b_n$ if $a_n=\bigO{b_n}$ and $b_n=\bigO{a_n}$.
Let $(y_i, \boldsymbol{\widetilde{x}}_i)$, $i=1,\ldots,n$, be independent and identically distributed copies of $(y, \boldsymbol{\widetilde{x}})$, where $y$ is a scalar-valued response variable and $\boldsymbol{\widetilde{x}}$ is a $p$-dimensional vector of covariates.
We consider the high dimensional setting where $p$ can be much larger than $n$.
Let $\boldsymbol{x}_i = (1,\boldsymbol{\widetilde{x}}_i^T)^T$ and denote the $n\times (p+1)$ design matrix by $\boldsymbol{X}$, with $i$th row $\boldsymbol{x}_i^T$ and $j$th column $\boldsymbol{X}_j$.
We assume without loss of generality that $\boldsymbol{\widetilde{x}}$ has mean zero.
For any function $f(y,\boldsymbol{x})$, we define $P_nf = n^{-1}\sum_{i=1}^n f(y_i, \boldsymbol{x}_i)$.
We consider generalized linear models \citep{mccullagh2019generalized} with canonical link function and known dispersion parameter.
Denote
\[
\rho_{\boldsymbol{\beta}}(y, \boldsymbol{x})
= \rho(y, \boldsymbol{x}^T\boldsymbol{\beta})
= A(\boldsymbol{x}^T\boldsymbol{\beta}) - y\boldsymbol{x}^T\boldsymbol{\beta}
\]
for a known twice-differentiable function $A(\cdot)$, and its first and second derivatives with respect to $\boldsymbol{\beta}$ as $\dot{\rho}_{\boldsymbol{\beta}}$ and $\ddot{\rho}_{\boldsymbol{\beta}}$, respectively.
The negative log-likelihood for $\boldsymbol{\beta}$ is then $P_n\rho_{\boldsymbol{\beta}} = n^{-1}\sum_{i=1}^n \rho_{\boldsymbol{\beta}}(y_i, \boldsymbol{x}_i)$ with score function $P_n\dot{\rho}_{\boldsymbol{\beta}}=n^{-1}\sum_{i=1}^n \{A'(\boldsymbol{x}_i^T\boldsymbol{\beta}) - y_i \}\boldsymbol{x}_i $ and Hessian matrix $P_n\ddot{\rho}_{\boldsymbol{\beta}}(y_i, \boldsymbol{x}_i) = n^{-1}\sum_{i=1}^n A''(\boldsymbol{x}_i^T\boldsymbol{\beta})\boldsymbol{x}_i\boldsymbol{x}_i^T$.
Denote $\boldsymbol{\Sigma}_{\boldsymbol{\beta}} = E\{P_n\ddot{\rho}_{\boldsymbol{\beta}}(y_i, \boldsymbol{x}_i)\}$.
The $(p+1)$-dimensional unknown true coefficient vector $\boldsymbol{\beta}^0$ is assumed to be sparse, and we denote the index set of signal variables by $S_0 = \{j:\boldsymbol{\beta}_j^0\neq 0\}$, with $s_0=|S_0|$.
The quantities $p$, $\boldsymbol{\beta}^0$, $s_0$, and $S_0$ are allowed to change with $n$.
For practical applications, one would generally consider $s_0$ and $\boldsymbol{\beta}^0_{S_0}$ to be fixed so that the regression coefficients have the usual interpretation in terms of the conditional mean $E[y|\boldsymbol{x}]$ as $p$ grows with $n$. Letting $s_0$ grow with $n$ does not maintain the log odds ratio interpretation of $\boldsymbol{\beta}^0$, for example, in a logistic regression model.
\subsection{The lasso estimator for generalized linear models}
\label{s:lasso}
The lasso estimator $\boldsymbol{\beta}l$ for the GLM parameters $\boldsymbol{\beta}^0$ is, given a tuning parameter $\lambda>0$, a minimizer of the penalized negative log likelihood
\[
\boldsymbol{\beta}l = \argmin_{\boldsymbol{\beta}} P_n\rho_{\boldsymbol{\beta}} + \lambda\norm{\boldsymbol{\beta}}{1}.
\]
In general there is no closed form solution for $\boldsymbol{\beta}l$, but the objective function is convex and can be optimized efficiently with widely available software \citep{friedman2010regularization}.
The penalization shrinks the estimated coefficients towards zero, with many of their values set exactly to zero, resulting in a sparse estimator $\boldsymbol{\beta}l$.
Model selection may then be performed by keeping only the covariates with nonzero estimated coefficients.
In practice, a small subset with a finite number of coefficients may be left unpenalized so that they are never excluded from the selected model.
In our simulations and data analysis, we do not penalize the intercept.
\subsection{The debiased lasso estimator}
\label{s:debiased}
\cite{zhang2014confidence} and \citet{van2014asymptotically} proposed debiased, also called desparsified, lasso estimators for linear models and GLMs, respectively.
This procedure may be used to obtain coefficient estimates and confidence intervals across the entire parameter vector $\boldsymbol{\beta}^0$ in high dimensional settings.
It does not rely on sample splitting or variable selection.
Instead, using the lasso estimator $\boldsymbol{\beta}l$ as an initial value, it performs a single Newton iteration for minimizing the negative log likelihood $P_n\rho_{\boldsymbol{\beta}}$, using an approximate inverse for the Hessian matrix $P_n\ddot{\rho}_{\boldsymbol{\beta}l}$ such as a nodewise lasso estimator.
The resulting desparsified lasso estimator is no longer sparse and is, under further conditions, asymptotically unbiased with a normal distribution.
A key assumption for accurately estimating the inverse of the Hessian matrix in high dimensional settings, however, is the sparsity assumption on $\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}^{-1} = [E\{A''(\boldsymbol{x}^T\boldsymbol{\beta}^0)\boldsymbol{x}\boldsymbol{x}^T\}]^{-1}$.
For generalized linear models, each entry of this matrix generally depends on all the signal variables due to the non-identity link function, so the sparsity assumption is difficult to interpret and often may not hold in practice.
Such issues were discussed further by \citet{xia2021debiased}, who proposed a refined debiased lasso method for lower dimensional GLMs with a diverging number of covariates.
In that setting and under some standard conditions, $\boldsymbol{\Sigma}Hat_{\boldsymbol{\beta}l} = P_n\ddot{\rho}_{\boldsymbol{\beta}l}$ is directly invertible with probability going to one, and the resulting estimator
\begin{equation}\label{dlasso}
\boldsymbol{\beta}dl = \boldsymbol{\beta}l - \boldsymbol{\Theta}Hat_{\boldsymbol{\beta}l} P_n \dot{\rho}_{\boldsymbol{\beta}l}
\end{equation}
with estimated covariance matrix $\boldsymbol{\Theta}Hat_{\boldsymbol{\beta}l}/n = \boldsymbol{\Sigma}Hat_{\boldsymbol{\beta}l}^{-1}/n$
was shown to be asymptotically normal.
Additionally, it outperforms both the original desparsified lasso estimator and the MLE in several simulation settings of lower-dimensional GLMs with sparse regression coefficients.
It can also be computed using standard GLM software packages for the lasso and MLE.
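To make the correction (\ref{dlasso}) concrete, the following numpy sketch spells it out for a logistic regression with a low-dimensional design matrix. The paper's computations use standard R routines (glmnet and glm); this Python fragment is only an illustration of the formula, and the function name is ours.
\begin{verbatim}
import numpy as np

def debiased_lasso_logistic(X, y, beta_lasso):
    """One-step Newton correction of an initial lasso fit, as in (1), for a
    logistic regression with a low-dimensional design matrix X (y coded 0/1).
    Returns the corrected estimate and the covariance estimate Theta_hat / n."""
    n = X.shape[0]
    p_hat = 1.0 / (1.0 + np.exp(-X @ beta_lasso))   # A'(x^T beta) for the logit link
    score = X.T @ (p_hat - y) / n                   # P_n rho_dot at the lasso fit
    hessian = (X * (p_hat * (1.0 - p_hat))[:, None]).T @ X / n   # P_n rho_ddot
    theta_hat = np.linalg.inv(hessian)
    beta_dl = beta_lasso - theta_hat @ score
    return beta_dl, theta_hat / n
\end{verbatim}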
\subsection{The debiased lasso after single sample splitting}
\label{s:sample-splitting}
We propose a debiased lasso after sample splitting procedure that uses the lasso for model selection and the refined debiased lasso for estimation and inference.
For a fixed splitting proportion $q\in (0,1)$, we randomly split the sample $(y_i,\boldsymbol{x}_i)_{i=1}^n$ into two subsets: $\mathcal{D}_1$ containing $n_1= qn$ individuals to be used for variable selection, and $\mathcal{D}_2$ containing $n_2 = n-n_1$ individuals to be used for estimation.
For a pre-specified fixed set of coefficient indices $S$, which is allowed to be the empty set, do the following (a code sketch is given after the list):
\begin{enumerate}
\item \label{ss-selection} using the subsample $\mathcal{D}_1$, fit a GLM lasso estimator $\boldsymbol{\beta}l$ to obtain the selected model $\widehat{S} = S\cup \{j:\boldsymbol{\beta}l_j\neq 0\}$;
\item\label{ss-estimation}
using the subsample $\mathcal{D}_2$, compute the debiased lasso estimator $\boldsymbol{\beta}dl_{\widehat{S}}$ for $\boldsymbol{\beta}^0_{\widehat{S}}$ based on a GLM with covariates $\widehat{S}$, and use the estimated covariance matrix of $\boldsymbol{\beta}dl_{\widehat{S}}$, $\boldsymbol{\Theta}Hat_{\boldsymbol{\beta}l,\widehat{S}}/n_2$, to construct $100(1-\alpha)\%$ confidence intervals for any contrast $\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\beta}^0_{\widehat{S}}$ based on a normal distribution.
\end{enumerate}
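The two steps above can be sketched as follows for logistic regression. Scikit-learn's $\ell_1$-penalized fit stands in for the R glmnet routine used in the paper, the penalty level is fixed rather than chosen by cross-validation, and the handling of the intercept penalty may differ from glmnet, so this is an illustrative sketch rather than the exact procedure.
\begin{verbatim}
import numpy as np
from sklearn.linear_model import LogisticRegression

def single_split_debiased_lasso(X, y, S=(), q=0.5, C=0.1, seed=0):
    """Sketch of the single-split procedure for logistic regression (y in {0,1}):
    l1-penalized selection on D1, then a one-step-debiased refit of the
    selected low-dimensional model on D2, as in equation (1)."""
    rng = np.random.default_rng(seed)
    n = len(y)
    perm = rng.permutation(n)
    d1, d2 = perm[: int(q * n)], perm[int(q * n):]

    # Stage 1 (selection): lasso-penalized logistic regression on D1.
    sel = LogisticRegression(penalty="l1", solver="liblinear", C=C).fit(X[d1], y[d1])
    S_hat = sorted(set(np.flatnonzero(sel.coef_.ravel())) | set(S))

    # Stage 2 (estimation): lasso initial fit on D2 restricted to S_hat,
    # followed by the one-step Newton correction.
    X2 = np.column_stack([np.ones(len(d2)), X[np.ix_(d2, S_hat)]])
    y2 = y[d2]
    init = LogisticRegression(penalty="l1", solver="liblinear", C=C).fit(X2[:, 1:], y2)
    beta = np.concatenate([init.intercept_, init.coef_.ravel()])
    p_hat = 1.0 / (1.0 + np.exp(-X2 @ beta))
    score = X2.T @ (p_hat - y2) / len(y2)
    hessian = (X2 * (p_hat * (1.0 - p_hat))[:, None]).T @ X2 / len(y2)
    theta = np.linalg.inv(hessian)
    return S_hat, beta - theta @ score, theta / len(y2)   # model, estimate, covariance
\end{verbatim}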
Because the two subsamples are independent, we can make asymptotically valid statistical inference on any covariate in $\widehat{S}$ as long as this set contains the true signal variables when $n$ is large enough.
In practice, covariates with small coefficients may not be selected in the first stage, so drawing conclusions about the statistical significance of variables omitted in step \ref{ss-selection} is not advised.
The large bias that can potentially occur when $S$ is a null set is illustrated through simulations in Section \ref{s:simulations}.
Furthermore, although the size of the fitted model $\widehat{S}$ must be low-dimensional, this does not prevent one from obtaining estimates for each individual coefficient.
For example, step \ref{ss-estimation} may be repeated for each individual coefficient, i.e. for $S=\{1\}, \{2\},\ldots, \{p\}$, along with an appropriate correction for multiple testing when computing p-values and confidence intervals.
\subsection{The debiased lasso after multiple sample splitting}
\label{s:sample-splitting-m}
As previously noted, the single sample splitting estimator suffers from a loss of efficiency due to only using a fraction of the total sample to estimate $\boldsymbol{\beta}^0$.
The following multiple splitting procedure addresses this issue. For a pre-specified fixed set of coefficient indices $S$,
we now generate $B$ random splits of the sample $(y_i,\boldsymbol{x}_i)_{i=1}^n$, denoted $(\mathcal{D}_{1,b}, \mathcal{D}_{2,b})$ for $b=1,\ldots,B$, and do the following:
\begin{enumerate}
\item for $b=1,\ldots,B$,
\begin{enumerate}
\item using the subsample $\mathcal{D}_{1,b}$, fit a GLM lasso estimator $\boldsymbol{\beta}l_b$ to obtain the selected model $\widehat{S}_b = S\cup \{j:\boldsymbol{\beta}l_{b,j}\neq 0\}$;
\item\label{ms-estimation} using the subsample $\mathcal{D}_{2,b}$, compute the debiased lasso estimator $\boldsymbol{\beta}dl_{S,b}$ for $\boldsymbol{\beta}^0_{S}$ based on a GLM with covariates $\widehat{S}_b$;
\end{enumerate}
\item finally the multiple splitting estimators are $\boldsymbol{\beta}ms_S = B^{-1}\sum_{b=1}^B \boldsymbol{\beta}dl_{S,b}$.
\end{enumerate}
Note that all target coefficients to estimate must be pre-specified, in contrast to the single split procedure.
For exploratory analysis, where step \ref{ms-estimation} may be repeated for each individual coefficient, the multiple splitting estimator typically has a substantially stricter significance threshold compared to single split inference on $\widehat{S}$, due to correcting for $p$, $p>|\widehat{S}|$, multiple comparisons.
We apply the same variance estimator as \cite{fei2021estimation}, based on the nonparametric delta method with a bias correction for controlling the Monte Carlo error \citep{efron2014estimation,wager2014confidence}.
Letting $\boldsymbol{v}_b$ denote the $n$-vector of sampling indicators such that $\boldsymbol{v}_{b,i}\in \{0,1\}$ is equal to one if $i\in \mathcal{D}_{2,b}$, and is zero otherwise,
\[
\widehat{Var}\left (\boldsymbol{\beta}ms_j \right )
= \widehat{V}_j - \frac{n\,n_2}{B^2(n-n_2)}\sum_{b=1}^B \left (\boldsymbol{\beta}dl_{j,b} - \boldsymbol{\beta}ms_j\right )^2,
\]
\[
\widehat{V}_j = \frac{n(n-1)}{(n-n_2)^2}\sum_{i=1}^n \left \{\frac{1}{B}\sum_{b=1}^B \left (\boldsymbol{v}_{b,i} - \bar{\boldsymbol{v}}_i\right )(\boldsymbol{\beta}dl_{j,b} - \boldsymbol{\beta}ms_j) \right \}^2,
\]
where $\bar{\boldsymbol{v}}_i = B^{-1}\sum_{b=1}^B \boldsymbol{v}_{b,i}$, and $100(1-\alpha)\%$ confidence intervals may be constructed based on a normal approximation.
Similarly, the estimated covariance matrix is
\[
\widehat{Var}\left (\boldsymbol{\beta}ms_S \right )
= \frac{n(n-1)}{(n-n_2)^2}\sum_{i=1}^n \boldsymbol{\widehat{C}}_i\boldsymbol{\widehat{C}}_i^T
- \frac{n\,n_2}{B^2(n-n_2)}\sum_{b=1}^B \boldsymbol{\widehat{D}}_b\boldsymbol{\widehat{D}}_b^T,
\]
\[
\boldsymbol{\widehat{C}}_i = \frac{1}{B}\sum_{b=1}^B \left (\boldsymbol{v}_{b,i} - \bar{\boldsymbol{v}}_i\right )\left (\boldsymbol{\beta}dl_{S,b} - \boldsymbol{\beta}ms_S\right ), \quad
\boldsymbol{\widehat{D}}_b = \boldsymbol{\beta}dl_{S,b} - \boldsymbol{\beta}ms_S.
\]
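A numpy sketch of this variance estimator for a single coefficient $j$ is given below; the split indicators and split-specific estimates in the usage example are synthetic placeholders.
\begin{verbatim}
import numpy as np

def ms_variance(beta_b, V):
    """Sketch of the displayed variance estimator for one coefficient.
    beta_b : array of length B with the split-specific debiased-lasso estimates
    V      : (B, n) 0/1 matrix with V[b, i] = 1 iff sample i is in D_{2,b}."""
    B, n = V.shape
    n2 = int(V[0].sum())                        # estimation subsample size
    beta_ms = beta_b.mean()                     # multiple-splitting estimate
    centred = beta_b - beta_ms
    v_bar = V.mean(axis=0)
    inner = ((V - v_bar) * centred[:, None]).mean(axis=0)   # average over splits b
    V_hat = n * (n - 1) / (n - n2) ** 2 * np.sum(inner ** 2)
    bias_corr = n * n2 / (B ** 2 * (n - n2)) * np.sum(centred ** 2)
    return beta_ms, V_hat - bias_corr

# Tiny example with synthetic split indicators and estimates.
rng = np.random.default_rng(0)
n, n2, B = 40, 20, 100
V = np.zeros((B, n))
for b in range(B):
    V[b, rng.choice(n, n2, replace=False)] = 1.0
beta_b = rng.normal(1.0, 0.1, B)
print(ms_variance(beta_b, V))
\end{verbatim}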
Both single and multiple sample splitting procedures require that the matrices $\boldsymbol{\Sigma}Hat_{\boldsymbol{\beta}l,\widehat{S}}$ computed from each estimation split are invertible.
One way to encourage this is to set a hard limit for the selected model size, which would set a lower bound for the parameter $\lambda$ in the lasso.
In our simulations and real data analysis we do not restrict $\lambda$, which is chosen by cross validation, and instead allow the glm function in R to follow its default behavior of automatically dropping covariates from the model when the design matrix is ill-conditioned.
As the number of random splits $B$ grows we typically have diminishing gains in efficiency relative to the increased computational cost.
Although our theoretical results consider the maximum possible value of $B={n\choose n_2}$, our simulation results indicate that $B=1,000$ is sufficient for many practical settings with up to several hundred individuals in the sample.
\section{Theoretical Results}
\label{s:theory}
For the single split estimator, we make the following assumptions:
\begin{enumerate}
\item\label{cond-subg} The covariates $\boldsymbol{x}_i$ are sub-Gaussian random vectors and $\norm{\boldsymbol{X}}{\infty}\leq K$ almost surely for some constant $K>0$.
\item\label{cond-eig} $\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}$ and $E[\boldsymbol{X}^T\boldsymbol{X}/n]$ are positive definite with eigenvalues bounded from above and away from zero.
\item\label{cond-lip} The derivatives $\dot{\rho}(y,a) = \partial\rho(y,a)/\partial a$ and $\ddot{\rho}(y,a) = \partial^2\rho(y,a)/\partial a^2$ exist for all $(y, a)$ and there exists a $\delta>0$ and constant $c_{Lip}>0$ such that $\ddot{\rho}$ is locally Lipschitz
\[
\max_{a_0\in \{\boldsymbol{x}_i^T\boldsymbol{\beta}^0\}}
\sup_{\max\left (|a-a_0|, |\widehat{a} - a_0|\right )\leq\delta}
\sup_{y\in\mathcal{Y}}
\frac{|\ddot{\rho}(y,a) - \ddot{\rho}(y, \widehat{a})|}{|a-\widehat{a}|}\leq c_{Lip}.
\]
Also, there exist constants $K_1,K_2>0$ such that the derivatives are bounded:
\[
\max_{a_0\in \{\boldsymbol{x}_i^T\boldsymbol{\beta}^0\}}
\sup_{y\in\mathcal{Y}}
|\dot{\rho}(y,a_0)|\leq K_1,
\]
\[
\max_{a_0\in \{\boldsymbol{x}_i^T\boldsymbol{\beta}^0\}}
\sup_{y\in\mathcal{Y}}
|\ddot{\rho}(y,a_0)|\leq K_2.
\]
\item\label{cond-linpred} $\norm{\boldsymbol{X}\boldsymbol{\beta}^0}{\infty}$ is bounded above almost surely.
\item\label{cond-betamin} The sparsity $s_0$ satisfies $s_0\log(p)/\sqrt{n} = \littleO{1}$ and the regression parameters are large enough that $\min_{j\in S_0}|\boldsymbol{\beta}_j^0| \geq \bigO{s_0\sqrt{\log(p)/n}}$ as $n\rightarrow\infty$.
\end{enumerate}
Assumptions \ref{cond-subg}, \ref{cond-eig}, and \ref{cond-linpred} are typical in high dimensional literature.
Bounded covariates are common in practice, including dummy variables for categorical covariates, minor allele counts, and physical measurements.
Assumption \ref{cond-lip} is required in order to apply the results for the refined debiased lasso estimator of \cite{xia2021debiased}.
All but the boundedness condition on $\dot{\rho}$ are satisfied when the function $A$ is twice continuously differentiable, as is the case with commonly-used GLMs, due to assumption \ref{cond-linpred}.
The derivative $\dot{\rho}$ is also bounded if the response variable has bounded support, such as in logistic regression, and this condition can be relaxed to include other common GLMs by instead assuming sub-exponential tails, as in Lemma 3.4 of \cite{ning2017general}.
Assumption \ref{cond-betamin} guarantees that, with high probability, model selection based on the lasso does not miss any signal variables, so that the selected model is not misspecified.
This is a standard condition for sample splitting procedures that is not required by the desparsified lasso \citep{van2014asymptotically}.
For multiple splitting, we make the following additional assumptions for the set of target covariates $S$, where $\boldsymbol{a}$ is a $(p+1)$-vector such that $\norm{\boldsymbol{a}_S}{2}=1$ and $\norm{\boldsymbol{a}_{S^c}}{2}=0$.
Let $\mathcal{Z} = (y_i,\boldsymbol{x}_i)_{i=1}^n$ denote the entire data set.
\begin{enumerate}
\setcounter{enumi}{5}
\item\label{cond-ms1} For independent sampling indicator vectors $\boldsymbol{v}$ and $\boldsymbol{\widetilde{v}}$ with corresponding fitted models $\widehat{S}$ and $\widetilde{S}$, define
\[h_{i,n}
= \left [E\left (\boldsymbol{v}_i\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}}\Big |\mathcal{Z} \right )
- E\left (\boldsymbol{v}_i\boldsymbol{a}_{\widetilde{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}}\Big |\mathcal{Z} \right )\right ]\dot{\rho}_{\boldsymbol{\beta}^0}(y_i,\boldsymbol{x}_i) \]
where $\boldsymbol{\widetilde{I}}_S$ is the $|S|\times (p+1)$ matrix such that $\boldsymbol{\widetilde{I}}_S\boldsymbol{b} = \boldsymbol{b}_S$ for any $(p+1)$-vector $\boldsymbol{b}$, and the expectations are with respect to the sampling weights, conditional on the data.
We assume that $\sum_{i=1}^n h_{i,n}/\sqrt{n} =\littleO{1}$ and that $P_n\ddot{\rho}_{\boldsymbol{\beta}l}$ is invertible when computed from any estimation subsample.
\item\label{cond-ms2} There exists a random $(p+1)$-vector $\boldsymbol{\eta}_n$ independent of the data such that $\norm{\boldsymbol{\eta}_n}{\infty}$ is bounded and
\[\norm{E\left (\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}}\Big |\mathcal{Z} \right ) - \boldsymbol{\eta}_n^T}{1} = \op{1/\sqrt{\log(p)}}. \]
\end{enumerate}
Similar conditions were used and discussed further by \cite{wang2020debiased} in the context of linear models.
Since, for low-dimensional GLMs, the refined debiased lasso estimator is asymptotically equivalent to the MLE $\widetilde{\boldsymbol{\beta}}^{ML}$ in the sense that $\sqrt{n}(\boldsymbol{\beta}dl_j - \widetilde{\boldsymbol{\beta}}_j^{ML})=\op{1}$ for all $j$, our theoretical arguments also apply to multiple splitting with the MLE, using the lasso for model selection.
\cite{fei2021estimation} used a stronger partial orthogonality condition, where the signal variables are independent of the noise variables, to derive the asymptotic normality of their MLE-based multiple splitting estimator.
We further discuss the motivation behind Assumptions \ref{cond-ms1} and \ref{cond-ms2} and their relationship to the assumptions used by other work on sample splitting procedures in \ref{appendix:disc}, but provide a brief overview here.
Under Assumptions \ref{cond-subg}-\ref{cond-betamin} and the sparsity rates required in Theorem \ref{thm-ms} below, it can be shown that
\[
\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}_S^0)
= \frac{(1-q)^{-1}}{\sqrt{n}}\sum_{i= 1}^n E\left [\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}} \boldsymbol{v}_{i} | \mathcal{Z}\right ]\dot{\rho}_{\boldsymbol{\beta}^0}(y_i, \boldsymbol{x}_i) + \op{1}.
\]
Therefore, in order to prove asymptotic normality we need to control the randomness of the vector $\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}}\boldsymbol{v}_{i}$ averaged across all sample splits that exclude the $i$th sample from model selection (i.e. $\boldsymbol{v}_i=1$).
The ideal case, when this average is deterministic, can be achieved under model selection consistency, so that $\widehat{S}$ is fixed at $S\cup S_0$ for each sample split, or if the set of covariates that are always included in the fitted model are independent of all remaining covariates, essentially giving $\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}^{-1}$ a block diagonal structure.
Either of these two conditions imply that our less restrictive assumptions hold, where we only require that the effect of always excluding a single sample from model selection is asymptotically negligible (Assumption \ref{cond-ms1}) and that the average converges in probability to a bounded random vector with moderate rate (Assumption \ref{cond-ms2}).
The variable screening properties and asymptotic normality of our estimators are presented in the following three theorems, with their proofs given in \ref{appendix:thm1} through \ref{appendix:thm3}, respectively.
Note that any variable selection procedure with the same screening properties as the lasso may be used in our sample splitting procedures.
\begin{theorem}\label{thm-lasso}
(Variable screening properties for the GLM lasso.)
For a choice of tuning parameter $\lambda\sim \sqrt{\log(p)/n}$ and under assumptions \ref{cond-subg}-\ref{cond-betamin}, the lasso estimator $\boldsymbol{\beta}l$ and its selected model $\widehat{S} = \{j:\boldsymbol{\beta}l_j\neq 0\}$ satisfy
\[
P\left (\widehat{S}\supseteq S_0, |\widehat{S}|\leq ks_0 \right )\geq 1 - c_1/p^{c_2}
\]
for some positive constants $k, c_1, c_2$.
\end{theorem}
\begin{theorem}\label{thm-ss}
(Asymptotic normality for single sample splitting.) For the single split debiased lasso estimator $\boldsymbol{\beta}dl_{\widehat{S}}$ with both lasso tuning parameters of the order $\sqrt{\log(p)/n}$, $s_0\log(s_0)\sqrt{s_0/n} = \littleO{1}$, $|S|=\bigO{s_0}$, and under assumptions \ref{cond-subg}-\ref{cond-betamin},
$\boldsymbol{\Sigma}Hat_{\boldsymbol{\beta}l, \widehat{S}}$ is invertible with probability going to one, and for any $(p+1)$-vector $\boldsymbol{a}$ such that $\norm{\boldsymbol{a}}{2} = \norm{\boldsymbol{a}_{\widehat{S}}}{2}=1$, we have
\[
\frac{\sqrt{n_2}\boldsymbol{a}_{\widehat{S}}^T(\boldsymbol{\beta}dl_{\widehat{S}} - \boldsymbol{\beta}^0_{\widehat{S}})}{(\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\Theta}Hat_{\boldsymbol{\beta}l,\widehat{S}}\boldsymbol{a}_{\widehat{S}})^{1/2}}\rightarrow N(0,1)
\]
in distribution as $n\rightarrow\infty$.
\end{theorem}
\begin{theorem}\label{thm-ms}
(Asymptotic normality for multiple sample splitting.) For the multiple splitting debiased lasso estimator $\boldsymbol{\beta}ms_S$ with all lasso tuning parameters of the order $\sqrt{\log(p)/n}$, $s_0\log(s_0)\sqrt{s_0/n} = \littleO{1}$, $|S|=\bigO{s_0}$, and under assumptions \ref{cond-subg}-\ref{cond-ms2},
for any $(p+1)$-vector $\boldsymbol{a}$ such that $\norm{\boldsymbol{a}}{2} = \norm{\boldsymbol{a}_{S}}{2}=1$, we have
\[
\frac{\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}^0_S)}{(\boldsymbol{\eta}_n^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}\boldsymbol{\eta}_n)^{1/2}}\rightarrow N(0,1)
\]
in distribution as $n\rightarrow\infty$.
\end{theorem}
\section{Simulations}
\label{s:simulations}
We apply the proposed sample splitting estimation methods in several simulation settings with high dimensional covariates in order to assess the potential benefits of using the lasso for model selection and the debiased lasso in place of the MLE for making statistical inference, and of averaging estimates across multiple sample splits (MS) as opposed to using a single split (SS) estimator for a fixed set of coefficients.
Simulation results are all from logistic regression models, where the benefits of using the debiased lasso are particularly apparent.
This may be due to the numerical instability of the Hessian matrix of the negative log-likelihood $P_n\ddot{\rho}_{\boldsymbol{\beta}}(y_i, \boldsymbol{x}_i) = n^{-1}\sum_{i=1}^n \hat{p}(\boldsymbol{x}_i^T\boldsymbol{\beta})\left [1-\hat{p}(\boldsymbol{x}_i^T\boldsymbol{\beta})\right ]\boldsymbol{x}_i\boldsymbol{x}_i^T$, where $\hat{p}(\cdot)$ is the cdf of the standard logistic distribution.
Note $\hat{p}(\boldsymbol{x}_i^T\boldsymbol{\beta})$ will be close to zero or one for large coefficient values, which can result in a near-singular Hessian matrix.
Therefore the debiased lasso approach of performing a single-step maximization of the log-likelihood starting from an initial lasso estimator that is biased towards zero can help alleviate this issue.
In all simulations, $n=500$ samples are generated from a logistic regression model with $p=700$ covariates.
The covariates are generated from a $N(\boldsymbol{0}, \boldsymbol{\Sigma})$ distribution before being truncated at $\pm 3$.
The covariance matrix $\boldsymbol\Sigma$ has ones on the diagonal and either an AR(1) correlation structure $\boldsymbol\Sigma_{jk} = 0.5^{|j-k|}$ or a compound symmetry structure $\boldsymbol\Sigma_{jk} = 0.5$ $(j\neq k)$.
Further simulations that demonstrate the robustness of our procedures under higher covariate correlations are presented in \ref{appendix:sims}.
There are $s_0=6$ signal covariates, and their index set was randomly chosen.
The corresponding coefficient values are $\boldsymbol{\beta}^0_{S_0} = (-1.5, -1, -0.5, 0.5, 1, 1.5)^T$.
We consider all signal covariates together with two randomly chosen noise covariates. For each coefficient $\beta_j$, we assess the single and multiple splitting debiased lasso estimators fit on $\widehat{S}\cup\{j\}$, as well as the corresponding MLE-based estimators.
The splitting proportion is $q=0.5$.
We also provide results from the oracle MLE, fit on $S_0\cup \{j\}$, for reference, using either the entire sample or half of the sample.
Model selection is performed using lasso estimators with the values for $\lambda$ chosen by 10-fold cross validation.
The multiple splitting estimators use $B=1,000$ splits.
We use the R package glmnet for lasso estimation and the built-in glm function for MLE and debiased lasso estimation, using the control and start arguments to specify a single iteration starting at an initial lasso estimate for the latter.
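For reference, the following numpy sketch generates one replicate of this simulation design; interpreting truncation at $\pm 3$ as clipping and taking the intercept to be zero are our assumptions, and the actual simulations were run in R as described above.
\begin{verbatim}
import numpy as np

def simulate_replicate(n=500, p=700, rho=0.5, structure="ar1", seed=0):
    """One replicate of the simulation design: truncated Gaussian covariates,
    s0 = 6 randomly placed signal coefficients, logistic response."""
    rng = np.random.default_rng(seed)
    if structure == "ar1":
        Sigma = rho ** np.abs(np.subtract.outer(np.arange(p), np.arange(p)))
    else:                                            # compound symmetry
        Sigma = np.full((p, p), rho) + (1.0 - rho) * np.eye(p)
    X = rng.multivariate_normal(np.zeros(p), Sigma, size=n, method="cholesky")
    X = np.clip(X, -3.0, 3.0)                        # truncation at +/- 3 read as clipping
    S0 = rng.choice(p, 6, replace=False)             # random signal index set
    beta = np.zeros(p)
    beta[S0] = [-1.5, -1.0, -0.5, 0.5, 1.0, 1.5]
    y = rng.binomial(1, 1.0 / (1.0 + np.exp(-(X @ beta))))   # intercept taken as 0
    return X, y, S0, beta

X, y, S0, beta0 = simulate_replicate()
print(X.shape, y.mean(), sorted(S0))
\end{verbatim}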
The simulation results for the AR(1) correlation structure are summarized in Table \ref{table-ar1-5}.
The single split MLE exhibits a bias that greatly inflates the size of the estimated coefficients, and the magnitude of this bias increases with the signal strength.
In contrast, the single split debiased lasso estimator is approximately unbiased.
Averaging across multiple splits does not improve the bias of the MLE, which is also apparent to a lesser extent in the logistic regression simulations of \cite{fei2021estimation} summarized in their Table 3.
The MLE has substantially higher variability than the debiased lasso, even for the multiple splitting estimators.
The 95\% confidence interval coverage for noise variables is roughly the same across all considered methods.
For signal variables, however, the MLE has poor coverage that actually worsens after multiple splitting.
This issue appears to be particularly severe in logistic regression models and is mild in some other GLMs such as Poisson regression.
In contrast, the debiased lasso after multiple splitting performs well in achieving the nominal 95\% coverage for all considered coefficients.
All single split standard errors tend to be underestimated for signal variables, leading to slight undercoverage, while the multiple splitting standard errors are approximately unbiased.
Multiple splitting drastically lowers the variability of estimates from either the MLE or debiased lasso compared to a single split.
This produces a dramatic improvement in rejection rate for small coefficients.
Note that the rejection rates for the MLE estimators are inflated due to their bias, which partially offsets the wider confidence interval length.
In summary, for each pre-specified coefficient, the multiple splitting debiased lasso estimator provides the best performance in terms of bias, variability, and confidence interval coverage in this simulation setting, where the correlation between covariates decays rather quickly.
\begin{table}
\caption{Logistic regression simulation results for $n=500$, $p=700$, $s_0=6$, and AR(1) correlation structure with parameter 0.5. Selection results refer to the lasso in the single split estimator, where the average selected model size was 41. The fitted model for estimating each $\beta_j$ was $\widehat{S}\cup\{j\}$. Nominal confidence interval coverage probabilities are 0.95.} \label{table-ar1-5}
{\begin{tabular*}{\textwidth}{l l r r r r r r r r}
\hline & Estimator & $\beta_{489}$ & $\beta_{130}$ & $\beta_{680}$ & $\beta_{488}$ & $\beta_{476}$ & $\beta_{190}$ & $\beta_{510}$ & $\beta_{336}$\\ \hline
$\boldsymbol{\beta}^0_j$ & & -1.50 & -1.00 & -0.50 & 0.00 & 0.00 & 0.50 & 1.00 & 1.50\\
Selection Rate & & 1.00 & 1.00 & 0.64 & 0.13 & 0.07 & 0.69 & 1.00 & 1.00\\ \hline
Bias & Debiased SS & 0.01 & 0.06 & 0.00 & -0.04 & -0.01 & -0.03 & 0.00 & 0.00\\
& MLE SS & -0.52 & -0.30 & -0.24 & -0.06 & -0.01 & 0.18 & 0.37 & 0.54\\
& Debiased MS & 0.01 & 0.04 & 0.02 & -0.02 & 0.00 & -0.02 & -0.01 & 0.00\\
& MLE MS & -0.59 & -0.37 & -0.21 & -0.02 & -0.01 & 0.21 & 0.40 & 0.60\\
& Oracle $(n_2)$ & -0.09 & -0.05 & 0.00 & -0.01 & 0.00 & 0.04 & 0.07 & 0.09\\
& Oracle $(n)$ & -0.05 & -0.01 & -0.01 & -0.02 & 0.00 & 0.01 & 0.04 & 0.05\\ \hline
Coverage & Debiased SS & 0.84 & 0.89 & 0.92 & 0.94 & 0.95 & 0.88 & 0.87 & 0.89\\
& MLE SS & 0.73 & 0.86 & 0.84 & 0.90 & 0.92 & 0.86 & 0.77 & 0.69\\
& Debiased MS & 0.94 & 0.94 & 0.93 & 0.95 & 0.97 & 0.96 & 0.96 & 0.95\\
& MLE MS & 0.39 & 0.64 & 0.81 & 0.94 & 0.94 & 0.79 & 0.58 & 0.40\\
& Oracle $(n_2)$ & 0.95 & 0.96 & 0.94 & 0.95 & 0.96 & 0.94 & 0.96 & 0.97\\
& Oracle $(n)$ & 0.96 & 0.95 & 0.94 & 0.92 & 0.94 & 0.95 & 0.96 & 0.96\\ \hline
Rejection Rate & Debiased SS & 1.00 & 0.99 & 0.74 & 0.06 & 0.05 & 0.62 & 1.00 & 1.00\\
($H_0: \beta_j=0$) & MLE SS & 1.00 & 0.99 & 0.77 & 0.10 & 0.08 & 0.71 & 1.00 & 1.00\\
& Debiased MS & 1.00 & 1.00 & 0.92 & 0.05 & 0.03 & 0.94 & 1.00 & 1.00\\
& MLE MS & 1.00 & 1.00 & 0.94 & 0.06 & 0.06 & 0.95 & 1.00 & 1.00\\
& Oracle $(n_2)$ & 1.00 & 1.00 & 0.74 & 0.05 & 0.04 & 0.80 & 1.00 & 1.00\\
& Oracle $(n)$ & 1.00 & 1.00 & 0.97 & 0.09 & 0.06 & 0.98 & 1.00 & 1.00\\ \hline
Standard Error & Debiased SS & 0.22 & 0.20 & 0.19 & 0.21 & 0.18 & 0.19 & 0.20 & 0.22\\
& MLE SS & 0.37 & 0.30 & 0.28 & 0.29 & 0.25 & 0.27 & 0.31 & 0.37\\
& Debiased MS & 0.18 & 0.15 & 0.14 & 0.14 & 0.12 & 0.14 & 0.16 & 0.18\\
& MLE MS & 0.27 & 0.22 & 0.20 & 0.22 & 0.19 & 0.20 & 0.22 & 0.27\\
& Oracle $(n_2)$ & 0.26 & 0.22 & 0.20 & 0.22 & 0.19 & 0.20 & 0.22 & 0.26\\
& Oracle $(n)$ & 0.18 & 0.15 & 0.14 & 0.15 & 0.13 & 0.14 & 0.15 & 0.18\\ \hline
Empirical SD & Debiased SS & 0.30 & 0.23 & 0.21 & 0.22 & 0.18 & 0.24 & 0.26 & 0.29\\
& MLE SS & 0.58 & 0.41 & 0.36 & 0.38 & 0.31 & 0.40 & 0.48 & 0.56\\
& Debiased MS & 0.17 & 0.14 & 0.14 & 0.14 & 0.12 & 0.14 & 0.14 & 0.18\\
& MLE MS & 0.29 & 0.22 & 0.22 & 0.23 & 0.20 & 0.21 & 0.24 & 0.30\\
& Oracle $(n_2)$ & 0.28 & 0.22 & 0.20 & 0.22 & 0.19 & 0.20 & 0.22 & 0.26\\
& Oracle $(n)$ & 0.17 & 0.14 & 0.14 & 0.16 & 0.14 & 0.14 & 0.14 & 0.17\\ \hline
\end{tabular*}}
\end{table}
The simulation results for compound symmetry correlation structure are summarized in Table \ref{table-cs-5}.
The same trends concerning the bias and large variability of the MLE seen in Table \ref{table-ar1-5} are also present in this setting.
Again the debiased lasso estimators are approximately unbiased, and the multiple splitting debiased lasso estimator has approximate 95\% coverage for each coefficient.
Multiple splitting again greatly reduces the variability of single split estimators, resulting in thinner confidence intervals with more power for detecting small coefficient values.
\begin{table}
\caption{Logistic regression simulation results for $n=500$, $p=700$, $s_0=6$, and compound symmetry correlation structure with parameter 0.5. Selection results refer to the lasso in the single split estimator, where the average selected model size was 37. The fitted model for estimating each $\beta_j$ was $\widehat{S}\cup\{j\}$. Nominal confidence interval coverage probabilities are 0.95.} \label{table-cs-5}
{\begin{tabular*}{\textwidth}{l l r r r r r r r r}
\hline & Estimator & $\beta_{489}$ & $\beta_{130}$ & $\beta_{680}$ & $\beta_{488}$ & $\beta_{476}$ & $\beta_{190}$ & $\beta_{510}$ & $\beta_{336}$\\ \hline
$\boldsymbol{\beta}^0_j$ & & -1.50 & -1.00 & -0.50 & 0.00 & 0.00 & 0.50 & 1.00 & 1.50\\
Selection Rate & & 1.00 & 0.96 & 0.51 & 0.07 & 0.05 & 0.44 & 0.98 & 1.00\\ \hline
Bias & Debiased SS & 0.01 & 0.04 & 0.04 & -0.02 & 0.00 & 0.00 & -0.02 & -0.05\\
& MLE SS & -0.42 & -0.27 & -0.13 & -0.02 & -0.01 & 0.19 & 0.30 & 0.37\\
& Debiased MS & 0.02 & 0.03 & 0.03 & -0.01 & 0.00 & -0.02 & -0.02 & -0.02\\
& MLE MS & -0.39 & -0.26 & -0.14 & -0.01 & 0.00 & 0.15 & 0.26 & 0.39\\
& Oracle $(n_2)$ & -0.05 & -0.04 & -0.02 & 0.00 & 0.00 & 0.01 & 0.04 & 0.07\\
& Oracle $(n)$ & -0.03 & -0.02 & -0.02 & -0.01 & 0.00 & 0.02 & 0.02 & 0.03\\ \hline
Coverage & Debiased SS & 0.88 & 0.92 & 0.92 & 0.94 & 0.96 & 0.92 & 0.90 & 0.89\\
& MLE SS & 0.80 & 0.89 & 0.91 & 0.90 & 0.93 & 0.86 & 0.85 & 0.82\\
& Debiased MS & 0.94 & 0.96 & 0.94 & 0.96 & 0.96 & 0.94 & 0.94 & 0.96\\
& MLE MS & 0.68 & 0.84 & 0.90 & 0.96 & 0.94 & 0.92 & 0.81 & 0.76\\
& Oracle $(n_2)$ & 0.94 & 0.94 & 0.95 & 0.91 & 0.94 & 0.92 & 0.96 & 0.94\\
& Oracle $(n)$ & 0.96 & 0.98 & 0.96 & 0.92 & 0.92 & 0.96 & 0.96 & 0.98\\ \hline
Rejection Rate & Debiased SS & 1.00 & 0.99 & 0.49 & 0.06 & 0.04 & 0.57 & 0.98 & 0.99\\
($H_0: \beta_j=0$) & MLE SS & 1.00 & 1.00 & 0.56 & 0.10 & 0.07 & 0.63 & 0.98 & 0.99\\
& Debiased MS & 1.00 & 1.00 & 0.82 & 0.04 & 0.04 & 0.87 & 1.00 & 1.00\\
& MLE MS & 1.00 & 1.00 & 0.84 & 0.04 & 0.06 & 0.88 & 1.00 & 1.00\\
& Oracle $(n_2)$ & 1.00 & 1.00 & 0.66 & 0.09 & 0.06 & 0.66 & 1.00 & 1.00\\
& Oracle $(n)$ & 1.00 & 1.00 & 0.92 & 0.07 & 0.07 & 0.92 & 1.00 & 1.00\\ \hline
Standard Error & Debiased SS & 0.25 & 0.23 & 0.23 & 0.23 & 0.23 & 0.23 & 0.23 & 0.25\\
& MLE SS & 0.36 & 0.32 & 0.30 & 0.28 & 0.29 & 0.30 & 0.32 & 0.36\\
& Debiased MS & 0.19 & 0.17 & 0.16 & 0.15 & 0.15 & 0.16 & 0.17 & 0.19\\
& MLE MS & 0.27 & 0.23 & 0.21 & 0.21 & 0.21 & 0.21 & 0.23 & 0.27\\
& Oracle $(n_2)$ & 0.27 & 0.24 & 0.22 & 0.22 & 0.22 & 0.22 & 0.24 & 0.27\\
& Oracle $(n)$ & 0.18 & 0.17 & 0.15 & 0.15 & 0.15 & 0.15 & 0.17 & 0.18\\ \hline
Empirical SD & Debiased SS & 0.29 & 0.25 & 0.26 & 0.22 & 0.24 & 0.26 & 0.27 & 0.29\\
& MLE SS & 0.56 & 0.43 & 0.39 & 0.34 & 0.38 & 0.40 & 0.50 & 0.53\\
& Debiased MS & 0.18 & 0.16 & 0.16 & 0.15 & 0.15 & 0.16 & 0.17 & 0.18\\
& MLE MS & 0.28 & 0.24 & 0.22 & 0.22 & 0.22 & 0.21 & 0.24 & 0.27\\
& Oracle $(n_2)$ & 0.28 & 0.25 & 0.23 & 0.25 & 0.23 & 0.23 & 0.25 & 0.29\\
& Oracle $(n)$ & 0.18 & 0.15 & 0.16 & 0.16 & 0.16 & 0.15 & 0.16 & 0.17\\ \hline
\end{tabular*}}
\end{table}
Lastly, we assess the performance of the post-model selection procedure that does not pre-specify any covariate of interest.
This simulation setting is identical to that of Table \ref{table-ar1-5} with AR(1) correlation structure, but the estimates are now all based on a single model fit on only the selected covariates.
For covariates that are not selected, the coefficient estimate and standard error are both set to zero.
The oracle MLE results we present are estimated on $S_0$ instead of $S_0\cup \{j\}$.
\begin{table}
\caption{Logistic regression simulation results for $n=500$, $p=700$, $s_0=6$, and AR(1) correlation structure with parameter 0.5. Selection results refer to the lasso in the single split estimator, where the average selected model size was 41. The fitted model for estimating each $\beta_j$ was $\widehat{S}$. Nominal confidence interval coverage probabilities are 0.95.} \label{table-postselection-ar1-5}
{\begin{tabular*}{\textwidth}{l l r r r r r r r r}
\hline & Estimator & $\beta_{489}$ & $\beta_{130}$ & $\beta_{680}$ & $\beta_{488}$ & $\beta_{476}$ & $\beta_{190}$ & $\beta_{510}$ & $\beta_{336}$\\ \hline
$\boldsymbol{\beta}^0_j$ & & -1.50 & -1.00 & -0.50 & 0.00 & 0.00 & 0.50 & 1.00 & 1.50\\
Selection Rate & & 1.00 & 1.00 & 0.64 & 0.13 & 0.07 & 0.69 & 1.00 & 1.00\\ \hline
Bias & Debiased SS & 0.00 & 0.06 & 0.18 & -0.01 & 0.00 & -0.18 & 0.00 & 0.00\\
& MLE SS & -0.66 & -0.35 & 0.02 & -0.02 & 0.01 & -0.03 & 0.44 & 0.66\\
& Oracle $(n_2)$ & -0.09 & -0.05 & 0.00 & 0.00 & 0.00 & 0.04 & 0.07 & 0.09\\ \hline
Coverage & Debiased SS & 0.83 & 0.90 & 0.57 & 0.99 & 1.00 & 0.59 & 0.87 & 0.89\\
& MLE SS & 0.70 & 0.83 & 0.51 & 0.98 & 1.00 & 0.57 & 0.75 & 0.66\\
& Oracle $(n_2)$ & 0.95 & 0.96 & 0.94 & 1.00 & 1.00 & 0.94 & 0.96 & 0.97\\ \hline
Rejection Rate & Debiased SS & 1.00 & 0.99 & 0.47 & 0.01 & 0.00 & 0.42 & 1.00 & 1.00\\
($H_0: \beta_j=0$) & MLE SS & 1.00 & 0.99 & 0.49 & 0.02 & 0.00 & 0.45 & 1.00 & 1.00\\
& Oracle $(n_2)$ & 1.00 & 1.00 & 0.74 & 0.00 & 0.00 & 0.80 & 1.00 & 1.00\\ \hline
Standard Error & Debiased SS & 0.22 & 0.20 & 0.12 & 0.03 & 0.01 & 0.13 & 0.20 & 0.22\\
& MLE SS & 0.40 & 0.32 & 0.18 & 0.04 & 0.02 & 0.20 & 0.33 & 0.40\\
& Oracle $(n_2)$ & 0.26 & 0.22 & 0.20 & 0.00 & 0.00 & 0.20 & 0.22 & 0.26\\ \hline
Empirical SD & Debiased SS & 0.32 & 0.23 & 0.30 & 0.10 & 0.04 & 0.30 & 0.26 & 0.29\\
& MLE SS & 0.83 & 0.49 & 0.47 & 0.18 & 0.07 & 0.50 & 0.55 & 0.75\\
& Oracle $(n_2)$ & 0.28 & 0.22 & 0.20 & 0.00 & 0.00 & 0.20 & 0.22 & 0.26\\ \hline
\end{tabular*}}
\end{table}
The simulation results are summarized in Table \ref{table-postselection-ar1-5}.
For small coefficients, there is poor confidence interval coverage across all non-oracle estimators due to the randomness associated with their inclusion in each model.
For larger coefficients that are nearly always selected by the lasso, the performance resembles that of Table \ref{table-ar1-5}.
These results demonstrate the importance of only analyzing coefficients that are included in the fitted model.
For larger sample sizes, the lasso selection performance can improve substantially, leading to improved coverage and bias correction of the debiased lasso after model selection. See \ref{appendix:sims} for additional simulation results with a larger sample size, as well as for additional results with higher correlations among covariates.
\section{Real Data Example: The Mid-South Tobacco Case-Control Study}
\label{s:analysis}
We apply the proposed method to a dataset of single nucleotide polymorphisms (SNPs) from a sample of African-American participants in the Mid-South Tobacco Case-Control study population \citep{jiang2019exome, xu2020prediction, han2022identification} to assess genetic risk factors for nicotine dependence.
The dataset is publicly available with GEO accession GSE148375.
It originally contained 242,901 SNPs measured across 3399 individuals.
We excluded SNPs that were insertions or deletions, had a call rate less than 95\%, were not in Hardy-Weinberg equilibrium ($p\leq 10^{-6}$), or had a minor allele frequency of less than 0.01.
Subjects missing more than 1\% of the remaining SNPs were excluded, and missing values were then imputed using the observed allele frequencies for each SNP.
After data cleaning, the covariates consist of 32,557 SNPs as well as gender and age. The response variable is a binary indicator of smoking status (1=smoker), where 1607 of the 3317 participants are smokers.
Prior research has identified several genetic regions with SNPs that are associated with nicotine dependence, including 15q25.1 (CHRNA5/A3/B4), 8p11.21 (CHRNB3/A6), and 19q13.2 (CYP2A6/A7/B6, EGLN2, RAB4B, and NUMBL).
See, for example, \cite{yang2016converging} and the references therein.
We choose the target covariates to be the 16 measured SNPs that lie in these three regions, as well as the demographic variables.
The results from our multiple splitting debiased lasso estimator are presented in Table \ref{table-data-analysis}.
After adjusting for multiple testing using the Holm procedure \citep{holm1979simple}, none of the SNPs have a significant association with smoking status.
The SNP rs3733829 has the largest coefficient estimate, with an estimated 37\% increase in the odds of being a smoker and unadjusted 95\% confidence interval (9\%, 73\%), controlling for all other covariates.
The association of rs3733829 with increased cigarette consumption has been discussed by \cite{tobacco2010genome} and \cite{bloom2014variants}.
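For reference, the odds ratios and intervals in Table \ref{table-data-analysis} are consistent with the usual Wald-type transformation of the log-odds estimate and its standard error; for rs3733829, up to rounding of the displayed values,
\begin{align*}
\widehat{\mathrm{OR}} = \exp(\hat\beta) = \exp(0.32) \approx 1.37,
\qquad
\exp\left (\hat\beta \pm 1.96\,\mathrm{SE}\right ) = \exp(0.32 \pm 1.96\times 0.12) \approx (1.09,\ 1.73).
\end{align*}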
\begin{table}
\caption{Multiple splitting debiased lasso estimates for a logistic regression model of smoking status on standardized age, gender, and 32,557 SNPs, based on a case-control sample of 3317 individuals. There are 16 measured SNPs that lie in the regions of interest after data cleaning. P-values are adjusted for multiple testing using the Holm procedure and truncated at one, while confidence intervals are not adjusted.} \label{table-data-analysis}
{\begin{tabular*}{\textwidth}{l l r r r r}
\hline
Covariate & Gene & $\hat\beta$ & SE & Holm P-value & Odds Ratio (95\% CI)\\
\hline
Intercept & & -0.01 & 0.18 & 1.00 & 0.99 (0.70, 1.41)\\
Age & & 0.03 & 0.03 & 1.00 & 1.03 (0.96, 1.09)\\
Male & & 0.45 & 0.07 & $1.76\times 10^{-10}$ & 1.58 (1.38, 1.79)\\
rs35327613 & CHRNB3 & -0.06 & 0.11 & 1.00 & 0.94 (0.76, 1.17)\\
rs61740655 & CHRNA5 & -0.10 & 0.13 & 1.00 & 0.90 (0.70, 1.17)\\
rs79109919 & CHRNA5 & -0.06 & 0.16 & 1.00 & 0.94 (0.68, 1.29)\\
rs16969968 & CHRNA5 & -0.09 & 0.11 & 1.00 & 0.91 (0.74, 1.13)\\
rs938682 & CHRNA3 & 0.10 & 0.10 & 1.00 & 1.11 (0.91, 1.35)\\
rs8042374 & CHRNA3 & -0.18 & 0.12 & 1.00 & 0.83 (0.66, 1.05)\\
rs61737502 & CHRNB4 & 0.04 & 0.10 & 1.00 & 1.04 (0.85, 1.28)\\
rs56218866 & CHRNB4 & -0.10 & 0.14 & 1.00 & 0.91 (0.68, 1.21)\\
rs950776 & CHRNB4 & -0.02 & 0.08 & 1.00 & 0.98 (0.83, 1.15)\\
rs12440298 & CHRNB4 & -0.03 & 0.06 & 1.00 & 0.97 (0.87, 1.09)\\
rs3865452 & COQ8B & -0.06 & 0.06 & 1.00 & 0.94 (0.84, 1.04)\\
rs3733829 & EGLN2 & 0.32 & 0.12 & 0.13 & 1.37 (1.09, 1.73)\\
rs75152309 & CYP2A7 & 0.07 & 0.09 & 1.00 & 1.07 (0.90, 1.28)\\
rs73032311 & CYP2A7 & -0.09 & 0.08 & 1.00 & 0.91 (0.78, 1.07)\\
rs28399499 & CYP2B6 & 0.00 & 0.10 & 1.00 & 1.00 (0.82, 1.22)\\
rs7260329 & CYP2B6 & -0.16 & 0.08 & 0.78 & 0.86 (0.73, 1.00)
\end{tabular*}}
\end{table}
\appendix
\renewcommand{\thesection}{Appendix \Alph{section}}
\section{Proof of Theorem 1: Results for GLM lasso selected model}\label{appendix:thm1}
In order to show the model selection properties of the GLM lasso under Assumptions 1-5, we consider the results on false negatives and false positives (i.e. selected model size) separately in Lemmas 1 and 2 below.
These are each based on oracle inequalities from \cite{huang2012estimation}, specifically using the unweighted lasso, target vector $\boldsymbol{\beta}^0$, and target set $S = S_0\cup \{1\}$ that includes the intercept.
We use $c_1$ and $c_2$ as generic constants in our probability bounds and denote the sample size as $n$.
\begin{lemma} \label{lemma1}
Under Assumptions 1-5, if the diagonal entries of $\boldsymbol{X}^T\boldsymbol{X}$ are bounded above and away from zero and the compatibility constant \citep{huang2012estimation} is bounded below, then the GLM lasso has no false negatives with probability at least $1-c_1\exp(-c_2\log(p))$ for constants $c_1,c_2>0$.
\end{lemma}
This lemma is a direct result of Theorem 9 (iii) of \cite{huang2012estimation}, which gives an $\ell_1$ bound for the GLM lasso estimation error.
We proceed by verifying that our assumptions imply the conditions required in Theorem 9 of \cite{huang2012estimation}.
We first use the bound $\log x \in [1-1/x, x-1]$ for $x>0$ to verify the required Lipschitz condition
\begin{eqnarray*}
&& \max_{i\leq n} \left |\log A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0 + t) - \log A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)\right | \\
&& \qquad \qquad = \max_{i\leq n} \left |\log \frac{A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0 + t)}{A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)}\right | \\
&& \qquad \qquad \leq \max_{i\leq n}\frac{|A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0 + t)- A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)|}{\min\left (A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0), A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0 + t)\right )} \\
&& \qquad \qquad \leq \frac{c_{Lip} |t|}{\inf_{i\leq n, |t|\leq \delta} A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0 + t)}
\end{eqnarray*}
for all $|t|\leq \delta$, where the denominator is bounded below since $\norm{\boldsymbol{X}\boldsymbol{\beta}^0}{\infty}$ is bounded above almost surely and $A''$ is a positive function.
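As a concrete illustration (specializing to the logistic model used in our simulations and data analysis; this is not needed for the general argument), here $A(\eta)=\log(1+e^{\eta})$, so
\begin{align*}
A''(\eta)=\frac{e^{\eta}}{(1+e^{\eta})^{2}}\in\left (0,\tfrac{1}{4}\right ],
\end{align*}
which is Lipschitz on $\mathbb{R}$ and bounded away from zero on any bounded range of $\eta$; hence both the Lipschitz constant $c_{Lip}$ and the positive lower bound on the denominator above are available whenever $\norm{\boldsymbol{X}\boldsymbol{\beta}^0}{\infty}$ is bounded.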
Next, we can choose penalty levels $\lambda>\lambda_1=\lambda_0$ each of order $\sqrt{\log(p)/n}$ so that (31) of \cite{huang2012estimation} is satisfied for large $n$ when the compatibility constant $\kappa_*(\xi, S)$ is bounded below and $|S|\sqrt{\log(p)/n}=\littleO{1}$, as we have assumed.
Lastly, to verify condition (29) of \cite{huang2012estimation}, note that the positive quantities $A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0), i=1,\ldots,n$ are bounded almost surely, so the diagonal elements of $P_n\ddot{\rho}_{\boldsymbol{\beta}^0}= \frac{1}{n}\sum_{i=1}^n A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)\boldsymbol{x}_i\boldsymbol{x}_i^T$ are bounded above and away from zero due to the assumed corresponding bounds on $\frac{1}{n}\sum_{i=1}^n x_{ij}^2$ for all $j=1,\ldots,p+1$.
Now that we have verified the conditions, by Theorem 9 of \cite{huang2012estimation} using the seminorm $\phi(\boldsymbol{b}) = \norm{\boldsymbol{b}_S}{1}/|S|$, with probability at least $1-c_1\exp(-c_2\log(p))$ for constants $c_1,c_2>0$, we have $\norm{\boldsymbol{\beta}l_S - \boldsymbol{\beta}_S^0}{1}\leq k|S|\sqrt{\log(p)/n}$ for some constant $k>0$.
Therefore, on this event, there are no false negatives if $\min_{j\in S}|\boldsymbol{\beta}_j^0| > k(s_0+1) \sqrt{\log(p)/n}$, as we have assumed.
\begin{lemma} \label{lemma2}
Under the conditions of Lemma 1, the GLM lasso selected model size has an upper bound proportional to the true model size $s_0$, with probability at least $1-c_1\exp(-c_2n)$ for some constants $c_1,c_2>0$.
\end{lemma}
This lemma is a direct result of Theorem 19 of \cite{huang2012estimation}, which gives an upper bound for the number of false positives selected by the GLM lasso in terms of a compatibility constant and restricted upper eigenvalue.
We begin by showing that the restricted upper eigenvalue
\[
\kappa_+(s) = \sup_{|B|=s,\, B\cap S=\emptyset,\, \boldsymbol{b} \in\Cone{\xi}{S},\, M_3\norm{\boldsymbol{b}_S}{1}\leq \eta^*}
\lambda_{\max}\left (\int_0^1 \frac{1}{n}\sum_{i=1}^n A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0 + t\boldsymbol{x}_i^T\boldsymbol{b})\left [\boldsymbol{x}_i\boldsymbol{x}_i^T\right ]_B dt\right )
\]
over the cone $\Cone{\xi}{S} = \{\boldsymbol{b}\in \mathbb{R}^{p+1}: \norm{\boldsymbol{b}_{S^c}}{1}\leq \xi\norm{\boldsymbol{b}_S}{1}\neq 0\}$ is bounded above with probability tending to one when $s$ is proportional to $s_0$.
For large $n$ and small $\eta^*$, the term involving $A''$ is bounded almost surely by our Lipschitz assumptions since $\norm{\boldsymbol{b}}{1} = \norm{\boldsymbol{b}_S}{1} + \norm{\boldsymbol{b}_{S^c}}{1}\leq (1+\xi)\norm{\boldsymbol{b}_S}{1}$ so $|\boldsymbol{x}_i^T\boldsymbol{b}|\leq \norm{\boldsymbol{x}_i}{\infty}\norm{\boldsymbol{b}}{1}\leq K(1+\xi)\eta^*$ in the cone $\Cone{\xi}{S}$.
Since the intercept is included in $S$, and the eigenvalues of $E[\boldsymbol{x}\boldsymbol{x}^T]$ are bounded above, we can apply Lemma 15 of \cite{loh2012sparseeigen} for zero-mean subgaussian random vectors to show that $\sup_{|B|\leq s, B\cap S=\emptyset}\lambda_{\max}\left ( \frac{1}{n}\left [\boldsymbol{X}^T\boldsymbol{X}\right ]_B\right )^2$ is bounded above by a constant with probability at least $1-c_1\exp(-c_2n)$ for some constants $c_1,c_2>0$ when $s\propto s_0$ and $s_0\log(p)/n = \littleO{1}$.
Using this upper bound for the upper restricted eigenvalue and the assumed lower bound for the compatibility constant, Theorem 19 of \cite{huang2012estimation} implies that the number of false positives, and thus the selected model size, is at most of order $s_0$.
We are now ready to prove Theorem 1. We begin by providing probability bounds for the events assumed in Lemmas 1 and 2.
First, \cite{van2009conditions} showed that the compatibility constant is bounded away from zero with probability at least $1-c_1\exp(-c_2n)$ for some constants $c_1,c_2>0$.
Next, we seek to bound $\frac{1}{n}\sum_{i=1}^n x_{ij}^2$ from above and away from zero uniformly over $j=1,\ldots,p+1$.
This is trivial for the intercept, and the rest of the covariates are each bounded almost surely with mean zero and a variance of at least $\min_j \boldsymbol{\Sigma}_{jj}\geq \lambda_{\min}(\boldsymbol{\Sigma})>0$. We apply Hoeffding's inequality and a union bound to get
\begin{eqnarray*}
P\left (\max_{2\leq j\leq p+1}\left |\frac{1}{n}\sum_{i=1}^n x_{ij}^2 - \boldsymbol{\Sigma}_{jj}\right | \geq t\right )
&\leq& \sum_{j=2}^{p+1} P\left (\left |\frac{1}{n}\sum_{i=1}^n x_{ij}^2 - \boldsymbol{\Sigma}_{jj}\right |\geq t\right ) \\
&\leq& \sum_{j=2}^{p+1} 2\exp\left (-\frac{2nt^2}{K^4} \right ) \\
&\leq& \exp\left (-\frac{n\left [2t^2 - K^4\log(2p)/n\right ]}{K^4} \right ).
\end{eqnarray*}
Since $\log(p)/n=\littleO{1}$, for large $n$ we can fix a $t\in (0, \lambda_{\min}(\boldsymbol{\Sigma}))$ so that the probability bound is of order $c_1\exp(-c_2n)$ for some positive constants $c_1$ and $c_2$.
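For concreteness, one admissible (though by no means unique) choice of the constants in this step is
\begin{align*}
t=\tfrac{1}{2}\lambda_{\min}(\boldsymbol{\Sigma}),
\qquad
n \ge \frac{4K^{4}\log(2p)}{\lambda_{\min}(\boldsymbol{\Sigma})^{2}},
\end{align*}
so that $2t^{2}-K^{4}\log(2p)/n\ge \lambda_{\min}(\boldsymbol{\Sigma})^{2}/4$, the displayed bound is at most $\exp\left \{-n\lambda_{\min}(\boldsymbol{\Sigma})^{2}/(4K^{4})\right \}$, and, outside this exceptional event, $\frac{1}{n}\sum_{i=1}^n x_{ij}^2\ge \lambda_{\min}(\boldsymbol{\Sigma})/2$ for every $j\ge 2$.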
Lastly, let $\mathcal{B}$ denote the intersection of the events that the compatibility constant is bounded, the diagonal entries of $\boldsymbol{X}^T\boldsymbol{X}$ are bounded, and the upper restricted eigenvalue (see Lemma 2) is bounded.
Since the probability bound on each of these events has the same order, by a union bound and rescaling of the constants involved we have $P(\mathcal{B}^c)\leq c_1\exp(-c_2n)$ for some $c_1,c_2>0$.
Then for the event $\mathcal{A} = \{\widehat{S}\supseteq S_0, |\widehat{S}|\leq ks_0\}$ with $k>0$ not depending on $n$ and some constants $c_3,c_4,c_5,c_6>0$ we have
\begin{eqnarray*}
P(\mathcal{A})
&=& P(\mathcal{A}|\mathcal{B})P(\mathcal{B}) + P(\mathcal{A}|\mathcal{B}^c)P(\mathcal{B}^c) \\
&\geq& \underbrace{\left [1-c_3\exp(-c_4\log(p))\right ]}_{by\ Lemma\ 1}\left [1-c_1\exp(-c_2n)\right ] + 0 \\
&\geq& 1 - c_5\exp(-c_6\log(p))
\end{eqnarray*}
for $n$ sufficiently large, since $\log(p)/n = \littleO{1}$.
\section{Proof of Theorem 2: Asymptotic results for debiased lasso after single sample split}\label{appendix:thm2}
Suppose the sample of $n$ individuals is split into a subsample $\mathcal{D}_1$ of size $n_1=qn$ and a subsample $\mathcal{D}_2$ of size $n_2 = (1-q)n$, where we assume $n_1$ is an integer and $q\in (0,1)$.
We apply a model selection procedure to $\mathcal{D}_1$ such that the selected model $\widehat{S}$ satisfies the event $\mathcal{A} = \{\widehat{S}\supseteq S_0, |\widehat{S}|\leq \littleO{\sqrt{n}} \}$ with probability tending to one as $n\rightarrow\infty$, such as the lasso.
For the selected model $\widehat{S}$, which includes the pre-specified set $S$, let $\boldsymbol{\beta}dl_{\widehat{S}}$ denote the debiased lasso estimator of $\boldsymbol{\beta}^0_{\widehat{S}}$ based on the subsample $\mathcal{D}_2$, and similarly denote the asymptotic covariance estimator by $\boldsymbol{\widehat\Theta}_{\widehat{S}}$.
We decompose
\[
\sqrt{n_2}\boldsymbol{a}_{\widehat{S}}^T\left (\boldsymbol{\beta}dl_{\widehat{S}} - \boldsymbol{\beta}^0_{\widehat{S}}\right )
= -\frac{1}{\sqrt{n_2}}\boldsymbol{a}_{\widehat{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}}^{-1}\sum_{i\in \mathcal{D}_2}\dot{\rho}_{\boldsymbol{\beta}_{\widehat{S}}^0}(y_i, (\boldsymbol{x}_i)_{\widehat{S}}) + r_{n}
\]
where, conditional on any sequence of selected models $\widehat{S}$ independent of $\mathcal{D}_2$ and satisfying the event $\mathcal{A}$, $r_n\rightarrow_p 0$ by the theoretical results of \cite{xia2021debiased}.
Hence for any $\varepsilon > 0$,
\begin{eqnarray*}
P\left (|r_n| \geq \varepsilon\right )
&=& P\left (|r_n| \geq \varepsilon|\mathcal{A}\right )P(\mathcal{A}) + P\left (|r_n| \geq \varepsilon|\mathcal{A}^c\right )P(\mathcal{A}^c) \\
&=& P\left (|r_n| \geq \varepsilon|\mathcal{A}\right )(1-\littleO{1}) + \bigO{1}\littleO{1},
\end{eqnarray*}
where $P\left (|r_n|\geq \varepsilon|\mathcal{A}\right )\rightarrow 0$ as $n\rightarrow\infty$ by \cite{xia2021debiased} as mentioned above, so $r_n = \op{1}$ unconditionally as $n\rightarrow\infty$.
Asymptotic normality, conditional on $\mathcal{A}$, also follows from the low-dimensional results of \cite{xia2021debiased} using the Lindeberg-Feller Central Limit Theorem.
\section{Proof of Theorem 3: Asymptotic results for debiased lasso after multiple sample splits}\label{appendix:thm3}
First we apply the results from the proof of Theorem 2 to the $(p+1)$-vector $\boldsymbol{a}$ with $\norm{\boldsymbol{a}_S}{2}=\norm{\boldsymbol{a}}{2}=1$, i.e. a contrast of only the pre-specified covariates $S$.
The single split debiased lasso estimator $\boldsymbol{\beta}dl_b$ satisfies
\begin{eqnarray*}
\sqrt{n_2}\boldsymbol{a}_{S}^T(\boldsymbol{\beta}dl_{S,b} - \boldsymbol{\beta}_S^0)
&=& -\frac{1}{\sqrt{n_2}}\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}
\sum_{i\in D_{2,b}}\dot{\rho}_{\boldsymbol{\beta}_{\widehat{S}_b}^0}(y_i, (\boldsymbol{x}_i)_{\widehat{S}_b}) + \op{1} \\
&=& \frac{1}{\sqrt{n_2}}\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b} \sum_{i=1}^n v_{b,i}\dot{\rho}_{\boldsymbol{\beta}^0}(y_i, \boldsymbol{x}_i) + \op{1},
\end{eqnarray*}
where $\widehat{S}_b\supseteq S_0\cup S$ is the fitted model, $\boldsymbol{\widetilde{I}}_{\widehat{S}_b}$ is the $|\widehat{S}_b|\times (p+1)$ matrix such that $\boldsymbol{\widetilde{I}}_{\widehat{S}_b}\boldsymbol{b} = \boldsymbol{b}_{\widehat{S}_b}$ for any $(p+1)$-vector $\boldsymbol{b}$, and $v_{b,i}$ is the indicator variable equal to one for $i\in D_{2,b}$.
Now consider the multiple splitting estimator $\boldsymbol{\beta}ms$ obtained by averaging across all possible splits $(D_{1,b}, D_{2,b})_{b=1}^B$, $B = {n\choose n_2}$.
Again letting $\widehat{S}_{b}$ denote the fitted model from data $\mathcal{Z}=(y_i,\boldsymbol{x}_i)_{i=1}^n$ split by sampling indicators $\boldsymbol{v}_b$, the multiple splitting estimator satisfies
\begin{eqnarray}\label{ms-decomp}
\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}_S^0)
&=& \frac{(1-q)^{-1}}{\sqrt{n}}\sum_{i= 1}^n E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b} v_{b,i} | \mathcal{Z}\right ]\dot{\rho}_{\boldsymbol{\beta}^0}(y_i, \boldsymbol{x}_i) + \op{1} \\
&=& \frac{1}{\sqrt{n}}\sum_{i= 1}^n\left (y_i - A'(\boldsymbol{x}_i^T\boldsymbol{\beta}^0) \right )\boldsymbol{\eta}_n^T\boldsymbol{x}_i \nonumber \\
&&+ \underbrace{\frac{1}{\sqrt{n}}\sum_{i= 1}^n \left ( (1-q)^{-1}E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b}v_{b,i} | \mathcal{Z}\right ] - \boldsymbol{\eta}_n^T\right )\dot{\rho}_{\boldsymbol{\beta}^0}(y_i, \boldsymbol{x}_i)}_{r_n} + \op{1}, \nonumber
\end{eqnarray}
where the expectations are with respect to the random splits $b=1,\ldots,B$, conditional on the data.
To establish asymptotic normality, we first show that $r_n = \op{1}$.
Let $\boldsymbol{\widetilde{v}}_b$ be another sampling indicator vector independent of $\boldsymbol{v}_b$, with fitted model $\widetilde{S}_b$.
The expectations in the following decomposition are taken with respect to $\boldsymbol{v}_b$ and $\boldsymbol{\tilde{v}}_b$, i.e. over $b=1,\ldots,B$, conditional on the data:
\begin{eqnarray*}
&& (1-q)^{-1}E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b}v_{b,i} | \mathcal{Z}\right ] - \boldsymbol{\eta}_n^T \\
&& \qquad
= (1-q)^{-1}E\left [\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b}v_{b,i} | \mathcal{Z}\right ] - \boldsymbol{\eta}_n^T \\
&& \qquad \qquad + \ (1-q)^{-1}\left (E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b}v_{b,i} | \mathcal{Z}\right ]
- E\left [\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b}v_{b,i} | \mathcal{Z}\right ]\right ),
\end{eqnarray*}
and, by independence, $E\left [\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b}v_{b,i} | \mathcal{Z}\right ] = E\left [\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b}|\mathcal{Z}\right ]P(v_{b,i}=1)$ with $P(v_{b,i}=1)=n_2/n = 1-q$.
Since this quantity does not depend on $i$, we can write
\[
r_n
= \underbrace{\left (E\left [\boldsymbol{a}_{\widetilde{S}}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}} | \mathcal{Z}\right ] - \boldsymbol{\eta}_n^T\right )\sqrt{n} P_n \dot{\rho}_{\boldsymbol{\beta}^0}}_{r_{n,1}}
+ \underbrace{\frac{(1-q)^{-1}}{\sqrt{n}}\sum_{i= 1}^n h_{i,n}}_{r_{n,2}}
\]
with $h_{i,n}
= \left [E\left (\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b}v_{b,i}\Big |\mathcal{Z} \right )
- E\left (\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b}v_{b,i}\Big |\mathcal{Z} \right )\right ]\dot{\rho}_{\boldsymbol{\beta}^0}(y_i,\boldsymbol{x}_i)$
so $r_{n,2} = \op{1}$ by Assumption (6).
Also, since $P_n\dot{\rho}_{\boldsymbol{\beta}^0}$ has bounded entries with mean zero, we apply Hoeffding's inequality and a union bound to obtain
\begin{eqnarray*}
P\left (\max_{j} \left | \sqrt{n}\left (P_n\dot{\rho}_{\boldsymbol{\beta}^0}\right )_j\right | \geq t\sqrt{\log(p)} \right )
&\leq& p P\left ( \left |\sqrt{n}\left (P_n\dot{\rho}_{\boldsymbol{\beta}^0}\right )_j\right |
\geq t\sqrt{\log(p)} \right ) \\
&\leq& 2p\exp\left (-ct^2\log(p)\right )
\end{eqnarray*}
for some constant $c>0$ and any $t>0$; taking $t$ large enough that $ct^2>1$ makes this bound equal to $2p^{1-ct^2}\rightarrow 0$, so $\norm{\sqrt{n}P_n\dot{\rho}_{\boldsymbol{\beta}^0}}{\infty} = \Op{\sqrt{\log(p)}}$ and, applying Assumption (7), we have
\begin{eqnarray*}
|r_{n,1}|
&\leq& \norm{E\left [\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b} | \mathcal{Z}\right ] - \boldsymbol{\eta}_n^T}{1}
\norm{\sqrt{n} P_n \dot{\rho}_{\boldsymbol{\beta}^0}}{\infty} \\
&=& \op{1/\sqrt{\log(p)}} \Op{\sqrt{\log(p)}} = \op{1}.
\end{eqnarray*}
In summary,
\[
\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}_S^0)
= \sqrt{n}\boldsymbol{\eta}_n^T P_n\dot{\rho}_{\boldsymbol{\beta}^0} + \op{1}
= \frac{1}{\sqrt{n}}\sum_{i= 1}^n\left (y_i - A'(\boldsymbol{x}_i^T\boldsymbol{\beta}^0) \right )\boldsymbol{\eta}_n^T\boldsymbol{x}_i + o_p(1),
\]
so we have asymptotic normality by the Lindeberg-Feller Central Limit Theorem.
Note that
\begin{eqnarray*}
\norm{\boldsymbol{\eta}_n}{1}
\rightarrow_p \norm{E\left [\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widetilde{S}_b} | \mathcal{Z}\right ]}{1}
&\leq& E\left [\norm{\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}}{1} | \mathcal{Z}\right ] \\
&\leq& E\left [\sqrt{|\widehat{S}_b|}\norm{\boldsymbol{a}_{\widetilde{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widetilde{S}_b}^{-1}}{2} | \mathcal{Z}\right ] \\
&=& \Op{\sqrt{s_0}}
\end{eqnarray*}
since $\boldsymbol{a}$ is a unit vector, $\boldsymbol{\Sigma}^{-1}_{\boldsymbol{\beta}^0}$ has bounded eigenvalues, and $|\widehat{S}_b|\leq ks_0$ with probability tending to one.
Thus the Lindeberg condition can be verified by standard arguments for bounded covariates.
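A minimal sketch of that verification, under the stated boundedness assumptions (and, e.g., $|y_i-A'(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)|\le 1$ as in the logistic case): writing $\xi_{n,i}=n^{-1/2}\left (y_i - A'(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)\right )\boldsymbol{\eta}_n^T\boldsymbol{x}_i$ for the summands of the leading term,
\begin{align*}
\sum_{i=1}^n E\left [\xi_{n,i}^{2}\right ] = \boldsymbol{\eta}_n^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}\boldsymbol{\eta}_n,
\qquad
\max_{i\le n}|\xi_{n,i}| \le n^{-1/2}\,\norm{\boldsymbol{\eta}_n}{1}\max_{i\le n}\norm{\boldsymbol{x}_i}{\infty} = \Op{\sqrt{s_0/n}},
\end{align*}
so each summand is negligible relative to the total variance provided $s_0/n\rightarrow 0$ and $\boldsymbol{\eta}_n^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}\boldsymbol{\eta}_n$ is bounded away from zero, which is what the Lindeberg condition requires.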
\section{Further discussion of the multiple splitting assumptions}\label{appendix:disc}
The asymptotic representation used in our multiple splitting proof is distinct from the one used by \cite{fei2021estimation} who, in their theoretical arguments, applied a different decomposition of $\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}_S^0)$ based on comparing the single split estimators with their respective oracle estimators.
Under their partial orthogonality condition, which requires that the signal variables are independent of the noise variables and thus that $\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}$ is block diagonal (reordering the covariates without loss of generality), they argue that $\boldsymbol{\beta}ms_S$ has the same asymptotic normal distribution as the oracle estimator fit on $S\cup S_0$.
Under different assumptions than \cite{fei2021estimation} we also showed that $\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}_S^0)$ is asymptotically normal, but with variance $\boldsymbol{\eta}_n^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}\boldsymbol{\eta}_n$ that is possibly larger than the oracle variance $\boldsymbol{a}_{S\cup S_0}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0,S\cup S_0}^{-1}\boldsymbol{a}_{S\cup S_0}$.
Our assumptions instead concern the limiting behavior of the average (across all random splits that exclude $(y_i,\boldsymbol{x}_i)$ from model selection, i.e. such that $v_{b,i}=1$) $E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b} v_{b,i} | \mathcal{Z}\right ]$, for $i=1,\ldots,n$.
Note that, for each $i$, the corresponding average is independent of $(y_i,\boldsymbol{x}_i)$.
Since the samples are i.i.d., the effect of always excluding the $i$th observation specifically from model selection should be asymptotically negligible (Assumption 6), and furthermore each average should converge in probability to a bounded random vector $\boldsymbol{\eta}_n^T$, which is independent of the data, with moderate rate $\norm{E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b} | \mathcal{Z}\right ] - \boldsymbol{\eta}_n^T}{1} = \op{1/\sqrt{\log(p)}}$ (Assumption 7).
We can achieve the oracle-level result of \cite{fei2021estimation} under either of two stronger assumptions that also imply our convergence conditions in Assumptions (6) and (7).
First, under model selection consistency we have $\widehat{S}_b=S\cup S_0$ for all random splits, so $\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b} = \boldsymbol{a}_{S\cup S_0}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, S\cup S_0}^{-1}\boldsymbol{\widetilde{I}}_{S\cup S_0}$, a deterministic vector, for all $b=1,\ldots,B$ and we can directly set
$\boldsymbol{\eta}_n^T
= E\left [\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b} | \mathcal{Z}\right ]$.
By (\ref{ms-decomp}) we get $\sqrt{n}\boldsymbol{a}_S^T(\boldsymbol{\beta}ms_S - \boldsymbol{\beta}_S^0) = \sqrt{n}\boldsymbol{\eta}_n^T P_n\dot{\rho}_{\boldsymbol{\beta}^0} + \op{1}$. Then, using the fact that $\boldsymbol{\widetilde{I}}_{S \cup S_0}\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}\boldsymbol{\widetilde{I}}_{S \cup S_0}^T=\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0,S\cup S_0}$, the asymptotic variance becomes $\boldsymbol{\eta}_n^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}\boldsymbol{\eta}_n
= \boldsymbol{a}_{S\cup S_0}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0,S\cup S_0}^{-1}\boldsymbol{a}_{S\cup S_0}$.
Alternatively, if the covariates in $S\cup S_0$ are independent of those in $(S\cup S_0)^C$, then $\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}=E[A''(\boldsymbol{x}_i^T\boldsymbol{\beta}^0)\boldsymbol{x}_i\boldsymbol{x}_i^T]$ has a block diagonal structure that also implies
$\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b}
= \boldsymbol{a}_{S\cup S_0}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, S\cup S_0}^{-1}\boldsymbol{\widetilde{I}}_{S\cup S_0}$ for all $b=1,\ldots,B$, using the fact that $\boldsymbol{a}_{(S\cup S_0)^C}$ is a zero-vector.
Therefore this independence assumption also implies oracle-level variance.
Note that, in general, this independence assumption is difficult to justify in practice when the true signal variables are unknown.
An exception is in linear models, where it is sufficient to assume the pre-specified covariates $S$ are independent of $S^C$, e.g. by randomized treatment assignment, since $A''(\cdot)$ is a constant.
In this sense our Assumptions (6) and (7), adapted from \cite{wang2020debiased} for linear models, are less restrictive than those used by others to justify multiple splitting procedures, such as \cite{fei2021estimation} for GLMs and \cite{fei2019drawing} for linear models.
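To see why constancy of $A''(\cdot)$ suffices in the linear-model remark above, note (a brief sketch, assuming the non-intercept covariates are centered and the intercept is counted in $S$) that $\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0}$ is then proportional to $E[\boldsymbol{x}\boldsymbol{x}^T]$, and independence between the covariates in $S$ and those in $S^C$ gives, after reordering,
\begin{align*}
E[\boldsymbol{x}\boldsymbol{x}^T]=
\begin{pmatrix}
E[\boldsymbol{x}_{S}\boldsymbol{x}_{S}^T] & \boldsymbol{0}\\
\boldsymbol{0} & E[\boldsymbol{x}_{S^C}\boldsymbol{x}_{S^C}^T]
\end{pmatrix}.
\end{align*}
Since $\boldsymbol{a}$ is supported on $S$, the vector $\boldsymbol{a}_{\widehat{S}_b}^T\boldsymbol{\Sigma}_{\boldsymbol{\beta}^0, \widehat{S}_b}^{-1}\boldsymbol{\widetilde{I}}_{\widehat{S}_b}$ then does not depend on which additional covariates enter $\widehat{S}_b$, so the split-specific averages appearing in Assumptions (6) and (7) are constant across splits.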
\section{Additional simulation results}\label{appendix:sims}
Table \ref{table-postselection-ar1-5-n2000} summarizes results from the same simulation setting as the post-model selection procedure of Table \ref{table-postselection-ar1-5}, but with a larger sample size of $n=2000$ and $p=2800$ covariates.
We see that the selection rate for small coefficients improves, resulting in lower bias and better confidence interval coverage, and the debiased lasso estimator still has a substantial advantage over the MLE.
\begin{table}
\caption{Logistic regression simulation results for $n=2000$, $p=2800$, $s_0=6$, and AR(1) correlation structure with parameter 0.5. Selection results refer to the lasso in the single split estimator, where the average selected model size was 78. The fitted model for estimating each $\beta_j$ was $\widehat{S}$. Nominal confidence interval coverage probabilities are 0.95.} \label{table-postselection-ar1-5-n2000}
{\begin{tabular*}{\textwidth}{l l r r r r r r r r}
\hline & Estimator & $\beta_{489}$ & $\beta_{130}$ & $\beta_{680}$ & $\beta_{488}$ & $\beta_{137}$ & $\beta_{190}$ & $\beta_{510}$ & $\beta_{336}$\\ \hline
$\boldsymbol{\beta}^0_j$ & & -1.50 & -1.00 & -0.50 & 0.00 & 0.00 & 0.50 & 1.00 & 1.50\\
Selection Rate & & 1.00 & 1.00 & 1.00 & 0.11 & 0.03 & 1.00 & 1.00 & 1.00\\ \hline
Bias & Debiased SS & -0.01 & -0.01 & 0.01 & -0.00 & 0.00 & -0.01 & 0.02 & 0.01\\
& MLE SS & -0.24 & -0.16 & -0.08 & -0.00 & 0.00 & 0.07 & 0.17 & 0.24\\
& Oracle $(n_2)$ & -0.03 & -0.02 & -0.00 & 0.00 & 0.00 & 0.01 & 0.03 & 0.03\\ \hline
Coverage & Debiased SS & 0.89 & 0.90 & 0.98 & 0.99 & 1.00 & 0.91 & 0.93 & 0.92\\
& MLE SS & 0.64 & 0.71 & 0.90 & 0.99 & 1.00 & 0.86 & 0.74 & 0.65\\
& Oracle $(n_2)$ & 0.96 & 0.93 & 0.97 & 1.00 & 1.00 & 0.94 & 0.95 & 0.95\\ \hline
Rejection Rate & Debiased SS & 1.00 & 1.00 & 1.00 & 0.01 & 0.00 & 1.00 & 1.00 & 1.00\\
($H_0: \beta_j=0$) & MLE SS & 1.00 & 1.00 & 1.00 & 0.01 & 0.00 & 1.00 & 1.00 & 1.00\\
& Oracle $(n_2)$ & 1.00 & 1.00 & 1.00 & 0.00 & 0.00 & 1.00 & 1.00 & 1.00\\ \hline
Standard Error & Debiased SS & 0.11 & 0.10 & 0.09 & 0.01 & 0.00 & 0.09 & 0.10 & 0.11\\
& MLE SS & 0.15 & 0.13 & 0.11 & 0.01 & 0.00 & 0.11 & 0.13 & 0.15\\
& Oracle $(n_2)$ & 0.12 & 0.11 & 0.09 & 0.00 & 0.00 & 0.09 & 0.11 & 0.12\\ \hline
Empirical SD & Debiased SS & 0.14 & 0.11 & 0.10 & 0.03 & 0.01 & 0.11 & 0.11 & 0.13\\
& MLE SS & 0.21 & 0.17 & 0.12 & 0.05 & 0.02 & 0.14 & 0.16 & 0.22\\
& Oracle $(n_2)$ & 0.12 & 0.11 & 0.09 & 0.00 & 0.00 & 0.10 & 0.10 & 0.12\\ \hline
\end{tabular*}}
\end{table}
We also present additional simulation results that have stronger correlation between covariates.
Table \ref{table-ar1-8} summarizes results from an AR(1) correlation structure with parameter 0.8, and Table \ref{table-cs-8} summarizes results from a compound symmetry correlation structure with parameter 0.8.
We see that the multiple splitting debiased lasso estimator is fairly robust to high correlations between covariates, even as the model selection performance deteriorates.
The debiased lasso continues to outperform the MLE, having lower bias and variance as well as better confidence interval coverage.
\begin{table}
\caption{Logistic regression simulation results for $n=500$, $p=700$, $s_0=6$, and AR(1) correlation structure with parameter 0.8. Selection results refer to the lasso in the single split estimator, where the average selected model size was 38. The fitted model for estimating each $\beta_j$ was $\widehat{S}\cup\{j\}$. Nominal confidence interval coverage probabilities are 0.95.} \label{table-ar1-8}
{\begin{tabular*}{\columnwidth}{l l r r r r r r r r}
\hline & Estimator & $\beta_{489}$ & $\beta_{130}$ & $\beta_{680}$ & $\beta_{488}$ & $\beta_{476}$ & $\beta_{190}$ & $\beta_{510}$ & $\beta_{336}$\\ \hline
$\boldsymbol{\beta}^0_j$ & & -1.50 & -1.00 & -0.50 & 0.00 & 0.00 & 0.50 & 1.00 & 1.50\\
Selection Rate & & 1.00 & 0.97 & 0.56 & 0.24 & 0.05 & 0.64 & 0.98 & 1.00\\ \hline
Bias & Debiased SS & 0.04 & 0.03 & 0.00 & -0.01 & -0.03 & -0.02 & -0.02 & -0.02\\
& MLE SS & -0.54 & -0.37 & -0.24 & 0.02 & -0.05 & 0.21 & 0.39 & 0.55\\
& Debiased MS & 0.01 & 0.03 & 0.01 & -0.02 & -0.01 & -0.01 & -0.01 & -0.01\\
& MLE MS & -0.54 & -0.35 & -0.23 & -0.01 & -0.01 & 0.22 & 0.39 & 0.55\\
& Oracle $(n_2)$ & -0.08 & -0.03 & -0.02 & -0.03 & -0.01 & 0.02 & 0.05 & 0.08\\
& Oracle $(n)$ & -0.04 & -0.02 & -0.02 & -0.01 & -0.01 & 0.01 & 0.03 & 0.04\\ \hline
Coverage & Debiased SS & 0.89 & 0.93 & 0.92 & 0.96 & 0.96 & 0.90 & 0.91 & 0.90\\
& MLE SS & 0.77 & 0.82 & 0.85 & 0.93 & 0.93 & 0.88 & 0.81 & 0.74\\
& Debiased MS & 0.93 & 0.92 & 0.93 & 0.96 & 0.95 & 0.94 & 0.90 & 0.92\\
& MLE MS & 0.58 & 0.73 & 0.86 & 0.97 & 0.94 & 0.85 & 0.70 & 0.53\\
& Oracle $(n_2)$ & 0.93 & 0.96 & 0.95 & 0.93 & 0.94 & 0.96 & 0.94 & 0.95\\
& Oracle $(n)$ & 0.94 & 0.96 & 0.92 & 0.96 & 0.96 & 0.94 & 0.93 & 0.96\\ \hline
Rejection Rate & Debiased SS & 0.99 & 0.92 & 0.59 & 0.04 & 0.04 & 0.57 & 0.95 & 1.00\\
($H_0: \beta_j=0$) & MLE SS & 0.99 & 0.90 & 0.64 & 0.07 & 0.07 & 0.60 & 0.94 & 0.99\\
& Debiased MS & 1.00 & 0.99 & 0.90 & 0.04 & 0.05 & 0.88 & 1.00 & 1.00\\
& MLE MS & 1.00 & 0.99 & 0.90 & 0.03 & 0.06 & 0.89 & 1.00 & 1.00\\
& Oracle $(n_2)$ & 1.00 & 1.00 & 0.74 & 0.07 & 0.06 & 0.78 & 1.00 & 1.00\\
& Oracle $(n)$ & 1.00 & 1.00 & 0.98 & 0.04 & 0.04 & 0.98 & 1.00 & 1.00\\ \hline
Standard Error & Debiased SS & 0.27 & 0.26 & 0.24 & 0.32 & 0.20 & 0.23 & 0.25 & 0.27\\
& MLE SS & 0.43 & 0.39 & 0.33 & 0.44 & 0.27 & 0.33 & 0.38 & 0.44\\
& Debiased MS & 0.19 & 0.17 & 0.15 & 0.20 & 0.12 & 0.15 & 0.17 & 0.19\\
& MLE MS & 0.30 & 0.26 & 0.22 & 0.32 & 0.19 & 0.22 & 0.26 & 0.30\\
& Oracle $(n_2)$ & 0.26 & 0.22 & 0.20 & 0.32 & 0.19 & 0.20 & 0.22 & 0.25\\
& Oracle $(n)$ & 0.17 & 0.15 & 0.14 & 0.22 & 0.13 & 0.14 & 0.15 & 0.17\\ \hline
Empirical SD & Debiased SS & 0.33 & 0.30 & 0.26 & 0.30 & 0.18 & 0.26 & 0.29 & 0.33\\
& MLE SS & 0.72 & 0.59 & 0.43 & 0.55 & 0.31 & 0.44 & 0.52 & 0.69\\
& Debiased MS & 0.21 & 0.20 & 0.17 & 0.21 & 0.12 & 0.17 & 0.19 & 0.20\\
& MLE MS & 0.35 & 0.32 & 0.26 & 0.33 & 0.19 & 0.26 & 0.30 & 0.34\\
& Oracle $(n_2)$ & 0.29 & 0.22 & 0.20 & 0.34 & 0.19 & 0.20 & 0.23 & 0.26\\
& Oracle $(n)$ & 0.19 & 0.15 & 0.14 & 0.23 & 0.13 & 0.15 & 0.17 & 0.18\\ \hline
\end{tabular*}}
\end{table}
\begin{table}
\caption{Logistic regression simulation results for $n=500$, $p=700$, $s_0=6$, and compound symmetry correlation structure with parameter 0.8. Selection results refer to the lasso in the single split estimator, where the average selected model size was 23. The fitted model for estimating each $\beta_j$ was $\widehat{S}\cup\{j\}$. Nominal confidence interval coverage probabilities are 0.95.} \label{table-cs-8}
{\begin{tabular*}{\columnwidth}{l l r r r r r r r r}
\hline & Estimator & $\beta_{489}$ & $\beta_{130}$ & $\beta_{680}$ & $\beta_{488}$ & $\beta_{476}$ & $\beta_{190}$ & $\beta_{510}$ & $\beta_{336}$\\ \hline
$\boldsymbol{\beta}^0_j$ & & -1.50 & -1.00 & -0.50 & 0.00 & 0.00 & 0.50 & 1.00 & 1.50\\
Selection Rate & & 0.98 & 0.62 & 0.24 & 0.01 & 0.03 & 0.22 & 0.68 & 0.96\\ \hline
Bias & Debiased SS & 0.04 & 0.04 & 0.03 & -0.02 & 0.00 & -0.05 & -0.08 & -0.06\\
& MLE SS & -0.14 & -0.09 & -0.04 & -0.02 & 0.01 & 0.01 & 0.04 & 0.11\\
& Debiased MS & 0.03 & 0.08 & 0.02 & -0.02 & 0.00 & -0.02 & -0.07 & -0.07\\
& MLE MS & -0.17 & -0.07 & -0.07 & -0.03 & -0.01 & 0.06 & 0.08 & 0.13\\
& Oracle $(n_2)$ & -0.07 & 0.03 & -0.07 & -0.03 & 0.00 & 0.05 & 0.03 & 0.02\\
& Oracle $(n)$ & -0.03 & 0.02 & -0.03 & -0.01 & 0.00 & 0.03 & 0.01 & 0.01\\ \hline
Coverage & Debiased SS & 0.90 & 0.91 & 0.91 & 0.91 & 0.90 & 0.94 & 0.91 & 0.91\\
& MLE SS & 0.89 & 0.89 & 0.91 & 0.91 & 0.89 & 0.93 & 0.88 & 0.90\\
& Debiased MS & 0.90 & 0.94 & 0.91 & 0.96 & 0.96 & 0.93 & 0.92 & 0.90\\
& MLE MS & 0.86 & 0.90 & 0.93 & 0.96 & 0.95 & 0.93 & 0.88 & 0.88\\
& Oracle $(n_2)$ & 0.94 & 0.94 & 0.94 & 0.96 & 0.96 & 0.94 & 0.96 & 0.96\\
& Oracle $(n)$ & 0.94 & 0.94 & 0.92 & 0.96 & 0.93 & 0.94 & 0.92 & 0.94\\ \hline
Rejection Rate & Debiased SS & 1.00 & 0.82 & 0.33 & 0.09 & 0.10 & 0.33 & 0.77 & 0.99\\
($H_0: \beta_j=0$) & MLE SS & 1.00 & 0.85 & 0.36 & 0.09 & 0.11 & 0.34 & 0.78 & 0.98\\
& Debiased MS & 1.00 & 0.98 & 0.59 & 0.04 & 0.04 & 0.56 & 0.98 & 1.00\\
& MLE MS & 1.00 & 0.98 & 0.58 & 0.04 & 0.05 & 0.58 & 0.98 & 1.00\\
& Oracle $(n_2)$ & 1.00 & 0.88 & 0.47 & 0.04 & 0.04 & 0.44 & 0.90 & 1.00\\
& Oracle $(n)$ & 1.00 & 1.00 & 0.67 & 0.04 & 0.07 & 0.71 & 1.00 & 1.00\\ \hline
Standard Error & Debiased SS & 0.32 & 0.32 & 0.31 & 0.31 & 0.31 & 0.31 & 0.32 & 0.32\\
& MLE SS & 0.38 & 0.37 & 0.35 & 0.34 & 0.35 & 0.35 & 0.36 & 0.38\\
& Debiased MS & 0.23 & 0.23 & 0.22 & 0.21 & 0.21 & 0.22 & 0.23 & 0.23\\
& MLE MS & 0.27 & 0.26 & 0.25 & 0.25 & 0.25 & 0.26 & 0.26 & 0.26\\
& Oracle $(n_2)$ & 0.34 & 0.31 & 0.30 & 0.31 & 0.31 & 0.31 & 0.32 & 0.33\\
& Oracle $(n)$ & 0.23 & 0.22 & 0.21 & 0.21 & 0.21 & 0.21 & 0.22 & 0.23\\ \hline
Empirical SD & Debiased SS & 0.36 & 0.37 & 0.34 & 0.33 & 0.35 & 0.35 & 0.39 & 0.38\\
& MLE SS & 0.51 & 0.46 & 0.40 & 0.40 & 0.41 & 0.42 & 0.50 & 0.49\\
& Debiased MS & 0.26 & 0.25 & 0.24 & 0.20 & 0.22 & 0.23 & 0.25 & 0.26\\
& MLE MS & 0.35 & 0.32 & 0.29 & 0.24 & 0.26 & 0.27 & 0.31 & 0.34\\
& Oracle $(n_2)$ & 0.32 & 0.33 & 0.33 & 0.29 & 0.30 & 0.34 & 0.32 & 0.34\\
& Oracle $(n)$ & 0.24 & 0.23 & 0.24 & 0.20 & 0.23 & 0.22 & 0.24 & 0.24\\
\hline
\end{tabular*}}
\end{table}
\end{document}
\begin{document}
\title{\textbf{\emph{S}-Noetherian generalized power series rings}}
\author {\bf F. Padashnik, A. Moussavi and H. Mousavi}
\date{}
\maketitle
\begin{center}
{\small Department of Pure Mathematics, Faculty of Mathematical
Sciences,\\ Tarbiat Modares University, Tehran, Iran, P.O. Box:
14115-134.{\footnote {\noindent Corresponding author.
[email protected] and [email protected].\\
\indent [email protected] \\
\indent [email protected] .}}
}\\
\end{center}
\begin{abstract}
Let $R$ be a ring with identity, $(M,\leq)$ a commutative positive strictly ordered monoid and $\omega_{m}$ an automorphism of $R$ for each $m\in M$. The skew generalized power series ring $R[[M,\omega]]$ is a common generalization of (skew) polynomial rings, (skew) power series rings, (skew) Laurent polynomial rings, (skew) group rings, and Mal'cev--Neumann Laurent series rings. If $S\subseteq R$ is a multiplicative set, then $R$ is called right $S$-\emph{Noetherian} if for each right ideal $I$ of $R$, $Is\subseteq J \subseteq I$ for some $s \in S$ and some finitely generated right ideal $J$. Unifying and generalizing a number of known results, we study
transfers of the $S$-Noetherian property to the ring $R[[M,\omega]]$.
We also show that the ring $R[[M,\omega]]$
is left Noetherian if and only if $R$ is left Noetherian and $M$ is finitely generated. Generalizing a result of Anderson and Dumitrescu, we show that, when $S\subseteq R$ is a $\sigma$-anti-Archimedean multiplicative set with $\sigma$ an automorphism of $R$, the ring $R$ is right $S$-Noetherian if and only if the skew polynomial ring $R[x;\sigma]$ is right $S$-Noetherian.
\end{abstract}
\textit{Keywords:} $S$-Noetherian ring; skew generalized power series ring; right Archimedean ring; skew Laurent series ring; skew polynomial ring.
\textit{Subject Classification:} 16P40; 16D15; 16D40; 16D70; 16S36
\section{Introduction}
Throughout this paper, $R$ is a ring (not necessarily commutative) with identity.
In \cite{anderson KZ}, the authors introduced the concept of ``almost finitely generated'' to study Querr\'e's characterization of divisorial ideals in integrally closed polynomial rings. Later, Anderson and Dumitrescu \cite{anderson} abstracted this notion to any commutative ring and defined the general concept of an $S$-Noetherian ring. They call $R$ an $S$-Noetherian ring if each ideal of $R$ is $S$-finite, i.e., for each ideal $I$ of $R$, there exist an $s\in S$ and a finitely generated ideal $J$ of $R$ such that $Is\subseteq J \subseteq I$. By \cite[Proposition 2(a)]{anderson}, any integral domain $R$ is ($R \setminus \{0\}$)-Noetherian; so an $S$-Noetherian ring need not be Noetherian. Similarly, an $R$-module $M$ is said to be $S$-finite if there exist an $s\in S$ and a finitely generated $R$-submodule $F$ of $M$ such that $sM \subseteq F$, and $M$ is called $S$-Noetherian if each submodule of $M$ is $S$-finite.
In \cite{anderson}, the authors gave a number of $S$-variants of well-known results for
Noetherian rings: $S$-versions of Cohen’s result, the Eakin-Nagata theorem, and
the Hilbert basis theorem under an additional condition. More precisely, in \cite[
Propositions 9 and 10]{anderson}, the authors showed that, if $S$ is an anti-Archimedean
subset of an $S$-Noetherian ring $R$, then the polynomial ring $R[X_1,\cdots,X_n]$ is also
an $S$-Noetherian ring; and if $S$ is an anti-Archimedean subset of an $S$-Noetherian
ring $R$ consisting of nonzero divisors, then the power series ring $R[[X_1,\cdots,X_n]]$ is
an $S$-Noetherian ring. Note that if $S$ is a set of units of $R$, then the results above
are nothing but the Hilbert basis theorem and a well-known fact that $R[[X]]$ is
Noetherian if $R$ is Noetherian. In \cite[Theorem 2.3]{liu}, Liu generalized this result to the
ring of generalized power series as follows: If $S$ is an anti-Archimedean subset of a
ring $R$ consisting of nonzero divisors and $(\Gamma,\leq)$ is a positive strictly ordered monoid (defined in Section 4), then $R[[\Gamma,\le]]$ is $S$-Noetherian if and only if $R$ is $S$-Noetherian and $\Gamma$ is finitely generated. Note that this recovers the result for the Noetherian case shown in \cite[Theorem 4.3]{brook} when $S$ is a set of units.
Also, the authors in \cite{lim ex} study transfers of the $S$-Noetherian property to the constructions
$D+(X_1,\cdots,X_n)E[X_1,\cdots,X_n]$ and $D+(X_1,\cdots,X_n)E[[X_1,\cdots,X_n]]$,
and Nagata's idealization is studied in \cite{lim amal}.\par
It is proved in \cite[Theorem 7.7, page 65]{Gilmer} that
$R[M]$ is Noetherian if and only if $R$ is Noetherian and $M$ is finitely generated.
Brookfield \cite{brook} proved that if $(M,\le)$ is a commutative positively ordered monoid,
then $R[[M,\le]]$ is right Noetherian if and only if $R$ is right Noetherian and $M$ is finitely generated.\par
Ribenboim \cite{Ribenboim-noeth}
and
Varadarajan \cite{vara},
have carried out an extensive study of
rings of generalized power series. They investigated conditions under which a
ring of generalized power series $R[[M,\le]]$ is Noetherian, where $R$ is a commutative
ring with identity and $(M,\le)$ is a strictly ordered monoid.\par
In this paper we obtain results pertaining to Noetherian nature of
generalized power series rings. These considerably strengthen earlier
results of Ribenboim \cite{Ribenboim-noeth}, Varadarajan \cite{vara}, Brookfield \cite{brook}, D. D. Anderson and T. Dumitrescu \cite{anderson}, D. D. Anderson, B. G. Kang and M. H. Park \cite{andersonI}, and D. D. Anderson, D. J. Kwak and M. Zafrullah \cite{anderson KZ} on this topic.\par
More precisely, we show that, if $S$ is a $\sigma$-anti-Archimedean multiplicative
subset of an $S$-Noetherian ring $R$ with an automorphism $\sigma$, then the skew polynomial ring $R[x;\sigma]$ is also
an $S$-Noetherian ring; and if $(M,\le)$ is a commutative positively ordered monoid and $\omega_m$
is an automorphism over $R$ for every $m\in M$, then the skew generalized power series ring $R[[M,\omega]]$ is right Noetherian if and only if $R$ is right Noetherian and $M$ is finitely generated.
When $(M,\leq)$ is a commutative positive strictly ordered monoid and $\omega_{m}$ is an automorphism for each $m\in M$,
we unify and generalize the above mentioned results, and study
transfers of $S$-Noetherian property to the skew generalized power series ring $R[[M,\omega]]$.
\section{S-Noetherian property on skew polynomial rings}
If $R$ is a commutative ring and $S$ is a multiplicative subset of $R$, the authors in \cite{anderson} proved that a sufficient condition for the ring of fractions $R_S$ to be a Noetherian ring is that $R$ be an $S$-Noetherian ring. In noncommutative rings, the situation is more complicated. In fact, if $S$ is right (resp., left) permutable and right (resp., left) reversible (i.e., $S$ is a right (resp., left) denominator set), then $R$ has a ring of fractions $RS^{-1}$ (resp., $S^{-1}R$). In this situation, denominator sets (both left and right) behave like multiplicatively closed sets in the commutative case. Our interest in this note is in such multiplicatively closed subsets (i.e., denominator sets) in noncommutative rings. First we define the notion of $S$-Noetherian rings for noncommutative rings.
\begin{definition}
Let $R$ be a ring and $S$ a multiplicative subset
of $R$. A right ideal $I$ of $R$ is called right $S$-\emph{finite} (resp., $S$-\emph{principal}) if there exist a finitely generated (resp., principal) right ideal $J$ of $R$ and some $s\in S$ such that $Is\subseteq J \subseteq I$.\\
A ring $R$ is said to be right $S$-\emph{Noetherian} (resp., $S$-\emph{PRIR}) if each right ideal of $R$ is right $S$-finite (resp., $S$-principal).
The left-sided notions are defined analogously.\\
Also, we say that an $R$-module $M$ is right (resp., left) $S$-\emph{finite} if $Ms\subseteq F$ (resp., $sM\subseteq F$) for some $s\in S$ and a finitely generated submodule $F$ of $M$.
A module $M$ is called right (resp., left) $S$-\emph{Noetherian} if each submodule of $M$ is right (resp., left) $S$-finite.
\end{definition}
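As a simple commutative illustration of this definition, expanding the remark recalled in the introduction: if $R$ is an integral domain and $S=R\setminus \{0\}$, then every nonzero ideal $I$ of $R$ is $S$-principal, since any $0\neq a\in I$ gives
\begin{align*}
Ia\subseteq aR\subseteq I,
\end{align*}
with $aR$ principal; hence $R$ is $S$-Noetherian (cf. \cite[Proposition 2(a)]{anderson}).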
The authors in \cite{anderson} justified the definition of $S$-Noetherian rings in the commutative case by proving some interesting properties of $S$-Noetherian rings. For example, they showed that if $R$ is $S$-Noetherian, then the ring of fractions $R_S$ is Noetherian, and they gave conditions under which the converse holds.\\
Given rings $R$ and $T$ and a ring monomorphism $\varphi\colon R\longrightarrow T$, an ideal $J$ of $T$ is said to be extended if there exists an ideal $I$ of $R$ such that $\varphi(I)=J$. Also, a ring $R$ is von Neumann regular if for every $a \in R$ there exists an $x\in R$ such that $a=axa$. The center of a ring $R$ is denoted by $\mathrm{Cent}(R)$.
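For instance (a standard example recorded only to fix ideas), every division ring $D$ is von Neumann regular: for $a\neq 0$ one may take $x=a^{-1}$, so that
\begin{align*}
axa=aa^{-1}a=a,
\end{align*}
while for $a=0$ any $x\in D$ works.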
\begin{proposition}\label{tie}
Let $R$ be a ring, $S\subseteq R$ a multiplicative set and $I$ a right ideal of $R$.
\emph{1}) If $R$ is von Neumann regular, $S$ a denominator set and $I\cap S\neq \emptyset$, then $I$ is right $S$-principal.
\emph{2}) If $S\subseteq T$ are right denominator subsets of $R$ and $R$ is right $S$-Noetherian (resp., $S$-PRIR), then $R$ is right $T$-Noetherian (resp., $T$-PRIR).
\emph{3}) If $R$ is von Neumann regular and $S$ a denominator set, then $R$ is right $S$-Noetherian (resp., $S$-PRIR) if and only if $R$ is right Noetherian (resp., PRIR).
\emph{4}) If $S$ is a denominator set and $R$ is right $S$-Noetherian (resp., $S$-PRIR), then $RS^{-1}$ is right Noetherian.
\emph{5}) If $S$ is central in $R$, then the conditions \emph{1}--\emph{4} and those of \cite[Proposition \emph{2}]{anderson} follow.
\end{proposition}
\begin{proof}
1) Let $S\subseteq R$ be a denominator set, $R$ a von Neumann regular ring and $I$ a right ideal of $R$.
Then for each $s\in I\cap S$, one can see that $Is\subseteq Rs=s\frac{1}{s}Rs$, where $\frac{1}{s}$ is the inverse of $s$ in $RS^{-1}$. It is sufficient to see that $\frac{1}{s}Rs\subseteq R$.
For each $s\in S$, there exists $a\in R$ such that $sas=s$, so $sa=s\frac{1}{s}=1$ (in $RS^{-1}$). Thus $sa=1$ and hence $a=\frac{1}{s}$.
Therefore $\frac{1}{s}\in R$ and $Rs\subseteq R$, so $\frac{1}{s}Rs \subseteq R$.
2) Let $S\subseteq T$ be denominator subsets of $R$. If $R$ is right $S$-Noetherian (resp., $S$-PRIR), then for each right ideal $I$ of $R$ there exists $s\in S$ such that $Is\subseteq J \subseteq I$ for some finitely generated (resp., principal) right ideal $J$ of $R$. Since $s\in S$ and $S\subseteq T$, we have $s\in T$, which means that $R$ is right $T$-Noetherian (resp., $T$-PRIR).
3) Assume that $R$ is a right Noetherian (resp., PRIR) ring. Then each right ideal $I$ of $R$ is finitely generated (resp., principal), so for each $s\in S$ one can see that $Is\subseteq I$. Hence $R$ is right $S$-Noetherian (resp., $S$-PRIR).
On the other hand, assume that $R$ is right $S$-Noetherian (resp., $S$-PRIR), so there exists $s\in S$ such that $Is\subseteq J\subseteq I$ for some finitely generated (resp., principal) right ideal $J$ of $R$. Also suppose that $sts=s$ for some $t\in R$. So $Is\subseteq I$. Also, $It\subseteq I$, so $Its\subseteq Is=Ists$. So $Its\cdot\frac{1}{s}\subseteq Ists\cdot\frac{1}{s}$. Hence $It\subseteq Ist$. Also $Is\subseteq I$ yields that $Ist\subseteq It \subseteq Ist$. So $Ist=It$. Thus $Ists=Its$, which means that $Is=Ists=Its$. However $Its=I\frac{1}{s}sts=I\frac{1}{s}s=I$. So $Is=I$. Thus $I=Is\subseteq J \subseteq I$ and hence $I=J$, and since $J$ is a finitely generated (resp., principal) right ideal of $R$, so is $I$.
4) This proof is inspired by \cite[Proposition 3.11(i)]{atia}.
First, we claim that each right ideal of $RS^{-1}$ is extended. Let $J$ be a right ideal of the ring of fractions $RS^{-1}$ and let $b=\frac{x}{s}\in J$. Then $\frac{x}{1}=\frac{x}{s}\cdot\frac{s}{1}\in J\cdot\frac{s}{1}\subseteq J$, so $\frac{x}{1}\in J$. Hence $\varphi^{-1}(\frac{x}{1})\in \varphi^{-1}(J)$, which means that $x\in \varphi^{-1}(J)$. Thus $\varphi(x)\in \varphi(\varphi^{-1}(J))$, so $\frac{x}{1}\in \varphi(\varphi^{-1}(J))$ and hence $\frac{x}{1}\cdot\frac{s}{s}=\frac{xs}{s}\in \varphi(\varphi^{-1}(J))$. Note that $\varphi(\varphi^{-1}(J))$ is an ideal of $RS^{-1}$
and $s\in U(RS^{-1})$, so we have
\begin{align*}
\frac{xs}{s}\cdot\frac{1}{s}=\frac{x}{s}\in \varphi(\varphi^{-1}(J))\frac{1}{s}\subseteq \varphi(\varphi^{-1}(J)).
\end{align*}
So $b=\frac{x}{s}\in \varphi(\varphi^{-1}(J))$, which implies $J\subseteq \varphi(\varphi^{-1}(J))$.
On the other hand, $\varphi(\varphi^{-1}(J))\subseteq J$ holds for each right ideal of $RS^{-1}$. Thus
$J=\varphi(\varphi^{-1}(J))$ and $J$ is an extended ideal of $RS^{-1}$.
Now let $K$ be a right ideal of the ring of fractions $RS^{-1}$. Since $R$ is right $S$-Noetherian, there exist $s\in S$ and a finitely generated (resp., principal) right ideal $W$ of $R$ such that $\varphi^{-1}(K)s\subseteq W\subseteq \varphi^{-1}(K)$.
So $\varphi(\varphi^{-1}(K)s)\subseteq \varphi(W)\subseteq \varphi(\varphi^{-1}(K))$.
We know that $\varphi(\varphi^{-1}(K)s)=\varphi(\varphi^{-1}(K))\varphi(s)$.
Also, $\varphi(s)\in U(RS^{-1})$ and $\varphi(\varphi^{-1}(K))=K$. So $K\subseteq \varphi(W) \subseteq K$.
So $K=\varphi(W)$. Since $W$ is finitely generated, $\varphi(W)$ is finitely generated.
So $K$ is finitely generated which means that $RS^{-1}$ is right Noetherian.
5) The proof is straightforward by \cite[Proposition 2]{anderson}.
\end{proof}
Now we generalize a theorem of D. D. Anderson and T. Dumitrescu \cite[Proposition 9]{anderson} on the commutative polynomial ring $R[x]$ to the skew polynomial setting.
We show that if $R$ is a right (or left) $S$-Noetherian ring with an automorphism $\sigma$ and $S$ is a $\sigma$-anti-Archimedean multiplicative set, then $R[x;\sigma]$ is a right (or left) $S$-Noetherian ring.
In \cite{andersonI} the authors defined the notion of an anti-Archimedean multiplicative set. Now we introduce the notion of a $\sigma$-anti-Archimedean multiplicative set:
\begin{definition}
Let $R$ be a ring with an automorphism $\sigma$ and $S$
a multiplicative set. Then $R$ is called left $\sigma$-\emph{anti}-\emph{Archimedean} over
$S$ if there exists $s\in S$ such that
\begin{align*}
\big(\bigcap_{l\geq 1,\,k_i\ge 0} R\sigma^{k_1}(s)\sigma^{k_2}(s)\cdots \sigma^{k_l}(s)\big)\cap S \neq \emptyset.
\end{align*}
\end{definition}
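For orientation, and only as an illustrative special case: when $\sigma=\mathrm{id}_R$, every product $\sigma^{k_1}(s)\cdots\sigma^{k_l}(s)$ collapses to $s^{l}$, so the displayed condition reads
\begin{align*}
\Big(\bigcap_{l\geq 1} Rs^{l}\Big)\cap S \neq \emptyset,
\end{align*}
which has the same shape as the anti-Archimedean condition of \cite{anderson} and \cite{andersonI} (there required for every $s\in S$ in the commutative setting).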
\begin{theorem}\label{S-Noetherian}
Let $R$ be a ring with an automorphism $\sigma$ and $S\displaystyle\sumbseteq R$ a $\sigma$-anti-Archimedean multiplicative set. Then $R$ is right \emph{(}or left\emph{)} $S$-Noetherian if and only if $R[x;\sigma]$ is right \emph{(}or left\emph{)} $S$-Noetherian.
\end{theorem}
\begin{proof}
($\Rightarrow$) We prove the theorem for the right version; the proof of the left version is similar. First, we claim that if $D$ is a finitely generated $R$-module and $R$ is a right $S$-Noetherian ring, then $D$ is a right $S$-Noetherian module. For this claim, assume that $D$ is a finitely generated right $R$-module. So there exist a finitely generated free right $R$-module $F$ and a surjective homomorphism $\pi:F\longrightarrow D$. We show that $D$ is a right $S$-Noetherian $R$-module. For this, let $N:=\pi^{-1}(T)$ for a submodule $T$ of $D$. We have $N\simeq I_1\oplus I_2\oplus \cdots \oplus I_l$ for some right ideals $I_i$ of $R$, $1\leq i \leq l$. Since $R$ is a right $S$-Noetherian ring, there exists $s_i\in S$ such that $I_is_i\subseteq J_i$
for a finitely generated right ideal $J_i$ of $R$, $1\leq i \leq l$. Now take $s':=s_1s_2\cdots s_l \in S$; we show that $Ns'\subseteq K$ for a finitely generated $R$-submodule $K$ of $F$. One can see that $Ns_1=I_1s_1\oplus I_2s_1\oplus \cdots \oplus I_ls_1$. Since $I_i$ is a right ideal of $R$, we have $I_is_1\subseteq I_i$ for $i\neq 1$ and $I_1s_1\subseteq J_1$ for the finitely generated right ideal $J_1$ of $R$. So we have $Ns_1\subseteq J_1\oplus I_2\oplus I_3\oplus \cdots \oplus I_l$. Continuing in this way,
$Ns_1s_2\cdots s_l\subseteq J_1 \oplus J_2\oplus \cdots \oplus J_l=:K$,
where each $J_i$ is a finitely generated right ideal of $R$, $1\leq i \leq l$, and hence $K$ is a finitely generated $R$-submodule of $F$. Thus $Ns'\subseteq K$ and hence $F$ is a right $S$-Noetherian $R$-module. Next, since $T=\pi(N)$ and $Ns'\subseteq K$, we have $\pi(Ns')=\pi(N)s'=Ts'\subseteq \pi(K)$.
We know that $K$ is finitely generated in $F$, so $\pi(K)$ is a finitely generated $R$-submodule of $D$. Thus $Ts'\subseteq \pi(K)$, which means that $T$ is $S$-finite. Since $T$ is an arbitrary $R$-submodule of $D$, $D$ is a right $S$-Noetherian module.
Now, we prove that $A:=R[x;\sigma]$ is a right $S$-Noetherian ring. Let $I$ be a right ideal of $A$ and set
\begin{align*}
J=\{r\in R \mid r\,\, \textrm{is the leading coefficient of some polynomial in} \,\, I\}\cup \{0\}.
\end{align*}
It is easy to see that $J$ is a right ideal of $R$. Since $R$ is right $S$-Noetherian, $Js\subseteq (a_1R+a_2R+\cdots+a_nR)$ for some $s\in S$ and $a_i \in J$. So for each $i$ there exists a polynomial $f_i\in I$ with leading coefficient $a_i$, say $f_i=a_ix^{n_i}+a_{i,n_i-1}x^{n_i-1}+\cdots +a_{i,0}$. Let $d=\max\{n_i\}$.
Let $T$ be the set of all polynomials in $I$ of degree less than $d$. The right $R$-module of all polynomials in $A$ of degree less than $d$ is finitely generated, so by the first claim it is right $S$-Noetherian; hence its submodule $T$ is $S$-finite, and there exist $t \in S$ and $g_i \in T$, $1\le i \le m$, such that
$Tt\subseteq (g_1R+g_2R+\cdots +g_mR)$. Let $h(x)=\sum_{i=0}^zb_ix^i \in I$ with $b_z\neq 0$. Then $b_z\in J$, which means that
$b_zs\in (a_1R+a_2R+\cdots+a_nR)$. Thus $h\sigma^{-z}(s)$ can be written as follows:
\begin{align*}
h\sigma^{-z}(s)= v^{(1)}+w^{(1)}+q^{(1)},
\end{align*}
where $v^{(1)}\in (f_1A+f_2A+\cdots+f_nA)$, $w^{(1)}\in \{f\in A \mid d+1\le \deg(f)\le z-1\}$ and $q^{(1)}\in T$. Continuing in this way, multiplying on the right by $\sigma^{-z+1}(s),\sigma^{-z+2}(s),\dots,\sigma^{-1-d}(s)$ successively, we find $v\in (f_1A+f_2A+\cdots+f_nA)$ and
$w\in T$ such that
\begin{align*}
h\sigma^{-z}(s)\sigma^{-z+1}(s)\cdots \sigma^{-d-1}(s)=v+w.
\end{align*}
Set $s_i=\sigma^{-z+i-1}(s)$ for $1\le i\le z-d$; multiplying by $t$ on the right, we obtain $hs_1s_2\cdots s_{z-d}t =vt+wt$. But $wt\in Tt$, so
$wt\in (g_1R+g_2R+ \cdots+g_mR) \subseteq (g_1A+g_2A+\cdots+g_mA)$. Hence,
\begin{align*}
hs_1s_2\cdots s_{z-d}t\in (f_1A+f_2A+\cdots +f_nA+g_1A+\cdots +g_mA).
\end{align*}
Since the $s_i$'s and $t$ are independent of the choice of $h\in I$, we have
\begin{align*}
Is_1s_2\cdots s_{z-d}t\subseteq (f_1A+f_2A+\cdots+ f_nA+g_1A+\cdots +g_mA).
\end{align*}
Finally, since $s_1s_2\cdots s_{z-d}t \in S$, the ideal $I$ is $S$-finite, and because $I$ was an arbitrary right ideal of $A$, the ring $A$ is right $S$-Noetherian.
($\Leftarrow$)
Let $I$ be a right ideal of $ R$. Suppose that
\begin{align*}
J=\{f\in A | \,\, \textrm{the leading coefficient of}\, f\, \textrm{is in}\, I\}.
\end{align*}
Then $J$ is a right ideal of $A$. Since $A$ is right $S$-Noetherian,
there exists $s\in S$ such that $Js\subseteq K\subseteq J$,
where $K$ is a finitely generated right ideal of $A$. Suppose that $K=(f_1A+f_2A+\cdots +f_lA)$.
Let $r\in I$; then there exists some $f\in J$ with leading coefficient $r$ such that $fs=\sum a_if_i$.
So if $r_i$ is the leading coefficient of $f_i$, $1\leq i\leq l$, then
$rs\in (r_1R+r_2R+\cdots +r_lR)$. So $Is\subseteq (r_1R+r_2R+ \cdots +r_lR)$.
Also, $K\subseteq J$, so each leading coefficient of an element of $K$ is a leading coefficient of an element of $J$ and hence lies in $I$.
So $(r_1R+r_2R+ \cdots +r_lR) \subseteq I$, and hence $I$ is right $S$-finite and $R$
is right $S$-Noetherian.
\end{proof}
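For orientation (this remark is added here and is not part of the original text), consider the special case $S=\{1\}$: an ideal is then $S$-finite precisely when it is finitely generated, and $S$ is $\sigma$-anti-Archimedean since $\big(\bigcap_{l\geq 1,\,k_i\ge 0}R\sigma^{k_1}(1)\cdots\sigma^{k_l}(1)\big)\cap S=R\cap S\neq\emptyset$. Theorem \ref{S-Noetherian} then specializes to the skew Hilbert basis theorem:
\begin{align*}
R \ \text{is right (or left) Noetherian} \iff R[x;\sigma] \ \text{is right (or left) Noetherian}.
\end{align*}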
We have the following generalization of a theorem of D.D. Anderson and Tiberiu Dumitrescu \cite[Proposition 9]{anderson}.
\begin{corollary}
Let $R$ be a \emph{(}not necessarily commutative\emph{)} ring and $S\subseteq R$ an anti-Archimedean multiplicative set.
If $R$ is $S$-Noetherian then so is the polynomial ring $R[X_1,X_2,\cdots,X_n]$.
\end{corollary}
\section{Noetherian Skew Generalized Power Series Rings}
Throughout this section, $(M,\leq)$ is assumed to be a strictly ordered commutative monoid.
The pair $(M,\le)$ is called an \textit{ordered monoid} with
order $\le$ if for every $m,m', n\in M$, $m\le m'$ implies that
$nm\le nm'$ and $mn\le m'n$. Also, an ordered monoid $(M,\le)$ is
said to be \textit{strictly ordered} if for every $m,m',n\in M$, $m<m'$
implies that $nm<nm'$ and $mn<m'n$.
Let $(M,\le)$ be a partially ordered set. The set $(M,\le)$ is called \textit{Artinian}
if every strictly decreasing sequence of elements of $M$ is finite, and
$(M,\le)$ is called \textit{narrow} if every subset of pairwise
incomparable elements of $M$ is finite. Thus $(M, \leq )$ is
Artinian and narrow if and only if every nonempty subset of $M$ has
at least one, but only finitely many, minimal elements.
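As a quick illustration (these examples are added here and are not taken from the cited sources), $(\mathbb{N},\leq)$ with the usual order is Artinian and narrow, and so is $(\mathbb{N}^n,\leq)$ with the componentwise order, since any set of pairwise incomparable tuples is finite by Dickson's lemma; on the other hand, $(\mathbb{Z},\leq)$ is narrow but not Artinian.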
The author of \cite{Ribenboim-semisimple} introduced the ring of
generalized power series $R[[M]]$, for a strictly ordered monoid $M$
and a ring $R$, consisting of all functions from $M$ to $R$ whose support
is Artinian and narrow, equipped with pointwise addition and
convolution multiplication. There are many interesting
examples of rings of this form (e.g., Elliott and Ribenboim,
\cite{Elliott-Ribenboim 1992}; Ribenboim, \cite{Ribenboim (1995b)}), and this construction has recently been studied
extensively by many authors.
In \cite{zim}, the authors defined a ``twisted'' version of this
construction and studied the ascending chain condition on its principal ideals.
Now we recall the construction of the skew generalized power series
ring introduced in \cite{zim}. Let $R$ be a ring, $(M, \leq )$ a
strictly ordered monoid, and $\omega: M\rightarrow End(R)$ a monoid
homomorphism. For $m \in M$, let $\omega_{m}$ denote the image of
$m$ under $\omega$, that is $\omega_{m}=\omega (m)$. Let $A$ be the
set of all functions $f: M\rightarrow R$ such that the support
$\operatorname{supp}(f)= \{ m\in M \mid f(m)\neq 0 \}$ is Artinian and narrow. Then
for any $m \in M$ and $f,g \in A $ the set
\begin{align*}
\chi_{m}( f, g)=\{(u,v) \in \operatorname{supp}(f) \times \operatorname{supp}(g): m=uv\}
\end{align*}
is finite. Thus one can define the product $f g: M\rightarrow R$ of
$f, g \in A$ as follows:
\begin{align*}
(fg)(m)=\sum_{ (u,v)\in \chi_{m}(f,g)}f(u)\omega_{u} (g(v)),
\end{align*}
(by convention, a sum over the empty set is $0$). Now, the set $A$
with pointwise addition and the multiplication defined above is a ring,
called \textit{the ring of skew generalized power series} with
coefficients in $R$ and exponents in $M$. To simplify notation, we write an element $f\in A$ as a formal series $\sum\limits_{m\in M}r_mx^m$,
where $r_m = f(m)\in R$. This ring is denoted either by
$R[[M^{\leq},\omega]]$ or by $R[[M,\omega ]]$ (see \cite{unified}
and \cite{von}).\par
For every $r\in R$ and $m\in M$ we define the maps $c_r,e_m:M\longrightarrow R$ by
\begin{align}\label{e_m}
c_r(x)=\begin{cases}
r & x=1, \\
0 & \text{otherwise},
\end{cases}
\qquad
e_m(x)=\begin{cases}
1 & x=m, \\
0 & \text{otherwise},
\end{cases}
\end{align}
where $x\in M$.
By way of illustration, $c_r$ and $e_m$ play the roles of $r$ and $x^m$ in the usual polynomial ring $R[x]$, respectively.\\
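To make the analogy concrete (the following computation is added here for illustration; it follows directly from the convolution formula above), one checks that
\begin{align*}
e_me_n=e_{mn}
\qquad\text{and}\qquad
e_mc_r=c_{\omega_m(r)}e_m
\quad\text{for all } m,n\in M,\ r\in R,
\end{align*}
since $\operatorname{supp}(e_m)=\{m\}$, $\operatorname{supp}(c_r)\subseteq\{1\}$ and $\omega_1=\mathrm{id}_R$. This is the analogue of the relation $x^mr=\sigma^m(r)x^m$ in the skew polynomial ring $R[x;\sigma]$.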
The following proposition, proved in \cite[Theorem 2.1]{higman}, characterizes Artinian and narrow ordered sets.
\begin{proposition}
Let $(M,\leq)$ be an ordered set. Then the following conditions are
equivalent:
\emph{(1)} $(M,\leq)$ is Artinian and narrow.
\emph{(2)} For any sequence $(m_n)_{n\in \mathbb{N}}$ of elements of $M$ there exist indices $n_1<n_2<n_3<\cdots$ such that $m_{n_1}\leq m_{n_2}\leq m_{n_3}\leq \cdots$.
\emph{(3)} For any sequence $(m_n)_{n\in \mathbb{N}}$ of elements of $M$ there exist indices $i < j$ such that $m_i\leq m_j$.
\end{proposition}
The author of \cite{brook} introduced the concept of a lower set. A \emph{lower set} of $L$ is a subset $I\subseteq L$ such that $x\leq y \in I$ implies $x \in I$ for all $x, y \in L$; we write $\Downarrow L$ for the set of lower sets of $L$, ordered by inclusion. Working with lower sets allows one to dispense with the narrowness condition: it is proved there that if $L$ is a partially ordered set, then $\Downarrow L$ is Artinian if and only if $L$ is Artinian and narrow. He also showed that if $\alpha : K \longrightarrow L$ is a strictly increasing map between partially ordered sets and $L$ is Artinian (or Noetherian), then so is $K$. Moreover, if $\alpha$ is surjective and $\Downarrow K$ is Artinian (or Noetherian), then so is $\Downarrow L$.
An ordered monoid $(M,\leq)$ is called \emph{positively ordered} if $m\ge 0$ for all $m\in M$. In this case,
$m\preceq m'$ implies $m \leq m'$ for all $m, m' \in M$. Now, following \cite[Section 4]{brook}, we have
\begin{align}
R[[M,\omega,\leq]]= \{f\in R[[M,\omega]] \mid \Downarrow (\operatorname{supp}(f),\leq)\ \text{is Artinian}\}.
\end{align}
If $\Downarrow (M,\leq)$ is Artinian, then $R[[M,\omega,\leq]]=R[[M,\omega]]$.
For instance, $\Downarrow (\mathbb{F},\preccurlyeq)$
and $\Downarrow (\mathbb{F}^n,\preccurlyeq)$ are
Artinian, and so $R[[\mathbb{F},\omega,\preccurlyeq]]=R[[\mathbb{F},\omega]]$
and $R[[\mathbb{F}^n,\omega,\preccurlyeq]]=R[[\mathbb{F}^n,\omega]]$,
where $\mathbb{F}$ is a free monoid.\\
Now we give a generalization of a result of G. Brookfield \cite[Theorem 4.3]{brook}:
\begin{theorem}\label{go}
Let $R$ be a ring, $(M,\leq)$ a positive strictly ordered monoid and $\omega_{m}$ an automorphism of $R$ with $\omega_m\omega_n=\omega_n\omega_m$ for each $m,n\in M$.
Then $R[[M,\omega]]$ is left Noetherian if and only if $R$ is left Noetherian and $M$ is finitely generated.
\end{theorem}
\begin{proof}
$\Leftarrow$) In the first place, we claim that if $\varphi :(N,\leq)\rightarrow (M,\leq)$ is a surjective strict monoid homomorphism, induces a surjective ring homomorphism $\varphi^*: R[[N,\omega,\leq]]\rightarrow R[[M,\omega,\leq]]$.
Since $\varphi$ is strict, $\varphi^{-1}(x)$ is antichain in $(N,\leq)$ for all $x\in M$. Thus, if
$f\in R[[N,\omega,\leq]]$ then $\varphi^{-1}(x)\cap \displaystyle\sumpp(f)$ is finite and we can define $\varphi^*(f)=f^*$, where
$f^*(x)= \displaystyle\summ_{x' \in \varphi^{-1}(x)} f(x')$ for $x\in M$.
We show that $\varphi^*$ is a ring homomorphism. One can see that
\begin{align}\label{A}
(fg)^*(m)=\sum_{m'\in \varphi^{-1}(m)}(fg)(m')=\sum_{xy=m}\sum_{\substack{x'y'=m'\\m'\in \varphi^{-1}(m)}}\bigg(f(x')\omega_{x'}(g(y'))\bigg).
\end{align}
On the other hand
\begin{align*}
(f^*g^*)(m)=&\big(\varphi^*(f)\varphi^*(g)\big)(m)=\sum_{xy=m}\bigg(\varphi^*(f)(x)\,\omega_x\big(\varphi^*(g)(y)\big)\bigg)\nonumber\\
=&\sum_{xy=m}\bigg(\sum_{x'\in \varphi^{-1}(x)}f(x')\bigg)\omega_x\bigg(\sum_{y'\in \varphi^{-1}(y)}g(y')\bigg)\nonumber\\
=&\sum_{xy=m}\sum_{x'\in \varphi^{-1}(x)}\sum_{y'\in \varphi^{-1}(y)}\bigg(f(x')\omega_{x'}(g(y'))\bigg).
\end{align*}
Since $\varphi$ is a homomorphism, $\varphi^{-1}(x)\varphi^{-1}(y)=\varphi^{-1}(xy)$, and so $\varphi^{-1}(m)=\varphi^{-1}(x)\varphi^{-1}(y)$. So
\begin{align}\label{B}
(f^*g^*)(m)=\sum_{xy=m}\sum_{\substack{m'=x'y'\\m'\in \varphi^{-1}(m)}}\bigg( f(x')\omega_{x'}(g(y'))\bigg).
\end{align}
By equations \eqref{A} and \eqref{B} we see that $(fg)^*(m)=(f^*g^*)(m)$. We also have
\begin{align*}
(f+g)^*(x)=&\sum_{x'\in \varphi^{-1}(x)}(f+g)(x')=\sum_{x'\in \varphi^{-1}(x)}(f(x')+g(x'))\nonumber\\
=&\sum_{x'\in \varphi^{-1}(x)}f(x')+\sum_{x'\in \varphi^{-1}(x)}g(x')=f^*(x)+g^*(x).
\end{align*}
Thus $\varphi^*:R[[N,\omega,\leq]]\rightarrow R[[M,\omega,\leq]]$ is a ring homomorphism.
Now, we show that $\varphi^*$ is surjective. Suppose that $f\in R[[M,\omega,\leq]]$, where $\{f(n)\}_{n\in M}$ are the coefficients of $f$ in $R$. For every $n\in M$, the set $\varphi^{-1}(n)$ is nonempty and finite, say $\varphi^{-1}(n)=\{m_1,m_2,\dots ,m_k\}$, where $k$ and all the $m_{i}$ depend on $n$. We define the function $g\in R[[N,\omega,\leq]]$ as follows:
\begin{align}
g(m_j)=\begin{cases}
f(n)\quad ; j=1 \\
0 \quad ; \text{otherwise}.
\end{cases}
\end{align}
Notice that $g$ is well defined, since if $n \neq n'$, then $\varphi^{-1}(n) \cap \varphi^{-1}(n')=\emptyset$. Also, for each $n\in M$ we have
\begin{align*}
\varphi^{*}(g)(n)=\sum_{m\in \varphi^{-1}(n)}g(m)=\sum_{j=1}^k g(m_j)=g(m_1)=f(n).
\end{align*}
This means that $\varphi^{*}(g)=f$, and hence $\varphi^*$ is surjective. This proves the claim.
It is well known that there is a strict monoid surjection
$\varphi :(\mathbb{F}^n,\preccurlyeq)\rightarrow (M,\preccurlyeq)$ for some $n\in \mathbb{N}$.
Also, the identity map $(M,\preccurlyeq)\to (M,\leq)$ is a surjection.
So the composition of these two maps is a surjection and, by \cite[Lemma 2.1]{brook} and the claim, $R[[M,\omega,\leq]]$ is a homomorphic image of the ring $R[[\mathbb{F}^n,\omega, \preceq]]$. Since $R[[\mathbb{F}^n,\omega,\preceq]]=R[[\mathbb{F}^n,\omega]]$ and $R[[\mathbb{F}^n,\omega]]$ is left Noetherian, its homomorphic image $R[[M,\omega,\leq]]$ is also left Noetherian.
Moreover, we show that $R[[M,\omega,\leq]]=R[[M,\omega]]$. If $R[[M,\omega,\leq]]$ is left Noetherian, then $\Downarrow(M,\preccurlyeq)$ is Artinian.
By applying \cite[Lemma 2.1(2)]{brook} to the identity map $(M,\preccurlyeq) \to (M,\leq)$, one can see that $\Downarrow(M, \leq)$ is Artinian.
Thus $R[[M,\omega, \leq]]=R[[M,\omega]]$.
($\Rightarrow$) The method of this part is inspired by \cite[Theorem 4.3]{brook}. The case of the trivial monoid $M$ is obvious. By \cite[Lemmas 3.1 and 3.2]{brook}, $M$ is strict
and $\preccurlyeq$ is a partial order on $M$.
Suppose $T=R[[M,\omega,\leq]]$ is left Noetherian.
One can see that $M$ is finitely generated as in the proof of
\cite[Theorem 4.3]{brook}. It remains to prove
that $R$ is left Noetherian, which can be done similarly to the proofs of \cite[Theorem 5.2(i)]{riben} and
\cite[Theorem 3.1(i)]{vara}. For a left ideal $I$ of $R$, let $I_T=\{f\in T\mid \omega_x(f(y))\in I \text{ for all } x,y\in M\}$.
It is easy to see that $I_T$ is a left ideal of $T$. So for each left ideal $I$ of $R$,
there is a corresponding left ideal in $T$. Also, if $I \subset J$, then
$I_T\subset J_T$. Hence if there existed a non-stabilizing ascending
chain of left ideals in $R$, then there would be one in $T$. But this is impossible, so $R$ is left Noetherian.
\end{proof}
If in Theorem \ref{go} we take each $\omega_m$ to be the identity homomorphism, then we obtain:
\begin{corollary}
\emph{\cite[Theorem 4.3]{brook}}
Let $R$ be a ring and $(M,\le)$ a positive strictly ordered monoid.
Then $R[[M,\le]]$ is left Noetherian if and only if $R$ is left Noetherian
and $M$ is finitely generated.
\end{corollary}
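As an illustration of this corollary (this example is added here and is not taken from \cite{brook}), take $M=(\mathbb{N}^n,+)$ with the componentwise order: every subset of $\mathbb{N}^n$ is Artinian and narrow, so $R[[M,\le]]$ consists of all functions $\mathbb{N}^n\to R$ and can be identified with the usual power series ring,
\begin{align*}
R[[\mathbb{N}^n,\le]]\cong R[[X_1,\dots,X_n]],
\end{align*}
and the corollary recovers the classical fact that $R[[X_1,\dots,X_n]]$ is left Noetherian if and only if $R$ is left Noetherian.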
Finally, we conclude with the following result, which connects the results of the previous sections.
\begin{corollary}
Let $R$ be an $S$-Noetherian von Neumann regular ring and $S$ a denominator set. Assume that $(M,\leq)$ is a finitely generated positive strictly ordered monoid and $\omega_{m}$ an automorphism of $R$ with $\omega_m\omega_n=\omega_n\omega_m$ for each $m,n\in M$. Then $(S^{-1}R)[[M,\omega]]$ is a left Noetherian ring.
\end{corollary}
\begin{proof}
The ring $S^{-1}R$ is Noetherian by Theorem \ref{tie}. Since $(M,\leq)$ is a finitely generated positive strictly ordered monoid and $\omega_{m}$ is an automorphism for all $m\in M$, $(S^{-1}R)[[M,\omega]]$ is a Noetherian ring by Theorem \ref{go}.
\end{proof}
\section{S-Noetherian property of generalized skew power series rings}
Recall that a ring is called right \emph{duo} (resp., left duo) if all of its right (resp., left) ideals are two-sided.
Also, a right and left duo ring is called a duo ring.
We know that if a ring is duo, then every prime ideal is completely
prime.
It is known that a power series ring over a duo ring need not be duo (on either side).
\begin{lemma}\label{duo}
Let $R$ be a duo ring and $S\subset R$ a denominator set. If $s\in S$ and $r\in R$, then there exists $s_1\in S$ such that $srs_1=rss_1$.
\end{lemma}
\begin{proof}
Let $s\in S$ and $r\in R$. Since $R$ is duo, there exists $s'\in S$
such that $sr=rs'$, so
$\frac{1}{s}\cdot\frac{sr}{1}=\frac{1}{s}\cdot\frac{rs'}{1}$. Hence
$\frac{r}{1}=\frac{rs'}{s}=\frac{r}{1}\cdot\frac{s'}{s}$.
Thus $\frac{r}{1}(1-\frac{s'}{s})=0$, which means that
$\frac{r(s-s')}{s}=0_{S^{-1}R}$. So $r(s-s')s_1=0_{R}$ for some $s_1\in S$.
So $rss_1=rs's_1$, and since $rs'=sr$ we have $srs_1=rss_1$.
\end{proof}
Similarly to the previous result, it is easy to see that if $s\in S$ and $r\in R$, then there exists $s_1\in S$ such that $s_1sr=s_1rs$.
We will use this observation in the proposition below.
\begin{proposition}\label{prime ideal}
Let $R$ be a duo ring, $S\subseteq R$ a denominator set and
$M$ an $S$-finite $R$-module. Then $M$ is $S$-Noetherian if
and only if $PM$ is an $S$-finite submodule for
each $S$-disjoint prime ideal $P$ of $R$.
\end{proposition}
\begin{proof}
The ``only if" part is clear. For the converse, assume that $PM$
is $S$-finite for each prime ideal $P$ of $R$ with $P\cap S=\emptyset$.
Since $M$ is $S$-finite, $wM\subseteq F$ for some $w\in S$ and some
finitely generated submodule $F$. If $M$ is not $S$-Noetherian, then the set
$\mathfrak {F}$ of all non-$S$-finite submodules of $M$ is not empty.
So $\mathfrak {F}$ has a maximal element, say $N$, by Zorn's lemma.
We claim that $P=[N:M]:=\{r\in R\mid rM\subseteq N\}$ is a prime ideal of $R$ and is disjoint from $S$.
Suppose to the contrary that $P\cap S\neq \emptyset$ and let $s\in P\cap S$. Then we have
\begin{align*}
swN \subseteq swM\subseteq sF\subseteq sM\subseteq N.
\end{align*}
So $swN\subseteq sF\subseteq N$, and $N$ becomes $S$-finite. This contradiction
shows that $P\cap S=\emptyset$. Now suppose that $P$ is not a prime ideal of
$R$. Then $P$ is not completely prime, so there exist $a,b\in R\setminus P$
with $ab\in P$. Since $a\notin P=[N:M]$, we have $aM\not\subseteq N$, so $N+aM$ properly contains $N$ and, by the maximality of $N$ in $\mathfrak{F}$, it is $S$-finite; hence $s(N+aM)\subseteq (R(n_1+am_1)+ \cdots+ R(n_p+am_p))$
for some $s\in S$, $n_i\in N$ and $m_i \in M$. Similarly, $[N:a]:=\{m\in M\mid am\in N\}$ properly contains $N$ (it contains $bM$, because $ab\in P$ and $b\notin P$), so $[N:a]$ is $S$-finite as well, and
$t[N:a]\subseteq (Rq_1+Rq_2+\cdots+Rq_k)$ for some $t\in S$ and
$q_j\in [N:a]$. Since $R$ is duo and $S$ is a denominator set in $R$, there exists $s''\in S$ such that $s''at=s''ta$, by Lemma \ref{duo} and the remark following it. Also $s(N+aM)\subseteq (R(n_1+am_1)+\cdots+R(n_p+am_p))$.
Thus, for each $x\in N$, $sx=\sum r_in_i+\sum r_iam_i$ for some $r_i\in R$. This means that $sx=\sum r_in_i+a\sum r'_im_i$ for
some $r'_i\in R$. Since $sx,\sum r_in_i\in N$, we have $\sum r'_im_i\in [N:a]$. So
\begin{align*}
s''tsx=s''t\sum r_in_i+s''t\sum ar'_im_i=\sum s''tr_in_i+s''at\sum r'_im_i=\sum s''tr_in_i+s''a\sum c_jq_j.
\end{align*}
So $s''tsx=\sum s''tr_in_i+\sum c'_js''aq_j$ for some $c'_j\in R$.
Hence $s''tsx\in (Rn_1+\cdots+Rn_p+Rs''aq_1+\cdots+Rs''aq_k)$.
So $s''tsN\subseteq (Rn_1+\cdots +Rn_p+Rs''aq_1+\cdots+Rs''aq_k) \subseteq N$.
Thus $N$ is $S$-finite, and this contradicts the fact that $N$ is maximal in $\mathfrak {F}$.
Therefore $P$ is a prime ideal of $R$.
Moreover, $P=[N:M]\subseteq [N:F]\subseteq [N:wM]=[P:w]= P$. Hence $[N:F]=P$.
Let $F=(Rf_1+Rf_2+\cdots+Rf_k)$. Since $R$ is a duo ring, $P=[N:\sum Rf_i]= \bigcap[N:f_i]$.
So $P=[N:f_i]$ for some $f_i\in\{f_1,f_2,\cdots ,f_k\}.$
One can show that $tN\subseteq (Rn_1+Rn_2+\cdots+Rn_l)+PM$
for some $t\in S$ and $n_i\in N$, as above or in a similar way to that employed in \cite[Proposition 4]{anderson}.
Since $PM$ is $S$-finite, $vPM\subseteq G\subseteq PM\subseteq N$
for some $v\in S$ and a finitely generated submodule $G$ of $M$. So
\begin{align*}
vtN\subseteq v(Rn_1+Rn_2+\cdots +Rn_l)+vPM \subseteq (Rn'_1+Rn'_2+\cdots +Rn'_l)+G\subseteq N
\end{align*}
for some $n'_i\in N$. So $N$ is $S$-finite, which is a contradiction. Hence $M$ is $S$-Noetherian.
\end{proof}
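For orientation (this reformulation is ours), note what the proposition says in the special case $S=\{1\}$, where $S$-finite simply means finitely generated and every prime ideal is $S$-disjoint: a finitely generated module $M$ over a duo ring $R$ is Noetherian if and only if $PM$ is finitely generated for each prime ideal $P$ of $R$, a module-theoretic variant of Cohen-type theorems.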
\begin{lemma}
Let $R$ be a ring with an endomorphism $\sigma$. If $R[[x;\sigma]]$ is a duo ring, then $\sigma$ is surjective.
\end{lemma}
\begin{proof}
Suppose that $a\in R$. Since $R[[x;\sigma]]$ is a duo ring, the right ideal $xR[[x;\sigma]]$ is two-sided, so $ax=xf$ for some $f=\sum_{i=0}^{\infty}f_ix^i$. So
$xf=x\sum_{i=0}^{\infty}f_ix^i=\sum_{i=0}^{\infty}\sigma(f_i)x^{i+1}$. Now, comparing coefficients in $ax=xf$, we get $\sigma(f_i)=0$ for all $i \neq 0$ and $\sigma(f_0)=a$. Thus, for each $a\in R$ there exists $f_0\in R$ such that $a=\sigma(f_0)$, i.e.\ $\sigma$ is surjective.
\end{proof}
\begin{theorem}\label{power}
Let $R$ be a ring, $S\subseteq R$ a $\sigma_i$-anti-Archimedean
denominator set for each $i$ \emph{(}consisting of nonzero divisors\emph{)}, and let $\sigma_1,\cdots,\sigma_n$ be monomorphisms of $R$ with $\sigma_i\sigma_j=\sigma_j\sigma_i$ for each $i,j$. Assume that $R[[X_1,\cdots ,X_n;\sigma_1,\cdots,\sigma_n]]$ is a duo ring.
If $R$ is $S$-Noetherian, then the ring $R[[X_1,\cdots ,X_n;\sigma_1,\cdots,\sigma_n]]$ is also $S$-Noetherian.
\end{theorem}
\begin{proof}
We use the method employed by Anderson and Dumitrescu in \cite[Proposition 10]{anderson}. As $S$ is $\sigma$-anti-Archimedean in every ring containing $R$ as a subring,
it suffices to prove the case $n=1$, so we assume that $T=R[[x;\sigma]]$ is duo; by the preceding lemma, $\sigma$ is then an automorphism of $R$. It is enough to prove that every prime ideal $P$ of $T$ is $S$-finite. Let
$\pi:T\rightarrow R$ be the $R$-algebra homomorphism sending $x$ to zero and let $P'=\pi(P)$.
Since $R$ is $S$-Noetherian, there exists $s\in S$ such that $sP'\subseteq (Rg_1(0)+Rg_2(0)+ \cdots +Rg_k(0))$
for some $g_i\in P$. If $x\in P$, then $P=( TP'+Tx)$. If $g_i(x)=\sum a_ix^i$, then
$g_i(x)=\sum x^i\sigma^{-i}(a_i)\in (TP'+Tx)$. So
$sP\subseteq (TP'+Tx)= (Tg_1+\cdots +Tg_k) \subseteq P$.
This means that $P$ is $S$-finite. Now let $x\notin P$ and $f\in P$. Then
$sf(0)=\sum d_{0,j}g_j(0)$ for some $d_{0,j}\in R$, so $xf_1=sf-\sum d_{0,j}g_j \in P$
for some $f_1\in T$. Since $x\notin P$, we get $f_1\in P$. So $sf_1=\sum d_{1,j}g_j+xf_2$ for some $f_2\in T$.
Hence $\sigma(s)sf=\sum \sigma(s) d_{0,j}g_j+x\sum d_{1,j}g_j+x^2f_2$. Also $f_2\in P$,
since $x\notin P$ and $sf_1-\sum d_{1,j}g_j \in P$. In this way, one can see that for each $L\ge 0$,
\begin{align*}
\big(\prod_{l=0}^{L}\sigma^l(s)\big)f=\sum_{i=0}^{L}x^i\sum_{j=1}^k (\prod_{l=i+1}^{L}\sigma^l(s))d_{i,j}g_j+x^{L+1}f_{L+1}.
\end{align*}
Since $S\cap \big(\bigcap_{l\geq 1,i_j\in \mathbb{N}\cup \{0\}}\sigma^{i_1}(s)\cdots \sigma^{i_l}(s)R\big)\neq \emptyset$,
there exists $t\in S$ such that $\frac{t}{\sigma^{i_1}(s)\cdots \sigma^{i_k}(s)}\in R$ for
each $i_j\in \mathbb{N}\cup \{0\}$, $k\in \mathbb{N}$. Moreover,
\begin{align*}
tf=\sum_j \sum_i \Big(\frac{ts\sigma^{-i}(d_{i,j})}{\prod_l\sigma^l(s)} \Big)x^ig_j.
\end{align*}
So $tf=\sum_j h_jg_j$, where $h_j=\sum_i \frac{ts\sigma^{-i}(d_{i,j})}{\prod_l\sigma^l(s)}x^i$. So
$tf\in (Tg_1+Tg_2+\cdots +Tg_k)$. Hence $tP\subseteq (Tg_1+Tg_2+\cdots +Tg_k)$.
Since $g_i\in P$, $(Tg_1+Tg_2+\cdots +Tg_k) \subseteq P$. Thus
$R[[x;\sigma]]$ is an $S$-Noetherian ring.
\end{proof}
The following result, proved in \cite{anderson}, is a corollary of the above theorem.
\begin{corollary}\emph{\cite[Proposition 10]{anderson}}
Let $R$ be a commutative ring and $S\subseteq R$ an anti-Archimedean
multiplicative set of $R$. If $R$ is $S$-Noetherian, then so is $R[[X_1,\cdots ,X_n]]$.
\end{corollary}
A ring $R$ is called \emph{strongly regular} if every principal right (or left) ideal is generated by a central idempotent.
A ring is said to be left \emph{self-injective} if it is injective as a left module over itself.
Hirano \cite[Theorem 4]{hirano} showed that if $R$ is a self-injective
strongly regular ring, then $R[[x]]$ is a duo ring. \\
We have the following generalization of a theorem of D.D. Anderson and Tiberiu Dumitrescu \cite[Proposition 10]{anderson}.
\begin{theorem}
Let $R$ be a duo ring with an automorphism $\sigma$ and $S\subseteq R$ a $\sigma$-anti-Archimedean
denominator set \emph{(}consisting of nonzero divisors\emph{)}.
If $R$ is $S$-Noetherian, then so is the skew power series ring $R[[x;\sigma]]$.
\end{theorem}
\begin{proof}
We can prove this theorem in a similar way as Theorem \ref{power}; we keep the notation of that proof. Let $x\in P$. Since $\sigma$ is bijective, $P$ is $S$-finite. Now let $x\notin P$ and $f\in P$, so $xf_1=sf-\sum d_{0,i}g_i \in P$. Note that for each $h\in R[[x;\sigma]]$ and each left ideal $I$ of $R[[x;\sigma]]$, $xh\in I$ yields $xR[[x;\sigma]]h\subseteq I$. So $f_1\in P$. The rest of the proof is similar to what we did in Theorem \ref{power}.
\end{proof}
The following corollary is a generalization of the case $n=1$ of \cite[Proposition 10]{anderson} for the class of duo rings.
\begin{corollary}
Let $R$ be a duo ring and $S\subseteq R$ an anti-Archimedean
denominator set \emph{(}consisting of nonzero divisors\emph{)} of $R$.
If $R$ is $S$-Noetherian, then so is the power series ring $R[[x]]$.
\end{corollary}
Now we extend the last result to the skew generalized power series ring $R[[M,\omega]]$.
\begin{theorem}
Let $R$ be a duo ring, $(M,\le)$ a positive
strictly ordered commutative monoid and $\omega_{m}$ a monomorphism of $R$ with $\omega_m\omega_n=\omega_n\omega_m$ for each $m,n\in M$. Assume that $S\subset R$ is an $\omega_m$-anti-Archimedean
denominator set \emph{(}consisting of nonzero divisors\emph{)} of $R$ and that $R[[M,\omega]]$ is a duo ring. Then
$R[[M,\omega]]$ is left (or right) $S$-Noetherian if and only if $R$ is left (or right) $S$-Noetherian and $M$ is finitely generated.
\end{theorem}
\begin{proof}
($\Leftarrow$) We use the method of G. Brookfield employed in \cite{brook}. We know that the surjective homomorphism $\varphi:\mathbb{F}^n\longrightarrow M$ (where $\mathbb{F}$ is a free monoid)
induces a projection $$\varphi^*:R[[\mathbb{F}^n,(\omega,\preceq)]]\longrightarrow R[[M,(\omega,\le)]]$$
and $R[[M,(\omega,\le)]]=R[[M,\omega]]$ by \cite[Theorem 4.3]{brook}.
Moreover, since $R[[\mathbb{F}^n,(\omega,\preceq)]]$ is $S$-Noetherian, so is $R[[M,\omega]]$ by the noncommutative version of \cite[Lemma 2.2]{liu}.
($\Rightarrow$) Let $A:=R[[M,\omega]]$ be $S$-Noetherian and let $\{m_n\mid n\in \mathbb{N}\}$
be an infinite sequence in $M$. Let $I=(Ae_{m_1}+Ae_{m_2}+\cdots)$.
Since $A$ is $S$-Noetherian, there exists $s\in S$ such that $c_sI\subseteq J\subseteq I$
for some finitely generated left ideal $J$ of $A$. So $c_sI\subseteq (Ae_{m_{i_1}}+Ae_{m_{i_2}}+\cdots+Ae_{m_{i_k}})$
for some $k\in \mathbb{N}$. So $c_se_{m_l}=\sum_{t=1}^k f_t e_{m_{i_t}}$ for
some $l$ with $l\neq i_t$ for all $t$. So $m_l\in \bigcup_{t=1}^k \operatorname{supp}(f_te_{m_{i_t}})$; moreover, for each $m\in M$,
$(f_te_{m_{i_t}})(m)=\sum_{m'm''=m}f_t(m')\omega_{m'}(e_{m_{i_t}}(m''))$.
So $m_{l}\in \bigcup_{t=1}^k \big\{\operatorname{supp}(f_t)+\operatorname{supp}(\omega_{m'}(e_{m_{i_t}}(m'')))\big\}$.
There exists $m'\in M$ such that $m'm_{i_t}=m$ for some $1\le t \le k$. So
$$(f_te_{m_{i_t}})(m)=f_t(m')\omega_{m'}(e_{m_{i_t}}(m_{i_t}))=f_t(m').$$
Thus $m'\in \operatorname{supp}(f_t)$ and $m'm_{i_t}\in \operatorname{supp}(f_te_{m_{i_t}})$ for some $1\le t\le k$.
So for each $m\in \operatorname{supp}(f_te_{m_{i_t}})$ we have $m_{i_t}\preceq m$ for some
$1\le t \le k$. Since $m_l \in \operatorname{supp}(f_te_{m_{i_t}})$, we get $m_{i_t}\preceq m_l$ for some $1\le t \le k$.
Since $M$ is a positive strictly ordered monoid, $M$ is finitely generated by \cite[Lemma 3.3]{brook}.
Let $I$ be a left ideal of $R$; then $AI$ is a left ideal of $A$. So there exists $s\in S$ such that
$c_sAI\subseteq J\subseteq AI$ for some finitely generated left ideal $J$ of $A$.
Set
\begin{align*}
T=\{f(\pi(f))| f\in c_sAI\}.
\end{align*}
We claim that $T=sI$. Let $t\in T$, so $t=h(\pi(h))$ and $h=c_sg$
for some $g\in AI$. So $t=sg(\pi(sg))$. This means that $t\in sI$
considering the fact that
\begin{align*}
I=\{f(\pi(f))| f\in AI\}.
\end{align*}
So $T\subseteq sI$. Now let $i\in I$, so $i\in AI$.
Since $(si)(m)=0$ for $m\neq 1$, we get $si\in T$. Thus $sI\subseteq T$.
Hence $sI=T$. But $sI=T\subseteq J'\subseteq I$, where $J'=\{f(\pi(f))\mid f\in J\}$.
Let $J=(Aj_1+Aj_2+\cdots+Aj_p)$. Then it is easy to show that
$J'=(Rj_1(\pi(j_1))+Rj_2(\pi(j_2))+\cdots+Rj_p(\pi(j_p)))$.
So $J'$ is finitely generated. Hence $I$ is $S$-finite and $R$ is left $S$-Noetherian.
\end{proof}
Recall from \cite{Brewer} that a ring $R$
is right (left) $\aleph_0$-injective provided any homomorphism from a countably
generated right (left) ideal of $R$ into $R$ extends to a right (left) $R$-module
endomorphism of $R$. By an $\aleph_0$-injective ring we mean a right and left
$\aleph_0$-injective ring.
\begin{corollary}
Let $R$ be a strongly regular, $\aleph_0$-injective ring with automorphisms $\sigma_1,\sigma_2$
such that $\sigma_1\sigma_2=\sigma_2\sigma_1$. Assume that $S\subset R$ is a $\sigma_1$- and $\sigma_2$-anti-Archimedean denominator set \emph{(}consisting of nonzero divisors\emph{)}. If $R$ is left (or right) $S$-Noetherian, then so is $R[[x,y;\sigma_1,\sigma_2]]$.
\end{corollary}
\begin{proof}
Assume that $R$ is a strongly regular, $\aleph_0$-injective ring. Then, by \cite{zhim}, $A=R[[x;\sigma_1]]$ is a duo ring and a left $S$-Noetherian ring. Hence $A[[y;\sigma_2]]$ is a left $S$-Noetherian ring.
\end{proof}
The following corollary is a generalization of the case $n=2$ of \cite[Proposition 10]{anderson} for the class of duo rings.
\begin{corollary}
Let $R$ be a strongly regular self-injective ring and $S\subseteq R$ an anti-Archimedean denominator set \emph{(}consisting of nonzero divisors\emph{)} of $R$. If $R$ is left (or right) $S$-Noetherian, then so is $R[[x,y]]$.
\end{corollary}
\begin{corollary}
Let $R$ be a duo ring and $S\subseteq R$ an anti-Archimedean
denominator set \emph{(}consisting of nonzero divisors\emph{)} of $R$. Assume that $R[[M]]$ is a duo ring. Then $R[[M]]$ is left (or right) $S$-Noetherian
if and only if $R$ is left (or right) $S$-Noetherian and $M$ is finitely generated.
\end{corollary}
\end{document}
\begin{document}
\title{\normalsize Acoustic and Filtration Properties\\
of Thermo-elastic Porous Medium:\\
Biot's Equations of Thermo-Poroelasticity.} \small
\noindent \textbf{Abstract.} A linear system of differential
equations describing the joint motion of a thermoelastic porous body and an
incompressible thermofluid occupying the porous space is considered.
Although the problem is linear, it is very hard to tackle due to the
fact that its main differential equations involve non-smooth
oscillatory coefficients, both big and small, under the
differentiation operators. A rigorous justification is given
for the homogenization procedures as the dimensionless size of the
pores tends to zero, while the porous body is geometrically
periodic. As a result, we derive a Biot-type system of equations
of thermo-poroelasticity, a system of equations of
thermo-viscoelasticity, or a system of non-isotropic Lam\'{e}
equations, depending on the ratios between the physical parameters and the
geometry of the porous space. The proofs are based on Nguetseng's
two-scale convergence method of homogenization in periodic structures.\\
\noindent \textbf{Key words:} Biot's equations, Stokes equations,
Lam\'{e}'s equations, two-scale convergence, homogenization of
periodic structures, thermo-poroelasticity.\\
\normalsize
\addtocounter{section}{0} \setcounter{equation}{0}
\begin{center} \textbf{Introduction}
\end{center}
In the present publication we consider the problem of the joint motion
of a thermoelastic deformable solid (thermoelastic skeleton),
perforated by a system of channels (pores), and an incompressible
thermofluid occupying the porous space. We refer to this model as
\textbf{model (NA)}. In dimensionless variables (without primes)
$$ {\mathbf x}'=L {\mathbf x},\quad t'=\tau t,\quad {\mathbf w}'=L {\mathbf w},
\quad \theta'=\vartheta_*\frac{L }{\tau v_{*}} \theta$$
the differential equations of the model in a domain $\Omega \subset
\mathbb R^{3}$ for the dimensionless displacement vector ${\mathbf w}$ of the
continuum medium and the dimensionless temperature $\theta$ have the
form:
\begin{eqnarray} \label{0.1}
& \displaystyle \alpha_\tau \bar{\rho} \frac{\partial^2
{\mathbf w}}{\partial t^2}=\mbox{div}_x \mathbb P + \bar{\rho} \mathbf F,\\
\label{0.2}
& \displaystyle \alpha_\tau \bar{c}_p \frac{\partial
\theta}{\partial t} = \mbox{div}_x ( \bar{\alpha} _{\varkappa} \nabla_x
\theta) -\bar{\alpha}_\theta
\frac{\partial}{\partial t} \mbox{div}_x {\mathbf w} +\Psi,\\
\label{0.3} & \displaystyle \mathbb P =
\bar{\chi}\alpha_\mu \mathbb D\Bigl({\mathbf x},\frac{\partial {\mathbf w}}{\partial t}\Bigr)
+(1-\bar{\chi})\alpha_\lambda \mathbb D(x,{\mathbf w})-(q+\pi )\mathbb I ,\\
\label{0.4}
& \displaystyle q=p+\frac{\alpha_\nu}{\alpha_p}\frac{\partial
p}{\partial t}+\bar{\chi}\alpha _{\theta f}\theta,\\
\label{0.5}
& \displaystyle p+\bar{\chi} \alpha_p \mbox{div}_x {\mathbf w}=0,\\
\label{0.6}
& \displaystyle \pi +(1-\bar{\chi}) (\alpha_\eta \mbox{div}_x {\mathbf w}-\alpha
_{\theta s}\theta)=0.
\end{eqnarray}
Here and further we use notations
$$ \mathbb D(x,{\mathbf u})=(1/2)\left(\nabla_x {\mathbf u} +(\nabla_x {\mathbf u})^T\right),$$
$$\bar{\rho}=\bar{\chi}\rho_f +(1-\bar{\chi})\rho_s, \quad
\bar{c}_p=\bar{\chi} c_{pf} +(1-\bar{\chi})c_{ps},$$
$$ \bar{\alpha _{\varkappa}} =\bar{\chi} \alpha _{\varkappa f}
+(1-\bar{\chi})\alpha _{\varkappa s},\quad \bar{\alpha}_\theta
=\bar{\chi} \alpha_{\theta f} +(1-\bar{\chi})\alpha_{\theta s}.$$ In
this model the characteristic function of the porous space
$\bar{\chi}({\mathbf x})$ is a known function.
For the derivation of \eqref{0.1}--\eqref{0.6} and a description of the
dimensionless constants (all these constants are positive) see \cite{MS}.
We endow model \textbf{(NA)} with the initial and boundary conditions
\begin{equation} \label{0.7}
{\mathbf w}|_{t=0}={\mathbf w}_0,\quad \frac{\partial {\mathbf w}}{\partial
t}|_{t=0}={\mathbf v}_0,\quad \theta|_{t=0} =\theta_0,\quad {\mathbf x}\in \Omega
\end{equation}
\begin{equation} \label{0.8}
{\mathbf w}=0,\quad \theta=0,\quad {\mathbf x} \in S=\partial \Omega, \quad t\geq 0.
\end{equation}
From the purely mathematical point of view, the corresponding
initial-boundary value problem for model \textbf{(NA)} is well-posed
in the sense that it has a unique solution belonging to a suitable
functional space on any finite temporal interval (see \cite{MS}).
However, in view of possible applications this model is ineffective.
Therefore the question arises of finding effective approximate
models. If the model involves the small parameter $\varepsilon$, the
most natural approach to this problem is to derive models that would
describe limiting regimes arising as $\varepsilon$ tends to zero.
Such an approximation significantly simplifies the original problem
and at the same time preserves all of its main features. In the
model under consideration we define $\varepsilon$ as the
characteristic size of pores $l$ divided by the characteristic size
$L$ of the entire porous body:
$$\varepsilon =\frac{l}{L}.$$
But even this approach is too hard to work out, and some additional
simplifying assumptions are necessary. In terms of geometrical
properties of the medium, the most appropriate is to simplify the
problem by postulating that the porous structure is periodic. In what follows,
\textbf{model} ${(\mathbf{N}\mathbf B})^\varepsilon$ denotes
model \textbf{(NA)} supplemented by this periodicity condition. Thus,
our main goal now is the derivation of all possible homogenized
equations in the model ${(\mathbf{N}\mathbf B})^\varepsilon$.
We accept the following constraints
\begin{assumption} \label{assumption1}
The domain $\Omega =(0,1)^3$ is a periodic repetition of an elementary
cell $Y^\varepsilon =\varepsilon Y$, where $Y=(0,1)^3$ and the quantity
$1/\varepsilon$ is an integer, so that $\Omega$ always contains an
integer number of elementary cells $Y_i^\varepsilon$. Let $Y_s$ be
the ``solid part'' of $Y$, and let the ``liquid part'' $Y_f$ be its open
complement. We set $\gamma = \partial Y_f \cap \partial Y_s$
and assume that $\gamma$ is a $C^{1}$-surface.
The porous space $\Omega ^{\varepsilon}_{f}$ is the periodic repetition of
the elementary cell $\varepsilon Y_f$, and the solid skeleton $\Omega
^{\varepsilon}_{s}$ is the periodic repetition of the elementary
cell $\varepsilon Y_s$. The boundary $\Gamma^\varepsilon =\partial
\Omega_s^\varepsilon \cap \partial \Omega_f^\varepsilon$ is the
periodic repetition in $\Omega$ of the boundary $\varepsilon
\gamma$. The ``solid skeleton'' $\Omega _{s}$ is a connected domain.
\end{assumption}
Under these assumptions,
\begin{equation*}
\bar{\chi}({\mathbf x})=\chi^{\varepsilon}({\mathbf x})=\chi
\left({\mathbf x} / \varepsilon\right),
\end{equation*}
$$\bar{c}_{p}=c_{p}^{\varepsilon}({\mathbf x})=\chi^{\varepsilon}({\mathbf x})c _{pf}+
(1-\chi^{\varepsilon}({\mathbf x}))c_{ps},$$
$$\bar{\rho}=\rho^{\varepsilon}({\mathbf x})=\chi^{\varepsilon}({\mathbf x})\rho _{f}+
(1-\chi^{\varepsilon}({\mathbf x}))\rho_{s},$$
$$ \bar{\alpha} _{\varkappa} =\alpha^{\varepsilon} _{\varkappa}({\mathbf x})=
\chi ^{\varepsilon}({\mathbf x})\alpha _{\varkappa f} +(1-\chi
^{\varepsilon}({\mathbf x}))\alpha _{\varkappa s}, $$ $$\bar{\alpha}_\theta
=\alpha ^{\varepsilon}_\theta({\mathbf x})=\chi ^{\varepsilon}({\mathbf x})
\alpha_{\theta f} +(1-\chi ^{\varepsilon}({\mathbf x}))\alpha_{\theta s},$$
where $\chi ({\mathbf y})$ is a characteristic function of $Y_f$ in $Y$.
We say that a \textbf{porous space is disconnected (isolated
pores)}
if $\gamma \cap \partial Y=\emptyset$.\\
In the present work we suppose that all dimensionless parameters depend on the small
parameter $\varepsilon$ and there exist limits (finite or infinite)
$$\lim_{\varepsilon\searrow 0} \alpha_\mu(\varepsilon) =\mu_0, \quad
\lim_{\varepsilon\searrow 0} \alpha_\lambda(\varepsilon) =\lambda_0,
\quad \lim_{\varepsilon\searrow 0}
\alpha_\tau(\varepsilon)=\tau_{0}, \quad \lim_{\varepsilon\searrow
0} \alpha_p(\varepsilon) =p_{*}.$$
Moreover, we restrict ourselves to the case when $\tau_0<\infty$
and
$$\mu_0=0, \quad p_{*}=\infty, \quad 0< \lambda_0 <\infty.$$
If $\tau_0=\infty$, then, re-normalizing the displacement vector
and the temperature by setting
\begin{equation}\nonumber
{\mathbf w} \rightarrow \alpha_\tau {\mathbf w},\quad \theta \rightarrow \alpha_\tau
\theta
\end{equation}
we reduce the problem to the previous case. The condition
$p_{*}=\infty $ means that the liquid under consideration is
incompressible.
Using Nguetseng's two-scale convergence method \cite{LNW,NGU} we
derive Biot-type systems of thermo-poroelasticity or systems of
non-isotropic Lam\'{e} equations, depending on the ratios between the
dimensionless parameters and the geometry of the porous space.
Different isothermal models have been considered in
\cite{S-P}, \cite{B-K}, \cite{GNG}, \cite{G-M2,G-M3,G-M1},
\cite{AM}.
\addtocounter{section}{1} \setcounter{equation}{0}
\setcounter{theorem}{0} \setcounter{lemma}{0}
\setcounter{proposition}{0} \setcounter{corollary}{0}
\setcounter{definition}{0} \setcounter{assumption}{0}
\begin{center} \textbf{\S1}. \textbf{Model
${(\mathbf{N}\mathbf B})^\varepsilon$. Formulation of the main results.}
\end{center}
As usual, equations \eqref{0.1}--\eqref{0.6} are understood in the
sense of distributions. They comprise the equations \eqref{0.1}--
\eqref{0.6} in the usual sense in the domains $\Omega_f^{\varepsilon}$
and $\Omega_s^{\varepsilon}$ and the boundary conditions
\begin{eqnarray} \label{1.1}
& [\vartheta]=0, \quad [{\mathbf w}]=0,\quad {\mathbf x}_0\in \Gamma ^{\varepsilon},\; t\geq 0,\\
\label{1.2} & [\mathbb P]=0,\quad [\alpha ^{\varepsilon} _{\varkappa}
\nabla_x \theta ]=0, \quad {\mathbf x}_0\in \Gamma ^{\varepsilon},\; t\geq 0
\end{eqnarray}
on the boundary $\Gamma^\varepsilon $, where
\begin{eqnarray}
\nonumber & [\varphi]({\mathbf x}_0)=\varphi_{(s)}({\mathbf x}_0)
-\varphi_{(f)}({\mathbf x}_0),\\
\nonumber \displaystyle
& \varphi_{(s)}({\mathbf x}_0) =\lim\limits_{\tiny \begin{array}{l}{\mathbf x}\to {\mathbf x}_0\\
{\mathbf x}\in \Omega_s^{\varepsilon}\end{array}} \varphi({\mathbf x}),\quad
\varphi_{(f)}({\mathbf x}_0) =\lim\limits_{\tiny \begin{array}{l}{\mathbf x}\to {\mathbf x}_0\\
{\mathbf x}\in \Omega_f^{\varepsilon}\end{array}} \varphi({\mathbf x}).
\end{eqnarray}
There are various forms of representation, equivalent in the sense of
distributions, of equations
\eqref{0.1}--\eqref{0.2} and boundary conditions
\eqref{1.1}--\eqref{1.2}. In what follows, it is convenient to write
them in the form of integral equalities.
\begin{definition} \label{definition1}
Five functions
$({\mathbf w}^{\varepsilon},\theta^{\varepsilon},p^{\varepsilon},q^{\varepsilon},\pi^{\varepsilon})$
are called a generalized solution of \textbf{model}
${(\mathbf{N}\mathbf B})^\varepsilon$ if they satisfy the regularity
conditions in the domain $ \Omega_{T}=\Omega\times (0,T)$
\begin{equation} \label{1.3}
{\mathbf w}^{\varepsilon},\, \mathbb D(x,{\mathbf w}^{\varepsilon}),\,
\mbox{div}_x{\mathbf w}^{\varepsilon},\, q^{\varepsilon},\,p^{\varepsilon},\,
\frac{\partial p^{\varepsilon}}{\partial
t},\,\pi^{\varepsilon},\,\theta^{\varepsilon},\, \nabla_x \theta
^{\varepsilon} \in L^2(\Omega_{T}),
\end{equation}
the boundary conditions
\eqref{0.8}, the equations
\begin{eqnarray} \label{1.4}
&\displaystyle q^{\varepsilon}=p^{\varepsilon}+
\frac{\alpha_\nu}{\alpha_p}\frac{\partial p^{\varepsilon}}{\partial
t}+
\chi^{\varepsilon}\alpha _{\theta f}\theta ^{\varepsilon},\\
\label{1.5}& \displaystyle p^{\varepsilon}+
\chi^{\varepsilon} \alpha_p \mbox{div}_x {\mathbf w}^{\varepsilon}=0,\\
\label{1.6}& \displaystyle \pi^{\varepsilon} +(1-\chi^{\varepsilon})
(\alpha_\eta \mbox{div}_x {\mathbf w}^{\varepsilon}-\alpha _{\theta
s}\theta^{\varepsilon})=0
\end{eqnarray}
a.e. in $\Omega_{T}$, and integral identities
\begin{eqnarray}\nonumber
&& \displaystyle \int_{\Omega_{T}} \Bigl(\alpha_\tau \rho
^{\varepsilon} {\mathbf w}^{\varepsilon}\cdot \frac{\partial ^{2}{\mathbf
\varphi}}{\partial t^{2}} - \chi ^{\varepsilon}\alpha_\mu \mathbb D({\mathbf x},
{\mathbf w}^{\varepsilon}): \mathbb D(x,\frac{\partial {\mathbf \varphi}}{\partial
t})-\rho ^{\varepsilon} \mathbf F\cdot {\mathbf
\varphi}+\\
&&\nonumber\{(1-\chi ^{\varepsilon})\alpha_\lambda
\mathbb D(x,{\mathbf w}^{\varepsilon})-(q^{\varepsilon}+\pi^{\varepsilon})\mathbb I\}
: \mathbb D(x,{\mathbf \varphi})\Bigr) d{\mathbf x} dt +\\
\label{1.7}
&& \displaystyle \int_\Omega \alpha_\tau \rho
^{\varepsilon}\Bigl({\mathbf w}^{\varepsilon}_{0}\cdot\frac{\partial {\mathbf
\varphi}}{\partial t}|_{t=0}- {\mathbf v}^{\varepsilon}_0 \cdot {\mathbf
\varphi}|_{t=0} \Bigr)d{\mathbf x} =0
\end{eqnarray}
for all smooth vector-functions ${\mathbf \varphi}={\mathbf
\varphi}({\mathbf x},t)$ such that ${\mathbf \varphi}|_{\partial \Omega}
={\mathbf \varphi}|_{t=T}=\partial {\mathbf \varphi} / \partial
t|_{t=T}=0$ and
\begin{eqnarray}
\nonumber
&& \displaystyle \int_{\Omega_{T}} \Bigl((\alpha_\tau
c^{\varepsilon}_p \theta ^{\varepsilon}+\alpha^{\varepsilon}_\theta
\mbox{div}_x {\mathbf w} ^{\varepsilon}) \frac{\partial \xi}{\partial t} - \alpha
_{\varkappa }^{\varepsilon} \nabla_x \theta ^{\varepsilon}\cdot
\nabla_x \xi +\Psi \xi \Bigr) d{\mathbf x} dt\\
\label{1.8}
&& \displaystyle +\int_\Omega (\alpha_\tau
c^{\varepsilon}_p \theta ^{\varepsilon}_0
+\alpha^{\varepsilon}_\theta \mbox{div}_x {\mathbf w}
_{0}^{\varepsilon})\xi|_{t=0}\, d{\mathbf x}=0
\end{eqnarray}
for all smooth functions $\xi= \xi({\mathbf x},t)$ such that $\xi|_{\partial \Omega}
= \xi|_{t=T}=0$.
\end{definition}
In \eqref{1.7} by $A:B$ we denote the convolution (or,
equivalently, the inner tensor product) of two second-rank tensors
along both indices, i.e., $A:B=\mbox{tr\,} (B^*\circ
A)=\sum_{i,j=1}^3 A_{ij} B_{ji}$.
Suppose additionally that there exist limits (finite or infinite)
\begin{equation} \nonumber
\lim_{\varepsilon\searrow 0}\alpha_\nu(\varepsilon) =\nu_0, \quad
\lim_{\varepsilon\searrow 0} \alpha_\eta(\varepsilon) =\eta_0,\quad
\lim_{\varepsilon\searrow 0} \alpha_{\varkappa s}(\varepsilon)
=\varkappa _{0s},\quad \lim_{\varepsilon\searrow 0} \alpha _{ \theta
f}(\varepsilon) =\beta_{0f},
\end{equation}
\begin{equation*}
\lim_{\varepsilon\searrow 0} \alpha _{ \theta s}(\varepsilon)
=\beta_{0s}, \quad \lim_{\varepsilon\searrow 0}
\frac{\alpha_\mu}{\varepsilon^{2}} =\mu_1,\quad
\lim_{\varepsilon\searrow 0} \frac{\alpha _{\varkappa
f}}{\alpha_\mu}=\varkappa_{f}.
\end{equation*}
In what follows we suppose that the following assumption holds.
\begin{assumption} \label{assumption2}
1) The dimensionless parameters in the model ${(\mathbf{N}\mathbf
B})^\varepsilon$ satisfy the following restrictions:
$$ \mu_{0}=0; \quad 0< \tau _{0}+ \mu_1, \quad \varkappa _{0s},
\quad \varkappa _{f}, \quad \lambda_{0},\quad \eta _{0};$$
\begin{equation*}
\tau _{0}, \quad \varkappa _{f},\quad \varkappa _{0s},
\quad \nu _{0},\quad \beta_{0f}, \quad \beta_{0s},\quad \lambda_{0} <\infty.
\end{equation*}
2) The sequences $\{\sqrt{\alpha_\lambda}(1-\chi
^{\varepsilon})\nabla{\mathbf w}^{\varepsilon}_0\}$,
$\{\sqrt{\alpha_\tau}{\mathbf v}^{\varepsilon}_0 \}$,
$\{\sqrt{\alpha_\tau}\theta^{\varepsilon}_0 \}$,
$\{\sqrt{\alpha_\eta}(1- \chi ^{\varepsilon}) \mbox{div}_x
{\mathbf w}_{0}^{\varepsilon}\}$, $\{\sqrt{\alpha _{p}} \chi
^{\varepsilon} \mbox{div}_x {\mathbf w}_{0}^{\varepsilon}\}$,
$\{\sqrt{\alpha_\lambda \alpha_\tau}(1-\chi
^{\varepsilon})\nabla{\mathbf v}^{\varepsilon}_0\}$, $\{\sqrt{\alpha_\eta
\alpha_\tau}(1- \chi ^{\varepsilon}) \mbox{div}_x
{\mathbf v}_{0}^{\varepsilon}\}$,
$\{\sqrt{\alpha _{p}\alpha_\tau} \chi
^{\varepsilon} \mbox{div}_x {\mathbf v}_{0}^{\varepsilon}\}$,
$\{\textbf{a}^{\varepsilon}_0 \}$, and $\{\textbf{b}^{\varepsilon}_0
\}$ are bounded in $L^2(\Omega)$ uniformly in $\varepsilon$, and
$|\mathbf F|, |\partial \mathbf F /
\partial t|, \Psi, \partial \Psi /
\partial t \in
L^2(\Omega_{T})$.
\end{assumption}
Here
$$\textbf{a}^{\varepsilon}_0=\mbox{div}_x \mathbb P_{0}^{\varepsilon} + \bar{\rho} \mathbf F({\mathbf x},0),$$
$$ c_{p}^{\varepsilon}\textbf{b}^{\varepsilon}_0 =
\mbox{div}_x ( \alpha ^{\varepsilon} _{\varkappa} \nabla_x \theta
^{\varepsilon}_0) -\alpha ^{\varepsilon}_\theta \mbox{div}_x
{\mathbf v}^{\varepsilon}_0 +\Psi ({\mathbf x},0),$$
$$\mathbb P_{0}^{\varepsilon}=\chi ^{\varepsilon}\alpha_\mu \mathbb D({\mathbf x}, {\mathbf v}_{0}^{\varepsilon})
+(1-\chi ^{\varepsilon})\alpha_\lambda \mathbb D(x,{\mathbf w}_{0}^{\varepsilon})+$$
$$(\chi ^{\varepsilon}(\alpha_p \mbox{div}_x {\mathbf w}^{\varepsilon}_0+
\alpha_\nu \mbox{div}_x {\mathbf v}^{\varepsilon}_0)+(1-\chi ^{\varepsilon})\alpha_\eta
\mbox{div}_x {\mathbf w}^{\varepsilon}_0)\mathbb I.$$
In what follows all parameters may take all permitted values. For
example, if
$\tau_{0}=0$ or $\eta _{0}^{-1}=0$, then all terms in the final equations
containing these parameters disappear.
The following Theorems \ref{theorem1}--\ref{theorem2} are the main results of the paper.
\begin{theorem} \label{theorem1}
For all $\varepsilon >0$ and on an arbitrary time interval $[0,T]$
there exists a unique generalized solution of model
${(\mathbf{N}\mathbf B})^\varepsilon$
and
\begin{equation} \label{1.9}
\displaystyle \max\limits_{0\leq t\leq
T}\| |{\mathbf w}^{\varepsilon}(t)|, \sqrt{\alpha_\mu} \chi^\varepsilon
|\nabla_x {\mathbf w}^{\varepsilon}(t)|, (1-\chi^\varepsilon)
|\nabla_x {\mathbf w}^{\varepsilon}(t)| \|_{2,\Omega}
\leq C_{0} ,
\end{equation}
\begin{equation} \label{1.10}
\displaystyle\| \theta^{\varepsilon} \|_{2,\Omega_{T}}+\sqrt{\alpha _{\varkappa
f}}\| \chi ^{\varepsilon} \nabla_x \theta^{\varepsilon}\|_{2,\Omega
_{T}}+ \|(1- \chi ^{\varepsilon}) \nabla_x
\theta^{\varepsilon}\|_{2,\Omega _{T}}
\leq C_{0} ,
\end{equation}
\begin{equation}\label{1.11}
\|q^{\varepsilon}\|_{2,\Omega_{T}} +
\|p^{\varepsilon}\|_{2,\Omega_{T}} + \frac{\alpha _{\nu}}{\alpha
_{p}}\|\frac{\partial p^{\varepsilon}}{\partial t}\|_{2,\Omega_{T}}
+ \|\pi ^{\varepsilon}\|_{2,\Omega_{T}} \leq C_{0}
\end{equation}
where $C_{0}$ does not depend on the small parameter $\varepsilon $.
\end{theorem}
\begin{theorem} \label{theorem2}
The functions ${\mathbf w}^{\varepsilon}$ and $\theta ^{\varepsilon}$ admit
extensions ${\mathbf u}^{\varepsilon}$ and $\vartheta^{\varepsilon}$,
respectively from $\Omega_{s,T}^{\varepsilon}=\Omega_s^\varepsilon
\times (0,T)$
into $\Omega_{T}$
such that the sequences $\{{\mathbf u}^{\varepsilon}\}$ and $\{\vartheta^{\varepsilon}\}$
converge strongly
in $L^{2}(\Omega_{T})$ and weakly in
$L^{2}((0,T);W^1_2(\Omega))$ to the
functions ${\mathbf u}$ and $\vartheta$ respectively. At the same time,
sequences $\{{\mathbf w}^\varepsilon\}$, $\{\theta ^{\varepsilon}\}$,
$\{p^{\varepsilon}\}$, $\{q^{\varepsilon}\}$, and
$\{\pi^{\varepsilon}\}$ converge weakly in $L^{2}(\Omega_{T})$
to ${\mathbf w}$, $\theta $, $p$, $q$, and $\pi$, respectively.
The following assertions for these limiting functions hold
true:
\textbf{(I)} If $\mu_1 =\infty$ then ${\mathbf w}={\mathbf u}$, $\theta =\vartheta
$ and the weak limits ${\mathbf u}$, $\vartheta $, $p$, $q$, and $\pi$
satisfy in $\Omega_{T}$ the initial-boundary value problem
\begin{equation}\label{1.12}
\left. \begin{array}{lll} \displaystyle \tau
_{0}\hat{\rho}\frac{\partial ^2{\mathbf u}}{\partial t^2}
+\nabla (q+\pi )-\hat{\rho}\mathbf F=\\[1ex]
\mbox{div}_x \{\lambda _{0}\mathbb A^{s}_{0}:\mathbb D(x,{\mathbf u}) + B^{s}_{0}(\mbox{div}_x
{\mathbf u}-\frac{\beta_{0s}}{\eta_{0}}\vartheta )+B^{s}_{1}q \},
\end{array} \right\}
\end{equation}
\begin{equation}\label{1.13}
(\tau_{0}\hat{c_{p}}+\frac{\beta_{0s}^{2}}{\eta_{0}}(1-m))\frac{\partial
\vartheta}{\partial t} -\frac{\beta_{0s}}{\eta_{0}}\frac{\partial
\pi}{\partial t}+(a^{s}_{1}-\frac{1}{\eta_{0}})\langle
\frac{\partial q}{\partial t}\rangle_{\Omega}=
\mbox{div}_x ( B^{\theta}\cdot \nabla \vartheta )+\Psi ,
\end{equation}
\begin{equation}\label{1.14}
\frac{1}{\eta_{0}}(\pi +\langle
q\rangle_{\Omega})+C^{s}_{0}:\mathbb D(x,{\mathbf u})+ a^{s}_{0}(\mbox{div}_x {\mathbf u} -
\frac{\beta_{0s}}{\eta_{0}}(\vartheta-\langle \vartheta
\rangle_{\Omega})) +a^{s}_{1}(q-\langle q\rangle_{\Omega})=0,
\end{equation}
\begin{equation}\label{1.15}
\frac{1}{\eta_{0}}(\pi +\langle
q\rangle_{\Omega}) + \mbox{div}_x {\mathbf u}+
\frac{(1-m)\beta_{0s}}{\eta_{0}}(\vartheta-\langle
\vartheta \rangle_{\Omega})=0,
\end{equation}
\begin{equation}\label{1.16}
q-\langle
q\rangle_{\Omega}=p +\beta_{0f}m(\vartheta-\langle \vartheta
\rangle_{\Omega}),
\end{equation}
where
$$\hat{\rho}=m \rho_{f} + (1-m)\rho_{s},\quad \hat{c_{p}}=m
c_{pf} + (1-m)c_{ps},\quad m=\int _{Y}\chi ({\mathbf y})d{\mathbf y}.$$
The symmetric, strictly positive definite constant fourth-rank tensor
$\mathbb A^{s}_{0}$, the constant matrices $C^{s}_{0}$, $B^{s}_{0}$, $B^{s}_{1}$,
the strictly positive definite constant matrix $B^{\vartheta}$,
and the constants $a^{s}_{0}$, $a^{s}_{1}$ and $a^{s}_{2}$ are
defined below by Eqs. \eqref{4.33}--\eqref{4.35} and
\eqref{4.38}.
Differential equations \eqref{1.12}--\eqref{1.16} are endowed with
initial conditions at $t=0$ and $ {\mathbf x}\in \Omega$
\begin{equation}\label{1.17}
(\tau _{0}+\beta_{0s})(\vartheta-\vartheta_{0})=0,\quad\tau _{0}({\mathbf u}-{\mathbf u}_{0})=
\tau _{0}(\frac{\partial {\mathbf u}}{\partial t}-{\mathbf v}_{0})=0;
\end{equation}
and boundary conditions
\begin{equation}\label{1.18}
\vartheta ({\mathbf x},t)=0, \quad {\mathbf u}({\mathbf x},t)=0, \quad {\mathbf x}\in S, \quad t>0.
\end{equation}
\noindent
\textbf{(II)}
If the porous space is disconnected, then ${\mathbf w}={\mathbf u}$, and the strong
and weak
limits ${\mathbf u}$, $\vartheta $, $p$, $q$, $\pi$, together with the weak
limit $\theta ^{f}$ of the sequence
$\{\chi ^{\varepsilon}\theta ^{\varepsilon}\}$,
satisfy in $\Omega_{T}$ equations \eqref{1.12}, \eqref{1.14}--
\eqref{1.15}, the state equation
\begin{equation}\label{1.19}
q-\langle
q\rangle_{\Omega}=p +\beta_{0f}(\theta ^{f}-\langle \theta ^{f}
\rangle_{\Omega}),
\end{equation}
and the heat equation
\begin{eqnarray}
\nonumber
&&\tau_{0}c_{pf}\frac{\partial \theta^{f}}{\partial
t}+(\tau_{0}c_{ps}+
\frac{\beta_{0s}^{2}}{\eta_{0}})(1-m)\frac{\partial
\vartheta}{\partial t}-\frac{\beta_{0s}}{\eta_{0}}\frac{\partial
\pi}{\partial t} +(a^{s}_{1}-\frac{1}{\eta_{0}})\langle
\frac{\partial q}{\partial t}\rangle_{\Omega}=\\
&& \mbox{div}_x ( B^{\theta}\cdot \nabla \vartheta ) +\Psi . \label{1.20}
\end{eqnarray}
Here $\theta^{f}$ is defined below by formulas
\eqref{4.40}--\eqref{4.45}.
The problem is endowed with initial and boundary conditions
\eqref{1.17}--\eqref{1.18}.
\noindent
\textbf{(III)}
If $\mu_{1}<\infty$ then strong
and weak limits ${\mathbf u}$, $\vartheta $, ${\mathbf w}^{f}$, $\theta
^{f}$, $p$, $q$ and $\pi$ of the sequences $\{{\mathbf u}^\varepsilon\}$,
$\{\vartheta ^\varepsilon\}$,
$\{\chi^{\varepsilon}{\mathbf w}^\varepsilon\}$,
$\{\chi^{\varepsilon}\theta ^\varepsilon\}$,
$\{p^\varepsilon\}$, $\{q^\varepsilon\}$ and $\{\pi^\varepsilon\}$
satisfy the initial-boundary value problem in $\Omega_T$, consisting
of the balance of momentum equation
\begin{equation}\label{1.21}
\left. \begin{array}{lll} \displaystyle\tau
_{0}(\rho_{f}\frac{\partial ^2{\mathbf w}^{f}}{\partial
t^2}+\rho_{s}(1-m)\frac{\partial ^2{\mathbf u}}{\partial t^2})
+\nabla (q+\pi )-\hat{\rho}\mathbf F= \\[1ex]
\mbox{div}_x \{\lambda _{0}\mathbb A^{s}_{0}:\mathbb D(x,{\mathbf u}) +
B^{s}_{0}\mbox{div}_x {\mathbf u}
+B^{s}_{1}q \},
\end{array} \right\}
\end{equation}
where $\mathbb A^{s}_{0}$, $B^{s}_{0}$ and $B^{s}_{1}$ are the same as in
Eq. \eqref{1.12}, the continuity equation \eqref{1.14}, the continuity
equation
\begin{equation} \label{1.22}
\frac{1}{\eta_{0}}(\pi +\langle
q\rangle_{\Omega})+\mbox{div}_x {\mathbf w}^{f} +
\frac{(1-m)\beta_{0s}}{\eta_{0}}(\vartheta -\langle \vartheta
\rangle_{\Omega})= (m-1)\mbox{div}_x {\mathbf u} ,
\end{equation}
the state equation \eqref{1.19}, the heat equation \eqref{1.20}
and Darcy's law in the form
\begin{equation}\label{1.23}
\frac{\partial {\mathbf w}^{f}}{\partial t}=m\frac{\partial {\mathbf u}}{\partial
t}+\int_{0}^{t} B_{1}(\mu_1,t-\tau)\cdot (-{\mathbf n}abla_x
q+\rho_{f}\mathbf F-\tau_{0}\rho_{f}\frac{\partial ^2 {\mathbf u}}{\partial \tau
^2})({\mathbf x},\tau )d\tau
{\mathbf e}nd{equation}
if $\tau_{0}>0$ and $\mu_{1}>0$, Darcy's law in the form
\begin{equation}\label{1.24}
\frac{\partial {\mathbf w}^{f}}{\partial t}=\frac{\partial {\mathbf u}}{\partial
t}+B_{2}(\mu_1)\cdot(-{\mathbf n}abla_x q+\rho_{f}\mathbf F)
{\mathbf e}nd{equation}
if $\tau_{0}=0$ and, finally, Darcy's law in the form
\begin{equation}\label{1.25}
\frac{\partial {\mathbf w}^{f}}{\partial t}=B_{3}\cdot \frac{\partial
{\mathbf u}}{\partial t}+\frac{1}{\tau
_{0}\rho_{f}}(m\mathbb I-B_{3})\cdot\int_{0}^{t}(-{\mathbf n}abla_x
q+\rho_{f}\mathbf F)({\mathbf x},\tau )d\tau
{\mathbf e}nd{equation}
if $\mu_{1}=0$.
The problem is supplemented by boundary and initial conditions
\eqref{1.17}--\eqref{1.18} for the displacement ${\mathbf u}$ and
temperature $\vartheta$ of the rigid component and by the boundary
condition
\begin{equation}\label{1.26}
{\mathbf w}^{f}({\mathbf x},t)\cdot {\mathbf n}({\mathbf x})=0,
\quad ({\mathbf x},t) \in S=\partial \Omega , \quad t>0
\end{equation}
for the displacement $ {\mathbf w}^{f}$ of the liquid component. In Eqs.
\eqref{1.23}--\eqref{1.26}, ${\mathbf n}({\mathbf x})$ is the unit normal vector to $S$
at a point ${\mathbf x} \in S$, and the matrices $B_{1}(\mu_1,t)$,
$B_{2}(\mu_1)$, and $B_{3}$ are given below by formulas
\eqref{4.52}--\eqref{4.57}.
{\mathbf e}nd{theorem}
\addtocounter{section}{1} \setcounter{equation}{0}
\setcounter{theorem}{0} \setcounter{lemma}{0}
\setcounter{proposition}{0} \setcounter{corollary}{0}
\setcounter{definition}{0} \setcounter{assumption}{0}
\begin{center} \textbf{\S2. Preliminaries}
\end{center}
\textbf{2.1. Two-scale convergence.} The justification of Theorems
\ref{theorem1}--\ref{theorem2} relies on the systematic use of the
method of two-scale convergence, which was proposed by G.
Nguetseng \cite{NGU} and has since been applied to a wide range
of homogenization problems (see, for example, the survey
\cite{LNW}).
\begin{definition} \label{TS}
A sequence $\{\varphi^\varepsilon\}\subset L^2(\Omega_{T})$ is said
to be \textit{two-scale convergent} to a limit $\varphi\in
L^2(\Omega_{T}\times Y)$ if and only if for any function
$\sigma=\sigma({\mathbf x},t,{\mathbf y})$, 1-periodic in ${\mathbf y}$, the limiting relation
\begin{equation}\label{(2.1)}
\lim_{\varepsilon\searrow 0} \int_{\Omega_{T}}
\varphi^\varepsilon({\mathbf x},t) \sigma\left({\mathbf x},t,{\mathbf x} /
\varepsilon\right)d{\mathbf x} dt = \int _{\Omega_{T}}\int_Y
\varphi({\mathbf x},t,{\mathbf y})\sigma({\mathbf x},t,{\mathbf y})d{\mathbf y} d{\mathbf x} dt
\end{equation}
holds.
\end{definition}
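The following classical example may help fix ideas (it is illustrative only and is not used in the proofs): if $\varphi^\varepsilon({\mathbf x},t)=a({\mathbf x},t)\rho({\mathbf x}/\varepsilon)$ with $a\in L^{2}(\Omega_{T})$ and $\rho\in L^{2}(Y)$ extended $1$-periodically, then $\{\varphi^\varepsilon\}$ two-scale converges to $\varphi({\mathbf x},t,{\mathbf y})=a({\mathbf x},t)\rho({\mathbf y})$, while its ordinary weak limit in $L^{2}(\Omega_{T})$ is only the averaged function $a\langle\rho\rangle_{Y}$; the two-scale limit thus keeps track of the oscillation profile lost in the weak limit. The short script below (a numerical sketch with hypothetical choices of $a$, $\rho$ and the test function $\sigma$; it is not part of the paper's argument) checks the limiting relation \eqref{(2.1)} in one space dimension with $\Omega=Y=(0,1)$ and no time variable.
\begin{verbatim}
import numpy as np

# Sketch: verify the two-scale limit of phi_eps(x) = a(x)*rho(x/eps)
# against the test function sigma(x, y).  All concrete choices below
# (a, rho, sigma, the grids) are hypothetical and purely illustrative.
a = lambda x: 1.0 + x
rho = lambda y: np.cos(2.0 * np.pi * y)
sigma = lambda x, y: x * np.cos(2.0 * np.pi * y)

# Right-hand side of (2.1): double integral of a(x)*rho(y)*sigma(x, y)
# over (0,1) x (0,1); no oscillations here, so a coarse grid suffices.
x_c = np.linspace(0.0, 1.0, 2001)
y_c = np.linspace(0.0, 1.0, 2001)
X, Y = np.meshgrid(x_c, y_c, indexing="ij")
rhs = np.trapz(np.trapz(a(X) * rho(Y) * sigma(X, Y), y_c, axis=1), x_c)

# Left-hand side of (2.1) on a grid fine enough to resolve x/eps.
x_f = np.linspace(0.0, 1.0, 400001)
for eps in (0.1, 0.01, 0.001):
    lhs = np.trapz(a(x_f) * rho(x_f / eps) * sigma(x_f, x_f / eps), x_f)
    print("eps =", eps, " lhs =", round(lhs, 6), " rhs =", round(rhs, 6))
\end{verbatim}
Both sides approach the value $5/12$ as $\varepsilon\searrow 0$, in agreement with \eqref{(2.1)}.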
The existence and main properties of two-scale convergent sequences are
established by the following fundamental theorem \cite{NGU,LNW}:
\begin{theorem} \label{theorem3}(\textbf{Nguetseng's theorem})
\textbf{1.} Any sequence bounded in $L^2(Q)$ contains a subsequence
that two-scale converges to some limit
$\varphi\in L^2(\Omega_{T}\times Y)$.\\[1ex]
\textbf{2.} Let sequences $\{\varphi^\varepsilon\}$ and
$\{\varepsilon {\mathbf n}abla_x \varphi^\varepsilon\}$ be uniformly bounded
in $L^2(\Omega_{T})$. Then there exist a 1-periodic in ${\mathbf y}$ function
$\varphi=\varphi({\mathbf x},t,{\mathbf y})$ and a subsequence
$\{\varphi^\varepsilon\}$ such that $\varphi,{\mathbf n}abla_y \varphi\in
L^2(\Omega_{T}\times Y)$, and $\varphi^\varepsilon$ and $\varepsilon
{\mathbf n}abla_x \varphi^\varepsilon$ two-scale converge to $\varphi$ and
${\mathbf n}abla_y \varphi$,
respectively.\\[1ex]
\textbf{3.} Let sequences $\{\varphi^\varepsilon\}$ and $\{{\mathbf n}abla_x
\varphi^\varepsilon\}$ be bounded in $L^2(Q)$. Then there exist
functions $\varphi\in L^2(\Omega_{T})$ and $\psi \in
L^2(\Omega_{T}\times Y)$ and a subsequence from
$\{\varphi^\varepsilon\}$ such that $\psi$ is 1-periodic in ${\mathbf y}$,
${\mathbf n}abla_y \psi\in L^2(\Omega_{T}\times Y)$, and
$\varphi^\varepsilon$ and ${\mathbf n}abla_x \varphi^\varepsilon$ two-scale
converge to $\varphi$ and ${\mathbf n}abla_x \varphi({\mathbf x},t)+{\mathbf n}abla_y
\psi({\mathbf x},t,{\mathbf y})$, respectively.
{\mathbf e}nd{theorem}
\begin{corollary} \label{corollary2.1}
Let $\sigma\in L^2(Y)$ and
$\sigma^\varepsilon({\mathbf x}):=\sigma({\mathbf x}/\varepsilon)$. Assume that a
sequence $\{\varphi^\varepsilon\}\subset L^2(\Omega_{T})$ two-scale
converges to $\varphi \in L^2(\Omega_{T}\times Y)$. Then the
sequence $\sigma^\varepsilon \varphi^\varepsilon$ two-scale
converges to $\sigma \varphi$.
\end{corollary}
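Let us also record, for later reference, an immediate consequence of Definition \ref{TS}, obtained by taking test functions $\sigma$ independent of ${\mathbf y}$: if a bounded sequence $\{\varphi^\varepsilon\}$ two-scale converges to $\varphi \in L^2(\Omega_{T}\times Y)$, then
\begin{equation*}
\varphi^\varepsilon \rightarrow \langle \varphi\rangle_{Y}=\int_{Y}\varphi(\,\cdot\,,\,\cdot\,,{\mathbf y})\,d{\mathbf y}
\quad \mbox{weakly in } L^2(\Omega_{T}).
\end{equation*}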
\textbf{2.2. An extension lemma.} A typical difficulty in
homogenization problems, when passing to the limit in Model
${(\mathbf{N}\mathbf B})^\varepsilon$ as $\varepsilon \searrow 0$,
arises from the fact that the bounds on the gradient of
displacement $\nabla_x {\mathbf w}^\varepsilon$ may be different in the liquid and
rigid phases. The classical approach to overcoming this difficulty
consists in constructing an extension to the whole of $\Omega$ of the
displacement field defined merely on $\Omega_s$. The following lemma
holds due to the well-known results of \cite{ACE,JKO}. We
formulate it in a form convenient for us:
\begin{lemma} \label{Lemma2.1}
Suppose that the assumptions of Sec.~1.2 on the geometry of the periodic
structure hold, $ \psi^\varepsilon\in W^1_2(\Omega^\varepsilon_s)$,
and $\psi^\varepsilon =0$ on $S_{s}^{\varepsilon}=\partial \Omega
^\varepsilon_s \cap
\partial \Omega$ in the trace sense. Then there exists a function
$ \sigma^\varepsilon \in
W^1_2(\Omega)$ such that its restriction to the sub-domain
$\Omega^\varepsilon_s$ coincides with $\psi^\varepsilon$, i.e.,
\begin{equation} \label{2.2}
(1-\chi^\varepsilon({\mathbf x}))( \sigma^\varepsilon({\mathbf x}) - \psi^\varepsilon
({\mathbf x}))=0,\quad {\mathbf x}\in\Omega,
\end{equation}
and, moreover, the estimates
\begin{equation} \label{2.3}
\|\sigma^\varepsilon\|_{2,\Omega}\leq C\|
\psi^\varepsilon\|_{2,\Omega ^{\varepsilon}_{s}} , \quad \|\nabla_x
\sigma^\varepsilon\|_{2,\Omega} \leq C \|\nabla_x
\psi^\varepsilon\|_{2,\Omega ^{\varepsilon}_{s}}
\end{equation}
hold true, where the constant $C$
depends only on the geometry of $Y$ and does not depend on $\varepsilon$.
\end{lemma}
\textbf{2.3. Friedrichs--Poincar\'{e}'s inequality in a periodic
structure.} The following lemma was proved by L. Tartar in
\cite[Appendix]{S-P}. It specifies the Friedrichs--Poincar\'{e}
inequality for an $\varepsilon$-periodic structure.
\begin{lemma} \label{F-P}
Suppose that the assumptions on the geometry of $\Omega^\varepsilon_f$
hold true. Then for any function $\varphi\in
\stackrel{\!\!\circ}{W^1_2}(\Omega^\varepsilon_f)$ the inequality
\begin{equation} \label{(F-P)}
\int_{\Omega^\varepsilon_f} |\varphi|^2 d{\mathbf x} \leq C \varepsilon^2
\int_{\Omega^\varepsilon_f} |\nabla_x \varphi|^2 d{\mathbf x}
\end{equation}
holds true with some constant $C$ independent of $\varepsilon$.
\end{lemma}
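Let us indicate, for orientation only, where the factor $\varepsilon^{2}$ comes from (this is a heuristic sketch and not a substitute for the proof in \cite[Appendix]{S-P}). On every cell $\varepsilon({\mathbf k}+Y_{f})$ the rescaled function $\psi({\mathbf y})=\varphi(\varepsilon({\mathbf k}+{\mathbf y}))$ vanishes on the portion $\gamma$ of the boundary of $Y_{f}$, so the ordinary Poincar\'{e} inequality on the fixed domain $Y_{f}$ (for functions vanishing on $\gamma$) gives $\int_{Y_{f}}|\psi|^{2}d{\mathbf y}\leq C\int_{Y_{f}}|\nabla_{y}\psi|^{2}d{\mathbf y}$. Returning to the variable ${\mathbf x}=\varepsilon({\mathbf k}+{\mathbf y})$, for which $\nabla_{y}\psi=\varepsilon\nabla_{x}\varphi$, and summing over all cells meeting $\Omega$ yields \eqref{(F-P)}.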
\textbf{2.4. Some notation.} Throughout the rest of the paper we use the following notation.
1) $ \langle\Phi \rangle_{Y} =\int_Y \Phi dy, \quad
\langle\Phi \rangle_{Y_{f}} =\int_{Y_{f}} \Phi dy,\quad
\langle\Phi \rangle_{Y_{s}} =\int_{Y_{s}} \Phi dy.$
2) If $\textbf{a}$ and $\textbf{b}$ are two vectors then the matrix
$\textbf{a}\otimes \textbf{b}$ is defined by the formula
$$(\textbf{a}\otimes \textbf{b})\cdot
\textbf{c}=\textbf{a}(\textbf{b}\cdot \textbf{c})$$ for any vector
$\textbf{c}$.
3) If $B$ and $C$ are two matrices, then $B\otimes C$ is the
fourth-rank tensor whose contraction with any matrix $A$ is
defined by the formula
$$(B\otimes C):A=B (C:A).$$
4) By $\mathbb I^{ij}$ we denote the $3\times 3$-matrix with
just one non-vanishing entry, which is equal to one and stands in
the $i$-th row and the $j$-th column.
5) We also introduce
$$
J^{ij}=\frac{1}{2}(\mathbb I^{ij}+\mathbb I^{ji})=\frac{1}{2} ({\mathbf e}_i
\otimes {\mathbf e}_j + {\mathbf e}_j \otimes {\mathbf e}_i),
$$
where $({\mathbf e}_1, {\mathbf e}_2, {\mathbf e}_3)$ are the
standard Cartesian basis vectors. \addtocounter{section}{1}
\setcounter{equation}{0} \setcounter{theorem}{0}
\setcounter{lemma}{0} \setcounter{proposition}{0}
\setcounter{corollary}{0} \setcounter{definition}{0}
\setcounter{assumption}{0}
\begin{center} \textbf{\S3. Proof of Theorem \ref{theorem1}}
{\mathbf e}nd{center}
Under the restriction $\tau_{0}>0$, estimates
\eqref{1.9}--\eqref{1.10} follow from
\begin{equation*}
\max\limits_{0<t<T}(\sqrt{\alpha_{\mathbf e}ta}\| \mbox{div}_x
\frac{\partial{\mathbf w}^{\varepsilon}}{\partial t}(t) \|_{2,\Omega _s^{\varepsilon}}+
\sqrt{\alpha_\lambda}\|
{\mathbf n}abla_x \frac{\partial{\mathbf w}^{\varepsilon}}{\partial t}(t) \|_{2,\Omega
_s^{\varepsilon}}
{\mathbf e}nd{equation*}
\begin{equation*}
+ \sqrt{\alpha_\tau}\| \frac{\partial ^{2}{\mathbf w}^\varepsilon}{\partial
t^{2}}(t)\|_{2,\Omega}+\sqrt{\alpha _{p}} \| \mbox{div}_x
\frac{\partial{\mathbf w}^{\varepsilon}}{\partial t}(t)\|_{2,\Omega
_f^{\varepsilon}}+ \sqrt{\alpha_\tau}\|
\frac{\partial\theta^{\varepsilon}}{\partial t}(t)\|_{2,\Omega})
{\mathbf e}nd{equation*}
\begin{equation*}
+\sqrt{\alpha _{\varkappa f}}\|\chi ^{\varepsilon} {\mathbf n}abla_x
\frac{\partial\theta^{\varepsilon}}{\partial t}\|_{2,\Omega
_{T}}+\sqrt{\alpha _{\varkappa s}}\|(1- \chi ^{\varepsilon})
{\mathbf n}abla_x \frac{\partial\theta^{\varepsilon}}{\partial t}\|_{2,\Omega
_{T}}
{\mathbf e}nd{equation*}
\begin{equation} \label{3.1}
+\sqrt{\alpha_\mu}\|\chi ^{\varepsilon}
{\mathbf n}abla_x \frac{\partial ^{2}{\mathbf w}^\varepsilon}{\partial t^{2}} \|_{2,\Omega_T}+
\sqrt{\alpha _{{\mathbf n}u}}\| \chi ^{\varepsilon} \mbox{div}_x
\frac{\partial ^{2}{\mathbf w}^\varepsilon}{\partial t^{2}}\|_{2,\Omega _{T}}
\leq \frac{C_{0}}{\sqrt{\alpha_\tau}},
{\mathbf e}nd{equation}
where $C_{0}$ is independent of $\varepsilon$. We obtain this last estimate
by differentiating the equations for ${\mathbf w}^{\varepsilon}$ and
$\theta^{\varepsilon}$ with respect to time, multiplying the first equation
by $\partial ^{2} {\mathbf w}^{\varepsilon} /
\partial t^{2}$ and the second one by $\partial\theta^{\varepsilon} /
\partial t $, integrating by parts, and summing the results. The same estimate
guarantees the existence and uniqueness of the generalized solution
of the model ${(\mathbf{N}\mathbf B})^\varepsilon$.
Estimate \eqref{1.11} for the pressures follows from the integral identity
\eqref{1.7} and estimates \eqref{3.1}, as an estimate of the
corresponding functional, once the pressures are re-normalized so that
$$\int _{\Omega} (q^\varepsilon({\mathbf x},t)+\pi^\varepsilon({\mathbf x},t)) d{\mathbf x}=0. $$
Indeed, the integral identity \eqref{1.7} and estimates \eqref{3.1}
imply
$$|\int _{\Omega} (q^\varepsilon+\pi^\varepsilon )\mbox{div}_x {\mathbf{\psi}} d{\mathbf x}
|\leq C \|\nabla {\mathbf{\psi}}\|_{2,\Omega}.$$
Choosing now ${\mathbf{\psi}}$ such that
$(q^\varepsilon+\pi^\varepsilon )= \mbox{div}_x {\mathbf{\psi}}$,
we get the desired estimate for the sum of pressures
$(q^\varepsilon+\pi^\varepsilon )$. Such a choice is always
possible (see \cite{LAD}): it suffices to put
$${\mathbf{\psi}}=\nabla \varphi + {\mathbf{\psi_{0}}}, \quad
\mbox{div}_x {\mathbf{\psi_{0}}}=0, \quad \triangle
\varphi=q^\varepsilon+\pi^\varepsilon ,\quad \varphi |
_{\partial\Omega}=0, \quad (\nabla \varphi + {\mathbf{\psi_{0}}})|
_{\partial\Omega}=0.$$
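Indeed, with this choice $\mbox{div}_x {\mathbf{\psi}}=\triangle\varphi+\mbox{div}_x {\mathbf{\psi_{0}}}=q^\varepsilon+\pi^\varepsilon$, as required.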
Note that the re-normalization of the pressures $(q^\varepsilon+\pi^\varepsilon )$
transforms the continuity and state equations
\eqref{1.4}--\eqref{1.6} for the pressures into
\begin{eqnarray} \label{3.2}
&\displaystyle q^{\varepsilon}=p^{\varepsilon}+
\frac{\alpha_{\mathbf n}u}{\alpha_p}\frac{\partial p^{\varepsilon}}{\partial
t}+
\chi^{\varepsilon}(\alpha _{\theta f}\theta ^{\varepsilon}+\gamma ^{\varepsilon}_{f}),\\
\label{3.3}& \displaystyle \frac{1}{\alpha_p}p^{\varepsilon}+
\chi^{\varepsilon}\mbox{div}_x {\mathbf w}^{\varepsilon}=-\frac{1}{m}\beta ^{\varepsilon}\chi^\varepsilon ,\\
\label{3.4}& \displaystyle \frac{1}{\alpha_{\mathbf e}ta}\pi^{\varepsilon}
+(1-\chi^{\varepsilon}) (\mbox{div}_x {\mathbf w}^{\varepsilon}-\frac{\alpha
_{\theta s}}{\alpha_{\mathbf e}ta}\theta^{\varepsilon}+\gamma
^{\varepsilon}_{s})=0,
{\mathbf e}nd{eqnarray}
where
$$\beta ^{\varepsilon}=\langle (1-\chi^\varepsilon )\mbox{div}_x
{\mathbf w}^\varepsilon \rangle _{\Omega},\quad m\gamma
^{\varepsilon}_{f}=\langle q^\varepsilon \rangle _{\Omega}-\alpha
_{\theta f}\langle \chi^{\varepsilon}\theta ^{\varepsilon}\rangle
_{\Omega},$$
$$ (1-m)\gamma
^{\varepsilon}_{s}=\frac{1}{\alpha_\eta}\langle q^\varepsilon
\rangle _{\Omega}+\frac{\alpha _{\theta s}}{\alpha_\eta}\langle
(1-\chi^{\varepsilon})\theta ^{\varepsilon}\rangle _{\Omega}-\beta
^{\varepsilon}.$$
Note that the basic integral identity \eqref{1.7} only permits us to bound
the sum $(q^\varepsilon +\pi^{\varepsilon})$. However, since the
product of these two functions is equal to zero,
a bound on the sum already yields bounds on each of them. The pressure
$p^{\varepsilon}$ is then estimated from the state equation
\eqref{3.2}, after we substitute the term $(\alpha_{\nu} / \alpha_p)\partial
p^\varepsilon / \partial t$ from the continuity equation
\eqref{3.3} and use estimate \eqref{3.1}.
The estimation of ${\mathbf w}^\varepsilon$ and $\theta^\varepsilon$ in the
case $\tau_0=0$ is less straightforward, so we outline it in more detail. As
usual, we obtain the basic estimates by multiplying the equations for
${\mathbf w}^\varepsilon$
by $\partial {\mathbf w}^\varepsilon /\partial t$ and the equation for $\theta^\varepsilon$
by $\theta^\varepsilon$, summing the results, and then
integrating all resulting terms by parts. Only the two terms $\mathbf F\cdot
\partial {\mathbf w}^\varepsilon / \partial t $ and $\Psi\cdot \theta ^{\varepsilon}
$ need additional consideration here. First of all, on the strength
of Lemma \ref{Lemma2.1}, we construct an extension ${\mathbf u}^\varepsilon
$ of the function ${\mathbf w}^\varepsilon $ from $\Omega_s^\varepsilon$ into
$\Omega_f^\varepsilon$ such that ${\mathbf u}^\varepsilon ={\mathbf w}^\varepsilon$
in $\Omega_s^\varepsilon$, ${\mathbf u}^\varepsilon \in W_2^1(\Omega)$ and
$$\| {\mathbf u}^\varepsilon\|_{2,\Omega} \leq C
\|{\mathbf n}abla_x {\mathbf u}^\varepsilon\|_{2,\Omega} \leq
\frac{C}{\sqrt{\alpha_\lambda}}
\|(1-\chi^\varepsilon)\sqrt{\alpha_\lambda}{\mathbf n}abla_x {\mathbf w}^\varepsilon\|_{2,\Omega }.$$
After that we estimate $\|{\mathbf w}^\varepsilon\|_{2,\Omega}$ with the help
of the Friedrichs--Poincar\'{e} inequality in a periodic structure
(Lemma \ref{F-P}), applied to the difference $({\mathbf u}^\varepsilon
-{\mathbf w}^\varepsilon)$:
$$\|{\mathbf w}^\varepsilon\|_{2,\Omega} \leq
\|{\mathbf u}^\varepsilon\|_{2,\Omega} + \|{\mathbf u}^\varepsilon
-{\mathbf w}^\varepsilon\|_{2,\Omega} \leq \|{\mathbf u}^\varepsilon\|_{2,\Omega} +
C\varepsilon \|\chi^\varepsilon {\mathbf n}abla_x ({\mathbf u}^\varepsilon
-{\mathbf w}^\varepsilon)\|_{2,\Omega} $$
$$\leq
\|{\mathbf u}^\varepsilon\|_{2,\Omega}+C\varepsilon \|{\mathbf n}abla_x
{\mathbf u}^\varepsilon\|_{2,\Omega}+C(\varepsilon \alpha _{\mu
}^{-\frac{1}{2}})\|\chi^\varepsilon \sqrt{\alpha_\mu} {\mathbf n}abla_x
{\mathbf w}^\varepsilon\|_{2,\Omega}$$
$$\leq \frac{C}{\sqrt{\alpha_\lambda}}
\|(1-\chi^\varepsilon)\sqrt{\alpha_\lambda}{\mathbf n}abla_x
{\mathbf w}^\varepsilon\|_{2,\Omega }+C(\varepsilon \alpha _{\mu
}^{-\frac{1}{2}})\|\chi^\varepsilon \sqrt{\alpha_\mu} {\mathbf n}abla_x
{\mathbf w}^\varepsilon\|_{2,\Omega}.$$
We apply the same method to the temperature $\theta^{\varepsilon}$:
there is an extension $\vartheta^\varepsilon $ of the function
$\theta^\varepsilon $ from $\Omega_s^\varepsilon$ into
$\Omega_f^\varepsilon$ such that $\vartheta^\varepsilon
=\theta^\varepsilon$ in $\Omega_s^\varepsilon$,
$\vartheta^\varepsilon \in W_2^1(\Omega)$ and
$$\| \vartheta^\varepsilon\|_{2,\Omega} \leq C
\|{\mathbf n}abla_x \vartheta^\varepsilon\|_{2,\Omega} \leq
\frac{C}{\sqrt{\alpha_{\varkappa s}}}
\|(1-\chi^\varepsilon)\sqrt{\alpha_{\varkappa s}}{\mathbf n}abla_x \theta^\varepsilon\|_{2,\Omega },$$
$$\|\theta^\varepsilon\|_{2,\Omega} \leq \frac{C}{\sqrt{\alpha_{\varkappa s}}}
\|(1-\chi^\varepsilon)\sqrt{\alpha_{\varkappa s}}{\mathbf n}abla_x \theta^\varepsilon\|_{2,\Omega }
+C(\varepsilon \alpha _{\varkappa s
}^{-\frac{1}{2}})\|\chi^\varepsilon \sqrt{\alpha_{\varkappa s}}
{\mathbf n}abla_x \theta^\varepsilon\|_{2,\Omega}.$$
Next we transfer the time derivative from
$\partial {\mathbf w}^{\varepsilon }/ \partial t$ to $\rho^{\varepsilon}\mathbf F$
and bound all newly obtained terms in the usual way with the help of
H\"{o}lder's and Gronwall's inequalities.
The rest of the proof is the same as in the case
$\tau_0>0$, once we use the following consequence of \eqref{3.1}:
$$\max\limits_{0<t<T}\alpha_\tau \| \frac{\partial ^2
{\mathbf w}^{\varepsilon}}{\partial t^2}(t)\|_{2,\Omega}\leq C_{0}.$$
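Indeed, \eqref{3.1} gives
$\max_{0<t<T}\sqrt{\alpha_\tau}\,\| \partial ^2{\mathbf w}^{\varepsilon}/\partial t^2(t)\|_{2,\Omega}\leq C_{0}/\sqrt{\alpha_\tau}$,
and multiplying both sides by $\sqrt{\alpha_\tau}$ yields the bound above.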
\addtocounter{section}{1} \setcounter{equation}{0}
\setcounter{theorem}{0} \setcounter{lemma}{0}
\setcounter{proposition}{0} \setcounter{corollary}{0}
\setcounter{definition}{0} \setcounter{assumption}{0}
\begin{center} \textbf{\S4. Proof of Theorem \ref{theorem2}}
\end{center}
\textbf{4.1. Weak and two-scale limits of sequences of displacement,
temperatures and pressures.} On the strength of Theorem
\ref{theorem1}, the sequences $\{\theta^\varepsilon\}$,
$\{p^\varepsilon\}$, $\{q^\varepsilon\}$, $\{\pi^\varepsilon\}$ and $\{{\mathbf w}^\varepsilon
\}$ are uniformly in $\varepsilon$ bounded in $L^2(\Omega_{T})$.
Hence there exist a subsequence of small parameters
$\{\varepsilon>0\}$ and functions $\theta $, $p$, $q$, $\pi$ and
${\mathbf w}$ such that
\begin{equation*}
\theta^\varepsilon \rightarrow \theta,\quad p^\varepsilon
\rightarrow p,\quad q^\varepsilon \rightarrow q, \quad
\pi^\varepsilon \rightarrow \pi, \quad {\mathbf w}^\varepsilon \rightarrow
{\mathbf w}
{\mathbf e}nd{equation*}
weakly in $L^2(\Omega_T)$ as $\varepsilon\searrow 0$.
Due to Lemma \ref{Lemma2.1} there is a function ${\mathbf u}^\varepsilon \in
L^\infty ((0,T);W^1_2(\Omega))$ such that ${\mathbf u}^\varepsilon
={\mathbf w}^\varepsilon $ in $\Omega_{s}\times (0,T)$, and the family
$\{{\mathbf u}^\varepsilon \}$ is uniformly in $\varepsilon$ bounded in
$L^\infty ((0,T);W^1_2(\Omega))$. Therefore it is possible to
extract a subsequence of $\{\varepsilon>0\}$ such that
\begin{equation*}
{\mathbf u}^\varepsilon \rightarrow {\mathbf u} \mbox{ weakly in } L^2
((0,T);W^1_2(\Omega))
{\mathbf e}nd{equation*}
as $\varepsilon \searrow 0$.
Applying Lemma
\ref{Lemma2.1} once more, we conclude that there is a function
$\vartheta^\varepsilon \in L^{2}((0,T);W^1_2(\Omega))$ such that
$\vartheta^\varepsilon =\theta^\varepsilon $ in $\Omega_{s}\times
(0,T)$, and the family $\{\vartheta^\varepsilon \}$ is uniformly in
$\varepsilon$ bounded in $L^{2}((0,T);W^1_2(\Omega))$. Therefore it
is possible to extract a subsequence of $\{\varepsilon>0\}$ such
that
\begin{equation*}
\vartheta^\varepsilon \rightarrow \vartheta \mbox{ weakly in } L^2
((0,T);W^1_2(\Omega))
{\mathbf e}nd{equation*}
as $\varepsilon \searrow 0$.
Moreover,
\begin{equation} \label{4.1}
\chi^\varepsilon \alpha_\mu \mathbb D({\mathbf x},{\mathbf w}^\varepsilon) \rightarrow 0,
\quad \chi^\varepsilon \alpha _{\varkappa f} {\mathbf n}abla
\theta^\varepsilon \rightarrow 0
{\mathbf e}nd{equation}
as $\varepsilon \searrow 0$.
Relabelling if necessary, we may assume that the sequences
themselves converge.
On the strength of Nguetseng's theorem, there exist 1-periodic in
${\mathbf y}$ functions $\Theta ({\mathbf x},t,{\mathbf y})$, $P({\mathbf x},t,{\mathbf y})$, $\Pi({\mathbf x},t,{\mathbf y})$,
$Q({\mathbf x},t,{\mathbf y})$, $\mathbf W({\mathbf x},t,{\mathbf y})$, $\Theta ^{s} ({\mathbf x},t,{\mathbf y})$ and
$\mathbf U({\mathbf x},t,{\mathbf y})$ such that the sequences $\{\theta^\varepsilon\}$,
$\{p^\varepsilon\}$, $\{\pi^\varepsilon\}$, $\{q^\varepsilon\}$,
$\{{\mathbf w}^\varepsilon \}$, $\{{\mathbf n}abla_x \vartheta^\varepsilon \}$ and
$\{{\mathbf n}abla_x {\mathbf u}^\varepsilon \}$ two-scale converge to $\Theta
({\mathbf x},t,{\mathbf y})$, $P({\mathbf x},t,{\mathbf y})$, $\Pi({\mathbf x},t,{\mathbf y})$, $Q({\mathbf x},t,{\mathbf y})$,
$\mathbf W({\mathbf x},t,{\mathbf y})$, ${\mathbf n}abla _{x}\vartheta +{\mathbf n}abla_{y}\Theta
^{s}({\mathbf x},t,{\mathbf y})$ and ${\mathbf n}abla _{x}{\mathbf u} +{\mathbf n}abla_{y}\mathbf U({\mathbf x},t,{\mathbf y})$,
respectively.
Note that the sequence $\{\mbox{div}_x {\mathbf w}^\varepsilon \}$ converges weakly
to $\mbox{div}_x {\mathbf w}$ and that $ \vartheta ,|{\mathbf u}| \in L^2
((0,T);\stackrel{\!\!\circ}{W^1_2}(\Omega)).$ In the case of a
disconnected porous space the last assertion follows from the inclusions
$\vartheta ^\varepsilon ,|{\mathbf u} ^\varepsilon |\in L^2
((0,T);\stackrel{\!\!\circ}{W^1_2}(\Omega))$, while for a connected
porous space it follows from the Friedrichs--Poincar\'{e}
inequality for ${\mathbf u}^\varepsilon$ and $ \vartheta ^\varepsilon$ in
the $\varepsilon$-layer of the boundary $S$ and from the convergence of
the sequences
$\{{\mathbf u}^\varepsilon \}$ and $\{\vartheta^\varepsilon \}$ to
${\mathbf u}$ and $ \vartheta $, respectively, strongly
in $L^2(\Omega_{T})$ and weakly in $L^2 ((0,T);W^1_2(\Omega))$.\\
\textbf{4.2. Micro- and macroscopic equations I.}
\begin{lemma} \label{lemma4.1}
For all $ {\mathbf x} \in \Omega$ and ${\mathbf y}\in Y$ weak and two-scale limits of
the sequences $\{\theta^\varepsilon\}$, $\{p^\varepsilon\}$,
$\{\pi^\varepsilon\}$, $\{q^\varepsilon\}$, $\{{\mathbf w}^\varepsilon\}$,
$\{{\mathbf n}abla_x \vartheta^\varepsilon \}$ and $\{{\mathbf n}abla_x
{\mathbf u}^\varepsilon \}$ satisfy the relations
\begin{eqnarray} \label{4.2}
&Q=\frac{1}{m}\chi q, \quad Q=P+\chi (\beta_{0f} \Theta+\gamma_{f});\\
\label{4.3} & \frac{1}{{\mathbf e}ta_{0}}\Pi+(1-\chi )
(\mbox{div}_x{\mathbf u} + \mbox{div}_y \mathbf U-\frac{\beta_{0s}}{{\mathbf e}ta_{0}}(\vartheta-\langle \vartheta
\rangle_{\Omega}) +\gamma_{s})=0;\\
\label{4.4} & \mbox{div}_y \mathbf W=0;\\
\label{4.5} &\mathbf W=\chi \mathbf W +
(1-\chi){\mathbf u} ;\\
\label{4.6} &\Theta=\chi \Theta +
(1-\chi)\vartheta ;\\
\label{4.7} & q=p +\beta_{0f}\theta ^{f}+m\gamma_{f};\\
\label{4.8} & \frac{1}{{\mathbf e}ta_{0}}\pi+ (1-m)(\mbox{div}_x {\mathbf u}
-\frac{\beta_{0s}}{{\mathbf e}ta_{0}}(\vartheta-\langle \vartheta
\rangle_{\Omega}) +\gamma_{s})+
\langle \mbox{div}_y\mathbf U\rangle_{Y_{s}}=0;\\
\label{4.9} & \frac{1}{{\mathbf e}ta_{0}}\pi+\mbox{div}_x {\mathbf w}
-(1-m)(\frac{\beta_{0s}}{{\mathbf e}ta_{0}}(\vartheta-\langle \vartheta
\rangle_{\Omega})-\gamma_{s})+\beta =0,
{\mathbf e}nd{eqnarray}
where
$$\beta =\langle \langle
\mbox{div}_y\mathbf U\rangle_{Y_{s}}\rangle_{\Omega},
\quad \theta ^{f}=\langle \Theta\rangle_{Y_{f}},$$
$$m\gamma_{f}=\langle q\rangle_{\Omega}-\beta_{0f}\langle \theta ^{f}\rangle_{\Omega},
\quad (1-m)\gamma_{s}=\frac{1}{{\mathbf e}ta_{0}}\langle
q\rangle_{\Omega}-\beta .$$
{\mathbf e}nd{lemma}
\begin{proof}
In order to prove the first equation in \eqref{4.2}, insert into
Eq. \eqref{1.7} a test function ${\mathbf \psi}^\varepsilon =\varepsilon
{\mathbf \psi}\left({\mathbf x},t,{\mathbf x} / \varepsilon\right)$, where ${\mathbf
\psi}({\mathbf x},t,{\mathbf y})$ is an arbitrary function, 1-periodic in ${\mathbf y}$ and finite on
$Y_f$. Passing to the limit as $\varepsilon \searrow 0$, we
get
\begin{equation} \label{4.10}
\nabla_y Q({\mathbf x},t,{\mathbf y})=0, \quad {\mathbf y}\in Y_{f}.
\end{equation}
The weak and two-scale limiting passages in Eq. \eqref{3.2}
yield Eq. \eqref{4.7}
and the second equation in \eqref{4.2}, respectively.
Next, performing the two-scale limiting passage in the equalities
$$(1-\chi^{\varepsilon})p^{\varepsilon}
=0,\quad (1-\chi^{\varepsilon})q^{\varepsilon} =0,$$ we get
$$(1-\chi )P=0,\quad (1-\chi )Q=0,$$
which, together with \eqref{4.10}, justify the first equation in \eqref{4.2}.
Eqs. \eqref{4.3}, \eqref{4.4}, \eqref{4.8}, and \eqref{4.9}
appear as the results of two-scale limiting passages in
Eqs. \eqref{3.2}--\eqref{3.4} with the proper test functions being
involved. Thus, for example, Eq. \eqref{4.8} is a direct consequence of
Eq. \eqref{4.3}, and Eq. \eqref{4.9} is the result of two-scale
convergence in the sum of Eq. \eqref{3.3} and Eq. \eqref{3.4} with
test functions independent of the ``fast'' variable
${\mathbf y}={\mathbf x} / \varepsilon$. Eq. \eqref{4.4} is derived
quite similarly if we multiply the same sum of Eq. \eqref{3.3} and
Eq. \eqref{3.4} by an arbitrary function ${\mathbf \psi}^\varepsilon
=\varepsilon {\mathbf \psi}\left({\mathbf x},t,{\mathbf x} / \varepsilon\right)$ and
pass to the limit as $\varepsilon\searrow 0$.
In order to prove Eqs.{\mathbf e}qref{4.5} and {\mathbf e}qref{4.6} it is
sufficient to consider the two-scale limiting relations in
\begin{equation*}
(1-\chi ^{\varepsilon})({\mathbf w}^{\varepsilon}-{\mathbf u}^{\varepsilon})=0, \quad
(1-\chi
^{\varepsilon})(\theta^{\varepsilon}-\vartheta^{\varepsilon})=0.
{\mathbf e}nd{equation*}
{\mathbf e}nd{proof}
\begin{lemma} \label{lemma4.2} For all $({\mathbf x},t) \in \Omega_{T}$
and ${\mathbf y} \in Y$ the relation
\begin{equation} \label{4.11}
\mbox{div}_y \{\lambda_0(1-\chi ) (\mathbb D(y,\mathbf U)+\mathbb D(x,{\mathbf u}))- (\Pi
+\frac{1}{m}q \chi )\cdot \mathbb I \}=0
\end{equation}
holds true.
\end{lemma}
\begin{proof}
Substituting a test function of the form ${\mathbf \psi}^\varepsilon =\varepsilon {\mathbf
\psi}\left({\mathbf x},t,{\mathbf x} / \varepsilon \right)$, where ${\mathbf
\psi}({\mathbf x},t,{\mathbf y})$ is an arbitrary 1-periodic in ${\mathbf y}$ function
vanishing on the boundary $\partial \Omega$, into Eq.{\mathbf e}qref{1.7} and
passing to the limit as $\varepsilon \searrow 0$, we arrive at the
desired microscopic relation on the cell $Y$.
{\mathbf e}nd{proof}
In the same way, using additionally the continuity equations \eqref{3.3}
and \eqref{3.4}, one gets from Eq. \eqref{1.8}:
\begin{lemma} \label{lemma4.3}
For all $({\mathbf x},t) \in \Omega_{T}$ the relations
\begin{equation} \label{4.12}
\left. \begin{array}{lll} \displaystyle \triangle _{y}\Theta ^{s}
= 0, \quad {\mathbf y}\in Y_s,\\[1ex]
\frac{\partial\Theta ^{s}}{\partial n}=-{\mathbf n}abla_{x} \vartheta \cdot
\mathbf{n}, \quad {\mathbf y}\in \gamma
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
hold true.
{\mathbf e}nd{lemma}
Now we pass to the macroscopic equations for the solid
displacements.
\begin{lemma} \label{lemma4.4}
Let ${\hat{\rho}}=m \rho_{f} + (1-m)\rho_{s}$ and ${\mathbf w}^{f}=\langle
\mathbf W\rangle_{Y_{f}}$. Then the functions ${\mathbf u} , {\mathbf w}^{f}, q, \pi ,
\theta^{f} , \vartheta $ satisfy in $\Omega_{T}$ the system of
macroscopic equations
\begin{eqnarray}\label{4.14}
&& \tau _{0}\rho_{f}\frac{\partial ^2{\mathbf w}^{f}}{\partial t^2}+\tau
_{0}\rho_{s}(1-m)\frac{\partial ^2{\mathbf u}}{\partial t^2}-{\mathbf h}at{\rho}\mathbf F=\\
&&\mbox{div}_x \{\lambda _{0}((1-m)\mathbb D(x,{\mathbf u})+ \langle \mathbb D(y,\mathbf U)\rangle
_{Y_{s}})-(q+\pi )\cdot \mathbb I \},{\mathbf n}onumber
{\mathbf e}nd{eqnarray}
\begin{eqnarray}\label{4.15}
&&\tau_{0}c_{pf}\frac{\partial \theta^{f}}{\partial t}+(\tau
_{0}c_{ps}+\frac{\beta_{0s}^{2}}{{\mathbf e}ta_{0}})(1-m)\frac{\partial
\vartheta}{\partial t} -\frac{\beta_{0s}}{{\mathbf e}ta_{0}}\frac{\partial
\pi}{\partial t} -\beta_{0f}\frac{\partial \beta}{\partial t}-\\
&&(1-m)\beta_{0s}\frac{\partial \gamma_{s}}{\partial t}= \varkappa
_{0s}\mbox{div}_x \{(1-m){\mathbf n}abla_{x}\vartheta + \langle {\mathbf n}abla
_{y}\Theta^{s}\rangle _{Y_{s}}\} +\Psi.{\mathbf n}onumber
{\mathbf e}nd{eqnarray}
{\mathbf e}nd{lemma}
\begin{proof}
Eqs. \eqref{4.14} and \eqref{4.15} arise as the limits of
Eqs. \eqref{1.7} and \eqref{1.8} with test functions that are finite
in $\Omega_T$ and independent of $\varepsilon$. In Eq. \eqref{1.8}
we have used the continuity equations \eqref{3.3} and \eqref{3.4}.
\end{proof}
\textbf{4.3. Micro- and macroscopic equations II.}
\begin{lemma} \label{lemma4.5}
If $\mu_{1}=\infty$, then ${\mathbf u}={\mathbf w}$ and $\theta =\vartheta $.
{\mathbf e}nd{lemma}
\begin{proof}
In order to verify this, it is sufficient to consider the differences
$({\mathbf u}^\varepsilon -{\mathbf w}^\varepsilon)$ and $(\theta^\varepsilon
-\vartheta^\varepsilon)$ and to apply the Friedrichs--Poincar\'{e}
inequality, just as in the proof of Theorem \ref{theorem1}.
\end{proof}
\begin{lemma} \label{lemma4.6}
Let $\mu_1 <\infty$ and $\mathbf V=\chi\partial \mathbf W / \partial t$. Then
\begin{equation}\label{4.16}
\tau_{0}\rho_{f}\frac{\partial \mathbf V}{\partial t}-\rho_{f}\mathbf F=
\mu_{1}\triangle_y \mathbf V -{\mathbf n}abla_y R -{\mathbf n}abla_x q, \quad {\mathbf y} \in Y_{f},
{\mathbf e}nd{equation}
\begin{equation}\label{4.17}
\tau_{0}c_{pf}\frac{\partial \Theta}{\partial t}= \varkappa _{1}
\mu_{1}\triangle_y \Theta +\frac{\beta_{0f}}{m}\frac{\partial
\beta}{\partial t} + \Psi, \quad {\mathbf y} \in Y_{f},
{\mathbf e}nd{equation}
\begin{equation}\label{4.18}
\mathbf V=\frac{\partial {\mathbf u}}{\partial t}, \quad \Theta =\vartheta, \quad {\mathbf y} \in \gamma
{\mathbf e}nd{equation}
for $\mu_{1}>0$, and
\begin{equation}\label{4.19}
\tau_{0}\rho_{f}\frac{\partial \mathbf V}{\partial t}= -{\mathbf n}abla_y R
-{\mathbf n}abla _{x} q +\rho_{f}\mathbf F, \quad {\mathbf y} \in Y_{f},
{\mathbf e}nd{equation}
\begin{equation}\label{4.20}
\tau_{0}c_{pf}\frac{\partial \Theta}{\partial t}=
\frac{\beta_{0f}}{m}\frac{\partial \beta}{\partial t} +\Psi, \quad
{\mathbf y} \in Y_{f},
{\mathbf e}nd{equation}
\begin{equation}\label{4.21}
(\chi \mathbf W - {\mathbf u})\cdot{\mathbf n}=0, \quad {\mathbf y} \in \gamma
{\mathbf e}nd{equation}
for $\mu_{1}=0$.
In Eq. \eqref{4.21}, ${\mathbf n}$ is the unit normal to $\gamma$.
\end{lemma}
\begin{proof}
Differential equations \eqref{4.16} and \eqref{4.19} follow, as
$\varepsilon\searrow 0$, from the integral equality \eqref{1.7} with the test function ${\mathbf
\psi}={\mathbf \varphi}({\mathbf x}\varepsilon^{-1})\cdot h({\mathbf x},t)$,
where ${\mathbf \varphi}$ is a solenoidal vector-function, finite in $Y_{f}$.
The same arguments apply to Eqs. \eqref{4.17} and
\eqref{4.20}. The only difference here is that we use the
continuity equation \eqref{3.3} to exclude the term $\chi
^{\varepsilon}\mbox{div}_x (\partial {\mathbf w}^\varepsilon / \partial t)$.
The first boundary condition in \eqref{4.18} is a consequence of the
two-scale convergence of $\{\alpha_{\mu}^{\frac{1}{2}}\nabla_x
{\mathbf w}^{\varepsilon}\}$ to the function
$\mu_{1}^{\frac{1}{2}}\nabla_y\mathbf W({\mathbf x},t,{\mathbf y})$. On the strength of this
convergence, the function $\nabla_y \mathbf W({\mathbf x},t,{\mathbf y})$ is $L^2$-integrable
in $Y$. The same argument applies to the second boundary
condition in \eqref{4.18}. The boundary condition \eqref{4.21}
follows from Eqs. \eqref{4.4} and \eqref{4.5}.
\end{proof}
\begin{lemma} \label{lemma4.7}
If the porous space is disconnected, which is the case of isolated
pores, then ${\mathbf u}={\mathbf w}$.
\end{lemma}
\begin{proof}
Indeed, in the case $0\leq \mu_{1}<\infty$ the systems of equations
\eqref{4.4}, \eqref{4.16} and \eqref{4.18}, or \eqref{4.4},
\eqref{4.19} and \eqref{4.21}, have the unique solution $\mathbf V=\partial
{\mathbf u} / \partial t$.
\end{proof}
\textbf{4.4. Homogenized equations I.}
\begin{lemma} \label{lemma4.8}
If $\mu_1 =\infty$ then ${\mathbf w}={\mathbf u}$, $\theta =\vartheta $ and the
weak limits ${\mathbf u}$, $\vartheta $, $p$, $q$, and $\pi$ satisfy in
$\Omega_{T}$ the initial-boundary value problem
\begin{equation}\label{4.22}
\left. \begin{array}{lll} \displaystyle \tau
_{0}{\mathbf h}at{\rho}\frac{\partial ^2{\mathbf u}}{\partial t^2}
+{\mathbf n}abla (q+\pi )-{\mathbf h}at{\rho}\mathbf F=\\[1ex]
\mbox{div}_x \{\lambda _{0}\mathbb A^{s}_{0}:\mathbb D(x,{\mathbf u}) + B^{s}_{0}(\mbox{div}_x
{\mathbf u}-\frac{\beta_{0s}}{{\mathbf e}ta_{0}}\vartheta )+B^{s}_{1}q \},
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
\begin{equation}\label{4.23}
(\tau_{0}{\mathbf h}at{c_{p}}+\frac{\beta_{0s}^{2}}{{\mathbf e}ta_{0}}(1-m))\frac{\partial
\vartheta}{\partial t} -\frac{\beta_{0s}}{{\mathbf e}ta_{0}}\frac{\partial
\pi}{\partial t}+(a^{s}_{1}-\frac{1}{{\mathbf e}ta_{0}})\langle
\frac{\partial q}{\partial t}\rangle_{\Omega} = \mbox{div}_x (
B^{\theta}\cdot {\mathbf n}abla \vartheta )+\Psi,
{\mathbf e}nd{equation}
\begin{equation}\label{4.24}
\frac{1}{{\mathbf e}ta_{0}}\pi+C^{s}_{0}:\mathbb D(x,{\mathbf u})+ a^{s}_{0}(\mbox{div}_x {\mathbf u} -
\frac{\beta_{0s}}{{\mathbf e}ta_{0}}\vartheta) +a^{s}_{1}q=\tilde{\gamma},
{\mathbf e}nd{equation}
\begin{equation}\label{4.25}
\frac{1}{{\mathbf e}ta_{0}}\pi + \mbox{div}_x {\mathbf u}+
\frac{(1-m)\beta_{0s}}{{\mathbf e}ta_{0}} \vartheta=\tilde{\beta},
{\mathbf e}nd{equation}
\begin{equation}\label{4.26}
q=p +\beta_{0f}m \vartheta +m\gamma_{f},
{\mathbf e}nd{equation}
where the symmetric, strictly positive definite, constant
fourth-rank tensor $\mathbb A^{s}_{0}$, the constant matrices $C^{s}_{0}$,
$B^{s}_{0}$, $B^{s}_{1}$, the strictly positive definite constant matrix
$B^{\theta}$,
and the constants $a^{s}_{0}$, $a^{s}_{1}$ and $a^{s}_{2}$ are
defined below by formulas
\eqref{4.33}--\eqref{4.35} and \eqref{4.38}, and
$$\tilde{\gamma}=(a^{s}_{1}-\frac{1}{\eta_{0}})\langle
q\rangle_{\Omega}-a^{s}_{0}\frac{\beta_{0s}}{\eta_{0}}\langle
\vartheta \rangle_{\Omega},
\quad -\tilde{\beta}=(1-m)\frac{\beta_{0s}}{\eta_{0}}\langle
\vartheta \rangle_{\Omega}+\frac{1}{\eta_{0}}\langle
q\rangle_{\Omega}.$$
Differential equations \eqref{4.22} and \eqref{4.23} are endowed with
initial conditions at $t=0$ and ${\mathbf x}\in \Omega$
\begin{equation}\label{4.27}
(\tau _{0}+\beta_{0s})(\vartheta-\vartheta_{0})=0,\quad\tau _{0}({\mathbf u}-{\mathbf u}_{0})=
\tau _{0}(\frac{\partial {\mathbf u}}{\partial t}-{\mathbf v}_{0})=0;
{\mathbf e}nd{equation}
and boundary conditions
\begin{equation}\label{4.28}
\vartheta ({\mathbf x},t)=0, \quad {\mathbf u}({\mathbf x},t)=0, \quad {\mathbf x}\in S, \quad t>0.
{\mathbf e}nd{equation}
{\mathbf e}nd{lemma}
\begin{proof}
In the first place let us note that ${\mathbf u} ={\mathbf w}$ and $\theta
=\vartheta $ due to Lemma \ref{lemma4.5}.
The differential equations \eqref{4.22} follow from the
macroscopic equations \eqref{4.14} after we insert into them the
expression $$\langle \mathbb D(y,\mathbf U)\rangle _{Y_{s}}=\mathbb A^{s}_{1}:\mathbb D(x,{\mathbf u})
+ B^{s}_{0}(\mbox{div}_x {\mathbf u}-\frac{\beta_{0s}}{\eta_{0}}(\vartheta
-\langle \vartheta \rangle_{\Omega}))
+B^{s}_{1}(q-\langle q\rangle_{\Omega}).$$
In turn, this expression is obtained from the solutions of
Eqs. \eqref{4.3} and \eqref{4.11} on the pattern cell $Y_{s}$.
Indeed, setting
\begin{eqnarray}{\mathbf n}onumber
\mathbf U=&&\sum_{i,j=1}^{3}\mathbf U^{ij}({\mathbf y})D_{ij}+
\mathbf U_{0}({\mathbf y})(\mbox{div}_x {\mathbf u}-\frac{\beta_{0s}}{{\mathbf e}ta_{0}}(\vartheta -\langle \vartheta
\rangle_{\Omega}))\\
&&+\mathbf U_{1}({\mathbf y})(q-\langle q\rangle_{\Omega})+\mathbf U_{2}({\mathbf y})\langle
q\rangle_{\Omega} {\mathbf n}onumber
{\mathbf e}nd{eqnarray}
\begin{eqnarray}{\mathbf n}onumber
\Pi=&&\lambda _{0}\sum_{i,j=1}^{3}\Pi^{ij}({\mathbf y})D_{ij}
+\Pi_{0}({\mathbf y})(\mbox{div}_x {\mathbf u}-\frac{\beta_{0s}}{{\mathbf e}ta_{0}}(\vartheta -\langle \vartheta
\rangle_{\Omega}))\\
&&+\Pi_{1}({\mathbf y})(q-\langle q\rangle_{\Omega})+\Pi_{2}({\mathbf y})\langle
q\rangle_{\Omega},{\mathbf n}onumber
{\mathbf e}nd{eqnarray}
where
$$D_{ij}=\frac{1}{2}(\frac{\partial u_{i}}{\partial x_{j}}+
\frac{\partial u_{j}}{\partial x_{i}}),$$
we arrive at the following periodic-boundary value problems in $Y$:
\begin{equation}\label{4.29}
\left. \begin{array}{lll} \displaystyle \mbox{div}_y \{(1-\chi )
(\mathbb D(y,\mathbf U^{ij})+J^{ij}) - \Pi ^{ij}\cdot \mathbb I \}=0,\\[1ex]
\frac{\lambda _{0}}{{\mathbf e}ta_{0}}\Pi ^{ij} +(1-\chi ) \mbox{div}_y \mathbf U^{ij}
=0;
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
\begin{equation}\label{4.30}
\left. \begin{array}{lll} \displaystyle \mbox{div}_y
\{\lambda_{0}(1-\chi ) \mathbb D(y,\mathbf U_{0}) - \Pi_{0}\cdot \mathbb I \}=0,\\[1ex]
\frac{1}{{\mathbf e}ta_{0}}\Pi _{0} + (1-\chi )(\mbox{div}_y \mathbf U_{0}+1) =0;
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
\begin{equation}\label{4.31}
\left. \begin{array}{lll} \displaystyle \mbox{div}_y \{\lambda_{0}(1-\chi
) \mathbb D(y,\mathbf U_{1}) - (\Pi_{1}+\frac{1}{m}\chi )\cdot \mathbb I
\}=0,\\[1ex]
\frac{1}{{\mathbf e}ta_{0}}\Pi _{1} +(1-\chi )\mbox{div}_y \mathbf U_{1}) =0.
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
\begin{equation}\label{4.32}
\left. \begin{array}{r} \displaystyle \mbox{div}_y \{\lambda_{0}(1-\chi
)\mathbb D(y,\mathbf U_{2}) - (\Pi_{2}+\frac{1}{m}\chi )\cdot \mathbb I \}=0, \\[1ex]
\displaystyle \frac{1}{{\mathbf e}ta_{0}}\Pi _{2} + (1-\chi )\mbox{div}_y \mathbf U_{2}
-\frac{(1-\chi )}{(1-m)}(\langle
\mbox{div}_y\mathbf U_{2}\rangle_{Y_{s}}+\frac{1}{{\mathbf e}ta_{0}})=0.
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
Note that
$$\beta=\sum_{i,j=1}^{3}\langle \mbox{div}_y\mathbf U^{ij}\rangle_{Y_{s}}
\langle D_{ij}\rangle_{\Omega} +\langle \mbox{div}_y\mathbf U_{0}\rangle_{Y_{s}}
\langle \mbox{div}_x {\mathbf u}-\frac{\beta_{0s}}{\eta_{0}}(\vartheta -\langle \vartheta
\rangle_{\Omega})\rangle_{\Omega} + $$
$$\langle \mbox{div}_y\mathbf U_{1}\rangle_{Y_{s}}
\langle q-\langle q\rangle_{\Omega}\rangle_{\Omega}+
\langle \mbox{div}_y\mathbf U_{2}\rangle_{Y_{s}}
\langle q\rangle_{\Omega}=\langle \mbox{div}_y\mathbf U_{2}\rangle_{Y_{s}}
\langle q\rangle_{\Omega}$$
due to the homogeneous boundary conditions for ${\mathbf u}({\mathbf x},t)$.
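Indeed, by the divergence theorem and the homogeneous boundary condition ${\mathbf u}=0$ on $S$ we have $\langle D_{ij}\rangle_{\Omega}=0$ and $\langle \mbox{div}_x{\mathbf u}\rangle_{\Omega}=0$, while the terms containing $\vartheta-\langle\vartheta\rangle_{\Omega}$ and $q-\langle q\rangle_{\Omega}$ drop out once $\langle\,\cdot\,\rangle_{\Omega}$ is understood as the mean value over $\Omega$, as the notation suggests.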
On the strength of the assumptions on the geometry of the pattern
``rigid'' cell $Y_{s}$, problems \eqref{4.29}--\eqref{4.32} have a
unique solution, up to an arbitrary constant vector. In order
to discard the arbitrary constant vectors we demand
$$\langle\mathbf U^{ij}\rangle_{Y_{s}} =\langle\mathbf U_{0}\rangle_{Y_{s}}
=\langle\mathbf U_{1}\rangle_{Y_{s}} =\langle\mathbf U_{2}\rangle_{Y_{s}}=0.$$
Thus
\begin{equation}\label{4.33}
\mathbb A^{s}_{0}=\sum_{i,j=1}^{3}J^{ij}\otimes J^{ij} + \mathbb A^{s}_{1}, \quad
\mathbb A^{s}_{1}=\sum_{i,j=1}^{3}\langle (1-\chi) D(y,\mathbf U^{ij})\rangle _{Y}\otimes
J^{ij}.
{\mathbf e}nd{equation}
The symmetry and strict positive definiteness of the tensor $\mathbb A^{s}_{0}$
were proved in \cite{AM}.
Finally, Eqs. \eqref{4.24}--\eqref{4.26} for the pressures follow
from Eqs. \eqref{4.7}--\eqref{4.9}, after we insert into them the
expression
$$\langle \mbox{div}_y\mathbf U\rangle_{Y_{s}}=C^{s}_{0}:\mathbb D(x,{\mathbf u})+ \tilde{a}^{s}_{0}(\mbox{div}_x {\mathbf u} -
\frac{\beta_{0s}}{\eta_{0}}(\vartheta -\langle \vartheta
\rangle_{\Omega})) +a^{s}_{1}(q-\langle
q\rangle_{\Omega})+a^{s}_{2}\langle q\rangle_{\Omega},$$ where
\begin{equation}\label{4.34}
B^{s}_{0}=\langle\mathbb D(y,\mathbf U_{0})\rangle _{Y_{s}}, \quad
B^{s}_{1}=\langle\mathbb D(y,\mathbf U_{1})\rangle _{Y_{s}}, \quad
C^{s}_{0}=\sum_{i,j=1}^{3}\langle\mbox{div}_y\mathbf U^{ij}\rangle _{Y_{s}}J^{ij},
{\mathbf e}nd{equation}
\begin{equation}\label{4.35}
\tilde{a}^{s}_{0}= \langle\mbox{div}_y\mathbf U_{0}\rangle _{Y_{s}}=a^{s}_{0}-1+m, \quad
a^{s}_{1}= \langle\mbox{div}_y\mathbf U_{1}\rangle _{Y_{s}}, \quad
a^{s}_{2}= \langle\mbox{div}_y\mathbf U_{2}\rangle _{Y_{s}}.
{\mathbf e}nd{equation}
Now for $i=1,2,3$ we consider the model problems
\begin{eqnarray} \label{4.36}
&& \displaystyle \triangle _{y}\Theta_{i} ^{s}
= 0, \quad {\mathbf y}\in Y_s,\\
{\mathbf n}onumber && \displaystyle
\frac{\partial\Theta_{i} ^{s}}{\partial n}=- {\mathbf e}_{i}\cdot \mathbf{n},
\quad {\mathbf y}\in \gamma
{\mathbf e}nd{eqnarray}
and put
\begin{equation}\label{4.37}
\Theta ^{s}=\sum_{i=1}^{3}(\Theta_{i} ^{s}\otimes {\mathbf e}_{i})\cdot
{\mathbf n}abla _{x}\vartheta .
{\mathbf e}nd{equation}
Then $\Theta ^{s}$ solves the problem \eqref{4.12}--\eqref{4.13},
and if we insert the expression for $\langle \nabla
_{y}\Theta^{s}\rangle _{Y_{s}}$ into \eqref{4.15}, we get
\begin{equation}\label{4.38}
B^{\theta}=\varkappa_{0s}((1-m)\mathbb I+\sum_{i=1}^{3}\langle{\mathbf n}abla_{y}\Theta_{i}
^{s}\rangle _{Y_{s}}\otimes {\mathbf e}_{i}).
{\mathbf e}nd{equation}
All properties of the matrix $B^{\theta}$ are well known (see
\cite{S-P,JKO}).
\end{proof}
\begin{lemma} \label{lemma4.9}
If the porous space is disconnected, then ${\mathbf w}={\mathbf u}$ and the weak
limits $\theta ^{f}$, ${\mathbf u}$, $\vartheta $, $p$, $q$, and $\pi$
satisfy in $\Omega_{T}$ equations \eqref{4.22},
\eqref{4.24}, \eqref{4.25}, \eqref{4.10}, where $\mathbb A^{s}_{0}$,
$C^{s}_{0}$, $B^{s}_{0}$, $B^{s}_{1}$, $B^{\theta}$, $a^{s}_{0}$,
$a^{s}_{1}$ and $a^{s}_{2}$ are the same as in Lemma \ref{lemma4.8},
the state equation \eqref{4.7}, and the heat equation
\begin{equation}\label{4.39}
\left. \begin{array}{lll} \displaystyle
\tau_{0}c_{pf}\frac{\partial \theta^{f}}{\partial
t}+(\tau_{0}c_{ps}+
\frac{\beta_{0s}^{2}}{{\mathbf e}ta_{0}})(1-m)\frac{\partial
\vartheta}{\partial t}-\frac{\beta_{0s}}{{\mathbf e}ta_{0}}\frac{\partial
\pi}{\partial t} +(a^{s}_{1}-\frac{1}{{\mathbf e}ta_{0}})\langle
\frac{\partial q}{\partial t}\rangle_{\Omega}=\\[1ex]
\mbox{div}_x ( B^{\theta}\cdot {\mathbf n}abla \vartheta ) +\Psi,
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
where for $\mu_{1}>0$ and $\tau_{0} >0$
\begin{equation}\label{4.40}
\theta^{f}({\mathbf x},t)=m\vartheta ({\mathbf x},t) +\int
_{0}^{t}b^{\theta}_{f}(t-\tau
)(\frac{1}{\tau_{0}c_{pf}}(\frac{\beta_{0f}}{m}\frac{\partial
\beta}{\partial t} + \Psi )-\frac{\partial \vartheta }{\partial
t})({\mathbf x},\tau )d\tau .
{\mathbf e}nd{equation}
If $\mu_{1}>0$ and $\tau_{0} =0$, then
\begin{equation}\label{4.41}
\theta^{f}({\mathbf x},t)= m\vartheta ({\mathbf x},t) -
c^{\theta}_{f}(\frac{\beta_{0f}}{m}\frac{\partial \beta}{\partial
t}(t) + \Psi ({\mathbf x},t)).
{\mathbf e}nd{equation}
Finally, if $\mu_{1}=0$, then
\begin{equation}\label{4.42}
\theta^{f}({\mathbf x},t)=m\vartheta_{0}({\mathbf x})+\frac{m}{\tau _{0}c_{pf}} \int
_{0}^{t}(\frac{\beta_{0f}}{m}\frac{\partial \beta}{\partial t}(\tau)
+ \Psi ({\mathbf x},\tau))d\tau .
{\mathbf e}nd{equation}
Here $ b^{\theta}_{f}(t)$ and $c^{\theta}_{f}$
are defined below by formulas \eqref{4.43}--\eqref{4.45}.
The problem is endowed with initial and boundary conditions
\eqref{4.27} and \eqref{4.28}.
{\mathbf e}nd{lemma}
\begin{proof}
The only difference from the previous lemma lies in the heat
equation for $\vartheta$ and the state equation for the pressures,
because $\theta\neq \vartheta$. The function $\theta^{f}=\langle
\Theta \rangle _{Y_{f}}$ is now determined from the microscopic equations
\eqref{4.17} and \eqref{4.18} if
$\mu_{1}>0$, and from the microscopic equation \eqref{4.20} if $\mu_{1}=0$.
Indeed, the solutions of the above-mentioned problems are given by
the formulas
$$\Theta =\vartheta ({\mathbf x},t) +\int
_{0}^{t}\Theta _{1}^{f}({\mathbf y},t-\tau )h({\mathbf x},\tau )d\tau ,$$ if
$\mu_{1}>0$ and $\tau_{0} >0$, and
$$\Theta =\vartheta ({\mathbf x},t) -\Theta _{0}^{f}({\mathbf y})(\frac{\beta_{0f}}{m}\frac{\partial
\beta}{\partial t}(t) + \Psi ({\mathbf x},t)),$$ if $\mu_{1}>0$ and $\tau_{0}
=0$, where
$$h=\frac{1}{\tau_{0}c_{pf}}(\frac{\beta_{0f}}{m}\frac{\partial
\beta}{\partial t} + \Psi )-\frac{\partial \vartheta }{\partial t}$$
and functions $\Theta _{1}^{f}$ and $\Theta ^{f}_{0}$ are
1-periodic in ${\mathbf y}$ solutions of the problems
\begin{equation}\label{4.43}
\left. \begin{array}{lll} \displaystyle
\tau_{0}c_{pf}\frac{\partial\Theta _{1}^{f}}{\partial t}= \varkappa
_{1} \mu_{1}\triangle_y \Theta _{1}^{f}, \quad {\mathbf y} \in
Y_{f},\\[1ex]
\Theta _{1}^{f}({\mathbf y},0)=1, \quad {\mathbf y} \in Y_{f}; \quad \Theta _{1}^{f}
=0, \quad {\mathbf y} \in \gamma ,
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
and
\begin{equation}\label{4.44}
\varkappa _{1} \mu_{1}\triangle_y
\Theta _{0} ^{f}=1, \quad {\mathbf y} \in Y_{f}; \quad \Theta _{0}^{f} =0,
\quad {\mathbf y} \in \gamma .
{\mathbf e}nd{equation}
Then, in accordance with the definition, the function $\theta^{f}$ is
given by
\eqref{4.40} or \eqref{4.41}, where
\begin{equation}\label{4.45}
b^{\theta}_{f}(t)=\langle \Theta _{1}^{f}\rangle _{Y_{f}}, \quad
c^{\theta}_{f}=\langle \Theta _{0}^{f}\rangle _{Y_{f}}.
{\mathbf e}nd{equation}
If $\mu_{1}=0$, then $\Theta $ is found by a simple integration in
time.
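In more detail (a straightforward computation included only for the reader's convenience): integrating \eqref{4.20} in time gives
$$\Theta ({\mathbf x},t,{\mathbf y})=\Theta ({\mathbf x},0,{\mathbf y})+\frac{1}{\tau_{0}c_{pf}}\int_{0}^{t}\Big(\frac{\beta_{0f}}{m}\frac{\partial\beta}{\partial t}(\tau)+\Psi({\mathbf x},\tau)\Big)d\tau ,$$
and averaging over $Y_{f}$ (recall that $|Y_{f}|=m$), with the initial value $\Theta ({\mathbf x},0,{\mathbf y})=\vartheta_{0}({\mathbf x})$ that formula \eqref{4.42} presupposes, yields \eqref{4.42}.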
\end{proof}
\textbf{4.5. Homogenized equations II.}
Let $\mu_{1}<\infty$. In the same manner as above we verify that
the weak limit ${\mathbf u}$ of the sequence $\{{\mathbf u}^\varepsilon\}$ satisfies
an initial-boundary value problem similar to, but in general different
from, problem \eqref{4.22}--\eqref{4.28}, because the weak limit ${\mathbf w}$ of the
sequence $\{{\mathbf w}^\varepsilon\}$ in general differs from ${\mathbf u}$. More precisely,
the following statement is true.
\begin{lemma} \label{lemma4.10}
If $\mu_{1}<\infty$ then the weak limits ${\mathbf u}$, ${\mathbf w}^{f}$, $\theta
^{f}$, $\vartheta $, $p$, $q$, and $\pi$ of the sequences
$\{{\mathbf u}^\varepsilon\}$, $\{\chi^{\varepsilon}{\mathbf w}^\varepsilon\}$,
$\{\chi^{\varepsilon}\theta ^\varepsilon\}$, $\{\vartheta ^\varepsilon\}$,
$\{p^\varepsilon\}$, $\{q^\varepsilon\}$, and $\{\pi^\varepsilon\}$
satisfy the initial-boundary value problem in $\Omega_T$, consisting
of the balance of momentum equation
\begin{equation}\label{4.46}
\left. \begin{array}{lll} \displaystyle\tau
_{0}(\rho_{f}\frac{\partial ^2{\mathbf w}^{f}}{\partial
t^2}+\rho_{s}(1-m)\frac{\partial ^2{\mathbf u}}{\partial t^2})
+{\mathbf n}abla (q+\pi )-{\mathbf h}at{\rho}\mathbf F= \\[1ex]
\mbox{div}_x \{\lambda _{0}A^{s}_{0}:\mathbb D(x,{\mathbf u}) +
B^{s}_{0}\mbox{div}_x {\mathbf u}
+B^{s}_{1}q \},
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
where $\mathbb A^{s}_{0}$, $B^{s}_{0}$ and $B^{s}_{1}$ are the same as in
\eqref{4.22}, the continuity equation \eqref{4.24}, the continuity equation
\begin{equation} \label{4.47}
\frac{1}{{\mathbf e}ta_{0}}(\pi +\langle
q\rangle_{\Omega})+\mbox{div}_x {\mathbf w}^{f} +
\frac{(1-m)\beta_{0s}}{{\mathbf e}ta_{0}}(\vartheta -\langle \vartheta
\rangle_{\Omega})= (m-1)\mbox{div}_x {\mathbf u} ,
{\mathbf e}nd{equation}
state equation {\mathbf e}qref{4.7}, heat equation {\mathbf e}qref{4.39}
and Darcy's law in the form
\begin{equation}\label{4.48}
\frac{\partial {\mathbf w}^{f}}{\partial t}=\frac{\partial {\mathbf u}}{\partial
t}+\int_{0}^{t} B_{1}(\mu_1,t-\tau)\cdot (-{\mathbf n}abla_x
q+\rho_{f}\mathbf F-\tau_{0}\rho_{f}\frac{\partial ^2 {\mathbf u}}{\partial \tau
^2})({\mathbf x},\tau )d\tau
{\mathbf e}nd{equation}
if $\tau_{0}>0$ and $\mu_{1}>0$, Darcy's law in the form
\begin{equation}\label{4.49}
\frac{\partial {\mathbf w}^{f}}{\partial t}=\frac{\partial {\mathbf u}}{\partial
t}+B_{2}(\mu_1)\cdot(-{\mathbf n}abla_x q+\rho_{f}\mathbf F)
{\mathbf e}nd{equation}
if $\tau_{0}=0$ and, finally, Darcy's law in the form
\begin{equation}\label{4.50}
\frac{\partial {\mathbf w}^{f}}{\partial t}=B_{3}\cdot \frac{\partial
{\mathbf u}}{\partial t}+\frac{1}{\tau
_{0}\rho_{f}}(m\mathbb I-B_{3})\cdot\int_{0}^{t}(-{\mathbf n}abla_x
q+\rho_{f}\mathbf F)({\mathbf x},\tau )d\tau
{\mathbf e}nd{equation}
if $\mu_{1}=0$. The
problem is supplemented by boundary and initial conditions
\eqref{4.27}--\eqref{4.28} for the displacement ${\mathbf u}$ and
temperature $\vartheta$ of the rigid component and by the boundary
condition
\begin{equation}\label{4.51}
{\mathbf w}^{f}({\mathbf x},t)\cdot {\mathbf n}({\mathbf x})=0,
\quad ({\mathbf x},t) \in S=\partial \Omega , \quad t>0,
\end{equation}
for the displacement $ {\mathbf w}^{f}$ of the liquid component. In
Eqs. \eqref{4.46}--\eqref{4.51}, ${\mathbf n}({\mathbf x})$ is the unit normal vector
to $S$ at a point ${\mathbf x} \in S$, and the matrices $B_{1}(\mu_1,t)$,
$B_{2}(\mu_1)$, and $B_{3}$ are defined below by formulas
\eqref{4.52}--\eqref{4.57}.
\end{lemma}
\begin{proof}
Eqs. \eqref{4.46} and \eqref{4.47} are derived in the usual
way, like Eqs. \eqref{4.22} and \eqref{4.25}. For example, to
get Eq. \eqref{4.47} we simply express $\mbox{div}_x {\mathbf w} $ in
Eq. \eqref{4.9} using the homogenization of Eq. \eqref{4.5}:
${\mathbf w}={\mathbf w}^{f}+(1-m){\mathbf u}.$ We therefore omit the corresponding proofs and
focus only on the derivation of the homogenized equations for the
velocity in the form of Darcy's laws. The derivation of Eq.
\eqref{4.51} is standard \cite{S-P}.
a) If $\mu_{1}>0$ and $\tau_{0}>0$, then the solution of the
microscopic equations \eqref{4.4}, \eqref{4.16} and \eqref{4.18} is
given by the formula
\begin{equation*}
\mathbf V=\frac{\partial {\mathbf u}}{\partial t}+\int_{0}^{t}
\textbf{B}^{f}_{1}({\mathbf y},t-\tau)\cdot (-{\mathbf n}abla_x
q+\rho_{f}\mathbf F-\tau_{0}\rho_{f}\frac{\partial ^2 {\mathbf u}}{\partial \tau
^2})({\mathbf x},\tau )d\tau ,
{\mathbf e}nd{equation*}
where
\begin{equation*}
\textbf{B}^{f}_{1}({\mathbf y},t)= \sum_{i=1}^{3}\mathbf V^{i}({\mathbf y},t)\otimes {\mathbf e}_{i},
{\mathbf e}nd{equation*}
and the functions $\mathbf V^{i}({\mathbf y},t)$ are defined by the periodic
initial-boundary value problem
\begin{equation}\label{4.52}
\left. \begin{array}{lll} \displaystyle \tau
_{0}\rho_{f}\frac{\partial \mathbf V^{i}}{\partial t}-\mu_{1}\triangle
\mathbf V^{i} +{\mathbf n}abla Q^{i} =0,
\quad \mbox{div}_y \mathbf V^{i} =0, \quad {\mathbf y} \in Y_{f}, t>0,\\[1ex]
\mathbf V^{i}=0, \quad {\mathbf y} \in \gamma , t>0;\quad
\tau _{0}\rho_{f}\mathbf V^{i}(y,0)={\mathbf e}_{i}, \quad {\mathbf y} \in Y_{f}.
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
In \eqref{4.52}, ${\mathbf e}_{i}$ is the standard Cartesian
basis vector of the coordinate axis $x_{i}$.
Therefore
\begin{equation}\label{4.53}
B_{1}(\mu_{1},t)= \langle \textbf{B}^{f}_{1}({\mathbf y},t)\rangle _{Y_{s}}.
{\mathbf e}nd{equation}
b) If $\tau_{0}=0$ and $\mu_{1}>0$, then the solution of
the stationary microscopic equations \eqref{4.4}, \eqref{4.16} and
\eqref{4.18} is given by the formula
\begin{equation*}
\mathbf V=\frac{\partial {\mathbf u}}{\partial
t}+\textbf{B}^{f}_{2}({\mathbf y})\cdot(-{\mathbf n}abla q+\rho_{f}\mathbf F),
{\mathbf e}nd{equation*}
where
\begin{equation*}
\textbf{B}^{f}_{2}({\mathbf y})= \sum_{i=1}^{3}\mathbf U^{i}({\mathbf y})\otimes {\mathbf e}_{i} ,
{\mathbf e}nd{equation*}
and functions $\mathbf U^{i}({\mathbf y})$ are defined from the periodic boundary
value problem
\begin{equation}\label{4.54}
\left. \begin{array}{lll} \displaystyle -\mu_{1}\triangle \mathbf U^{i}
+{\mathbf n}abla R^{i} ={\mathbf e}_{i}, \quad \mbox{div}_y \mathbf U^{i} =0, \quad {\mathbf y} \in
Y_{f},\\[1ex]
\mathbf U^{i}=0, \quad {\mathbf y} \in \gamma .
{\mathbf e}nd{array} \right\}
{\mathbf e}nd{equation}
Thus
\begin{equation}\label{4.55}
B_{2}(\mu_{1})= \langle \textbf{B}^{f}_{2}({\mathbf y})\rangle _{Y_{s}}.
\end{equation}
The matrices $B_{1}(\mu_1,t)$ and $B_{2}(\mu_1)$ are symmetric
and positive definite \cite[Chap. 8]{S-P}.
c) Finally, if $\tau_{0}>0$ and $\mu_{1}=0$, then in the process of solving
the system \eqref{4.4}, \eqref{4.19} and \eqref{4.21}
we first find the pressure $R({\mathbf x},t,{\mathbf y})$ by solving a
Neumann problem for Laplace's equation in
$Y_{f}$. If $${\mathbf h}({\mathbf x},t)=-\tau_{0}\rho_{f}\frac{\partial ^2{\mathbf u}}{\partial t^2}({\mathbf x},t)
-\nabla q({\mathbf x},t)+\rho_{f}\mathbf F({\mathbf x},t),$$ then
$$R({\mathbf x},t,{\mathbf y})=\sum_{i=1}^{3}R_{i}({\mathbf y})\, {\mathbf e}_{i}\cdot {\mathbf h}({\mathbf x},t),$$
where $R_{i}({\mathbf y})$ is the solution of the problem
\begin{equation}\label{4.56}
\triangle R_{i}=0,\quad {\mathbf y} \in Y_{f}; \quad {\mathbf n}abla R_{i}\cdot {\mathbf n}
={\mathbf n}\cdot {\mathbf e}_{i}, \quad {\mathbf y} \in \gamma .
{\mathbf e}nd{equation}
Formula \eqref{4.50} appears as the result of integration with
respect to time in the homogenization of Eq. \eqref{4.19}, and
\begin{equation}\label{4.57}
B_{3}=\sum_{i=1}^{3}\langle \nabla R_{i}({\mathbf y})\rangle _{Y_{s}}\otimes
{\mathbf e}_{i},
\end{equation}
where the matrix $(m\mathbb I - B_3)$ is symmetric and positive
definite \cite[Chap. 8]{S-P}.
\end{proof}
\begin{thebibliography}{20}
\bibitem{ACE}
\textit{Acerbi E., Chiado Piat V., Dal Maso G., Percivale D.} An
extension theorem from connected sets and homogenization in general
periodic domains // Nonlinear Anal. , V. 18, (1992), P. 481--496.
\bibitem{B-K}
\textit{Burridge R., Keller J. B.} Poroelasticity equations derived
from microstructure // J. Acoust. Soc. Am., V. 70,N4,( 1981), P.
1140--1146.
\bibitem{BIOT}
\textit{Biot M.} Generalized theory of acoustic propagation in
porous dissipative media // J. Acoust. Soc. Am., V. 34, (1962), P.
1256--1264.
\bibitem{G-M2}
\textit{Clopeau Th., Ferrin J. L., Gilbert R. P., Mikeli\'{c} A.}
Homogenizing the acoustic properties of the seabed: Part II //
Mathematical and Computer Modelling, V. 33, (2001), P. 821--841.
\bibitem{G-M3}
\textit{Ferrin J. L., Mikeli\'{c} A.} Homogenizing the acoustic
properties of a porous matrix containing an incompressible inviscid
fluids // Math. Meth. Appl. Sci., V. 26, ( 2003), P. 831--859.
\bibitem{G-M1}
\textit{Gilbert R. P., Mikeli\'{c} A.} Homogenizing the acoustic
properties of the seabed: Part I // Nonlinear Analysis, V. 40,
(2000), P. 185--212.
\bibitem{JKO}
\textit{Jikov V. V., Kozlov S. M., and Oleinik O. A.} Homogenization
of Differential Operators and Integral Functionals,
(1994), Springer-Verlag, New York.
\bibitem{LAD}
\textit{Ladyzhenskaya O.A.} The mathematical Theory of Viscous
Incompressible Flow, Gordon and Breach, New York, 1969.
\bibitem{LNW}
\textit{Lukkassen, D., Nguetseng, G., Wall P.} Two-scale convergence
// Int. J. Pure and Appl. Math., V. 2, N1, (2002), P. 35--86.
\bibitem{AM}
\textit{Meirmanov A.} Nguetseng's two-scale convergence Method
for filtration and seismic acoustic problems
in elastic porous media// Submitted to Siberian Math. Journal.
\bibitem{MS}
\textit{Meirmanov A.M., Sazhenkov S.A.} Generalized solutions to the
linearized equations of thermoelastic solid and viscous
thermofluid.// Submitted to EJDE.
\bibitem{NGU}
\textit{Nguetseng G.} A general convergence result for a functional
related to the theory of homogenization // SIAM J. Math. Anal., V.
20, ( 1989), P. 608--623.
\bibitem{GNG}
\textit{Nguetseng G.} Asymptotic analysis for a stiff variational
problem arising in mechanics // SIAM J. Math. Anal., V. 21, (1990),
P. 1394-1414.
\bibitem{S-P}
\textit{Sanchez-Palencia E.,} Non-Homogeneous Media and Vibration
Theory, Lecture Notes in Physics, Vol.129, (1980), Springer, Berlin.
\end{thebibliography}
\end{document}
\begin{document}
\title{Genuinely entangled symmetric states with no $N$-partite correlations}
\author{S.~Designolle$^{1,2}$, O.~Giraud$^{2}$, and J.~Martin$^{3}$}
\affiliation{
$^{1}$\'Ecole polytechnique, 91128 Palaiseau Cedex, France\\
$^{2}$LPTMS, CNRS, Univ.~Paris-Sud, Universit\'e
Paris-Saclay, 91405 Orsay, France\\
$^{3}$Institut de Physique Nucl\'{e}aire, Atomique et de Spectroscopie, CESAM,
Universit\'{e} de Li\`{e}ge, B\^{a}t.~B15, B - 4000 Li\`{e}ge, Belgium.
}
\date{July 13, 2017}
\begin{abstract}
We investigate genuinely entangled $N$-qubit states with no $N$-partite correlations in the case of symmetric states. Using a tensor representation for mixed symmetric states, we obtain a simple characterization of the absence of $N$-partite correlations. We show that symmetric states with no $N$-partite correlations cannot exist for an even number of qubits. We fully identify the set of genuinely entangled symmetric states with no $N$-partite correlations in the case of three qubits, and in the case of rank-2 states. We present a general procedure to construct families for an arbitrary odd number of qubits.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
Entanglement is one of the most remarkable aspects of quantum physics. A pure state is not separable, or entangled, if it cannot be written as a tensor product of single-party states, and a mixed entangled state is a state that cannot be written as a mixture of pure separable states. While the case of bipartite systems is well understood, the multipartite case is much more involved, as there are many ways in which a state can be entangled. A pure multipartite state may be fully separable, that is, a tensor product of pure states of each party. In such a case, there are no correlations between subsystems. A pure multipartite state may also be biseparable, if it can be written as the tensor product of two pure states under a certain bipartition of the system. The strongest form of entanglement in a multipartite state occurs when the state is not biseparable whatever the bipartition. This leads to the following definition: a genuinely $N$-partite entangled pure state is a state which is entangled (not biseparable) along any bipartition. Similar hierarchies exist for mixed states. In particular, a genuinely $N$-partite entangled mixed state is a state such that in any of its pure state decompositions, there exists at least one pure state which is genuinely $N$-partite entangled \cite{Wer89}. Or, as put by Bennett {\it et al.} \cite{Ben11}, by mixing pure states which do not have genuine $N$-partite entanglement one cannot obtain mixed states with genuine $N$-partite entanglement.
In quantum physics, correlations between subsystems are central in the question of entanglement. They can lead to correlations between measurement results violating Bell inequalities, thereby discarding the possibilities of local hidden variable theories. In the case of mixed states, the distinction between classical and quantum correlations is a subtle one (see e.g.~\cite{Oll01,Hen01,Ben11}). While for pure states, correlations imply entanglement and vice versa, this is not true anymore for mixed states. It is possible to construct separable mixed states which possess quantum correlations~\cite{Oll01}. Conversely, there exist genuinely entangled $N$-qubit states with vanishing $N$-partite correlation functions~\cite{Kas08}. In \cite{Wie09,Sch15}, genuinely entangled multiphoton states with vanishing $N$-partite correlation functions were created experimentally: qubits were encoded in the polarization of photons. Entanglement in these states cannot be detected by usual multipartite Bell inequalities involving only $N$-partite correlations, which led to the construction of suitable Bell inequalities able to detect $N$-partite entanglement but involving only $(N-1)$-partite correlations~\cite{Wie12}. In \cite{Sch15}, a continuous family of genuinely entangled three-qubit states without three-partite correlations was constructed. Families of examples were obtained for any odd number of qubits in \cite{Tra17}, and an analytical construction of genuinely entangled rank-4 $N$-qubit states without $N$-partite correlations for any even number $N\geqslant 4$ of qubits was presented.
In this paper, we investigate the case of $N$-qubit symmetric states and present a general procedure to construct families of genuinely entangled states with no $N$-partite correlations. To this end, we use the recently introduced tensor representation of spin states~\cite{Gir15} that we briefly review in Sec.~\ref{sec:tensor}. The consequences of the absence of $N$-partite correlations are expressed in terms of these tensors in Sec.~\ref{sec:symchar}. Using entanglement criteria devised in Sec.~\ref{sec:entcrit}, we fully identify the set of genuinely entangled symmetric three-qubit states with no correlations in Sec.~\ref{sec:threequbits}, and present a general construction procedure for an arbitrary odd number of qubits in Sec.~\ref{sec:families}.
\section{Mixed symmetric states in the tensor representation}\label{sec:tensor}
Symmetric states of $N$ qubits are pure states which are invariant under permutation of the qubits, or mixtures of such pure states. Symmetric states can be expanded in the basis of Dicke states $\{\ket{D_N^{(k)}}:0\leqslant k \leqslant N\}$ defined by
\begin{equation}
\label{DickeStates}
{\ket{D_N^{(k)}}=\frac{1}{\sqrt{\binom{N}{k}}}\sum_\pi \ket{\underbrace{0 \dots 0}_{N-k} \underbrace{1\dots 1}_{k}}},
\end{equation}
where the sum runs over all permutations of the qubits. It is convenient to introduce the projector
\begin{equation}
\label{projps}
P_{\mathcal{S}}=\sum_k\ketbra{D_N^{(k)}}
\end{equation}
onto the symmetric subspace $\mathcal{S}$ spanned by Dicke states \eqref{DickeStates}.
The density matrix associated with a symmetric state can be expressed in the Dicke basis as an $(N+1)\times (N+1)$ positive semidefinite matrix of unit trace. A convenient representation for symmetric states was introduced in \cite{Gir15}. In this representation, any density matrix $\rho$ can be expressed as
\begin{equation}
\label{projrho}
\rho=\frac{1}{2^N} x_{\mu_1\mu_2\ldots\mu_N} S_{\mu_1\mu_2\ldots\mu_N},
\end{equation}
with implicit summation over indices $\mu_i$ ($1\leqslant i \leqslant N$), $0\leqslant\mu_i\leqslant 3$. Here $S_{\mu_1\mu_2\ldots\mu_N}$ are $(N+1)\times (N+1)$ Hermitian matrices defined, for $0\leqslant k,k'\leqslant N$, by their entries
\begin{equation}
\label{matrixS}
\left(S_{\mu_1\mu_2\ldots\mu_N}\right)_{kk'}=\bra{D_N^{(k)}}\sigma_{\mu_1} \otimes \cdots \otimes \sigma_{\mu_N} \ket{D_N^{(k')}},
\end{equation}
where $\sigma_{1},\sigma_2,\sigma_3$ are the three Pauli matrices (with $\sigma_0$ the $2\times 2$ identity matrix). The coordinates $x_{\mu_1\mu_2\ldots\mu_N}$ are real numbers invariant under permutation of the indices and such that
\begin{equation}
\label{Tensorrep}
x_{\mu_1\mu_2\ldots\mu_N}=\mathrm{tr}(\rho S_{\mu_1\mu_2\ldots\mu_N}).
\end{equation}
Note that, since $\rho$ is a symmetric state, we denote by the same symbol its $2^N\times 2^N$ representing matrix in the computational basis and its $(N+1)\times (N+1)$ representing matrix in the Dicke basis. In particular we have $\rho=P_{\mathcal{S}}\rho P_{\mathcal{S}}$, with $P_{\mathcal{S}}$ the projector \eqref{projps}. As a consequence, using \eqref{matrixS} and \eqref{Tensorrep}, we have $\mathrm{tr}(\rho \sigma_{\mu_1} \otimes \cdots \otimes \sigma_{\mu_N})=\mathrm{tr}(\rho P_{\mathcal{S}} \sigma_{\mu_1} \otimes \cdots \otimes \sigma_{\mu_N}P_{\mathcal{S}})=\mathrm{tr}(\rho S_{\mu_1\mu_2\ldots\mu_N})=x_{\mu_1\mu_2\ldots\mu_N}$. Therefore, these coordinates have a physical interpretation in terms of correlators, as
\begin{equation}
\label{correl}
x_{\mu_1\mu_2\ldots\mu_N}=\big\langle \sigma_{\mu_1} \otimes \cdots \otimes \sigma_{\mu_N}\big\rangle_\rho.
\end{equation}
Since $S_{0\ldots0}$ is the identity matrix, we have in particular $x_{0\ldots0}=\mathrm{tr}\rho=1$. Properties of the Pauli matrices imply moreover that for any $\mu_i$
\begin{equation}
\label{traceless}
\sum_{a=1}^3 x_{\mu_1\ldots\mu_{N-2}aa}=x_{\mu_1\ldots\mu_{N-2}00}.
\end{equation}
This is due to the fact that for $N=2$, we have the identity $\sum_{a=1}^3 P_{\mathcal{S}}\sigma_{a} \otimes\sigma_{a}P_{\mathcal{S}}=P_{\mathcal{S}}\sigma_{0} \otimes\sigma_{0}P_{\mathcal{S}}$. Since the $ x_{\mu_1\mu_2\ldots\mu_N}$ are invariant under permutation of indices, the position of the two indices $a$ in \eqref{traceless} does not matter.
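One way to check this two-qubit identity directly is to write $\mathrm{SWAP}$ for the operator exchanging the two qubits (a notation used only here) and recall the standard relation
\begin{equation*}
\sum_{a=1}^{3}\sigma_a\otimes\sigma_a=2\,\mathrm{SWAP}-\sigma_0\otimes\sigma_0 .
\end{equation*}
Since $\mathrm{SWAP}$ acts as the identity on the symmetric subspace, $P_{\mathcal{S}}\,\mathrm{SWAP}\,P_{\mathcal{S}}=P_{\mathcal{S}}$, and the identity $\sum_{a=1}^3 P_{\mathcal{S}}\sigma_{a}\otimes\sigma_{a}P_{\mathcal{S}}=P_{\mathcal{S}}\sigma_{0}\otimes\sigma_{0}P_{\mathcal{S}}$ follows.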
As has been shown in~\cite{Gir15}, this representation makes it easy to express the $k$-qubit reduced density matrix $\rho_k$ obtained by tracing out $N-k$ qubits: one simply replaces the last $N-k$ indices of $x_{\mu_1\mu_2\ldots\mu_N}$ by zero. The expansion (\ref{projrho}) for $\rho_k$ thus reads
\begin{equation}
\label{projrhok}
\rho_k=\frac{1}{2^k} x_{\mu_1\mu_2\ldots \mu_k 0\ldots 0} S_{\mu_1\mu_2\ldots\mu_k}.
\end{equation}
Note that, because of symmetry of $\rho$, the choice of qubits traced out does not matter. For instance for five qubits, tracing out qubits 2 and 4 gives $x_{\mu_1 0 \mu_2 0 \mu_3}=x_{\mu_1\mu_2\mu_300}$, and the coordinates of the reduced density matrix would be given by $x_{\mu_1 \mu_2 \mu_3}$, whichever pair of qubits is traced out.
For single qubit states, $S_\mu=\sigma_\mu$, and the representation (\ref{projrho}) reduces to the usual Bloch representation. In the Bloch representation, a single qubit state $\rho$ can be expressed (with implicit summation over $\mu=0,\ldots,3$) as
\begin{equation}
\label{projrho12}
\rho=\frac{1}{2} n_\mu \sigma_\mu,
\end{equation}
where $n$ is the 4-vector given by $n=(n_0,n_1,n_2,n_3)$ with $n_0=1$. The Bloch vector associated with the state is ${\bf n}=(n_1,n_2,n_3)=\mathrm{tr} (\rho \boldsymbol{\sigma})$, where $\boldsymbol{\sigma}=(\sigma_1,\sigma_2,\sigma_3)$. In the case of pure states, the Bloch vector ${\bf n}=(\sin\theta\cos\varphi,\sin\theta\sin\varphi,\cos\theta)$ is of unit length and we denote by $\ket{{\mathbf n}}$ the corresponding qubit state. A fully separable pure symmetric state $\ket{n}\equiv\ket{{\mathbf n}}^{\otimes N}$ is the tensor product of $N$ copies of a pure qubit state $\ket{{\mathbf n}}$. It can be expanded in the Dicke basis as
\begin{equation}
\label{symsep}
\ket{n}=\sum_{k=0}^{N}\sqrt{\binom{N}{k}}\left[\sin\!\frac{\theta}{2}\right]^{k}\left[\cos\!\frac{\theta}{2}\,e^{-i\varphi}\right]^{N-k}\ket{D_N^{(k)}},
\end{equation}
and in the representation \eqref{projrho} it can be written as~\cite{Gir15}
\begin{equation}
\label{projrhocoh}
\ketbra{n}=\frac{1}{2^N} n_{\mu_1}n_{\mu_2}\ldots n_{\mu_N} S_{\mu_1\mu_2\ldots\mu_N}.
\end{equation}
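As a simple check of conventions, for $N=1$ the expansion \eqref{symsep} reduces to $\ket{n}=\cos\!\frac{\theta}{2}\,e^{-i\varphi}\ket{0}+\sin\!\frac{\theta}{2}\ket{1}$, which coincides, up to a global phase, with the usual qubit state of Bloch vector ${\bf n}$, while \eqref{projrhocoh} reduces to the pure-state case of the Bloch representation \eqref{projrho12}.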
Fully separable states are central in the context of entanglement of symmetric states. Indeed, let $\rho$ be an $N$-qubit symmetric state that is separable along some bipartition of the qubits. Then $\rho$ is a convex combination of pure symmetric states separable along the same bipartition (see e.g.~\cite{Boh16}, Section C). But any pure symmetric state separable along some bipartition is separable along any bipartition and thus fully separable \cite{Ich08}. Therefore, in the subspace of symmetric states, separable states coincide with the convex hull of the projectors $\ketbra{n}$. In other words, symmetric states are either genuinely entangled or fully separable.
\section{Symmetric states with no $N$-partite correlations}
\label{sec:symchar}
\subsection{Characterization in terms of tensor coefficients}
Symmetric states with no $N$-partite correlations are defined in \cite{Sch15} as states $\rho$ such that $\langle \sigma_{a_1}\otimes\cdots\otimes\sigma_{a_N}\rangle_\rho=0$ for any $a_i$ with $1\leqslant a_i\leqslant 3$ (in the present paper latin indices range from 1 to 3 while greek indices range from 0 to 3). Because of \eqref{correl}, this condition can be expressed in terms of coordinates $x_{\mu_1\mu_2\ldots\mu_N}$ as
\begin{equation}
\label{nocorr}
x_{a_1a_2\ldots a_N}=0\qquad\forall a_i=1,2,3.
\end{equation}
For symmetric states, because of the relation \eqref{traceless}, the absence of $N$-partite correlations has the immediate consequence that all $(N-2k)$-partite correlations, $k=0,\ldots,\lfloor N/2 \rfloor$, vanish. If $N$ is odd, it can be expressed as the hierarchy of conditions
\begin{equation}
\label{nocorrsym}
\begin{array}{rclrcl}
x_{a_1a_2a_3\ldots a_N}&=&0&\qquad\forall a_i&=&1,2,3,\quad 1\leqslant i\leqslant N\\
x_{a_1a_2\ldots a_{N-2}00}&=&0&\qquad\forall a_i&=&1,2,3,\quad 1\leqslant i\leqslant N-2\\
&\vdots& & &\vdots& \\
x_{a_1 0 0\ldots 0}&=&0&\qquad\forall a_1&=&1,2,3.
\end{array}
\end{equation}
If $N$ is even, it leads to $x_{0\ldots 0}=0$, which contradicts the fact that $x_{0\ldots 0}=\mathrm{tr}\rho=1$. Thus no symmetric states with no $N$-partite correlations can exist for an even number of qubits. Similarly, if $N$ is odd, no symmetric states with no $(N-1)$-partite correlations can exist. This generalises to all symmetric states the results found in~\cite{Las12} that all correlations between an odd number of subsystems vanish (and thus admit a local hidden-variable model) in an even mixture of Dicke states for any odd number of qubits.
Note that because of Eq.~(\ref{projrhok}), the hierarchy of conditions \eqref{nocorrsym} implies that all $k$-qubit reduced states obtained from $\rho$ by tracing out an even number of qubits are also states with no $k$-partite correlations. Moreover, the last condition in \eqref{nocorrsym} can be rephrased as $\langle \boldsymbol{\sigma}\rangle_{\rho_1} =0$. Such states with vanishing expectation value of the spin have been called $1$-anticoherent states, by contrast with coherent states, which maximize this expectation value. They are characterized by the fact that their one-qubit reduced density matrix is always the maximally mixed state~\cite{Bag14,Gir15}. Thus, all symmetric states with no $N$-partite correlations are $1$-anticoherent.
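Indeed, this last property can be read off directly from the tensor coordinates: using \eqref{projrhok} with $k=1$ (for which $S_\mu=\sigma_\mu$) and the last condition in \eqref{nocorrsym}, one gets
\begin{equation*}
\rho_1=\frac{1}{2}\Big(x_{0\ldots0}\,\sigma_0+\sum_{a=1}^{3}x_{a0\ldots0}\,\sigma_a\Big)=\frac{\sigma_0}{2}.
\end{equation*}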
As we mentioned, only symmetric states with an odd number $N=2M+1$ of qubits can be such that their $N$-partite correlations are zero. In what follows, we will restrict ourselves to this odd case, and we denote by $\mathcal{SNC}_N$ the set of symmetric $N$-qubit states with no $N$-partite correlations.
\subsection{Antistates}
\label{antistates}
Antistates were defined in \cite{Sch15} to construct examples of genuinely entangled states with no $N$-partite correlations. In this subsection we introduce these states, and we will use them in the next subsection to characterize elements of $\mathcal{SNC}_N$.
Let $\mathfrak{N}=\sigma_3\sigma_1K$ be the one-qubit universal-not operator, with $K$ the complex conjugation operator. This operator is antilinear and antiunitary, and it also satisfies $\mathfrak{N}^2=-\mathbb{1}$. For any pure state $\ket{\psi}$, its antistate $\ket{\bar{\psi}}$ is defined by applying the universal-not operator on each qubit, namely $\ket{\bar{\psi}}=\mathfrak{N}^{\otimes N}\ket{\psi}$. The antistate $\ket{\bar{\psi}}$ is orthogonal to $\ket{\psi}$. This can be checked with the explicit form of $\ket{\bar{\psi}}$ provided in \cite{Sch15}; here we give a neat proof for symmetric states, using the symmetric formalism previously introduced in Sec.~\ref{sec:tensor}. Let $\ket{\psi}$ be a symmetric $N$-qubit state. Using representation \eqref{projrho}, it can be written as $\ketbra{\psi}=2^{-N} x_{\mu_1\mu_2\ldots\mu_N}^{\psi} S_{\mu_1\mu_2\ldots\mu_N}$. Then we have
\begin{equation}
\langle \psi \ket{\bar{\psi}}=\mathrm{tr} \left(\ketbra{\psi}\mathfrak{N}^{\otimes N}\right) =\frac{1}{2^N} x_{\mu_1\mu_2\ldots\mu_N}^{\psi} \prod_{i=1}^N \mathrm{tr} \left(\mathfrak{N} \sigma_{\mu_i} \right).
\end{equation}
The second equality is obtained using Eq.~\eqref{matrixS} and the fact that the symmetric operator $\mathfrak{N}^{\otimes N}$ commutes with the projector (\ref{projps}) onto the symmetric subspace $\mathcal{S}$. Since $\mathrm{tr}({\mathfrak{N}})=0$ and $\mathfrak{N}\sigma_j+\sigma_j\mathfrak{N}=0$ for all $j$, we have that $\mathrm{tr} \left(\mathfrak{N} \sigma_{\mu} \right)=0$ for all $\mu$, which completes the proof.
The antistates introduced previously exhibit an elegant geometric interpretation in the Majorana representation. This representation allows one to visualize a pure symmetric state of $N$ qubits as a set of $N$ points on the unit sphere. For one qubit, this coincides with the usual Bloch representation. For several qubits, each qubit is associated with its Bloch point, and symmetry allows all of them to be placed on the same sphere. In this picture, fully separable symmetric states correspond to $N$ degenerate points. The points of the Majorana representation of the antistate $\ket{\bar{\psi}}$ are diametrically opposite to those of the initial state $\ket{\psi}$. This is the generalization of the one-qubit case discussed in~\cite{Buz99}.
\subsection{Spectral properties of $\rho\in\mathcal{SNC}_N$}
\label{secspec}
Let $\rho\in\mathcal{SNC}_N$ and let $\ket{\psi}$ be a normalized eigenstate of $\rho$ with eigenvalue $\lambda$. The operator $\mathfrak{N}$ has the remarkable property that its $N$-fold tensor product commutes with $\rho$. Indeed, thanks to the anticommutation property $\mathfrak{N}\sigma_j+\sigma_j\mathfrak{N}=0$ for all $j$, we have
\begin{equation}\label{Nsr}
\mathfrak{N}^{\otimes N}S_{\mu_1\mu_2\ldots\mu_N}=(-1)^{c(\mu_1,\ldots,\mu_N)}S_{\mu_1\mu_2\ldots\mu_N} \mathfrak{N}^{\otimes N},
\end{equation}
where $c(\mu_1,\ldots,\mu_N)$ is the number of nonzero indices $\mu_i$; this follows from Eq.~(\ref{matrixS}) and the fact that $\mathfrak{N}^{\otimes N}$ commutes with $P_{\mathcal{S}}$. Expressing $\rho$ in the representation \eqref{projrho} as $\rho=2^{-N} x_{\mu_1\mu_2\ldots\mu_N} S_{\mu_1\mu_2\ldots\mu_N}$, we note that only coefficients with an even number of nonzero indices appear in the expansion, because of the property \eqref{nocorrsym}. Using (\ref{Nsr}), we then get $\mathfrak{N}^{\otimes N}\rho=\rho \mathfrak{N}^{\otimes N}$.
As a consequence, $\rho \ket{\bar{\psi}} = \rho\mathfrak{N}^{\otimes N}\ket{\psi} = \mathfrak{N}^{\otimes N} \rho \ket{\psi}=\lambda^* \mathfrak{N}^{\otimes N} \ket{\psi}=\lambda \ket{\bar{\psi}}$. Thus $\ket{\bar{\psi}}$ is also an eigenstate of $\rho$ with eigenvalue $\lambda$, orthogonal to $\ket{\psi}$. If $\rho$ has another eigenstate $\ket{\phi}$ with the same eigenvalue $\lambda$ ($\ket{\phi}$ can be taken normalized and orthogonal to $\ket{\psi}$ and $\ket{\bar{\psi}}$ without loss of generality), the antistate $\ket{\bar{\phi}}$ will also be orthogonal to $\ket{\psi}$ and $\ket{\bar{\psi}}$ because $\mathfrak{N}$ is antiunitary. Repeating this procedure, we can find an orthonormal basis of the eigenspace of $\rho$ for the eigenvalue $\lambda$ containing pairs of states and antistates. Using this construction on all its eigenspaces, $\rho$ can be written as
\begin{equation}
\label{decomprho}
\rho=\sum_{i=0}^M\lambda_i\left(\ket{\psi_i}\bra{\psi_i}+\ket{\bar{\psi}_i}\bra{\bar{\psi}_i}\right),
\end{equation}
where $\ket{\psi_i}$ and $\ket{\bar{\psi}_i}$ are eigenstates of $\rho$ with eigenvalue $\lambda_i$, and $\sum_i\lambda_i=1/2$. Equation~(\ref{decomprho}) implies that the eigenvalues of a state $\rho\in\mathcal{SNC}_N$ have an even degeneracy. As a consequence, the purity of $\rho$ obeys a tighter upper bound than the usual $\mathrm{tr}\rho^2\leqslant 1$, namely
\begin{equation}
\label{ineqpurity}
\mathrm{tr}\rho^2\leqslant\frac{1}{2}.
\end{equation}
Indeed, the largest purity is reached when all $\lambda_i$ but one are zero. The double degeneracy and the normalization of the state imply that there are two nonzero eigenvalues, which are both equal to $1/2$, leading to a maximal purity $\mathrm{tr}\rho^2=1/2$. Since any state of the form (\ref{decomprho}) is a state with no $N$-partite correlations (as was already shown in \cite{Sch15}), this form provides a characterization of elements of $\mathcal{SNC}_N$.
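Equivalently, the bound \eqref{ineqpurity} can be obtained directly from \eqref{decomprho}: since the states $\ket{\psi_i}$, $\ket{\bar{\psi}_i}$ form an orthonormal family and $\lambda_i\geqslant 0$,
\begin{equation*}
\mathrm{tr}\rho^2=2\sum_{i=0}^{M}\lambda_i^2\leqslant 2\Big(\sum_{i=0}^{M}\lambda_i\Big)^2=\frac{1}{2},
\end{equation*}
with equality if and only if a single $\lambda_i$ is nonzero.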
\section{Entanglement criteria} \label{sec:entcrit}
\subsection{A sufficient entanglement criterion}
A sufficient criterion for genuine entanglement was obtained in \cite{Bad08}. Following this approach we now derive a sufficient criterion for genuine entanglement of symmetric states. Let $S^2$ denote the unit sphere in $\mathbb{R}^3$, and $\ket{n}$ be the fully separable state (\ref{symsep}) associated with ${\bf n}\in S^2$. If $\rho$ is a symmetric state such that
\begin{equation}
\label{genuine}
\forall \: {\bf n}\in S^2,\quad \bra{n}\rho\ket{n}<\mathrm{tr} \rho^2,
\end{equation}
then $\rho$ is genuinely entangled.
Indeed, suppose $\rho$ is not genuinely entangled. Then it is fully separable and therefore can be written as a mixture of fully separable pure states $\ket{n^{(i)}}$, namely
\begin{equation}
\rho=\sum_ip_i\ketbra{n^{(i)}},
\end{equation}
with $0<p_i\leqslant 1$ and $\sum_i p_i=1$. With these notations, we have
\begin{equation}
\begin{aligned}
\label{trrho2}
\mathrm{tr}\rho^2&=\sum_i p_i\bra{n^{(i)}}\rho\ket{n^{(i)}}\\
&\leqslant\max_i\;\bra{n^{(i)}}\rho\ket{n^{(i)}},
\end{aligned}
\end{equation}
since $p_i$ are positive and sum up to 1. The state $\ket{n^{(i)}}$ achieving the maximum in \eqref{trrho2} violates Eq.~\eqref{genuine}, hence the result.
In the case where $\rho$ is of rank 2, this condition is in fact necessary and sufficient, as will be shown in Section \ref{rank2sec}.
\subsection{A necessary and sufficient criterion in the three-qubit case}\label{subsec:3qubit}
As we ruled out even values of $N$, the simplest nontrivial case is $N=3$. As we show below, the set of three-qubit genuinely entangled symmetric states with no three-partite correlations can be fully characterized. For $\rho\in\mathcal{SNC}_3$, Eq.~(\ref{projrho}) reduces to
\begin{equation}\label{rho3}
\rho=\frac{1}{8}\mathbb{1}_4+\frac{3}{8}\sum_{a,b=1}^3 A_{ab} S_{ab0},
\end{equation}
with $\mathbb{1}_4$ the $4\times 4$ identity matrix, $S_{ab0}$ the matrices defined in Eq.~(\ref{matrixS}) and explicitly given by
\begin{equation}\label{eq:Sab}
S_{ab0}=\frac{J_a J_b + J_b J_a}{3} - \frac{\delta_{ab}}{2}\mathbb{1}_4,
\end{equation}
with $J_a$ the $4\times 4$ angular momentum matrices, and $A$ the $3\times3$ real symmetric matrix $(x_{ab0})_{1\leqslant a ,b\leqslant 3}$. Equation (\ref{traceless}) implies $\mathrm{tr} A=1$. The possible values of $A_{ab}$ are further constrained by the fact that $\rho$ has to be a positive semidefinite matrix. The characteristic polynomial of $\rho$ can be put under the form
\begin{equation}
\det(z\mathbb{1}-\rho)=\left(z^2-\frac12 z+\frac{3b}{16}\right)^2,
\end{equation}
with $b=(1-\mathrm{tr} A^2)/2$. This readily implies that the eigenvalues of $\rho$ are nonnegative if and only if $b\geqslant 0$, that is, $\mathrm{tr} A^2\leqslant 1$.
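Explicitly, the quadratic factor yields the doubly degenerate eigenvalues
\begin{equation*}
z_\pm=\frac{1\pm\sqrt{1-3b}}{4},
\end{equation*}
which are real since $\mathrm{tr} A=1$ and $\mathrm{tr} A^2\geqslant(\mathrm{tr} A)^2/3$ imply $b\leqslant 1/3$; the smaller eigenvalue $z_-$ is nonnegative precisely when $b\geqslant 0$.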
For a state $\rho$ acting on a Hilbert space $\mathcal{H}_1\otimes\mathcal{H}_2$ with $\mathcal{H}_1$ isomorphic to $\mathbb{C}^2$ and $\mathcal{H}_2$ isomorphic to $\mathbb{C}^3$, a necessary and sufficient condition of separability is the celebrated Peres-Horodecki criterion~\cite{Hor96}. This criterion states that the partial transpose of $\rho$ is positive semidefinite if and only if $\rho$ is separable. Since any fully symmetric three-qubit state can be expressed in the canonical basis of $\mathbb{C}^{2}\otimes \mathbb{C}^{3}$, we can apply this criterion to $\rho\in\mathcal{SNC}_3$~(see e.g.~\cite{Aug12}). Here, we will denote by $\rho^{\mathrm{PT}}$ the partial transpose performed on the first qubit (since we are dealing with symmetric states, the qubit on which the partial transpose is performed does not matter). In~\cite{Boh16}, it was shown that $\rho^{\mathrm{PT}}$ is positive semidefinite if and only if an Hermitian $8\times 8$ matrix $T$ given in terms of $x_{\mu_1\mu_2\mu_3}$ is positive semidefinite (the explicit expression for $T$ can be found at Eq.~(44) of Ref.~\cite{Boh16}). From this explicit form and using the relations (\ref{nocorrsym}), it can be checked that the characteristic polynomial of $T$ can be expressed as $\det(z\mathbb{1}_8-T)= z^2 q(z)^2$ with
\begin{equation}\label{charpolT}
\begin{aligned}
q(z)={}&z^3-2z^2+3\frac{(\mathrm{tr} A)^2-\mathrm{tr} A^2}{2}z\\
& -2\frac{(\mathrm{tr} A)^3-3(\mathrm{tr} A)^2\mathrm{tr} A^2+2\mathrm{tr} A^3}{3}.
\end{aligned}
\end{equation}
The matrix $T$ is positive semidefinite if and only if all roots of $q(z)$ are nonnegative. Moreover, the characteristic polynomial of $A$ is $\det(z\mathbb{1}_3-A)$ and reads
\begin{equation}\label{charpolA}
\begin{aligned}
&z^3-z^2+\frac{(\mathrm{tr} A)^2-\mathrm{tr} A^2}{2}z\\
&-\frac{(\mathrm{tr} A)^3-3(\mathrm{tr} A)^2\mathrm{tr} A^2+2\mathrm{tr} A^3}{6}.
\end{aligned}
\end{equation}
From Eqs.~(\ref{charpolT}) and (\ref{charpolA}), it appears that the roots of $q(z)$ are all nonnegative if and only if the eigenvalues of $A$ are all nonnegative (this follows from Descartes' rule of signs). Thus, $\rho^{\mathrm{PT}}$ is positive semidefinite if and only if $A$ is positive semidefinite. This provides a necessary and sufficient separability criterion in terms of the matrix $A$, namely $A\geqslant 0$. Using \eqref{correl}, we can see $A$ as the two-partite correlation matrix $A=(\langle \sigma_{ab}\rangle_\rho)_{1\leqslant a,b\leqslant 3}$, with $\sigma_{ab}=\sigma_a\otimes\sigma_b\otimes \mathbb{1}_2$ and $\langle \sigma_{ab}\rangle_\rho=\mathrm{tr} (\rho\,\sigma_{ab})$. The necessary and sufficient separability criterion can then be reformulated, for any $\rho\in\mathcal{SNC}_3$, as
\begin{equation}\label{rhosepApos3}
\rho~\mathrm{separable}\;\Leftrightarrow\; \begin{pmatrix}
\langle \sigma_{11}\rangle_\rho & \langle \sigma_{12}\rangle_\rho & \langle \sigma_{13}\rangle_\rho \\
\langle \sigma_{21}\rangle_\rho & \langle \sigma_{22}\rangle_\rho & \langle \sigma_{23}\rangle_\rho \\
\langle \sigma_{31}\rangle_\rho & \langle \sigma_{32}\rangle_\rho & \langle \sigma_{33}\rangle_\rho
\end{pmatrix} \geqslant 0.
\end{equation}
Let us now consider the two-qubit reduced density matrix $\rho_{2}$ of $\rho\in\mathcal{SNC}_3$. According to Eq.~(\ref{projrhok}), the tensor coordinates of $\rho_{2}$ are the $x_{\mu_1\mu_20}$. From Ref.~\cite{Boh16}, the partial transpose $\rho_2^{\mathrm{PT}}$ is positive semidefinite if and only if the $4\times 4$ matrix
$(x_{\mu_1\mu_20})_{0\leqslant\mu_1,\mu_2\leqslant 3}$ is positive semidefinite. This latter matrix is block-diagonal, with upper left $1\times1$ block being the identity and the bottom right $3\times3$ block given by matrix $A$. Therefore positivity of $(x_{\mu_1\mu_20})_{0\leqslant\mu_1,\mu_2\leqslant 3}$ is equivalent to positivity of $A$. These equivalences together with the result \eqref{rhosepApos3} show that for any $\rho\in\mathcal{SNC}_3$,
\begin{equation}
\rho~\mathrm{separable}\;\Leftrightarrow\;\rho_2~\mathrm{separable}.
\end{equation}
In physical terms, this means that one cannot end up in a separable state by tracing out one qubit from an entangled three-qubit symmetric state with no three-partite correlations. Therefore, the entanglement in such states has some robustness.
\subsection{Other sufficient entanglement criteria}
The above criterion for three-qubit states also provides us with a sufficient entanglement criterion in the general $N$-qubit case. Indeed, if $\rho\in\mathcal{SNC}_N$ is separable, then its three-qubit reduced density matrix $\rho_3$ is separable. Using (\ref{rhosepApos3}) and (\ref{projrhok}), we get a sufficient entanglement condition for $\rho$: if the $3\times 3$ matrix $A=(\langle \sigma_{ab}\rangle_\rho)_{1\leqslant a,b\leqslant 3}$, with $\sigma_{ab}=\sigma_a\otimes\sigma_b\otimes \mathbb{1}_2\otimes \cdots\otimes\mathbb{1}_2$, is not positive semidefinite, then $\rho$ is genuinely entangled. This is reminiscent of the entanglement criteria obtained for pure states in \cite{Mar13} based on two-point correlations.
It is in fact possible to obtain many more sufficient entanglement criteria from the PPT criteria applied to $\rho$ or to its $k$-qubit reduced density matrices. As shown in~\cite{Boh16}, the partial transpose matrices, and their positivity, can be expressed in terms of the $x_{\mu_1\ldots\mu_N}$ in a simple way. As we saw above, the partial transpose $\rho_2^{\mathrm{PT}}$ can be related with the $4\times 4$ matrix
$(x_{\mu_1\mu_20})_{0\leqslant\mu_1,\mu_2\leqslant 3}$ and positivity of $\rho_2^{\mathrm{PT}}$ is equivalent to the right-hand side of (\ref{rhosepApos3}). Similarly, positivity of the partial transpose $\rho_4^{\mathrm{PT}}$ is equivalent to positivity of the $16\times 16$ matrix indexed by the 16 pairs $(\mu_1,\mu_2)$ and $(\mu_3,\mu_4)$ and whose entries are the $x_{\mu_1\mu_2 \mu_3 \mu_4 0}$. All criteria that can be obtained in the same way lead to sufficient conditions of genuine entanglement in terms of correlators. However, these criteria lead to conditions that involve polynomials of high degree in $x_{\mu_1\ldots\mu_N}$ (the simple criterion $A\geqslant 0$ yields a polynomial of degree 3 in the $x_{ab0}$). In contrast, the criterion (\ref{genuine}) is of degree 2, and thus easier to deal with. In Sec.~\ref{sec:families}, our construction will be based on criterion (\ref{genuine}).
\section{Application to three-qubit states} \label{sec:threequbits}
Although we obtained a necessary and sufficient condition for three qubits in Sec.~\ref{sec:entcrit}, it is instructive to apply our general approach based on criterion (\ref{genuine}) to this simple case.
Condition \eqref{nocorrsym} implies that for a state $\rho$ in the representation \eqref{projrho} one has $x_{abc}=0$ and $x_{a00}=0$ for $a,b,c=1,2,3$. The only nonzero coordinates $x_{\mu_1\mu_2\mu_3}$ are the $x_{ab0}$ for $1\leqslant a,b\leqslant 3$, and $x_{000}=1$, leading to the representation (\ref{rho3}). We label by $\alpha_i$ the eigenvalues of $A$. One easily calculates $\bra{n}\rho\ket{n}=(1+3\,{\bf n}^TA{\bf n})/8$ and $\mathrm{tr}\rho^2=(1+3\,\mathrm{tr} A^2)/8$, so that condition \eqref{genuine} becomes
\begin{equation}
\forall \: {\bf n}\in S^2,\qquad {\bf n}^TA{\bf n}<\mathrm{tr} A^2.
\end{equation}
In terms of the eigenvalues of $A$, this condition can be reexpressed as
\begin{equation}
\label{condN3}
\max_i\alpha_i<\sum_i\alpha_i^2.
\end{equation}
As discussed above, since $\rho$ has to be a positive semidefinite matrix, $A$ must be such that $\mathrm{tr} A^2\leqslant 1$. Taking into account the fact that $\mathrm{tr} A=1$, the $\alpha_i$ must fulfill the two additional constraints
\begin{equation}
\label{addconstr}
\sum_i\alpha_i^2\leqslant 1\quad \textrm{and}\quad\sum_i\alpha_i=1.
\end{equation}
Among the set of triplets $(\alpha_1,\alpha_2,\alpha_3)\in\mathbb{R}^3$ fulfilling the conditions \eqref{addconstr}, one can easily find those verifying condition \eqref{condN3}. They are depicted in Fig.~\ref{fig1}. Equation (\ref{addconstr}) imposes that they are restricted to the plane $\sum_i\alpha_i=1$ and to the interior of the sphere $\sum_i\alpha_i^2\leqslant 1$ (see Fig.~\ref{fig1}). The equality $\alpha_1=\sum_i\alpha_i^2$ is the equation of a sphere of radius $1/2$ and center $(1/2,0,0)$, whose intersection with the plane is a circle. A similar analysis for $\alpha_2$ and $\alpha_3$ implies that the solutions to \eqref{condN3} lie strictly outside the three solid red circles of Fig.~\ref{fig1}. Finally, the triplets of solutions of \eqref{condN3} and \eqref{addconstr} lie in the region between the dashed blue circle and the solid red trilobe in Fig.~\ref{fig1}. These points correspond to the genuinely entangled states detected by the sufficient criterion (\ref{genuine}).\\
For $\rho\in\mathcal{SNC}_3$, the criterion \eqref{rhosepApos3} gives us a necessary and sufficient entanglement criterion in terms of the matrix $A$ defined above. Thus $\rho$ is separable if and only if all $\alpha_i$ are nonnegative. In Fig.~\ref{fig1}, the corresponding region is the green upward-pointing triangle and its interior. Hence $\rho$ is genuinely entangled if and only if its associated point lies outside the triangle but inside the dashed blue circle. Points outside the triangle but inside the trilobe are associated with genuinely entangled states not detected by criterion (\ref{genuine}).
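As a concrete illustration (the numerical values are chosen here purely for illustration), consider the triplets $(\alpha_1,\alpha_2,\alpha_3)=(0.6,0.6,-0.2)$ and $(0.7,0.4,-0.1)$. Both satisfy \eqref{addconstr}, since $\sum_i\alpha_i=1$ while $\sum_i\alpha_i^2=0.76$ and $0.66$ respectively. The first triplet fulfills \eqref{condN3}, as $\max_i\alpha_i=0.6<0.76$, and is therefore detected as genuinely entangled by criterion \eqref{genuine}. The second one violates \eqref{condN3}, since $0.7>0.66$, although it corresponds to a genuinely entangled state by \eqref{rhosepApos3} because $\alpha_3<0$; it thus lies outside the triangle but inside the trilobe of Fig.~\ref{fig1}.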
\begin{figure}
\caption{Three-qubit symmetric states in the space of eigenvalues $\alpha_i$ of $A$. The orange plane (triangle down) corresponds to the condition $\mathrm{tr}\,A=1$, the dashed blue circle to the boundary of the region $\mathrm{tr}\,A^2\leqslant 1$, the solid red circles (forming the trilobe) to the equalities $\alpha_i=\sum_j\alpha_j^2$, and the green triangle (up) to the separable region $A\geqslant 0$.}
\label{fig1}
\end{figure}
\section{Construction of families} \label{sec:families}
\subsection{Rank-2 density matrices}\label{rank2sec}
\subsubsection{A necessary and sufficient entanglement criterion}
\label{rank2abs}
Let $\rho\in\mathcal{SNC}_N$ be of rank 2; it has two nonzero eigenvalues, which have to be equal according to the results of section \ref{antistates}. Since $\mathrm{tr}\rho=1$, both eigenvalues are equal to $1/2$, so that there exists a pure state $\ket{\psi}$ such that
\begin{equation}
\label{rhorank2}
\rho=\frac12\ketbra{\psi}+\frac12\ketbra{\bar{\psi}}.
\end{equation}
Any fully separable pure symmetric state $\ket{n}$ can be decomposed as
\begin{equation}
\ket{n}=\langle \psi|n\rangle \ket{\psi}+\langle\bar{\psi}|n\rangle \ket{\bar{\psi}}+\ket{\phi},
\end{equation}
where $\ket{\phi}$ is orthogonal to both $\ket{\psi}$ and $\ket{\bar{\psi}}$ (we recall that the latter two states are orthogonal). The overlap $\bra{n}\rho\ket{n}$ then reads
\begin{equation}
\label{nrhon}
\bra{n}\rho\ket{n}=\frac12|\langle \psi|n\rangle|^2+\frac12|\langle\bar{\psi}|n\rangle |^2=\frac12\left(1-\langle \phi|\phi\rangle\right).
\end{equation}
Since for the state $\rho$ given by Eq.~(\ref{rhorank2}), one has $\mathrm{tr}\rho^2=1/2$, Eq.~(\ref{nrhon}) yields
\begin{equation}\label{rank2cond}
\bra{n}\rho\ket{n}\leqslant \frac{1}{2}=\mathrm{tr}\rho^2.
\end{equation}
Thus, the inequality in \eqref{genuine} always holds, possibly with equality. In fact, Eq.~(\ref{genuine}) is a necessary and sufficient condition for genuine entanglement in the case of rank-2 density matrices. Indeed, suppose Eq.~(\ref{genuine}) is violated for some $\ket{n}$. Then, from Eq.~(\ref{rank2cond}), one must have $\bra{n}\rho\ket{n} = \mathrm{tr}\rho^2=1/2$, which by \eqref{nrhon} implies that $\ket{\phi}=0$, so that $\ket{n}$ lies in the subspace spanned by $\ket{\psi}$ and $\ket{\bar{\psi}}$, which is the eigenspace of $\rho$ associated with eigenvalue $1/2$. In particular one must have the decomposition
\begin{equation}
\label{rhorank2beta}
\rho=\frac12\ketbra{n}+\frac12\ketbra{\bar{n}}.
\end{equation}
Therefore, $\rho$ is a mixture of two fully separable states, thus $\rho$ is separable. Hence, if $\rho$ is genuinely entangled, then Eq.~(\ref{genuine}) must hold. Thus, a rank-2 state $\rho\in\mathcal{SNC}_N$ is genuinely entangled if and only if
\begin{equation}
\label{genuinerank2}
\forall\: {\bf n}\in S^2,\quad \bra{n}\rho\ket{n}<\frac{1}{2}.
\end{equation}
It is separable if and only if there exists $\ket{n}$ such that $\bra{n}\rho\ket{n} = \mathrm{tr}\rho^2=1/2$, which is equivalent to \eqref{rhorank2beta}.
\subsubsection{A necessary separability criterion}
Let $\rho$ be a rank-2 symmetric state of the form \eqref{rhorank2}, thus with no $N$-partite correlations. Expressing \eqref{rhorank2} in terms of the coordinates $x^\rho_{\mu_1\mu_2\ldots\mu_N}$ of $\rho$ and the coordinates $x^\psi_{\mu_1\mu_2\ldots\mu_N}$ of $\ketbra{\psi}$ in the expansion (\ref{projrho}), we get
\begin{equation}
x^\rho_{\mu_1\mu_2\ldots\mu_N}=\frac{1+(-1)^{c(\mu_1,\ldots,\mu_N)}}{2}x^\psi_{\mu_1\mu_2\ldots\mu_N},
\end{equation}
where $c(\mu_1,\ldots,\mu_N)$ is the number of nonzero indices $\mu_i$. According to the previous subsection, it is separable if and only if it can be written as in~\eqref{rhorank2beta}. In terms of tensor coordinates, it is equivalent to
\begin{equation}
x^\rho_{\mu_1\mu_2\ldots\mu_N}=\frac{1+(-1)^{c(\mu_1,\ldots,\mu_N)}}{2}n_{\mu_1}\ldots n_{\mu_N}.
\end{equation}
This implies that for an even number of nonzero indices, we have $x_{\mu_1\ldots\mu_N}^{\rho}=n_{\mu_1}\ldots n_{\mu_N}=x^\psi_{\mu_1\mu_2\ldots\mu_N}$. In particular, if all but two indices are zero, we have $x^\rho_{ab 0\ldots 0}=n_an_b$, so that the matrix $A=(x^\rho_{ab 0\ldots 0})_{1\leqslant a,b\leqslant 3}=(x^\psi_{ab 0\ldots 0})_{1\leqslant a,b\leqslant 3}=(\langle \sigma_{ab}\rangle_\rho)_{1\leqslant a,b\leqslant 3}$ is of rank one, where $\sigma_{ab}=\sigma_a\otimes\sigma_b\otimes \mathbb{1}_2\otimes \cdots\otimes\mathbb{1}_2$.
We therefore get the following necessary condition for separability of rank-2 states,
\begin{equation}\label{sepcrit}
\rho~\mathrm{separable}\quad\Rightarrow\quad \mathrm{rank}A=1.
\end{equation}
\subsubsection{Explicit examples}
The above considerations allow us to construct families of genuinely entangled states of $\mathcal{SNC}_N$. Indeed, for any state $\ket{\psi}$, the mixed state $\rho=(\ket{\psi}\bra{\psi}+\ket{\bar{\psi}}\bra{\bar{\psi}})/2$ has no $N$-partite correlations. Choosing $\ket{\psi}$ in such a way that $A=(x^\psi_{ab 0\ldots 0})_{1\leqslant a,b\leqslant 3}$ is not of rank one warrants that $\rho$ is also genuinely entangled.
This construction can be achieved for instance for $\ket{\psi}=(\ket{D_{N}^{(r)}}+\ket{D_{N}^{(N-r)}})/\sqrt{2}$, which is a superposition of two Dicke states defined in \eqref{DickeStates}. Such states have already been studied in \cite{Tra17}. Here they provide a simple illustration of the rank-2 entanglement criterion derived in Section \ref{rank2abs}. The coefficients of the matrix $A^{\psi}$ are $x_{ab0\ldots0}^{\psi}=\bra{\psi}\sigma_{ab}\ket{\psi}$, so that
\begin{equation}
\begin{aligned}
x_{ab0\ldots0}^{\psi}=\frac{1}{2} & \left(\bra{D_{N}^{(r)}}\sigma_{ab}\ket{D_{N}^{(r)}}\right.+\bra{D_{N}^{(N-r)}}\sigma_{ab}\ket{D_{N}^{(N-r)}}\\[0.2cm]
&+\bra{D_{N}^{(r)}}\sigma_{ab}\ket{D_{N}^{(N-r)}}\left.+\bra{D_{N}^{(N-r)}}\sigma_{ab}\ket{D_{N}^{(r)}}\right).
\end{aligned}
\label{xabrk2}
\end{equation}
The last two terms of Eq.~(\ref{xabrk2}) can be shown to vanish for odd $N$ through an argument on the parity of the number of excitations \cite{Tra17}. In order to apply $\sigma_{ab}$ on the Dicke states, we decompose them as
\begin{multline}
\ket{D_{N}^{(r)}}= \sqrt{\frac{\binom{N-2}{r}}{\binom{N}{r}}}\ket{00}\ket{D_{N-2}^{(r)}}
+\sqrt{\frac{\binom{N-2}{r-2}}{\binom{N}{r}}}\ket{11}\ket{D_{N-2}^{(r-2)}}\\
+\sqrt{\frac{\binom{N-2}{r-1}}{\binom{N}{r}}}(\ket{01}+\ket{10})\ket{D_{N-2}^{(r-1)}}.
\end{multline}
Equation \eqref{xabrk2} then reduces to
\begin{multline}\label{xab}
x_{ab0\ldots0}^{\psi}=\frac{2r(N-r)}{N(N-1)}(\delta_{a,1}\delta_{b,1}+\delta_{a,2}\delta_{b,2})\\+\frac{(N-2r)^2-N}{N(N-1)}\delta_{a,3}\delta_{b,3}.
\end{multline}
The rank of $A^{\psi}$ is then equal to one if $r=0$ or $N$, to two if $r=(N-\sqrt{N})/2$, and to three otherwise. Thus, any mixed state defined by \eqref{rhorank2} with $\ket{\psi}=(\ket{D_{N}^{(r)}}+\ket{D_{N}^{(N-r)}})/\sqrt{2}$ and $1\leqslant r\leqslant N-1$ is a genuinely entangled state of $\mathcal{SNC}_N$.
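For instance, for $N=3$ and $r=1$, Eq.~\eqref{xab} gives $A^{\psi}=\mathrm{diag}\!\left(\tfrac{2}{3},\tfrac{2}{3},-\tfrac{1}{3}\right)$, which indeed has unit trace and rank three; its negative eigenvalue shows, via \eqref{rhosepApos3}, that the corresponding rank-2 state is genuinely entangled, in agreement with the criterion above.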
\subsection{Arbitrary rank}
\subsubsection{General construction}
The above construction can be generalized to any even rank (as shown in Sec.~\ref{secspec}, any genuinely entangled $N$-qubit state with no $N$-partite correlation has its eigenvalues always twice degenerate, so that no odd-rank example can exist). From the decomposition \eqref{decomprho} of any state in $\mathcal{SNC}_N$, the sufficient genuine entanglement criterion \eqref{genuine} can be expressed as
\begin{equation}
\forall \:{\bf n}\in S^2,\quad \sum_{i=0}^{M}\lambda_i\left(\left|\bra{\psi_i}n\rangle\right|^2+\left|\bra{\bar{\psi}_i}n\rangle\right|^2\right) < 2\sum_{i=0}^M\lambda_i^2,
\end{equation}
with $\ket{n}$ a pure symmetric fully separable state. This criterion can be rewritten as
\begin{equation}\label{sphere}
\forall {\bf n}\in S^2,\quad \sum_{i=0}^{M}(\lambda_i-c_i({\mathbf n}))^2>\sum_{i=0}^Mc_i({\mathbf n})^2,
\end{equation}
where $c_i({\mathbf n})=(|\bra{\psi_i}n\rangle|^2+|\bra{\bar{\psi}_i}n\rangle|^2)/4$. Thus, if for all ${\mathbf n}$, the vector $(\lambda_0,\ldots,\lambda_M)$ is outside the sphere $S({\mathbf n})$ centered at $C({\mathbf n})=(c_0({\mathbf n}),\ldots,c_M({\mathbf n}))$ and going through the origin, then the state \eqref{decomprho} is genuinely entangled.
Let us explain how to construct arbitrary-rank examples. It suffices that one of the $\ket{\psi_i}$ in \eqref{decomprho}, for example $\ket{\psi_{i_0}}$, be such that $A=(x^{\psi_{i_0}}_{ab 0\ldots 0})_{1\leqslant a,b\leqslant 3}$ is of rank two or three, so that $\rho_{i_0}=\left(\ket{\psi_{i_0}}\bra{\psi_{i_0}}+\ket{\bar{\psi}_{i_0}}\bra{\bar{\psi}_{i_0}}\right)/2$ is genuinely entangled according to Eq.~\eqref{sepcrit}. Then, the vector $E=(0,\ldots,0,1/2,0,\ldots,0)\in \mathbb{R}^{M+1}$, where the nonzero component is the ${i_0}$th, is outside all spheres $S({\mathbf n})$ since Eq.~(\ref{sphere}) reduces in this case to $c_{i_0}({\mathbf n})<1/4$, which according to Eq.~(\ref{genuinerank2}) is equivalent to the fact that $\rho_{i_0}$ is genuinely entangled. Thus, for any fixed ${\mathbf n}$, the distance $d(E,S({\mathbf n}))$ between $E$ and the sphere $S({\mathbf n})$ is such that $d(E,S({\mathbf n}))>0$. Since ${\mathbf n}$ is parametrized by spherical angles $\theta$ and $\varphi$ which vary in the compact set $[0,\pi]\times[0,2\pi]$, the minimum of $d(E,S({\mathbf n}))$ over all ${\mathbf n}$ is reached for some ${\mathbf n}_0$, so that $\inf_{\mathbf n} d(E,S({\mathbf n}))=d(E,S({\mathbf n}_0))>0$. Therefore, there are vectors $(\lambda_0,\ldots,\lambda_M)$ in the vicinity of $E$ such that $\lambda_i\geqslant0$, $\sum_{i=0}^M\lambda_i=1/2$ and the genuine entanglement criterion \eqref{sphere} is fulfilled. This shows that once $\ket{\psi_{i_0}}$ has been chosen as explained above, any choice of $\ket{\psi_i}$ with $i\ne {i_0}$ allows one to construct a state of arbitrary rank of the form \eqref{decomprho} which, for some values of the weights $\lambda_i$, is guaranteed to be a genuinely entangled state of $\mathcal{SNC}_N$.
\subsubsection{Explicit examples}
As an illustration, we consider the case where $\ket{\psi_i}$ in (\ref{decomprho}) are chosen as
\begin{equation}\label{psiDicke}
\ket{\psi_i}=\frac{1}{\sqrt{2}}(\ket{D_{N}^{(i)}}+\ket{D_{N}^{(N-i)}}),\quad 0\leqslant i\leqslant M.
\end{equation}
According to Eq.~(\ref{xab}), any choice $i_0\ne 0$ yields a rank-2 state $\rho_{i_0}=\left(\ket{\psi_{i_0}}\bra{\psi_{i_0}}+\ket{\bar{\psi}_{i_0}}\bra{\bar{\psi}_{i_0}}\right)/2$ which is genuinely entangled. In the vicinity of each $E=(0,\ldots,0,1/2,0,\ldots,0)\in \mathbb{R}^{M+1}$, there exist values of $\lambda_i$ giving genuinely entangled states of arbitrary rank.
\begin{figure*}
\caption{Three-qubit (a) and five-qubit (b) symmetric states in the space of eigenvalues of $\rho$. The orange line $\lambda_0+\lambda_1=1/2$ for $N=3$, and the large orange triangle down for $N=5$, correspond to the condition $\mathrm{tr}\,\rho=1$. States outside the red solid curve are detected as genuinely entangled by criterion \eqref{genuine}; states outside the blue dashed circle are detected by the analytical bound \eqref{eqn:sphere}.}
\label{fig2}
\end{figure*}
In order to give an explicit region in parameter space where the constructed states are genuinely entangled, we use the explicit form of the $\ket{\psi_i}$. Since the antistate of a Dicke state is $\ket{\bar{D}_N^{(k)}}=(-1)^{N-k}\ket{D_N^{(N-k)}}$, it follows that $\ket{\psi_i}\bra{\psi_i}+\ket{\bar{\psi}_i}\bra{\bar{\psi}_i}=\ket{D_N^{(i)}}\bra{D_N^{(i)}}+\ket{D_N^{(N-i)}}\bra{D_N^{(N-i)}}$, which yields
\begin{equation}
\bra{n}\rho\ket{n}=\sum_{i=0}^{M}\lambda_i\left(\big|\bra{D_N^{(i)}}n\rangle\big|^2+\big|\bra{D_N^{(N-i)}}n\rangle\big|^2\right).
\end{equation}
Using the expansion (\ref{symsep}), we get
\begin{equation}\label{trig}
\bra{n}\rho\ket{n}=\sum_{i=0}^{M}\lambda_i u_i(\theta),
\end{equation}
with $u_i(\theta)$ defined as
\begin{equation}
\binom{N}{i}\left[\frac{\sin\theta}{2}\right]^{2i}\frac{\left(1-\cos\theta\right)^{N-2i}+\left(1+\cos\theta\right)^{N-2i}}{2^{N-2i}}.
\end{equation}
One therefore has the upper bound
\begin{equation}\label{trigN}
\bra{n}\rho\ket{n}\leqslant \sum_{i=0}^{M}\lambda_i \,\max_{\theta}u_i(\theta).
\end{equation}
Taking $\lambda_i$ such that $\sum_i\lambda_i \,\max_{\theta}u_i(\theta)<2\sum_i\lambda_i^2$ ensures that the state $\rho$ is genuinely entangled. This inequality gives a constraint on the weight of $\rho_{i_0}$ in the mixture $\rho$ given by Eq.~(\ref{decomprho}), which cannot be too small. For arbitrary $N$, one can find a less stringent analytical bound. Since the function $f_k(x)=[(1-\cos x)^k+(1+\cos x)^k]/2^k$ takes values between 0 and 1, we have $u_i(\theta)\leqslant \binom{N}{i}2^{-2i}$ so that any state with $\lambda_i$ such that
\begin{equation}
\sum_{i=0}^{M}\binom{N}{i}\frac{\lambda_i}{2^{2i}}<2\sum_{i=0}^{M}\lambda_i^2,
\label{eqn:sphere}
\end{equation}
$\lambda_i\geqslant 0$ and $\sum_{i=0}^{M}\lambda_i=1/2$ is genuinely entangled. As previously, if the vector $(\lambda_0,\ldots,\lambda_M)$ is outside the sphere $S$ centered at $C=(c_0,\ldots,c_M)$ with $c_i=\binom{N}{i}2^{-(2i+2)}$ and going through the origin, then the state \eqref{decomprho} with $\ket{\psi_i}$ given by \eqref{psiDicke} is genuinely entangled. The intersection of the outside of the sphere with the region $\lambda_i\geqslant 0$ and the plane $\sum_{i=0}^{M}\lambda_i=1/2$ is nonempty. Indeed, the point $E=(0,\ldots,0,1/2)\in \mathbb{R}^{M+1}$ lies strictly outside the sphere if and only if $c_{M}<1/4$, which is the case since $\binom{2M+1}{M}<4^M$ for all $M\geqslant1$. The distance $d$ from $E$ to the intersection of the sphere with the plane $\sum_{i=0}^{M}\lambda_i=1/2$ can be obtained by introducing Lagrange multipliers. All $(\lambda_0,\ldots,\lambda_M)$ with $\lambda_i\geqslant 0$, $\sum_{i=0}^{M}\lambda_i=1/2$ and at a distance less than $d$ from $E$ give genuinely entangled states.
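For the reader's convenience, the value of $c_i$ follows from completing the square in \eqref{eqn:sphere}:
\begin{equation*}
\sum_{i=0}^{M}\binom{N}{i}\frac{\lambda_i}{2^{2i}}<2\sum_{i=0}^{M}\lambda_i^2
\;\Longleftrightarrow\;
\sum_{i=0}^{M}\left(\lambda_i-\binom{N}{i}2^{-(2i+2)}\right)^2>\sum_{i=0}^{M}\left(\binom{N}{i}2^{-(2i+2)}\right)^2,
\end{equation*}
which expresses that $(\lambda_0,\ldots,\lambda_M)$ lies outside the sphere $S$ centered at $C$ and passing through the origin.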
For $N=3$, the point on the sphere $S$ and the plane $\lambda_0+\lambda_1=1/2$ which is closest to $E=(0,1/2)$ is $(\lambda_0,\lambda_1)=(1/16,7/16)$. Thus all states with $\lambda_1>7/16$ are genuinely entangled. In fact, in this case, we have the necessary and sufficient condition of Sec.~\ref{subsec:3qubit} which reads $A\geqslant 0$. Using Eq.~(\ref{xab}), this is equivalent to $\lambda_0\geqslant \lambda_1/3\geqslant 0$. Thus the state is genuinely entangled if and only if $\lambda_1>3/8$. In Fig.~\ref{fig2} (a), the region outside the red solid curve corresponds to genuinely entangled states detected by the criterion (\ref{genuine}), while states outside the blue dashed circle correspond to those detected by the analytical bound (\ref{eqn:sphere}). The red solid curve intersects the line $\lambda_0+\lambda_1=1/2$ at $\lambda_1=3/8$, which coincides with our necessary and sufficient condition.
For higher $N$, the bound (\ref{eqn:sphere}) allows us to obtain closed analytical expressions for a region of admissible values of $\lambda_i$. A visualization of the case $N=5$ can be found in Fig.~\ref{fig2} (b), where states outside the red curve are obtained numerically from the criterion (\ref{genuine}), and states outside the blue dashed circle correspond to the analytical bound (\ref{eqn:sphere}).
\section{Conclusions}
In this paper, we have investigated genuinely entangled states in the set $\mathcal{SNC}_N$ of $N$-qubit symmetric states with no $N$-partite correlations. The tensor representation~\cite{Gir15} has allowed us to give a simple characterization of these states. From this characterization, it easily follows that no such states exist for an even number of qubits. We have shown that for any $\rho\in\mathcal{SNC}_N$ with $N$ odd, all eigenspaces of $\rho$ are even-dimensional, and that the general form of the state is given by (\ref{decomprho}). This form generalizes the mixture of a state with its antistate investigated in~\cite{Sch15}. The parametrization (\ref{rho3}) has allowed us to find a simple necessary separability condition for $\rho\in\mathcal{SNC}_N$, namely $A\geqslant 0$ with $A=(\langle \sigma_{ab}\rangle_\rho)_{1\leqslant a,b\leqslant 3}$ and $\sigma_{ab}=\sigma_a\otimes\sigma_b\otimes \mathbb{1}_2\otimes \cdots\otimes \mathbb{1}_2$.
In the case of three qubits, we have shown that $A\geqslant 0$ is in fact a necessary and sufficient condition for a state in $\mathcal{SNC}_3$ to be separable. Interestingly, this leads to the equivalence between separability of $\rho\in\mathcal{SNC}_3$ and separability of its two-qubit reduced density matrix. This implies that the entanglement in states $\rho\in\mathcal{SNC}_3$ cannot be entirely destroyed upon the loss of one qubit.
In the case of rank-2 states $\rho\in\mathcal{SNC}_N$, we have obtained a necessary and sufficient condition for separability in terms of the same matrix $A$ as $\mathrm{rank}A=1$. This condition has allowed us to generalize the construction to families of arbitrary rank, by considering mixtures in the vicinity of rank-2 genuinely entangled $\rho\in\mathcal{SNC}_N$.
\section*{Acknowledgments}
O.G. thanks the University of Li\`ege for hospitality.
\end{document}
|
\begin{document}
\begin{frontmatter}
\title{The combinatorial essence of supercompactness\tnoteref{t1}}
\tnotetext[t1]{The results of this paper are from the author's doctoral dissertation~\cite{diss} written at the Ludwig Maximilians Universit\"at M\"unchen
under the supervision of Professor Dieter Donder, to whom the author feels greatly indebted.}
\author{Christoph Wei\ss}
\ead{[email protected]}
\address{Mathematisches Institut der Universit\"at M\"unchen, Theresienstr.~39, 80333 M\"unchen, Germany}
\begin{abstract}
We introduce combinatorial principles that characterize strong compactness and supercompactness for inaccessible cardinals but also make sense for successor cardinals.
Their consistency is established from what is supposedly optimal.
Utilizing the failure of a weak version of square, we show that the best currently known lower bounds for the consistency strength of these principles can be applied.
\end{abstract}
\begin{keyword}
Ineffable \sep Slender \sep Strongly Compact \sep Supercompact \sep Thin
\MSC 03E05 \sep 03E35 \sep 03E55 \sep 03E65
\end{keyword}
\end{frontmatter}
\section{Introduction}
It is a well-known theorem that a cardinal $\kappa$ is weakly compact if and only if it is inaccessible and the $\kappa$-tree property holds, that is, there are no $\kappa$-Aronszajn trees.
By~\cite{mitchell}, the $\omega_2$-tree property can be forced from a weakly compact cardinal and implies $\omega_2$ is weakly compact in $L$.
The tree property thus captures the combinatorial essence of weak compactness, even for successor cardinals.
Similarly, the property that there is no special $\kappa$-Aronszajn tree captures the essence of Mahlo, see~\cite[(1.9)]{todorcevic.partitioning}.
In the present work, we introduce principles $\ensuremath{\text{{\sf TP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$ as well as $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ that achieve the same for strong compactness and supercompactness respectively.
We present the ideals associated to the principles $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$, prove the consistency of $\ensuremath{\text{{\sf ISP}}}(\omega_2, \lambda)$, the strongest of the principles, from a $\lambda^{<\kappa}$-ineffable cardinal, and show $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ implies the failure of a weak form of square, giving lower bounds on its consistency strength.
\subsection*{Notation}
The notation used is mostly standard.
$\ensuremath{\text{{\rm Ord}}}$ denotes the class of all ordinals.
For $A \subset \ensuremath{\text{{\rm Ord}}}$, $\Lim A$ denotes the class of limit points of $A$.
$\ensuremath{\text{{\rm Lim}}}$ stands for $\Lim \ensuremath{\text{{\rm Ord}}}$.
If $a$ is a set of ordinals, $\otp a$ denotes the order type of $a$.
For a regular cardinal $\delta$, $\cof \delta$ denotes the class of all ordinals of cofinality $\delta$,
and $\cof(< \delta)$ denotes those of cofinality less than $\delta$.
For forcings, we write $p < q$ to mean $p$ is stronger than $q$.
Names either carry a dot above them or are canonical names for elements of $V$, so that we can confuse sets in the ground model with their names.
The phrases \emph{for large enough $\theta$} and \emph{for sufficiently large $\theta$} will be used for saying that there exists a $\theta'$ such that the sentence's proposition holds for all $\theta \geq \theta'$.
If $\kappa \subset X$, then
\begin{equation*}
P'_\kappa X \coloneqq \{ x \in P_\kappa X\ |\ \kappa \cap x \in \ensuremath{\text{{\rm Ord}}},\ \langle x, \in \rangle \prec \langle X, \in \rangle \}
\end{equation*}
is club.
For $x \in P'_\kappa X$ we set $\kappa_x \coloneqq \kappa \cap x$.
For $f: P_\omega X \to P_\kappa X$ let $\ensuremath{\text{{\rm Cl}}}_f \coloneqq \{ x \in P_\kappa X\ |\ \forall z \in P_\omega x\ f(z) \subset x \}$.
$\ensuremath{\text{{\rm Cl}}}_f$ is club, and it is well known that for any club $C \subset P_\kappa X$ there is an $f: P_\omega X \to P_\kappa X$ such that $\ensuremath{\text{{\rm Cl}}}_f \subset C$.
If $X \subset X'$, $R \subset P_\kappa X$, $U \subset P_\kappa X'$,
then the projection of $U$ to $X$ is $U \restriction X \coloneqq \{ u \cap X\ |\ u \in U \} \subset P_\kappa X$ and the lift of $R$ to $X'$ is $R^{X'} \coloneqq \{ x' \in P_\kappa X'\ |\ x' \cap X \in R \} \subset P_\kappa X'$.
For sections~\ref{sect.principles},~\ref{sect.ideals}, and~\ref{sect.weak_square}, $\kappa$ and $\lambda$ are assumed to be cardinals, $\kappa \leq \lambda$, and $\kappa$ is regular and uncountable.
\section{Combinatorial principles for strong compactness and supercompactness}\label{sect.principles}
Let us call a sequence $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ a \emph{$P_\kappa \lambda$-list} if $d_a \subset a$ for all $a \in P_\kappa \lambda$.
\begin{definition}\label{def.P_kappa_lambda.thin}
Let $D = \langle d_a\ |\ a \in P_\kappa \lambda \rangle$ be a $P_\kappa \lambda$-list.
\begin{itemize}
\item $D$ is called \emph{thin} if there is a club $C \subset P_\kappa \lambda$ such that $| \{ d_a \cap c\ |\ c \subset a \in P_\kappa \lambda \} | < \kappa$ for every $c \in C$.
\item $D$ is called \emph{slender} if for every sufficiently large $\theta$ there is a club $C \subset P_\kappa H_\theta$ such that $d_{M \cap \lambda} \cap b \in M$ for all $M \in C$ and all $b \in M \cap P_{\omega_1} \lambda$.\footnote{Note that this definition is slightly weaker than the one from \cite{diss} as ``for all $b \in M \cap P_\kappa \lambda$'' was replaced by ``for all $b \in M \cap P_{\omega_1} \lambda$.'' However, the proofs in \cite{diss} work for this weaker definition and the resulting stronger principle \ensuremath{\text{{\sf ISP}}}\ just the same.}
\end{itemize}
\end{definition}
\begin{proposition}\label{prop.P_kappa_lambda-thin->slender}
Let $D$ be a $P_\kappa \lambda$-list.
If $D$ is thin, then it is slender.
\end{proposition}
\begin{proof}
Let $C \subset P_\kappa \lambda$ be a club that witnesses $D = \langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is thin.
Define $g: C \to P_\kappa H_\theta$ by $g(c) \coloneqq \{ d_a \cap c\ |\ c \subset a \in P_\kappa \lambda \}$.
Let $\bar{C} \coloneqq \{ M \in C^{H_\theta}\ |\ \forall b \in M \cap P_\kappa \lambda\ \exists c \in M \cap C\ b \subset c,\ \forall c \in M \cap C\ g(c) \subset M \}$.
Then $\bar{C}$ is club.
Let $M \in \bar{C}$ and $b \in M \cap P_{\omega_1} \lambda$.
Then there is $c \in M \cap C$ such that $b \subset c$, so $d_{M \cap \lambda} \cap b = d_{M \cap \lambda} \cap c \cap b \in M$ as $d_{M \cap \lambda} \cap c \in g(c) \subset M$.
Therefore $\bar{C}$ witnesses $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is slender.
\end{proof}
\begin{definition}\label{def.ineffable_branch}
Let $D = \langle d_a\ |\ a \in P_\kappa \lambda \rangle$ be a $P_\kappa \lambda$-list and $d \subset \lambda$.
\begin{itemize}
\item $d$ is called a \emph{cofinal branch of $D$} if for all $a \in P_\kappa \lambda$ there is $z_a \in P_\kappa \lambda$ such that $a \subset z_a$ and $d \cap a = d_{z_a} \cap a$.
\item $d$ is called an \emph{ineffable branch of $D$} if there is a stationary set $S \subset P_\kappa \lambda$ such that $d \cap a = d_a$ for all $a \in S$.
\end{itemize}
\end{definition}
Combining these two definitions, we can define the following four combinatorial principles.
\begin{definition}
\begin{itemize}
\item $\ensuremath{\text{{\sf TP}}}(\kappa, \lambda)$ holds if every thin $P_\kappa \lambda$-list has a cofinal branch.
\item $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$ holds if every slender $P_\kappa \lambda$-list has a cofinal branch.
\item $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ holds if every thin $P_\kappa \lambda$-list has an ineffable branch.
\item $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ holds if every slender $P_\kappa \lambda$-list has an ineffable branch.
\end{itemize}
\end{definition}
\begin{remark}\label{remark.inaccessible->thin}
The reader should note that the principle $\ensuremath{\text{{\sf TP}}}(\kappa, \kappa)$ is just the tree property for $\kappa$.
Also, if $\kappa$ is an inaccessible cardinal, then every $P_\kappa \lambda$-list is thin: for any $c \in P_\kappa \lambda$ we have $\{ d_a \cap c\ |\ c \subset a \in P_\kappa \lambda \} \subset P(c)$, and $|P(c)| = 2^{|c|} < \kappa$ by inaccessibility.
Therefore $\ensuremath{\text{{\sf TP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$ as well as $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ are equivalent if $\kappa$ is inaccessible.
Furthermore this means an inaccessible cardinal $\kappa$ is weakly compact if and only if $\ensuremath{\text{{\sf TP}}}(\kappa, \kappa)$ holds, and it is ineffable if and only if $\ensuremath{\text{{\sf ITP}}}(\kappa, \kappa)$ holds.
\end{remark}
\begin{remark} The following implications hold.
\begin{enumerate}
\item $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ implies $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$,\label{en.ISP->SP}
\item $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ implies $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$,\label{en.ISP->ITP}
\item $\ensuremath{\text{{\sf ITP}}}(\kappa,\lambda)$ implies $\ensuremath{\text{{\sf TP}}}(\kappa,\lambda)$,\label{en.ITP->TP}
\item $\ensuremath{\text{{\sf SP}}}(\kappa,\lambda)$ implies $\ensuremath{\text{{\sf TP}}}(\kappa,\lambda)$.\label{en.SP->TP}
\end{enumerate}
We will see that~\ref{en.ISP->SP} and~\ref{en.ITP->TP} cannot be reversed.
For if $\kappa$ is a strongly compact cardinal that is not supercompact, then by Theorem~\ref{theorem.TP<->stronglycompact} $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$ holds for all $\lambda \geq \kappa$, but by Theorem~\ref{theorem.ITP<->supercompact} we have that $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ cannot hold for all $\lambda \geq \kappa$.
This is also true for smaller $\kappa$.
One can show that the Mitchell collapse preserves $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$.
However, by Theorem~\ref{theorem.ITP_consistency_additional}, if the Mitchell collapse produces a model in which $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ holds, then $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ already holds in the ground model, so that again collapsing a strongly compact cardinal that is not supercompact yields a model in which $\ensuremath{\text{{\sf SP}}}(\kappa, \lambda)$ holds but $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ fails.
Furthermore, implication~\ref{en.ISP->ITP} cannot be reversed.
This follows from the fact that the forcing axiom $\ensuremath{\text{{\sf PFA}}}(\Gamma_\Sigma)$ from~\cite{koenig.forcing_indestructibility} can be seen to imply $\ensuremath{\text{{\sf ITP}}}(\omega_2, \lambda)$ for all $\lambda \geq \omega_2$.
The paper also shows that $\ensuremath{\text{{\sf PFA}}}(\Gamma_\Sigma)$ is consistent with the approachability property holding for $\omega_1$.
It is easily seen that this contradicts $\ensuremath{\text{{\sf ISP}}}(\omega_2, \omega_2)$, so that in any model of $\ensuremath{\text{{\sf PFA}}}(\Gamma_\Sigma)$ + ``the approachability property holds for $\omega_1$,'' the principle $\ensuremath{\text{{\sf ITP}}}(\omega_2, \lambda)$ holds for all $\lambda \geq \omega_2$ while $\ensuremath{\text{{\sf ISP}}}(\omega_2, \omega_2)$ fails.
\end{remark}
Jech~\cite{jech.combinatorial_problems} was the first to consider generalizations of the concept of a tree to $P_\kappa \lambda$-lists.
He gave the following characterization of strong compactness.
\begin{theorem}\label{theorem.jech}
The following are equivalent.
\begin{enumerate}
\item $\kappa$ is strongly compact.
\item For every $\lambda\geq\kappa$, every $P_\kappa \lambda$-list has a cofinal branch.
\end{enumerate}
\end{theorem}
Shortly after, Magidor~\cite{magidor.characterization_supercompact} extended Jech's result to supercompactness with the next theorem.
\begin{theorem}\label{theorem.magidor}
The following are equivalent.
\begin{enumerate}
\item $\kappa$ is supercompact.
\item For every $\lambda\geq\kappa$, every $P_\kappa \lambda$-list has an ineffable branch.
\end{enumerate}
\end{theorem}
By Remark~\ref{remark.inaccessible->thin} we can rephrase Theorems~\ref{theorem.jech} and~\ref{theorem.magidor} in the following way.
\begin{theorem}\label{theorem.TP<->stronglycompact}
Suppose $\kappa$ is inaccessible.
Then $\kappa$ is strongly compact if and only if\/ $\ensuremath{\text{{\sf TP}}}(\kappa, \lambda)$ holds for every $\lambda \geq \kappa$.
\end{theorem}
\begin{theorem}\label{theorem.ITP<->supercompact}
Suppose $\kappa$ is inaccessible.
Then $\kappa$ is supercompact if and only if\/ $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ holds for every $\lambda \geq \kappa$.
\end{theorem}
The advantage of these new formulations is that $\ensuremath{\text{{\sf TP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ are not limited to
inaccessible cardinals, as we will see in section~\ref{sect.consistency}.
\section{The corresponding ideals}\label{sect.ideals}
The principles $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ have ideals canonically associated to them.
\begin{definition}
Let $A \subset P_\kappa \lambda$ and
let $D = \langle d_a\ |\ a \in P_\kappa \lambda \rangle$ be a $P_\kappa \lambda$-list.
$D$ is called \emph{$A$-effable} if for every $S \subset A$ that is stationary in $P_\kappa \lambda$ there are $a, b \in S$ such that $a \subset b$ and $d_a \neq d_b \cap a$.
$D$ is called \emph{effable} if it is $P_\kappa \lambda$-effable.
\end{definition}
\begin{definition}
We let
\begin{align*}
I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda] & \coloneqq \{ A \subset P_\kappa \lambda\ |\ \text{there exists a thin $A$-effable $P_\kappa \lambda$-list} \},\\
I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda] & \coloneqq \{ A \subset P_\kappa \lambda\ |\ \text{there exists a slender $A$-effable $P_\kappa \lambda$-list} \}.
\end{align*}
By $F_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ and $F_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ we denote the filters associated to $I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$
and $I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ respectively.
\end{definition}
Note that $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ and $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ now say that $I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ and $I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ are proper ideals respectively.
By Proposition~\ref{prop.P_kappa_lambda-thin->slender} we have $I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda] \subset I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$.
\begin{proposition}\label{prop.I_IT.normal}
$I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ and $I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ are normal ideals on $P_\kappa \lambda$.
\end{proposition}
\begin{proof}
Suppose $D \subset P_\kappa \lambda$ and $g: D \to \lambda$ is regressive.
Set $A_\gamma \coloneqq {g^{-1}}'' \{ \gamma \}$.
Let $f: \lambda \times \lambda \to \lambda$ be bijective, and define $f_{\alpha_1}: \lambda \to \lambda$ by $f_{\alpha_1} ( \alpha_0 ) \coloneqq f(\alpha_0, \alpha_1)$.
We show that if $A_\gamma \in I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ for all $\gamma < \lambda$, then $D \in I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$, and that if $A_\gamma \in I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ for all $\gamma < \lambda$, then $D \in I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$.
In the thin case, that is, if $A_\gamma \in I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ for all $\gamma < \lambda$, let $\langle d^\gamma_a\ |\ a \in P_\kappa \lambda \rangle$ be a thin $A_\gamma$-effable $P_\kappa \lambda$-list for $\gamma < \lambda$.
Let $C^\gamma \subset P_\kappa \lambda$ be a club witnessing $\langle d^\gamma_a\ |\ a \in P_\kappa \lambda \rangle$ is thin.
Set $C \coloneqq \Delta_{\gamma < \lambda} C^\gamma$.
We may assume that for all $a \in C$ and all $\alpha_0, \alpha_1 < \lambda$
\begin{equation}\label{prop.I_IT.normal.eq1}
f(\alpha_0, \alpha_1) \in a \leftrightarrow \alpha_0, \alpha_1 \in a.
\end{equation}
For $a \in C \cap D$ set
\begin{equation*}
d_a \coloneqq f_{g(a)}'' d^{g(a)}_a,
\end{equation*}
and set $d_a \coloneqq \emptyset$ for $a \in P_\kappa \lambda - ( C \cap D )$.
If $c \in C$ and $a \in C \cap D$ are such that $c \subset a$ and $g(a) \notin c$, then
\begin{equation}\label{prop.I_IT.normal.eq2}
d_a \cap c = \emptyset.
\end{equation}
For if $g(a) \notin c$, then by~(\ref{prop.I_IT.normal.eq1}) we have $d_a \cap c = f_{g(a)}'' d^{g(a)}_a \cap c \subset \rng f_{g(a)} \cap c = \emptyset$.
Thus for fixed $c \in C$ we have $\{ d_a \cap c\ |\ c \subset a \in C \cap D \} \subset \{ f_\gamma'' d^\gamma_a \cap c\ |\ \gamma \in c,\ c \subset a \in C \cap A_\gamma \} \cup \{ \emptyset \}$.
For $\gamma \in c$ we have $c \in C^\gamma$ and thus, as $C^\gamma$ witnesses $\langle d^\gamma_a\ |\ a \in P_\kappa \lambda \rangle$ is thin, $| \{ d^\gamma_a \cap c\ |\ c \subset a \in C \cap A_\gamma \} | < \kappa$.
Therefore $| \{ d_a \cap c\ |\ c \subset a \in P_\kappa \lambda\} | < \kappa$, which shows $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is thin.
If $A_\gamma \in I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ for all $\gamma < \lambda$, let $\langle d^\gamma_a\ |\ a \in P_\kappa \lambda \rangle$ be a slender $A_\gamma$-effable $P_\kappa \lambda$-list for $\gamma < \lambda$.
Let $C^\gamma \subset P'_\kappa H_\theta$ be a club witnessing $\langle d^\gamma_a\ |\ a \in P_\kappa \lambda \rangle$ is slender, where $\theta$ is some large enough cardinal.
Set $C \coloneqq \Delta_{\gamma < \lambda} C^\gamma$.
We can again assume that for all $M \in C$ and $\alpha_0, \alpha_1 < \lambda$ $f(\alpha_0, \alpha_1) \in M \leftrightarrow \alpha_0, \alpha_1 \in M$.
In addition, we may require that
\begin{equation}\label{prop.I_IT.normal.eq3}
\langle M, \in, f \restriction (M \times M) \rangle \prec \langle H_\theta, \in, f \rangle
\end{equation}
for every $M \in C$.
As above we define $d_a \coloneqq f_{g(a)}'' d^{g(a)}_a$ for $a \in (C \restriction \lambda) \cap D$ and let $d_a \coloneqq \emptyset$ otherwise.
By the same argument that led to~(\ref{prop.I_IT.normal.eq2}), we have
\begin{equation}\label{prop.I_IT.normal.eq4}
d_a \cap b = \emptyset
\end{equation}
if $b \in P_\kappa \lambda$, $a \in (C \restriction \lambda) \cap D$, $b \subset a$, and $g(a) \notin b$.
To show $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is slender, let $M \in C$ and $b \in M \cap P_{\omega_1} \lambda$.
Set $a \coloneqq M \cap \lambda$.
If $M \notin D$, then $d_a \cap b \subset d_a = \emptyset \in M$, so assume $M \in D$.
Then $d_a \cap b = f_{g(a)}'' d^{g(a)}_a \cap b = f_{g(a)}'' ( d^{g(a)}_a \cap {f^{-1}_{g(a)}}'' b )$.
If $g(a) \notin b$, then by~(\ref{prop.I_IT.normal.eq4}) $d_a \cap b = \emptyset \in M$, so suppose $g(a) \in b$.
Then ${f^{-1}_{g(a)}}'' b = b$, so by the slenderness of $\langle d^{g(a)}_{\tilde{a}}\ |\ \tilde{a} \in P_\kappa \lambda \rangle$ we have $d^{g(a)}_a \cap {f^{-1}_{g(a)}}'' b \in M$.
Thus, as $g(a) \in b \subset M$, by~(\ref{prop.I_IT.normal.eq3}) $d_a \cap b = f_{g(a)}'' ( d^{g(a)}_a \cap {f^{-1}_{g(a)}}'' b ) \in M$.
In both cases we arrived at a $P_\kappa \lambda$-list $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ such that for a club $C \subset P_\kappa \lambda$ that is closed under $f$ and $f^{-1}$ we have
\begin{equation*}
d_a = f_{g(a)}'' d^{g(a)}_a
\end{equation*}
for every $a \in C \cap D$, and $d_a = \emptyset$ for $a \in P_\kappa \lambda - (C \cap D)$.
Suppose that $D \notin I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ for the thin case or $D \notin I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda]$ for the slender case.
Then there are $S \subset C \cap D$ stationary in $P_\kappa \lambda$ and $d \subset \lambda$ such that $d_a = d \cap a$ for all $a \in S$.
Since $g$ is regressive we may assume $S \subset A_\gamma$ for some $\gamma < \lambda$.
But then for $\tilde{d} \coloneqq {f^{-1}_\gamma}'' d$ and $a \in S$ it holds that
\begin{equation*}
d^\gamma_a
= {f^{-1}_\gamma}'' f_\gamma'' d^\gamma_a
= {f^{-1}_\gamma}'' d_a
= {f^{-1}_\gamma}'' (d \cap a)
= {f^{-1}_\gamma}'' d \cap {f^{-1}_\gamma}'' a
= \tilde{d} \cap a,
\end{equation*}
contradicting the fact that $\langle d^\gamma_a\ |\ a \in P_\kappa \lambda \rangle$ is $A_\gamma$-effable.
\end{proof}
It is standard to verify that if $\lambda < \lambda'$, then $I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda] \subset \{ A' \restriction \lambda\ |\ A' \in I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda'] \}$ and $I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda] \subset \{ A' \restriction \lambda\ |\ A' \in I_\ensuremath{\text{{\rm IS}}}[\kappa, \lambda'] \}$.
This implies the following proposition.
\begin{proposition}\label{prop.ITP_nach_unten}
Suppose $\lambda \leq \lambda'$.
Then $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda')$ implies $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$, and $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda')$ implies $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$.
\end{proposition}
It is easy to check $\cof \omega \cap \kappa \in I_\ensuremath{\text{{\rm IT}}}[\kappa, \kappa]$.
The following theorem is the two cardinal analog of this observation.
\begin{theorem}\label{theorem.omega_cofinal_effable}
Suppose $\cf \lambda \geq \kappa$.
Then
\begin{equation*}
\{ a \in P_\kappa \lambda\ |\ \Lim a \cap \cof \omega \subset a \} \in F_\ensuremath{\text{{\rm IT}}} [\kappa, \lambda].
\end{equation*}
\end{theorem}
\begin{proof}
Let $A \coloneqq \{ a \in P_\kappa \lambda\ |\ \exists \eta_a \in \Lim a - a\ \cf \eta_a = \omega \}$ and for $a \in A$ let $\eta_a$ be a witness.
For $\delta \in \cof \omega \cap \lambda$ let $\langle d^\delta_\nu\ |\ \nu < \tau_\delta \rangle$ be an enumeration of $\{ d \subset \delta\ |\ \otp d = \omega,\ \sup d = \delta \}$.
For $a \in P_\kappa \lambda$ and $\delta \in \Lim a \cap \cof \omega$ let
\begin{equation*}
\nu^\delta_a \coloneqq \min \{ \nu < \tau_\delta\ |\ \sup(d^\delta_\nu \cap a) = \delta \}.
\end{equation*}
For $a \in A$ set
\begin{equation*}
d_a \coloneqq d^{\eta_a}_{\nu^{\eta_a}_a} \cap a,
\end{equation*}
and for $a \in P_\kappa \lambda - A$ let $d_a \coloneqq \emptyset$.
Then $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is $A$-effable, for suppose there were a cofinal $U \subset A$ and a $d \subset \lambda$ such that $d_a = d \cap a$ for all $a \in U$.
Let $a \in U$.
Since $\cf \lambda \geq \kappa$ there exists $b \in U$ such that $a \cup \Lim a \subset b$.
But then $\otp (d_b \cap a) < \omega$, contradicting $d_b \cap a = d_a$.
$\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is also thin, for let $a \in P_\kappa \lambda$.
Let
\begin{equation*}
B_a \coloneqq \{ d^\delta_{\nu^\delta_a} \cap a\ |\ \delta \in \Lim a \cap \cof \omega \} \cup P_\omega a.
\end{equation*}
Then $|B_a| < \kappa$.
Let $b \in A$ with $a \subset b$, and suppose $d_b \cap a \notin P_\omega a$.
Since $a \subset b$, we have $\nu^\delta_b \leq \nu^\delta_a$ for all $\delta \in \Lim a \cap \cof \omega$.
Because $|d_b \cap a| = \omega$ we also have that $d^{\eta_b}_{\nu^{\eta_b}_b} \cap a = d_b \cap a$ is unbounded in $\eta_b$.
Therefore $\nu^{\eta_b}_a \leq \nu^{\eta_b}_b$, so that $\nu^{\eta_b}_a = \nu^{\eta_b}_b$.
But this means $d_b \cap a = d^{\eta_b}_{\nu^{\eta_b}_a} \cap a \in B_a$.
\end{proof}
When $\kappa$ is inaccessible, the filter $F_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$ has some additional simple but helpful properties.
These will be used in section~\ref{sect.consistency}.
\begin{proposition}\label{prop.I_IT_inaccessible}
Let $\kappa$ be inaccessible.
Then
\begin{equation*}
\{ a \in P'_\kappa \lambda\ |\ \text{$\kappa_a$ inaccessible} \} \in F_\ensuremath{\text{{\rm IT}}} [\kappa, \lambda].
\end{equation*}
\end{proposition}
\begin{proof}
As $\kappa$ is inaccessible, $\{ a \in P'_\kappa \lambda\ |\ \text{$\kappa_a$ strong limit} \}$ is club.
So it remains to show $A \coloneqq \{ a \in P'_\kappa \lambda\ |\ \text{$\kappa_a$ singular} \} \in I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$.
Suppose $A \notin I_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$, and for $a \in A$ let $d_a \subset a$ be such that $\sup d_a = \kappa_a$, $\otp d_a = \cf \kappa_a$.
Then there exists a stationary $S \subset A$ such that $d_a = d \cap a$ for all $a \in S$.
Since the map $a \mapsto \cf \kappa_a$ is regressive, we may assume $\cf \kappa_a = \delta$, and hence $\otp d_a = \delta$, for some $\delta < \kappa$ and all $a \in S$.
But if $a, b \in S$ are such that $a \subset b$ and $\kappa_a < \kappa_b$, then $\otp d_b > \delta$, a contradiction.
\end{proof}
\begin{proposition}\label{prop.I_IT_closure}
Let $\kappa$ be inaccessible.
Let $g: P_\kappa \lambda \to P_\kappa \lambda$.
Then
\begin{equation*}
\{ a \in P'_\kappa \lambda\ |\ \forall z \in P_{\kappa_a} a\ g(z) \subset a \} \in F_\ensuremath{\text{{\rm IT}}} [\kappa, \lambda].
\end{equation*}
\end{proposition}
\begin{proof}
Suppose not.
Then
\begin{equation*}
B \coloneqq \{ a \in P'_\kappa \lambda\ |\ \exists z_a \in P_{\kappa_a} a\ g(z_a) \not\subset a \} \notin I_\ensuremath{\text{{\rm IT}}} [\kappa, \lambda].
\end{equation*}
So let $S \subset B$ be stationary and $z \subset \lambda$ be such that $z_a = z \cap a$ for all $a \in S$.
For all $a \in S$ we have $\mu_a \coloneqq |z_a| < \kappa_a$, so there are a stationary $S' \subset S$ and $\mu < \kappa$ such that $\mu_a = \mu$ for all $a \in S'$.
Suppose $|z| > \mu$.
Then there is $y \subset z$ such that $|y| = \mu^+ < \kappa$.
But $S'' \coloneqq \{ a \in S'\ |\ y \subset a \}$ is stationary and for every $a \in S''$ we have $z_a = z \cap a \supset y \cap a = y$, which implies $\mu = \mu_a = |z_a| \geq |y| = \mu^+$, a contradiction.
Since $S'$ is cofinal, there is an $a \in S'$ such that $z \cup g(z) \subset a$.
But then $z_a = z \cap a = z$ and $g(z_a) = g(z) \subset a$, so that $a \notin B$, contradicting $S' \subset B$.
\end{proof}
\section{The failure of a weak version of square}\label{sect.weak_square}
We define a weak variant of the square principle that is natural for our application.
It is a ``threaded'' version of Schimmerling's two cardinal square principle that is only defined on a subset $E$ of $\lambda$.
\begin{definition}
A sequence $\langle \mathcal{C}_\alpha\ |\ \alpha \in \ensuremath{\text{{\rm Lim}}} \cap E \cap \lambda \rangle$ is called a
\emph{$\square_E(\kappa, \lambda)$-sequence} if it satisfies the following properties.
\begin{enumerate}[(i)]
\item $0 < |\mathcal{C}_\alpha| < \kappa$ for all $\alpha \in \ensuremath{\text{{\rm Lim}}} \cap E \cap \lambda$,
\item $C \subset \alpha$ is club for all $\alpha \in \ensuremath{\text{{\rm Lim}}} \cap E \cap \lambda$ and $C \in \mathcal{C}_\alpha$,
\item $C \cap \beta \in \mathcal{C}_\beta$ for all $\alpha \in \ensuremath{\text{{\rm Lim}}} \cap E \cap \lambda$,
$C \in \mathcal{C}_\alpha$ and $\beta \in \Lim C$,
\item there is no club $D \subset \lambda$ such that $D \cap \delta \in \mathcal{C}_\delta$
for all $\delta \in \Lim D \cap E \cap \lambda$.
\end{enumerate}
We say that \emph{$\square_E(\kappa, \lambda)$} holds if there exists a $\square_E(\kappa, \lambda)$-sequence.
\emph{$\square(\kappa, \lambda)$} stands for $\square_\lambda(\kappa, \lambda)$.
\end{definition}
Note that $\square_{\tau, <\kappa}$ implies $\square(\kappa, \tau^+)$
and that $\square(\lambda)$ is $\square(2, \lambda)$.
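For the reader's convenience we recall the short argument behind the first of these observations; it uses only the fact, which is part of the definition of $\square_{\tau, <\kappa}$, that every member of $\mathcal{C}_\alpha$ has order type at most $\tau$. A $\square_{\tau, <\kappa}$-sequence clearly satisfies conditions (i)--(iii) of a $\square(\kappa, \tau^+)$-sequence. For condition (iv), suppose $D \subset \tau^+$ were a club with $D \cap \delta \in \mathcal{C}_\delta$ for all $\delta \in \Lim D \cap \tau^+$. Since $\otp D = \tau^+$, there is $\delta \in \Lim D \cap \tau^+$ with $\otp (D \cap \delta) > \tau$, which contradicts the fact that $D \cap \delta \in \mathcal{C}_\delta$ has order type at most $\tau$. As for the second observation, in the case $\kappa = 2$ all the families $\mathcal{C}_\alpha$ are singletons, so $\square(2, \lambda)$ is precisely the usual principle $\square(\lambda)$.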
\begin{theorem}\label{theorem.ITP->non_square}
Suppose $\cf \lambda \geq \kappa$ and $\square_{\cof(<\kappa)}(\kappa, \lambda)$ holds.
Then $\ensuremath{\lnot} \ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$.
\end{theorem}
\begin{proof}
Let $A \coloneqq \{ a \in P_\kappa \lambda\ |\ \Lim a \cap \cof \omega \subset a \}$.
By Theorem~\ref{theorem.omega_cofinal_effable}, $A \in F_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda]$.
So it remains to show $A \in I_\ensuremath{\text{{\rm IT}}} [\kappa, \lambda]$.
We may assume $\sup a \notin a$ for all $a \in A$.
Let $\langle \mathcal{C}_\gamma\ |\ \gamma \in \ensuremath{\text{{\rm Lim}}} \cap \cof(<\kappa) \cap \lambda \rangle$ be a $\square_{\cof(<\kappa)}(\kappa, \lambda)$-sequence.
For $\gamma \in \ensuremath{\text{{\rm Lim}}} \cap \cof(<\kappa) \cap \lambda$ let $C_\gamma \in \mathcal{C}_\gamma$, and set $d_a \coloneqq C_{\sup a} \cap a$ for $a \in A$, otherwise $d_a \coloneqq \emptyset$.
Then, since $\Lim a \cap \cof \omega \subset a$,
\begin{equation}\label{theorem.square->nonITP.eq1}
\sup d_a = \sup a
\end{equation}
for every $a \in A$.
$\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is thin, for let $a \in P_\kappa \lambda$.
Set
\begin{equation*}
B_a \coloneqq \{ ( C \cap a ) \cup h\ |\ \exists \eta \in \Lim a\ C \in \mathcal{C}_\eta,\ h \in P_\omega a \} \cup P_\omega a.
\end{equation*}
Then $|B_a| < \kappa$.
Let $b \in A$, $a \subset b$, and suppose $d_b \cap a \notin P_\omega a$.
Let $\eta \coloneqq \max \Lim (d_b \cap a)$.
Then $\eta \in \Lim C_{\sup b}$, so there is a $C \in \mathcal{C}_\eta$ such that $d_b \cap \eta = C_{\sup b} \cap b \cap \eta = C \cap b$, so $d_b \cap a \cap \eta = C \cap a$.
Since $|d_b \cap a - \eta| < \omega$, this means $d_b \cap a = ( C \cap a ) \cup ( d_b \cap a - \eta ) \in B_a$.
$\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is also $A$-effable.
For suppose there were a cofinal $U \subset A$ and $d \subset \lambda$ such that $d_a = d \cap a$ for all $a \in U$.
Then $d$ is unbounded in $\lambda$ by~(\ref{theorem.square->nonITP.eq1}).
Let $\delta \in \Lim d \cap \cof (< \kappa) \cap \lambda$.
We will show $d \cap \delta \in \mathcal{C}_\delta$, which contradicts the fact that $\langle \mathcal{C}_\alpha\ |\ \alpha \in \ensuremath{\text{{\rm Lim}}} \cap \cof(<\kappa) \cap \lambda \rangle$ is a $\square_{\cof(<\kappa)}(\kappa, \lambda)$-sequence, thus finishing the proof.
For every $a \in U$ such that $\delta \in \Lim (d \cap a)$ we have $C_{\sup a} \cap a = d_a = d \cap a$, and thus $\delta \in \Lim C_{\sup a}$, so that there is a $C_a \in \mathcal{C}_\delta$ such that $d \cap a \cap \delta = C_a \cap a$.
But since $|\mathcal{C}_\delta| < \kappa$, there is a cofinal $U' \subset \{ a \in U\ |\ \delta \in \Lim (d \cap a) \}$ such that $C_a = C$ for some $C \in \mathcal{C}_\delta$ and all $a \in U'$.
But then we have $d \cap \delta \cap a = C \cap a$ for all $a \in U'$, which means $d \cap \delta = C \in \mathcal{C}_\delta$.
\end{proof}
As a corollary, we get a well-known result originally due to Solovay~\cite{solovay}.
\begin{corollary}\label{cor.supercompact->non_square}
Suppose $\kappa$ is supercompact.
Then $\ensuremath{\lnot} \square_{\cof(<\kappa)}(\kappa, \lambda)$ for all $\kappa \leq \lambda$ with $\cf \lambda \geq \kappa$.
In particular $\ensuremath{\lnot} \square(\lambda)$ for all $\lambda \geq \kappa$ with $\cf \lambda \geq \kappa$.
\end{corollary}
\begin{proof}
This follows directly from Theorem~\ref{theorem.ITP<->supercompact} and Theorem~\ref{theorem.ITP->non_square}.
\end{proof}
\section{Consistency results}\label{sect.consistency}
\begin{definition} Let $V\subseteq W$ be a pair of transitive models of\/ \ensuremath{\text{{\sf ZFC}}}.
\begin{itemize}
\item $(V,W)$ satisfies the $\mu$-covering property if the class $P_\mu^V V$ is cofinal in $P_\mu^W V$, that is, for every $x \in W$ with $x \subset V$ and $|x| < \mu$ there is $z \in P_\mu^V V$ such that $x \subset z$.
\item $(V,W)$ satisfies the $\mu$-approximation property if for all $x \in W$, $x \subset V$, it holds that if $x \cap z \in V$ for all $z \in P_\mu^V V$, then $x \in V$.
\end{itemize}
A forcing $\mathbb{P}$ is said to satisfy the $\mu$-covering property or the $\mu$-approximation property if for every $V$-generic $G \subset \mathbb{P}$ the pair $(V, V[G])$ satisfies the $\mu$-covering property or the $\mu$-approximation property respectively.
\end{definition}
The following theorem was originally discovered by Mitchell~\cite{mitchell}.
We cite~\cite{krueger}, where it is presented in the more modern way we use.
The reader should note we use the convention that conditions are only defined on their support.
\begin{theorem}\label{theorem.existence_iteration}
Let $\kappa$ be inaccessible, $\tau < \kappa$ be regular and uncountable.
Then there exists an iteration $\langle \mathbb{P}_\nu\ |\ \nu \leq \kappa \rangle$ such that forcing with $\mathbb{P}_\kappa$ preserves all cardinals less than or equal to $\tau$,
$\mathrel\|\joinrel\relbar_\kappa \kappa = \tau^+$ and for $\eta = 0$ and every inaccessible $\eta \leq \kappa$
\begin{enumerate}[(i)]
\item $\mathbb{P}_\eta$ is the direct limit of $\langle \mathbb{P}_\nu\ |\ \nu < \eta \rangle$ and $\eta$-cc,\label{theorem.approx-iteration.en1}
\item if\/ $\mathbb{P}_\kappa = \mathbb{P}_\eta * \dot{\mathbb{Q}}$, then $\mathrel\|\joinrel\relbar_\eta \dot{\mathbb{Q}}\ \text{satisfies the $\omega_1$-approximation property}$,\label{theorem.approx-iteration.en3}
\item for every $\nu < \eta$, $\mathbb{P}_\nu$ is definable in $H_\eta$ from the parameters $\tau$ and $\nu$,\label{theorem.approx-iteration.en2}
\item $\mathbb{P}_\eta$ satisfies the $\omega_1$-covering property.\label{theorem.approx-iteration.en4}
\end{enumerate}
\end{theorem}
The next lemma is standard; we will need it below.
\begin{lemma}\label{lemma.iteration_earlier}
Let $\kappa > \omega$ be regular, $\mathbb{P}_\kappa$ be the direct limit of an iteration $\langle \mathbb{P}_\nu\ |\ \nu < \kappa \rangle$.
Suppose $\mathbb{P}_\kappa$ is $\kappa$-cc.
Let $p \in \mathbb{P}_\kappa$ and $\dot{x} \in V^{\mathbb{P}_\kappa}$ be such that $p \mathrel\|\joinrel\relbar \dot{x} \in P_\kappa V$.
Then there is $\rho < \kappa$ such that $p \mathrel\|\joinrel\relbar \dot{x} \in V[\dot{G}_\rho]$.
\end{lemma}
Recall from \cite{jech.combinatorial_problems} that $\kappa$ is called \emph{$\lambda$-ineffable} if every $P_\kappa \lambda$-list has an ineffable branch.
\begin{theorem}\label{theorem.ISP_consistency}
Let $\kappa$, $\lambda$ be cardinals,
$\tau$ regular uncountable, $\tau < \kappa \leq \lambda$, and
$\langle \mathbb{P}_\nu\ |\ \nu \leq \kappa \rangle$ be an iteration such that for all inaccessible $\eta \leq \kappa$
\begin{enumerate}[(i)]
\item $\mathbb{P}_\eta$ is the direct limit of $\langle \mathbb{P}_\nu\ |\ \nu < \eta \rangle$ and $\eta$-cc,\label{theorem.ISP_consistency.en1}
\item if\/ $\mathbb{P}_\kappa = \mathbb{P}_\eta * \dot{\mathbb{Q}}$, then $\mathrel\|\joinrel\relbar_\eta \dot{\mathbb{Q}}\ \text{satisfies the $\omega_1$-approximation property}$,\label{theorem.ISP_consistency.en3}
\item for every $\nu < \eta$, $\mathbb{P}_\nu$ is definable in $H_\eta$ from the parameters $\tau$ and $\nu$,\label{theorem.ISP_consistency.en2}
\item $\mathbb{P}_\eta$ satisfies the $\omega_1$-covering property.\label{theorem.ISP_consistency.en4}
\end{enumerate}
Suppose $\kappa$ is $\lambda^{<\kappa}$-ineffable.
Then $\mathrel\|\joinrel\relbar_\kappa \text{$\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$}$.
\end{theorem}
\begin{proof}
Let $G \subset \mathbb{P}_\kappa$ be $V$-generic and work in $V[G]$.
Let $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ be a slender $P_\kappa \lambda$-list, and let $C' \subset P_\kappa H_\theta$ be a club witnessing the slenderness of $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ for some large enough $\theta$.
For $x \in P_\kappa \lambda$ by Lemma~\ref{lemma.iteration_earlier} there is $\rho_x < \kappa$ such that $x \in V[G_{\rho_x}]$.
Thus $C \coloneqq \{ M \in C'\ |\ \forall x \in P_\kappa \lambda \cap M\ \rho_x \in M \}$ is such that $P_\kappa \lambda \cap M \subset V[G_{\kappa_M}]$ for all $M \in C$.
Let $\sigma \coloneqq (\lambda^{<\kappa})^V$.
Let $\bar{M} \in V$ be such that $\bar{M} \prec H_\theta^V$, $\lambda \cup P^V_\kappa \lambda \subset \bar{M}$, $|\bar{M}|^V = \sigma$.
Let $C_0 \coloneqq C \restriction \bar{M}$.
Since $\mathbb{P}_\kappa$ is $\kappa$-cc, there is a $C_1 \in V$ such that $C_1 \subset C_0$ and $V \models C_1 \subset P_\kappa \bar{M}\ \text{club}$.
Let
\begin{equation*}
E \coloneqq \{ M \in C_1\ |\ \tau < \kappa_M,\ \kappa_M\ \text{inaccessible in $V$},\
P^V_\tau (M \cap \lambda) \subset M\}.
\end{equation*}
\begin{claim}\label{consistency_claim1}
If $M \in E$, then $d_{M \cap \lambda} \in V[G_{\kappa_M}]$.
\end{claim}
\begin{claimproof}
Let $z \in P^{V[G_{\kappa_M}]}_{\omega_1} (M \cap \lambda)$.
$\mathbb{P}_{\kappa_M}$ satisfies the $\omega_1$-covering property by~(\ref{theorem.ISP_consistency.en4}), so there is $b \in P^V_{\omega_1} (M \cap \lambda)$ such that $z \subset b$.
Let $M' \in C$ be such that $M = M' \cap \bar{M}$.
Then $b \in M \subset M'$.
Therefore, by the slenderness of $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$, $d_{M \cap \lambda} \cap b = d_{M' \cap \lambda} \cap b \in P_\kappa \lambda \cap M' \subset V[G_{\kappa_{M'}}] = V[G_{\kappa_M}]$ and thus $d_{M \cap \lambda} \cap z = d_{M \cap \lambda} \cap b \cap z \in V[G_{\kappa_M}]$.
Let $\mathbb{P}_\kappa = \mathbb{P}_{\kappa_M} * \dot{\mathbb{Q}}$.
Then $\dot{\mathbb{Q}}^{G_{\kappa_M}}$ satisfies the $\omega_1$-approximation property by~(\ref{theorem.ISP_consistency.en3}), so since $z$ was arbitrary we get $d_{M \cap \lambda} \in V[G_{\kappa_M}]$.
\end{claimproof}
For $M \in E$ we have $\mathbb{P}_{\kappa_M} \subset M$ by~(\ref{theorem.ISP_consistency.en1}) and~(\ref{theorem.ISP_consistency.en2}).
By Claim~\ref{consistency_claim1} there is $\dot{d}_M \in V^{\mathbb{P}_{\kappa_M}}$ such that $\dot{d}_M^{G_{\kappa_M}} = d_{M \cap \lambda}$.
Let
\begin{equation*}
D_M \coloneqq \{ \langle p, \alpha, n \rangle\ |\ p \in \mathbb{P}_{\kappa_M},\ \alpha \in M \cap \lambda,\
(n = 0 \land p \mathrel\|\joinrel\relbar_{\kappa_M} \alpha \notin \dot{d}_M) \lor (n = 1 \land p \mathrel\|\joinrel\relbar_{\kappa_M} \alpha \in \dot{d}_M) \}.
\end{equation*}
Then $\langle D_M\ |\ M \in E \rangle \in V$ and $D_M \subset M$.
Work in $V$.
Let $f: \bar{M} \to \sigma$ be a bijection.
If $\lambda > \kappa$, additionally choose $f$ such that $f \restriction \kappa = \ensuremath{\text{{\rm id}}} \restriction \kappa$.
If $\kappa = \lambda$, then $\{ M \in C_1\ |\ f''M = \kappa_M \}$ is club, and we may assume it is $C_1$.
By Propositions~\ref{prop.I_IT_inaccessible} and~\ref{prop.I_IT_closure}
\begin{equation*}
F \coloneqq \{ m \in P'_\kappa \sigma\ |\ \kappa_m\ \text{inaccessible},\
P_\tau (m \cap f'' \lambda) \subset m \} \in F_\ensuremath{\text{{\rm IT}}}[\kappa, \sigma].
\end{equation*}
As $\kappa$ is $\sigma$-ineffable, there exist a stationary $S' \subset F$ and $d' \subset \sigma$ such that $f''D_{{f^{-1}}''m} = d' \cap m$ for all $m \in S'$ such that ${f^{-1}}''m \in E$.
But $E = \{ {f^{-1}}''m\ |\ m \in F \} \cap C_1$ by our choice of $f$ or the additional assumption on $C_1$, so for $S \coloneqq \{ {f^{-1}}''m\ |\ m \in S' \cap F \} \cap C_1$ and for $D \coloneqq {f^{-1}}''d'$ we have $D_M = D \cap M$ for all $M \in S$.
Back in $V[G]$, let $T \coloneqq S \restriction \lambda$ and
\begin{equation*}
d \coloneqq \{ \alpha < \lambda\ |\ \exists p \in G\ \langle p, \alpha, 1 \rangle \in D \}.
\end{equation*}
\begin{claim}\label{theorem.ISP_consistency.claim3}
If $a \in T$, then $d_a = d \cap a$.
\end{claim}
\begin{claimproof}
If $a \in T$, then $a = M \cap \lambda$ for some $M \in S$.
But then for $\alpha \in a$, if $\alpha \in d_a = d_{M \cap \lambda} = \dot{d}_M^{G_{\kappa_M}}$, then there is $p \in G_{\kappa_M}$ such that $p \mathrel\|\joinrel\relbar_{\kappa_M} \alpha \in \dot{d}_M$.
Thus $\langle p, \alpha, 1 \rangle \in D_M = D \cap M$, so that $\alpha \in d$ by the definition of $d$.
By the same argument, if $\alpha \notin d_a$, then $\alpha \notin d$.
\end{claimproof}
$T$ is stationary in $V$, so it is also stationary in $V[G]$ since $\mathbb{P}_\kappa$ is $\kappa$-cc.
Therefore, by Claim~\ref{theorem.ISP_consistency.claim3}, $\langle d_a\ |\ a \in P_\kappa \lambda \rangle$ is not effable.
\end{proof}
Note that if $\kappa$ is $\lambda$-ineffable and $\cf \lambda \geq \kappa$, then by \cite{johnson} it follows that $\lambda^{<\kappa} = \lambda$.
So in this case, Theorem~\ref{theorem.ISP_consistency} shows $\ensuremath{\text{{\sf ISP}}}(\kappa, \lambda)$ is forced from the more natural condition that $\kappa$ is $\lambda$-ineffable.
\begin{corollary}\label{corollary.consistency_subtle_ineffable}
If the theory \ensuremath{\text{{\sf ZFC}}}\ + ``there is an ineffable cardinal'' is consistent, then the theory \ensuremath{\text{{\sf ZFC}}}\ + $\ensuremath{\text{{\sf ISP}}}(\omega_2, \omega_2)$ is consistent.
\end{corollary}
\begin{proof}
Taking $\tau = \omega_1$, this follows immediately from Theorem~\ref{theorem.existence_iteration}, Theorem~\ref{theorem.ISP_consistency}, and Remark~\ref{remark.inaccessible->thin}.
\end{proof}
\begin{corollary}\label{corollary.consistency_supercompact}
If the theory \ensuremath{\text{{\sf ZFC}}}\ + ``there exists a supercompact cardinal'' is consistent, then the theory \ensuremath{\text{{\sf ZFC}}}\ + ``$\ensuremath{\text{{\sf ISP}}}(\omega_2, \lambda)$ holds for every $\lambda \geq \omega_2$'' is consistent.
\end{corollary}
\begin{proof}
This follows from Theorems~\ref{theorem.existence_iteration},~\ref{theorem.ISP_consistency}, and~\ref{theorem.ITP<->supercompact}.
\end{proof}
In Corollaries~\ref{corollary.consistency_subtle_ineffable} and~\ref{corollary.consistency_supercompact}, $\omega_2$ only serves as the minimal cardinal for which the theorems hold true.
One can of course take successors of larger regular cardinals instead.
It is worth noting that, when the Mitchell forcing from Theorem~\ref{theorem.existence_iteration} is used, Corollary~\ref{corollary.consistency_supercompact} and, in the case $\cf \lambda \geq \kappa$, Theorem~\ref{theorem.ISP_consistency} are best possible, as the next theorem shows.
Its proof can be found in \cite[Theorem 2.3.5]{diss} or \cite{joint}, where similar ``pull back'' theorems are used in a more general setting.
\begin{theorem}\label{theorem.ITP_consistency_additional}
Let $V \subset W$ be a pair of models of\/ \ensuremath{\text{{\sf ZFC}}}\ that satisfies the $\kappa$-covering property and the $\tau$-approximation property for some $\tau < \kappa$, and suppose $\kappa$ is inaccessible in $V$.
Then
\begin{equation*}
P^W_\kappa \lambda - P^V_\kappa \lambda \in I^W_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda],
\end{equation*}
which furthermore implies
\begin{equation*}
F_\ensuremath{\text{{\rm IT}}}^V[\kappa, \lambda] \subset F^W_\ensuremath{\text{{\rm IT}}}[\kappa, \lambda].
\end{equation*}
So in particular, if $W \models \ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$, then $V \models \ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$.
\end{theorem}
We proceed to give lower bounds on the consistency strength of our combinatorial principles.
We first consider the one cardinal variant, showing that Corollary~\ref{corollary.consistency_subtle_ineffable} is best possible.
The next lemma is usually only given in its weaker version where $\kappa$ is required to be weakly compact.
\begin{lemma}\label{lemma.initial_segments_in_L}
Suppose $\kappa$ is regular uncountable and the tree property holds for $\kappa$.
Let $A \subset \kappa$.
If $A \cap \alpha \in L$ for all $\alpha < \kappa$, then $A \in L$.
\end{lemma}
\begin{proof}
Let $\delta \coloneqq \kappa + \omega$.
By~\cite[Proposition~5.3]{mitchell}, $\kappa$ is inaccessible in $L[A]$.
By the usual argument, one proves there exists a nonprincipal $\kappa$-complete ultrafilter $U$ on $P^{L[A]} \kappa \cap L_\delta[A]$, see~\cite[Proof of Theorem~5.9]{mitchell}.
Let $M$ be the transitive collapse of the internal ultrapower of $L_\delta[A]$ by $U$, and let $j: L_\delta[A] \to M$ be the corresponding embedding.
Then $j$ has critical point $\kappa$.
As $L_\delta[A] \models V = L[A]$, we have $M \models V = L[j(A)]$, so $M = L_\gamma[j(A)]$ for some limit ordinal $\gamma \geq \delta$.
It holds that $L_\delta[A] \models \forall \alpha < \kappa\ A \cap \alpha \in L$, so $L_\gamma[j(A)] \models \forall \alpha < j(\kappa)\ j(A) \cap \alpha \in L$, so in particular $L_\gamma[j(A)] \models A = j(A) \cap \kappa \in L$.
Therefore really $A \in L$.
\end{proof}
\begin{theorem}\label{theorem.ITP->ineffable_in_L}
Suppose $\kappa$ is regular and uncountable.
If\/ $\ensuremath{\text{{\sf ITP}}}(\kappa, \kappa)$ holds, then $L \models \kappa$ is ineffable.
\end{theorem}
\begin{proof}
Again by~\cite[Proposition~5.3]{mitchell}, $\kappa$ is inaccessible in $L$.
Let $\langle d_\alpha\ |\ \alpha < \kappa \rangle \in L$ be such that $d_\alpha \subset \alpha$ for all $\alpha < \kappa$.
Then $\{ d_\alpha \cap \beta\ |\ \alpha < \kappa \} \subset P^L\beta$ for every $\beta < \kappa$.
So $\langle d_a\ |\ a \in P_\kappa \kappa \rangle$, where $d_a = \emptyset$ if $a \notin \kappa$, is thin as $|P^L\beta| < \kappa$.
Thus by $\ensuremath{\text{{\sf ITP}}}(\kappa, \kappa)$ there is a $d \subset \kappa$ such that $d_\alpha = d \cap \alpha$ for stationarily many $\alpha < \kappa$.
This also means $d \cap \gamma \in L$ for all $\gamma < \kappa$.
Therefore $d \in L$ by Lemma~\ref{lemma.initial_segments_in_L}.
Since $\{\alpha < \kappa\ |\ d_\alpha = d \cap \alpha \} \in L$ is also stationary in $L$, the proof is finished.
\end{proof}
The best known lower bounds for the consistency strength of $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda)$ are derived from the failure of square.
The following theorem is due to Jensen, Schimmerling, Schindler, and Steel~\cite{jensen.schimmerling.schindler.steel}.
\begin{theorem}\label{theorem.strong_and_woodin}
Suppose $\lambda \geq \omega_3$ is regular such that $\eta^\omega < \lambda$ for all $\eta < \lambda$.
If $\ensuremath{\lnot} \square(\lambda)$ and $\ensuremath{\lnot} \square_\lambda$, then there exists a sharp for a proper class model with a proper class of strong cardinals and a proper class of Woodin cardinals.
\end{theorem}
\begin{corollary}\label{corollary.consistency-implication}
The consistency of\/ \ensuremath{\text{{\sf ZFC}}}\ + ``there is a $\kappa^+$-ineffable cardinal $\kappa$'' implies the consistency of\/ \ensuremath{\text{{\sf ZFC}}}\ + ``there is a proper class of strong cardinals and a proper class of Woodin cardinals.''
\end{corollary}
\begin{proof}
If $\kappa$ is $\kappa^+$-ineffable, then it is inaccessible and thus $\eta^\omega < \kappa$ for all $\eta < \kappa$.
By Proposition~\ref{prop.ITP_nach_unten}, $\ensuremath{\text{{\sf ITP}}}(\kappa, \kappa)$ holds.
By Theorem~\ref{theorem.ITP->non_square}, $\ensuremath{\text{{\sf ITP}}}(\kappa, \kappa)$ and $\ensuremath{\text{{\sf ITP}}}(\kappa, \kappa^+)$ imply $\ensuremath{\lnot} \square(\kappa)$ and $\ensuremath{\lnot} \square(\kappa^+)$; since $\square_\kappa$ implies $\square(\kappa^+)$, the latter also yields $\ensuremath{\lnot} \square_\kappa$. So by Theorem~\ref{theorem.strong_and_woodin} there is an inner model with a proper class of strong cardinals and a proper class of Woodin cardinals.
\end{proof}
\begin{corollary}
Suppose $\kappa$ is regular uncountable and $\lambda \geq \omega_3$ is such that $\cf \lambda \geq \kappa$ and $\eta^\omega < \lambda$ for all $\eta < \lambda$.
If\/ $\ensuremath{\text{{\sf ITP}}}(\kappa, \lambda^+)$ holds, then there exists an inner model with a proper class of strong cardinals and a proper class of Woodin cardinals.
\end{corollary}
\begin{proof}
This follows from Proposition~\ref{prop.ITP_nach_unten}, Theorem~\ref{theorem.ITP->non_square}, and Theorem~\ref{theorem.strong_and_woodin}.
\end{proof}
\section{Conclusion}
The reader will have noted that one could also define principles corresponding to $\lambda$-almost ineffability.
However, by~\cite{carr} $\lambda$-ineffability and $\lambda$-almost ineffability both characterize supercompactness, so that considering these principles does not seem to give any new insights.
The main motivation behind the principles we considered is of course the quest for an inner model for a supercompact cardinal.
So far the most interesting applications of the principles can be found in~\cite{joint}, which shows the following.
Suppose $\kappa$ is an inaccessible cardinal and $\mathbb{P}$ is an iteration of forcings of size less than $\kappa$ that takes direct limits stationarily often.
If $\mathbb{P}$ forces \ensuremath{\text{{\sf PFA}}}\ and $\kappa = \omega_2$, then $\kappa$ is strongly compact.
If $\mathbb{P}$ is additionally required to be proper, then $\kappa$ is necessarily supercompact.
As this is the only known means of constructing models of \ensuremath{\text{{\sf PFA}}}\ from large cardinal assumptions, it gives strong heuristic evidence on the lower bound of the consistency strength of \ensuremath{\text{{\sf PFA}}}.
\ifthenelse{\boolean{usemicrotype}}{\microtypesetup{spacing=false}}{}
\end{document}
\begin{document}
\RUNAUTHOR{Chen, Kuhn, and Wiesemann}
\RUNTITLE{Data-Driven Chance Constrained Programs over Wasserstein Balls}
\TITLE{Data-Driven Chance Constrained Programs \\ over Wasserstein Balls}
\ARTICLEAUTHORS{
\AUTHOR{Zhi Chen}
\AFF{College of Business, City University of Hong Kong, Kowloon Tong, Hong Kong, \\ [email protected]}
\AUTHOR{Daniel Kuhn}
\AFF{Risk Analytics and Optimization Chair, \'{E}cole Polytechnique F\'{e}d\'{e}rale de Lausanne, Lausanne, Switzerland, \\ [email protected]}
\AUTHOR{Wolfram Wiesemann}
\AFF{Imperial College Business School, Imperial College London, London, United Kingdom, \\ [email protected]}
}
\ABSTRACT{We provide an exact deterministic reformulation for data-driven chance constrained programs over Wasserstein balls. For individual chance constraints as well as joint chance constraints with right-hand side uncertainty, our reformulation amounts to a mixed-integer conic program. In the special case of a Wasserstein ball with the $1$-norm or the $\infty$-norm, the cone is the nonnegative orthant, and the chance constrained program can be reformulated as a mixed-integer linear program. Our reformulation compares favourably to several state-of-the-art data-driven optimization schemes in our numerical experiments.
}
\KEYWORDS{Distributionally robust optimization; ambiguous chance constraints; Wasserstein distance.}
\HISTORY{\today}
\maketitle
\section{Introduction}
Distributionally robust optimization is a powerful modeling paradigm for optimization under uncertainty, where the distribution of the uncertain problem parameters is itself uncertain, and where the performance of a decision is assessed in view of the worst-case distribution from a prescribed ambiguity set. The earlier literature on distributionally robust optimization has focused on moment ambiguity sets which contain all distributions that obey certain (standard or generalized) moment conditions; see, {\em e.g.}, \citet{Delage_Ye_2010}, \citet{Goh_Sim_2010} and \citet{Wiesemann_Kuhn_Sim_2014}. \citet{Pflug_Wozabal_2007} were the first to propose an ambiguity set of the form of a ball in the space of distributions with respect to the celebrated Wasserstein, Kantorovich or optimal transport distance. The type-1 Wasserstein distance $d_{\rm W}(\mathbb{P}_1,\mathbb{P}_2)$ between two distributions $\mathbb{P}_1$ and $\mathbb{P}_2$ on $\mathbb{R}^K$, equipped with a general norm $\|\cdot\|$, is defined as the minimal transportation cost of moving $\mathbb{P}_1$ to $\mathbb{P}_2$ under the premise that the cost of moving a Dirac point mass from $\bm\xi_1$ to $\bm\xi_2$ amounts to $\|\bm{\xi}_1 - \bm{\xi}_2\|$. Mathematically, this implies that
$$
\begin{array}{rcl}
d_{\rm W}(\mathbb{P}_1,\mathbb{P}_2) \; = \; &\displaystyle \inf_{\mathbb{P} \in \mathcal{P}(\mathbb{P}_1, \mathbb{P}_2)}
& \mathbb{E}_{\mathbb{P}}[\|\bmt{\xi}_1 - \bmt{\xi}_2\|] ,
\end{array}
$$
where $\bmt{\xi}_1 \sim \mathbb{P}_1, \bmt{\xi}_2 \sim \mathbb{P}_2$, and $\mathcal{P}(\mathbb{P}_1, \mathbb{P}_2)$ represents the set of all distributions on $\mathbb{R}^K\times \mathbb{R}^K$ with marginals $\mathbb{P}_1$ and $\mathbb{P}_2$. The Wasserstein ambiguity set $\mathcal{F}(\theta)$ is then defined as a ball of radius $\theta\ge 0$ with respect to the Wasserstein distance, centered at a prescribed reference distribution~$\hat{\mathbb{P}}$:
\begin{equation}\label{set:Wasserstein}
\mathcal{F}(\theta) = \{\mathbb{P} \in \mathcal{P}(\mathbb{R}^K) \mid
d_{\rm W}(\mathbb{P}, \hat{\mathbb{P}}) \leq \theta\}.
\end{equation}
One can think of the Wasserstein radius $\theta$ as a budget on the transportation cost. Indeed, any member distribution in $\mathcal{F}(\theta)$ can be obtained by rearranging the reference distribution $\hat{\mathbb{P}}$ at a transportation cost of at most $\theta$. If only a finite training dataset $\{\bmh{\xi}_i\}_{i \in [N]}$ is available, a natural choice for $\hat{\mathbb{P}}$ is the empirical distribution $\hat{\mathbb{P}} = \frac{1}{N}\sum_{i = 1}^N \delta_{\bmh{\xi}_i}$, which represents the uniform distribution on the training samples. Throughout the paper, we will assume that $\hat{\mathbb{P}}$ is the empirical distribution.
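For illustration only, the following Python sketch (ours, not part of the formal development; the function name and the use of \texttt{numpy} and \texttt{scipy} are our own choices) computes the type-1 Wasserstein distance between two discrete distributions by solving the underlying transportation linear program.
\begin{verbatim}
# Illustrative sketch: type-1 Wasserstein distance between two discrete
# distributions, obtained by solving the transportation LP directly.
import numpy as np
from scipy.optimize import linprog

def wasserstein_1(xi1, p1, xi2, p2, norm_ord=2):
    """d_W between sum_i p1[i]*delta_{xi1[i]} and sum_j p2[j]*delta_{xi2[j]}."""
    n, m = len(p1), len(p2)
    # transportation cost c[i, j] = ||xi1[i] - xi2[j]||
    cost = np.array([[np.linalg.norm(x - y, ord=norm_ord) for y in xi2]
                     for x in xi1])
    # variables: transport plan pi[i, j] >= 0, flattened row by row
    A_rows = np.kron(np.eye(n), np.ones((1, m)))   # sum_j pi[i, j] = p1[i]
    A_cols = np.kron(np.ones((1, n)), np.eye(m))   # sum_i pi[i, j] = p2[j]
    res = linprog(cost.ravel(),
                  A_eq=np.vstack([A_rows, A_cols]),
                  b_eq=np.concatenate([p1, p2]),
                  bounds=(0, None), method="highs")
    return res.fun

# Empirical distribution on three samples versus a copy shifted by 0.5:
xi_hat = np.array([[0.0], [1.0], [2.0]])
p_hat = np.ones(3) / 3
print(wasserstein_1(xi_hat, p_hat, xi_hat + 0.5, p_hat))  # approximately 0.5
\end{verbatim}
In particular, comparing the returned value with $\theta$ checks whether a given discrete distribution belongs to the Wasserstein ball $\mathcal{F}(\theta)$ around the empirical distribution; for one-dimensional data the same value can also be obtained with \texttt{scipy.stats.wasserstein\_distance}.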
While it has been recognized early on that Wasserstein ambiguity sets offer many conceptual advantages (\emph{e.g.}, their member distributions do not need to be absolutely continuous with respect to $\hat{\mathbb{P}}$ and, if properly calibrated, they constitute confidence regions for the unknown true data-generating distribution), it was believed that they almost invariably lead to hard global optimization problems. Recently, \citet{Esfahani_Kuhn_2017} and \citet{Zhao_Guan_2018} discovered that many interesting distributionally robust optimization problems over Wasserstein ambiguity sets can actually be reformulated as tractable convex programs---provided that $\hat{\mathbb{P}}$ is discrete and that the problem's objective function satisfies certain convexity properties. These reformulations have subsequently been generalized to Polish spaces and non-discrete reference distributions by \citet{blanchet2019quantifying} and \citet{Gao_Kleywegt_2016}. Since then, distributionally robust optimization models over Wasserstein ambiguity sets have been proposed for many applications, including transportation (\citealt{carlsson2018wasserstein}) and machine learning (\citealt{blanchet2019robust}, \citealt{gao2017distributional}, \citealt{shafieezadeh2019regularization} and \citealt{sinha2017certifiable}).
In this paper we study distributionally robust chance constrained programs of the form
\begin{equation}\label{prob:cc general}
\begin{array}{cll}
\displaystyle \min_{\bm{x} \in \mathcal{X}} &~\bm{c}^\top\bm{x} \\
{\rm s.t.} &~\displaystyle \mathbb{P}[\bmt{\xi} \in \mathcal{S}(\bm{x})] \geq 1-\varepsilon &~\forall \mathbb{P} \in \mathcal{F}(\theta),
\end{array}
\end{equation}
where the goal is to find a decision $\bm{x}$ from within a compact polyhedron $\mathcal{X} \subseteq \mathbb{R}^L$ that minimizes a linear cost function $\bm{c}^\top\bm{x}$ and ensures that the exogenous random vector $\bmt{\xi}$ falls within a decision-dependent safety set $\mathcal{S}(\bm{x}) \subseteq \mathbb{R}^K$ with high probability $1-\varepsilon$ under every distribution $\mathbb{P} \in \mathcal{F}(\theta)$. Since the reference distribution $\hat{\mathbb{P}}$ in~\eqref{prob:cc general} is the empirical distribution over the training dataset $\{\bmh{\xi}_i\}_{i \in [N]}$, we refer to~\eqref{prob:cc general} as a \emph{data-driven} chance constrained program.
To date, the literature on data-driven chance constraints has focused primarily on variants of problem~\eqref{prob:cc general} where the Wasserstein ambiguity set $\mathcal{F} (\theta)$ is replaced with an ambiguity set $\mathcal{G} (\theta)$ that contains all distributions close to the empirical distribution $\hat{\mathbb{P}}$ with respect to a $\phi$-divergence (such as the Kullback-Leibler divergence or the $\chi^2$-distance):
\begin{equation*}
\mathcal{G}(\theta) = \bigg\{\mathbb{P} \in \mathcal{P}(\mathbb{R}^K) ~\bigg|~\mathbb{P}\ll \hat{\mathbb{P}}, \;\;
\int_{\mathbb{R}^K} \phi\bigg(\dfrac{{\rm d}\mathbb{P}(\bm{\xi})}{{\rm d}\hat{\mathbb{P}}(\bm{\xi})}\bigg){\rm d}\hat{\mathbb{P}}(\bm{\xi}) \leq \theta \bigg.\bigg\},
\end{equation*}
where $\phi: \mathbb{R}_+ \rightarrow \mathbb{R}$ is the divergence function. \cite{Hu_Hong_2013} show that a distributionally robust chance constrained program over a Kullback-Leibler ambiguity set reduces to a classical chance constrained program over the reference distribution $\hat{\mathbb{P}}$ and an adjusted risk threshold $\varepsilon' < \varepsilon$. While this result holds for any reference distribution, $\phi$-divergence ambiguity sets only contain distributions that are absolutely continuous with respect to $\hat{\mathbb{P}}$, that is, any distribution in $\mathcal{G} (\theta)$ only assigns positive probability to those measurable subsets $A \subseteq \mathbb{R}^K$ for which $\hat{\mathbb{P}} [\tilde{\bm{\xi}} \in A] > 0$. This is undesirable for problems with a large dimension $K$ and/or few training data, where it is unlikely that every possible value of $\tilde{\bm{\xi}}$ has been observed in $\{\bmh{\xi}_i\}_{i \in [N]}$. This shortcoming is addressed by \cite{Jiang_Guan_2016, jiang2018risk}, who replace the reference distribution with a kernel density estimator.
Despite their tremendous success and widespread adoption in recent years, the use of $\phi$-divergences can lead to undesirable side effects in some applications: they compare distributions on a ``scenario-by-scenario" basis and thus do not consider the possibility of noisy measurements \citep{Gao_Kleywegt_2016}, and they generically fail to be probability metrics as they typically violate symmetry as well as the triangle inequality. Moreover, as we show next, $\phi$-divergence ambiguity sets may be overly optimistic when only few training samples are available.
\noindent \textbf{Motivating Example.}
Consider the arguably simplest instance of the data-driven optimization problem~\eqref{prob:cc general}, which estimates the worst-case value-at-risk $\sup_{\mathbb{P} \in \mathcal{F} (\theta)} \, \mathbb{P}\text{-VaR}_\varepsilon (\tilde{\xi})$ of a scalar random variable $\tilde{\xi}$ at level $\varepsilon$ from a limited set of i.i.d.~training samples $\{ \hat{\xi}_i \}_{i = 1}^N$ of $\tilde{\xi}$ under the unknown data-generating distribution $\mathbb{P}_0$ that are summarized by the empirical distribution $\hat{\mathbb{P}} = \frac{1}{N}\sum_{i = 1}^N \delta_{\hat{\xi}_i}$ at the centre of the Wasserstein ball $\mathcal{F} (\theta)$. To avoid technicalities, we assume that $\mathbb{P}_0$ is atomless. In addition, with $N_\dag = \lfloor (1-\varepsilon) N \rfloor$ and $N^\dag = \lceil (1-\varepsilon)N \rceil$ we define a distribution
\begin{equation*}
\mathbb{P}^\dag
\; = \;
\frac{1}{N} \sum_{i = 1}^{N_\dag} \delta_{\hat{\xi}_{(i)}} + \frac{(1 - \varepsilon) N - N_\dag}{N} \delta_{\hat{\xi}_{(N^\dag)}} + \frac{N^\dag - (1 - \varepsilon) N}{N} \delta_{\hat{\xi}_{(N^\dag)} + \theta / \varepsilon} + \frac{1}{N}\sum_{i = N^\dag +1}^N \delta_{\hat{\xi}_{(i)} + \theta / \varepsilon}
\end{equation*}
to be used subsequently. Here, $\hat{\xi}_{(j)}$ denotes the $j$-th order statistic of the training samples $\{ \hat{\xi}_i \}_{i = 1}^N$.
The \emph{reliability} of the aforementioned worst-case value-at-risk, that is, the probability that it weakly exceeds the unknown true value-at-risk $\mathbb{P}_0\text{-VaR}_\varepsilon (\tilde{\xi})$, can be bounded from \emph{below} by
\begin{align*}
\mathbb{P}_0^N \left[ \sup_{\mathbb{P} \in \mathcal{F} (\theta)} \, \mathbb{P}\text{-VaR}_\varepsilon (\tilde{\xi}) \; \geq \; \mathbb{P}_0\text{-VaR}_{\varepsilon} (\tilde{\xi}) \right]
\;\; &\geq \;\;
\mathbb{P}^N_0 \left[ \mathbb{P}^\dag\text{-VaR}_\varepsilon (\tilde{\xi}) \; \geq \; \mathbb{P}_0\text{-VaR}_\varepsilon (\tilde{\xi}) \right] \\
&= \;\;
\mathbb{P}^N_0 \left[ \hat{\mathbb{P}}\text{-VaR}_\varepsilon (\tilde{\xi}) \; \geq \; \mathbb{P}_0\text{-VaR}_\varepsilon (\tilde{\xi}) - \theta / \varepsilon \right] \\
&= \;\;
1 - \mathbb{P}^N_0 \left[ \hat{\mathbb{P}}\text{-VaR}_\varepsilon (\tilde{\xi}) \; < \; \mathbb{P}_0\text{-VaR}_\varepsilon (\tilde{\xi}) - \theta / \varepsilon \right] \\
&\geq \;\;
1 - \text{exp} \left( -2N(1-\varepsilon-\mathbb{P}_0[\tilde{\xi} \leq \mathbb{P}_0\textnormal{-VaR}_{\varepsilon}(\tilde{\xi}) - \theta/\varepsilon])^2 \right),
\end{align*}
where $\mathbb{P}_0^N$ is the $N$-fold product of $\mathbb{P}_0$ that generates $\{ \hat{\xi}_i \}_{i=1}^N$. The first inequality holds since $\mathbb{P}^\dag$ is contained in $\mathcal{F} (\theta)$. The first equality holds since $\mathbb{P}^\dag\text{-VaR}_\varepsilon (\tilde{\xi}) = \hat{\mathbb{P}}\text{-VaR}_\varepsilon (\tilde{\xi}) + \theta / \varepsilon$ by construction of $\mathbb{P}^\dag$, and the last inequality is due to a standard concentration inequality for empirical quantiles (see, \emph{e.g.}, Theorem~2.3.2 of \citealt{serfling2009approximation}).
If we replace the Wasserstein ambiguity set $\mathcal{F} (\theta)$ with the ambiguity $\mathcal{G} (\theta)$ of any $\phi$-divergence, on the other hand, then we can bound the reliability from \emph{above} by
\begin{align*}
\mathbb{P}_0^N \left[ \sup_{\mathbb{P} \in \mathcal{G} (\theta)} \, \mathbb{P}\text{-VaR}_\varepsilon (\tilde{\xi}) \; \geq \; \mathbb{P}_0\text{-VaR}_{\varepsilon} (\tilde{\xi}) \right]
\;\; &= \;\;
1 - \mathbb{P}^N_0 \left[ \sup_{\mathbb{P} \in \mathcal{G} (\theta)} \, \mathbb{P}\text{-VaR}_\varepsilon (\tilde{\xi}) \; < \; \mathbb{P}_0\text{-VaR}_{\varepsilon} (\tilde{\xi}) \right] \\
\;\; &\leq \;\;
1 - \mathbb{P}^N_0 \left[ \hat{\xi}_1, \hat{\xi}_2, \ldots, \hat{\xi}_N \; < \; \mathbb{P}_0\text{-VaR}_{\varepsilon} (\tilde{\xi}) \right] \\
\;\; &\leq \;\;
1 - (1 - \varepsilon)^N.
\end{align*}
Here, the first inequality holds since every distribution in $\mathcal{G} (\theta)$ is absolutely continuous with respect to $\hat{\mathbb{P}}$ and is therefore supported on the training samples, and the second inequality follows from the definition of $\mathbb{P}_0\text{-VaR}_{\varepsilon} (\tilde{\xi})$. We highlight that this probability bound holds for every radius $\theta$ of the $\phi$-divergence ball $\mathcal{G} (\theta)$.
\begin{figure}
\caption{\textnormal{Reliability bounds for the Wasserstein (worst-case) and $\phi$-divergence (best-case) ambiguity sets when approximating the VaR at level $\varepsilon = 0.1$ (left), $\varepsilon =0.05$ (middle) and $\varepsilon =0.01$ (right). We choose the radius $\theta = 1/\sqrt{N}$.}}
\label{fig:bound}
\end{figure}
Figure~\ref{fig:bound} compares the \emph{worst-case} reliability offered by the Wasserstein ambiguity set with the \emph{best-case} reliability of the $\phi$-divergence ambiguity set for a uniform distribution over the interval~$[0, 1]$. We observe that in low-sample regimes, $\phi$-divergence ambiguity sets may underestimate the true VaR with high probability.
$\clubsuit$
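To complement the motivating example, the following Monte Carlo sketch (ours, purely illustrative; all identifiers are ad hoc) estimates the probability $\mathbb{P}_0^N [ \hat{\mathbb{P}}\text{-VaR}_\varepsilon (\tilde{\xi}) + \theta/\varepsilon \geq \mathbb{P}_0\text{-VaR}_{\varepsilon} (\tilde{\xi}) ]$, which by the construction of $\mathbb{P}^\dag$ is a lower bound on the reliability of the worst-case value-at-risk over the Wasserstein ball, and compares it with the best-case bound $1 - (1 - \varepsilon)^N$ for $\phi$-divergence balls. As in Figure~\ref{fig:bound}, $\mathbb{P}_0$ is the uniform distribution on $[0, 1]$ and $\theta = 1/\sqrt{N}$.
\begin{verbatim}
# Monte Carlo check of the reliability bounds in the motivating example:
# P_0 = Uniform[0, 1] (true VaR_eps = 1 - eps) and theta = 1 / sqrt(N).
import numpy as np

def empirical_var(samples, eps):
    # inf{g : (1/N) * #{i : samples[i] >= g} <= eps}
    s = np.sort(samples)
    return s[int(np.ceil(len(s) * (1.0 - eps))) - 1]

def wasserstein_reliability(N, eps, trials=5000, seed=0):
    rng = np.random.default_rng(seed)
    theta = 1.0 / np.sqrt(N)
    hits = 0
    for _ in range(trials):
        xi = rng.uniform(0.0, 1.0, size=N)
        if empirical_var(xi, eps) + theta / eps >= 1.0 - eps:
            hits += 1
    return hits / trials

eps = 0.05
for N in (10, 50, 100, 500):
    print(N,
          wasserstein_reliability(N, eps),  # lower bound, Wasserstein ball
          1.0 - (1.0 - eps) ** N)           # upper bound, phi-divergence ball
\end{verbatim}
For small $N$ the estimated Wasserstein reliability is already close to one, whereas the $\phi$-divergence bound $1 - (1-\varepsilon)^N$ is not, in line with the observation above.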
To our best knowledge, the paper of \cite{xie2020bicriteria} is the only previous work on data-driven chance constraints over Wasserstein ambiguity sets. The authors study the special class of covering problems, where the feasible region $\mathcal{X}$ satisfies $\eta \mathcal{X} \subseteq \mathcal{X}$ for every $\eta \geq 1$. This problem class encompasses, among others, portfolio optimization problems without budgetary restrictions and lot-sizing problems in the absence of setup costs. The authors prove that the resulting individual chance constrained program is NP-hard. They also demonstrate that two popular approximation schemes, the CVaR approximation as well as the scenario approximation, can perform arbitrarily poorly for classical individual chance constraints, that is, when the Wasserstein radius is $\theta = 0$. Based on this insight, the authors propose a bicriteria approximation scheme for covering problems with classical as well as distributionally robust individual chance constraints over moment and Wasserstein ambiguity sets. This bicriteria approximation scheme determines solutions that trade off a higher risk threshold $\varepsilon' > \varepsilon$ in the chance constraint with a smaller optimality gap $\varepsilon' / (\varepsilon' - \varepsilon)$. This is achieved by solving a tractable convex relaxation of the chance constrained problem (using, \emph{e.g.}, a Markovian or Bernstein generator) and subsequently scaling the solution to this relaxation so that it becomes feasible for the chance constraint with the higher risk threshold $\varepsilon'$. By design, the performance guarantee of the bicriteria approximation scheme becomes weaker (and eventually trivial) as the selected risk threshold $\varepsilon'$ \mbox{approaches the risk threshold $\varepsilon$ of the original problem formulation.}
In this paper, we study distributionally robust chance constrained programs over the Wasserstein ambiguity set~\eqref{set:Wasserstein}. We derive deterministic reformulations for individual chance constrained programs, where $\mathcal{S}(\bm{x}) = \{ \bm{\xi} \in \mathbb{R}^K \mid \bm{a} (\bm{\xi})^\top \bm{x} < b (\bm{\xi}) \}$ for affine functions $\bm{a}(\cdot) : \mathbb{R}^K \rightarrow \mathbb{R}^L$ and $b(\cdot) : \mathbb{R}^K \rightarrow \mathbb{R}$, as well as for joint chance constrained programs with right-hand side uncertainty, where $\mathcal{S}(\bm{x}) = \{\bm{\xi} \in \mathbb{R}^K \mid \bm{A} \bm{x} < \bm{b} (\bm{\xi}) \}$ for $\bm{A} \in \mathbb{R}^{M \times L}$ and an affine function $\bm{b} : \mathbb{R}^K \rightarrow \mathbb{R}^M$. Our reformulations are mixed-integer conic programs that reduce to mixed-integer linear programs when the norm $\left \lVert \cdot \right \rVert$ on $\mathbb{R}^K$ is the $1$-norm or the $\infty$-norm.
While preparing this paper for publication, we became aware of the independent work by \cite{xie2019distributionally}, which derives similar reformulations for distributionally robust individual and joint chance constraints over Wasserstein ambiguity sets. In contrast to our work, however, \cite{xie2019distributionally} assumes that each safety condition $\bm{a}_m^\top \bm{x} < b_m (\bm{\xi})$, $m \in [M]$, in the joint chance constraint depends on a subvector of $\bm{\xi}$, and that these subvectors are pairwise disjoint for different safety conditions. In other words, different safety conditions of the joint chance constraints studied by \cite{xie2019distributionally} must depend on different random variables. Furthermore, the reformulations of \cite{xie2019distributionally} are derived via duality theory, whereas our reformulations directly leverage the structural insights into the worst-case distributions. This enables us to keep our reformulations largely independent of the selected ground metric for the Wasserstein ball, which opens up possibilities to incorporate other cost functions in our definition of the Wasserstein distance.
Since the initial submission of this paper, our exact reformulation for data-driven chance constrained programs over Wasserstein balls has been further studied and tightened; see, for instance, \cite{ho2020strong, ho2021distributionally}, \cite{shen2021convex} and \cite{zhang2021building}. Along with these theoretical extensions, our reformulation has also been applied in several domains, including risk sharing in finance \citep{chen2021sharing}, network design for humanitarian operations \citep{jiang2021distributionally} and optimal power flows in energy systems \citep{arrigo2022wasserstein}.
\noindent \textbf{Notation.}
We use boldface uppercase and lowercase letters to denote matrices and vectors, respectively. Special vectors of appropriate dimensions include $\bm{0}$ and $ \bm{e} $, which respectively correspond to the zero vector and the vector of all ones. We denote by $\|\cdot\|_*$ the dual norm of a general norm $\|\cdot\|$. We use the shorthand $ [N] = \left\{1,2,\ldots,N\right\} $ to represent the set of all positive integers up to $ N $. Given a (possibly fractional) real number $\ell\in [0,N]$, we define the partial sum of the first $\ell$ values in $\{k_i\}_{i \in [N]}$ as $\sum_{i = 1}^{\ell} k_i = \sum_{i = 1}^{\lfloor \ell \rfloor} k_i + (\ell - \lfloor \ell \rfloor) k_{\lfloor \ell \rfloor + 1}$. Random vectors are denoted by tilde signs ({\em e.g.}, $\bmt{\xi}$), while their realizations are denoted by the same symbols without tildes ({\em e.g.}, $\bm{\xi}$). Given a random vector $\bmt{\xi}$ governed by a distribution $\mathbb{P}$, a measurable loss function $\ell$ and a risk threshold $\varepsilon \in (0, 1)$, the value-at-risk (VaR) of $\ell (\bmt{\xi})$ at level $\varepsilon$ is defined as $\mathbb{P}\text{-VaR}_{\varepsilon} (\ell (\bmt{\xi})) = \inf\{\gamma \in \mathbb{R} \mid \mathbb{P}[\gamma \leq \ell(\bmt{\xi})] \leq \varepsilon\}$.
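To make the fractional partial-sum convention concrete, the following minimal Python sketch (our own illustration, not part of the paper) evaluates $\sum_{i=1}^{\ell} k_i$ for a possibly fractional index $\ell \in [0, N]$; the function name \texttt{partial\_sum} is ours.
\begin{verbatim}
import math

def partial_sum(k, ell):
    """Fractional partial sum: the first floor(ell) values of k plus
    (ell - floor(ell)) times the next value; k is 0-indexed."""
    j = math.floor(ell)
    total = sum(k[:j])
    if ell > j:                      # fractional contribution of value j+1
        total += (ell - j) * k[j]
    return total

# Example: partial_sum([1.0, 2.0, 3.0], 1.5) == 1.0 + 0.5 * 2.0 == 2.0
\end{verbatim}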
\section{Exact Reformulation of Data-Driven Chance Constraints}\label{sec:exact_reformulation}
Section~\ref{sec:uq_wasserstein} reviews a previously established result on the quantification of uncertainty over Wasserstein balls. We use this result to derive an exact reformulation of generic data-driven chance constrained programs in Section~\ref{sec:ref_generic}. We finally specialize this generic reformulation to the subclasses of data-driven individual chance constrained programs as well as data-driven joint chance constrained programs with right-hand side uncertainty in Sections~\ref{sec:ref_indiv_cc} and~\ref{sec:ref_joint_cc}, respectively.
\subsection{Uncertainty Quantification over Wasserstein Balls}\label{sec:uq_wasserstein}
Consider an open safety set $\mathcal{S} \subseteq \mathbb{R}^K $, and denote by $\bar{\mathcal{S}} = \mathbb{R}^K \setminus \mathcal{S}$ its closed complement. The uncertainty quantification problem
\begin{equation}
\label{prob:uncertainty quantification}
\sup_{\mathbb{P} \in \mathcal{F}(\theta)} \mathbb{P}[\bmt{\xi} \notin \mathcal{S}]
\end{equation}
computes the worst (largest) probability of the system under consideration being unsafe, which is the case whenever the random vector $\bmt{\xi}$ attains a value in the unsafe set $\bar{\mathcal{S}}$. Throughout the rest of the paper, we exclude trivial special cases and assume that $\theta > 0$ and $\varepsilon \in (0, 1)$.
To solve the uncertainty quantification problem~\eqref{prob:uncertainty quantification}, denote by $\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}})$ the distance of the $i^\text{th}$ data point $\bmh{\xi}_i \in \mathbb{R}^K$ of the empirical distribution $\hat{\mathbb{P}}$ to the unsafe set $\bar{\mathcal{S}}$. This distance is based on a norm $\left \lVert \cdot \right \rVert$, which we keep generic at this stage. Without loss of generality, we assume that the data points $\{\bmh{\xi}_i\}_{i \in [N]} $ are ordered in increasing distance to $\bar{\mathcal{S}}$, that is, $\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}) \leq \mathbf{dist}(\bmh{\xi}_j, \bar{\mathcal{S}})$ for all $1 \leq i \leq j \leq N$. We also assume that $\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}) = 0$ (that is, the data point $\bmh{\xi}_i$ is unsafe) if and only if $i \in [I]$, where $I = 0$ if $\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}) > 0$ for all $i \in [N]$. Finally, we denote by $\bm{\xi}^\star_i \in \bar{\mathcal{S}}$ an unsafe point that is closest to the data point $\bmh{\xi}_i$, $i \in [N]$, in terms of the distance $\mathbf{dist} (\bmh{\xi}_i, \bar{\mathcal{S}})$.
\cite{blanchet2019quantifying} as well as \cite{Gao_Kleywegt_2016} have characterized the solution to the uncertainty quantification problem~\eqref{prob:uncertainty quantification} in closed form. To keep our paper self-contained, we reproduce their findings without proof in Theorem~\ref{thm:uncertainty-quantification} below.
\begin{theorem}\label{thm:uncertainty-quantification}
Let $j^\star = \max \, \{j \in [N] \cup \{ 0 \} \mid \sum_{i = 1}^j \mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}) \leq \theta N \}$. The uncertainty quantification problem~\eqref{prob:uncertainty quantification} is solved by a worst-case distribution $\mathbb{P}^\star \in \mathcal{F} (\theta)$ that is characterized as follows:
\begin{enumerate}
\item[(i)] If $j^\star = N$, then $\sup\limits_{\mathbb{P} \in \mathcal{F}(\theta)} \mathbb{P}[\bmt{\xi} \notin \mathcal{S}] \; = \; \mathbb{P}^\star [\bmt{\xi} \notin \mathcal{S}] \; = \; 1$ for
\begin{equation*}
\mathbb{P}^\star \; = \; \dfrac{1}{N} \sum_{i = 1}^I \delta_{\bmh{\xi}_i} \; + \; \dfrac{1}{N} \sum_{i = I+1}^N \delta_{\bm{\xi}^\star_i}.
\end{equation*}
\item[(ii)] If $j^\star < N$, then $\sup\limits_{\mathbb{P} \in \mathcal{F}(\theta)} \mathbb{P}[\bmt{\xi} \notin \mathcal{S}] \; = \; \mathbb{P}^\star [\bmt{\xi} \notin \mathcal{S}] \; = \; (j^\star+ p^\star)/N$ for
\begin{equation*}
\mathbb{P}^\star \; = \; \dfrac{1}{N} \sum_{i = 1}^I \delta_{\bmh{\xi}_i} \; + \; \dfrac{1}{N} \sum_{i = I+1}^{j^\star} \delta_{\bm{\xi}^\star_i} \; + \; \dfrac{p^\star}{N} \delta_{\bm{\xi}^\star_{j^\star+1}} \; + \; \dfrac{1-p^\star}{N}\delta_{\bmh{\xi}_{j^\star+1}} \; + \; \dfrac{1}{N} \sum_{i = j^\star+2}^N \delta_{\bmh{\xi}_i},
\end{equation*}
where $p^\star = (\theta N - \sum_{i = 1}^{j^\star} \mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}})) / \mathbf{dist}(\bmh{\xi}_{j^\star+1}, \bar{\mathcal{S}})$.
\end{enumerate}
\end{theorem}
Intuitively speaking, the worst-case distribution $\mathbb{P}^\star$ in Theorem~\ref{thm:uncertainty-quantification} transports the training dataset $\{\bmh{\xi}_i\}_{i \in [N]}$ to the unsafe set $\bar{\mathcal{S}}$ in a greedy fashion; see Figure~\ref{fig:greedy}. The data points $\bmh{\xi}_1,\dots,\bmh{\xi}_I$ are already unsafe and hence do not need to be transported. The subsequent data points $\bmh{\xi}_{I+1}, \ldots, \bmh{\xi}_{j^\star + 1}$ are closest to the unsafe set and are thus transported from $\mathcal{S}$ to $\bar{\mathcal{S}}$. Due to the limited transportation budget $\theta$, the data point $\bmh{\xi}_{j^\star + 1}$ is only partially transported. The safe samples $\bmh{\xi}_{j^\star + 2}, \ldots, \bmh{\xi}_N$, finally, are too far away from the unsafe set $\bar{\mathcal{S}}$ and are thus left unchanged. Note that the distribution characterized in Theorem~\ref{thm:uncertainty-quantification} may not be the only distribution that solves problem~\eqref{prob:uncertainty quantification}.
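As a numerical illustration of this greedy construction (our own sketch, not code from the paper), the worst-case probability characterized in Theorem~\ref{thm:uncertainty-quantification} can be evaluated directly from the distances $\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}})$ and the radius $\theta$; the function name below is hypothetical.
\begin{verbatim}
def worst_case_probability(distances, theta):
    """Greedy evaluation of the worst-case unsafe probability over the
    Wasserstein ball: transport the samples closest to the unsafe set first."""
    d = sorted(distances)              # distances dist(xi_hat_i, unsafe set)
    N = len(d)
    budget = theta * N                 # total transportation budget
    mass = 0.0                         # probability mass placed on the unsafe set
    for di in d:
        if di <= 0.0:                  # sample is already unsafe
            mass += 1.0 / N
        elif budget >= di:             # fully transport this sample
            budget -= di
            mass += 1.0 / N
        else:                          # partially transport the next sample
            mass += (budget / di) / N
            break
    return min(mass, 1.0)
\end{verbatim}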
\begin{figure}
\caption{\textnormal{Empirical and worst-case distributions. The left graph visualizes the empirical distribution $\hat{\mathbb{P}}$.}}
\label{fig:greedy}
\end{figure}
\subsection{Reformulation of Generic Chance Constraints}\label{sec:ref_generic}
We now develop deterministic reformulations for the distributionally robust chance constrained program~\eqref{prob:cc general}. To this end, we focus on the ambiguous chance constraint
\begin{equation}
\label{prob:worst-case cc}
\sup_{\mathbb{P} \in \mathcal{F}(\theta)} \mathbb{P}[\bmt{\xi} \notin \mathcal{S}(\bm{x})] \leq \varepsilon.
\end{equation}
For any fixed decision $\bm x\in \mathcal{X}$, we let $\mathcal{S}(\bm{x})$ be an arbitrary open safety set, and we denote by $\bar{\mathcal{S}}(\bm{x})$ its closed complement, which comprises all unsafe scenarios.
Every fixed training dataset $\{\bmh{\xi}_i\}_{i \in [N]}$ then induces a (decision-dependent) permutation $\bm{\pi}(\bm{x})$ of $[N]$ that orders the training samples in increasing distance to the unsafe set, that is,
\begin{equation*}
\mathbf{dist}(\bmh{\xi}_{\pi_1(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) \; \leq \; \mathbf{dist}(\bmh{\xi}_{\pi_2(\bm{x})}, \bar{\mathcal{S}}(\bm{x}))
\; \leq \; \cdots \; \leq \; \mathbf{dist}(\bmh{\xi}_{\pi_N(\bm{x})}, \bar{\mathcal{S}}(\bm{x})).
\end{equation*}
We first show that a fixed decision $\bm{x}$ satisfies the ambiguous chance constraint~\eqref{prob:worst-case cc} over the Wasserstein ambiguity set~\eqref{set:Wasserstein} if and only if the partial sum of the $\varepsilon N$ smallest transportation distances to the unsafe set, multiplied by the mass $1/N$ of a training sample, weakly exceeds~$\theta$.
\begin{theorem}\label{thm:cc equivalent}
For any fixed decision $\bm{x}\in \mathcal{X}$, the ambiguous chance constraint~\eqref{prob:worst-case cc} over the Wasserstein ambiguity set~\eqref{set:Wasserstein} is equivalent to the deterministic inequality
\begin{equation}
\label{equivalence:theta positive}
\dfrac{1}{N}\sum_{i = 1}^{\varepsilon N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) \ge \theta.
\end{equation}
\end{theorem}
The left-hand side of~\eqref{equivalence:theta positive} can be interpreted as the minimum cost of moving a fraction $\varepsilon$ of the training samples to the unsafe set. If this cost weakly exceeds the prescribed transportation budget $\theta$, then no distribution in the Wasserstein ambiguity set can assign the unsafe set a probability of more than $\varepsilon$, which means that the distributionally robust chance constraint~\eqref{prob:worst-case cc} is satisfied.
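A minimal sketch of this feasibility check for a fixed decision $\bm{x}$ (our own illustration; the function name is hypothetical and the distances are assumed to be precomputed) reads as follows.
\begin{verbatim}
import math

def wasserstein_cc_holds(distances, epsilon, theta):
    """For a fixed x: the fractional partial sum of the epsilon*N smallest
    distances to the unsafe set, scaled by 1/N, must be at least theta."""
    d = sorted(distances)
    N = len(d)
    ell = epsilon * N                  # possibly fractional index
    j = math.floor(ell)
    partial = sum(d[:j])
    if j < N:
        partial += (ell - j) * d[j]    # fractional contribution
    return partial / N >= theta
\end{verbatim}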
\noindent \emph{Proof of Theorem~\ref{thm:cc equivalent}.} $\;$
From Theorem~\ref{thm:uncertainty-quantification} we know that the worst-case distribution $\mathbb{P}^\star$ is an optimal solution (not necessarily unique) to the maximization problem embedded in the left-hand side of the ambiguous chance constraint~\eqref{prob:worst-case cc}. We thus conclude that the constraint~\eqref{prob:worst-case cc} is satisfied if and only if $\mathbb{P}^\star [\bmt{\xi} \notin \mathcal{S}(\bm{x})] \leq \varepsilon$ for $\mathbb{P}^\star$ defined in the statement of that theorem.
In case (\emph{i}) of Theorem~\ref{thm:uncertainty-quantification}, the ambiguous chance constraint~\eqref{prob:worst-case cc} is violated since $\mathbb{P}^\star [\bmt{\xi} \notin \mathcal{S}(\bm{x})] = 1$ while $\varepsilon < 1$ by assumption. At the same time, since $j^\star = N$, we have $\frac{1}{N} \sum_{i = 1}^{N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) \leq \theta$. If this inequality is strict, then~\eqref{equivalence:theta positive} is violated as desired since $\frac{1}{N} \sum_{i = 1}^{\varepsilon N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) \leq \frac{1}{N} \sum_{i = 1}^{N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x}))$. If the inequality is satisfied as an equality, on the other hand, we know that $\mathbf{dist}(\bmh{\xi}_{\pi_N(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) > 0$ since $\theta > 0$ by assumption and $\mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) \leq \mathbf{dist}(\bmh{\xi}_{\pi_j(\bm{x})}, \bar{\mathcal{S}}(\bm{x}))$ for all $i \leq j$ by construction of the re-ordering $\bm{\pi}(\bm{x})$. Thus, since $\varepsilon < 1$ by assumption, we have $\frac{1}{N} \sum_{i = 1}^{\varepsilon N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) < \frac{1}{N} \sum_{i = 1}^{N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) = \theta$ and equation~\eqref{equivalence:theta positive} is violated as desired.
In case (\emph{ii}) of Theorem~\ref{thm:uncertainty-quantification}, we have $\mathbb{P}^\star [\bmt{\xi} \notin \mathcal{S} (\bm{x})] = (j^\star+ p^\star)/N$ with $j^\star = \max \, \{j \in [N - 1] \cup \{0\} \mid \sum_{i = 1}^j \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) \leq \theta N \}$ as well as $p^\star = (\theta N - \sum_{i = 1}^{j^\star} \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x}))) / \mathbf{dist}(\bmh{\xi}_{\pi_{j^\star + 1} (\bm{x})}, \bar{\mathcal{S}} (\bm{x}))$. We claim that $j^\star+ p^\star$ is the optimal value of the bivariate mixed-integer optimization problem
\begin{equation}\label{eq:bivariate_mixed_integer}
\begin{array}{cll}
\displaystyle \max_{j, p} & \displaystyle j + p \\
{\rm s.t.} & \displaystyle \sum_{i = 1}^{j} \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) + p \cdot \mathbf{dist}(\bmh{\xi}_{\pi_{j + 1} (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) \leq \theta N \\[4mm]
& \displaystyle j \in [N - 1] \cup \{0\},~0 \leq p < 1.
\end{array}
\end{equation}
Indeed, the solution $(j, p) = (j^\star, p^\star)$ is feasible in~\eqref{eq:bivariate_mixed_integer} by definition of $j^\star$ and $p^\star$. Moreover, we have $j + p < j^\star + p^\star$ for any other feasible solution $(j, p)$ that satisfies $j = j^\star$ and $p \neq p^\star$. Assume now that the optimal solution $(j, p)$ to~\eqref{eq:bivariate_mixed_integer} would satisfy $j > j^\star$. Any such solution would violate the first constraint since $\sum_{i = 1}^j \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) > \theta N$ by definition of $j^\star$ while $p \geq 0$. Similarly, any solution $(j, p)$ with $j < j^\star$ cannot be optimal in~\eqref{eq:bivariate_mixed_integer} since $j \leq j^\star - 1$ while $p < p^\star + 1$.
We can re-express problem~\eqref{eq:bivariate_mixed_integer} as the univariate discrete optimization problem
\begin{equation*}
\max \bigg\{ j \in [0, N] ~\bigg|~ \sum_{i = 1}^{\lfloor j \rfloor} \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) \; + \; (j - \lfloor j \rfloor) \cdot \mathbf{dist}(\bmh{\xi}_{\pi_{\lfloor j \rfloor + 1} (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) \leq \theta N \bigg\}.
\end{equation*}
Using our definition of partial sums, we observe that this problem is equivalent to
\begin{equation*}
\max \bigg\{ j \in [0, N] ~\bigg|~ \sum_{i = 1}^j \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) \leq \theta N \bigg\}.
\end{equation*}
By construction, the mapping $\vartheta (j) = \sum_{i = 1}^j \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x}))$, $j \in [0, N]$, is continuous and monotonically nondecreasing. It therefore affords the right inverse $\vartheta^{-1} (t) = \max \{ j \in [0, N] \mid \vartheta(j) \leq t \}$ that satisfies $\vartheta \circ \vartheta^{-1} (t) = t$ for all $t \in [0, \vartheta(N)]$. Figure~\ref{fig:inverse_function} visualizes the relationship between $\vartheta$ and $\vartheta^{-1}$. We thus conclude that the ambiguous chance constraint~\eqref{prob:worst-case cc} is satisfied if and only if
\begin{align*}
\max \bigg\{ j \in [0, N] ~\bigg|~ \sum_{i = 1}^j \mathbf{dist}(\bmh{\xi}_{\pi_i (\bm{x})}, \bar{\mathcal{S}} (\bm{x})) \leq \theta N \bigg\} \leq \varepsilon N \quad
&\Longleftrightarrow \quad \max \{ j \in [0, N] ~|~ \vartheta (j) \leq \theta N \} \leq \varepsilon N \\
&\Longleftrightarrow \quad \vartheta^{-1} (\theta N) \leq \varepsilon N \\
&\Longleftrightarrow \quad \theta N \leq \vartheta (\varepsilon N),
\end{align*}
where the last equivalence follows from $\vartheta \circ \vartheta^{-1} (\theta N) = \theta N$, which holds because $\theta N \leq \vartheta(N)$ for $j^\star < N$, as well as the fact that $\vartheta$ is monotonically nondecreasing. By definition, the right-hand side of the last equivalence holds if and only if~\eqref{equivalence:theta positive} in the statement of the theorem is satisfied.
\Halmos
\endproof
\begin{figure}
\caption{\textnormal{Relationship between $\vartheta$ and $\vartheta^{-1}$.}}
\label{fig:inverse_function}
\end{figure}
\begin{remark}
We emphasize that the inequality~\eqref{equivalence:theta positive} fails to be equivalent to the ambiguous chance constraint~\eqref{prob:worst-case cc} when $\theta = 0$, in which case the Wasserstein ball collapses to the singleton set $\mathcal{F}(0) = \{\hat{\mathbb{P}}\}$. To see this, suppose that $\bmh{\xi}_{\pi_i(\bm{x})} \in \bar{\mathcal{S}}(\bm{x})$ for all $i=1,\ldots,I$ and $\bmh{\xi}_{\pi_i(\bm{x})} \in \mathcal{S}(\bm{x})$ for all $i=I+1,\ldots,N$, where $I\ge 1$. If $\varepsilon < I/N$, then the chance constraint~\eqref{prob:worst-case cc} is violated because
\begin{equation*}
\hat{\mathbb{P}}[\bmt{\xi} \notin \mathcal{S}(\bm{x})] = \frac{I}{N}>\varepsilon,
\end{equation*}
while the inequality~\eqref{equivalence:theta positive} holds trivially because $\sum_{i = 1}^{\varepsilon N} \mathbf{dist}(\bmh{\xi}_{\pi_i(\bm{x})}, \bar{\mathcal{S}}(\bm{x})) \ge 0$.
\end{remark}
Theorem~\ref{thm:cc equivalent} establishes that a decision $\bm{x} \in \mathcal{X}$ satisfies the ambiguous chance constraint~\eqref{prob:worst-case cc} if and only if
the sum of the $\varepsilon N$ smallest distances of the training samples to the unsafe set $\bar{\mathcal{S}}(\bm{x})$ weakly exceeds $\theta N$. This result is of computational interest because the sum of the $\varepsilon N$ smallest out of $N$ real numbers is concave in those real numbers (while being convex in $\varepsilon$). This reveals that the constraint~\eqref{equivalence:theta positive} is convex in the decision-dependent distances $\{\mathbf{dist}(\bmh{\xi}_{i}, \bar{\mathcal{S}}(\bm{x}))\}_{i \in [N]}$. In the remainder we develop an efficient reformulation of this convex constraint that does not require an enumeration of all possible sums of $\varepsilon N$ different distances between the training samples and the unsafe set. This reformulation is based on the following auxiliary lemma.
\begin{lemma}\label{lem:sum of smallest}
For any $\varepsilon \in (0,1)$, the sum of the $\varepsilon N$ smallest out of $N$ real numbers $k_1,\dots,k_N$ coincides with the optimal value of the linear program
$$
\begin{array}{cll}
\displaystyle \max_{\bm{s}, t} & \varepsilon N t - \bm{e}^\top\bm{s} \\
{\rm s.t.} & k_i \geq t - s_i &~\forall i \in [N] \\
& \bm{s} \geq \bm{0}.
\end{array}
$$
\end{lemma}
\noindent \emph{Proof of Lemma~\ref{lem:sum of smallest}.} $\;$
By definition, the sum of the $\varepsilon N$ smallest elements of the set $\{k_1,\dots,k_N\}$ corresponds to the optimal value of the (manifestly feasible) linear program
\begin{equation*}
\begin{array}{cl}
\displaystyle \min_{\bm{v}} & \displaystyle \sum_{i \in [N]} k_i v_i \\ \text{s.t.} & \bm{0} \leq \bm{v} \leq \bm{e}, ~\bm{e}^\top\bm{v} = \varepsilon N.
\end{array}
\end{equation*}
The claim now follows from strong linear programming duality.
\Halmos
\endproof
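The lemma is easy to verify numerically. The following sketch (our own check, assuming NumPy and SciPy are available; the function names are ours) solves the primal linear program from the proof with \texttt{scipy.optimize.linprog} and compares its optimal value with the sorting-based evaluation of the fractional partial sum.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def sum_of_smallest_lp(k, epsilon):
    """Optimal value of  min k^T v  s.t.  0 <= v <= 1,  sum(v) = epsilon*N."""
    k = np.asarray(k, dtype=float)
    N = len(k)
    res = linprog(c=k, A_eq=np.ones((1, N)), b_eq=[epsilon * N],
                  bounds=[(0.0, 1.0)] * N, method="highs")
    return res.fun

def sum_of_smallest_direct(k, epsilon):
    """Same quantity via sorting and the fractional partial-sum convention."""
    d = sorted(k)
    ell = epsilon * len(d)
    j = int(ell)
    return sum(d[:j]) + (ell - j) * (d[j] if j < len(d) else 0.0)

# Both evaluations return 4.2 for k = [3.0, 1.0, 2.0, 5.0] and epsilon = 0.6.
\end{verbatim}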
Armed with Theorem~\ref{thm:cc equivalent} and Lemma~\ref{lem:sum of smallest}, we are now ready to reformulate the chance constrained program~\eqref{prob:cc general} as a deterministic optimization problem.
\begin{theorem}\label{thm:cc-deterministic}
The chance constrained program~\eqref{prob:cc general} is equivalent to
\begin{equation}\label{prob:cc reformulation}
\begin{array}{cll}
\displaystyle \min_{\bm{s}, t, \bm{x}} & \bm{c}^\top\bm{x} \\
{\rm s.t.} & \varepsilon N t - \bm{e}^\top\bm{s} \geq \theta N \\
& \mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}(\bm{x})) \geq t - s_i &~\forall i \in [N] \\
& \bm{s} \geq \bm{0}, ~\bm{x} \in \mathcal{X}.
\end{array}
\end{equation}
\end{theorem}
\noindent \emph{Proof of Theorem~\ref{thm:cc-deterministic}.} $\;$
The claim follows immediately by using Theorem~\ref{thm:cc equivalent} to reformulate the chance constraint~\eqref{prob:worst-case cc} as the inequality~\eqref{equivalence:theta positive}, using Lemma~\ref{lem:sum of smallest} to express the left-hand side of~\eqref{equivalence:theta positive} as a linear maximization problem and substituting the resulting constraint back into~\eqref{prob:cc general}.
\Halmos
\endproof
We emphasize that the reformulation offered by Theorem~\ref{thm:cc-deterministic} is independent of the selected ground metric $\mathbf{dist} (\cdot, \cdot)$. In the remainder, we assume that the ground metric is based on a norm $\lVert \cdot \rVert$.
\subsection{Reformulation of Individual Chance Constraints}\label{sec:ref_indiv_cc}
Assume now that problem~\eqref{prob:cc general} accommodates an individual chance constraint defined through the safety set $\mathcal{S}(\bm{x}) = \{\bm{\xi} \in \mathbb{R}^K \mid (\bm{A}\bm{\xi} + \bm{a})^\top \bm{x} < \bm{b}^\top\bm{\xi} + b\}$. Individual chance constrained programs have been studied, among others, in network design \citep{wang2007beta}, vehicle routing \citep{gounaris2013robust, ghosal2020distributionally} and portfolio optimization \citep{rujeerapaiboon2016robust, dert2000optimal}. By Lemma~\ref{lem:distance to the union of closed half-spaces} in the appendix, we have
$$\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}(\bm x)) = \dfrac{((\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x})^+}{\|\bm{b} - \bm{A}^\top\bm{x}\|_*} ~~\forall i \in [N],
$$
where we adopt the convention that $0 / 0 = 0$. Substituting this expression into problem~\eqref{prob:cc reformulation} of Theorem~\ref{thm:cc-deterministic} thus allows us to reformulate the chance constrained program~\eqref{prob:cc general} as the deterministic optimization problem
\begin{equation}\label{prob:individual cc reformulation}
\begin{array}{cll}
\displaystyle \min_{\bm{s}, t, \bm{x}} & \bm{c}^\top\bm{x} \\
{\rm s.t.} & \varepsilon N t - \bm{e}^\top\bm{s} \geq \theta N \\
& \dfrac{((\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x})^+}{\|\bm{b} - \bm{A}^\top\bm{x}\|_*} \geq t - s_i &~\forall i \in [N] \\
& \bm{s} \geq \bm{0}, ~\bm{x} \in \mathcal{X}.
\end{array}
\end{equation}
Unfortunately, problem~\eqref{prob:individual cc reformulation} fails to be convex as its constraints involve fractions of convex functions. Below we show, however, that problem~\eqref{prob:individual cc reformulation} can be reformulated as a mixed integer conic program.
\begin{proposition}\label{prop:individual cc}
Assume that $\bm{A}^\top\bm{x} \ne \bm{b}$ for all $\bm{x} \in \mathcal{X}$. For the safety set $\mathcal{S}(\bm{x}) = \{\bm{\xi} \in \mathbb{R}^K \mid (\bm{A}\bm{\xi} + \bm{a})^\top \bm{x} < \bm{b}^\top\bm{\xi} + b\}$, problem~\eqref{prob:cc general} is equivalent to the mixed integer conic program
\begin{equation}\label{prob:individual cc reformulation linearization}
\begin{array}{rcll}
Z^\star_{\rm ICC} =& \displaystyle \min_{\bm{q}, \bm{s}, t, \bm{x}} & \bm{c}^\top\bm{x} \\
&{\rm s.t.} & \varepsilon N t - \bm{e}^\top\bm{s} \geq \theta N \|\bm{b} - \bm{A}^\top\bm{x}\|_* \\
&& (\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x} + {\rm M} q_i \geq t - s_i &~\forall i \in [N] \\
&& {\rm M} (1 - q_i) \geq t - s_i &~\forall i \in [N] \\
&& \bm{q} \in \{0,1\}^N, ~\bm{s} \geq \bm{0}, ~\bm{x} \in \mathcal{X},
\end{array}
\end{equation}
where ${\rm M}$ is a suitably large (but finite) positive constant.
\end{proposition}
\noindent \emph{Proof of Proposition~\ref{prop:individual cc}.} $\;$
We already know that the chance constrained program~\eqref{prob:cc general} is equivalent to the non-convex optimization problem~\eqref{prob:individual cc reformulation}. A complicating feature of this problem is the appearance of the maximum operator in the second constraint group, which evaluates the positive part of $(\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x}$. To eliminate this maximum operator, for each $i\in[N]$ we introduce a binary variable $q_i \in \{0,1\}$, and we re-express the $i^{\rm th}$ member of the second constraint group via the two auxiliary constraints
\begin{equation}\label{eq:big-M}
\dfrac{(\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x}}{\|\bm{b} - \bm{A}^\top\bm{x}\|_*} + {\rm M} q_i \geq t - s_i
\text{~~and~~} {\rm M} (1 - q_i) \geq t - s_i.
\end{equation}
Note that at optimality we have $q_i=1$ if $(\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x}$ is negative and $q_i=0$ otherwise. Intuitively, $q_i$ thus activates the less restrictive one of the two auxiliary constraints in~\eqref{eq:big-M}.
Next, we apply the variable substitutions $t\leftarrow t/\|\bm{b} - \bm{A}^\top\bm{x}\|_*$ and $\bm{s}\leftarrow \bm{s}/\|\bm{b} - \bm{A}^\top\bm{x}\|_*$, which is admissible because $\bm{A}^\top\bm{x} \ne \bm{b}$ for all $\bm{x} \in \mathcal{X}$. This change of variables yields the postulated reformulation~\eqref{prob:individual cc reformulation linearization}.
To see that a finite value of $\rm M$ is sufficient for our reformulation to be exact, we show that the expression $((\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x}) / \|\bm{b} - \bm{A}^\top\bm{x}\|_*$ as well as the values of $t$ and $s_i$, $i \in [N]$, in~\eqref{eq:big-M} can all be bounded without affecting the optimal value of problem~\eqref{prob:individual cc reformulation linearization}. This is clear for the fraction as $\mathcal{X}$ is compact and the denominator is non-zero for all $\bm{x} \in \mathcal{X}$. Moreover, $t$ is nonnegative as otherwise the first constraint in~\eqref{prob:individual cc reformulation linearization} would be violated. For any fixed values of $\bm{x}$ and $t$, an optimal value of $s_i$, $i \in [N]$, is given by $s_i^\star (\bm{x}, t) = ( t - ((\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x}) / \|\bm{b} - \bm{A}^\top\bm{x}\|_*)^+$. Since $\mathcal{X}$ is bounded, it thus remains to show that $t$ can be bounded from above. Indeed, for sufficiently large (but finite) $t$, the slope of $\varepsilon N t - \bm{e}^\top \bm{s}^\star (\bm{x}, t)$ on the left-hand side of the first constraint in~\eqref{prob:individual cc reformulation linearization} is $- (1 - \varepsilon) N$. Since $\varepsilon < 1$, we thus conclude that this constraint is violated for large values of $t$.
\Halmos
\endproof
\begin{remark}
The condition that $\bm{A}^\top\bm{x} \ne \bm{b}$ for all $\bm{x} \in \mathcal{X}$ does not restrict the generality of our formulation. Indeed, if an optimal solution $(\bm{q}^\star, \bm{s}^\star, t^\star, \bm{x}^\star)$ to problem~\eqref{prob:individual cc reformulation linearization} satisfies $\bm{A}^\top\bm{x}^\star \ne \bm{b}$, then $\bm{x}^\star$ solves problem~\eqref{prob:cc general} since our argument in the proof of Proposition~\ref{prop:individual cc} applies to $\bm{x}^\star$ even if $\bm{A}^\top\bm{x} = \bm{b}$ for some $\bm{x} \in \mathcal{X}$. Assume now that an optimal solution $(\bm{q}^\star, \bm{s}^\star, t^\star, \bm{x}^\star)$ to problem~\eqref{prob:individual cc reformulation linearization} satisfies $\bm{A}^\top\bm{x}^\star = \bm{b}$. In that case, the ambiguous chance constraint in problem~\eqref{prob:cc general} requires that $\bm{a}^\top \bm{x}^\star < b$. If that is the case for $\bm{x}^\star$, it is optimal in problem~\eqref{prob:cc general}. If, finally, an optimal solution $(\bm{q}^\star, \bm{s}^\star, t^\star, \bm{x}^\star)$ to problem~\eqref{prob:individual cc reformulation linearization} satisfies $\bm{A}^\top\bm{x}^\star = \bm{b}$ and $\bm{a}^\top \bm{x}^\star \geq b$, then one would ideally like to solve a variant of problem~\eqref{prob:individual cc reformulation linearization} that includes the additional constraint
\begin{equation}\label{rem2_constraint}
\bm{A}^\top \bm{x} \neq \bm{b} \quad \text{or} \quad \bm{a}^\top \bm{x} < b.
\end{equation}
This variant of problem~\eqref{prob:individual cc reformulation linearization} can be solved by solving $2 K + 1$ versions of problem~\eqref{prob:individual cc reformulation linearization}, where each version includes exactly one of the constraints $[\bm{A}^\top\bm{x}]_k > [\bm{b}]_k$, $[\bm{A}^\top\bm{x}]_k < [\bm{b}]_k$, $k \in [K]$, or $\bm{a}^\top \bm{x} < b$. One readily verifies that the solution that attains the least objective value amongst these $2K + 1$ versions of problem~\eqref{prob:individual cc reformulation linearization} is an optimal solution to problem~\eqref{prob:individual cc reformulation linearization} with the added constraint~\eqref{rem2_constraint}.
\end{remark}
\begin{remark}
The mixed-integer conic program~\eqref{prob:individual cc reformulation linearization} simplifies to a mixed-integer linear program whenever $\|\cdot\|$ represents the $1$-norm or the $\infty$-norm, and it can be reformulated as a mixed-integer second-order cone program whenever $\|\cdot\|$ represents a $p$-norm for some $p \in \mathbb{Q}$, $p>1$, see Section~2.3.1 in \cite{Ben-tal_Nemirovski_book}.
\end{remark}
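For concreteness, the following cvxpy sketch (our own, not the authors' implementation) assembles the big-M formulation~\eqref{prob:individual cc reformulation linearization}. The function name, the renaming of the scalar offset to \texttt{b0}, the placeholder value of $\mathrm{M}$ and the callable \texttt{make\_X} describing $\mathcal{X}$ are all our assumptions; solving the model requires a mixed-integer conic solver accessible through cvxpy (e.g., MOSEK or GUROBI). For the $2$-norm ground metric the dual norm is again the $2$-norm (\texttt{dual\_p=2}), whereas the $1$-norm ground metric corresponds to \texttt{dual\_p='inf'}.
\begin{verbatim}
import cvxpy as cp

def individual_cc_model(c, A, a, b_vec, b0, xi_hat, make_X,
                        epsilon, theta, dual_p=2, bigM=1e4):
    """Big-M model of the individual chance constraint with safety set
    {xi : (A xi + a)^T x < b_vec^T xi + b0}; A has shape (L, K)."""
    L, K = A.shape
    N = xi_hat.shape[0]
    x = cp.Variable(L)
    s = cp.Variable(N, nonneg=True)
    t = cp.Variable()
    q = cp.Variable(N, boolean=True)
    resid = b_vec - A.T @ x              # vector whose dual norm scales theta
    cons = [epsilon * N * t - cp.sum(s)
            >= theta * N * cp.norm(resid, dual_p)]
    for i in range(N):
        cons += [xi_hat[i] @ resid + b0 - a @ x + bigM * q[i] >= t - s[i],
                 bigM * (1 - q[i]) >= t - s[i]]
    cons += make_X(x)                    # constraints describing the set X
    return cp.Problem(cp.Minimize(c @ x), cons), x
\end{verbatim}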
\begin{remark}
The deterministic reformulation~\eqref{prob:individual cc reformulation linearization} is remarkably parsimonious. For an $L$-dimensional feasible region $\mathcal{X} \subseteq \mathbb{R}^L$ and an empirical distribution $\hat{\mathbb{P}}$ with $N$ data points, our reformulation~\eqref{prob:individual cc reformulation linearization} has $N$ binary variables, $L + N + 1$ continuous decisions as well as $2N + 1$ constraints (excluding those that describe $\mathcal{X}$). In comparison, a classical chance constrained formulation, which is tantamount to setting the Wasserstein radius to $\theta = 0$ in problem~\eqref{prob:cc general}, has $N$ binary variables, $L$ continuous decisions as well as $N + 1$ constraints. Thus, adding distributional robustness only requires an additional $N + 1$ continuous decisions as well as $N$ further constraints.
\end{remark}
\begin{remark}\label{rem:piecewise_linear}
The deterministic reformulation~\eqref{prob:individual cc reformulation linearization} requires the specification of a sufficiently large constant $\mathrm{M}$, which can typically be determined by an investigation of the structure of problem~\eqref{prob:individual cc reformulation linearization}. Alternatively, many commercial solver packages allow one to specify the following reformulation of problem~\eqref{prob:individual cc reformulation linearization} directly via piecewise linear constraints:
\begin{equation*}
\begin{array}{rcll}
Z^\star_{\rm ICC} =& \displaystyle \min_{\bm{q}, \bm{s}, t, \bm{x}} & \bm{c}^\top\bm{x} \\
&{\rm s.t.} & \varepsilon N t - \bm{e}^\top\bm{s} \geq \theta N \|\bm{b} - \bm{A}^\top\bm{x}\|_* \\
&& ( (\bm{b} - \bm{A}^\top\bm{x})^\top\bmh{\xi}_i + b - \bm{a}^\top\bm{x})^+ \geq t - s_i &~\forall i \in [N] \\
&& \bm{s} \geq \bm{0}, ~\bm{x} \in \mathcal{X}
\end{array}
\end{equation*}
This formulation has the advantage that it does not require the specification of the constant $\mathrm{M}$.
\end{remark}
\subsection{Reformulation of Joint Chance Constraints with Right-Hand Side Uncertainty}\label{sec:ref_joint_cc}
Assume next that problem~\eqref{prob:cc general} accommodates a joint chance constraint defined through the safety set $\mathcal{S}(\bm{x}) = \{\bm{\xi} \in \mathbb{R}^K \mid \bm{a}^\top_m \bm{x} < \bm{b}^\top_m\bm{\xi} + b_m ~\forall m \in [M]\}$, in which the uncertainty affects only the right-hand sides of the safety conditions. Without loss of generality, we may assume that $\bm{b}_m \ne \bm{0}$ for all $m \in [M]$. Indeed, if $\bm{b}_m = \bm{0}$, then the $m^{\rm th}$ safety condition in the chance constraint becomes independent of the uncertainty and can thus be absorbed in $\mathcal{X}$. Joint chance constrained programs with right-hand side uncertainty have been proposed, among others, for problems in transportation \citep{luedtke2010}, lot-sizing \citep{beraldi2002branch, kuccukyavuz2012mixing}, unit commitment \citep{yanagisawa_2013} and project management \citep{wiesemann2012multi}.
Observe that the complement of the safety set is now representable as $\bar{\mathcal{S}}(\bm{x}) = \bigcup_{m \in [M]} \mathcal{H}_m(\bm{x})$, where $\mathcal{H}_m(\bm{x}) = \{\bm{\xi} \in \mathbb{R}^K \mid \bm{a}^\top_m \bm{x} \geq \bm{b}^\top_m\bm{\xi} + b_m\}$ is a closed halfspace for every $m \in [M]$. By Lemma~\ref{lem:distance to the union of closed half-spaces} in the appendix we have
\begin{equation}\label{eq:distance-joint}
\mathbf{dist}(\bmh{\xi}_i, \bar{\mathcal{S}}(\bm{x})) = \min_{m \in [M]} \bigg\{ \dfrac{(\bm{b}^\top_m\bmh{\xi}_i + b_m - \bm{a}^\top_m\bm{x})^+}{\|\bm{b}_m\|_*} \bigg\} = \bigg(\min_{m \in [M]} \bigg\{\dfrac{\bm{b}^\top_m\bmh{\xi}_i + b_m - \bm{a}^\top_m\bm{x}}{\|\bm{b}_m\|_*} \bigg\}\bigg)^+.
\end{equation}
With this closed-form expression for the distance to the unsafe set, we can reformulate problem~\eqref{prob:cc general} as a mixed integer conic program.
\begin{proposition}\label{prop:joint cc}
For the safety set $\mathcal{S}(\bm{x}) = \{\bm{\xi} \in \mathbb{R}^K \mid \bm{a}^\top_m \bm{x} < \bm{b}^\top_m\bm{\xi} + b_m ~\forall m \in [M]\}$, where $\bm{b}_m \ne \bm{0}$ for all $m \in [M]$, the chance constrained program~\eqref{prob:cc general} is equivalent to the mixed integer conic program
\begin{equation}
\label{prob:joint cc reformulation M linearization}
\begin{array}{rcll}
Z^\star_{\rm JCC} =& \displaystyle \min_{\bm{q}, \bm{s}, t, \bm{x}} & \bm{c}^\top\bm{x} \\
&{\rm s.t.} & \varepsilon N t - \bm{e}^\top\bm{s} \geq \theta N \\
&& \dfrac{\bm{b}^\top_m\bmh{\xi}_i + b_m - \bm{a}^\top_m\bm{x}}{\|\bm{b}_m\|_*} + {\rm M} q_i \geq t - s_i &~\forall m \in [M], ~i \in [N] \\
&& {\rm M} (1 - q_i) \geq t - s_i &~\forall i \in [N] \\
&& \bm{q} \in \{0,1\}^N,~ \bm{s} \geq \bm{0}, ~\bm{x} \in \mathcal{X},
\end{array}
\end{equation}
where ${\rm M}$ is a suitably large (but finite) positive constant.
\end{proposition}
\noindent \emph{Proof of Proposition~\ref{prop:joint cc}.} $\;$
By Theorem~\ref{thm:cc-deterministic}, the chance constrained program~\eqref{prob:cc general} is equivalent to~\eqref{prob:cc reformulation}. Using~\eqref{eq:distance-joint}, the $i^{\rm th}$~member of the second constraint group in~\eqref{prob:cc reformulation} can be reformulated as
$$
\bigg(\displaystyle \min_{m \in [M]} \bigg\{\dfrac{\bm{b}^\top_m\bmh{\xi}_i + b_m - \bm{a}^\top_m\bm{x}}{\|\bm{b}_m\|_*} \bigg\}\bigg)^+ \geq t - s_i.
$$
To eliminate the maximum operator, we introduce a binary variable $q_i \in \{0,1\}$ to re-express the above constraint as
$$
\left\{
\begin{array}{ll}
\dfrac{\bm{b}^\top_m\bmh{\xi}_i + b_m - \bm{a}^\top_m\bm{x}}{\|\bm{b}_m\|_*} + {\rm M} q_i \geq t - s_i &~\forall m \in [M]\\
{\rm M} (1 - q_i) \geq t - s_i
\end{array}
\right.
$$
A similar argument as in the proof of Proposition~\ref{prop:individual cc} shows that a finite value of $\rm M$ is sufficient for our reformulation to be exact.
\Halmos
\endproof
As in Remark~\ref{rem:piecewise_linear} of the previous section, many commercial solvers allow one to specify directly a reformulation of problem~\eqref{prob:joint cc reformulation M linearization} that replaces the constant $\mathrm{M}$ with piecewise linear constraints.
\begin{remark}
The deterministic reformulation~\eqref{prob:joint cc reformulation M linearization} has $N$ binary variables, $L + N + 1$ continuous decisions as well as $(M + 1) N + 1$ constraints (excluding those that describe $\mathcal{X}$). In comparison, the corresponding classical chance constrained formulation has $N$ binary variables, $L$ continuous decisions as well as $MN + 1$ constraints. Thus, adding distributional robustness requires an additional $N + 1$ continuous decisions as well as $N$ further (linear) constraints.
\end{remark}
\section{Numerical Experiments}
We compare our exact reformulation of the ambiguous chance constrained program~\eqref{prob:cc general} with the bicriteria approximation scheme of \cite{xie2020bicriteria} on a portfolio optimization problem in Section~\ref{sec:portfolio} as well as with a classical (non-ambiguous) chance constrained formulation and a Kernel density estimator based version of the ambiguous chance constrained program over a $\phi$-divergence ambiguity set on a transportation problem in Section~\ref{sec:transportation}. Our goal is to investigate the computational scalability of our reformulation as well as its out-of-sample performance in a data-driven setting. All results were produced on an Intel Xeon 2.66GHz processor with 8GB memory in single-core mode using CPLEX 12.8. Following Remark~\ref{rem:piecewise_linear}, we avoid the specification of the constant $\mathrm{M}$ in our ambiguous chance constrained program through the use of piecewise linear constraints.
\subsection{Portfolio Optimization}\label{sec:portfolio}
We consider a portfolio optimization problem studied by \cite{xie2020bicriteria}. The problem asks for the minimum-cost portfolio investment $\bm{x}$ into $K$ assets with random returns $\tilde{\xi}_1, \ldots, \tilde{\xi}_K$ that exceeds a pre-specified target return $w$ with high probability $1 - \varepsilon$. The problem can be cast as the following instance of the ambiguous chance constrained program~\eqref{prob:cc general}:
\begin{equation}\label{eq:portfolio}
\begin{array}{cll}
\displaystyle \min_{\bm{x}} & \bm{c}^\top\bm{x} \\
{\rm s.t.} &\displaystyle \mathbb{P}[\bmt{\xi}^\top\bm{x} > w] \geq 1-\varepsilon &~\forall \mathbb{P} \in \mathcal{F}(\theta)\\
&\bm{x} \geq \bm{0}.
\end{array}
\end{equation}
We compare our exact reformulation of problem~\eqref{eq:portfolio} with the $(\sigma, \gamma)$-bicriteria approximation scheme of \cite{xie2020bicriteria}, which produces solutions that satisfy the ambiguous chance constraint in~\eqref{eq:portfolio} with probability $1 - \sigma \varepsilon$, $\sigma > 1$, and whose costs are guaranteed to exceed the optimal costs in~\eqref{eq:portfolio} by a factor of at most $\gamma = \sigma / (\sigma - 1)$. Since the bicriteria approximation scheme can readily utilize support information for the random vector $\bmt{\xi}$, we replace the ambiguity set $\mathcal{F}(\theta)$ with $\bar{\mathcal{F}}(\theta) = \mathcal{F}(\theta) \cap \{\mathbb{P} \mid \mathbb{P}[\bmt{\xi} \in \mathbb{R}^K_+] = 1\}$ in their approach. Contrary to the experiments conducted by \cite{xie2020bicriteria}, we set $\sigma = 1$. This is to the disadvantage of their approach, as it does not provide any approximation guarantees in that case, but it allows us to compare the resulting portfolios as they provide the same return guarantees. For the performance of the bicriteria approximation scheme with $\sigma > 1$, we refer to Section~6.2 of \cite{xie2020bicriteria}.
In our numerical experiments, we consider a similar setting as \cite{xie2020bicriteria}. We set $K = 50$, $w = 1$ and choose the cost coefficients $c_1, \ldots, c_{50}$ uniformly at random from $\{ 1, \ldots, 100 \}$. Each asset return $\tilde{\xi}_i$ is governed by a uniform distribution on $[0.8, 1.5]$, and we assume that $N = 100$ training samples $\bmh{\xi}_1, \ldots, \bmh{\xi}_{100}$ are available. We use the $2$-norm Wasserstein ambiguity set, which implies that our exact reformulation of problem~\eqref{eq:portfolio} is a mixed-integer second-order cone program, and set the Wasserstein radius to $\theta \in \{0.05, 0.1, 0.2\}$. The risk threshold is set to $\varepsilon \in \{0.05, 0.1\}$.
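To illustrate how problem~\eqref{eq:portfolio} is passed to a solver, the following cvxpy sketch (our own, with a hypothetical function name, a placeholder big-M constant and without the additional support information used by the bicriteria scheme) instantiates Proposition~\ref{prop:individual cc} for the portfolio setting, where the safety set is $\{\bm{\xi} \mid \bm{\xi}^\top \bm{x} > w\}$ and the $2$-norm ground metric yields a mixed-integer second-order cone program; a suitable solver (e.g., MOSEK or GUROBI via cvxpy) is assumed.
\begin{verbatim}
import cvxpy as cp

def portfolio_model(c, xi_hat, w, epsilon, theta, bigM=1e4):
    """Big-M model for the portfolio chance constraint xi^T x > w:
    here the vector b - A^T x reduces to x itself, so its dual 2-norm
    is simply ||x||_2."""
    K = len(c)
    N = xi_hat.shape[0]
    x = cp.Variable(K, nonneg=True)
    s = cp.Variable(N, nonneg=True)
    t = cp.Variable()
    q = cp.Variable(N, boolean=True)
    cons = [epsilon * N * t - cp.sum(s) >= theta * N * cp.norm(x, 2)]
    for i in range(N):
        cons += [xi_hat[i] @ x - w + bigM * q[i] >= t - s[i],
                 bigM * (1 - q[i]) >= t - s[i]]
    return cp.Problem(cp.Minimize(c @ x), cons), x
\end{verbatim}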
\begin{table}[tb]
\begin{center}
\begin{tabular}{c>{\centering\arraybackslash}p{1.5cm}>{\centering\arraybackslash}>{\centering\arraybackslash}p{1.5cm}>{\centering\arraybackslash}>{\centering\arraybackslash}p{1.5cm}>{\centering\arraybackslash}p{1.5cm}>{\centering\arraybackslash}p{1.5cm}>{\centering\arraybackslash}p{1.5cm}}
\hline
\hline
\multirow{2}[0]{*}{$(\varepsilon, \theta)$}&\multicolumn{3}{c}{Ratio of objective values} &\multicolumn{3}{c}{Ratio of runtimes} \\
\cline{2-7}
&5\% &50\% &95\% &5\% &50\% &95\% \\
\hline
\multicolumn{1}{c}{$(0.05, 0.05)$} &1.6 &2.4 &3.2 &5.2 &8.3 &10.8 \\
\multicolumn{1}{c}{$(0.05, 0.10)$} &1.9 &2.9 &5.0 &4.9 &7.7 &10.6 \\
\multicolumn{1}{c}{$(0.05, 0.20)$} &2.3 &2.8 &3.5 &3.8 &4.9 &7.2 \\
\multicolumn{1}{c}{$(0.10, 0.05)$} &1.0 &1.1 &1.3 &7.3 &10.9 &13.0 \\
\multicolumn{1}{c}{$(0.10, 0.10)$} &1.5 &2.3 &3.1 &7.1 &9.7 &13.3 \\
\multicolumn{1}{c}{$(0.10, 0.20)$} &2.1 &2.7 &3.9 &4.2 &6.2 &10.1 \\
\hline
\hline \\
\end{tabular}
\end{center}
\caption{\textnormal{Objective and runtime ratios of the bicriteria approximation scheme for different values of $\varepsilon$ and $\theta$. For each parameter setting, we report the $5\%$, $50\%$ and $95\%$ quantiles over 50 randomly generated instances.}}
\label{table:bicteria}
\end{table}
\begin{figure}
\caption{\textnormal{Runtimes (left) and reciprocal runtime ratios (right) of our exact reformulation and the bicriteria approximation scheme for $(\varepsilon, \theta) = (0.10,0.05)$ and different sample sizes $N$. The shaded regions cover the $5\%$ to $95\%$ quantiles of $50$ randomly generated instances, whereas the solid lines describe the median statistics.}}
\label{fig:runtime}
\end{figure}
Table~\ref{table:bicteria} compares the objective values and runtimes of our exact reformulation and the bicriteria approximation scheme for various combinations of the risk threshold $\varepsilon$ and Wasserstein radius $\theta$. The table shows that despite incorporating additional support information, the bicriteria approximation scheme determines solutions whose costs significantly exceed those of the solutions found by our exact reformulation. Perhaps more surprisingly, the bicriteria approximation scheme is also computationally more expensive. As Figure~\ref{fig:runtime} shows, however, this is an artifact of the small sample size $N$ employed in the experiments of \cite{xie2020bicriteria}, and the bicriteria approximation scheme is faster than our exact reformulation for larger sample sizes.
\subsection{Transportation}\label{sec:transportation}
We consider a probabilistic transportation problem studied by \cite{luedtke2010} and \cite{yanagisawa_2013}. The problem asks for the cost-optimal distribution of a single good from a set of factories $f \in [F]$ to a set of distribution centers $d \in [D]$. Each factory $f \in [F]$ has an individual production capacity $m_f$, and each distribution center $d \in [D]$ faces a random aggregate customer demand $\tilde{\xi}_d$. The cost of shipping one unit of the good from factory $f$ to distribution center $d$ is denoted by $c_{fd}$. We aim to find a transportation plan that minimizes the shipping costs, respects the production capacity of each factory and satisfies the demand at each distribution center with high probability. The problem can be cast as the following instance of problem~\eqref{prob:cc general}:
\begin{equation}\label{eq:amb_transp_prob}
\begin{array}{cl@{\quad}l}
\displaystyle \min_{\bm{x}} & \bm{c}^\top\bm{x} \\
{\rm s.t.} & \displaystyle \mathbb{P} \Bigg[ \sum_{f \in [F]} x_{fd} \geq \tilde{\xi}_d \quad \forall d \in [D] \Bigg] \geq 1 - \varepsilon &~ \forall \mathbb{P} \in \mathcal{F}(\theta) \\[5mm]
& \displaystyle \sum_{d \in [D]} x_{fd} \leq m_f &~ \forall f \in [F] \\
& \displaystyle \bm{x} \geq \bm{0}.
\end{array}
\end{equation}
Here, $x_{fd}$ denotes the quantity shipped from factory $f \in [F]$ to distribution center $d \in [D]$. Problem~\eqref{eq:amb_transp_prob} is an ambiguous joint chance constrained program with right-hand side uncertainty. Since each safety condition in~\eqref{eq:amb_transp_prob} contains a single random variable with coefficient $1$ on the right-hand side, our exact reformulation reduces to the same mixed-integer linear program for any norm $\lVert \cdot \rVert$.
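For illustration, the following cvxpy sketch (our own, not the code used in the experiments; the function name, data layout and big-M value are assumptions) assembles this mixed-integer linear program for a given cost matrix $\bm{c}$, capacities $\bm{m}$ and demand samples $\bmh{\xi}_1, \ldots, \bmh{\xi}_N$; a mixed-integer-capable solver is assumed.
\begin{verbatim}
import cvxpy as cp

def dro_transportation(c, m, xi_hat, epsilon, theta, bigM=1e4):
    """Big-M MILP for the ambiguous joint chance constrained transportation
    problem; c is F x D, m has length F, xi_hat is N x D."""
    F, D = c.shape
    N = xi_hat.shape[0]
    x = cp.Variable((F, D), nonneg=True)    # shipped quantities
    s = cp.Variable(N, nonneg=True)
    t = cp.Variable()
    q = cp.Variable(N, boolean=True)
    supply = cp.sum(x, axis=0)              # units arriving at each center
    cons = [epsilon * N * t - cp.sum(s) >= theta * N,
            cp.sum(x, axis=1) <= m]         # factory capacities
    for i in range(N):
        # distance of sample i to the unsafe halfspace of every center
        cons += [supply - xi_hat[i] + bigM * q[i] >= t - s[i],
                 bigM * (1 - q[i]) >= t - s[i]]
    return cp.Problem(cp.Minimize(cp.sum(cp.multiply(c, x))), cons), x
\end{verbatim}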
In our first experiment, we investigate the scalability of the exact reformulation of problem~\eqref{eq:amb_transp_prob} that is offered by Proposition~\ref{prop:joint cc}. To this end, we generate random test instances with $5$ factories and $10, 20, \ldots, 50$ distribution centers that are located uniformly at random on the Euclidean plane $[0, 10]^2$. We identify the transportation costs $c_{fd}$ with the Euclidean distances between the factories and distribution centers. The demand vector $\bmt{\xi}$ is described by $50$, $100$ or $150$ samples from a uniform distribution that is supported on $[0.8 \bm{\mu}, 1.2 \bm{\mu}]$, where the expected demand $\mu_d$ at distribution center $d \in [D]$ is picked uniformly at random from the interval $[0, 10]$. The capacity of each factory is chosen uniformly at random, and the capacities are subsequently scaled so that the factories can jointly produce up to $150\%$ of the maximum cumulative demand. For each instance, we choose $10$ ascending Wasserstein radii $\theta_1 < \ldots < \theta_{10}$ uniformly so that $\theta_1 = 0.001$ and $\theta_{10}$ is the smallest radius for which the corresponding instance of problem~\eqref{eq:amb_transp_prob} becomes infeasible. We fix $\varepsilon = 0.1$.
Tables~\ref{table:scalability_50}--\ref{table:scalability_150} and Figure~\ref{fig:case} compare the runtimes of our ambiguous chance constrained program with those of the classical chance constrained formulation of problem~\eqref{eq:amb_transp_prob},
\begin{equation}\label{eq:classical_transp_prob}
\begin{array}{cl@{\quad}l}
\displaystyle \min_{\bm{x}, \bm{y}} & \bm{c}^\top\bm{x} \\
{\rm s.t.} & \displaystyle \sum_{f \in [F]} x_{fd} + \mathrm{M} y_i \geq \hat{\xi}_{id} & \forall d \in [D], ~i \in [N] \\
& \displaystyle \bm{e}^\top \bm{y} \leq \lfloor \varepsilon N \rfloor \\
& \displaystyle \sum_{d \in [D]} x_{fd} \leq m_f & \displaystyle \forall f \in [F] \\
& \displaystyle \bm{x} \geq \bm{0}, ~ \bm{y} \in \{ 0, 1 \}^N,
\end{array}
\end{equation}
where $\mathrm{M}$ is a sufficiently large positive constant. The results show that for the smallest Wasserstein radius $\theta_1 = 0.001$, the ambiguous chance constrained program~\eqref{eq:amb_transp_prob} is---as expected---more difficult to solve than the corresponding classical chance constrained program~\eqref{eq:classical_transp_prob}. Interestingly, the ambiguous chance constrained program becomes considerably \emph{easier} to solve than the classical chance constrained program for the larger Wasserstein radii $\theta_2, \ldots, \theta_{10}$. This surprising result is explained in Figure~\ref{fig:radius}, which shows that the feasible region of the ambiguous chance constrained program tends to convexify as the Wasserstein radius $\theta$ increases. In fact, one can show that the set of vectors $\bm{q} \in \{ 0, 1 \}^N$ that are feasible in the deterministic reformulation of problem~\eqref{eq:amb_transp_prob} shrinks monotonically with $\theta$. Since it is the presence of these binary vectors that causes the non-convexity of problem~\eqref{eq:amb_transp_prob}, one can expect the problem to become better behaved as $\theta$ increases.
\begin{table}[tb]
\begin{center}
\begin{tabular}{c>{\centering\arraybackslash}p{1.1cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}}\hline
\hline
{\tabincell{c}{$\#$ of distribution \\ centers}} & CC &$\theta_1$ &$\theta_2$ &$\theta_3$ &$\theta_4$ &$\theta_5$
&$\theta_6$ &$\theta_7$
&$\theta_8$ &$\theta_9$ &$\theta_{10}$ \\
\hline
10 &0.5 &3.0 &0.1 &$<0.1$ &$<0.1$ &$<0.1$ &$<0.1$ &$<0.1$ &$<0.1$ &$<0.1$ &$<0.1$ \\
20 &4.0 &9.7 &0.2 &0.1 &0.1 &0.1 &$<0.1$ &$<0.1$ &$<0.1$ &0.1 &0.1 \\
30 &7.3 &13.1 &0.3 &0.2 &0.1 &0.1 &0.1 &0.1 &0.1 &0.1 &0.2 \\
40 &11.2 &19.3 &0.4 &0.2 &0.2 &0.2 &0.2 &0.2 &0.2 &0.2 &0.3 \\
50 &15.8 &166.5 &0.3 &0.2 &0.2 &0.2 &0.2 &0.2 &0.2 &0.3 &0.3 \\
\hline
\hline \\
\end{tabular}
\end{center}
\caption{\textnormal{Solution times in seconds for $N = 50$ training samples. `CC' and `$\theta_i$' refer to problem~\eqref{eq:classical_transp_prob} and problem~\eqref{eq:amb_transp_prob} with different Wasserstein radii, respectively. We present median results over 100 random instances. Where the median solution time exceeds 3,600s, we report the median optimality gap in brackets.}}
\label{table:scalability_50}
\end{table}
\begin{table}[tb]
\begin{center}
\begin{tabular}{c>{\centering\arraybackslash}p{1.1cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}}\hline
\hline
{\tabincell{c}{$\#$ of distribution \\ centers}} & CC &$\theta_1$ &$\theta_2$ &$\theta_3$ &$\theta_4$ &$\theta_5$
&$\theta_6$ &$\theta_7$
&$\theta_8$ &$\theta_9$ &$\theta_{10}$ \\
\hline
10 &16.3 &166.4 &4.7 &2.0 &1.5 &1.4 &1.4 &1.4 &1.4 &1.5 &1.8 \\
20 &93.6 &1910.8 &8.1 &2.9 &2.5 &2.5 &2.4 &2.4 &2.4 &2.7 &2.8 \\
30 &298.3 &$[0.2\%]$ &12.0 &4.0 &3.5 &3.3 &3.2 &3.3 &3.2 &3.6 &3.8 \\
40 &664.2 &$[0.8\%]$ &16.0 &5.1 &4.7 &4.5 &4.5 &4.5 &4.4 &4.8 &5.1 \\
50 &1,293.2 &$[0.8\%]$ &20.3 &6.5 &5.6 &5.5 &5.4 &5.4 &5.4 &5.7 &6.2 \\
\hline
\hline \\
\end{tabular}
\end{center}
\caption{\textnormal{Solution times for $N = 100$ training samples. The table has the same interpretation as Table~\ref{table:scalability_50}.}}
\label{table:scalability_100}
\end{table}
\begin{table}[tb]
\begin{center}
\begin{tabular}{c>{\centering\arraybackslash}p{1.1cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}>{\centering\arraybackslash}p{0.9cm}}\hline
\hline
{\tabincell{c}{$\#$ of distribution \\ centers}} & CC &$\theta_1$ &$\theta_2$ &$\theta_3$ &$\theta_4$ &$\theta_5$
&$\theta_6$ &$\theta_7$
&$\theta_8$ &$\theta_9$ &$\theta_{10}$ \\
\hline
10 &94.6 &$[0.7\%]$ &85.6 &48.5 &44.8 &44.0 &42.5 &43.3 &43.0 &52.0 &77.0 \\
20 &874.2 &$[1.9\%]$ &143.9 &90.5 &76.3 &75.6 &72.8 &72.5 &73.2 &85.7 &112.4 \\
30 &$[0.1\%]$ &$[3.2\%]$ &213.8 &126.4 &113.0 &109.5 &108.9 &108.8 &110.3 &125.4 &165.1 \\
40 &$[0.3\%]$ &$[3.7\%]$ &286.8 &168.2 &154.2 &149.1 &149.3 &151.7 &152.1 &182.8 &231.5 \\
50 &$[0.4\%]$ &$[3.0\%]$ &324.6 &207.0 &189.3 &190.9 &190.0 &190.4 &191.8 &233.0 &294.4\\
\hline
\hline \\
\end{tabular}
\end{center}
\caption{\textnormal{Solution times for $N = 150$ training samples. The table has the same interpretation as Table~\ref{table:scalability_50}.}}
\label{table:scalability_150}
\end{table}
\begin{figure}
\caption{\textnormal{Median solution times (below dashed lines) and optimality gaps (above dashed lines) for $D = 10$ and $N = 50$ (left), $D = 30$ and $N = 100$ (middle) and $D = 50$ and $N = 150$ (right).}}
\label{fig:case}
\end{figure}
\begin{figure}
\caption{\textnormal{For a transportation problem with $F = 1$ factory, $D = 2$ distribution centers and $N = 10$ training samples, the graphs visualize the feasible regions of the classical chance constrained formulation~\eqref{eq:classical_transp_prob} and of the ambiguous chance constrained program~\eqref{eq:amb_transp_prob} for increasing Wasserstein radii $\theta$.}}
\label{fig:radius}
\end{figure}
\begin{figure}
\caption{\textnormal{Probability of meeting the safety conditions (left) and transportation costs (right) for several data-driven approaches in our transportation problem with uniformly distributed demands. Both figures present median quantities over $100$ random instances.}}
\label{fig:uniform}
\end{figure}
\begin{figure}
\caption{\textnormal{Probability of meeting the safety conditions (left) and transportation costs (right) for several data-driven approaches in our transportation problem with normally distributed demands. Both figures present median quantities over $100$ random instances.}}
\label{fig:normal_unbounded}
\end{figure}
\begin{figure}
\caption{\textnormal{Probability of meeting the safety conditions (left) and transportation costs (right) for several data-driven approaches in our transportation problem with exponentially distributed demands. Both figures present median quantities over $100$ random instances.}}
\label{fig:exponential_unbounded}
\end{figure}
We next compare the out-of-sample performance of our ambiguous chance constrained program~\eqref{eq:amb_transp_prob}, where the risk threshold $\varepsilon \in \{ 0.1, \, 0.05, \, 0.01 \}$ and the Wasserstein radius $\theta \in \{ 1\mathrm{E}-i \, : \, i = 2, 3, \ldots, 6 \}$ are selected using a $7$-fold cross-validation on the training dataset (`DRO'), with \emph{(i)} the classical chance constrained program~\eqref{eq:classical_transp_prob}, where the risk threshold is fixed to $\varepsilon = 0.1$ (`SAA'), \emph{(ii)} a variant of the classical chance constrained program~\eqref{eq:classical_transp_prob}, where the risk threshold $\varepsilon \in \{ 1\mathrm{E}-i \, : \, i = 1, 2, \ldots, 5 \} \cup \{ 0.05 \}$ is selected using a $7$-fold cross-validation on the training dataset (`CCT'), as well as \emph{(iii)} a Kernel density estimator based version of the ambiguous chance constrained program over a $\phi$-divergence ambiguity set, where the risk threshold $\varepsilon \in \{ 0.1, \, 0.05, \, 0.01 \}$ and the bandwidth $h \in \{ 1\mathrm{E}-i \, : \, i = -2, -1, \ldots, 3 \}$ of the Gaussian kernel are selected using a $7$-fold cross-validation on the training dataset (`KDE'; see \citealp{Jiang_Guan_2016}). We note that CCT can be regarded as a cross-validated version of the `best data-driven reformulation' proposed by \citet{lam:best}. We generate random problem instances with $5$ factories, $20$ distribution centers and $25$, $30$, \ldots, $250$ training samples. In all experiments, the expected demand $\mu_d$ at distribution center $d \in [D]$ is picked uniformly at random from the interval $[0, 10]$, whereas the actual demands follow a uniform distribution that is supported on $[0.8 \bm{\mu}, 1.2 \bm{\mu}]$ (Figure~\ref{fig:uniform}), a normal distribution with mean $\bm{\mu}$ and covariance matrix $0.1 \cdot \text{diag} (\bm{\mu})$ (Figure~\ref{fig:normal_unbounded}) or an exponential distribution where each distribution center $d \in [D]$ faces a demand $(1 + 0.4 \cdot [\tilde{\zeta}_d - 0.5]) \cdot \mu_d$, where $\tilde{\zeta}_d$ follows an exponential distribution with parameter $\lambda = 2$ (Figure~\ref{fig:exponential_unbounded}). In all cases, the demands are truncated to the non-negative real line. Our results indicate that the classical chance constrained program~\eqref{eq:classical_transp_prob} generates solutions that significantly violate the chance constraint, even if we select the risk threshold $\varepsilon$ out-of-sample. The two ambiguous chance constrained formulations, on the other hand, achieve the desired risk threshold, often at a modest increase in transportation costs. While our approach and the $\phi$-divergence ambiguity set perform similarly, our formulation appears to result in lower transportation costs, especially when data is scarce.
\section*{Acknowledgments}
The authors are grateful to the review team for constructive comments that led to substantial improvements of the paper.
The authors gratefully acknowledge financial support from the ECS grant~9048191, the SNSF grant BSCGI0$\underline{~}$157733 and the EPSRC grant EP/N020030/1.
\begin{appendices}
\section{Distance to a Union of Halfspaces}
The distance of a point $\bmh{\xi} \in \mathbb{R}^K $ to a closed set $\mathcal{C} \subseteq \mathbb{R}^K$ with respect to a norm $\|\cdot\|$ is defined as
$$
\mathbf{dist}(\bmh{\xi}, \mathcal{C}) = \min\{\|\bm{\xi} - \bmh{\xi}\| \mid \bm{\xi} \in \mathcal{C}\}.
$$
Note that the minimum is always attained. In the following, we derive a closed-form expression for the distance of a point to the union of finitely many closed halfspaces.
\begin{lemma}
\label{lem:distance to the union of closed half-spaces}
Let $\mathcal{H}_m = \{\bm{\xi} \in \mathbb{R}^K \mid a_m \geq \bm{b}^\top_m \bm{\xi}\}$ be a closed halfspace for each $m \in [M]$. If $\mathcal{C} = \bigcup_{m \in [M]} \mathcal{H}_m$ denotes the union of all halfspaces, then the distance of a point $\bmh{\xi}$ to $\mathcal{C}$ is given by
$$
\mathbf{dist}(\bmh{\xi}, \mathcal{C}) = \min_{m \in [M]} \bigg\{\dfrac{(\bm{b}^\top_m\bmh{\xi} - a_m)^+}{\|\bm{b}_m\|_*}\bigg\} = \bigg(\min_{m \in [M]} \bigg\{\dfrac{\bm{b}^\top_m\bmh{\xi} - a_m}{\|\bm{b}_m\|_*}\bigg\}\bigg)^+.
$$
\end{lemma}
\noindent \emph{Proof of Lemma~\ref{lem:distance to the union of closed half-spaces}.} $\;$
We first prove the assertion for $M=1$, in which case $\mathcal{C} = \mathcal{H}_1$.
We thus have
\begin{align*}
\mathbf{dist}(\bmh{\xi}, \mathcal{C}) =~& \min_{\zeta, \bm{\xi}} \big\{ \zeta \mid \zeta \geq \|\bm{\xi} - \bmh{\xi}\| , ~ a_1 \geq \bm{b}^\top_1\bm{\xi} \big\} \\
=~ & \max_{u, \bm{v}, w} \big\{\bm{v}^\top\bmh{\xi} - w a_1 ~\big|~ u = 1 ,~ \bm{v} = \bm{b}_1 w, ~ u \geq \|\bm{v}\|_*, ~w \geq 0 \big\}\\
=~ & \max_{ w} \big\{(\bm b_1^\top \bmh{\xi}- a_1) w ~\big|~ w\le 1/ \|\bm{b}_1\|_*, ~w \geq 0\big\} \\
=~ & \dfrac{(\bm{b}^\top_1\bmh{\xi} - a_1)^+}{\|\bm{b}_1\|_*},
\end{align*}
where the second equality follows from strong conic duality, which holds because the primal minimization problem is strictly feasible. For general $M\ge 1$, the distance to the union $\mathcal{C}$ equals the minimum of the distances to the individual halfspaces, and thus
\begin{align*}
\mathbf{dist}(\bmh{\xi}, \mathcal{C}) =~& \min_{m \in [M]} \mathbf{dist}(\bmh{\xi}, \mathcal{H}_m)= \min_{m \in [M]} \bigg\{\dfrac{(\bm{b}^\top_m\bmh{\xi} - a_m)^+}{\|\bm{b}_m\|_*}\bigg\} = \bigg(\min_{m \in [M]} \bigg\{\dfrac{\bm{b}^\top_m\bmh{\xi} - a_m}{\|\bm{b}_m\|_*}\bigg\}\bigg)^+,
\end{align*}
where the second equality follows from the first part of the proof.
\Halmos
\endproof
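As an informal illustration (the snippet and its names are ours, not part of the paper's implementation), the closed-form expression of Lemma~\ref{lem:distance to the union of closed half-spaces} is straightforward to evaluate; the sketch below assumes the Euclidean norm, whose dual norm is again the Euclidean norm.
\begin{verbatim}
import numpy as np

def dist_to_union_of_halfspaces(xi_hat, a, B):
    """Distance of xi_hat to the union of {xi : b_m' xi <= a_m} in the
    Euclidean norm (whose dual norm is also Euclidean); rows of B are b_m."""
    slack = B @ xi_hat - a               # b_m' xi_hat - a_m, for each m
    dual = np.linalg.norm(B, axis=1)     # ||b_m||_* = ||b_m||_2 here
    return max(np.min(slack / dual), 0.0)

# Example: the distance from the origin to {x : x_1 <= -1} u {x : x_2 <= -2}
# is min(1, 2) = 1.
print(dist_to_union_of_halfspaces(np.zeros(2),
                                  a=np.array([-1.0, -2.0]),
                                  B=np.eye(2)))
\end{verbatim}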
\end{appendices}
\end{document}
\begin{document}
\frontmatter
\title[Suspensions of homology spheres]{Suspensions of homology spheres}
\author[R.D. Edwards]{Robert D. Edwards}
\address{Department of Mathematics, UCLA, Los Angeles, CA 90095-1555}
\email{[email protected]}
\thanks{}
\date{}
\dedicatory{This research was supported in part under NSF Grant No. MCS 76-06903.
Preparation of the electronic manuscript was supported by NSF Grant
DMS-0407583. Final editing was carried out by Fredric Ancel, Craig Guilbault
and Gerard Venema.}
\begin{abstract}
This article is one of three highly influential articles on the topology of
manifolds written by Robert D. Edwards in the 1970's but never published. This
article \textquotedblleft Suspensions of homology spheres\textquotedblright
\ presents the initial solutions of the fabled Double Suspension Conjecture.
The article \textquotedblleft Approximating certain cell-like maps by
homeomorphisms\textquotedblright\ presents the definitive theorem on the
recognition of manifolds among resolvable generalized manifolds. (This work
garnered Edwards an invitation to give a one-hour plenary address to the 1978
International Congress of Mathematicians.) The third article \textquotedblleft
Topological regular neighborhoods\textquotedblright\ develops a comprehensive
theory of regular neighborhoods of locally flatly embedded topological
manifolds in high dimensional topological manifolds. The manuscripts of these
three articles have circulated privately since their creation. The organizers
of the Workshops in Geometric Topology
(http://www.math.oregonstate.edu/\symbol{126}topology/workshop.htm) with the
support of the National Science Foundation have facilitated the preparation of
electronic versions of these articles to make them publicly available.
This article contains four major theorems:
\begin{enumerate}
\item[I.] The double suspension of Mazur's homology 3-sphere is a 5-sphere,
\item[II.] The double suspension of any homology n-sphere that bounds a
contractible (n+1)-manifold is an (n+2)-sphere,
\item[III.] The double suspension of any homology 3-sphere is the cell-like
image of a 5-sphere.
\item[IV.] The triple suspension of any homology 3-sphere is a
6-sphere.
\end{enumerate}
Edwards' proof of I was the first evidence that the suspension process could
transform a non-simply connected manifold into a sphere, thereby answering a
question that had puzzled topologists since the mid-1950's if not earlier.
Results II, III and IV represent significant advances toward resolving the
general double suspension conjecture: the double suspension of every homology
n-sphere is an (n+2)-sphere. [The general conjecture was subsequently proved
by J. W. Cannon (Annals of Math. 110 (1979), 83-112).]
\end{abstract}
\maketitle
\tableofcontents
\centerline{\large{\bf Introduction}}
In the study of triangulations of manifolds, there arises naturally the
following.
\textsc{Suspension Question.} \emph{Is there any manifold }$H^{n}$\emph{,
other than the }$n$\emph{-sphere}$\footnote{Throughout this paper,
superscripts on topological spaces always denote dimension (as is customary).
Their occasional appearance or disappearance in later sections has no
notational significance, e.g., $K^{2}$ and $K$ denote the same space.}$\emph{,
such that for some }$k\geq1$\emph{, the }$k$\emph{-fold suspension }$
\Sigma^kH^n$\emph{ is homeomorphic to the }$(n+k)$\emph{-sphere? }(see
Prologue, Section I; definitions are below).
By Alexander duality applied to the $(k-1)$-dimensional \textquotedblleft
suspension sphere\textquotedblright\ $S^{k-1}$ in $\Sigma^{k}H^{n}$, coupled
with the fact that $\Sigma^{k}H^{n}-S^{k-1}$ is homeomorphic to $H^{n}
\times\mathbb{R}^{k}$, such a manifold $H^{n}$ satisfying the suspension question must
necessarily be a \textit{homology} n-\textit{sphere}, which means a closed
topological $n$-manifold having the integral homology groups of the $n$-sphere
(this explains the customary letter $H$). This paper examines the sufficiency
of this condition. The potential singular set (i.e. nonmanifold set) of
$\Sigma^{k}H^{n}$ is along the suspension $(k-1)$-sphere. There one readily
sees that $\Sigma^{k}H^{n}$ is locally homeomorphic to $\mathring{c}
H^{n}\times\mathbb{R}^{k-1}$, where $\mathring{c}H^{n}$ denotes the open cone
on $H^{n}$. It is known that the suspension question really amounts to that of
deciding whether $\mathring{c}H^{n}\times\mathbb{R}^{k-1}$ is a topological
manifold. This is because:
\begin{quotation}
\emph{If the suspension of a compact space }$Y$\emph{ is a manifold, then
}$\Sigma Y$\emph{ must be a sphere.}
\end{quotation}
\noindent The quickest proof of this fact is to argue that the complement in
$\Sigma Y$ of a suspension point (which is homeomorphic to $\mathring{c}Y)$
must be homeomorphic to euclidean space, since every compact subset of it lies
in an open ball (details of this are in \cite{Brwn}).
The $k=2$ case of the above suspension question has come to be known as the
Double Suspension Conjecture: \emph{If }$H^{n}$\emph{ is a homology }
$n$\emph{-sphere, then} $\Sigma^{2}H^{n}\approx S^{n+2}$ (where $\approx$
means \textquotedblleft is homeomorphic to\textquotedblright). The
unrestricted version of this conjecture (i.e. allowing $k>2$) is sometimes
called the Multiple Suspension Conjecture. (I do not know why the Double
Suspension Conjecture is a conjecture, rather than a question or problem; that
is the way most people refer to it now. This conjecture has been raised as a
question in various places, for example \cite{CZ} and \cite{Gl1}-\cite{Gl4};
probably the best known place is Milnor's list of problems \cite[ p. 579]
{Las}. Earlier it was referred to as the Sphere Problem \cite[p. 16]{Moi}. The
suspension question must have confronted all researchers who since the time of
Brouwer's work \cite{Brou} have attempted to understand triangulations of
manifolds. For example, see \cite{Cai1} or \cite{Cai2} (especially $\S $ 6);
the latter work has a particularly good bibliography. See also \cite{Ku}.)
There exist many homology spheres, which are not themselves homeomorphic to
spheres, on which the conjecture can be tested. Perhaps the most famous is
Poincar\'{e}'s binary dodecahedral homology 3-sphere, but unfortunately it
turns out to be somewhat difficult to work with, as we will see. There exist
in profusion more tractable homology n-spheres in all dimensions $n\geq3$ (see
Parts I and II).
All known genuine (i.e. nontrivial) homology n-spheres are
nonsimply-connected, necessarily so when $n\geq5$, for in these dimensions the
generalized topological Poincar\'{e} conjecture \cite{New1} establishes that a
simply-connected homology n-sphere is homeomorphic to a sphere. This explains
why the $k=1$ case of the suspension question is not emphasized, for in that
case an additional necessary condition on $H^{n}$ is that it be
simply-connected (assuming $n\geq2$). Hence that case of the question can have
nontrivial content only possibly when $n=3$ or $4$. If $H^{4}$ is a homotopy
4-sphere, it is known that $\Sigma H^{4}\approx S^{5}$ (see \cite[Assertion p.
83]{Si3} or \cite[Appendix I]{Si6}; compare \cite{Hi2}); if $H^{3}$ is a
homotopy $3$-sphere, it is \textbf{not} known whether $\Sigma H^{3}\approx
S^{4}$, but it is known that $\Sigma^{2}H^{3}\approx S^{5}$ (\cite[Theorem
A]{Si3}; compare \cite{Gl1}).
These results represent the first cases of the suspension conjecture ever
settled (possibly vacuously, of course). For nonsimply connected homology
spheres, the double suspension conjecture remained one of those tantalizing
problems that had three possible outcomes: it could be true never, sometimes,
or always. Now, after several stages of development due to myself and J.
Cannon, the conjecture has been completely settled in the affirmative by
the
\textsc{Double Suspension Theorem.} \textit{The double suspension $\Sigma
^{2}H^{n}$ of any homology sphere $H^{n}$ is homeomorphic to a sphere.
}
The purpose of this paper is to present all of my work related to the
suspension question, which took place during the period 1974-76. Briefly
stated, I established as a general rule that $\Sigma^{k}H^{n}\approx S^{n+k}$
whenever $n+k\geq6$ $(k\geq2)$. Recently, Cannon crystallized and extended my
work to prove a much more general theorem, one consequence of which was the
final case $\Sigma^{2}H^{3}\approx S^{5}$ \cite{Can2}.
This paper is written in four parts, elaborated in the manner I have always
intended, corresponding to the four stages in the development of my
understanding of the problem. The complete contents of the paper are as
follows:
\noindent\textsc{Prologue:}\textbf{ }\textit{Consequences of the Double
Suspension Theorem; plus preliminaries, definitions and notation.
}
As motivation for the main results of Parts I-IV, a number of
consequences of the Double Suspension Theorem are presented. In addition, some
preliminaries are discussed; specifically, cell-like decompositions and the
Bing Shrinking Theorem. Finally, some definitions and notation are
established.
\noindent\textsc{Part I:} \textit{The double suspension of Mazur's homology
3-sphere is} $S^{5}$.\textit{
}
This represents the first case of the conjecture settled for a
nonsimply-connected homology sphere. The proof amounts to a self-contained,
bare-hands construction of a discernible homeomorphism.
\noindent\textsc{Part II:} \textit{The double suspension of any homology
$n$-sphere which bounds} \textit{a contractible $(n+1)$-manifold is a
sphere.
}
In particular, this holds for any homology $n$-sphere, $n\geq4$. This part
generalizes the construction in Part I. It makes essential use of an important
construction introduced by \v{S}tan'ko in \cite{St1}. A Postscript to Part II
explains a certain Replacement Principle for Cell-like Compacta in manifolds.
An Appendix to Part II explains how to shrink the decomposition which is at
the heart of Parts I and II, namely, the spun Bing decomposition.
\noindent\textsc{Part III:} \textit{The double suspension of any homology $3$-sphere is
the image of $S^{5}$ under a cell-like map.
}
Thus, the suspension question for an arbitrary homology $3$-sphere is a
cell-like decomposition space problem. This result was proved independently by
Cannon in \cite{Can3}. This part contains in passing a simplified construction
of Siebenmann's non PL-triangulable topological manifold, extracted from
\cite{Sch}.
\noindent\textsc{Part IV:} \textit{The triple suspension of any homology
$3$-sphere is $S^{6}$.
}
This ad hoc argument rests in the end on a clever construction of Bing, which
he used to establish the shrinkability of a countable collection of flat arcs
in a manifold.
Although these results are being formally published here for the first time,
various informal lecture notes from my talks, taken by various people, have
been around for some time, e.g. from Orsay (Spring 1975), Cambridge (July
1975), Nantes (Spring 1976), Institute for Advanced Study (October 1976), and
the AMS St. Louis meeting (January 1977). I am grateful in particular to A.
Fathi, L. Guillou and Y. M. Vissetti for their clear write-up of my Orsay talks.
\mainmatter
\part*{Prologue: Consequences of the Double Suspension Theorem; plus
preliminaries, definitions and notation}
\section{Consequences of the Double Suspension Theorem}
This section is a compilation of various corollaries, most of them known to
various people before the theorem was proved. (In the seven month period
before Cannon's work, when only my triple suspension theorem (Part IV) was
known as a general rule, some of the results here were restricted to ambient
dimension $\geq6$, rather than the present $\geq5$.)
\subsection{\textbf{Exotic triangulations}}
The most noteworthy corollary is:
\begin{theorem}
\textit{Not all triangulations of topological manifolds need be piecewise
linear ($PL$; i.e. combinatorial) triangulations}. \textit{In fact, for any
given dimension $q\geq5$, there exists a triangulated topological manifold
$Q^{q}$ which is not even homotopically equivalent to any $PL$ manifold}.
\end{theorem}
\textsc{Discussion:} A $PL$ manifold-without-boundary can be defined to be a
polyhedron $P$ which is piecewise linearly homogeneous, that is, for any two
points $x,y\in P$, there is a $PL$ homeomorphism $h:P\rightarrow P$ such that
$h(x)=y$. In this spirit, a triangulated topological manifold can be defined
to be a polyhedron which is \textbf{topologically} homogeneous. The point, then,
of the first part of the assertion above is that there exists a topologically
homogeneous polyhedron which is not piecewise linearly homogeneous. Such an
example is provided by $\Sigma^{2}H^{n}\approx S^{n+2}$ for any nontrivial
polyhedral homology sphere $H^{n}$, e.g. Newman's, Poenaru's or Mazur's (see
Part I).
Still, this example leaves something to be desired, since $\Sigma^{2} H^{n}$
is topologically homeomorphic to a piecewise linearly homogeneous polyhedron.
This leads to the second part of the assertion.
The most easily described example of the polyhedron $Q$ is the familiar one
$Q^{q}=(M^{4}\cup c(\partial M^{4}))\times T^{q-4}$, where $T^{q-4}$ denotes
the $(q-4)$-torus, and where $M^{4}$ is the 4-manifold-with-boundary described
by Milnor as being eight copies of the tangent disc bundle of $S^{2}$ plumbed
together according to the $E_{8}$-diagram (see \cite[Chap 5]{Brwd} or
\cite[\S 8]{HNK}), or (equivalently) described by Hirzebruch as the resolution
of the singularity of the equation $z_{1}^{2}+z_{2}^{3}+z_{3}^{5}=0$,
restricted to the unit ball in $C^{3}$ \cite[$\S $9, esp. Exercise (5.8)]
{HNK}. The manifold $M^{4}$, being smooth, can be given $PL$ manifold
structure. The boundary $\partial M^{4}$ is Poincar\'{e}'s binary dodecahedral
homology 3-sphere. The significance of the manifold-with-singularity
$M^{4}\cup c(\partial M^{4})$ is that it is parallelizable off of the
conepoint, and it has index 8. This contrasts with the bedrock theorem of
Rokhlin that any closed $PL$ 4-manifold which is parallelizable off of a point
must have index divisible by 16.
If $Q$ were homotopically equivalent to a $PL$ manifold, $W$ say, then one
could homotope the map
\[
W\ \overset{\text{hom.equiv}}{\longrightarrow}\ Q\overset{\text{proj.}
}{\longrightarrow}\ T^{q-4}
\]
to be $PL$ and transverse to a point in $T^{q-4}$, producing as point-preimage
a closed $PL$ 4-manifold $N^{4}$. One argues that $N^{4}$ would be
almost-parallelizable, and also would have index 8, contradicting Rokhlin's
theorem. See \cite[Section 5]{Si2} and \cite[IV, App. B]{KS2} for a discussion
of this and related details, and for other references.
The $PL$ singular set of $Q^{q}$ is the subset $c\times T^{q-4}$, along which
$Q^{q}$ is locally homeomorphic to $\mathring{c}(\partial M^{4})\times
\mathbb{R}^{q-4}$. By the Double Suspension Theorem, $Q^{q}$ is locally
euclidean there, and hence is a topological manifold.
The above simple description of the manifold $Q^{q}$ rests of course on the
Double Suspension Theorem and all the work it entails. A topological manifold
homotopically equivalent (now known to be homeomorphic) to $Q^{q}$, for any $q\geq
5$, was first constructed in 1969 by Siebenmann \cite[Section 5]{Si2}, as a
counterexample to the $PL$ triangulation conjecture for topological manifolds.
His construction, which rests in the end only on the local contractibility of
the homeomorphism group of a topological manifold, is the starting point of
the work in Part III.
This discussion closes with some quick remarks related to $Q^{q}$ and to
triangulations of manifolds.
\textsc{1. Triangulating topological manifolds.}\quad The question of whether
all topological manifolds are triangulable (i.e., homeomorphic to polyhedra)
remains open. Completing the line of investigation begun in \cite{Si3}, D.
Galewski - R. Stern \cite{GS2} and T. Matumoto \cite{Mat2} have established
the following stimulating result (incorporating now the Double Suspension
Theorem into their work): \textit{All topological manifolds of dimension
$\geq5$ are triangulable if and only if there exists a homology $3$-sphere
$H^{3}$, having Rokhlin invariant} $1\in\mathbb{Z}/2$ (meaning: $H^{3}$ bounds
a parallelizable $PL$ 4-manifold of index 8), \textit{such that $H^{3}\#H^{3}$
bounds a $PL$ acyclic $4$-manifold (i.e., such that $H^{3}$ is $PL$
homology-cobordant to $-H^{3}$)}. In fact, there is a specific, easily
constructed \textquotedblleft test\textquotedblright\ topological
5-manifold-without-boundary $M^{5}$, which is triangulable if and only if such
a homology 3-sphere exists.
\textsc{2. Algebraic varieties.}\quad It is commonly known that $\Sigma
^{k}H^{n}$ is a real algebraic variety for many homology spheres $H^{n}$, e.g.
Poincar\'{e}'s $H^{3}$, $k$ arbitrary. Recently S. Akbulut and R. Mandelbaum
remarked that the non-PL-triangulable manifold $Q^{q}$ described above can be
realized as a real algebraic variety (or complex algebraic variety, if $q$ is
even) (see \cite{AK}).
\textsc{3. Stratified spaces.}\quad As already shown, the topological space
$Q^{q}$ is an example of a space whose intrinsic topological stratification
has only one stratum, yet for \textbf{any} polyhedral structure on $Q$ (even
up to homotopy), its intrinsic $PL$ stratification must have at least two
strata. D.R. Anderson constructed in \cite{An} an example of such a space $Q$
having no $PL$ stratification as simple as its intrinsic topological
stratification, without using any suspension theorems for homology
spheres.
\textsc{4. Topological transversality.}\quad Assuming the Double Suspension
Theorem true for a single homology 3-sphere $H^{3}$ of Rokhlin invariant 1,
e.g. Poincar\'{e}'s $H^{3}$, M. Scharlemann established the following
transversality theorem at dimension 4 (this is the nonrelative version; for
complete statements, see \cite[Thms. B and C]{Sch}): \textit{Given a map
$f:M^{m}\rightarrow E$ from a topological $m$-manifold to the total space $E$
of a euclidean microbundle $\xi:X\hookrightarrow E\rightarrow X$ of fiber
dimension $m-4$ over a space $X$, then there is an arbitrarily small homotopy
of $f$ to a map $f_{\ast}$ such that $f_{\ast}$ is microbundle-transverse to
the core $X$ of $E$, and such that $f_{\ast}^{-1}(X)$ is a
$4$-manifold-with-isolated-singularities, each singularity being homeomorphic to
$\overset{\circ}{c}H^{3}$.} Recall that if $m-k\neq4\neq m$, where $k=$ fiber
dimension of $E$, then Kirby-Siebenmann \cite[III, Section 1]{KS2} proved the
best possible topological transversality theorem, which concludes that
$f_{\ast}^{-1}(X)$ is an $(m-k)$-manifold, without singularities.
\textsc{5. Collapsibility and shellability.}\quad In \cite{BCC}, the authors
prove the following curious result, using the fact that the double suspension
of some nonsimply connected homology $n$-sphere $H^{n}$ is topologically a
sphere: \textit{For every $n\geq3$, there is a polyhedron $P^{n+2}$,
topologically homeomorphic to the $(n+2)$-ball, such that $P\times I^{n-2}$ is
not collapsible, but $P\times I^{n+3}$ is collapsible.} The example is
$P=\Sigma^{2}H^{n}-\operatorname*{int}B^{n+2}$, where $B^{n+2}$ is any $PL$
cell in $\Sigma^{2}H^{n}$ disjoint from the suspension circle. In a similar
vein, $\Sigma^{2}H^{n}$ provides an example of a nonshellable triangulation of
a sphere (see \cite{DK}).
\subsection{\textbf{Polyhedral homology manifolds.}}
Another consequence of the Double Suspension Theorem is the second half of the
following assertion (the first half being already known).
\begin{theorem}
\textit{Suppose $P$ is a connected polyhedron. If $P$ is a topological
manifold, then $P$ is a homotopy manifold (as defined below). Conversely, if
$P$ is a homotopy manifold and $\dim P\neq4\neq\dim\partial P$, then $P$ is a
topological manifold.}
\end{theorem}
The dimension restriction arises because it is not known whether the cone on a
homotopy 3-sphere is a manifold.
Two special cases of the above assertion are as follows:
\begin{corollary}
\textit{Suppose $P_{1},P_{2}$ are two connected polyhedra, with $\dim
P_{i}\neq0\neq\dim\partial P_{i}$ (the latter nonequality meaning that $P_{i}$
contains no open set homeomorphic to $[0,1))$. Then}
\begin{enumerate}
\item \textit{the product $P_{1}\times P_{2}$ is a topological manifold
$\Longleftrightarrow P_{1}$ and $P_{2}$ are homology manifolds with
homotopically collared boundaries, and}
\item \textit{the join $P_{1}\star P_{2}$ is a compact topological manifold
(necessarily a ball or sphere) $\Longleftrightarrow P_{1}$ and $P_{2}$ are
compact homology manifolds with homotopically collared boundaries, such that
each of $P_{1}$ and $P_{2}$ either has the integral homology groups of a
sphere, or else is contractible.}
\end{enumerate}
\end{corollary}
A \textit{polyhedral homology manifold} is a polyhedron $P$ in which the link
of each component of each stratum has the integral homology groups of either a
sphere or a point. Using duality, this property holds if it is known to hold
only for the links of those strata-components which are closed subsets of $P$.
The union of the strata-components whose links are acyclic comprises a
subpolyhedron of $P$, called the \textit{boundary} of $P$ and denoted
$\partial P$; it can be shown to be a homology manifold-without-boundary.
The strata referred to in this discussion are from any $PL$ stratification of
$P$ compatible with the $PL$ structure of the polyhedron. For example, one
could let the $i^{\text{th}}$ stratum be $K^{(i)}-K^{(i-1)}$, where $K$ is a
simplicial complex which is $PL$ homeomorphic to $P$, and where $K^{(j)}$
denotes the $j$-skeleton of $K$. The most natural stratification of a
polyhedron $P$ is the minimal one, i.e., the intrinsic $PL$ stratification
\cite[p. 421]{Ak}. A \textit{vertex} of a stratification is simply a point of
the 0-stratum; this stratum may be empty. If one prefers, one may talk about
simplicial homology manifolds instead of polyhedral homology manifolds, but
the polyhedral setting is the natural one (just as the notion of a PL manifold
is a more natural notion than that of a combinatorial manifold).
Much of this material is explained more fully, from the simplicial standpoint,
in \cite[Chapter V]{Mau}.
If a polyhedral homology manifold $P$ is to have the property that
$P\times\mathbb{R}^{n}$ is a topological manifold for some euclidean space
$\mathbb{R}^{n}$, then $P$ must have the additional property that its acyclic
links are in fact contractible (this condition is vacuous if $\partial
P=\emptyset)$. Let such a $P$ be called a \textit{polyhedral homology manifold
with homotopically collared boundary.} The reason for this name is that it can
be verified that a polyhedral homology manifold $P$ satisfies this additional
property if and only if there is a \textquotedblleft homotopy
collaring\textquotedblright\ map $\psi:\partial P\times\lbrack0,1)\rightarrow
P$ extending the inclusion $\partial P\times0=\partial P\hookrightarrow P$
such that $\psi(\partial P\times(0,1))\cap\partial P=\emptyset$. As an
example, the polyhedron $P=\Sigma^{k}M$, for any acyclic compact $PL$ manifold
$M$ and any $k\geq1$, is a polyhedral homology manifold. However, its boundary
is homotopically collared if and only if $M$ is simply-connected.
If a polyhedral homology manifold $P$ is itself to be a topological manifold,
then in addition it must have the property that the links in both $P$ and
$\partial P$ of vertices (as defined above) must be simply connected whenever
they have dimension $\geq2$. Let a polyhedron $P$ which is a polyhedral
homology manifold with homotopically collared boundary, and which satisfies
this link condition, be called a \textit{polyhedral homotopy manifold} (this
terminology may contrast with some earlier usage, but it seems justified, here
at least).
\begin{proof}
[Proof of Theorem]In the following discussion, all stratifications of
polyhedra will be assumed to be intrinsic stratifications, in order to
minimize hypotheses.
The basic case of the assertion is when $P$ is a homotopy
manifold-without-boundary. The proof in this case proceeds by induction on the
\textit{depth} of $P$, which is defined to be $\dim P-\dim P_{0}$, where
$P_{0}$ is the nonempty stratum of $P$ of lowest dimension (see for example
\cite{Si5}). For induction purposes, then, assume $n\geq0$, and assume it has
been established that given any polyhedral homotopy manifold-without-boundary
$Q$, with depth $Q\leq n$ and $\dim Q\neq4$, then $Q$ is a topological
manifold-without-boundary. Suppose $P$ is a polyhedral homotopy
manifold-without-boundary with depth $P=n+1$ and $\dim P\neq4$. Let $P_{0}$ be
the nonempty stratum of $P$ of lowest dimension. Then depth $(P-P_{0})\leq n$,
and hence by the induction hypothesis $P-P_{0}$ is a topological
manifold-without-boundary. Let $k=\dim P_{0}$, and let $L$ be the link of any
component of $P_{0}$. In order to show that $P$ is a manifold, it suffices to
establish:
\noindent\textbf{Claim:} \textit{$\overset{\circ}{c}L\times\mathbb{R}^{k}$ is
a topological manifold.}
The proof of this is divided into several cases, depending on the dimension of
$L$.
\begin{case}
$\dim L\leq2$.
\end{case}
Then $L$ is topologically a sphere, hence the claim follows.
\begin{case}
$\dim L=3$.
\end{case}
Then $L$ is a (manifold) homology 3-sphere. Hence the claim follows from the
Double Suspension Theorem (note that $k\geq1$ in this case, since $\dim
P\neq4).$
\begin{case}
$\dim L\geq4$.
\end{case}
We have that depth$(L\times\mathbb{R}^{1})$ (= depth $L)<$ depth $P$, and
hence by the induction hypothesis the polyhedral homotopy
manifold-without-boundary $L\times\mathbb{R}^{1}$ is a topological manifold.
By the Proposition in Part II, $L\times\mathbb{R}^{1}$ is embeddable as a
neighborhood of the end of some open contractible topological manifold $M$. If
$k=0$, then by hypothesis $L$ is homotopically a sphere, hence $M$ is
homeomorphic to $\mathbb{R}^{m}$ (\cite{Si1}, or \cite{St} coupled with the
fact that $M$ has $PL$ manifold structure). Hence $\overset{\circ}{c}L$, which
is homeomorphic to the 1-point semicompactification of the end, is a manifold.
If $k\geq1$, then $\overset{\circ}{c}L\times\mathbb{R}^{k}$, being
homeomorphic to $M/X\times\mathbb{R}^{1}$, is a manifold by Part II, where
$X=M-L\times\mathbb{R}^{1}$. This establishes the basic case of the
Theorem.\newline(Technical aside: When only the triple suspension theorem was
known for homology 3-spheres, and hence the above assertion required the
additional restriction $\dim P\neq5\neq\dim\partial P$, the last part of this
argument, for the case $\dim L=4$ and $k\geq2$, had to be established by a
more complicated argument.)
In the general case, when $\partial P\neq\emptyset$, the preceding argument
can be augmented by using the following collar argument, which avoids some
delicate link analysis at $\partial P$. Let $Q$ be gotten from $P$ by
attaching to $P$ an exterior boundary collar, this being denoted
\[
Q=(P+\partial P\times\lbrack0,1])/[\partial P=\partial P\times0].
\]
Hence $\partial Q=\partial P\times1$. By the without-boundary case, both
$\partial Q$ and $Q-\partial Q$ are manifolds, hence $Q$ is a manifold.
Furthermore the given collar $\partial P\times\lbrack0,1]$ for $\partial Q$ in
$Q$ is 1-LCC (1-locally co-connected) in $Q$, that is, small loops in
$Q-\partial P\times\lbrack0,1]$ (sizes measured in the metric of $Q$) are
null-homotopic in $Q-\partial P\times\lbrack0,1]$ by small homotopies. Hence,
a now-standard radial engulfing argument (\cite{Se}; see \cite[\S 3]{Da3})
shows that the interval-fibers of this collar in $Q$ can be shrunk to points
by pseudoisotopy of $Q$, and hence $Q$ is homeomorphic to the quotient space,
which is $P$.
\end{proof}
\begin{remark}
It can be deduced from the recent work of Cannon \cite{Can2} combined with
Cannon \cite{Can3} or Bryant-Lacher \cite{BL2} that the purely topological
analogue of the assertion above is true. That is, it remains true with
\emph{polyhedron} replaced by its topological analogue, namely a
\emph{conelike-stratified (CS) set} as defined by Siebenmann in \cite{Si5}.
Although links in cone-like stratified sets are not intrinsically
well-defined, they become intrinsically well-defined after crossing them with
euclidean space of dimension equal to their codimension. Hence the homotopy
type of \textquotedblleft the\textquotedblright\ link makes sense. The
existence of intrinsic stratifications for conelike-stratified sets was
established by M. Handel in \cite{Han}.
\end{remark}
\subsection{\textbf{Wild embeddings with mapping cylinder neighborhoods.}}
Another consequence of the Double Suspension Theorem is:
\begin{theorem}
\textit{For any given dimension $m\geq5$, there exists a topological embedding
of a circle into an $m$-manifold such that the embedding has a manifold
mapping cylinder neighborhood, and yet the embedding is not locally flat.}
\end{theorem}
Such an example cannot exist in dimensions $m\leq4$ (see \cite{BL2}). The
example is provided by the suspension circle embedded in $\Sigma^{2}
H^{n}\approx S^{n+2}$, for any non-simply-connected homology sphere $H^{n}$.
This particular embedding is homogeneous by ambient isotopy. Note that this
shows, for example, that there is no reasonable notion of an `intrinsically'
good point of an embedding (one might conjecture such a notion after learning
statements such as \textquotedblleft any circle in euclidean space is tame
modulo a 0-dimensional $F_{\sigma}$ subset\textquotedblright).
\subsection{Codimension two embeddings.}
Since the work of Kirby-Siebenmann, it has been known that there exists a
topologically locally flat codimension 2 embedding of one $PL$ manifold into
another, such that the embedding cannot be ambient isotoped to be piecewise
linearly locally flat. For example, one can take the inclusion $S^{3}
=S^{3}\times0\hookrightarrow(S^{3}\times\mathbb{R}^{2})_{\theta}$, where
$(S^{3}\times\mathbb{R}^{2})_{\theta}$ denotes the $PL$ manifold obtained by
putting the nonstandard $PL$ structure $\theta$ on $S^{3}\times\mathbb{R}^{2}$
\cite{KS1}. The Double (or Multiple) Suspension Theorem provides another
example, for given any homology 3-sphere $H^{3}$, and any $PL$ manifold
$M^{m},m\geq0$, \textit{the homeomorphism $\Sigma^{2}H^{3}\times M^{m}\approx
S^{5}\times M^{m}$ can be chosen to be piecewise linearly locally flat on}
$H^{3}\times M^{m}\Leftrightarrow$ \textit{the Rokhlin invariant of} $H^{3}$
is 0. The proof will not be given here; it uses the same argument that was
used in Section I above, involving transversality and Rokhlin's theorem.
\subsection{\textbf{Exotic group actions.}}
It is well known that the Double Suspension Theorem establishes that
\textit{there is a nonstandard semifree action of $S^{1}$ on $S^{5}$ which is
piecewise linear with respect to some polyhedral structure on $S^{5}$.} For
there is a natural semifree action of $S^{1}$ on $\Sigma^{2}H^{3}=S^{1}\ast
S^{3}\approx S^{5}$ with fixed point set $H^{3}$. Another interesting group
action, pointed out to me by F. Raymond and H. Samelson, is the following:
\textit{there is a topological action of $SO(3)$ on $S^{7}$ with all isotropy
groups discrete.} (Compare \cite{MS} and \cite{Ol}; the latter work in fact
exhibits such an action which is smooth.) This action follows from the fact
that $S^{7}\approx H^{3}\ast H^{3}$, where $H^{3}$ is Poincar\'{e}'s binary
dodecahedral homology 3-sphere (cf. II above). The action of $SO(3)$ is the
diagonal action.
\section[Preliminaries: Cell-like decompositions]{Preliminaries: Cell-like
decompositions and the Bing Shrinking Criterion}
The suspension problem for homology spheres is a problem in the theory of
cell-like upper semicontinuous decompositions of manifolds. This venerable
subject, fathered by R.L. Moore and developed largely by R.H. Bing, studied
the class of proper cell-like maps $\{f:M\rightarrow Q\}$ from manifolds onto
metric spaces (definitions below). The major problem of the subject is
\textit{to decide when the quotient} (i.e.\ target) \textit{space $Q$ is a manifold,
or at least when $Q\times\mathbb{R}^{k}$ is a manifold, for some $k$}.
The link between the suspension question for homology spheres and cell-like
decomposition theory became apparent as a result of the Newman, Poenaru and
Mazur constructions of homology $n$-spheres which bound contractible
$(n+1)$-manifolds (recalled in Part I). For suppose $H^{3}=\partial M^{4}$,
where $M^{4}$ is a contractible 4-manifold. Let $X$ be a spine of $M$, that
is, let $X=M-\partial M\times\lbrack0,1)$, where $\partial M\times\lbrack0,1)$
denotes any open collar for $\partial M$ in $M$. Then $\Sigma^{k}H^{3}$ is a
sphere if and only if $M/X\times\mathbb{R}^{k-1}$ is a manifold (as explained
in the Introduction), where $M/X$ denotes the quotient space of $M$ gotten by
identifying $X$ to a point; clearly $M/X\approx c(\partial M)$. So the
suspension question becomes one of determining whether the target of the
cell-like map $M\rightarrow M/X$ is stably a manifold.
In the subject of cell-like decompositions of manifolds, there are two
landmark results of a general character, the Shrinking Theorem of Bing, and
the Cellular Approximation Theorem of Armentrout and Siebenmann. Both concern
the question of approximating certain maps by homeomorphisms.
The version of Bing's theorem which I prefer is the following if-and-only-if
version. In applications of this theorem, invariably $M$ is a manifold, $f$ is
a cell-like surjection and the problem is to decide whether $Q$ is a manifold.
The significance of Bing's theorem was to turn attention from the target space
$Q$, where it had been focused since R.L. Moore's work \cite{Mo} on cell-like
decompositions of the plane, to the source space $M$, where one had the
obvious advantage of working in a space known to be a manifold. The
justification for the approximation statement in the theorem will become clear
after the next theorem.
\textsc{Shrinking Theorem} (Bing 1952, from \cite[Section 3 II, III]{Bi1};
compact version). \textit{A surjection $f:M\rightarrow Q$ of compact metric
spaces is approximable by homeomorphisms if and only if the following
condition holds (now known as the Bing Shrinking Criterion): Given any
$\epsilon>0$, there exists a homeomorphism $h:M\rightarrow M$ such that}
\begin{enumerate}
\item distance $(fh,f)<\epsilon,$ \textit{and}
\item \textit{for each} $y\in Q$, diam$(h(f^{-1}(y)))<\epsilon$.
\end{enumerate}
There are many refinements and addenda one can make to this theorem, the most
significant having to do with realizing $f$ by pseudoisotopy versus realizing
$h$ by ambient isotopy. These will not be discussed here. Bing used the
theorem in much of his subsequent work, including \cite[proofs of
Theorems]{Bi2}, \cite[Section 8]{Bi3} and \cite[\S 3 and Thm2]{Bi4}. McAuley
\cite{McA1} was the first person to broaden the theorem to the above
generality; his proof was a straightforward adaptation of Bing's.
The easy half of the proof of the theorem is the implication $\Rightarrow$;
one simply writes $h=g_{0}^{-1}g_{1}$ for two successively chosen
homeomorphisms $g_{0},g_{1}$ approximating $f$. The nontrivial, and
significant half of the theorem is the implication $\Leftarrow$. Bing's idea
here was to construct a surjection $p:M\rightarrow M$, with $fp$ close to $f$,
such that the point-inverse sets of $p$ coincide exactly with those of $f$.
Then $g\equiv fp^{-1}:M\rightarrow Q$ defines the desired homeomorphism
approximating $f$. The map $p$ is constructed as a limit of homeomorphisms
$p=\lim_{i\rightarrow\infty}h_{1}h_{2}\ldots h_{i}$, where the $h_{i}$'s
are provided by the Shrinking Criterion, for $\epsilon_{i}$ values which
go to 0 as $i$ goes to $\infty$. The heart of the proof is showing how to
choose each $\epsilon_{i}$, the main point being that it depends on the
composition $h_{1}\ldots h_{i-1}$. Details are given in many places, for
example, \cite[pp. 45,46]{Ch}.
This proof of the implication $\Leftarrow$ becomes quite transparent when
recast as a Baire category argument. For in the Baire space $C(M,Q)$ of maps
from $M$ to $Q$, with the uniform metric topology, let $E$ be the closure of
the set $\{fh\mid h:M\rightarrow M$ is a homeomorphism$\}$. The Bing Shrinking
Criterion amounts to saying that for any $\epsilon>0$, the open subset of
$\epsilon$-maps in $E$, call it $E_{\epsilon}$, is dense in $E$. Hence
$E_{0}\equiv\cap_{\epsilon>0}E_{\epsilon}$ is dense in $E$, since $E$ is a
Baire space. Since $E_{0}$ consists of homeomorphisms, this shows that $f\in E$
is approximable by homeomorphisms.
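(Here an $\epsilon$-map is understood in the usual sense: a map all of whose point-inverses have diameter less than $\epsilon$. Explicitly,
\[
E_{\epsilon}=\{\,g\in E\mid\operatorname{diam}g^{-1}(q)<\epsilon\ \text{for every }q\in Q\,\},
\]
so every element of $E_{0}$ is injective; being also surjective, as a uniform limit of surjections of compacta, it is therefore a homeomorphism onto $Q$.)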
\indent In applying the above Shrinking Theorem to show that various of his
decomposition spaces were manifolds, possibly after stabilizing (\cite{Bi1},
\cite{Bi2}, \cite{Bi3}), Bing used only the \textquotedblleft
if\textquotedblright\ part of the theorem, establishing that his Criterion
held, i.e. establishing that the decomposition was shrinkable. But to show
that his dogbone decomposition space was \textbf{not} a manifold, Bing used
the \textquotedblleft only if\textquotedblright\ part of the theorem, bridging
the gap by showing that if the decomposition space $Q$ were a manifold, then
the quotient map $f:\mathbb{R}^{3}\rightarrow Q$ would have to be approximable
by homeomorphisms \cite[Section 8]{Bi3}. This result has been generalized to
become the
\textsc{Cellular Approximation Theorem.}\textbf{ }(extending Moore \cite{Mo}
for $m=2$, see \cite[\S 11]{McA2}; Armentrout \cite{Ar} for $m=3$; Siebenmann
\cite{Si6} for $m\geq5$; compact without-boundary version). \textit{Suppose
$f:M^{m}\rightarrow Q^{m}$ is a map of closed manifolds, $m\neq4$. Then $f$ is
a cell-like surjection} (read \textit{cellular} if $m=3$) \textit{if and only if $f$ is
approximable by homeomorphisms.
}
\indent The previously known ``if'' part is quite easy, and holds for all $m$.
The ``only if'' part requires some highly sophisticated geometrical analysis.
\indent The significance of these two theorems taken together is clear:
\begin{quotation}
\textit{The problem of deciding whether a particular cell-like image of a
manifold-without-boundary is itself a manifold is equivalent to deciding
whether the Bing Shrinking Criterion holds in the source } (dimension
$\mathbf{4}$ excepted).
\end{quotation}
\indent The subject of cell-like decompositions of manifolds developed in the
1950's into a robust and active theory, thanks to Bing's spectacular series of
theorems. By comparison, progress throughout the 1960's was gradual. From 1960
on, the duncehat decomposition problem for $M^{4}\times\mathbb{R}^{1}$
(explained in Part I) was a natural problem to work on; from about 1970 on, it
was the natural problem to work on.
\indent In this paper, only the \textquotedblleft if\textquotedblright\ part
of Bing's Shrinking Theorem is used. But the psychological comfort provided by
Siebenmann's theorem was vital to this work.
\section{Definitions and notation}
Throughout this paper, \textit{all spaces are locally compact separable
metric,} and all manifolds are topological manifolds, possibly with boundary,
unless stated otherwise. All definitions are standard.
\indent The (single) \textit{suspension} of a compact metric space $Y$,
denoted $\Sigma Y$, is the quotient of $Y\times\lbrack-1,1]$ gotten by
identifying to distinct points the two subsets $Y\times\pm1$. The
$k$-\textit{fold suspension} $\Sigma^{k}Y$ is $\Sigma(\ldots(\Sigma Y)\ldots)$
($k$ times); if $k=0$, this is understood to be $Y$. Equivalently, $\Sigma
^{k}Y$ can be defined to be the join $Y\ast S^{k-1}$ of $Y$ with the
$(k-1)$-sphere. It follows that $Y\ast S^{k-1}-S^{k-1}$ is homeomorphic to
$Y\times\mathbb{R}^{k}$, and $Y\ast S^{k-1}-Y$ is homeomorphic to
$\mathring{c}Y\times S^{k-1}$, where $\mathring{c}Y$ denotes the \textit{open
cone} on $Y$, that is, $\mathring{c}Y=Y\times\lbrack0,1)/Y\times0$.
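For example, since the join of two spheres is again a sphere ($S^{m}\ast S^{n}\approx S^{m+n+1}$), these conventions give
\[
\Sigma^{k}S^{n}=S^{n}\ast S^{k-1}\approx S^{n+k}
\qquad\text{and}\qquad
\mathring{c}S^{n}\approx\mathbb{R}^{n+1},
\]
consistent with the fact that the suspension question is trivial when the homology sphere is an actual sphere.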
A compact metric space is \textit{cell-like} provided that whenever it is
embedded in an ANR (e.g., in the Hilbert cube), it is null-homotopic inside of
any given neighborhood of itself in the ANR (see \cite{Lac1} and \cite{Lac2}
for an elaboration of this and related facts). A map $f:X\rightarrow Y$ of
spaces is \textit{proper} provided the preimage of any compact subset of $Y$
is compact, and $f$ is \textit{cell-like} provided it is onto and each
point-inverse is cell-like. A \textit{cell-like upper-semicontinuous
decomposition} of a space $X$ is nothing more than a proper cell-like
surjection from $X$ onto some (quotient) space $Y$.
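For example, every compact contractible space is cell-like: it contracts to a point inside itself, hence inside any neighborhood of itself in an ambient ANR. In particular, the duncehat $K^{2}$ of Part I and any spine $X$ of a compact contractible manifold $M$ are cell-like, so the quotient map $M\rightarrow M/X$ is a proper cell-like surjection in the above sense, with a single nondegenerate point-inverse.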
\indent Standard notations are $\operatorname*{int}_{X}A,\operatorname*{cl}
_{X}A$ and $\operatorname*{fr}_{X}A$ for the \textit{interior},
\textit{closure} and \textit{frontier} of a subset $A$ in a space $X$; the
subscript $X$ is omitted whenever it is clear. In this paper careful
distinction is made between the notions of \textit{boundary} and
\textit{frontier}, the former being used only in the manifold sense, e.g.
$\partial M$, and the latter being used only in the point-set sense, e.g.
$\operatorname*{fr}A.$
\indent This paper is concerned largely with constructing homeomorphisms and
ambient isotopies of manifolds. The ambient manifolds can always be assumed to
be piecewise linear (or smooth, if one prefers), and it will be convenient to
use the well-established concepts of piecewise linear topology, e.g., regular
neighborhoods, expansions and collapses, and transversality. As an example,
the phrase \textquotedblleft$J$ \textit{is transverse to} $M\times0$
\textit{in} $M\times\mathbb{R}^{1}$\textquotedblright, where $J$ is a
polyhedron (homeomorphic to $K\times\mathbb{R}^{1}$, in Part I) and $M$ is a
manifold, will mean that there is a bicollar $\alpha:M\times(-1,1)\rightarrow
M\times\mathbb{R}^{1}$ for $M=M\times0$ in $M\times\mathbb{R}^{1}$ (which,
after ambient isotopy fixing $M\times0$, can be assumed to be the standard
bicollar) such that $\alpha|(J\cap M\times(-1,1))$ is a bicollar for $J\cap
M\times0$ in $J$ (everything $PL$).
\part{The double suspension of Mazur's homology $3$--sphere is $S^{5}$}
\indent The goal of this part is to show that the double suspension of a
certain homology $3$-sphere described by Mazur is homeomorphic to $S^{5}$
(this was announced in \cite{Ed2}). The proof is completely descriptive, so
that in particular one can see the wild suspension circle in $S^{5}$
materialize as a limit of tame circles.
\indent By Lefschetz duality, the boundary of any compact contractible
manifold is a homology sphere. Examples of contractible $(n+1)$-manifolds with
nonsimply connected boundary (i.e. genuine homology $n$-sphere boundary), for
$n\geq4,$ were first constructed by Newman \cite{New1}, by taking the
complement in $S^{n+1}$ of the interior of a regular neighborhood of any
acyclic, nonsimply-connected $2$-complex (some applications of this
construction appear in \cite{Wh2}, \cite{CW} and \cite{CZ}). Constructions
were subsequently given for $n=3$ by Poenaru \cite{Po} and Mazur \cite{Maz}.
In this part we are interested in the simplest possible such construction, of
lowest possible dimension, namely $n=3$.
\indent The Poenaru construction goes as follows. Take the product
\[
P^{4}\equiv(S^{3}-\operatorname*{int}N(K_{0}))\times\lbrack-1,1]
\]
of a compact, nontrivial knot complement with an interval, and attach to it a
4-ball $B^{4}$ by gluing a tubular neighborhood $N(K_{1})$ of a nontrivial
knot $K_{1}$ in $\partial B^{4}$ to a tubular neighborhood of a meridian
$\mu\times0$ in $P^{4}$. This is the desired manifold, which can be written
$M^{4}=(P^{4}\cup B^{4})/[N(\mu\times0)=N(K_{1})]$. The advantage of this
construction of Poenaru is the ease with which one can verify that (1) $M^{4}$
is contractible, since the attached 4-ball kills $\pi_{1}(P^{4})$, and (2)
$\partial M^{4}$ is not simply connected, since it is the union of two knot
complements, and the Loop Theorem implies that for any nontrivial knot $K$ in
$S^{3}$, $\pi_{1}(\partial N(K))\rightarrow\pi_{1}(S^{3}-\operatorname*{int}
N(K))$ is monic. The disadvantage of this construction (for the purposes of
Part I) is that the spine of $M^{4}$, although it can be chosen
$2$-dimensional, is not especially simple.
\indent Mazur's construction, on the other hand, starts with the simplest
possible spine, i.e. the simplest contractible but noncollapsible polyhedron,
namely the $2$-dimen\-sion\-al duncehat, and thickens it to be a 4-manifold
with nonsimply connected boundary. Recall that the duncehat can quickly be
described as the space gotten by attaching a $2$-cell $B^{2}$ to a circle
$S^{1}$ by sewing $\partial B^{2}$ to $S^{1}$ by a degree one map which goes
twice around $S^{1}$ in one direction, and once around in the other direction.
Since Mazur's homology 3-sphere is the one we work with, we recall further
details and establish some notation.
\indent The duncehat $K^{2}$ and its thickening $M^{4}$ will be described
together, making use of Figure I-1.
\begin{figure}
\caption{The Mazur 4-manifold $M^{4}$}
\end{figure}
It is convenient to express $M^{4}$ as a union of a 0-handle $D^{4}$, a
1-handle $B^{1}\times D^{3}$ and a $2$-handle $B^{2}\times D^{2}$. Let $L^{1}$
be the 1-complex embedded in $\partial D^{4}$ as shown in Figure I-1a.
$L^{1}$ is usually referred to as the \emph{linked eyeglasses}. The
intersection $K^{2}\cap D^{4}$ is to be $cL^{1}$, where the coning is done to
the centerpoint $\ast$ of $D^{4}$. The 1-handle $B^{1}\times D^{3}$ is attached to
$D^{4}$ as shown in Figure I-1b, and $K^{2}\cap S^{1}\times\partial D^{3}$ is
defined to be the familiar Mazur curve $\Gamma$ in $S^{1}\times\partial D^{3}
$. The intersection $K^{2}\cap S^{1}\times D^{3}$ is to be regarded as the
mapping cylinder of an apparent degree one map from $\Gamma$ onto the core
$S^{1}\times0$ of $S^{1}\times D^{3}$. Finally, the $2$-handle $B^{2}\times
D^{2}$ is attached to $S^{1}\times B^{3}$ by identifying $\partial B^{2}\times
D^{2}$ with a neighborhood of $\Gamma$ in $S^{1}\times\partial D^{3}$ in such
a manner that $\partial B^{2}\times0$ is identified to $\Gamma$. Then
$K^{2}\cap B^{2}\times D^{2}$ can be taken to be $B^{2}\times0$. The
\textquotedblleft twisting\textquotedblright\ of the attaching map here (in
the direction perpendicular to $\Gamma$) is not specified, because it is
irrelevant in the upcoming constructions.
The only properties of the pair $(M^{4},K^{2})$ used below are:
\begin{itemize}
\item[(i)] $M^{4}$ is a contractible $PL$ manifold, and $K^{2}$ is a
polyhedral spine of $M^{4}$;
\item[(ii)] $K^{2}-K^{(1)}$ is an open $2$-cell locally flatly embedded in
$\operatorname*{int}M^{4}$, where $K^{(1)}$ denotes the intrinsic 1-skeleton
of $K^{2}$, namely $S^{1}\times0$; and
\item[(iii)] near $\ast\in K^{2}$, the embedding of $K^{2}$ in
$\operatorname*{int}M^{4}$ is as described in Figure I-1.
\end{itemize}
\indent To prove that $\Sigma^{2}(\partial M^{4})\approx S^{5}$, i.e. that
$M^{4}/K^{2}\times\mathbb{R}^{1}$ is a manifold, it suffices to establish that
the Bing Shrinking Criterion holds for the stabilized quotient map $\pi\times
id_{\mathbb{R}^{1}}:M\times\mathbb{R}^{1}\rightarrow M/K\times\mathbb{R}^{1}$.
This amounts to establishing the
\textsc{Shrinking Proposition. }\emph{Given any }$\varepsilon>0$\emph{, there
is a homeomorphism }$h:M^{4}\times\mathbb{R}^{1}\rightarrow M^{4}
\times\mathbb{R}^{1}$\emph{, fixed on }$\partial M^{4}\times R^{1}$\emph{,
such that for each }$t\in R^{1}$\emph{, }
\begin{enumerate}
\item $h\left( M\times t\right) \subset M\times\left[ t-\varepsilon
,t+\varepsilon\right] $\emph{, and}
\item $\operatorname*{diam}\left( h\left( K^{2}\times t\right) \right)
<\varepsilon$\emph{.
}
\end{enumerate}
\textsc{Technical Notes. }The following brief comments concern the precise
relation of the above statement to the Bing Shrinking Criterion as stated in
the Preliminaries: (a) If one prefers to stay in the world of compacta, one
can replace $\mathbb{R}^{1}$ by $S^{1}$; the reasons that $\mathbb{R}^{1}$ is
used here are (i) tradition, and (ii) the notion of, and notation for,
\textquotedblleft vertical\textquotedblright\ and \textquotedblleft
horizontal\textquotedblright\ motions are more natural in $M\times
\mathbb{R}^{1}$ than in $M\times S^{1}$; (b) tradition has the homeomorphism
$h$ being uniformly continuous, which in fact it is by construction but that
is not really necessary; and (c) in the above statement of the Shrinking
Proposition, the Bing Shrinking Criterion really demands that $\partial M$ be
replaced by $M-\operatorname*{int}N_{\epsilon}(K)$, but this stronger
statement is clearly deducible from the given one by replacing $M$ by a small
regular neighborhood of $K$ in $M$.
\indent The above homeomorphism $h$ will be isotopic to the identity
$rel\,\partial M\times\mathbb{R}^{1}$, by construction. This is useful to keep
in mind when trying to visualize it.
\indent To understand the motivation for the following construction of $h$,
recall the basic principle:
\begin{quotation}
$h$ \textit{is to be thought of as being an arbitrarily close approximation to
a surjection} $p:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$,
\textit{whose nontrivial point-inverses are precisely the sets} $\{K\times
t\mid t\in\mathbb{R}^{1}\}$.
\end{quotation}
\noindent This is because in Bing's proof of the implication $\Leftarrow$ of
his theorem (see the Preliminaries), he constructs $p$ as a limit
$p=\lim_{i\rightarrow\infty}h_{1}h_{2}\ldots h_{i}$, where the $h_{i}$'s are
homeomorphisms provided by the Shrinking Proposition, for smaller and smaller
values of $\epsilon$. (Recall that from this map $p$ one gets the desired
homeomorphism $g$ approximating the quotient map $\pi\times id_{\mathbb{R}
^{1}}:M\times\mathbb{R}^{1}\rightarrow M/K\times\mathbb{R}^{1}$ by defining
$g=(\pi\times id_{\mathbb{R}^{1}})p^{-1}:M\times\mathbb{R}^{1}\overset
{\approx}{\rightarrow}M/K\times\mathbb{R}^{1}$.)
\indent Now, the image $p(K\times\mathbb{R}^{1})$ $(\approx\mathbb{R}^{1})$ in
$M\times\mathbb{R}^{1}$ must have the following remarkable pathological
property (recognized by Glaser in \cite[p. 16, last two paragraphs]{Gl2}):
\begin{quotation}
\textit{for each} $t\in\mathbb{R}^{1}$, \textit{the intersection}
$p(K\times\mathbb{R}^{1})\cap M^{4}\times t$ \textit{must be} $0$
-\textit{dimensional, yet wild enough so that the inclusion $\partial
M^{4}\times t\hookrightarrow M\times t-p(K\times\mathbb{R}^{1})$ induces a
$\pi_{1}$-monomorphism.}
\end{quotation}
\noindent The second assertion follows because the inclusion
\[
\partial M^{4}\times t\hookrightarrow M^{4}\times\mathbb{R}^{1}-p(K\times
\mathbb{R}^{1})
\]
must induce a $\pi_{1}$-monomorphism, which is clear from the properties of
$p$. The first assertion follows because if $p(K\times\mathbb{R}^{1})\cap
M^{4}\times t$ were not 0-dimension, then there would exist some interval
$(a,b)\subset\mathbb{R}^{1}$ such that $p(K\times(a,b))\subset M^{4}\times t$,
which would then guarantee that the interval $p(K\times(a,b))$ would be
locally flatly embedded in $M\times\mathbb{R}^{1}$ (say by the Klee trick, as
in \cite[Part II]{CW}), and hence $\partial M$ would be simply connected (e.g.
by general positioning off of $p(K\times\mathbb{R}^{1})$ in $M\times
\mathbb{R}^{1}$ a $2$-disc initially mapped into $p(M\times c)$, $c=$ midpoint
of $(a,b)$, and then pushing this $2$-disc out to $\partial M\times
\mathbb{R}^{1})$. Consequently, if the homeomorphism $h$ of the Shrinking
Proposition is to approximate $p$, then given $\epsilon>0$ one must be able to
construct a homeomorphism $h$ having the following property:
\begin{quotation}
\textit{for each $t\in\mathbb{R}^{1}$, each component of the intersection}
$h(K\times\mathbb{R}^{1})\cap M\times t$ \textit{has diameter} $<\epsilon$
(and yet necessarily the inclusion $\partial M\times t\hookrightarrow M\times
t-h(K\times\mathbb{R}^{1})$ must induce a $\pi_{1}$-monomorphism).
\end{quotation}
\noindent This is what motivates the construction below.
\indent The Basic Lemma below (or more accurately, the first 3 steps of it)
shows how to construct such a homeomorphism $h$ having this intersection
property with respect to the single level $M \times0$. This Lemma is the heart
of the entire proof; everything after it amounts to tidying up.
\indent The first, second and fourth steps of the Basic Lemma involve
splitting operations, which are called either meiosis or mitosis, depending on
the context. They are the freshest ingredients of the proof.
\indent The Lemma requires a definition. Given a homeomorphism $h:M\times
\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$, a component $C$ of
$h(K\times\mathbb{R}^{1})\cap M\times0$ is \textit{source-isolated} if
$h^{-1}(C)$ is isolated in $K\times\mathbb{R}^{1}$, in the sense that there is
an interval $(a,b)\subset\mathbb{R}^{1}$ such that $K\times(a,b)\cap
h^{-1}(M\times0)=h^{-1}(C)$. In the statement below, $C$ most often will be a
2-sphere lying in $h(K-K^{(1)})\times\mathbb{R}^{1}$, in which case
source-isolation will imply that $h^{-1}(C)$ bounds a 3-cell in $K\times
\mathbb{R}^{1}$ whose interior misses $h^{-1}(M\times0)$.
\indent One should think of the following lemma as starting at $i=0$, with
$h_{0}=$ identity, and producing successively better approximations to the
desired homeomorphism $h$ of the Shrinking Proposition.
\textsc{Basic Lemma}$_{i}$ (to be read separately and successively for
$i=1,2,3,4$). \emph{Given }$\delta>0$\emph{, there exists a homeomorphism
}$h_{i}:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$\emph{, with
compact support in }$\operatorname*{int}M\times\mathbb{R}^{1}$\emph{, such
that }$h_{i}(K\times\mathbb{R}^{1})$\emph{ is transverse to }$M\times0$\emph{
and the components of }$h_{i}(K\times\mathbb{R}^{1})\cap M\times0$\emph{ are
as follows.}
\begin{itemize}
\item[($i=1$)] \emph{There are two components. One is a pierced duncehat
(described below) having diameter }$<\delta$\emph{, and the other is a
source-isolated }$2$\emph{-sphere }$\Sigma^{2}\times0$\emph{, where }
$\Sigma^{2}\subset M^{4}$\emph{.}
\emph{Let }$\Sigma^{2}\times D^{2}$\emph{ be a product neighborhood of
}$\Sigma^{2}$\emph{ in }$M^{4}$\emph{.}
\item[($i=2$)] \emph{There are }$1+2^{p}$\emph{ components }$(p=p(\delta
))$\emph{. One is the }$\delta$\emph{-small pierced duncehat from Step 1, and
the remaining }$2^{p}$\emph{ are source-isolated 2-spheres lying in }
$\Sigma^{2}\times D^{2}\times0$\emph{, such that this collection of }$2^{p}
$\emph{ 2-spheres in }$\Sigma^{2}\times D^{2}\times0$\emph{ is equivalent to
the }$p^{th}$\emph{ stage in the spun Bing collection of 2-spheres in }
$S^{2}\times D^{2}$\emph{ (described below).}
\item[($i=3$)] \emph{There are }$1+2^{p}$\emph{ components (same }
$p=p(\delta))$\emph{. One is the }$\delta$\emph{-small pierced duncehat, and
the remaining }$2^{p}$\emph{ are source-isolated }$\delta$\emph{-small
2-spheres in }$\Sigma^{2}\times D^{2}\times0$\emph{.}
\item[($i=4$)] \emph{There are }$1+2^{p+1}$\emph{ components (same
}$p=p(\delta))$\emph{. One is the }$\delta$\emph{-small pierced duncehat, and
the remaining }$2^{p+1}$\emph{ are }$\delta$\emph{-small embedded duncehats
lying in }$\Sigma^{2}\times D^{2}\times0$\emph{, each being the image under
}$h_{4}$\emph{ of some entire level }$K\times t_{q},1\leq q\leq2^{p+1}
$\emph{.
}
\end{itemize}
\textsc{Note. }In using the above Lemma later, all we will really care about
is the intersection of $h_{i}(K\times\mathbb{R}^{1})$ with the 2-handle
$B^{2}\times D^{2}\times0\subset M^{4}\times0$, not with the entire level
$M^{4}\times0$. But we have stated the Lemma in the above elaborated form to
make the overall process a little clearer.
\begin{proof}
It is best to gain first a qualitative understanding of the components of
$h_{i}(K\times\mathbb{R}^{1})\cap M\times0$, for $i=1,\ldots,4$, without
paying attention to their size. For this purpose, it is easiest to work in the
source, understanding there the pre-images $K\times\mathbb{R}^{1}\cap
h_{i}^{-1}(M\times0)$. After this one can come to grips with the images
$h_{i}(K\times\mathbb{R}^{1})\cap M\times0$, and the size of their components.
Following this advice, we present the construction in two rounds, in the first
round just working in the source, describing there the sets $K\times
\mathbb{R}^{1}\cap h_{i}^{-1}(M\times0),1\leq i\leq4$, and their components.
In the second round we pay attention to how these components are embedded in
$M\times0$, and their size.
In the first round of the construction, each set $h_{i}^{-1}(M\times0),1\leq
i\leq4$, will be described as the frontier in $M\times\mathbb{R}^{1}$ of an
arbitrarily small (relative) regular neighborhood in $M\times\mathbb{R}^{1}$
of a subpolyhedron $A_{i}$ of $M\times\mathbb{R}^{1}$, where
\[
M\times(-\infty,0]=A_{0}\nearrow A_{1}\nearrow\!\!\searrow\ldots\nearrow\!\!\searrow A_{2}=A_{3}\nearrow\!\!\searrow A_{4}.
\]
Here the arrows indicate expansions and collapses. Each $A_{i}$ will contain
$A_{0}$, and $\operatorname*{cl}(A_{i}-A_{0})$ will be a compact 2-dimensional
polyhedron in $\operatorname*{int}M\times\mathbb{R}^{1}$. The regular
neighborhoods will be relative to $\partial M\times(-\infty,0]$, and hence
the intersection of their frontiers with $\partial M\times\mathbb{R}^{1}$ will
be $\partial M\times0$. Each regular neighborhood is to be chosen so that its
restriction to $K\times\mathbb{R}^{1}$ is a regular neighborhood in
$K\times\mathbb{R}^{1}$ of the subpolyhedron $B_{i}\equiv A_{i}\cap
K\times\mathbb{R}^{1}$. The successive sets $\{h_{i}^{-1}(M\times
(-\infty,0])\},1\leq i\leq4$, can be thought of as gotten by applying the
uniqueness of regular neighborhoods principle, with each homeomorphism $h_{i}$
being the end of an ambient isotopy. (This regular neighborhood description,
from my Cambridge notes, is not quite the manner in which I originally
perceived the $h_{i}$'s, but it is the quickest way to describe them.)
The $(A_{i},B_{i})$'s are as follows (see Figure I-3).
\textbf{Step i = 0.} $A_{0}=M\times(-\infty,0];B_{0}=K\times(-\infty,0]$.
\textbf{Step i = 1} (first round). Let $a\in L^{1}$ be the midpoint of the
interval part of the linked eyeglasses $L^{1}$ (see Figure I-2).
\begin{figure}
\caption{The linked eyeglasses $L^{1}$}
\end{figure}
Let $L_{\#}^{1}$ be the closure of one of the components of $L^{1}-a$ (either
one); hence $L_{\#}^{1}$ is a circle with a feeler attached, with the free end
of the feeler being the point $a$. Let $D_{\#}^{2}$ be the natural 2-disc that
the circle of $L_{\#}^{1}$ bounds in $\partial D^{4}$, so that $D_{\#}^{2}\cap
L^{1}=\partial D_{\#}^{2}\cup d$, where $d$ is an interior point of
$D_{\#}^{2}$. Writing the 0-handle $D^{4}$ of $M^{4}$ as $D^{4}=\partial
D^{4}\times\lbrack0,1)\cup\ast$, then define
\[
A_{1}=A_{0}\cup(L_{\#}^{1}\cup D_{\#}^{2})\times\frac{1}{2}\times1\cup
a\times\frac{1}{2}\times\lbrack0,1]\subset A_{0}\cup D^{4}\times\mathbb{R}
^{1}\subset M\times\mathbb{R}^{1}
\]
(see Figure I-3$_{1}$).
In other words, $A_{1}-A_{0}$ lies in $\partial D^{4}\times\frac{1}{2}
\times\lbrack0,1]$, where $[0,1]$ is a sub-interval of the vertical coordinate
$\mathbb{R}^{1}$ of $M\times\mathbb{R}^{1}$, and $A_{1}-A_{0}$ consists of a
horizontal 2-disc-with-feeler lying in the $M\times1$ level, together with the
vertical interval joining the free end of the feeler to the $M\times0$ level.
Let $A_{\delta}$ denote $A_{1}$ minus the interior of the 2-disc $D_{\#}
^{2}\times\frac{1}{2}\times1$, and let $B_{\delta}=A_{\delta}\cap
K\times\mathbb{R}^{1}$, which is $K\times(-\infty,0]$ with a
circle-with-feeler attached. By the construction, $B_{1}=B_{\delta}\cup
d\times\frac{1}{2}\times1$, which has two components, one of them a point.
The set $K\times\mathbb{R}^{1}\cap h_{1}^{-1}(M\times0)$, being the frontier
in $K\times\mathbb{R}^{1}$ of a regular neighborhood of $B_{1}$ in
$K\times\mathbb{R}^{1}$, will consist of two components, one of them the
2-sphere boundary of a 3-ball, and the other a \textit{pierced duncehat} (see
Figure I-4). As an abstract set, a pierced duncehat $K_{\#}^{2}$ can be
described as the union of a duncehat-with-hole and a 2-torus, the boundary of
the hole being identified with an essential curve on the 2-torus. In symbols,
\[
K_{\#}^{2}=(K^{2}-\operatorname*{int}C^{2})\cup_{\partial C^{2}=S^{1}
\times\text{point}}S^{1}\times S^{1},
\]
where $C^{2}$ is a $2$-cell in $K^{2}-K^{(1)}$. This will be taken up again in
the second round; for the moment, our main concern is to define the sets
$\{h_{i}^{-1}(M\times(-\infty,0])\}$ and (therefore) $\{K\times\mathbb{R}
^{1}\cap h_{i}^{-1}(M\times(-\infty,0])\}$, leaving to be specified the actual
behavior of the $h_{i}$'s on these sets.
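As an elementary consistency check on this description of the pierced duncehat (it is not needed in what follows), its Euler characteristic can be computed by inclusion-exclusion, using the contractibility of $K^{2}$:
\[
\chi(K^{2}-\operatorname*{int}C^{2})=\chi(K^{2})-\chi(C^{2})+\chi(\partial C^{2})=1-1+0=0,\qquad\chi(K_{\#}^{2})=0+\chi(S^{1}\times S^{1})-\chi(S^{1})=0.
\]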
\begin{figure}
\caption{Some aspects of Step $i=1$, including the pierced duncehat}
\end{figure}
\textbf{Step i = 2} (first round). This is the interesting step. The
polyhedron $A_{2}$ will be described in stages, by describing a finite
sequence of polyhedra
\[
A_{1}=A_{1,0}\nearrow\!\!\searrow A_{1,1}\nearrow\!\!\searrow\ldots
\nearrow\!\!\searrow A_{1,p-1}\nearrow\!\!\searrow A_{1,p}=A_{2}
\]
where $p$ is to be specified. The first expansion-collapse is typical of them
all; it is described using two expansions and a collapse, plus a final
repositioning:
\[
A_{1,0}\nearrow A_{1,0}^{\prime}\nearrow A_{1,0}^{\prime\prime}\searrow A_{1,0}^{\prime\prime\prime}\leadsto A_{1,1}
\]
(see Figure I-$3_{2}$).
The polyhedron $A_{1,0}^{\prime}$ is gotten from $A_{1,0}=A_{1}$ by starting
at the point $d\times\frac{1}{2}\times1$ in $A_{1}$, and sending out a feeler
(= interval) $F^{1}$ upwards from this point to the point $a\times\frac{1}
{2}\times2$, always staying in $(K-K^{(1)})\times\lbrack1,2]$, and then
adjoining the disc-with-feeler $(L_{\#}^{1}\cup D_{\#}^{2})\times\frac{1}
{2}\times2$ to $A_{1,0}\cup F^{1}$. Note that the feeler $F^{1}$ may have to
travel a long distance to join the two points, but nevertheless it is
possible, since the points $a$ and $d$ are joinable by an arc in the open
2-cell $K^{2}-K^{(1)}$. (Aside: This entire step could be done without any
vertical motion, working always in the $M\times1$ level to change $A_{1}$ to
$A_{2}$, postponing to Step 3 or 4 the matter of source-isolation. This is the
point of view adopted in the more general program in Part II. But for Part I
the present description seems a bit easier.)
Next, $A_{1,0}^{^{\prime\prime}}$ is gotten by thickening part of
$A_{1,0}^{\prime}$, adding to it a small 3-dimensional \textquotedblleft
tubular neighborhood\textquotedblright\ of $F^{1}\cup L_{\#}^{1}\times\frac
{1}{2}\times2$, resembling a 3-dimensional eyebolt, where \textquotedblleft
tubular neighborhood\textquotedblright\ of $F^{1}$ means the restriction to
$F^{1}$ of a 2-disc normal bundle of $(K^{2}-K^{(1)})\times\mathbb{R}^{1}$ in
$M^{4}\times\mathbb{R}^{1}$, and where \textquotedblleft tubular
neighborhood\textquotedblright\ of $L_{\#}^{1}\times\frac{1}{2}\times2$ means
a genuine regular neighborhood of it (rel $a\times\frac{1}{2}\times2$) in
$\partial D^{4}\times\frac{1}{2}\times2$. We assume these thickenings match up
nicely with each other, and with what has already been defined, as suggested
by Figure I-$3_{2}.$
To get $A_{1,0}^{^{\prime\prime\prime}}$ from $A_{1,0}^{^{\prime\prime}}$,
most of the newly added 3-dimensional eyebolt is collapsed away, starting at
the $(d\times\frac{1}{2}\times1)$-end and collapsing upwards, so that the only
material which is left behind is the 2-dimensional outer skin of the eyebolt,
plus a spanning 2-disc $D_{-}^{2}$ in the handle at the end of the eyebolt.
Also left undisturbed is the other 2-disc $D_{+}^{2}$ spanning the eye of the
eyebolt. Thus, $A_{1,0}^{^{\prime\prime\prime}}$ is homeomorphic to
\[
A_{\delta}\cup\left(\text{2-disc with handle}\right)\cup D_{-}^{2}\cup
D_{+}^{2}\text{,}
\]
where the boundaries of $D_{-}^{2}$ and $D_{+}^{2}$ are identified with the
circles in the figure-eight spine of the disc-with-handle (which is a
punctured 2-torus), and where the boundary of the disc-with-handle is
identified with the circle in $A_{\delta}$. In the collapsing of
$A_{1,0}^{\prime\prime}$ to $A_{1,0}^{\prime\prime\prime}$, some of the
intersection of $A_{1,0}^{\prime\prime}$ with $K\times\mathbb{R}^{1}$ is
collapsed away. Originally it consisted of $B_{\delta}$, plus a circle with a
feeler attached, plus a point $d_{+}\in\operatorname*{int}D_{+}^{2}$. When
done with the collapse the circle-with-feeler has been reduced to just a
single point on the circle, namely the center $d_{-}$ of $D_{-}^{2}$, while
$B_{\delta}$ and the point $d_{+}$ remain undisturbed.
The final repositioning of $A_{1,0}^{\prime\prime\prime}$, done to achieve the
source-isolation property of the Lemma, is to move one of the two intersection
points, say $d_{+}$, upwards to say the $M\times3$ level, keeping fixed the
complement of a small neighborhood of this point in $A_{1,0}^{\prime
\prime\prime}$. Let $A_{1,1}$ be this repositioned copy of $A_{1,0}
^{\prime\prime\prime}$. By construction,
\[
B_{1,1}=(B_{1,0}-d\times\frac{1}{2}\times1)\cup\{d_{-},d_{+}\}=B_{\delta}
\cup\{d_{-},d_{+}\}\text{,}
\]
where one of the new points is at the 2-level, and the other is at the 3-level.
Qualitatively, $A_{1,1}$ is gotten from $A_{1,0}$ in a single operation,
simply by \textit{puckering} the $2$-disc $D_{\#}^{2}\times\frac{1}{2}
\times1\subset A_{1,0}$, where a \textit{puckered} 2-\textit{disc} is a
2-disc-with-handle with two spanning $2$-discs attached to make it
contractible, as remarked in the description of $A_{1,0}^{\prime\prime\prime}$
above. Note that a puckered 2-disc really is obtained by puckering a genuine
$2$-disc; see Figure I-6a.
To get $A_{1,2}$ from $A_{1,1}$, qualitatively one simply puckers each of the
two spanning $2$-discs $D_{-}^{2}$ and $D_{+}^{2}$ in $A_{1,1}$.
Quantitatively, one does two independent, non-interfering expansion-collapse
processes, each process being a copy of the one used above to go from
$A_{1,0}$ to $A_{1,1}$, this time starting at the two new points $d_{-}$ and
$d_{+}$ of $B_{1,1}$. The initial feelers sent out should go up to say the
4-level (from the $2$-level) and to the 6-level (from the 3-level), and then
the bulk of the activity takes place in those levels, except that the final
repositioning operation makes the four new points of $B_{1,2}=B_{\delta}
\cup\{d_{--},d_{-+},d_{+-},d_{++}\}$ lie in say the $4,5,6$ and $7$-levels.
Now the pattern is established. The set $K\times\mathbb{R}^{1}\cap
h_{i,j}^{-1}(M\times0)$, being the frontier in $K\times\mathbb{R}^{1}$ of a
regular neighborhood in $K\times\mathbb{R}^{1}$ of $B_{1,j}=B_{\delta}
\cup2^{j}$ points, will consist of a pierced duncehat and $2^{j}$ $2$-spheres,
and these components are isolated in $K\times\mathbb{R}^{1}$, in that they
have no overlap in the $\mathbb{R}^{1}$-coordinate (i.e., their $\mathbb{R}
^{1}$-projections are disjoint). On the other hand, the images under $h_{1,j}$
of these components are linked in $M^{4}\times0$ in a very interesting manner,
as will be explained in the second round.
\textbf{Step i = 3} (first round). $A_{3}=A_{2}$. Hence $h_{3}$ is
qualitatively the same as $h_{2}$. The quantitative difference between $h_{3}$
and $h_{2}$ will be explained in the second round.
\textbf{Step i = 4} (first round). See Figures I-3$_{4}$ and I-7. Given
$A_{2}$ $(=A_{3})$, let $\{t_{\mu}\mid1\leq\mu\leq2^{p}\}$ denote the
$\mathbb{R}^{1}$-levels of the $2^{p}$ points of $B_{2}$ (in the above
construction, these levels were $t_{\mu}=2^{p}+\mu-1$). Then define
$A_{4}=A_{2}\cup\bigcup\{K^{2}\times t_{\mu}\mid1\leq\mu\leq2^{p}\}$. The
expansion-collapse $A_{2}\nearrow\!\!\searrow A_{4}$ follows from the fact
that $K\times I$ is collapsible. Note that $B_{4}=B_{2}\cup\bigcup
\{K^{2}\times t_{\mu}\mid1\leq\mu\leq2^{p}\}=B_{\delta}\cup\bigcup
\{K^{2}\times t_{\mu}\mid1\leq\mu\leq2^{p}\}$. It is clear that the
intersection $K^{2}\times\mathbb{R}^{1}\cap h_{4}^{-1}(M^{4}\times0)$ consists
of $1+2^{p+1}$ components, one being the pierced duncehat, and the others
being say the $2^{p+1}$ duncehats $\{K^{2}\times(t_{\mu}\pm\lambda)\mid
1\leq\mu\leq2^{p}\}$, for some small $\lambda>0$.
Having established the qualitative definitions of the $h_{i}$'s, we seek now
to gain a better understanding of their behavior, paying close attention to
the target intersections $h_{i}(K^{2}\times\mathbb{R}^{1})\cap M^{4}\times0$.
\textbf{Step i = 1} (second round). Analysis of the two-component intersection
$h_{1}(K\times\mathbb{R}^{1})\cap M\times0$ reveals that it can be described
in the following way (this amplifies the earlier remarks; see Figure I-4c).
Starting with $K^{2}$ in $M^{4}$, remove a small $2$-cell $C^{2}$ from
$K^{2}-K^{(1)}$, and replace it with a $2$-torus to produce the pierced
duncehat component $K_{\#}^{2}$. At the same time, add to the picture a
disjoint $2$-sphere $\Sigma^{2}$ lying in $M^{4}$, so that in $M^{4}$ it links
the $2$-torus of $K_{\#}^{2}$, as suggested by Figure I-4c. Note that the
inclusion $\partial M^{4}\hookrightarrow M^{4}-(K_{\#}^{2}\cup\Sigma^{2})$ is
monic on the fundamental group, as it must be (but this need not be verified).
It is interesting to actually watch the original duncehat
\[
K^{2}\times0=K^{2}\times\mathbb{R}^{1}\cap M^{4}\times0
\]
in $M^{4}\times0$ transform into these two components $K_{\#}^{2}\times0$ and
$\Sigma^{2}\times0$, by following the ambient isotopy $h_{t}$ of
$h_{0}=identity$ to $h_{1}$, focusing on the intersection $h_{t}
(K\times\mathbb{R}^{1})\cap M\times0$ (or more easily, on $K\times
\mathbb{R}^{1}\cap h_{t}^{-1}(M\times0)$, using the earlier description). The
changes in $K^{2}\times0$ take place in an arbitrarily small neighborhood of
$C^{2}\times0$ in $M^{4}\times0$, and are symmetric under rotation of this
4-dimensional neighborhood about the fixed $2$-plane transverse to
$C^{2}\times0$ at its center (the \textquotedblleft spinning\textquotedblright
\ point of view, explained in the upcoming $i=2$ case, is also useful here).
The changes are (Figure I-4c): first $C^{2}\times0$ divides, or splits, into
two parallel copies of itself, with boundaries remaining joined together at
$\partial C^{2}\times0$; then the $2$-sphere $\Sigma^{2}\times0$ materializes,
growing from say the original center point of $C^{2}\times0$, until its
diameter is almost that of $C^{2}\times0$; finally, the two parallel
boundary-identified copies of $C^{2}\times0$ join together near their centers,
becoming pierced there, so their union forms the $2$-torus which links the
$2$-sphere in $M\times0$. This dividing operation can be regarded as
\textit{duncehat meiosis}.
The pierced duncehat $K_{\#}\times0$ can now be made arbitrarily small in
$M\times0$, by level-preserving ambient isotopy of $M\times\mathbb{R}^{1}$,
because it can be pushed arbitrarily close to the circle $K^{(1)}\times0$,
which itself can be made small homotopically hence isotopically, using the
contractibility of $K^{2}$. This motion will stretch $\Sigma^{2}\times0$
large, but that is allowed, for Steps 2 and 3 will take care of that. We
henceforth will assume these motions have been incorporated into the
homeomorphism $h_{1}$.
\textbf{Step i = 2} (second round). In this step the $2$-sphere component of
intersection $\Sigma^{2}\times0$ from Step 1 is replaced by $2^{p}$ new
$2$-sphere components of intersection, which are embedded in a tubular
neighborhood $\Sigma^{2}\times D^{2}\times0$ of $\Sigma^{2}\times0$ in
$M^{4}\times0$. The goal here is to describe how these new $2$-spheres are
embedded. The pierced duncehat component $K_{\#}^{2}\times0$ from Step 1 is
left untouched (for the remainder of the proof, in fact).
The model situation comes from Bing's foundational 1952 paper \cite{Bi1}.
There Bing described a certain nest of 3-dimensional solid tori, which he used
to define a decomposition of $\mathbb{R}^{3}$. They can be described as
follows (see Figure I-5).
\begin{figure}
\caption{The Bing collection of thickened 1-spheres (i.e., solid tori) in
$\mathbb{R}^{3}$}
\end{figure}
Starting with $S^{1}\times D^{2}$, let $\chi_{-},\chi_{+}:S^{1}\times
D^{2}\rightarrow S^{1}\times\operatorname*{int}D^{2}$ be two disjoint
embeddings, with images denoted $S_{-}^{1}\times D^{2}$ and $S_{+}^{1}\times
D^{2}$, such that each image is by itself trivially embedded in $S^{1}\times
D^{2}$, and yet the two images are linked in $S^{1}\times D^{2}$ as shown in
Figure I-5. These embeddings can be iterated, to produce for any $p>0$ a
collection of $2^{p}$ solid tori $\{S_{\mu}^{1}\times D^{2}\mid\mu
\in\{-,+\}^{p}\}$ in $S^{1}\times D^{2}$, where
\[
S_{\mu}^{1}\times D^{2}=\chi_{\mu(1)}(\chi_{\mu(2)}(\ldots(\chi_{\mu(p)}
(S^{1}\times D^{2}))\ldots)).
\]
We assume that the embeddings $\chi_{-},\chi_{+}$ are chosen so that this
collection is invariant under reflection in $\mathbb{R}^{2}\subset
\mathbb{R}^{3}$, as shown in Figure I-5, where $\mathbb{R}^{2}$ is drawn
vertically in $\mathbb{R}^{3}$.
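To unpack this indexing (a routine remark, recorded only for convenience): since $\chi_{-}$ and $\chi_{+}$ have disjoint images in $S^{1}\times\operatorname*{int}D^{2}$, the solid tori are nested according to initial segments of their indices, and tori carrying indices of the same length are pairwise disjoint. For instance, with $p=2$,
\[
S_{-+}^{1}\times D^{2}=\chi_{-}\bigl(\chi_{+}(S^{1}\times D^{2})\bigr)\subset\chi_{-}(S^{1}\times\operatorname*{int}D^{2})\subset S_{-}^{1}\times D^{2}.
\]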
The collection of $2$-spheres that arises in this step is gotten by
\textquotedblleft spinning\textquotedblright\ this original Bing collection of
1-spheres. Imagine $\mathbb{R}^{4}$ as being gotten from $\mathbb{R}
^{3}=\mathbb{R}^{3}\times0\subset\mathbb{R}^{4}$, by spinning, or rotating,
$\mathbb{R}^{3}$ in $\mathbb{R}^{4}$ through $360^{\circ}$ (or $180^{\circ}$, if you
wish to be economical) about the plane $\mathbb{R}^{2}$, keeping
$\mathbb{R}^{2}$ fixed. Because of the symmetric positioning of the embeddings
$\{\chi_{\mu}\}$, each solid torus $S_{\mu}^{1}\times D^{2}$, when spun,
produces a thickened $2$-sphere $S_{\mu}^{2}\times D^{2}$. This collection
$\{S_{\mu}^{2}\times D^{2}\}$ of thickened $2$-spheres in $S^{2}\times D^{2}$
will be called the \textit{spun Bing collection} of thickened $2$-spheres.
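In coordinates (this is just the standard description of spinning, included for concreteness), write $\mathbb{R}^{3}=\mathbb{R}^{2}\times\mathbb{R}^{1}$ and $\mathbb{R}^{4}=\mathbb{R}^{2}\times\mathbb{R}^{2}$, the rotation taking place in the last $\mathbb{R}^{2}$-factor. Then the spin of any set $A\subset\mathbb{R}^{3}$ which is invariant under the reflection $(u,s)\mapsto(u,-s)$ is the set
\[
\{(u,z)\in\mathbb{R}^{2}\times\mathbb{R}^{2}\mid(u,|z|)\in A\},
\]
so that in particular a solid torus positioned symmetrically with respect to $\mathbb{R}^{2}$, as in Figure I-5, spins to a thickened $2$-sphere.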
It turns out that in Step 2, as described earlier, the $2^{p}$ $2$-sphere
components of $h_{2}(K\times\mathbb{R}^{1})\cap M\times0$ can be described as
the collection $\{\Sigma_{\mu}^{2}\mid\mu\in\{-,+\}^{p}\}\times0$, where
$\Sigma_{\mu}^{2}$ is the core $\Sigma_{\mu}^{2}\times0$ of the thickened
$2$-sphere $\Sigma_{\mu}^{2}\times D^{2}$ lying in $\Sigma^{2}\times D^{2}$,
all of this data being gotten by identifying $\Sigma^{2}\times D^{2}$ with
$S^{2}\times D^{2}$.
This can be seen by careful analysis of the earlier description. What follows
is a description of a precise model which may make this clearer; this is the
way I originally perceived the construction. Since the model also will be used
to describe the generalization in Part II, it is presented here in its
generalized context.
The model starts in euclidean 3-space, which will be denoted $\mathbb{E}^{3}$
here so that there will be no erroneous correspondence made with
$\mathbb{R}^{3}$ in the description of spinning given above, or in the
Appendix. In $\mathbb{E}^{3}$ we define the 1-dimensional subsets $L_{-}^{1}$
and $L_{+}^{1}$ shown in Figure I-6b, each consisting of a circle with an
infinitely long tail attached, such that the circles are linked. Using
coordinates $(a,b,c)$ for $\mathbb{E}^{3}$ as shown, their precise
descriptions are as follows (where $\eta>0$ is small): $L_{-}^{1}$ is the union
of the set of points in the $ac$-plane which are exactly distance $\eta$ from
the interval $0\times0\times\lbrack-2,0]$, together with the interval
$0\times0\times(-\infty,-2-\eta]$ (pardon the nonstandard coordinates used in
Figure I-6, but they seem to yield the best pictures).
\begin{figure}
\caption{Some aspects of Step $i=2$}
\end{figure}
Similarly, $L_{+}^{1}$ is the union of the set of points in the $bc$-plane
which are exactly distance $\eta$ from the interval $0\times0\times
\lbrack0,2]$, together with the interval $0\times0\times\lbrack2+\eta,\infty)$.
In what follows, the two first-stage $(1+k)$-spheres $S_{-}^{1+k}\cup
S_{+}^{1+k}$ of the $k$-times spun Bing collection of $(1+k)$-spheres in
$S^{1+k}\times D^{2}$ will be described, by means of intersections taking
place in $(4+k)$-space. It is probably best to understand first the $k=0$ case
(i.e. the original Bing case) and the $k=1$ case (which is the case of
interest in Part I). After describing the model, and the operation of
spherical mitosis in the model, it will be shown how the $k=1$ case of the
model corresponds to the current situation in $M^{4}\times\mathbb{R}^{1}$. The
relevant pictures are Figures I-6 b,c.
In euclidean $(4+k)$-space $\mathbb{E}^{4+k}=\mathbb{E}^{3}\times
\mathbb{E}^{1+k}$ (see above comment on the use of $\mathbb{E}$), consider the
sets $L_{-}^{2+k}\equiv L_{-}^{1}\times\mathbb{E}^{1+k};$ $L_{+}^{2+k}\equiv
L_{+}^{1}\times\mathbb{E}^{1+k};$ $\mathbb{E}^{2}=\mathbb{E}^{2}
\times0=\mathbb{E}^{2}(a,b)\subset\mathbb{E}^{3}=\mathbb{E}^{3}\times
0\subset\mathbb{E}^{4+k}$; and $Q^{3+k}\equiv$ the boundary of the unit
tubular neighborhood of $\mathbb{E}^{2}$ in $\mathbb{E}^{4+k}$. Thus
$\mathbb{E}^{2}$ is a $2$-plane in $\mathbb{E}^{3}$ perpendicular to the
$c$-axis, and $Q^{3+k}$ is naturally homeomorphic to $\mathbb{E}^{2}\times
S^{1+k}$, where we are thinking of $S^{1+k}$ as the unit sphere in
$0\times\mathbb{E}^{1}(c)\times\mathbb{E}^{1+k}$, where $\mathbb{E}^{1}(c)$
is the $c$-coordinate axis in $\mathbb{E}^{3}=\mathbb{E}^{2}(a,b)\times
\mathbb{E}^{1}(c)$. (Note that $\mathbb{E}^{3}$ itself can be regarded as the
$k=-1$ case of this construction.)
Let $D^{2}$ denote the unit disc in $\mathbb{E}^{2}$, so that $D^{2}\times
S^{1+k}$ (henceforth denoted $S^{1+k}\times D^{2})$ is the intersection of
$Q^{3+k}$ with $D^{2}\times\mathbb{E}^{1}(c)\times\mathbb{E}^{1+k}$ in
$\mathbb{E}^{3}\times\mathbb{E}^{1+k}$. The significance of this model is the following
\textbf{Observation.} \textit{The pair $(S^{1+k}\times D^{2},S^{1+k}\times
D^{2}\cap(L_{-}^{2+k}\cup L_{+}^{2+k}))$ corresponds in a natural manner to
the pair $(S^{1+k}\times D^{2},S_{-}^{1+k}\cup S_{+}^{1+k})$, where $S_{-}^{1+k}$
and $S_{+}^{1+k}$ are the two first-stage $(1+k)$-spheres in the $k$-times
spun Bing collection of $(1+k)$-spheres in $S^{1+k}\times D^{2}$.}
Understanding this is a matter of analyzing the model, seeing first the $k=0$
case and then proceeding to higher dimensions.
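For concreteness, here is the $k=0$ case written out in coordinates; nothing beyond the definitions above is used. Write $\mathbb{E}^{4}=\mathbb{E}^{3}\times\mathbb{E}^{1}$ with coordinates $(a,b,c,x)$. Then
\[
Q^{3}=\{(a,b,c,x)\mid c^{2}+x^{2}=1\}\cong\mathbb{E}^{2}(a,b)\times S^{1},\qquad S^{1}\times D^{2}=Q^{3}\cap\{a^{2}+b^{2}\leq1\},
\]
and the $k=0$ case of the Observation says that the intersection of this solid torus with $L_{-}^{2}\cup L_{+}^{2}=(L_{-}^{1}\cup L_{+}^{1})\times\mathbb{E}^{1}(x)$ is a pair of circles forming the first stage $S_{-}^{1}\cup S_{+}^{1}$ of the original Bing construction.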
We wish to describe a process by which these two $(1+k)$-spheres $S_{-}
^{1+k}\cup S_{+}^{1+k}$ can be gotten from the original core $(1+k)$-sphere
$S^{1+k}\times0\subset S^{1+k}\times D^{2}$ in a continuous manner. Let
$\tau:\mathbb{E}^{3}\times\mathbb{E}^{1+k}\rightarrow\mathbb{E}^{3}
\times\mathbb{E}^{1+k}$ be translation by 4 units in the positive direction of
the $c$-axis of $\mathbb{E}^{3}$. Let $\tau_{t}$, $0\leq t\leq1$, be the
natural linear isotopy joining $\tau_{0}=identity$ to $\tau_{1}=\tau$. We wish
to focus on the translated copy $\tau^{-1}(S^{1+k}\times D^{2})$ of
$S^{1+k}\times D^{2}$, and to examine its intersection with the always-fixed
subset $L_{-}^{2+k}\cup L_{+}^{2+k}$ of $\mathbb{E}^{4+k}$, as $\tau
^{-1}(S^{1+k}\times D^{2})$ is translated back to its standard position
$S^{1+k}\times D^{2}$ by the isotopy $\{\tau_{t}\}$. (The reason for this
somewhat backward point of view, i.e. moving $S^{1+k}\times D^{2}$ instead of
moving $L_{-}^{2+k}\cup L_{+}^{2+k}$, is that in the real situation in
$M^{4}\times\mathbb{R}^{1}$ this is what is happening, as we will see.)
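Explicitly, in the coordinates used above (with $(a,b,c)$ on $\mathbb{E}^{3}$ and $y\in\mathbb{E}^{1+k}$), this linear isotopy is simply
\[
\tau_{t}(a,b,c,y)=(a,b,c+4t,y),\qquad0\leq t\leq1,
\]
so that $\tau_{t}(\tau^{-1}(S^{1+k}\times D^{2}))$ is the copy of $S^{1+k}\times D^{2}$ translated $4(1-t)$ units in the negative $c$-direction.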
Examination reveals that the intersection $\tau_{t}(\tau^{-1}(S^{1+k}\times
D^{2}))\cap(L_{-}^{2+k}\cup L_{+}^{2+k})$ starts out at $t=0$ looking like the
core sphere of $\tau^{-1}(S^{1+k}\times D^{2})$, and then as $t$ increases the
intersection undergoes a transformation, dividing into two components, so that
ultimately at time $t=1$ it has become the pair of linked $(1+k)$-spheres
$S_{-}^{1+k}\cup S_{+}^{1+k}$ in $\tau_{1}(\tau^{-1}(S^{1+k}\times
D^{2}))=S^{1+k}\times D^{2}$. This is the process of \textit{spherical
mitosis}. It is interesting to follow the intermediate stages, even in the
original Bing $k=0$ case.
The $k=1$ case of the above-described model is made to correspond to our situation
in $M^{4}\times\mathbb{R}^{1}$ in the following manner. The space
$\mathbb{E}^{3}$ is to be thought of as an open subset of the boundary
$\partial D^{4}$ of the 0-handle $D^{4}$ of $M^{4}$, so that $\mathbb{E}
^{3}\cap L^{1}=L_{-}^{1}\cup L_{+}^{1}$. The fourth coordinate $\mathbb{E}
^{1}$ of $\mathbb{E}^{3}\times\mathbb{E}^{1+1}$ is to be thought of as the
$[0,1)$-coordinate in $D^{4}-\ast=\partial D^{4}\times\lbrack0,1)$, with the
origin of $\mathbb{E}^{1}$ corresponding say to the point $\frac{1}{2}
\in\lbrack0,1)$. Finally, the fifth coordinate $\mathbb{E}^{1}$ of
$\mathbb{E}^{3}\times\mathbb{E}^{1+1}$ is to be thought of as the
$\mathbb{R}^{1}$ coordinate of $M^{4}\times\mathbb{R}^{1}$, translated so that
the origin of $\mathbb{E}^{1}$ is at the $2$-level (or later on at the
4-level, or 6-level, or whatever higher level of $\mathbb{R}^{1}$ one is
working in at the time). In summary, the first four coordinates of
$\mathbb{E}^{3}\times\mathbb{E}^{1+1}$ are to be thought of as defining an
open subset of the 0-handle $D^{4}$ of $M^{4}$, and the fifth coordinate is to
be thought of as the vertical coordinate of $M^{4}\times\mathbb{R}^{1}$.
(Aside: Actually, there is no mathematical justification for distinguishing
the last two coordinates of $\mathbb{E}^{3}\times\mathbb{E}^{1+1}$ from each
other, since the construction in the model is symmetric about $\mathbb{E}^{3}
$. But it is conceptually helpful to make this distinction.)
We now can use this model to understand the construction of $h_{1,1}$ from
$h_{1,0}=h_{1}$. Let $\mathbb{E}_{\#}^{2}=\operatorname*{int}D_{\#}^{2}
\times\frac{1}{2}\times1$ denote the interior of the $2$-disc part of $A_{1}$.
Then $\mathbb{E}_{\#}^{2}\subset M\times1$, and $\mathbb{E}_{\#}^{2}\cap
K\times\mathbb{R}^{1}$ is the center point $d\times\frac{1}{2}\times1$ of
$\mathbb{E}_{\#}^{2}$. Suppose distances are scaled so that the portion of
$h_{1}^{-1}(M\times0)$ lying near the $2$-sphere component $h_{1}^{-1}
(\Sigma^{2}\times0)$ of $K\times\mathbb{R}^{1}\cap h_{1}^{-1}(M^{4}\times0)$
looks like the boundary of the unit tubular neighborhood of $\mathbb{E}
_{\#}^{2}$ in $M^{4}\times\mathbb{R}^{1}$. Forget the 3-stage transformation
of $A_{1,0}=A_{1}$ to $A_{1,1}$; instead let us just isotope $A_{1,0}$ to a
new position by isotoping $\mathbb{E}_{\#}^{2}$ in $M^{4}\times\mathbb{R}^{1}
$, keeping it fixed near its boundary, so that a neighborhood in
$\mathbb{E}_{\#}^{2}$ of the center point $d\times\frac{1}{2}\times1$ of
$\mathbb{E}_{\#}^{2}$ is moved up to the $2$-level $M\times2$, there to coincide
with a large compact piece of the plane $\mathbb{E}^{2}=\mathbb{E}^{2}(a,b)$
of the model. During the first part of this isotopy, the intersection of
$\mathbb{E}_{\#}^{2}$ with $K^{2}\times\mathbb{R}^{1}$ is to be kept always
the center point of $\mathbb{E}_{\#}^{2}$. As $\mathbb{E}_{\#}^{2}$ nears the
end of its journey, in a neighborhood of the $M^{4}\times2$ level, we see it
near the 0-level of the model. There we suppose that the end of its isotopy
coincides with the translation by $\tau_{t}$ of the plane $\tau^{-1}
(\mathbb{E}^{2})$ in the model to its standard position $\mathbb{E}^{2}$.
Hence, near the end of the isotopy the intersection of the moving
$\mathbb{E}_{\#}^{2}$ with the fixed $K\times\mathbb{R}^{1}$ changes from a
single point to four points. But that is not important. What is important is
that the movement of $h_{1,0}^{-1}(M\times0)$ to $h_{1,1}^{-1}(M\times0)$ can
be viewed as the movement of the frontier of a regular neighborhood of this
moving plane $\mathbb{E}_{\#}^{2}\subset A_{1,0}$, where at the end of the
isotopy the portion of this regular neighborhood we see in the model is the
1-neighborhood of the moving plane $\tau_{t}(\tau^{-1}(\mathbb{E}^{2}))$. So
the point is this: qualitatively, the change in the intersection of
$h_{1,0}^{-1}(M\times0)$ with $K\times\mathbb{R}^{1}$, as it is isotoped to
$h_{1,1}^{-1}(M\times0)$, is the same as the change of the intersection of
$\tau^{-1}(Q^{4})$ with $L_{-}^{3}\cup L_{+}^{3}$ in the model $\mathbb{E}
^{3}\times\mathbb{E}^{2}$, as $\tau^{-1}(Q^{4})$ is isotoped by the
translations $\{\tau_{t}\}$ back to its standard position $Q^{4}$. Hence the
change in the intersection amounts to spherical mitosis.
From this, the relation of the two $2$-sphere components of $h_{1,1}
(K\times\mathbb{R}^{1})\cap M\times0$ to the single $2$-sphere component of
$h_{1,0}(K\times\mathbb{R}^{1})\cap M\times0$ can be seen to be modelled on
the spun Bing construction. (As an incidental remark, to change the above
repositioned $A_{1,0}$ into the earlier-described $A_{1,1}$, one only has to
pucker (cf. earlier) the newly positioned $\mathbb{E}_{\#}^{2}$, to reduce its
four points of intersection with $K^{2}\times\mathbb{R}^{1}$ to two points of
intersection. See Figure I-6a.)
This same analysis works for the later stages of this step, i.e. going from
$h_{1,1}$ to $h_{1,2}$, etc.
\textbf{Step i=3 }(second round). In this step, there are no qualitative
changes made in $h_{2}$ to get $h_{3}$, but instead the diameters of the
various $2$-sphere components of $h_{2}(K\times\mathbb{R}^{1})\cap M\times0$
are made $\delta$-small, by following $h_{2}$ by a level-preserving
homeomorphism of $M\times\mathbb{R}^{1}$. This level-preserving homeomorphism
is obtained in the obvious manner from the ambient isotopy of $\Sigma
^{2}\times D^{2}\;rel\;\partial$ used to shrink small the $2^{p}$
$p^{\text{th}}$ stage $2$-spheres in the spun Bing collection of $2$-spheres.
This shrinking of the spun Bing collection is explained in the Appendix to
Part II. It is for this shrinking argument that $p=p(\delta)$ must be chosen
large. Note that $p$ can in fact be chosen at the start of Step 2 (as it must
be) because, in addition to $\delta$, it depends only on how $\Sigma^{2}\times
D^{2}$ is embedded in $M$, and that embedding is chosen after the construction
of $h_{1}$.
\textbf{Step i=4 }(second round). The effect of this step is to take each
$\delta$-small $2$-sphere component $\Sigma_{\mu}^{2}\times0$ of
$h_{3}(K\times\mathbb{R}^{1})\cap M\times0$, and to replace it by two copies
of duncehats, $h_{4}(K^{2}\times(t_{\mu}\pm\lambda))$, as suggested earlier.
See Figure I-7.
\begin{figure}
\caption{Step $i=4$: the passage from $h_{3}$ to $h_{4}$}
\end{figure}
The important thing is that these two new duncehat components of intersection
can be chosen to lie in any arbitrarily small neighborhood $\Sigma_{\mu}
^{2}\times D^{2}\times0$ of $\Sigma_{\mu}^{2}\times0$ in $M^{4}\times0$, hence
their size is automatically controlled. If one examines the isotopy of $h_{3}$
to $h_{4}$, engendered by the expansion-collapse of $A_{3}$ to $A_{4}$, one
can see the component-of-intersection $\Sigma_{\mu}^{2}\times0$ undergo
meiosis in $\Sigma_{\mu}^{2}\times D^{2}\times0$, becoming two duncehats which
are linked there.
This completes the proof of the Basic Lemma.
\end{proof}
\indent Given the Basic Lemma, the remainder of the proof of the Shrinking
Proposition is patterned on \cite[Section 3]{EM}. Figures 1, 3, and 4 there
are meaningful here, too. More precise details can be gotten from that paper.
Recall that $B^{2}\times D^{2}$ denotes the $2$-handle of $M^{4}$.
\textsc{Window Building Lemma. }\emph{Given }$\delta>0$\emph{, there is a
homeomorphism }$h_{\#}:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}
$\emph{, fixed on }$\partial M\times\mathbb{R}^{1}$\emph{, such that}
\begin{enumerate}
\item \emph{for each }$j\in\mathbb{Z}$\emph{ and each }$t\in\lbrack j-1,j+1]$,
$h_{\#}(M\times t)\subset M\times\lbrack j-1,j+1],$\emph{ and }
\item \emph{for each }$t\in\mathbb{R}^{1}$\emph{, if }$h_{\#}(K\times t)\cap
B^{2}\times D^{2}\times\lbrack j-1+\delta,j+1-\delta]\neq\emptyset$\emph{ for
any }$j\in2\mathbb{Z}$\emph{, then }$\operatorname*{diam}h_{\#}(K\times
t)<\delta$\emph{.}\textit{ }
\end{enumerate}
\begin{proof}
This follows quickly from the Basic Lemma. We can suppose that the
homeomorphism $h_{4}$ constructed there has the additional property that the
pierced duncehat component of $h_{4}(K^{2}\times\mathbb{R}^{1})\cap
M^{4}\times0$ does not intersect $B^{2}\times D^{2}\times0$. This either can
be verified by construction (e.g., it would be natural to make $h_{1}$ have
this property), or one can argue that the pierced duncehat component, being
$\delta$-small, can be assumed to lie in some ball in $M^{4}\times0$, and
therefore it can be isotoped off of $B^{2}\times D^{2}\times0$ in $M^{4}
\times0$. Supposing this, then by applying after $h_{4}$ a vertical
homeomorphism of $M^{4}\times\mathbb{R}^{1}$ which basically expands
$B^{2}\times D^{2}\times\lbrack-\eta,\eta]$ to $B^{2}\times D^{2}\times
\lbrack-1+\delta,1-\delta]$ (i.e., \textquotedblleft opens the window
wider\textquotedblright), for some small $\eta>0$, one can obtain a
homeomorphism $h_{\ast}:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}
^{1}$, fixed on $\partial M\times\mathbb{R}^{1}$ and having compact support,
such that $h_{\ast}$ satisfies condition (2) above for the value $j=0$. To
complete the proof, one conjugates $h_{\ast}$ to make it have the additional
property that its support lies in $M\times\lbrack-1,1]$, and then one obtains
$h_{\#}$ by taking the sum of an infinite number of translates of $h_{\ast}$
stacked on top of each other.
\end{proof}
\begin{proof}
[Proof of Shrinking Proposition from the Window Building Lemma]Given
$\epsilon>0$, choose a small ball $B^{4}$ in $\operatorname*{int}M^{4}$, with
$\operatorname*{diam}B^{4}<\epsilon/2$. We indicate how to construct a
homeomorphism $h:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$,
fixed on $\partial M\times\mathbb{R}^{1}$, which satisfies the following
weakened versions of the conditions from the Shrinking Proposition: for each
$t\in\mathbb{R}^{1}$,
\begin{itemize}
\item[1$^{\prime}$.] $h(M\times t)\subset M\times\lbrack t-3,t+3]$, and
\item[2$^{\prime}$.] either
\begin{itemize}
\item[a.] $h(K^{2}\times t)\subset B^{4}\times\lbrack t-3,t+3]$, or
\item[b.] $\operatorname*{diam}h(K^{2}\times t)<\epsilon$.
\end{itemize}
\end{itemize}
From this weaker version of the Shrinking Proposition it is clear that the
original version follows, simply by rescaling the vertical coordinate.
\begin{itemize}
\item[1.] To construct this $h$, first one constructs a uniformly continuous
homeomorphism $g:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$,
fixed on $\partial M\times\mathbb{R}^{1}$, such that for each $t\in
\mathbb{R}^{1},g(M\times t)\subset M\times\lbrack t-1,t+1]$, and
\item[2.] the image under $g$ of $(M_{1}^{4}\cap(D^{4}\cup B^{1}\times
D^{3}))\times\mathbb{R}^{1}\cup\bigcup\{M_{1}\times(j+1)\mid j\in
2\mathbb{Z}\}$ lies in $B^{4}\times\mathbb{R}^{1}$, where $M_{1}^{4}
=M^{4}-\partial M\times\lbrack0,1)$ for some collar $\partial M\times
\lbrack0,2)$ of $\partial M$ in $M^{4}$, and where $D^{4}\cup B^{1}\times
D^{3}$ is the union of the 0-handle and the 1-handle of $M^{4}$.
\end{itemize}
The details for $g$ (which are simple) are omitted, since a more general such
$g$ is constructed in Part II. Given $g$, one can let $h=gh_{\#}$, where
$h_{\#}$ is provided by the Window Building Lemma for some sufficiently small
value of $\delta=\delta(\epsilon,g)$, and where we are assuming without loss
that $h_{\#}(K\times\mathbb{R}^{1})\subset M_{1}\times\mathbb{R}^{1}$. This
completes the proof of the Shrinking Proposition, and hence Part I.
\end{proof}
\part{The double suspension of any homology $n$--sphere which bounds a
contractible $\left( n+1\right) $--manifold is a sphere}
\indent The purpose of this part is to generalize Part I to prove
\begin{theorem}
\label{double-suspension-of-boundary}\textit{The double suspension $\Sigma
^{2}H^{n}$ of any homology sphere $H^{n}$ which bounds a contractible
topological manifold is homeomorphic to a sphere. In particular, if $n\geq4$,
this conclusion holds.}
\end{theorem}
\indent The last sentence is justified by the following Proposition (which
basically is known; see proof). It is stated in sufficient generality for use
in the Prologue, Section II.
\begin{proposition}
\label{homology-spheres-bound}Suppose $H^{n}$ is a compact space such that
$H^{n}\times S^{1}$ is a topological manifold-without-boundary, and such that
$H^{n}$ has the integral homology groups of the $n$-sphere. If $n\geq4$, then
there is an embedding of $H^{n}\times\mathbb{R}^{1}$ into some open
contractible topological manifold $M^{n+1}$ so that $M^{n+1}-(H^{n}
\times\mathbb{R}^{1})$ is compact (i.e., $H^{n}\times\mathbb{R}^{1}$ is a
neighborhood of the end of $M^{n+1}$).
\end{proposition}
\indent The Proposition is unknown for $n=3$. If $n\geq4$, and $H^{n}$ is a
$PL$ manifold homology $n$-sphere, then Kervaire proved in \cite[Cor, p.
71]{Ke} that $H^{n}$ bounds a contractible $PL$ manifold. Using the post-1968
knowledge that any (topological manifold) homology $n$-sphere, $n\geq5$, is a
$PL$ manifold, Kervaire's proof covers this case, too. (Aside: it is
unknown whether every homology $4$-sphere is a $PL$ manifold, but topological
immersion theory readily establishes, by immersing $H^{n}
-\operatorname*{point}$ into $\mathbb{R}^{4}$, that $H^{n}
-\operatorname*{point}$ is a $PL$ manifold).
\indent The virtue of the following proof is its brevity.
\begin{proof}
[Proof of Proposition \ref{homology-spheres-bound}]It is rudimentary homotopy
theory that one can attach to $H^{n}$ a finite number of $2$-cells, and then
an equal number of $3$-cells, to make $H^{n}$ homotopically a sphere. Doing
these attachings as surgeries in a band $H^{n}\times(0,1)$ in $H^{n}\times
S^{1}$, one produces from $H^{n}\times S^{1}$ a new manifold, say $G$, which
is homotopically equivalent to $S^{n}\times S^{1}$. (The one nontrivial aspect
of this argument is embedding the attaching $1$- and $2$-spheres to have
product neighborhoods (i.e., trivial normal bundles). Perhaps this is most
easily handled by using the above-mentioned immersion proof to put a smooth
structure on $(H^{n}-\operatorname*{point})\times(0,1)$, and then doing all
surgeries there, using well-known smooth arguments.) Now the elementary
argument in \cite[Appendix I]{Si6} establishes that $\widetilde{G}$, the
universal cover of $G$, is homeomorphic to $S^{n}\times\mathbb{R}^{1}$. Let
$F=\widetilde{G}\cup\infty\approx\mathbb{R}^{n+1}$ be gotten by compactifying
one end of $\widetilde{G}$. Then we can let $M^{n+1}\subset F$ be the bounded
open complementary domain of $F-H^{n}\times0$, where $H^{n}\times0$ denotes
any of the infinitely many natural copies of $H^{n}$ in $F$.
\end{proof}
\indent As indicated in the Preliminaries, Theorem
\ref{double-suspension-of-boundary} follows from the more general
\begin{theorem}
[single cell-like set version]\label{single-cell-like-set}\textit{Suppose $X$
is a cell-like set in a manifold-without-boundary $M^{m}$. If $m\geq4$, then
the stabilized quotient map $\pi\times id_{\mathbb{R}^{1}}:M\times
\mathbb{R}^{1}\rightarrow M/X\times\mathbb{R}^{1}$ is approximable by
homeomorphisms.}
\end{theorem}
\indent The $m=3$ version of this theorem was established in \cite{EP}
\cite{EM} under the additional assumption that $X$ has an irreducible
3-manifold neighborhood in $M$, thus avoiding the Poincar\'{e} conjecture. The
$m=2$ version, which does not require stabilization, is considered classical.
From the above-stated version of Theorem \ref{single-cell-like-set}, one
readily deduces the somewhat more general \textquotedblleft closed-$0$
-dimensional\textquotedblright\ version, as in \cite{EP} \cite{EM}, explained
again in \cite{Ed2}, but there is no reason to elaborate this here.
\indent The reader will recognize the construction below as a straightforward
generalization of that in Part I, a fact which I realized in the month
(January 1975) following the completion of Part I. But the $m=4$ case was
elusive, and it wasn't until several months later (August 1975) that I
realized that the solution there was to use the freedom of the extra
$\mathbb{R}^{1}$-coordinate.
\indent In case $X$ happens to be a codimension 2 polyhedron in $M$ (e.g., if
$M$ is a compact contractible $PL$ manifold, $\dim M \geq5$, then $M$ has such
a spine $X$), then several steps in the following proof become trivial, so
this is a good case to keep in mind.
\indent Theorem \ref{single-cell-like-set} is proved by showing that the Bing
Shrinking Criterion is satisfied, i.e., by proving
\textsc{Shrinking Proposition. }\emph{Suppose }$X$\emph{ is a cell-like set in
a manifold-without-boundary }$M^{m}$\emph{. If }$m\geq4$\emph{, then given any
neighborhood }$U$\emph{ of }$X$\emph{ and any }$\epsilon>0$\emph{, there is
a homeomorphism }$h:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}
$\emph{, fixed on }$(M-U)\times\mathbb{R}^{1}$\emph{, such that for each
}$t\in\mathbb{R}^{1}$\emph{,}
\begin{itemize}
\item[1.] $h(U\times t)\subset U\times\lbrack t-\epsilon,t+\epsilon]$\emph{,
and}
\item[2.] $\operatorname*{diam}h(X\times t)<\epsilon$\emph{.
}
\end{itemize}
Succinctly stated, the idea of the proof is to produce a neighborhood basis
for $X$ in $M$ which is sufficiently standard in some sense, so that the
constructions from Part I can be applied. Details follow.
\indent Since $X$ is cell-like, $X$ has a $PL$ triangulable neighborhood
in $M^{m}$ (even when $m=4$). The easiest proof of this is the one which uses
topological immersion theory to immerse some neighborhood of $X$ into
$\mathbb{R}^{m}$ (cf. proof of Proposition above). So without loss $M$ is a
$PL$ manifold.
\indent It is a standard fact that $X$ has an arbitrarily small compact $PL$
manifold neighborhood $N$ in $M$ such that $N$ has an $(m-2)$-dimensional
spine. This is proved by taking an arbitrary compact $PL$ manifold
neighborhood $N_{\ast}$ of $X$, and by isotoping its dual $1$-skeleton off of
$X$, by an ambient isotopy supported in $\operatorname*{int}N_{\ast}$, using
the fact that $X$ is cell-like and $N_{\ast}-X$ has one end. Then a small
neighborhood of the repositioned dual 1-skeleton of $N_{\ast}$ can be deleted
from $N_{\ast}$ to produce $N$. This argument used the dimension restriction
$m\geq4$ in constructing the isotopy. (Recall that when $m=3$ the existence of
such a neighborhood for $X$ a contractible $2$-dimensional polyhedron would
imply the Poincar\'{e} conjecture. However, in this dimension, if one
hypothesizes in addition that $X$ has an irreducible $3$-manifold
neighborhood, then such $1$-spine neighborhoods exist; see \cite[Lemma
1]{McM1}.)
\indent Letting $N$ be such a neighborhood of $X$, we can write $N=L\cup
(\bigcup_{\alpha=1}^{r}H_{\alpha})$ (see Figure II-1), where $L$ is a compact
manifold with an $(m-3)$-dimensional spine, and where the $H_{\alpha}$'s are
disjoint handles of index $m-2$, attached to $L$ so that for each $\alpha$,
$L\cap H_{\alpha}=\delta H_{\alpha}\subset\partial L$, where $\delta
H_{\alpha}$ is the \emph{attaching-boundary} of $H_{\alpha}$, defined by
$\delta H_{\alpha}=\partial D_{\alpha}^{m-2}\times D^{2}\subset D_{\alpha
}^{m-2}\times D^{2}=H_{\alpha}$. The \emph{core} of the handle is the
$(m-2)$-cell $D_{\alpha}^{m-2}=D_{\alpha}^{m-2}\times0\subset D_{\alpha}
^{m-2}\times D^{2}=H_{\alpha}$. See Figure II-1.
\begin{figure}
\caption{The neighborhood $\displaystyle N=L\cup\bigcup_{\alpha=1}^{r}H_{\alpha}$}
\end{figure}
\indent This neighborhood $N$ can be thought of as a higher dimensional
analogue of the familiar 3-dimensional cube-with-handles neighborhood.
\indent The primary goal is to establish
\begin{lemma}
[Building a Single Window]\label{building-a-window}Suppose the data $X\subset
N^{m}$ as above. Let $H_{\alpha}$ be any one of the $\left( m-2\right) $-handles
of $N$. Then given any $\delta>0$, there is a homeomorphism $h_{\alpha
}:N\times\mathbb{R}^{1}\rightarrow N\times\mathbb{R}^{1}$ such that
\begin{enumerate}
\item $h_{\alpha}$ \textit{has compact support in $\operatorname*{int}
N\times\mathbb{R}^{1}$, and}
\item \textit{for each $t\in\mathbb{R}^{1}$, if $h_{\alpha}(X\times t)\cap
H_{\alpha}\times\lbrack-1,1]\neq\emptyset$, then} $\operatorname*{diam}
h_{\alpha}(X\times t)<\delta$.
\end{enumerate}
\end{lemma}
\textsc{Note. } $h_{\alpha}$ can be regarded as the analogue of the
homeomorphism $h_{\ast}$ constructed during the proof of the Window Building
Lemma, Part I (at least in the case $N$ has only one $(m-2)$-handle
$H_{\alpha}$).
\indent The first task is to describe a certain \emph{model handle} sequence
which will be used in the proof of Lemma 1. The 3-dimensional version of the
model is a solid cylinder $H^{3}(0)=D^{1}\times D^{2}$ (to be thought of as a
1-handle), containing the familiar infinite sequence of linked sub-1-handles
as shown in Figure II-2. That is, $H^{3}(0)$, together with its subhandles,
amount to the Bing collection of solid tori, cut in half by the vertical plane
shown in Figure I-5. The \emph{attaching-boundary} of $H^{3}(0)$ is the union
of the two end discs, $\delta H^{3}(0)=\partial D^{1}\times D^{2}$. The union
of the $2^{p}$ sub-1-handles of $H^{3}(0)$ at stage $p$ is denoted $H^{3}(p)$,
with attaching-boundary $\delta H^{3}(p)=H^{3}(p)\cap\delta H^{3}(0)$.
\begin{figure}
\caption{The model handle sequence $\{H^{3}(p)\}$}
\end{figure}
\indent In higher dimensions, the initial \emph{model handle} is
$H^{m}(0)\equiv H^{3}(0)\times D^{m-3}$ (which we really want to think of as
$(D^{1}\times D^{m-3})\times D^{2}$), and the \emph{attaching-boundary} of
$H^{m}(0)$ is $\delta H^{m}(0)=\delta H^{3}(0)\times D^{m-3}\cup
H^{3}(0)\times\partial D^{m-3}$ (which can then be thought of as the thickened
$(m-3)$-sphere $\partial(D^{1}\times D^{m-3})\times D^{2}$). Let
$H^{m}(p)=H^{3}(p)\times D^{m-3}$ and let $\delta H^{m}(p)=H^{m}(p)\cap\delta
H^{m}(0)$. It is important to realize that $\delta H^{m}(p)$ is \textbf{not}
just $\delta H^{3}(p)\times D^{m-3}$, but also includes $H^{3}(p)\times
\partial D^{m-3}$. \emph{Hence for }$m\geq4$\emph{, the subset }$\delta
H^{m}(p)$\emph{ of }$\delta H^{m}(0)$\emph{ looks like the }$p^{th}$\emph{
stage of the spun Bing collection of thickened }$(m-3)$\emph{-spheres in
}$S^{m-3}\times B^{2}$\emph{.}
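\indent For instance, when $m=5$ the initial model handle is $H^{5}(0)=(D^{1}\times D^{2})\times D^{2}$, its attaching-boundary is the thickened $2$-sphere
\[
\delta H^{5}(0)=\partial(D^{1}\times D^{2})\times D^{2}\cong S^{2}\times D^{2},
\]
and $\delta H^{5}(p)$ sits in it as the $p^{\text{th}}$ stage of the spun Bing collection of thickened $2$-spheres, i.e., precisely the collection used in Part I.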
\indent We will need ramified versions of these models, produced in the spirit
of R. Daverman's ramified Cantor set constructions \cite[Lemma 4.1]{Da1}. Any
stage in the construction of the sequence $\{H^{3}(p)\}$, starting with the
$0^{\text{th}}$ stage, may be \emph{ramified}, which means that each handle of
that stage is replaced by several adjacent, parallel copies of itself.
Examples are shown in Figure II-3.
\begin{figure}
\caption{Producing a model ramified handle sequence}
\end{figure}
There may be a different number of ramifications performed in each
handle-component at each stage; the only restriction is that the number of
ramifications at each stage be finite. These ramification indices will not be
recorded in any way, and our notation for a ramified handle will be the same
as that for an unramified handle. It will become clear why these ramifications arise.
\indent A model ramified $(m-2)$-handle sequence $\{H^{m}(p)\}$ is defined to
be $\{H^{3}(p)\times D^{m-3}\}$, as before, where now $\{H^{3}(p)\}$ is a
model ramified 1-handle sequence. In particular there is no ramification in
the $D^{m-3}$-coordinate (whatever that would mean).
\indent These model handles will be used in the following Lemma.
From now on, we restrict attention to the $\dim M \geq5$ case, returning to
the $\dim M = 4$ case at the end of the proof.
\begin{lemma}
[Repositioning]\label{repositioning}\textit{Suppose $X,N^{m}$ and $H_{\alpha}$
are as in Lemma 1, and suppose $m\geq5$. Then for any $p\geq0$, there is a
repositioning of $X$ in $\operatorname*{int}N$ (by an unnamed ambient isotopy
of $N$ $rel$ $\partial$) and there is a sequence of compact neighborhoods of
the repositioned $X,$ $N=N_{0}\supset N_{1}\supset\cdots\supset N_{p}$, such
that the $(p+1)$-tuple of pairs $(H_{\alpha},\delta H_{\alpha})\cap
(N_{0},N_{1},\ldots,N_{p})$ is homeomorphic, respecting this filtered
structure, to the first $p$ stages of some model ramified pair $(H^{m}
(0),\delta H^{m}(0))$, i.e., to}
\[
((H^{m}(0),H^{m}(1),\cdots,H^{m}(p)),(\delta H^{m}(0),\delta H^{m}
(1),\cdots,\delta H^{m}(p))).
\]
\end{lemma}
\indent We are really only interested in the $p^{th}$ stage itself, but the
intermediate stages help clarify the picture.
\indent The Repositioning Lemma will be deduced by repeated application of the
following Lemma, which was inspired by a similar construction in \v{S}tanko's
work \cite[\S \ 3.1]{St1}, \cite[\S \ 6.1]{St2}, cf. \cite[Fundamental
Lemma]{Ed1}.
\begin{lemma}
\label{Stanko-type-lemma}Suppose $N$ is a compact manifold neighborhood of a
cell-like set $X$, $\dim N\geq5,$ and suppose $D_{1}^{2},\cdots,D_{q}^{2}$ are
disjoint 2-discs in $N$ such that for each $j,$ $D_{j}^{2}\cap\partial
N=\partial D_{j}^{2}$ (with all embeddings nice, e.g. $PL$). Then for each
$j$, there is a disc-with-handles $F_{j}^{2}$, gotten from $D_{j}^{2}$ by
adding some (unspecified) number of standard handles, which are arbitrarily
small and arbitrarily close to $D_{j}^{2}\cap X$, and there is a repositioning
of $X$, with support arbitrarily close to $X$, such that when done $X\cap
(\bigcup_{j=1}^{q}F_{j}^{2})=\emptyset$.
\end{lemma}
\begin{proof}
[Proof of Lemma \ref{Stanko-type-lemma}]For simplicity, we treat only the
$q=1$ case, writing $D^{2}$ for $D_{1}^{2};$ the general case is the same.
Also, we omit the precise treatment of epsilons. Let $A^{2}\subset
\operatorname*{int}D^{2}$ be a small compact $2$-manifold neighborhood of
$X\cap D^{2}$ in $D^{2}$. The key fact is: for each component $A_{\ell}^{2}$
of $A^{2}$, there is a connected oriented surface $\widetilde{A}_{\ell}^{2}$
in $N-X$, lying close to $X$, with $\partial\widetilde{A}_{\ell}^{2}=\partial
A_{\ell}^{2}$. This is because, by Alexander duality, loops in $N-X$ near $X$
are null-homologous in $N-X$ near $X$. Let $\widetilde{F}^{2}=(D^{2}
-A^{2})\cup\widetilde{A}^{2}$, where $\widetilde{A}^{2}$ is the union of the
$\widetilde{A}_{\ell}^{2}$'s, general-positioned to be disjoint as necessary.
It remains to isotope $\widetilde{F}^{2}$ to coincide with the
\textquotedblleft standard\textquotedblright\ surface $F^{2}$, which is gotten
from $D^{2}$ by adding to each component $A_{\ell}^{2}$ of $A^{2}$ a number of small
handles equal to the genus of $\widetilde{A}_{\ell}^{2}$. This is where $X$ is
moved. Since $X$ is cell-like, there is a homotopy of $\widetilde{F}^{2}$
inside of a small neighborhood of $X$, carrying id: $\widetilde{F}
^{2}\rightarrow\widetilde{F}^{2}$ to a homeomorphism $\widetilde{F}
^{2}\rightarrow F^{2}$. Since $\dim N\geq5$, this homotopy can be converted
into an isotopy (if $\dim N=5$, use general position and the Whitney trick to
embed this 3-dimensional homotopy in $N\times I$ near $X\times I$, and then
invoke concordance $\Rightarrow$ isotopy). This completes the proof of Lemma 3.
\end{proof}
\begin{proof}
[Proof of Lemma \ref{repositioning} from Lemma \ref{Stanko-type-lemma}]First
we produce $N_{1}$. Let $D_{\alpha}^{2}$ be the cocore $2$-disc of the handle
$H_{\alpha}$, i.e., $D_{\alpha}^{2}=0\times D^{2}\subset D_{\alpha}
^{m-2}\times D^{2}=H_{\alpha}^{m}$. Applying Lemma 3, we can assume that $X$
has been repositioned so that $X\cap F_{\alpha}^{2}=\emptyset$, where
$F_{\alpha}^{2}$ is gotten from $D_{\alpha}^{2}$ by adding some number of
small handles, say $k$ of them. Let $H^{m}(1)$ denote the first stage of a
model $(m-2)$-handle $H^{m}(0)$ in which the 0$^{\text{th}}$ stage
ramification index is $k$, that is, $H^{m}(1)$ consists of $k$ pairs of linked
handles, instead of just one pair of linked handles as in the unramified model
(caution: by our convention, handles from distinct pairs in $H^{m}(1)$ do not
link; see Figure II-3, third frame.) Let $N_{1}$ be gotten from $N$ by
replacing $H_{\alpha}^{m}$ with a copy of $H^{m}(1)$, positioned so that
$N_{1}\cap F_{\alpha}^{2}=\emptyset$, and so that $N_{1}$ is a spine of
$N-F_{\alpha}^{2}$. This is best seen in the $m=3$ case; see Figure II-3,
third and sixth frames. To complete this step, isotope $X$ into
$\operatorname*{int}N_{1}$.
Next, $N_{2}$ is produced inside of $N_{1}$, just as $N_{1}$ was produced in
$N$, this time using the $2k$ $2$-discs in $N_{1}$ which are the natural
cocore $2$-discs of the $2k$ $(m-2)$-handles of $N_{1}$ lying in $H_{\alpha}$.
See Figure II-3, last four frames. In applying Lemma 3 to the set of $2$-discs
in $N_{1}$, each $2$-disc may have a different number of handles added, and so
the ramification indices of these various first stage handles may be different
(of course, one could add dummy handles, to make the indices all the same).
Continuing this way one completes the proof of the Repositioning Lemma.
From this point on, the outline of the proof is the same as that of Part I,
and we will concentrate only on the nontrivial differences.
\end{proof}
\begin{proof}
[Proof of Lemma \ref{building-a-window}]This is modeled on the Basic Lemma of
Part I. We will describe the analogous successive homeomorphisms $h_{1}
,h_{2},h_{3},h_{4}$, and finally the homeomorphism $h_{\alpha}$, without
precisely stating the full list of properties each has, to avoid unnecessary repetition.
During the course of this construction, there will be a great deal of
\textquotedblleft repositioning\textquotedblright\ of $X$ in $N$, often
unnamed, as done earlier in Lemmas 2 and 3, and this repositioning is to be
built into the $h_{i}$'s. This repositioning is always to be thought of as
being done in the source copy of $N\times\mathbb{R}^{1}$, that is, such
motions will always be put in front of any already-constructed motions. It
will be understood that any such repositionings in $N\times\mathbb{R}^{1}$
will be level-preserving, and will be damped to be the identity out near
$N\times\pm\infty$, away from all of the essential activity.
The goal of the first three steps is to produce a homeomorphism $h_{3}$ which
has nicely controlled behavior at the target level $N\times0$, as before. But
unlike in Part I, there will be no discussion until Step 4 of the
source-isolation of the components of $h_{i}(X\times\mathbb{R}^{1})\cap
N\times0$.
\textbf{Step 1.} Suppose $X$ has been repositioned so that $N_{1}\subset N$
exists, as described in Lemma 2. For the moment, suppose the intersection
$N_{1}\cap H_{\alpha}$ is a single, unramified pair of linked handles. The
purpose of $h_{1}$ is to \textquotedblleft pierce\textquotedblright\ the
intersection $N_{1}\times\mathbb{R}^{1}\cap N\times0=N_{1}\times0$ by moving
$N_{1}\times\mathbb{R}^{1}$ so that $h_{1}(N_{1}\times\mathbb{R}^{1})\cap
N\times0$ consists of two components, one of them a pierced copy of $N_{1}$,
and the other a thickened $(m-2)$-sphere. This is most easily described in the
source, using a polyhedral pair $(A_{1},B_{1})$, as in Part I. This time the
important $2$-dimensional part of $A_{1}$ (i.e., $A_{1}-N\times(-\infty,0]$)
is more like a $2$-dimensional finger rather than a 1-dimensional feeler with
a $2$-disc attached to its end. See Figure II-4.
\begin{figure}
\caption{The 2-dimensional finger part $F^{2}$ of $A_{1}$}
\end{figure}
If we were restricting the construction to the essential part of the linked
handle pair $N_{1}\cap H_{\alpha}$, namely the $(m-2)$-dimensional cores
$D_{-}^{m-2}\cup D_{+}^{m-2}\subset N_{1}\cap H_{\alpha}$, then we would
choose $A_{1}\equiv N\times(-\infty,0]\cup F^{2}$, where $F^{2}$ is a
$2$-dimensional finger (topologically a $2$-disc) which intersects
$N\times(-\infty,0]$ in (the bottom) half of its 1-dimensional circle
boundary, and intersects $D_{-}^{m-2}\times\lbrack0,\infty)$ in the other half
of its boundary ($\approx$ interval, called its \emph{rim} and denoted
$\rho F$), and is such that $F^{2}\cap(D_{+}^{m-2}\times\mathbb{R}
^{1})=$ point. If the cores $D_{-}^{m-2}\cup D_{+}^{m-2}$ are thickened by
crossing them with $D^{2}$ to form $N_{1}\cap H_{\alpha}$, then $F^{2}$ should
be thickened at its rim $\rho F$ by the same amount. Given this new, suitably
thickened $A_{1}=N\times(-\infty,0]\cup F^{2}\cup\rho F\times D^{2}$, let
$h_{1}:N\times\mathbb{R}^{1}\rightarrow N\times\mathbb{R}^{1}$ be defined as
in Part I, so that $h_{1}^{-1}(N\times(-\infty,0])$ is a regular neighborhood
rel $\partial N\times\mathbb{R}^{1}$ of $A_{1}$ in $N\times\mathbb{R}^{1}$,
and so that $N_{1}\times\mathbb{R}^{1}\cap h_{1}^{-1}(N\times0)$ is the
frontier in $N_{1}\times\mathbb{R}^{1}$ of a regular neighborhood rel
$\partial N_{1}\times\mathbb{R}^{1}$ of
\[
B_{1}\equiv A_{1}\cap N_{1}\times\mathbb{R}^{1}=N_{1}\times(-\infty,0]\cup\rho
F\times D^{2}\cup(\operatorname*{point}\times D^{2}).
\]
It follows that this set $N_{1}\times\mathbb{R}^{1}\cap h_{1}^{-1}(N\times0)$
consists of the two components being sought, namely a pierced copy of $N_{1}$
and a thickened $(m-2)$-sphere.
In the general ramified situation where the intersection $N_{1}\cap H_{\alpha
}$ consists of $k$ pairs of linked handles instead of just one pair, one must
use $k$ fingers in $A_{1}$ instead of one, so that $h_{1}(N_{1}\times
\mathbb{R}^{1})\cap N\times0$ will have $k+1$ components, one of them a
$k$-times-pierced copy of $N_{1}$, denoted $N_{1\#}$, and the other $k$ of
them being thickened $(m-2)$-spheres.
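In summary, the intersection at the target level has the form
\[
h_{1}(N_{1}\times\mathbb{R}^{1})\cap N\times0\;=\;N_{1\#}\;\cup\;\Sigma_{1}\cup\cdots\cup\Sigma_{k},
\]
where $\Sigma_{1},\ldots,\Sigma_{k}$ denote the $k$ disjoint thickened
$(m-2)$-spheres (labels used only in this display); the unramified case is
$k=1$.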
To complete Step 1, one follows the above homeomorphism $h_{1}$ by a
level-preserving homeomorphism of (the target) $N\times\mathbb{R}^{1}$, which
moves $N_{1\#}\times0$ in $N\times0$ to be disjoint from the handle
$H_{\alpha}\times0$. It is the newly made holes in $N_{1\#}$ that allow this
move, just as in the pierced duncehat case in Part I.
\textbf{Step 2.} The homeomorphism $h_{2}$ will be gotten from $h_{1}$ by
preceding $h_{1}$ by a level-preserving repositioning of $X$ in the source.
Qualitatively this will be a mildly different point of view from that in Part
I, but it will accomplish the same thing (Step 2 of Part I could have been
done this way, but it didn't seem worth the effort in that simple situation).
Suppose $p=p(\delta)$ has been chosen, as it will be in Step 3 (caution: this
$p$ will correspond to $p-1$, not $p$, of Part I. Relabeling could be done to
avoid this, but that probably would cause more confusion than it would
prevent.) The motion needed to get $h_{2}$ from $h_{1}$ is simply this: $X$ is
repositioned in $N_{1}$ so that a sequence $N\supset N_{1}\supset\ldots\supset
N_{p}$ of neighborhoods of $X$ exists, as described in Lemma 2 (where $N_{1}$
is from the preceding Step 1). Then it turns out that the intersection
$h_{2}(N_{p}\times\mathbb{R}^{1})\cap N\times0$, which lies in the previously
constructed intersection $h_{1}(N_{1}\times\mathbb{R}^{1})\cap N\times0$,
consists of a copy of $N_{p}$ that has been pierced many times, plus a
collection of thickened $(m-2)$-spheres which is equivalent to the collection
of $(p-1)^{\text{st}}$ stage thickened $(m-2)$-spheres in a ramified spun Bing
collection of $(m-2)$-spheres. If there were no ramifications done in
constructing the sequence $N_{1},\ldots,N_{p}$, then there would be exactly
$2^{p-1}$ thickened $(m-2)$-spheres in this collection; in general, there are
many more.
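The arithmetic behind this count is just the doubling in the Bing
construction: an unramified nest doubles the number of spherical components at
each of the $p-1$ stages beyond Step 1, so, starting from the single thickened
sphere of Step 1,
\[
\#\{\text{thickened }(m-2)\text{-spheres in }h_{2}(N_{p}\times\mathbb{R}
^{1})\cap N\times0\}\;=\;2^{p-1},
\]
exactly as for the $(p-1)^{\text{st}}$ stage of the Bing nest of solid tori.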
\textbf{Step 3.} The homeomorphism $h_{3}$ is gotten by following $h_{2}$ by a
level-preserving homeomorphism of (the target) $N\times\mathbb{R}^{1}$, which
shrinks to diameter $<\delta$ the thickened $(m-2)$-sphere components of the
intersection $h_{2}(N_{p}\times\mathbb{R}^{1})\cap N\times0$. This uses the
shrinkability of the spun Bing collection of $(m-2)$-spheres, explained in the
Appendix to Part II. Here we assume that $p=p(\delta)$ was chosen so large
that this shrinking to size less than $\delta$ is possible. At the end of this
step, it has been arranged that each component of $h_{3}(N_{p}\times
\mathbb{R}^{1})\cap N\times0$ which intersects $H_{\alpha}\times0$ has
diameter $<\delta$.
\textbf{Steps 4 and }$\alpha$\textbf{.} These steps are combined, because the
generality of the present setting makes it difficult\footnote{In fact
impossible, by the example in [McM2].} to construct a pure analogue of the
homeomorphism $h_{4}$ of the duncehat case. The complicating factors will be
explained after establishing some geometry.
The important thing to realize for this step is that near any thickened
$(m-2)$-sphere component $\Sigma_{\mu}^{m-2}\times D^{2}$ of $N_{p}
\times\mathbb{R}^{1}\cap h_{3}^{-1}(N\times0)$ (working now in the source),
the preimage $h_{3}^{-1}(N\times0)$ looks like the boundary of a tubular
neighborhood in $N\times\mathbb{R}^{1}$ of some $2$-dimensional plane, call it
$\mathbb{E}_{\mu}^{2}\times1$ (which we will assume lies in the level
$N\times1$), where $\mathbb{E}_{\mu}^{2}$ cuts transversally across one of the
$(m-2)$-handles of $N_{p}$ in $N$, say $D_{\gamma}^{m-2}\times D^{2}$, hence
$\mathbb{E}_{\mu}^{2}\cap D_{\gamma}^{m-2}\times D^{2}=\operatorname*{point}
\times D^{2}.$ The plane $\mathbb{E}_{\mu}^{2}$ is not a closed subset of $N$,
so when we talk about a tubular neighborhood of $\mathbb{E}_{\mu}^{2}\times1$
in $N\times\mathbb{R}^{1}$, it should be understood that we are restricting
attention to a neighborhood of $N_{p}\times\mathbb{R}^{1}$. The plane
$\mathbb{E}_{\mu}^{2}$ is not immediately apparent from the description given
so far, but it could be obtained from the finger $F^{2}$ by puckering $F^{2}$,
just as the corresponding plane $\operatorname*{int}D_{\mu}^{2}$ in Part I was
obtained by puckering the $2$-disc part of $A_{1}$. The only real difference
between the present situation and that of Part I is the thickness of the
various components of the picture, i.e., the fact that some of the components
from Part I have been producted with $D^{2}$ to obtain the components here.
Consider the pair $(N(\mathbb{E}_{\mu}^{2}\times1),\partial N(\mathbb{E}_{\mu
}^{2}\times1))\cap N_{p}\times\mathbb{R}^{1}$, i.e. the intersection of the
tubular neighborhood $N(\mathbb{E}_{\mu}^{2}\times1)$ of $\mathbb{E}_{\mu}
^{2}\times1$ in $N\times\mathbb{R}^{1}$, and its boundary $\partial
N(\mathbb{E}_{\mu}^{2}\times1)$, with $N_{p}\times\mathbb{R}^{1}$. It can be
regarded as a pair $(\Delta_{\mu}^{m-1},\Sigma_{\mu}^{m-2})\times D^{2}$,
where the (ball, boundary sphere) pair $(\Delta_{\mu}^{m-1},\Sigma_{\mu}
^{m-2})$ is the intersection of the pair $(N(\mathbb{E}_{\mu}^{2}
\times1),\partial N(\mathbb{E}_{\mu}^{2}\times1))$ with $D_{\gamma}
^{m-2}\times\mathbb{R}^{1}$, where $D_{\gamma}^{m-2}$ is as above. As a
consequence, the intersection $N_{p}\times\mathbb{R}^{1}\cap h_{3}
^{-1}(N\times\lbrack-\epsilon,\epsilon])$, for small $\epsilon>0$, can be
regarded as being $\Sigma_{\mu}^{m-2}\times\lbrack-\epsilon,\epsilon]\times
D^{2}$, where $\Sigma_{\mu}^{m-2}\times\lbrack-\epsilon,\epsilon]$ denotes a
small collar neighborhood of $\Sigma_{\mu}^{m-2}$ in $D_{\gamma}^{m-2}
\times\mathbb{R}^{1}$, and where the correspondence preserves the
$[-\epsilon,\epsilon]$-coordinate. By a simple reparametrization of this
collar coordinate, combined with an expansion in the target taking
$N\times\lbrack-\epsilon,\epsilon]$ onto $N\times\lbrack-1,1]$, we can assume
without loss that $\epsilon=1$ above, which we do from now on.
As in Part I, the components of the intersection $N_{p}\times\mathbb{R}
^{1}\cap h_{3}^{-1}(N\times\lbrack-1,1])$ can be made (source-)isolated, i.e.,
their projections to the $\mathbb{R}^{1}$ coordinate can be arranged to be
disjoint, because the various disjoint thickened $(m-1)$-cells $\{\Delta_{\mu
}^{m-1}\times D^{2}\}$ corresponding to the spherical components of
intersection can be slid vertically to have nonoverlapping $\mathbb{R}^{1}
$-coordinate values. Another way of saying this is that the planes
$\{\mathbb{E}_{\mu}^{2}\times1\}$ in $N\times\mathbb{R}^{1}$ can be vertically
repositioned to lie at different levels $\{\mathbb{E}_{\mu}^{2}\times t_{\mu
}\}$. We assume that this has been done (as it was in Part I).
In constructing the homeomorphism $h_{\alpha}$ there are two aspects of this
general situation which serve to make this step more complicated than that of
Part I: the $2$-plane $\mathbb{E}_{\mu}^{2}$ may intersect $X$ in more than a
single point, and also $X\times t_{\mu}$ may not have a ball regular
neighborhood in $N\times\mathbb{R}^{1}$ whose boundary slices $X\times
\mathbb{R}^{1}$ at precise, entire levels (this was guaranteed in Part I by
$K\times I$ being collapsible). Before dealing with these difficulties, it is
worth noting that if in fact these nice conditions prevail, then the
construction in Part I for $h_{4}$ and $h_{\ast}$ also works here to produce
$h_{\alpha}$.
The following adaptation of the Part I construction will take care of both
difficulties at the same time. Suppose for the moment that $C_{\mu}
\equiv\mathbb{E}_{\mu}^{2}\cap X$ is $0$-dimensional, e.g., a cantor set. Let
$\phi_{\mu}:N\times\mathbb{R}^{1}\longrightarrow N\times\mathbb{R}^{1}$ be a
near-homeomorphism (i.e. a limit of homeomorphisms), supported arbitrarily
near $X\times t_{\mu}$, such that for each $t\in\mathbb{R}^{1}$, if $\phi
_{\mu}(X\times t)\cap\mathbb{E}_{\mu}^{2}\times t_{\mu}\neq\emptyset$, then
$\phi_{\mu}(X\times t)=\operatorname*{point}\in\mathbb{E}_{\mu}^{2}\times
t_{\mu}$, and furthermore these $X\times t$'s are the only nontrivial
point-inverses of $\phi_{\mu}$ (see Figure II-5). To get the map $\phi_{\mu}$,
basically one uses the idea that any cantor set's worth of $X\times t$'s in
$N\times\mathbb{R}^{1}$ is shrinkable. Arguing more precisely, one first can
do a vertical perturbation of $\mathbb{E}_{\mu}^{2}\times t_{\mu}$ to make it
intersect each $X\times t$ in at most a single point, and then one can use
engulfing to shrink these $X\times t$'s to the points of intersection, keeping
fixed the repositioned $\mathbb{E}_{\mu}^{2}\times t_{\mu}$. After shrinking,
one brings the repositioned $\mathbb{E}_{\mu}^{2}\times t_{\mu}$ back to its
original position, using the inverse of the original vertical perturbation.
\begin{figure}
\caption{The construction of $h_{4}$}
\end{figure}
Given $\phi_{\mu}$, the idea now is to do a sort of expansion-meshing
operation, by taking the earlier chosen collar $\Sigma_{\mu}^{m-2}
\times\lbrack-1,1]$ in $D_{\gamma}^{m-2}\times\mathbb{R}^{1}$ (recall
$\epsilon=1$) and isotoping it in $D_{\gamma}^{m-2}\times\mathbb{R}^{1}$ in a
certain manner. Because of the correspondence made above of $\Sigma_{\mu
}^{m-2}\times\lbrack-1,1]\times D^{2}$ with $N_{p}\times\mathbb{R}^{1}\cap
h_{3}^{-1}(N\times\lbrack-1,1])$, this isotoping may be regarded as producing
a modification of the homeomorphism $h_{3}$. The goal of isotoping the collar
is to achieve, for an arbitrary preassigned $\eta>0$, that for each
$t\in\mathbb{R}^{1}$, $\phi_{\mu}(X\times t)\cap\Sigma_{\mu}^{m-2}
\times\lbrack-1,1]\times D^{2}\subset\Sigma_{\mu}^{m-2}\times\lbrack
s-\eta,s+\eta]\times D^{2}$ for some $s\in\lbrack-1,1]$. This is done as follows.
First, one takes the originally chosen collar $\Sigma_{\mu}^{m-2}\times
\lbrack-1,1]$ and, keeping the outer boundary $\Sigma_{\mu}^{m-2}\times1$
fixed, one isotopes the band $\Sigma_{\mu}^{m-2}\times\lbrack-1,1-\eta]$ so
close to the plane $\mathbb{E}_{\mu}^{2}\times t_{\mu}$ that no image
$\phi_{\mu}(X\times t)$, $t$ arbitrary, intersects both $\Sigma_{\mu}
^{m-2}\times1$ and $\Sigma_{\mu}^{m-2}\times\lbrack-1,1-\eta]$. Next, keeping
fixed the repositioned band $\Sigma_{\mu}^{m-2}\times\lbrack1-\eta,1]$ one
isotopes the band $\Sigma_{\mu}^{m-2}\times\lbrack-1,1-2\eta]$ much closer to
the plane $\mathbb{E}_{\mu}^{2}\times t_{\mu}$, so that no image $\phi_{\mu
}(X\times t)$ intersects both $\Sigma_{\mu}^{m-2}\times\lbrack1-\eta,1]$ and
$\Sigma_{\mu}^{m-2}\times\lbrack-1,1-2\eta]$. Continuing this way, the desired
degree of control is achieved.
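Schematically, at the $j^{\text{th}}$ repositioning ($j=1,2,\ldots$, roughly
$2/\eta$ steps in all) one keeps the band $\Sigma_{\mu}^{m-2}\times
\lbrack1-(j-1)\eta,1]$ fixed and isotopes $\Sigma_{\mu}^{m-2}\times
\lbrack-1,1-j\eta]$ still closer to $\mathbb{E}_{\mu}^{2}\times t_{\mu}$, so
that afterwards any image $\phi_{\mu}(X\times t)$ meeting the collar does so
only within two adjacent bands of width $\eta$, i.e.
\[
\phi_{\mu}(X\times t)\cap\Sigma_{\mu}^{m-2}\times\lbrack-1,1]\times D^{2}
\subset\Sigma_{\mu}^{m-2}\times\lbrack s-\eta,s+\eta]\times D^{2}
\]
for some $s\in\lbrack-1,1]$, which is the control asked for above.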
Having done this expansion-meshing operation, independently and disjointly for
each $\mu$, then one can define $h_{\alpha}=\overline{h}_{3}\overline{\phi}$,
where $\overline{\phi}$ is a homeomorphism closely approximating the union
$\phi$ of all of the $\phi_{\mu}$'s that were chosen above, and where
$\overline{h}_{3}$ denotes the homeomorphism obtained by modifying $h_{3}$ by
the expansion-meshing operations just described.
It remains to discuss how the 0-dimensionality of $C_{\mu}$ can be achieved.
If $X$ had dimension $\leq m-2$, i.e. if $X$ had codimension $\geq2$ in $N$,
then achieving this would be an easy matter of putting $X$ in topological
general position with respect to $\mathbb{E}_{\mu}^{2}$. This suggests a
solution: make $X$ have dimension $\leq m-2$. That is, replace $X$ with a new
cell-like space $X_{\ast}$ lying near $X$ and obtained from $X$ by a certain
limiting process, such that $\dim X_{\ast}\leq m-2$ and $M-X_{\ast}\approx
M-X$. The most convenient time to do this is at the very start of the proof,
using the fact that $X$ has arbitrarily small neighborhoods with spines of
dimension $\leq m-2$. For if one takes a nested basis of such neighborhoods,
and squeezes the first one close to its spine, and then the (repositioned)
second one very close to its (repositioned) spine, etc., one produces in the
limit the desired $X_{\ast}$. This process is enlarged upon in the Postscript
below, where a replacement $X_{\ast}$ having much nicer properties is
produced. This completes the proof of Lemma 1, in the $m\geq5$ case.
In the $m=4$ case, the above proof breaks down for one essential reason: Lemma
\ref{repositioning} is unknown. The trouble comes in Lemma
\ref{Stanko-type-lemma}, in constructing the surfaces $\widetilde{A}_{\ell
}^{2}$ (which can in fact be done), and in trying to move these surfaces back
to standard position by isotopy (this is a fundamental problem). The way to
circumvent this difficulty is to use the extra freedom provided by the
$\mathbb{R}^{1}$ coordinate, which in effect turns the problem from a
4-dimensional problem into a 5-dimensional problem. An outline of this rescue
operation follows.
The idea is that one can at least do the already-described motions of Lemma
\ref{Stanko-type-lemma} in $N\times\mathbb{R}^{1}$, if not in $N$. This lets
one prove a weaker version of Lemma \ref{repositioning}, which says that
$X\times\mathbb{R}^{1}$ can be repositioned in $N\times\mathbb{R}^{1}$ so that
$X\times\mathbb{R}^{1}\cap N\times\lbrack-a,a]\subset N_{p}\times\lbrack-a,a]$
for any preassigned large $a$ (of course, the vertical movement of
$X\times\mathbb{R}^{1}$ may have to be as large as $a$, but that is not
important). Now, one can do Steps 1, 2, and 3 above without change. Step
4-$\alpha$ works also (let us assume, as justified above, that $\dim X\leq2$),
even though $X\times\mathbb{R}^{1}$ has been grossly perturbed. One can still
make the intersection of $\mathbb{E}_{\mu}^{2}\times t_{\mu}$ with the
perturbed $X\times\mathbb{R}^{1}$ be 0-dimensional, with the intersection
points all having distinct $\mathbb{R}^{1}$-coordinates in $X\times
\mathbb{R}^{1}$, and then one can shrink these particular $X\times t$ levels
to these intersection points. Then the expansion-meshing works as described,
to produce $h_{\alpha}$.
The proof of Lemma 1 is now complete, for all $m\geq4$, as claimed.
\end{proof}
\indent From this point on, the proof is modeled on \cite[pp. 201, 202]{EM},
just as was the proof in Part I. The first step is to establish the
\textsc{Window Building Lemma. }\emph{Suppose the data }$X\subset N^{m}$\emph{
as earlier (explained before Lemma 1), and let }$H_{\#}=\cup_{\alpha=1}
^{r}H_{\alpha}$\emph{ be the union of the }$(m-2)$\emph{-handles of }
$N$\emph{. Then given any }$\delta>0$\emph{, there is a homeomorphism }
$h_{\#}:N\times\mathbb{R}^{1}\rightarrow N\times\mathbb{R}^{1}$\emph{, fixed
on }$\partial N\times\mathbb{R}^{1}$\emph{, such that}
\begin{itemize}
\item[1.] \emph{for each }$j\in2\mathbb{Z}$\emph{ and each }$t\in\lbrack
j-1,j+1],$ $h_{\#}(N\times t)\subset N\times\lbrack j-1,j+1]$\emph{, and}
\item[2.] \emph{for each }$t\in\mathbb{R}^{1},$\emph{ if }$h_{\#}(X\times
t)\cap H_{\#}\times\lbrack j-1+\delta,j+1-\delta]\neq\emptyset$\emph{ for any
}$j\in2\mathbb{Z}$\emph{, then }$\operatorname*{diam}h_{\#}(X\times t)<\delta
$\emph{.
}
\end{itemize}
\indent This is easily proved from the model single window version, Lemma 1.
The idea is first to construct a homeomorphism $h_{\ast}:N\times\mathbb{R}
^{1}\rightarrow N\times\mathbb{R}^{1}$, compactly supported in
$\operatorname*{int}N\times\mathbb{R}^{1}$, such that $h_{\ast}$ satisfies
condition (2) above for the value $j=0$. This $h_{\ast}$ is gotten by first
selecting some nonoverlapping vertical translates of the \textquotedblleft
window blocks\textquotedblright\ $H_{\alpha}\times\lbrack-1+\delta
,1-\delta]$, $1\leq\alpha\leq r$, then applying Lemma 1 separately to
\textquotedblleft make a window\textquotedblright\ in each of these completely
disjoint blocks, using for this the different $h_{\alpha}$'s, and finally
translating these windows back to their original positions. Next, $h_{\ast}$
can be conjugated by a vertical homeomorphism of $N\times\mathbb{R}^{1}$ to
arrange that the support of $h_{\ast}$ lies in $\operatorname*{int}
N\times(-1,1)$. From this $h_{\ast}$, one builds the desired $h_{\#}$ by
stacking vertical translates of $h_{\ast}$ on top of each other.
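In symbols, writing $\tau_{j}$ for the vertical translation $(x,t)\mapsto
(x,t+j)$ (a notation used only here), the stacking amounts to setting
\[
h_{\#}\big|_{N\times\lbrack j-1,j+1]}\;=\;\bigl(\tau_{j}\,h_{\ast}\,\tau
_{j}^{-1}\bigr)\big|_{N\times\lbrack j-1,j+1]},\qquad j\in2\mathbb{Z},
\]
which is well defined because the support of $h_{\ast}$ has been arranged to
lie in $\operatorname*{int}N\times(-1,1)$, so successive conjugates agree
(both being the identity) on the common levels $N\times\{j+1\}$.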
\begin{proof}
[Proof of Shrinking Proposition from the Window Building Lemma]Given a
neighborhood $U$ of $X$ in $M^{m}$ and an $\epsilon>0$, choose a small ball
$B^{m}$ in $U$, with $\operatorname*{diam}B^{m}<\epsilon/2$ and
$\operatorname*{int}B^{m}\cap X\neq\emptyset$. We show how to construct a
homeomorphism $h:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$,
fixed on $(M-U)\times\mathbb{R}^{1}$, which satisfies the following weakened
versions of the conditions from the Shrinking Proposition: for each
$t\in\mathbb{R}^{1}$,
\begin{itemize}
\item[1$^{\prime}$.] $h(U\times t)\subset U\times\lbrack t-3,t+3]$, and
\item[2$^{\prime}$.] either
\begin{itemize}
\item[a.] $h(X\times t)\subset B^{m}\times\lbrack t-3,t+3]$, or
\item[b.] $\operatorname*{diam}h(X\times t)<\epsilon$.
\end{itemize}
\end{itemize}
From this it is clear that the original Shrinking Proposition follows, simply
by rescaling the vertical coordinate.
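For the record, the rescaling can be taken to be conjugation by $r_{c}
(x,t)=(x,ct)$, $0<c\leq1$ (an ad hoc choice): if $h$ satisfies 1$^{\prime}$
and 2$^{\prime}$, then
\[
h_{c}\;\equiv\;r_{c}\,h\,r_{c}^{-1}\qquad\text{satisfies}\qquad h_{c}(U\times
t)\subset U\times\lbrack t-3c,t+3c]\text{ for every }t,
\]
together with the correspondingly rescaled version of 2$^{\prime}$a, while the
diameter bound in 2$^{\prime}$b is not increased; taking $c$ small gives
whatever vertical control the Shrinking Proposition demands.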
To construct this $h$, first one constructs a certain auxiliary homeomorphism
$g=g_{2}g_{1}:M\times\mathbb{R}^{1}\rightarrow M\times\mathbb{R}^{1}$. Let
$\ast\in\operatorname*{int}B^{m}\cap X$ be a basepoint. First one chooses a
uniformly continuous homeomorphism $g_{2}$, which is supported in
$\bigcup\{U\times\lbrack j+\frac{1}{2},j+\frac{3}{2}]\mid j\in2\mathbb{Z}\}$
and fixed on $\ast\times\mathbb{R}^{1}$, such that for each $j\in
2\mathbb{Z},g_{2}(X\times(j+1))\subset\operatorname*{int}B^{m}\times
\mathbb{R}^{1}$. The existence of $g_{2}$ follows from the cellularity of
$X\times t$ in $M\times\mathbb{R}^{1}$. Let $V$ be a compact neighborhood of
$X$ in $U$ such that $g_{2}(V\times(j+1))\subset\operatorname*{int}B^{m}
\times\mathbb{R}^{1}$ for each $j\in2\mathbb{Z}$, and let $C^{m}$ be a compact
neighborhood of $\ast$ in $B^{m}$ such that $g_{2}(C^{m}\times\mathbb{R}
^{1})\subset\operatorname*{int}B^{m}\times\mathbb{R}^{1}$. By codimension 3
engulfing, there is a (small) neighborhood $N$ of $X$ in $V$, with structure
$N=L\cup\bigcup_{\alpha=1}^{r}H_{\alpha}$ as discussed earlier (so in
particular $L$ has an $(m-3)$-spine), and there is a homeomorphism
$g_{0}:M\rightarrow M$, supported in $V$, such that $g_{0}(L)\subset C^{m}$. Let
$g_{1}=g_{0}\times id_{\mathbb{R}^{1}}:M\times\mathbb{R}^{1}\rightarrow
M\times\mathbb{R}^{1}$. Then $g=g_{2}g_{1}:M\times\mathbb{R}^{1}\rightarrow
M\times\mathbb{R}^{1}$ is such that
\begin{itemize}
\item[1.] $g=$ identity on $(M-U)\times\mathbb{R}^{1}$, and for each
$t\in\mathbb{R}^{1}$, $g(U\times t)\subset U\times\lbrack t-1,t+1],$ and
\item[2.] the image under $g$ of $L\times\mathbb{R}^{1}\cup\bigcup
\{N\times(j+1)\mid j\in2\mathbb{Z}\}$ lies in $B^{m}\times\mathbb{R}^{1}$.
\end{itemize}
Given $g$, then one can let $h=gh_{\#}$, where $h_{\#}$ is provided by the
Window Building Lemma for some sufficiently small value of $\delta
=\delta(\epsilon,g)$, and where $h_{\#}$ is assumed to be extended via the
identity over $(M-N)\times\mathbb{R}^{1}$. This completes the proof of the
Shrinking Proposition, and hence Theorem 2.
\end{proof}
\centerline{\bf Postscript to Part II}
\indent Embedded in the preceding proof is what I call the
\textsc{Replacement Principle for Cell-Like Sets. }\emph{Given any cell-like
compact set }$X$\emph{ in a manifold-without-boundary }$M,$ $\dim M\geq
5$\emph{, then }$X$\emph{ can be replaced by a nearby 1-dimensional cell-like
set }$X_{\ast}$\emph{, such that }$M-X_{\ast}$\emph{ is homeomorphic to
}$M-X,$ \emph{while }$X_{\ast}$\emph{ is homeomorphic to a reduced cone on a
cantor set (defined more precisely below).}
\emph{In fact, the replacement operation can be done in the following
continuous manner. There is an isotopy of embeddings }$g_{t}:M-X\rightarrow
M,$ $0\leq t\leq1$\emph{, starting at }$g_{0}=identity$\emph{ and fixed off of
an arbitrarily small neighborhood of }$X$\emph{, such that }$g_{1}
(M-X)=M-X_{\ast}$\emph{. Thus, in particular, }$X$\emph{ is transformed to
}$X_{\ast}$\emph{ in a semicontinuous fashion, through the intermediate
cell-like sets }$\{M-g_{t}(M-X)\}$\emph{.}
\indent I first enunciated this principle at the time of the Park City
Conference (February 1974), when I proved it for (wildly embedded) cell-like
sets $X$ of codimension $\geq3$, using my variation of the proof of
\v{S}tanko's Approximation Theorem (unpublished notes; cf. \cite{BL2}). In
certain special cases one can find better replacements for $X$, e.g. Giffen's
replacement of the duncehat spine of the Mazur manifold by a $2$-disc \cite{Gi}
(which can then be made into an arc (see \cite{DE} and \cite[\S 3]{She})), but
as a general rule, the above principle seems the best possible, even for $X$ a polyhedron.
\begin{proof}
[Proof of the Replacement Principle for Cell-Like Sets](Using the machinery
of Part II). The basic tool is the following variation of Lemma
\ref{repositioning} (see Figure IIP-1). Recall $m\geq5$.
\end{proof}
\setcounter{figure}{0} \renewcommand{\thefigure}{\Roman{part}P-\arabic{figure}}
\begin{figure}
\caption{The nested neighborhood sequence $\{N_{i}\}$}
\end{figure}
\textsc{Lemma 2}$^{\prime}$\textsc{. }\textit{Given any neighborhood
$N_{0}^{m}=L_{0}\cup\bigcup\{H_{0,\alpha}\mid1\leq\alpha\leq r_{0}\}$ of $X$
with structure as described earlier \textrm{(i.e.,} $L_{0}$ has an
$(m-3)$-spine, and the $H_{0,\alpha}$'s are $(m-2)$-handles attached to
$L_{0}$\textrm{)}, then there is an arbitrarily small neighborhood $N_{1}=L_{1}
\cup\bigcup\{H_{1,\beta}\mid1\leq\beta\leq r_{1}\}$ of $X$ in $\operatorname*{int}
N_{0}$, and a repositioning of $N_{1}$ in $N_{0}$ (by an unnamed isotopy of
$N_{0}\;rel\;\partial N_{0}$), such that after repositioning,}
\begin{itemize}
\item[1.] $L_{1}\subset\;\operatorname*{int}L_{0}$,
\item[2.] \textit{for each handle $H_{0,\alpha}$ of $N_{0}$, the triple
$(H_{0,\alpha},\delta H_{0,\alpha},H_{0,\alpha}\cap N_{1})$ is homeomorphic to
a standard triple $(H(0),\delta H(0),H(1))$, where $H(1)$ is a ramified
collection of 1st stage $(m-2)$-handles in $H(0)$ (as defined earlier; see
Figure II-3, third frame), and }
\item[3.] \textit{each handle $H_{1,\beta}$ of $N_{1}$ intersects at most one
handle $H_{0,\alpha}$ of $N_{0}$, and there is at most one component of
intersection (which by (2) is a single subhandle of $H_{0,\alpha})$.
}
\end{itemize}
\begin{proof}
[Proof of Lemma 2$^{\prime}$]First choose an arbitrarily small neighborhood
$N^{m}$ of $X$ having $(m-2)$-spine, and position $N$ in $N_{0}$, using simple
general positioning, so that for each handle $H_{0,\alpha}$ of $N_{0}$, the
intersection $N\cap H_{0,\alpha}$ goes straight through the handle, looking
like several parallel, smaller copies of $H_{0,\alpha}$ (i.e., like a ramified
version of $H_{0,\alpha}$, as in Figure II-3, second frame). Now, using the
earlier Repositioning Lemma 2, applied to $N$ and the $2$-discs which are the
central cocores of the handle-components of $N\cap H_{0,\alpha}$, for all
$\alpha$, find a repositioning of $X$ in $N$ and a neighborhood $N_{1}$ having
$(m-2)$-spine such that for each handle $H_{0,\alpha}$, the intersection
$H_{0,\alpha}\cap N_{1}$ is as described in (2) above. It remains to achieve
(1) and (3). Let the $(m-2)$-dimensional spine $K_{1}$ of $N_{1}$ be
positioned, and subdivided if necessary, so that $K_{1}^{(m-3)}\subset
\operatorname*{int}L_{0}$, and so that each $(m-2)$-simplex of $K_{1}$
intersects at most one $(m-2)$-handle $H_{0,\alpha}$ of $N_{0}$, in at most a
single $(m-2)$-disc. Now squeeze $N_{1}$ close to its newly positioned spine
$K_{1}$, and let $L_{1}$ be the \textquotedblleft
restriction\textquotedblright\ of $N_{1}$ to $K_{1}^{(m-3)}$. This completes
the proof of Lemma 2$^{\prime}$.
\end{proof}
\begin{proof}
[Proof of Replacement Principle (continued)]Given Lemma 2$^{\prime}$, one
applies it repeatedly to produce an infinite sequence $\{N_{i}=L_{i}
\cup\bigcup\{H_{i,\gamma}\mid1\leq\gamma\leq r_{i}\}\}$ of neighborhoods of
$X$, repositioning $X$ at each step, such that each $N_{i}$ is null homotopic
in its predecessor $N_{i-1}$, and is positioned there as described by Lemma
2$^{\prime}$. This produces in the limit a new cell-like compactum $X_{\infty}
=\cap\{N_{i}\}$. We assume in addition that in the sequence $\{N_{i}\}$ the
handle alignment is maintained from step to step as shown in Figure IIP-1 so
that for any handle $H_{i,\gamma}$ of any neighborhood $N_{i}$, the triple
$(H_{i,\gamma},\delta H_{i,\gamma},H_{i,\gamma}\cap X_{\infty})$ is
homeomorphic to a model ramified triple $(H(0),\delta H(0),H(\infty))$, where
$H(\infty)\equiv\cap_{p=0}^{\infty}H(p)$, which is a ramified collection of
$(m-2)$-dimensional Bing cells. Elaborating this, letting $\delta
H(\infty)\equiv\cap_{p=0}^{\infty}\delta H(p)=H(\infty)\cap\delta H(0)$, then
$\delta H(\infty)$ is a cantor set's worth of $(m-3)$-cells (namely the
$(m-4)$-times-spun Bing collection of arcs) and $(H(\infty),\delta
H(\infty))\approx\delta H(\infty)\times([0,1],0)$. Hence in particular
$X_{\infty}-\operatorname*{int}H_{i,\gamma}$ is a strong deformation retract
of $X_{\infty}$.
For each handle $H_{i,\gamma}$, let $G_{i,\gamma}$ denote the union of all
$(m-2)$-handles $H_{j,\sigma}$, $j>i$, which intersect $H_{i,\gamma}$, and let
$F_{i,\gamma}=G_{i,\gamma}\cap X_{\infty}$. By our assumptions, if
$H_{i,\gamma}$ and $H_{k,\tau}$ are two handles and if $G_{i,\gamma}\cap
G_{k,\tau}\neq\emptyset$ then $k>i$ and $G_{k,\tau}\subset G_{i,\gamma}$ (or
vice versa). More importantly, $F_{i,\gamma}$ is homeomorphic to
$H(\infty)-\delta H(\infty)$ for some ramified model $H(\infty)$. Let
$L_{\infty}=\cap_{i=0}^{\infty}L_{i}\subset X_{\infty}$, so that $X_{\infty
}-L_{\infty}$ is the disjoint union of all of the $F_{i,\gamma}$'s.
\textbf{Claim.} $L_{\infty}$ \emph{is cellular in }$M$.
\textit{Proof.} $L_{\infty}$ is cell-like because it is the intersection of a
nested sequence of cell-like sets, namely $L_{\infty}=\cap_{i=0}^{\infty
}(L_{i}\cap X_{\infty})$. Each set $L_{i}\cap X_{\infty}$ is cell-like because
it is a retract of $X_{\infty}$, for as noted above, $X_{\infty}
-\operatorname*{int}H_{i,\gamma}$ is a strong deformation retract of
$X_{\infty}$ for each $(m-2)$-handle $H_{i,\gamma}$ of $N_{i}$. Granted that
$L_{\infty}$ is cell-like, then it is cellular because it is an intersection
of manifolds having $(m-3)$-spines. This establishes the Claim.
Since $L_{\infty}$ is cellular, it can be shrunk to a point, producing
$X_{\#}=X_{\infty}/L_{\infty}$. This space $X_{\#}$ has particularly nice
structure, being a countable null wedge of subsets of $M$ each homeomorphic to
$H(\infty)/\delta H(\infty)$. This latter set is a cone on a cantor set's
worth of $(m-3)$-cells (as in Bing's original $m=3$ case). To obtain $X_{\ast}$
from $X_{\#}$, one can use the shrinkability of the spun Bing collection of
$(m-3)$-cells (see the Appendix) to shrink each copy of $H(\infty)/\delta
H(\infty)$ to a cone on a cantor set, by shrinking to a cantor set each of the
parallel copies of $\delta H(\infty)\times\{\operatorname*{point}\}$ that make
up $H(\infty)/\delta H(\infty)\approx\delta H(\infty)\times\lbrack0,1]/\delta
H(\infty)\times0=c\delta H(\infty)$.
\end{proof}
\centerline{\bf Appendix. Shrinking the spun Bing decomposition.}
\indent The $k$-times spun Bing decomposition of euclidean $(3+k)$-space into
points and tame $(k+1)$-discs, described earlier and again below, is at the
very heart of the work of Parts I and II (and Cannon's subsequent work, too).
The purpose of this Appendix is to establish:
\begin{theorem}
\label{spun-bing}The spun Bing decomposition is shrinkable.
\end{theorem}
\indent The ramified versions of the spun Bing decomposition, which appeared
earlier in Part II, are also shrinkable. This will be evident from the
construction below, which also works in the ramified context, with identical
motions and equal efficiency.
The $k$-times spun Bing decomposition has been around for some time (as I
found out after needing and proving the above theorem in 1974). L. Lininger,
in order to exhibit some nonstandard topological involutions of spheres,
showed in \cite{Li} that the once-spun version was shrinkable, and suggested
that the higher dimensional versions were also. Neuzil's thesis \cite{Neu}
handled a class of decompositions of 4-space that included the once-spun
version. Recently, R. Daverman \cite[Cor 11.7]{Da3} showed that the
shrinkability of the general $k$-times-spun versions, $k\geq2$, followed from
a general mismatch theorem. This was adapted to the $k=1$ case in \cite{CD}.
\indent The proof given here is elementary, in the sense of Bing's original
proof, for that is what it is modeled on. In fact, it is almost fair to say
that one simply takes Bing's original proof and spins it.
\begin{proof}
[Proof of Theorem \ref{spun-bing}]The proof is divided into two natural steps.
The first step describes a certain decomposition of $\mathbb{R}^{3}
\times\lbrack0,\infty)$, gotten by unfolding, or unraveling, the original Bing
decomposition of $\mathbb{R}^{3}\times0$, and then shows that this
decomposition of $\mathbb{R}^{3}\times\lbrack0,\infty)$ is shrinkable in a
certain level-preserving manner. The second step shows how to spin these
motions to shrink the spun Bing decomposition.
\textbf{Step 1. }Consider the original Bing decomposition of $\mathbb{R}^{3}$
(from \cite{Bi1}), as shown at the bottom of Figure IIA-1. It is defined using
two disjoint, linked embeddings $\chi_{-},\chi_{+}:S^{1}\times D^{2}
\rightarrow S^{1}\times\operatorname*{int}D^{2}$, whose images are denoted
$S_{-}^{1}\times D^{2}$ and $S_{+}^{1}\times D^{2}$. By taking iterates of
these embeddings, e.g. $\chi_{+}(\chi_{-}(S^{1}\times D^{2}))$, one obtains
the deeper stages of the defining neighborhood sequence of solid tori. At the
$p^{\text{th}}$ stage, there are $2^{p}$ linked solid tori, denoted $\{S_{\mu
}^{1}\times D^{2}\mid\mu\in\{-,+\}^{p}\}$, embedded in the original solid
torus $S^{1}\times D^{2}$. Letting $T_{p}$ denote the union of the $2^{p}$
$p^{\text{th}}$ stage components, then the nontrivial elements of the Bing
decomposition of $\mathbb{R}^{3}$ are the components of the intersection
$\cap_{p=1}^{\infty}T_{p}$, which can be arranged to be $C^{\infty}$-smooth
arcs, as suggested in Figure IIA-1.
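For definiteness (the indexing convention is not fixed in the text; any
consistent one will do), for $\mu=(\mu_{1},\ldots,\mu_{p})\in\{-,+\}^{p}$ one
may take
\[
S_{\mu}^{1}\times D^{2}\;=\;\chi_{\mu_{1}}\circ\cdots\circ\chi_{\mu_{p}}
(S^{1}\times D^{2}),\qquad T_{p}\;=\;\bigcup_{\mu\in\{-,+\}^{p}}S_{\mu}
^{1}\times D^{2},
\]
so that the first letter of $\mu$ records the outermost embedding.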
\setcounter{figure}{0} \renewcommand{\thefigure}{\Roman{part}A-\arabic{figure}}
\begin{figure}
\caption{Unraveling the original Bing decomposition in $\mathbb{R}^{3}\times\lbrack0,\infty)$}
\end{figure}
We wish to extend this decomposition of $\mathbb{R}^{3}=\mathbb{R}^{3}\times0$
to one of $\mathbb{R}^{3}\times\lbrack0,\infty)$, with all decomposition
elements lying in levels $\{\mathbb{R}^{3}\times t\}$, by unraveling it, as
suggested by Figure IIA-1. Fix some small $\eta>0$. Then, as one works up
through the levels, from $t=0$ to $t=1$, the following activity takes place:
from $t=0$ to $t=1-\eta$, nothing happens, i.e., the decomposition of each
intermediate level is just a translate of the decomposition of the 0-level;
from $t=1-\eta$ to $t=1-\eta/2$, the first stage is unraveled, i.e.,
$S_{+}^{1}\times D^{2}$ and $S_{-}^{1}\times D^{2}$ are isotoped until they
look disjoint (i.e., are separated by a hyperplane), as pictured; from
$t=1-\eta/2$ to $t=1-\eta/3$, the second stage is unraveled, etc. What one
obtains is an unraveling of the original Bing decomposition of $\mathbb{R}
^{3}\times0$ to a standard decomposition of $\mathbb{R}^{3}\times1$ consisting
of a straight cantor set's worth of arcs. (It is interesting to ponder the
pseudoisotopy of $\mathbb{R}^{3}$ that this process yields in the limit, but
that is another matter.)
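Spelling the schedule out in a formula: the $p^{\text{th}}$ stage is unraveled
during the parameter interval
\[
1-\frac{\eta}{p}\;\leq\;t\;\leq\;1-\frac{\eta}{p+1},\qquad p\geq1,
\]
so that in every level $\mathbb{R}^{3}\times t$ with $t\geq1-\eta/(p+1)$ the
$2^{p}$ components of the $p^{\text{th}}$ stage already appear in their
separated (unraveled) position.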
There is a natural \textquotedblleft defining neighborhood
sequence\textquotedblright\ for this decomposition of $\mathbb{R}^{3}
\times\lbrack0,\infty)$, which we describe at this point. For each $p\geq0$
and each $\mu\in\{-,+\}^{p}$, let $C_{\mu}^{3}$ denote the convex hull of the
$p^{\text{th}}$ stage component $S_{\mu}^{1}\times D^{2}$ \textbf{after} it
has been unraveled, i.e., in any level $\mathbb{R}^{3}\times t$ for
$1-\eta/(p+1)\leq t\leq1$. Let $N_{p}^{4}$ denote the union of $\cup\{C_{\mu
}^{3}\mid|\mu|=p\}\times\lbrack1-\eta/(p+1),1+\eta/(p+1)]$ together with the
unraveling images of $\cup\{S_{\mu}^{1}\times D^{2}\mid|\mu|=p\}$ in
$\mathbb{R}^{3}\times\lbrack0,1-\eta/(p+1)]$. Then each component of $N_{p}^{4}$ is
homeomorphic to $B^{2}\times D^{2}$, with its intersection with $\mathbb{R}^{3}\times0$
corresponding to $\partial B^{2}\times D^{2}$, and furthermore the above
described decomposition of $\mathbb{R}^{3}\times\lbrack0,\infty)$ has as its
nontrivial elements the components of $(\cap_{p=0}^{\infty}N_{p}^{4}
)\cap\mathbb{R}^{3}\times t$, for $t$ varying from 0 to 1.
It turns out that this decomposition of $\mathbb{R}^{3}\times\lbrack0,\infty)$
is shrinkable \emph{in a level-preserving manner}. This calls for a mild
extension of Bing's original shrinking argument.
As Bing showed, the key to shrinking his decomposition is to regard
$S^{1}\times D^{2}$ as being long and thin, as drawn in Figure IIA-2, and to
show that if one cuts $S^{1}\times D^{2}$ into any number of chambers by means
of transverse $2$-planes in $\mathbb{R}^{3}$, then the decomposition elements
can be isotoped in $S^{1}\times D^{2}$ so that when done each lies inside a
single chamber. To save words, we make a definition for this. Suppose
$P_{1}^{2},\ldots,P_{q}^{2}$ are $q$ parallel $2$-planes in $\mathbb{R}^{3}$,
transverse to the \textquotedblleft long\textquotedblright\ axis of
$S^{1}\times D^{2}$ as shown in Figure IIA-2, such that the two \textquotedblleft
bends\textquotedblright\ of $S^{1}\times D^{2}$ lie to the outer sides of the
endmost planes $P_{1}$ and $P_{q}$. These planes cut $\mathbb{R}^{3}$ into
$q+1$ \emph{chambers} (and they cut $S^{1}\times D^{2}$ into $2q$ chambers).
We will say that the $p^{th}$ stage of the Bing decomposition is
\emph{essentially }$\emph{q}$\emph{-chamberable} if, for any $\delta>0$, there
is an ambient isotopy of $\mathbb{R}^{3}$, fixed outside of $S^{1}\times
D^{2}$, such that the image of each of the $2^{p}$ $p^{\text{th}}$ stage solid
tori under the final homeomorphism lies in the $\delta$-neighborhood of one of
these chambers. Generalizing this in a natural manner, we say that the
$p^{th}$ stage of the developed Bing decomposition of $\mathbb{R}^{3}
\times\lbrack0,\infty)$ (as described above) is \emph{essentially
q-chamberable} if, for any $\delta>0$, there is a level-preserving ambient
isotopy of $\mathbb{R}^{3}\times\lbrack0,\infty)$, fixed outside of
\[
N_{0}^{4}=(S^{1}\times D^{2}\times\lbrack0,1-\eta])\cup(C^{3}\times
\lbrack1-\eta,1+\eta])\text{,}
\]
where $C^{3}\approx B^{3}$ is the convex hull of $S^{1}\times D^{2}$ in
$\mathbb{R}^{3}$, such that the image of each of the $2^{p}$ components of the
$p^{\text{th}}$ stage neighborhood $N_{p}^{4}$ under the final homeomorphism
lies in the $\delta$-neighborhood of one of the $q+1$ chambers of
$\mathbb{R}^{3}\times\lbrack0,\infty)$ determined by the 3-planes $P_{i}
^{2}\times\lbrack0,\infty),1\leq i\leq q$.
Bing proved that for any $q\geq0$, the $q^{th}$ stage of his decomposition is
essentially $q$-chamberable. The following proposition is a simple extension
of this.
\textbf{Proposition 1.} \emph{For any }$q\geq0$\emph{, the }$q^{th}$\emph{
stage of the developed Bing decomposition of }$\mathbb{R}^{3}\times\lbrack0,\infty
)$\emph{ is essentially q-chamberable.}
\textit{Proof of Proposition 1.} By induction on $q$. See Figure IIA-3 (also,
Bing's pictures in \cite{Bi1} are helpful here). Let the $2$-planes $P_{1}
^{2},\ldots,P_{q}^{2}$ be given. The first step is to apply the induction
hypothesis separately inside of the two components $N_{-}^{4}$ and $N_{+}^{4}$
of $N_{1}^{4}$, to reposition in them the components of the $q^{\text{th}}$
stage $N_{q}^{4}$ which lie there, as follows. For $N_{-}^{4}$, using the
initial $q-1$ $2$-planes $P_{1}^{2},\ldots,P_{q-1}^{2}$, the induction
hypothesis provides a level-preserving isotopy of $\mathbb{R}^{3}\times
\lbrack0,\infty)$ supported in $N_{-}^{4}$ such that for each of the $2^{q-1}$
components of $N_{q}^{4}$ lying in $N_{-}^{4}$, its image under the final
homeomorphism lies in the $\delta$-neighborhood ($\delta$ arbitrarily small)
of one of the $q$ chambers of $\mathbb{R}^{3}\times\lbrack0,\infty)$
determined by $P_{1}^{2}\times\lbrack0,\infty),\ldots,P_{q-1}^{2}\times
\lbrack0,\infty)$. For $N_{+}^{4}$, one finds a similar isotopy, \textbf{using
instead} the final $q-1$ $2$-planes $P_{2}^{2},\ldots,P_{q}^{2}$. Let these
isotopies be applied to $\mathbb{R}^{3}\times\lbrack0,\infty)$.
\begin{figure}
\caption{The left side shows how $N_{-}^{4}\cup N_{+}^{4}$ sits in $N_{0}^{4}$ before the isotopy, the right side after}
\end{figure}
The Proposition is completed by isotoping $N_{-}^{4}\cup N_{+}^{4}$ in
$N_{0}^{4}$ in a certain careful manner, as shown in Figure IIA-3.
It is best to describe this level-preserving isotopy in three pieces. Below
the $(1-\eta)$-level, one does in each $t$-level the classic Bing move, whose
motion here takes place only in the two outermost chambers (i.e. the two
chambers whose frontiers are $P_{1}^{2}\times\lbrack0,\infty)$ and $P_{q}
^{2}\times\lbrack0,\infty)$). In the leftmost chamber one pulls the loop of
$N_{+}^{4}$ very tight to make it poke into this chamber no more than distance
$\delta$. In the rightmost chamber one similarly pulls the loop of $N_{-}^{4}$
very tight. Of course, these pulling motions stretch the other two end-loops
longer, but that does not matter. Above the $(1-\eta/2)$-level, where
$S_{-}^{1}\times D^{2}$ and $S_{+}^{1}\times D^{2}$ are in their unraveled
positions, the motion again takes place only in the two outermost chambers,
and again the same pulling-tight rule is applied, making small the left
end-loop of $N_{+}^{4}$ and the right end-loop of $N_{-}^{4}$. The nontrivial
part of the isotopy is what takes place in between these two levels. This is
described in Figure IIA-3, which shows the positioning of $N_{-}^{4}\cup
N_{+}^{4}$ in $N_{0}^{4}$ before and after the isotopy is applied. That is,
the left side of Figure IIA-3 shows the original unraveling motion of
$S_{-}^{1}\times D^{2}\cup S_{+}^{1}\times D^{2}$ in the various levels
$\mathbb{R}^{3}\times t,0\leq t\leq1+\eta$, and the right side of Figure IIA-3
shows the new unraveling motion, which has been gotten by applying the
level-preserving isotopy of $N_{0}^{4}$ to the original unraveling motion. The
important thing is that between the 3-planes $P_{1}^{2}\times\lbrack0,\infty)$
and $P_{q}^{2}\times\lbrack0,\infty)$ this new unraveling motion
\emph{respects projection to the \textquotedblleft long\textquotedblright
\ axis} (i.e., the left-right axis, in the pictures). Hence the chambering
which was already achieved inside of $N_{-}^{4}$ and $N_{+}^{4}$ by the
earlier motions which took place there is not lost. It can be verified that
after applying this pictorially-described isotopy, the desired chambering has
been achieved, completing the proof.
Strictly speaking, the only part of Proposition 1 that is used in Step 2 is
the motion of $N_{-}^{4}\cup N_{+}^{4}$ in $N_{0}^{4}$ described above. But it
is useful to understand the entire Proposition, for it is a special case of
what follows.
\textbf{Step 2.} In this step, the motions of Proposition 1 are applied to
shrink the $k$-times-spun Bing decomposition. In a nutshell, the point is that
the unfolded Bing decomposition described in Step 1 is exactly one-half of the
once-spun Bing decomposition (after amalgamating arcs in the former to become
half-discs in the latter), and furthermore, the $k$-times-spun Bing
decomposition, $k\geq2$, can be gotten by $(k-1)$-spinning the unfolded Bing
decomposition (with its elements amalgamated). This entire step can best be
understood by concentrating on the $k=1$ and $k=2$ cases.
To define the $k$-times-spun Bing decomposition of $\mathbb{R}^{3+k}$ into
points and tame $(1+k)$-cells, one starts with half of the original Bing
decomposition, lying in $\mathbb{R}^{2}\times\lbrack0,\infty)$ as shown in
Figure IIA-4, with stages denoted by $B^{1}\times D^{2}$ and $\{B_{\mu}
^{1}\times D^{2}\mid\mu\in\{-,+\}^{p}\}$. Then one imagines $\mathbb{R}^{3+k}$
as being obtained from $\mathbb{R}^{2}\times\lbrack0,\infty)$ by spinning
$\mathbb{R}^{2}\times\lbrack0,\infty)$ through a $k$-sphere's worth of
directions, keeping $\mathbb{R}^{2}\times0$ fixed. During this spinning, the
arcs of the half-Bing-decomposition sweep out $(1+k)$-cells in $\mathbb{R}
^{3+k}$. (Equivalently, one can define this decomposition of $\mathbb{R}
^{3+k}$ to be the restriction to the boundary $(3+k)$-sphere of the product
decomposition of $B^{1}\times D^{2}\times I^{1+k}$ into $(2+k)$-cells. But the
first model seems better suited to this proof.) The $p^{\text{th}}$ stage
defining neighborhoods of the $k$-times-spun Bing decomposition are the
thickened $(1+k)$-spheres $\{S_{\mu}^{1+k}\times D^{2}\mid\mu\in\{-,+\}^{p}
\}$, which are the images of the cylinders $\{B_{\mu}^{1}\times D^{2}\mid
\mu\in\{-,+\}^{p}\}$ under the spinning.
\begin{figure}
\caption{Half of the original Bing decomposition of $\mathbb{R}^{3}$, lying in $\mathbb{R}^{2}\times\lbrack0,\infty)$}
\end{figure}
In trying to shrink this decomposition, the important coordinate is the
$\mathbb{R}^{1+k}$ coordinate in $\mathbb{R}^{2}\times\mathbb{R}
^{1+k}=\mathbb{R}^{3+k}$, i.e., the coordinate which is perpendicular to the
fixed plane $\mathbb{R}^{2}$. In the original Bing case, i.e. when $k=0$, this
is the long-and-thin coordinate direction. For higher $k$ it might be thought
of as the flat-and-thin coordinate direction. The point is, if one projects
the $k$-times-spun Bing decomposition onto this $\mathbb{R}^{1+k}$ coordinate,
then each $(1+k)$-disc is projected homeomorphically onto a ball $I^{1+k}$ in
$\mathbb{R}^{1+k}$ (at least if the decomposition is correctly positioned,
e.g. as in Figure IIA-1, bottom frame). Also, the core $(1+k)$-sphere of each
component $S_{\mu}^{1+k}\times D^{2}$ of any stage of the defining
neighborhood sequence is flattened by this projection to be a $(1+k)$-disc.
Now we shift from round thinking to square thinking, in order to talk about
chambers. Regarding $I^{1+k}$ as $[-1,1]^{1+k}$ (which explains the reason for
the letter $I$), we can choose a set of $k$-planes in $\mathbb{R}^{1+k}$, each
being perpendicular to one of the $1+k$ coordinate axes and each passing
through $\operatorname*{int}I^{1+k}$, so that these $k$-planes cut
$\mathbb{R}^{1+k}$ into \emph{chambers}, whose intersections with $I^{1+k}$
are small. For notational purposes, let this collection of $k$-planes be
denoted $P^{k}(\vec{q})$, where the $(1+k)$-tuple of non-negative integers
$\vec{q}=(q_{1},q_{2},\ldots,q_{1+k})$ lists the number of $k$-planes
perpendicular to each axis. Let $\Vert\vec{q}\Vert=\sum_{i=1}^{1+k}q_{i}$.
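For orientation, an elementary count: since the $k$-planes of $P^{k}(\vec{q})$
come in $1+k$ parallel families, with $q_{i}$ of them perpendicular to the
$i^{\text{th}}$ axis, they cut $\mathbb{R}^{1+k}$ into
\[
\prod_{i=1}^{1+k}(q_{i}+1)
\]
chambers; when $k=0$ this reduces to the $q+1$ chambers of Step 1.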
Crossing these $k$-planes and chambers with $\mathbb{R}^{2}$, we get a
partitioning of $\mathbb{R}^{3+k}$, which we will use to measure smallness
with regard to the spun Bing decomposition. But there is one technical point
which has to be discussed, namely how these $(2+k)$-planes are allowed to pass
through the \textquotedblleft bends\textquotedblright\ in the $S_{\mu}
^{1+k}\times D^{2}$'s near $\partial I^{1+k}$ (as they must do, when $k\geq
1)$. The idea here is to impose as much regularity as possible. One way to say
this accurately is, given the collection $P^{k}(\vec{q})$ of $k$-planes, and
letting $\epsilon$ be the minimum distance from any of the $k$-planes to
either of the two $k$-faces of $I^{1+k}$ parallel to it, then the
decomposition is made so taut, and the bends in the defining neighborhoods
$\{S_{\mu}^{1+k}\times D^{2}\}$ are made so sharp, and so near $\partial
I^{1+k}$, that for any of the $(1+k)$-choose-$\ell$ coordinate sub-spaces
$\mathbb{R}^{\ell}$ of $\mathbb{R}^{1+k}$ $(1\leq\ell\leq1+k)$, the projection
$\pi:\mathbb{R}^{3+k}\rightarrow\mathbb{R}^{\ell}$, when restricted \textbf{over} the
subset $[-1+\epsilon/2,1-\epsilon/2]^{\ell}\subset\mathbb{R}^{\ell}$, is a
product map on all members of the defining neighborhood sequence (i.e., any
bending, or pinching, in the $\mathbb{R}^{\ell}$ coordinate direction occurs
outside of $\pi^{-1}([-1+\epsilon/2,1-\epsilon/2]^{\ell})$). Hence this
restricted part of the $k$-times-spun decomposition (and its defining
neighborhood sequence) looks like the product of the $(k-\ell)$-times-spun
Bing decomposition (and its defining neighborhood sequence) with the cube
$[-1+\epsilon/2,1-\epsilon/2]^{\ell}$. One consequence of this regularity is
that the two basic spun embeddings $\chi_{-},\chi_{+}
:S^{1+k}\times D^{2}\rightarrow S^{1+k}\times D^{2}$ which define the
$k$-times-spun Bing decomposition (cf. start of Step 1) can be assumed to
respect the chambering of $\mathbb{R}^{3+k}$ by the codimension one
hyperplanes $\mathbb{R}^{2}\times P^{k}(\vec{q})$, that is, for each such
chamber $C$, $\chi_{\pm}(C\cap S^{1+k}\times D^{2})\subset C$. Hence
the various finite compositions of $\chi_{-}$ and $\chi_{+}$ have this
property. This will be important in Proposition 2 below.
Generalizing the earlier definition, we say the $p^{\text{th}}$ stage of the
$k$-times-spun Bing decomposition is \emph{essentially }$q$\emph{-chamberable}
if for any collection $P^{k}(\vec{q})$ of $k$-planes as
described above, with $\Vert\vec{q}\Vert=q$, and for any $\delta>0$, there is
an ambient isotopy of $\mathbb{R}^{3+k}$, fixed outside of the $0^{\text{th}}$
stage $S^{1+k}\times D^{2}$, such that the image of each of the $2^{p}$
$p^{\text{th}}$ stage components $S_{\mu}^{1+k}\times D^{2}$ under the final
homeomorphism lies in the $\delta$-neighborhood of one of the chambers of
$\mathbb{R}^{3+k}$ determined by $P^{k}(\vec{q})$.
\textbf{Proposition 2.} \emph{For any }$q\geq0$\emph{, the }$q^{th}$\emph{
stage of the }$k$\emph{-times-spun Bing decomposition of }$\mathbb{R}^{3+k}$\emph{ is
essentially }$q$\emph{-chamberable.}
\emph{Proof of Proposition 2. } By induction on $q$. Let $q_{i}$ be any
nonzero component of $\vec{q}=(q_{1},\ldots,q_{1+k})$. We want to regard the
$k$-times-spun Bing decomposition as being gotten by $(k-1)$-spinning the
unfolded Bing decomposition. This $(k-1)$-spinning is to be done in
$\mathbb{R}^{3+k}=\mathbb{R}^{2}\times\mathbb{R}^{1+k}$, by taking the base
$\mathbb{R}^{3}\times0$ of $\mathbb{R}^{3}\times\lbrack0,\infty)$ and
identifying it with $\mathbb{R}^{2}\times\mathbb{R}^{1}(e_{i})$, where
$e_{i}$ is the $i^{\text{th}}$ coordinate direction in $\mathbb{R}^{1+k}$ ($i$
as in $q_{i}$, above), and then by spinning $\mathbb{R}^{3}\times
\lbrack0,\infty)$ through the $(k-1)$-sphere's worth of directions in
$\mathbb{R}^{k}(e_{1},\ldots,\widehat{e}_{i},\ldots,e_{1+k})$. The important
thing is: this spinning can be done so that
\begin{itemize}
\item[a)] the $(k-1)$-spin of the defining neighborhood sequence of the
unfolded Bing decomposition (i.e., the $N_{p}^{4}$'s in Step 1) corresponds
to the defining neighborhood sequence of the $k$-times-spun Bing
decomposition (i.e., to the unions $\cup\{S_{\mu}^{1+k}\times D^{2}\mid\mu
\in\{-,+\}^{p}\}$, for each $p\geq0$; hence in particular the decompositions
will coincide);
\item[b)] the $q_{i}$ $(2+k)$-planes of $\mathbb{R}^{2}\times P^{k}(\vec{q})$
which are perpendicular to $\mathbb{R}^{1}(e_{i})$ become perpendicular to the
$(-\infty,\infty)$-coordinate of $\mathbb{R}^{3}=\mathbb{R}^{2}\times
(-\infty,\infty)$ in the $(k-1)$-spin structure, and
\item[c)] the remaining $q-q_{i}\ (2+k)$-planes of $\mathbb{R}^{2}\times
P^{k}(\vec{q})$ become parallel to the $\mathbb{R}^{3}$ coordinate in the
$(k-1)$-spin structure.
\end{itemize}
Granted this, then Proposition 2 follows using the argument from Proposition
1, as we now explain.
First, one applies the induction hypothesis of Proposition 2 separately to the
two first stage components $S_{-}^{1+k}\times D^{2}$ and $S_{+}^{1+k}\times
D^{2}$. For notation here, let $P_{i,1}^{k}$ and $P_{i,q_{i}}^{k}$ denote the
first and the last (i.e. the extremes) among the subcollection $P_{i}^{k}$ of
$q_{i}$ $k$-planes in $P^{k}(\vec{q})$ which are perpendicular to
$\mathbb{R}^{1}(e_{i})$ (if $q_{i}=1$, then these two $k$-planes coincide).
Let $P_{-}=P^{k}(\vec{q})-\{P_{i,q_{i}}^{k}\}$ and $P_{+}=P^{k}(\vec{q}
)-\{P_{i,1}^{k}\}$. Then applying the induction hypothesis to those of the
$q^{\text{th}}$ stage components $\{S_{\mu}^{1+k}\times D^{2}\mid\mu\in\{-,+\}^{q}\}$ which
lie inside of $S_{-}^{1+k}\times D^{2}$, using the collection $P_{-}$ of $q-1$
$k$-planes, one obtains an isotopy of $S_{-}^{1+k}\times D^{2}\;rel\;\partial$
such that the image of each of these components under the final homeomorphism
lies in the $\delta$-neighborhood ($\delta$ arbitrarily small) of one of the
chambers of $\mathbb{R}^{3+k}$ determined by $P_{-}$. Similarly, one obtains
an isotopy of $S_{+}^{1+k}\times D^{2}\;rel\;\partial$ such that the image of
each of the remaining $q^{\text{th}}$ stage components under the final
homeomorphism lies in the $\delta$-neighborhood of one of the chambers of
$\mathbb{R}^{3+k}$ determined by $P_{+}$. After applying these two isotopies
in $\mathbb{R}^{3+k}$, one completes the motion of Proposition 2 by applying
the corresponding next motion of Proposition 1, spun $k-1$ times in the
structure described above to become a motion of $\mathbb{R}^{3+k}$. The
$q_{i}$ $2$-planes that are used for the motion of Proposition 1 are the
intersections with $\mathbb{R}^{2}\times\mathbb{R}^{1}(e_{i})$ of the $q_{i}$
perpendicular planes in the collection $\mathbb{R}^{2}\times P_{i}^{k}$. The
fact that the motion from Proposition 1 was level-preserving, combined with
(c) above, ensures that the $\delta$-control imposed by the initial isotopies
supported in $S_{-}^{1+k}\times D^{2}\cup S_{+}^{1+k}\times D^{2}$ is not
diminished at all. Using the same analysis as in Proposition 1, the proof is
seen to be complete.
\end{proof}
\centerline{\bf The Interlude}
In the (seemingly long) period between my proofs of Theorem 2 (basically
January 1975) and the nearly definitive Triple Suspension Theorem (Part IV;
October 1976), a certain fact became known, which seemed to make it more
likely that the Multiple Suspension Conjecture was true. For historical
interest, it seems worth recalling.
\textsc{Observation. }\emph{Suppose }$H_{1}^{3},H_{2}^{3}$\emph{ are homology
3-spheres. Fix }$k\geq2$\emph{. Then }$\Sigma^{k}(H_{1}^{3}\#H_{2}^{3})\approx
S^{k+3}\Leftrightarrow\Sigma^{k}H_{i}\approx S^{k+3}$\emph{ for }
$i=1,2$\emph{.
}
If the Multiple Suspension Conjecture were to be false, it seemed natural that
it would fail precisely for Rokhlin invariant 1 homology 3-spheres, because of
the significant relation they were known to have with regard to $PL$ versus
non-$PL$ triangulations of manifolds (cf. Prologue). But a consequence of the
above observation is the
\textsc{Corollary. }\emph{The following conjecture is false: }$\Sigma^{k}
H^{3}\approx S^{k+3}\Leftrightarrow\mu(H^{3})=0$\emph{, where }$\mu$\emph{ is
the Rokhlin invariant in }$\mathbb{Z}/2$\emph{.}
The corollary follows because $\mu$ is additive under connected sum.
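To spell out the deduction (using, say, the Poincar\'{e} homology sphere as an
$H^{3}$ with $\mu(H^{3})=1$): if the conjectured equivalence held, then
\[
\mu(H^{3}\#H^{3})=\mu(H^{3})+\mu(H^{3})=0\;\Longrightarrow\;\Sigma^{k}
(H^{3}\#H^{3})\approx S^{k+3}\;\Longrightarrow\;\Sigma^{k}H^{3}\approx
S^{k+3}\;\Longrightarrow\;\mu(H^{3})=0,
\]
the middle implication being the Observation, contradicting $\mu(H^{3})=1$.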
The implication $\Leftarrow$ of the Observation is implicit in \cite[proof of
Prop 4]{Gl2}, \cite[Step 5]{Gl3}, and is explicitly noted in \cite{ES}. The
implication $\Rightarrow$ (which I realized in February 1975, and others did
independently around then) is a consequence of \v{S}tanko's Approximation
Theorem in the codimension one setting (\cite{St3}; its fault (see \cite{AC})
is immaterial here). It says that, granted ${\mathring{c}}(H_{1}^{3}
\#H_{2}^{3})\times\mathbb{R}^{k-1}$ is a manifold, then the natural closed
subset ${\mathring{c}}(H_{i}^{3}-\operatorname*{int}B^{3})\times
\mathbb{R}^{k-1}$ ($i=1$ or $2$) can be re-embedded to have 1-LC complement, hence
(by \cite{Da2} or \cite{Ce}) be collared, hence ${\mathring{c}}H_{i}^{3}
\times\mathbb{R}^{k-1}$ is a manifold.
\part{The double suspension of any homology $3$--sphere is the image of
$S^{5}$ under a cell-like map}
\indent When working on the double suspension question for an arbitrary
homology 3-sphere $H^{3}$, it is natural first to ask whether it can be
reduced to a cell-like decomposition space problem, i.e., whether $\Sigma
^{2}H^{3}$ is the cell-like image of some manifold. The aim of this section is
to prove that this is so.
\begin{theorem}
\label{existence-of-cell-like-map}\textit{For any homology $n$-sphere $H^{n}$,
there is a cell-like map $f:S^{n+2}\rightarrow\Sigma^{2}H^{n}$ from the
$(n+2)$-sphere onto the double suspension of $H^{n}$.}
\end{theorem}
This was proved independently by J. Cannon in \cite{Can1}.
Recall that the Theorem is readily established in the cases where $H^{n}$ is
known to bound a contractible $(n+1)$-manifold, hence whenever $n\geq4$ (see
the remark following the Proposition, Part II). So the new content of the
theorem is the $n=3$ case (but the proof works for any $n$).
The construction which follows is the second I formulated (in February 1976);
the first (September 1975) is sketched at the end of this part. This second
construction has the advantage that the prerequisites are simpler (they being
only the half-open $h$-cobordism theorem and the local contractibility of the
homeomorphism group of a topological manifold), and also that the non-trivial
point-inverses of $f$ are easier to understand.
The following construction grew out of my attempt to understand a fundamental
manifold in PL-TOP manifold theory: the compact topological manifold $P^{5}$,
homotopy-equivalent to $S^{1}$, such that $\partial P^{5}\approx H^{3}\times
S^{1}$, where $H^{3}$ is any given homology 3-sphere. The importance of such a
manifold $P^{5}$ is that, when the Rokhlin invariant of $H^{3}$ is nonzero,
then $P^{5}$ provides a counterexample to the PL triangulation conjecture for
topological manifolds (cf. Prologue, Section I). (The goal of the double
suspension problem is to show that $P^{5}$ is in fact $cH^{3}\times S^{1}$.
The Theorem above amounts to constructing a cell-like map $p$ from
$P^{5}$ onto $cH^{3}\times S^{1}$ such that $p$ is a homeomorphism on
$\partial P^{5}$. The above-implied uniqueness of $P^{5}$ is a consequence of
the 6-dimensional topological s-cobordism theorem applied to a cobordism
constructed to join rel $\partial$ two candidates $P_{0}$ and $P_{1}$. The
cobordism, which is homeomorphic to $B^{5}\times S^{1}$, exists because
$P_{0}\cup_{\partial}P_{1}$ is homotopically equivalent to $S^{4}\times S^{1}
$, hence is homeomorphic to $S^{4}\times S^{1}$ (applying the topological
version of \cite[Thm 1.2]{Sha1} \cite[Thm 1.1]{Sha2}; or see \cite[V, App
C]{KS2}, keeping in mind that $\mathcal{S}^{TOP}(B^{4}\times S^{1})=0$ implies
the above-mentioned fact). However, the uniqueness of $P^{5}$ is not germane here.)
The existence of the manifold $P^{5}$ is a consequence of the work of
Kirby-Siebenmann. In \cite[Section 5]{Si2} Siebenmann presented a construction
which produced $P^{5}\times S^{1}$, and $P^{5}$ itself was an implicit
consequence of this and a splitting theorem (cf. \cite[Remark 5.4]{Si2}). This
was made explicit in \cite{Mat1} and \cite{GS1}. In \cite{Sch}, Scharlemann
presented a simplified construction of $P^{5}$, which was distilled from some
earlier arguments of Kirby and Siebenmann. In doing so he exhibited a
structure on $P^{5}$ that was strong enough to let him make some significant
assertions about topological transversality at dimension 4 (cf. Prologue,
Section I). My construction below of the cell-like map $f:S^{5}\rightarrow
\Sigma^{2}H^{3}$ is a direct extension of the work of Siebenmann and Scharlemann.
One can take the following point of view about $P^{5}$. Observing that
$P^{5}\cup_{\partial}H^{3}\times B^{2}$ is a homotopy 5-sphere, hence is
homeomorphic to $S^{5}$, the problem in constructing $P^{5}$ amounts to
constructing an embedding of $H^{3}\times B^{2}$ into $S^{5}$ so that the
complement has the homotopy type of $S^{1}$. (Such a construction was known
for \textquotedblleft homology type of $S^{1}$\textquotedblright\ in place of
"homotopy type of $S^{1}$, for it was known that any oriented 3-manifold
smoothly embeds in $S^{5}$ with product neighborhood \cite[Cor. 4]{Hi1}.
Recall that the desired embedding $H^{3}\times B^{2}\rightarrow S^{5}$ above
can be made smooth if and only if the Rokhlin invariant of $H^{3}$ is zero
(see Prologue, Section IV).
The construction of $P^{5}$ in \cite{Sch} can be briefly described as follows
(details are filled in below). Given a homology 3-sphere $H^{3}$, do 1- and
$2$-surgeries to $H^{3}\times T^{2}$ ($T^{2}=2$-torus), with the attaching
maps of the surgeries being confined to some tube $H^{3}\times D^{2}$, to
produce a manifold, denoted $\chi(H^{3}\times T^{2})$, which is
homotopy-equivalent to $S^{3}\times T^{2}$. By the local contractibility of
the homeomorphism group of a manifold, some finite cover of $\chi(H^{3}\times
T^{2})$ is homeomorphic to $S^{3}\times T^{2}$ (see the Proposition below; the
minimum information needed in this paragraph is that the universal cover
$\widetilde{\chi}(H^{3}\times T^{2})$ is homeomorphic to $S^{3}\times
\mathbb{R}^{2}$). Hence $\widetilde{\chi}(H^{3}\times T^{2})\approx
S^{3}\times\mathbb{R}^{2}$ can be compactified in the natural way by adding a
circle at infinity to produce $S^{5}=S^{3}\times\mathbb{R}^{2}\cup S^{1}$.
Letting $B^{2}\subset\operatorname*{int}(T^{2}-D^{2})$ be some $2$-cell, this
produces the desired embedding of $H^{3}\times B^{2}$ into $S^{5}$.
The proof below of the Theorem amounts to showing that embedded in the above
construction there is a natural cell-like map from $S^{5}$ onto $\Sigma
^{2}H^{3}$.
We proceed to the details of the proof. For convenience and completeness, the
preceding part of the argument will be repeated, with full details. We
continue to restrict attention to the $n = 3$ case, although the proof works
for any $n$, without alteration.
\begin{proof}
[Proof of Theorem \ref{existence-of-cell-like-map}]Let $H^{3}$ be any homology
3-sphere. Let $G^{3}=H^{3}-\operatorname*{int}B^{3}$, where $B^{3}$ is some
collared 3-cell in $H^{3}$. Consider the homology 5-cell $G^{3}\times I^{2}$.
From \cite{Ke} (cf. Proposition, Part II) there is a contractible 5-manifold
$M^{5}$ such that $\partial M^{5}=\partial(G^{3}\times I^{2})$ ($M^{5}$ is in
fact unique, by the 6-dimensional $h$-cobordism theorem rel $\partial$).
Inasmuch as $M^{5}$ is the fundamental building block of this proof, it may be
worthwhile pointing out how easily it can be constructed. Kervaire quickly
produced $M^{5}$ by doing 1- and $2$-surgeries in $\operatorname*{int}
(G^{3}\times I^{2})$, the 1-surgeries to kill the fundamental group, and the
(equal number of) $2$-surgeries to make $H_{2}$ again 0. Another way of
exhibiting $M^{5}$ comes from the observation that $M^{5}\cup_{\partial}
G^{3}\times I^{2}$ is a homotopy 5-sphere, hence is $S^{5}$. Thus $M^{5}$ can
be regarded as the complement $S^{5}-\alpha(G^{3}\times I^{2})$, where
$\alpha:G^{3}\times I^{2}\rightarrow S^{5}$ is any (PL say) embedding. Any
such $\alpha$ works, because $S^{5}-\alpha(G^{3}\times I^{2})$ is not only
acyclic (by duality), but is simply connected, since $\alpha(G^{3}\times
I^{2})$ has a $2$-dimensional spine. Such an embedding $\alpha$ is most easily
produced by first embedding a $2$-dimensional spine of $G^{3}$ into $S^{5}$,
and then taking a regular neighborhood of the image. That this neighborhood is
homeomorphic to $G^{3}\times I^{2}$ can be verified by comparing the 0-, 1- and
2-handle structure of the regular neighborhood to that of $G^{3}\times
I^{2}$. (Note that the embedding $\alpha$ is in fact unique, by the
above-mentioned uniqueness of $M^{5}$ rel $\partial$.)
Returning to the proof of the Theorem, one has the natural map $I^{2}
\rightarrow T^{2}$ which identifies the opposite sides of the square $I^{2}$
to produce the $2$-torus $T^{2}$. Doing these identifications to each
1-dimensional square $g\times\partial I^{2}$ in $G^{3}\times\partial
I^{2}\subset M^{5}$, one produces from $M^{5}$ a manifold-with-boundary
$N^{5}$, with $\partial N^{5}\approx\partial B^{3}\times T^{2}$, and a map
$\phi:N^{5}\rightarrow B^{3}\times T^{2}$ which is a homeomorphism on the
boundary, and is a homotopy equivalence $rel\;\partial$. Now comes the
basic
\textbf{Proposition (Kirby-Siebenmann)}. \emph{Some finite cover }
$\widehat{\phi}:\widehat{N}^{5}\rightarrow B^{3}\times\widehat{T}^{2}\approx
B^{3}\times T^{2}$\emph{ is homotopic }$rel\;\partial$\emph{ to a
homeomorphism.
}
Actually, it follows from the work of Kirby-Siebenmann that $\phi$ itself is
homotopic $rel\;\partial$ to a homeomorphism \cite[V, Thm. C.2]{KS2}
\cite{HW}, but that is unnecessarily strong for the purposes at hand.
\emph{Proof of Proposition }(From \cite[Lemma 4.1]{Si2}). Regard $N^{5}$ as a
5-dimensional h-cobordism-with-boundary from $N_{0}^{4}\equiv B^{2}
\times0\times T^{2}$ to $N_{1}^{4}\equiv B^{2}\times1\times T^{2}$, the
product-boundary being $\partial B^{2}\times\lbrack0,1]\times T^{2}=\partial
N_{0}\times\lbrack0,1]=\partial N_{1}\times\lbrack0,1]$, where we are
regarding $B^{3}$ as $B^{2}\times\lbrack0,1]$. For notational convenience
below, assume without loss that $\phi^{-1}(\partial B^{3}\times T^{2}
)=\partial N^{5}$. By the half-open 5-dimensional h-cobordism theorem
(\cite{Co}; recall that it uses only engulfing), the restricted map
$\phi|:N^{5}-N_{1}^{4}\rightarrow B^{2}\times\lbrack0,1)\times T^{2}$ is
homotopic $rel\partial$ to a homeomorphism, say $\phi_{0}:N^{5}-N_{1}
^{4}\overset{\approx}{\longrightarrow}B^{2}\times\lbrack0,1)\times T^{2}$, and
similarly the restricted map $\phi|:N^{5}-N_{0}^{4}\rightarrow B^{2}
\times(0,1]\times T^{2}$ is homotopic $rel\;\partial$ to a homeomorphism
$\phi_{1}:N^{5}-N_{0}^{4}\overset{\approx}{\longrightarrow}B^{2}
\times(0,1]\times T^{2}$. The goal now is to make $\phi_{0}$ agree with
$\phi_{1}$ over say $B^{2}\times\lbrack\frac{1}{2},1)\times T^{2}$, by using
meshing (\`{a} la Cernavskii) and taking finite covers and then applying local
contractibility of the homeomorphism group of a manifold. Amplifying this, it
is an easy matter using meshing in the radial direction of $B^{2}$, and also
in the $(0,1)$-coordinate, to arrange that $\phi_{1}\phi_{0}^{-1}|B^{2}
\times(0,1)\times T^{2}$ respects the $B^{2}\times(0,1)$ coordinate to an
arbitrarily fine degree. Now, let $\widehat{\phi}_{0},\widehat{\phi}_{1}$ be
large finite covers of $\phi_{0},\phi_{1}$ so that the homeomorphism
$\widehat{\phi}_{1}\widehat{\phi}_{0}^{-1}:B^{2}\times(0,1)\times
T^{2}\rightarrow B^{2}\times(0,1)\times T^{2}$ arbitrarily closely respects
the $T^{2}$ coordinate, in addition to the $B^{2}\times(0,1)$ coordinate. That
is, $\widehat{\phi}_{1}\widehat{\phi}_{0}^{-1}$ is arbitrarily close to the
identity. By local contractibility, there is a homeomorphism $\rho:B^{2}
\times\lbrack0,1)\times T^{2}\rightarrow B^{2}\times\lbrack0,1)\times T^{2}$
such that $\rho=identity$ on $B^{2}\times\lbrack0,\frac{1}{4}]\times T^{2}
\cup\partial B^{2}\times\lbrack0,1)\times T^{2}$ and $\rho=\widehat{\phi}
_{1}\widehat{\phi}_{0}^{-1}$ over $B^{2}\times\lbrack\frac{1}{2},1)\times
T^{2}$. Replacing $\widehat{\phi}_{0}$ by $\rho\widehat{\phi}_{0}$, one sees
that the homeomorphisms $\rho\widehat{\phi}_{0}$ and $\widehat{\phi}_{1}$
agree over $B^{2}\times\lbrack\frac{1}{2},1)\times T^{2}$, and so together
they define the desired homeomorphism of $\widehat{N}^{5}$ to $B^{2}
\times\lbrack0,1]\times T^{2}$.
Let $\psi:\widehat{N}^{5}\rightarrow B^{3}\times T^{2}$ be the homeomorphism
of finite covers promised by the Proposition. Let $\widetilde{\psi}
:\widetilde{N}^{5}\rightarrow B^{3}\times\mathbb{R}^{2}$ be an induced
homeomorphism of universal covers. Let $\gamma:\mathbb{R}^{2}\rightarrow
\operatorname*{int}D^{2}$ be a radial homeomorphism, say $\gamma(x)=x/(1+\Vert
x\Vert)$, where $D^{2}$ denotes the unit disc in $\mathbb{R}^{2}$ (the letter
$D^{2}$ is used here, rather than $B^{2}$, for notational distinction and
clarity. Henceforth one should think of the $B^{2}$'s and $B^{3}$'s as coming
from the first coordinate of $B^{3}\times\mathbb{R}^{2}$, and $D^{2}$ as
coming from the second coordinate.) Identifying $B^{5}$ with the joint
$B^{3}\ast S^{1}$, so that $B^{5}-S^{1}=B^{3}\ast S^{1}-S^{1}=B^{3}
\times\overset{\circ}{c}S^{1}=B^{3}\times\operatorname*{int}D^{2}$, where
$S^{1}=\partial D^{2}$ and $\overset{\circ}{c}S^{1}$ denotes the open cone on
$S^{1}$, then the embedding $id_{B^{3}}\times\gamma:B^{3}\times\mathbb{R}
^{2}\rightarrow B^{3}\times\operatorname*{int}D^{2}\hookrightarrow B^{3}\ast
S^{1}=B^{5}$ establishes a compactification of $B^{3}\times\mathbb{R}^{2}$ by
a circle to a space homeomorphic to $B^{5}$. Preceding this embedding with the
homeomorphism $\widetilde{\psi}:\widetilde{N}^{5}\rightarrow B^{3}
\times\mathbb{R}^{2}$ establishes such a compactification of $\widetilde
{N}^{5}$; this will be denoted $\widetilde{N}^{5}\cup S^{1}\approx B^{5}$.
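As a quick check on the radial map $\gamma$ above (a routine verification,
recorded here for completeness), note that $\Vert\gamma(x)\Vert=\Vert
x\Vert/(1+\Vert x\Vert)<1$ for every $x\in\mathbb{R}^{2}$, and that $\gamma$
has the explicit continuous inverse
\[
\gamma^{-1}(y)=\frac{y}{1-\Vert y\Vert},\qquad y\in\operatorname*{int}D^{2},
\]
so $\gamma$ is indeed a homeomorphism of $\mathbb{R}^{2}$ onto
$\operatorname*{int}D^{2}$, carrying each ray from the origin onto the
corresponding open radius of $D^{2}$.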
At this point one has produced the manifold $P^{5}$ which disproves the $PL$
triangulation conjecture for topological manifolds (cf. earlier remarks, fifth
paragraph before Proposition.)
The task now is to construct a cell-like surjection $f_{0}:\widetilde{N}
^{5}\cup S^{1}\rightarrow G^{3}\ast S^{1}(\approx\Sigma^{2}G^{3})$ such that
$f_{0}$ carries $\partial\widetilde{N}^{5}\cup S^{1}(\approx S^{4})$
homeomorphically onto $\partial G^{3}\ast S^{1}(=S^{2}\ast S^{1})$. Thus each
point-inverse of $f_{0}$ will intersect $\partial\widetilde{N}^{5}\cup S^{1}$
in at most a single point. Given such a cell-like map $f_{0}$, one produces
the desired cell-like map $f:S^{5}\rightarrow H^{3}\ast S^{1}(\approx
\Sigma^{2}H^{3})$ simply by gluing 5-balls onto the source and target of
$f_{0}$, and extending $f_{0}$ over these 5-balls by coning. In symbols,
\[
f:S^{5}\approx\widetilde{N}^{5}\cup S^{1}\cup_{\partial}B^{5}\overset
{f_{0}\cup\text{homeo.}}{\longrightarrow}G^{3}\ast S^{1}\cup_{\partial}
B^{3}\ast S^{1}=(G^{3}\cup_{\partial}B^{3})\ast S^{1}=H^{3}\ast S^{1}.
\]
To construct $f_{0}$, we analyze in more detail the structure of the universal
cover $\widetilde{N}^{5}$ of $N^{5}$ and its compactification $\widetilde
{N}^{5}\cup S^{1}$. For each $(i,j)\in\mathbb{Z}\oplus\mathbb{Z}$, let
$I^{2}(i,j)$ be the square $[i-\frac{1}{4},i+\frac{1}{4}]\times\lbrack
j-\frac{1}{4},j+\frac{1}{4}]$ in $\mathbb{R}^{2}$; it is centered at the point
$(i,j)$ and has side-length $\frac{1}{2}$. Let $G_{1}^{3}=G^{3}-\partial
G^{3}\times\lbrack0,1)\subset\operatorname*{int}G^{3}$, where $\partial
G^{3}\times\lbrack0,2)$ is some boundary collar for $\partial G^{3}$ in
$G^{3}$. In $G^{3}\times\mathbb{R}^{2}$, take each 5-dimensional block
$G_{1}^{3}\times I^{2}(i,j)$, for each pair $(i,j)$ \textbf{except} $(0,0)$,
and replace it, fixing boundary, with a copy of the contractible manifold
$M^{5}$, which will be denoted $M^{5}(i,j)$. It turns out that this produces
$\widetilde{N}^{5}$. The reasoning here is in two steps. First, it is clear
that $\widetilde{N}^{5}$ can be identified with the space gotten from
$G^{3}\times\mathbb{R}^{2}$ by replacing \textbf{all} of the blocks $G_{1}
^{3}\times I^{2}(i,j)$ with copies of $M^{5}$. Second, leaving $G_{1}
^{3}\times I^{2}(0,0)$ unreplaced (the reason for which will become clear)
does not affect the homeomorphism type of the resultant union, because for
example the space
\[
F^{5}\equiv\left( G^{3}\times\lbrack-\frac{1}{2},\frac{1}{2}]\times
\lbrack-\frac{1}{2},\infty)-\cup_{j=1}^{\infty}G_{1}^{3}\times I^{2}
(0,j)\right) \cup\bigcup_{j=1}^{\infty}M^{5}(0,j)
\]
is homeomorphic fixing boundary to the space $(F^{5}-G_{1}^{3}\times
I^{2}(0,0))\cup M^{5}(0,0)$, by a simple sliding motion in the $[-\frac{1}
{2},\infty)$-coordinate direction which takes each $M(0,j)$ onto $M(0,j-1)$,
for $j\geq1$.
We now construct a simple but important cell-like surjection $\rho
:D^{2}\rightarrow D^{2}$ (see Figure III-2). This map $\rho$ will be the
identity on $\partial D^{2}$, and the nontrivial point-inverses of $\rho$ will
comprise a countable null collection of $2$-discs, each intersecting $\partial
D^{2}$ in a single point (\emph{null} means that for any $\epsilon>0$, there
are only finitely many members of the collection having diameter $\geq
\epsilon)$. For each relatively prime pair $(p,q)\in\mathbb{Z}\oplus
\mathbb{Z}-(0,0)$ (i.e. $\gcd\{p,q\}=1$, so this includes all pairs where $p$
or $q$ is $\pm1$), let $I_{\ast}(p,q)=\bigcup_{\ell\geq1}I^{2}(\ell p,\ell q)$,
which is a union of squares in $\mathbb{R}^{2}$ converging to infinity in the
direction $\theta$, where $\cos\theta=p/\Vert(p,q)\Vert$ and $\sin
\theta=q/\Vert(p,q)\Vert$. For each $(p,q)$, we wish to join together the
components of $I_{\ast}(p,q)$ by using bands to connect adjacent squares in
the sequence, so that the resultant union, to be denoted $I_{\#}(p,q)$, is
contractible (See Figure III-1; this operation is amplified in the next
paragraph). In fact, $I_{\#}(p,q)$ will be homeomorphic to a $2$-disc minus a
boundary point. This connecting operation is to be done so that if one defines
$D(p,q)=\gamma(I_{\#}(p,q))\cup(p,q)/\Vert(p,q)\Vert\subset D^{2}$, where
$\gamma:\mathbb{R}^{2}\overset{\approx}{\longrightarrow}\operatorname*{int}
D^{2}$ is as above and the point $(p,q)/\Vert(p,q)\Vert$ lies on $\partial
D^{2}$, then the collection $\{D(p,q)\}$ is a disjoint null collection of
$2$-discs in $D^{2}$, each intersecting $\partial D^{2}$ in a single point.
Given the collection $\{D(p,q)\}$, then $\rho$ can be taken to be any map
$\rho:D^{2}\rightarrow D^{2}$, fixed on $\partial D^{2}$, such that the
nontrivial point-inverses of $\rho$ are precisely the sets $\{D(p,q)\}$.
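The combinatorial bookkeeping here is elementary: each square $I^{2}(i,j)$
with $(i,j)\neq(0,0)$ lies in exactly one of the families $I_{\ast}(p,q)$,
namely the one indexed by the primitive vector $(p,q)$ with $(i,j)=(\ell
p,\ell q)$ and $\ell=\gcd\{|i|,|j|\}$. The following short script is only an
illustrative aside (it plays no role in the construction); it groups the
square centres in a finite window according to this rule and checks that each
centre is accounted for exactly once.
\begin{verbatim}
from math import gcd

def primitive(i, j):
    # primitive direction (p, q) with (i, j) = (l*p, l*q), l = gcd(|i|, |j|) >= 1
    g = gcd(abs(i), abs(j))
    return (i // g, j // g)

def group_squares(radius):
    # group the centres (i, j) != (0, 0) of the squares I^2(i, j), |i|,|j| <= radius,
    # by the family I_*(p, q) that contains the square
    groups = {}
    for i in range(-radius, radius + 1):
        for j in range(-radius, radius + 1):
            if (i, j) != (0, 0):
                groups.setdefault(primitive(i, j), []).append((i, j))
    return groups

groups = group_squares(6)
# each centre lies in exactly one family, so the group sizes add up to the total
assert sum(len(v) for v in groups.values()) == (2 * 6 + 1) ** 2 - 1
print(sorted(groups[(1, 2)]))  # [(1, 2), (2, 4), (3, 6)]
\end{verbatim}
For instance, within the window of radius $6$ the family $I_{\ast}(1,2)$ picks
up exactly the squares centred at $(1,2)$, $(2,4)$ and $(3,6)$.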
\setcounter{figure}{0} \renewcommand{\thefigure}{\Roman{part}-\arabic{figure}}
\begin{figure}
\caption{Connecting the squares of $I_{*}(p,q)$.}
\end{figure}
\begin{figure}
\caption{The nontrivial point-inverses of the cell-like surjection
$\rho:D^{2}\rightarrow D^{2}$.}
\end{figure}
One way to do the above connecting operation with precision is, given
$I_{\ast}(p,q)$, to adjoin to it the straight ray $\mathbb{R}^{1}
(p,q)(\approx\lbrack0,\infty))$ which starts at the point $(p,q)$ and passes
through all of the points $(\ell p,\ell q)$, $\ell\geq1$. These rays
$\{\mathbb{R}(p,q)\}$ are all disjoint, but a given $\mathbb{R}(p,q)$ may
unfortunately pass through other $I(p^{\prime},q^{\prime})$'s. To get the
desired disjointness here, one can argue as follows. Let $\pi:\mathbb{R}
^{2}\rightarrow\mathbb{R}^{2}$ be a map, bounded close to $id_{\mathbb{R}^{2}
}$ (say by making it the identity on the grid $(\mathbb{Z}+\frac{1}{2}
)\times\mathbb{R}^{1}\cup\mathbb{R}^{1}\times(\mathbb{Z}+\frac{1}{2}
)\subset\mathbb{R}^{2})$, such that the only nontrivial point-inverses of
$\pi$ are the sets $\pi^{-1}((i,j))=I^{2}(i,j),(i,j)\in\mathbb{Z}
\oplus\mathbb{Z}-(0,0)$, and such that for each relatively prime pair $(p,q)$,
$\pi$ leaves invariant (not fixed) the set $\mathbb{R}(p,q)\cap\bigcup
_{\ell\geq1}[\ell p-\frac{1}{2},\ell p+\frac{1}{2}]\times\lbrack\ell
q-\frac{1}{2},\ell q+\frac{1}{2}].$ Then each preimage set $\pi^{-1}
(\mathbb{R}(p,q))$ looks like $I_{\ast}(p,q)$ with its components joined
together by arcs. So one can take as $I_{\#}(p,q)$ a small thickening of
$\pi^{-1}(\mathbb{R}(p,q))$.
Given $\rho$ as described above, let $\rho_{t}:D^{2}\rightarrow D^{2},0\leq
t\leq1$, be a pseudoisotopy of $\rho_{0}=$ identity to $\rho_{1}=\rho$
(\emph{pseudoisotopy} means here that each $\rho_{t},t<1$, is a homeomorphism).
In order to define the desired cell-like map $f_{0}:\widetilde{N}\cup
S^{1}\rightarrow G^{3}\ast S^{1}$, we first define a certain cell-like map
$g_{0}:G^{3}\ast S^{1}\rightarrow G^{3}\ast S^{1}$. For each $x\in G^{3}$,
define $g_{0}|x\ast S^{1}$ to be the map $\rho_{t(x)}:x\ast S^{1}\rightarrow
x\ast S^{1}$, where $x\ast S^{1}$ is being identified with $D^{2}=0\ast S^{1}$
in the obvious manner, and where $t(x)\in\lbrack0,1]$ is defined using the
previously chosen collar $\partial G^{3}\times\lbrack0,2)$, by
\[
t(x)=\left\{
\begin{array}
[c]{lll}
t & \text{if} & x\in\partial G^{3}\times t,\;\;t\in[0,1),\;\text{or}
\\
1 & \text{if} & x\in G^{3}-\partial G^{3}\times\lbrack0,1)=G_{1}^{3}.
\end{array}
\right.
\]
Thus $g_{0}|\partial G^{3}\ast S^{1}=identity$, and the nontrivial
point-inverses of $g_{0}$ comprise a countable null collection of cell-like
subsets of $G^{3}\ast S^{1}$, each intersecting $S^{1}$ in a single point.
This is because the nontrivial point-inverses of $g_{0}$ are the sets
\[
G_{1}^{3}\times\gamma(I_{\#}^{2}(p,q))\cup(p,q)/\Vert(p,q)\Vert\subset
G^{3}\times\operatorname*{int}D^{2}\cup S^{1}=G^{3}\ast S^{1}\text{,}
\]
where $(p,q)$ ranges over all relatively prime pairs in $\mathbb{Z}
\oplus\mathbb{Z}-(0,0)$, and each of these sets is homeomorphic to $c(G_{1}
^{3}\times I)$.
Now, according to the discussion earlier, we can regard $\widetilde{N}^{5}\cup
S^{1}$ as being obtained from $G^{3}\ast S^{1}$ by removing each block
$G_{1}^{3}\times\gamma(I^{2}(i,j)),\ (i,j)\in\mathbb{Z}\oplus\mathbb{Z}
-(0,0)$, and replacing it with a copy $M(i,j)$ of $M^{5}$. Since $g_{0}$ sends
each block $G_{1}^{3}\times\gamma(I^{2}(i,j))$ to a single point in
$S^{1}\subset G^{3}\ast S^{1}$, then $g_{0}$ gives rise to a well-defined map
\textbf{after} these replacement operations are done in the \textbf{source} of
$g_{0}$. This new map is the desired $f_{0}:\widetilde{N}^{5}\cup
S^{1}\rightarrow G^{3}\ast S^{1}$.
To make this definition more formal, let $\chi:G^{3}\ast
S^{1}\rightarrow\widetilde{N}^{5}\cup S^{1}$ be a map which restricts on each
block $G_{1}^{3}\times\gamma(I^{2}(i,j))$ to a degree 1 identity-on-boundary
map onto $M(i,j)$, and which is the identity elsewhere, and then note that
$g_{0}\chi^{-1}$ well-defines the desired map $f_{0}$.
The nontrivial point-inverses of $f_{0}$ are each homeomorphic to the
one-point-compactification $F^{5}\cup\infty$ of
\[
F^{5}\equiv(G_{1}^{3}\times\lbrack-\frac{1}{2},\frac{1}{2}]\times\lbrack
\frac{1}{2},\infty)-\cup_{j=1}^{\infty}G_{1}^{3}\times I^{2}(0,j))\cup
\bigcup_{j=1}^{\infty}M^{5}(0,j).
\]
The compactum $F^{5}\cup\infty$ is cell-like because it is contractible, which
in turn follows from the fact that the space consisting of two copies of
$M^{5}$ glued together along $G^{3}\times I\subset\partial M^{5}$ strongly
deformation retracts to either single copy of $M^{5}$, since each space is
contractible (this fact is to be contrasted to the fact that $M^{5}$ itself
does not strongly deformation retract to $G^{3}\times I$).
This completes the construction of $f_{0}$ and hence $f$. It turns out, then,
that $f$ has a countable null collection of nontrivial point-inverses, each a
contractible ANR. One can seek to improve the point-inverses of $f$, for
example by taking spines to lower their dimension, keeping in mind that the
ultimate goal is to make $f$ a homeomorphism. That leads to the work in Part IV.
\end{proof}
The following paragraphs describe the original proof I formulated that the
double suspension conjecture for any homology 3-sphere is reducible to a
cell-like decomposition problem for $S^{5}$. It rests on some clever
4-dimensional analysis of A. Casson. I found this proof useful only
psychologically, because I was never able to make the decomposition of $S^{5}$
nice enough so that I could work with it. Interestingly, one message of
Cannon's work in \cite{Can2} is that given such a decomposition as I produced
(or he, in \cite{Can2}), it can always be changed into a standard
decomposition which one can work with. This proof is presented here in part to
advertise Cannon's unpublished work, which focuses attention on some
interesting questions in 4-dimensional topology.
\begin{proof}
[Original Proof of Theorem \ref{existence-of-cell-like-map} ]Suppose, then,
that $H^{3}$ is any homology 3-sphere. It was pointed out in the Interlude
following Part II how the implication $\Sigma^{2}(H^{3}\#H^{3})\approx
S^{5}\Rightarrow\Sigma^{2}H^{3}\approx S^{5}$ followed from a certain
splitting construction. Hence it suffices here to work with a homology
3-sphere of Rokhlin invariant 0, namely $H^{3}\#H^{3}$, instead of an
arbitrary $H^{3}$. The following argument shows:
\noindent\emph{Given a homology 3-sphere }$H^{3}$\emph{ of Rokhlin invariant
}$0$\emph{, there is a cell-like map }$p:P^{5}\rightarrow cH^{3}\times S^{1}
$\emph{ from some }$5$\emph{-manifold-with-boundary }$P^{5}$\emph{ onto
}$cH^{3}\times S^{1}$\emph{ such that the only nontrivial point-inverses of
}$p$\emph{ lie in }$p^{-1}(c\times S^{1})\subset\;\operatorname*{int}P^{5}
$\emph{.
}
(Recall that if one wishes to produce from $p$ a cell-like map from $S^{5}$
onto $\Sigma^{2}H^{3}$, one merely glues copies of $H^{3}\times B^{2}$ onto
the boundaries of $P^{5}$ and $cH^{3}\times S^{1}$, and extends $p$ over these
sets via the identity.)
If $H^{3}$ has Rokhlin invariant 0, it is a standard consequence that
$H^{3}=\partial M^{4}$, where $M^{4}$ is some simply-connected parallelizable
$PL$ manifold such that $M^{4}\cup c\partial M^{4}$ has the homotopy type of
$\#_{k}S^{2}\times S^{2}$, the connected sum of $k$ copies of $S^{2}\times
S^{2}$, for some $k$. One would like to be able to do 4-dimensional
(simply-connected) surgery to $\operatorname*{int}M^{4}$, to convert $M^{4}$
to a contractible 4-manifold $N^{4}$ (which could even be topological for the
purposes at hand). For then one could let $P^{5}=N^{4}\times S^{1}$, and
$p=n\times id_{S^{1}}$, where $n:N^{4}\rightarrow cH^{3}$ is gotten by
collapsing to a point a spine of $N^{4}$. Unfortunately, such surgery is not
known to be possible. But, Andrew Casson has shown that such surgery is
possible to some extent, and using his work one can construct the desired
manifold $P^{5}$ and map $p:P^{5}\rightarrow cH^{3}\times S^{1}$.
Casson has shown the following:\footnote{This is unpublished, although some
notes from Casson's lectures, taken by C. Gordon and R. Kirby, have been in
circulation since Casson's work in 1974.} There exist disjoint open sets
$U_{1},\cdots,U_{k}$ in $\operatorname*{int}M^{4}$, bounded away from
$\partial M^{4}$, such that
\begin{itemize}
\item[(i)] each $U_{i}$ is proper-homotopy-equivalent to $S^{2}\times
S^{2}-\ast$ ($\ast=\operatorname*{point}$), in the following special manner:
Each $U_{i}$ is homeomorphic to $S^{2}\times S^{2}-C_{i}$, where $C_{i}$ is
some compact subset of $S^{2}\times S^{2}$ which is cell-like and satisfies
the cellularity criterion of McMillan \cite{McM1} (which is that the end of
$S^{2}\times S^{2}-C_{i}$ is 1-connected; from this one can show that
$S^{2}\times S^{2}-C_{i}$ is proper-homotopy-equivalent to $S^{2}\times
S^{2}-\ast)$, and
\item[(ii)] the homomorphism $\oplus_{i=1}^{k}H_{2}(U_{i})\rightarrow
H_{2}(M)$ is an intersection-form-preserving isomorphism.
\end{itemize}
As a consequence of (i) and (ii), it follows that $M^{4}-\bigcup_{i=1}
^{k}U_{i}$ is \v{C}ech homotopically 3-connected.
(One of Casson's fundamental questions is: \emph{Are the ends of the }$U_{i}
$\emph{'s diffeomorphic (or even homeomorphic) to }$S^{3}\times R^{1}$\emph{?}
If so, one can do surgery on $M^{4}$ by replacing each $U_{i}$ by an open
4-cell, thereby producing the desired contractible manifold $N^{4}$ mentioned
above. Although unnecessary for our purposes, it may be worth recalling what
Casson's typical open set $U_{i}$ looks like. Regard $S^{2}\times S^{2}$ as
consisting of a 0-handle $B^{4}$, two $2$-handles attached to a pair of
linking solid tori in $\partial B^{4}$, and a 4-handle. Let $W_{1},W_{2}$ be
two Whitehead continua in $\partial B^{4}$, each the familiar intersection of
a nest of solid tori, constructed in the two given solid tori in $\partial
B^{4}$ (\cite[\S 4]{Wh1}; see also \cite[\S 11]{Bi4}). Thus $W_{1}$ and
$W_{2}$ link geometrically in $\partial B^{4}$. Let $C\approx\Sigma W_{1}
\vee\Sigma W_{2}$ be gotten by coning $W_{1}\cup W_{2}$ to the origin in
$B^{4}$, and then coning $W_{1}$ and $W_{2}$ separately to the center points
of the respective $2$-handles in which they lie. Then $U=S^{2}\times S^{2}-C$
is the model open set of Casson. His general open set is gotten by replacing
$W_{1}$ and $W_{2}$ above by two Whitehead-like continua which are produced by
suitably ramifying the original Whitehead construction.)
Let $U_{i}$ be any open 4-manifold as described in (i) above.
\textbf{Proposition.} $U_{i}\times S^{1}$\emph{ is homeomorphic to }
$(S^{2}\times S^{2}-\ast)\times S^{1}$\emph{, in such a manner that the
}$S^{1}$\emph{ coordinate is preserved at }$\infty$\emph{. That is, there is a
homeomorphism of pairs }$h:(U_{i}\cup\infty,\infty)\times S^{1}
\overset{\approx}{\longrightarrow}(S^{2}\times S^{2},\ast)\times S^{1}$\emph{ such
that }$h|\infty\times S^{1}=$\emph{ identity, where }$U_{i}\cup\infty
$\emph{ denotes the one-point compactification of }$U_{i}$.
\emph{Proof of Proposition. }This is a cell-like decomposition problem.
Regarding $U_{i}$ as the subset $S^{2}\times S^{2}-C_{i}$ of $S^{2}\times
S^{2}$, the assertion is that there is a homeomorphism $h:(S^{2}\times
S^{2}/C_{i})\times S^{1}\rightarrow S^{2}\times S^{2}\times S^{1}$ such that
$h$ carries $\{C_{i}\}\times S^{1}$ onto $\ast\times S^{1}$ via the identity.
To construct $h$, it suffices as usual to construct a map $f:S^{2}\times
S^{2}\times S^{1}\rightarrow S^{2}\times S^{2}\times S^{1}$ such that $f$ is a
homeomorphism over the complement of $\ast\times S^{1}$, and $f^{-1}
(\ast\times t)=C_{i}\times t$ for each $t\in S^{1}$. For then one can define
$h=f\pi^{-1}$, where $\pi:S^{2}\times S^{2}\times S^{1}\rightarrow(S^{2}\times
S^{2}/C_{i})\times S^{1}$ is the natural quotient map. The map $f$ is
constructed by shrinking the decomposition $\{C_{i}\times t\mid t\in S^{1}\}$
of $S^{2}\times S^{2}\times S^{1}$ keeping $\ast\times S^{1}$ fixed, where
without loss $\ast\in C_{i}$. This shrinking can be done using a dual skeleton
engulfing argument in the manner of \cite[Thm 1]{EG}.
Given the Proposition, one produces the manifold $P^{5}$ by removing from
$M^{4}\times S^{1}$ each open set $U_{i}\times S^{1}$, and sewing in its place
$\operatorname*{int}B^{4}\times S^{1}$. To do this precisely, let
$\alpha:\operatorname*{int}B^{4}-0\rightarrow S^{2}\times S^{2}-\ast$ be an
embedding such that $S^{2}\times S^{2}-(\ast\cup$ image $(\alpha))$ is compact
(i.e., image $(\alpha)$ is a deleted neighborhood of $\ast$ in $S^{2}\times
S^{2}$) and such that the $\alpha$-image of the $\partial B^{4}$-end of
$\operatorname*{int}B^{4}-0$ lies toward $\ast$ . Let $\beta=\alpha\times
id_{S^{1}}:(\operatorname*{int}B^{4}-0)\times S^{1}\rightarrow(S^{2}\times
S^{2}-\ast)\times S^{1}$. Then we can define $P^{5}=\big(M^{4}\times S^{1}
-\bigcup_{i=1}^{k}(U_{i}\times S^{1}-h_{i}^{-1}(\text{image }\beta))\big)
\cup\bigcup_{i=1}^{k}(\operatorname*{int}B^{4}\times S^{1})_{i}$, where, for each
$i$, the open subset $(\operatorname*{int}B^{4}-0)\times S^{1}$ of the
$i^{\text{th}}$ copy of $\operatorname*{int}B^{4}\times S^{1}$ is identified
to $h_{i}^{-1}(\text{image }\beta)$ by the homeomorphism $h_{i}^{-1}\beta$.
This manifold $P^{5}$ is known to topologists, being the unique manifold which
is homotopy equivalent, fixing boundary, to $cH^{3}\times S^{1}$ (cf.
introductory remarks of Part III; here however $P^{5}$ is not so interesting,
because the Rokhlin invariant of $H^{3}$ is 0). What is useful about the above
description of $P^{5}$ is that it provides a layering of $P^{5}$ into
cell-like subsets. Namely, for each $\theta\in S^{1}$, let $Q_{\theta}
=(M^{4}\times\theta-\bigcup_{i=1}^{k}(U_{i}\times\theta))\cup\bigcup_{i=1}^{k}
(\operatorname*{int}B^{4}\times\theta)_{i}\subset P^{5}$ (although $Q_{\theta
}$ is compact, it is not necessarily an ANR, because its two parts may not
match up very smoothly where they come together). Let $H^{3}\times
\lbrack0,1)\hookrightarrow M^{4}-\bigcup_{i=1}^{k}U_{i}$ be an open boundary
collar for $\partial M^{4}$ in $M^{4}$. Define $p:P^{5}\rightarrow
cH^{3}\times S^{1}$ to be the \textquotedblleft
level-preserving\textquotedblright\ map gotten by sending the collar
$H^{3}\times\lbrack0,1)\times S^{1}$ homeomorphically onto $(cH^{3}-c)\times
S^{1}$, and by sending each compact set $Q_{\theta}-H^{3}\times\lbrack
0,1)\times\theta$ to the point $c\times\theta,\theta\in S^{1}$.
\textbf{Proposition}. $p$\emph{ is cell-like.}\textit{
}
It follows from properties (i) and (ii) above, together with the construction
of $P^{5}$, that for any open interval $(\theta_{0},\theta_{1})$ in $S^{1}$,
the manifold $p^{-1}(cH^{3}\times(\theta_{0},\theta_{1}))$ is contractible. Hence each
point-inverse of $p$ has arbitrarily small contractible neighborhoods.
This completes the description of my original map $p:P^{5}\rightarrow
cH^{3}\times S^{1}$.
\end{proof}
\part{The triple suspension of any homology $3$--sphere is $S^{6}$}
The purpose of this part is to show how I proved the following
\begin{theorem}
[Triple Suspension]\textit{For any homology 3-sphere $H^{3}$, the triple
suspension $\Sigma^{3}H^{3}$ is homeomorphic to $S^{6}$.}
\end{theorem}
Recall that Cannon has subsequently improved this by showing that $\Sigma^{2}H^{3}\approx
S^{5}$ \cite{Can1}.
My proof (done in October 1976) made use of my earlier suspension work,
specifically the result that $\Sigma^{2}sp(H^{3})\approx S^{6}$, where
$sp(H^{3})$ is the homology $4$-sphere gotten by spinning $H^{3}$ (defined
below). Aside from this fact, this part does not use anything from Parts I and
II. However, it does use very strongly the construction of Part III.
Given a compact metric space $H$ containing a collared $n$-cell $B^{n}$ (in
general, $H$ will be a homology $n$-sphere), the $k$-\textit{spin} of
$H$ $(k\geq0)$ is the space $sp^{k}H\equiv\partial((H-\operatorname*{int}
B^{n})\times I^{k+1})$, i.e., $sp^{k}H=((H-\operatorname*{int}B^{n}
)\times\partial I^{k+1})\cup(\partial B^{n}\times I^{k+1})\subset
(H-\operatorname*{int}B^{n})\times I^{k+1}$ (note that the 0-spin of $H$ is
the double of $H-\operatorname*{int}B^{n}$ along $\partial B^{n}$).
After proving the above theorem, I realized that the result could be
interpreted as a converse to work of Glaser \cite{Gl2}-\cite{Gl4} in 1969-70,
by casting it in the following manner.
\begin{theorem}
\textit{Given any compact metric space $H$ containing a collared $n$-cell, and
given any $k\geq2$, then
\[
\Sigma^{k}H\approx S^{n+k}\Leftrightarrow\Sigma^{2}(sp^{k-2}(H))\approx
S^{n+k}.
\]
}
\end{theorem}
The work of Glaser amounted to establishing the implication $\Rightarrow$ (see
\cite{Si4}, where this argument, extracted from Glaser's work, is presented in
a single page). My work below contains a proof for the implication
$\Leftarrow$ (the $k=2$ case is discussed in the Interlude following Part II).
The real case of interest here is when $k=3$ and $H$ is a homology 3-sphere,
in which case we know from Part II that the right hand conclusion is true. The
goal of the remainder of Part IV is to prove this special case, i.e., to show
that if $\Sigma^{2}sp(H^{3})\approx S^{6}$, then $\Sigma^{3}H^{3}\approx
S^{6}$.
The idea of the proof is to use the hypothesis to understand better a certain
cell-like map $\overline{f}:S^{6}\rightarrow\Sigma^{3}H^{3}$, which is
constructed in completely analogous fashion to the cell-like map
$f:S^{5}\rightarrow\Sigma^{2}H^{3}$ from Part III. Before expanding on this,
we emphasize that $\overline{f}$ is \textbf{not} the suspension of $f$; that
would make $\overline{f}$ have the undesirable feature of having uncountably
many nontrivial point-inverses. Instead, $\overline{f}$ is to have a countable
null sequence of nontrivial point-inverses, just as $f$ did.
The rule for constructing $\overline{f}$ is everywhere in Part III to increase
the dimension from 5 to 6, by increasing the dimension of the second
coordinate from 2 to 3. Thus, $G^{3}\times I^{2}$ becomes $G^{3}\times I^{3}$;
$M^{5}$ becomes $M^{6}$, which is the (unique) contractible manifold such that
$\partial M^{6}=\partial(G^{3}\times I^{3})$; and $N^{5}$ becomes $N^{6}$,
with its rel-boundary homotopy equivalence $\overline{\phi}:N^{6}\rightarrow
B^{3}\times T^{3}$. (The bar symbol, e.g. $\overline{f},\overline{\phi
},\overline{\rho},\overline{g}_{0}$, is used over maps which are the
one-dimension-higher analogues of maps from Part III.) The compactification
$\widetilde{N}^{5}\cup S^{1}$ becomes $\widetilde{N}^{6}\cup S^{2}$, using the
radial homeomorphism $\overline{\gamma}(x)=x/(1+\Vert x\Vert):\mathbb{R}
^{3}\overset{\approx}{\rightarrow}\operatorname*{int}D^{3}$, where $D^{3}$ is
the unit ball in $\mathbb{R}^{3}$. In the construction of the map
$\overline{\rho}:D^{3}\rightarrow D^{3}$, which is the analogue of $\rho
:D^{2}\rightarrow D^{2}$, one works in $\mathbb{R}^{3}$ instead of
$\mathbb{R}^{2}$, using the 3-dimensional cubes $I^{3}(i,j,k)\equiv\lbrack
i-\frac{1}{4},i+\frac{1}{4}]\times\lbrack j-\frac{1}{4},j+\frac{1}{4}
]\times\lbrack k-\frac{1}{4},k+\frac{1}{4}]$, for $(i,j,k)\in\mathbb{Z}
\oplus\mathbb{Z}\oplus\mathbb{Z}-(0,0,0)$. In particular, Figure III-1 becomes
a 3-dimensional picture, to be compactified by adding a $2$-sphere at $\infty
$. Each nontrivial point-inverse of $\overline{\rho}$ is a 3-cell
$\overline{\gamma}(I_{\#}^{3}(p,q,r))\cup(p,q,r)/\Vert(p,q,r)\Vert\subset
D^{3}$, where $(p,q,r)\in\mathbb{Z}\oplus\mathbb{Z}\oplus\mathbb{Z}-(0,0,0)$
is a relatively prime triple, i.e., a triple such that $\gcd\{p,q,r\}=1$. These
changes and adaptations are all routine, and should require only a few moments'
thought to absorb.
The idea of the following proof is this. Given the hypothesis that $\Sigma
^{2}sp(H^{3})\ (\equiv\Sigma^{2}\partial(G^{3}\times I^{2}))$ is
homeomorphic to $S^{6}$, or more germanely that $c(\partial(G^{3}\times
I^{2}))\times\mathbb{R}^{1}$ is a manifold, it will be shown that then the
point-inverses of $\overline{f}:S^{6}\rightarrow\Sigma^{3}H^{3}$ can be
improved to be cellular arcs, which are embedded in $S^{6}$ in a certain
regular manner. Then it will be shown that this decomposition of $S^{6}$ into
points and arcs is shrinkable, and consequently $\overline{f}$ is approximable
by homeomorphisms.
The precise point of view that we adopt for this procedure is the following.
The map $\overline{f}:S^{6}\rightarrow\Sigma^{3}H^{3}$ will be factored into
two cell-like maps
\[
\overline{f}=\beta\alpha:S^{6}\overset{\alpha}{\longrightarrow}Q^{6}
\overset{\beta}{\longrightarrow}\Sigma^{3}H^{3},
\]
and each of these maps will be shown to be approximable by homeomorphisms.
This approximating will be trivial for $\alpha$, but will require some work
for $\beta$ (whose nontrivial point-inverses are arcs).
A key item for the construction below is the following description of $M^{6}$.
It is inspired by Glaser's constructions in \cite{Gl2}-\cite{Gl4}.
Define
\[
M_{\ast}^{6}=G^{3}\times I^{2}\times\lbrack0,1]/\{G_{1}^{3}\times I_{1}
^{2}\times t\mid\frac{1}{3}\leq t\leq\frac{2}{3}\}\text{,}
\]
that is, $M_{\ast}^{6}$ is the quotient space gotten from $G^{3}\times
I^{2}\times\lbrack0,1]$ by identifying to points each of the subsets
$G_{1}^{3}\times I_{1}^{2}\times t,\frac{1}{3}\leq t\leq\frac{2}{3}$, where
$G_{1}^{3}\subset\operatorname*{int}G^{3}$ and $I_{1}^{2}\subset
\operatorname*{int}I^{2}$ denote smaller copies of $G^{3}$ and $I^{2}$,
obtained as usual by taking complements of open collars. It is in the
following claim that the hypothesis of the theorem is used (compare
\cite[Prop. 1]{Gl2}).
\textbf{Claim.} $M_{\ast}^{6}$\emph{ is a contractible manifold.
}
\textit{Proof.} $M_{\ast}^{6}$ is contractible because it is gotten by
identifying to an arc the spine of a manifold. To see that $M_{\ast}^{6}$ is a
manifold, one uses the hypothesis of the theorem, which amounts to assuming
that $c(\partial(G^{3}\times I^{2}))\times\mathbb{R}^{1}$ is a manifold. From
this one sees that $M_{\ast}^{6}$ is a manifold along the interior of the
arc-spine. This leaves the two endpoints of the arc-spine to analyze. Each
endpoint has a deleted neighborhood homeomorphic to $L^{5}\times\mathbb{R}
^{1}$, where $L^{5}=G^{3}\times I^{2}\cup_{\partial}c(\partial(G^{3}\times
I^{2}))$. Since $L^{5}$ is homotopically a 5-sphere and $L^{5}\times
\mathbb{R}^{1}$ is a manifold, it follows that $L^{5}\times\mathbb{R}^{1}$ is
homeomorphic to $S^{5}\times\mathbb{R}^{1}$ (see \cite[App. 1]{Si6}). Thus
$M_{\ast}^{6}$ is a manifold, henceforth denoted $M^{6}$.
By analogy with Part III, we regard the space $(\widetilde{N}^{6}\cup
S^{2})\cup_{\partial}B^{6}\ (\approx S^{6})$ and the map $\overline
{f}:(\widetilde{N}^{6}\cup S^{2})\cup_{\partial}B^{6}\rightarrow H^{3}\ast
S^{2}\ (\approx\Sigma^{3}H^{3})$ as being obtained by doing modifications to
the source of a certain cell-like map
\[
\overline{g}_{0}\cup homeo:G^{3}\ast S^{2}\cup_{\partial}B^{6}\rightarrow
G^{3}\ast S^{2}\cup_{\partial}B^{3}\ast S^{2}=H^{3}\ast S^{2}\text{.}
\]
The map $\overline{g}_{0}:G^{3}\ast S^{2}\rightarrow G^{3}\ast S^{2}$ is
defined, as earlier, by letting $\overline{g}_{0}|x\ast S^{2}$ be the map
$\overline{\rho}_{t(x)}:x\ast S^{2}\rightarrow x\ast S^{2}$, for $x\in G^{3}$, where $\{\overline{\rho}_{t}\}$ is a pseudoisotopy
of $\overline{\rho}_{1}=\overline{\rho}:D^{3}\rightarrow D^{3}$ to
$\overline{\rho}_{0}\equiv id_{D^{3}}$, and $D^{3}=x\ast S^{2}$. The
nontrivial point-inverses of $\overline{g}_{0}$ are the sets $\{G_{1}
^{3}\times\overline{\gamma}(I_{\#}^{3}(p,q,r))\cup(p,q,r)/\Vert(p,q,r)\Vert
\}$, where $(p,q,r)\in\mathbb{Z}\oplus\mathbb{Z}\oplus\mathbb{Z}-(0,0,0)$ is a
relatively prime triple, as above. In order to be able to uniformly describe
these nontrivial point-inverses of $\overline{g}_{0}$, we choose for each
triple $(p,q,r)$ a neighborhood of $\overline{\gamma}(I_{\#}^{3}(p,q,r))$ in
$\operatorname*{int}D^{3}$, to be uniformly denoted $I^{2}\times
\lbrack0,\infty)$, such that all of these neighborhoods are disjoint, and such
that the spine $I_{1}^{2}\times\lbrack1,\infty)$ of $I^{2}\times
\lbrack0,\infty)$ coincides with the set $\overline{\gamma}(I_{\#}
^{3}(p,q,r))$. From now on we will refer to the nontrivial point-inverses of
$\overline{g}_{0}$ as the sets $\{G_{1}^{3}\times I_{1}^{2}\times
\lbrack1,\infty)\cup\infty\}$, which have disjoint pinched neighborhoods
$\{G^{3}\times I^{2}\times\lbrack0,\infty)\cup\infty\}$ in $G^{3}\ast S^{2}$,
and it will be understood that the indices $\{(p,q,r)\}$ are being suppressed.
Recall that the space $\widetilde{N}^{6}\cup S^{2}\ (\approx B^{6})$ is
regarded as being obtained from $G^{3}\ast S^{2}$ by replacing each of the
blocks $G_{1}^{3}\times\overline{\gamma}(I^{3}(i,j,k)),\ (i,j,k)\in
\mathbb{Z}\oplus\mathbb{Z}\oplus\mathbb{Z}-(0,0,0)$, by a copy $M^{6}(i,j,k)$
of $M^{6}$. Using the above explicit description of $M^{6}$ as $M_{\ast}^{6}$,
we can equivalently describe $\widetilde{N}^{6}\cup S^{2}$ as the quotient
space obtained from $G^{3}\ast S^{2}$ by the acyclic quotient map
$\overline{\chi}:G^{3}\ast S^{2}\rightarrow\widetilde{N}^{6}\cup S^{2}$, where
$\overline{\chi}$ makes the following quotient-identifications in each
nontrivial point-inverse of $\overline{g}_{0}$:
\[
G^{3}\times I^{2}\times\lbrack0,\infty)\overset{\left. \overline{\chi
}\right\vert }{\longrightarrow}G^{3}\times I^{2}\times
\lbrack0,\infty)/\{G_{1}^{3}\times I_{1}^{2}\times t\mid\ell+\frac{1}{3}\leq
t\leq\ell+\frac{2}{3}\text{, }\ell\geq1\}
\]
(cf. above convention on notation). See Figure IV-1. Here, one is thinking of
the copies $M^{6}(i,j,k)$ of $M^{6}$ in $\widetilde{N}^{6}\cup S^{2}$ as being
say the various blocks $\overline{\chi}(G^{3}\times I^{2}\times\lbrack
\ell,\ell+1])$, for $\ell\geq1$ (actually, these blocks are enlarged-by-collar
versions of the genuine $M^{6}(i,j,k)$'s, but that is not important). Using
this description of $\widetilde{N}^{6}\cup S^{2}$, then the map $\overline
{f}_{0}:\widetilde{N}^{6}\cup S^{2}\rightarrow G^{3}\ast S^{2}$ is the map
which identifies to a point each of the sets $\overline{\chi}(G_{1}^{3}\times
I_{1}^{2}\times\lbrack1,\infty)\cup\infty)$. Note that this typical nontrivial
point-inverse of $\overline{f}_{0}$ looks like an infinite sequence of
(singlefold) suspensions of $G_{1}^{3}\times I_{1}^{2}$ (except that the first
\textquotedblleft suspension\textquotedblright, $G_{1}^{3}\times I_{1}
^{2}\times\lbrack1,\frac{4}{3}]/G_{1}^{3}\times I_{1}^{2}\times\frac{4}{3}$,
is really a cone), strung together by arcs joining adjacent members of the
sequence, and then compactified by the point at $\infty$. See Figure IV-1,
second from the top.
There is now an apparent factoring of $\overline{f}_{0}$ into two cell-like
maps,
\[
\overline{f}_{0}=\beta_{0}\alpha_{0}:\widetilde{N}^{6}\cup S^{2}
\overset{\alpha_{0}}{\longrightarrow}Q_{0}^{6}\overset{\beta_{0}
}{\longrightarrow}G^{3}\ast S^{2},
\]
which is described by factoring into two maps each of the trivial restricted
maps
\[
\overline{f}_{0}|:\overline{\chi}(G_{1}^{3}\times I_{1}^{2}\times
\lbrack1,\infty)\cup\infty)\rightarrow\infty\in S^{2}
\]
(see Figure IV-1).
\begin{figure}
\caption{The nontrivial point-inverses of the various maps $\bar\chi$,
$\alpha_{0}$, and $\beta_{0}$.}
\end{figure}
The first map $\alpha_{0}|\overline{\chi}(G_{1}^{3}\times I_{1}^{2}
\times\lbrack1,\infty)\cup\infty)$ identifies to a point each of the countably
many suspension (or cone) subsets in each of the sets $\overline{\chi}
(G_{1}^{3}\times I_{1}^{2}\times\lbrack1,\infty)\cup\infty)$, thereby
producing from $\widetilde{N}^{6}\cup S^{2}$ a quotient space $Q_{0}^{6}$. The
second map $\beta_{0}$ completes the remaining identifications by identifying
each set $\alpha_{0}(\overline{\chi}(G_{1}^{3}\times I_{1}^{2}\times
\lbrack1,\infty)\cup\infty))$ to a point. In symbols, this becomes
\begin{multline*}
\overline{\chi}(G_{1}^{3}\times I_{1}^{2}\times\lbrack1,\infty)\cup\infty)=\\
(G_{1}^{3}\times I_{1}^{2}\times\lbrack1,\infty)\cup\infty)/\{G_{1}^{3}\times
I_{1}^{2}\times t\mid\ell+\frac{1}{3}\leq t\leq\ell+\frac{2}{3}\text{, }
\ell\geq1\}\\
\overset{\left. \alpha_{0}\right\vert }{\longrightarrow}(G_{1}^{3}\times
I_{1}^{2}\times\lbrack1,\infty)\cup\infty)/(\{G_{1}^{3}\times I_{1}^{2}\times
t\mid\ell+\frac{1}{3}\leq t\leq\ell+\frac{2}{3}\text{, }\ell\geq1\}\cup\\
\{G_{1}^{3}\times I_{1}^{2}\times\lbrack1,\frac{4}{3}]\}\cup\{G_{1}^{3}\times
I_{1}^{2}\times\lbrack\ell+\frac{2}{3},\ell+\frac{4}{3}]\mid\ell
\geq1\})
\\
=[1,\infty]/\{[1,\frac{4}{3}],[\frac{5}{3},\frac{7}{3}],[\frac{8}{3},\frac
{10}{3}],\ldots\}\approx arc\overset{\left. \beta_{0}\right\vert
}{\longrightarrow}\infty\in S^{2}.
\end{multline*}
Let
\begin{align*}
\alpha & \equiv\alpha_{0}\cup_{\partial}id_{B^{6}}:(\widetilde{N}^{6}\cup
S^{2})\cup_{\partial}B^{6}\ (\approx S^{6})\rightarrow Q^{6}\\
& \equiv Q_{0}^{6}\cup_{\partial}B^{6}
\end{align*}
and let
\[
\beta\equiv\beta_{0}\cup_{\partial}id_{B^{3}\ast S^{2}}:Q^{6}\rightarrow
G^{3}\ast S^{2}\cup_{\partial}B^{3}\ast S^{2}=H^{3}\ast S^{2}.
\]
Then $\overline{f}=\beta\alpha$.
\textbf{Claim.}\emph{ }$\alpha$\emph{ is approximable by
homeomorphisms.
}
\textit{Proof.} This follows because $\alpha_{0}|\widetilde{N}^{6}$ is
arbitrarily majorant-closely approximable by homeomorphisms, which in turn
follows because $\alpha_{0}|\widetilde{N}^{6}$ satisfies the Bing Shrinking
Criterion, namely, $\alpha_{0}|\widetilde{N}^{6}$ has a countable discrete
collection of nontrivial point-inverses, each of which is cellular. This is
because, recalling the present description of $\widetilde{N}^{6}$ as a
quotient of $G^{3}\ast S^{2}-S^{2}\approx G^{3}\times\mathbb{R}^{3}$, the
nontrivial point-inverses of $\alpha_{0}|\widetilde{N}^{6}$ are each
homeomorphic to $c(G_{1}^{3}\times I_{1}^{2})$ or to $\Sigma(G_{1}^{3}\times
I_{1}^{2})$, each of which has a deleted neighborhood homeomorphic to
$(G^{3}\times I^{2}\cup c(\partial(G^{3}\times I^{2})))\times\mathbb{R}^{1}$
or to $\Sigma(\partial(G^{3}\times I^{2}))\times\mathbb{R}^{1}$, which are
each homeomorphic to $S^{5}\times\mathbb{R}^{1}$ (cf. earlier remarks on
$L^{5}\times\mathbb{R}^{1})$.
We now turn our attention to $\beta:Q^{6}\rightarrow H^{3}\ast S^{2}$, where
now we know from the preceding Claim that $Q^{6}$ is a manifold, homeomorphic
to $S^{6}$. The nontrivial point inverses of $\beta$ consist of a countable
null sequence of arcs. Inspection reveals that we can regard any one of these
arcs, $A$ say, as having a compact pinched neighborhood $W$ in $Q$, described
by the following model:
\begin{align*}
A & \equiv\{G_{1}^{3}\times I_{1}^{2}\times t\mid1\leq t<\infty\}\cup
\infty\ (\approx\lbrack1,\infty])\subset W\\
& \equiv\big(G^{3}\times I^{2}\times\lbrack0,\infty)/\{G_{1}^{3}\times I_{1}
^{2}\times t\mid1\leq t<\infty\}\big)\cup\infty.
\end{align*}
There is a useful cone structure on $W$, displayed by writing
\[
W=c_{1}(G^{3}\times I^{2}\cup c_{\infty}(\partial(G^{3}\times I^{2}))),
\]
where $G^{3}\times I^{2}$ here is to be regarded as
\[
G^{3}\times I^{2}\times0\cup\partial(G^{3}\times I^{2})\times\lbrack0,1]
\]
in the first description, where $c_{\infty}$ denotes coning to the point
$\infty$, and where $c_{1}$ denotes coning to a point labelled $1$. In this
latter description, the arc $A$ becomes the interval $A=[c_{1},c_{\infty
}]\subset W$. Note that $W$ has a natural manifold interior,
$\operatorname*{int}W={\mathring{c}}_{1}(G^{3}\times I^{2}\cup c_{\infty
}(\partial(G^{3}\times I^{2})))$, but the \textquotedblleft
boundary\textquotedblright\ (i.e., the base of the cone) is not necessarily a
manifold. From the definitions, $A\cap\operatorname*{int}W=A-c_{\infty}$, and
also $A-(c_{1}\cup c_{\infty})$ has a product neighborhood in
$\operatorname*{int}W$ (but this neighborhood, after deleting the core, need
not be simply connected, as it has the homotopy type of $\partial(G^{3}\times
I^{2}))$.
Using this model, we can describe $Q^{6}$ and $\beta:Q^{6}\rightarrow
H^{3}\ast S^{2}$ this way. To obtain $Q^{6}$, start with $G^{3}\ast S^{2}
\cup_{\partial}B^{6}$, and replace each of the pinched neighborhoods
$G^{3}\times I^{2}\times\lbrack0,\infty)\cup\infty$ of the countably many nontrivial
$\overline{g}_{0}$-point-inverse sets with a copy of $W$ in the
obvious manner. The map $\beta$ is the quotient map which identifies to a
point each arc $A$ in each copy of $W$ in $Q^{6}$. We will denote this
countable null collection of arcs and their pinched neighborhoods in $Q^{6}$
by $\{A_{\ell}\}$ and $\{W_{\ell}\}$.
To complete the proof, it remains to establish:
\textbf{Proposition.}\emph{ }$\beta:Q^{6}\rightarrow H^{3}\ast S^{2}$\emph{ is
approximable by homeomorphisms. That is, the above decomposition of }$Q^{6}
$\emph{ into arcs }$\{A_{\ell}\}$\emph{ and points is shrinkable.
}
The inspiration for this proposition and its proof comes from a theorem of
Bing \cite[Thm. 3]{Bi2}, who showed that a decomposition of a manifold whose
nontrivial elements comprise a countable (not necessarily null) collection of
flat arcs is a shrinkable decomposition. (Bing did not require his collection
to be null, because he gave an ingenious but simple argument about limits
\cite[p. 368]{Bi2}, which in effect let him assume that his collection was
null. Since our collection above is null to begin with, we will not need this
part of his argument.)
The heart of Bing's argument is a certain technical proposition \cite[Lemma
4]{Bi2} dealing with shrinking individual arcs, which we state here in a form
applicable to our situation.
\begin{lemma}
[Shrinking an individual arc]\label{shrinking-an-individual-arc}\emph{Suppose
}$A_{0}$\emph{ is one of the arcs in the collection }$\{A_{\ell}\}$\emph{, and
suppose }$\epsilon>0$\emph{ is given. Then there exists a neighborhood }
$U$\emph{ of }$A_{0}$\emph{ in }$N_{\epsilon}(A_{0})$\emph{ (the }$\epsilon
$\emph{-neighborhood of }$A_{0}$\emph{ in }$Q^{6}$\emph{) and there exists a
homeomorphism }$h:Q^{6}\rightarrow Q^{6}$\emph{, supported in }$U$\emph{, such
that for any arc }$A_{\ell}$\emph{, if }$A_{\ell}\cap U\neq\emptyset$\emph{,
then }$\operatorname*{diam}h(A_{\ell})<\epsilon$\emph{.}
\end{lemma}
Given this lemma, it is an easy matter to establish that the Bing Shrinking
Criterion holds for the decomposition $\{A_{\ell}\}$, for one only has to
apply the lemma to those finitely many members of $\{A_{\ell}\}$ whose
diameters are bigger than some preassigned $\epsilon>0$.
To prove his original version of the Lemma, Bing used the flatness of his arc
$A_{0}$. It turns out that our nonflat arcs have enough regularity so that all
the requisite motions of the lemma can be performed. In order to describe
these motions, we make a definition. An embedded arc $A$ (parametrized by
$[1,\infty]$, here) in a manifold-without-boundary $Q$ has the \emph{almost
covering retraction} property (toward the $1$-end, in the definition here)
provided that for any $a\in\lbrack1,\infty]$ and any $\epsilon>0$, there is a
homeomorphism $h:Q\rightarrow Q$, supported in $N_{\epsilon}([a,\infty])$,
such that the restriction $h|[1,\infty]$ is $\epsilon$-close to the
retraction-inclusion map $[1,\infty]\rightarrow\lbrack1,a]\hookrightarrow Q$.
(This concept has been used profitably by several people, among them
Bryant-Seebeck \cite{BS}, Cantrell-Lacher \cite{CL}, Bryant \cite{Bry} and
Price-Seebeck \cite{PS}.) The simplest example of an arc with this property is
a flat arc, or more generally, any subarc of an open arc having a product
neighborhood. Also, we have:
\begin{lemma}
\label{almost-covering-retraction-property}\textit{Suppose $A_{0}$ is one of
the arcs in the collection $\{A_{\ell}\}$. Then $A_{0}$ has the almost
covering retraction property, toward the 1-end}.
\end{lemma}
\textbf{Note.} If $\pi_{1}(G^{3})\neq1$, then $A_{0}$ cannot have the almost
covering retraction property toward the $\infty$-end, as a straightforward
fundamental group analysis shows.
\begin{proof}
[Proof of Lemma \ref{shrinking-an-individual-arc} from Lemma
\ref{almost-covering-retraction-property}]The following argument is adapted
from \cite[Lemma 4]{Bi2}. Pictures are indispensable (but are not provided
here). Given $A_{0}\approx\lbrack1,\infty]$ and given $\epsilon>0$, choose a
partition $1=a_{1}<a_{2}<\ldots<a_{n}<a_{n+1}=\infty$ of $[1,\infty]$ so fine
that each subinterval $[a_{i},a_{i+1}]$ in $Q$ has diameter $<\epsilon/6$. Let
$\delta\in(0,\frac{\epsilon}{12})$ be so small that $2\delta$ is less than
the minimum distance between any two nonintersecting subintervals in this
partitioning, and such that for any arc $A_{\ell}$ aside from $A_{0}$, if
$A_{\ell}$ intersects $N_{\delta}(A_{0})$, then $\operatorname*{diam}A_{\ell
}<\epsilon/2$. Choose, in the order $U_{n},h_{n},U_{n-1},h_{n-1},\ldots
,U_{1},h_{1}$, a sequence of open subsets $\{U_{i}\}$ of $Q$ and
homeomorphisms $\{h_{i}\}$ of $Q$, satisfying the following conditions
(letting $U_{n+1}=\varnothing$ and $h_{n+1}=identity$). Each $U_{i}$ is a
neighborhood of $[a_{i},\infty]$ such that
\begin{enumerate}
\item $U_{i}-U_{i+1}\subset N_{\delta}([a_{i},a_{i+1}])$ (and hence, applying
induction, $\cup_{j=i}^{n}U_{j}\subset N_{\delta}([a_{i},\infty]))$,
\item (cf. (5) below for $h_{i+1}$) $h_{i+1}(U_{i})\subset N_{\delta}
([a_{i},a_{i+1}])$ (and hence by (1), $h_{i+1}(U_{i})\cap\bigcup_{j=i+2}
^{n}U_{j}=\varnothing$), and
\item no arc $A_{\ell}$ intersects both $U_{i}$ and $(\cup_{j=i+2}^{n}
U_{j})-U_{i+1}$ (note this latter set misses $A_{0}$).
\end{enumerate}
Each $h_{i}$ is chosen, using the almost covering retraction property for the
retraction $[1,\infty]\rightarrow\lbrack1,a_{i}]$, so that
\begin{enumerate}
\item[(4)] $h_{i}$ is supported in $U_{i}$, and
\item[(5)] $h_{i}([a_{i-1},\infty])\subset N_{\delta}([a_{i-1},a_{i}])$.
\end{enumerate}
The desired homeomorphism of Lemma \ref{shrinking-an-individual-arc} is
$h=h_{n}h_{n-1}\ldots h_{2}h_{1}$, which is supported in $U=\bigcup_{i=1}
^{n}U_{i}\subset N_{\delta}(A_{0})$. To verify the conclusion, suppose
$A_{\ell}$ is such that $A_{\ell}\cap U\neq\varnothing$. Let $i$ be the least
index such that $U_{i}\cap A_{\ell}\neq\varnothing$. Hence $A_{\ell}$ misses
the supports of $h_{1},\ldots,h_{i-1}$ and so $h(A_{\ell})=h_{n}\ldots
h_{i}(A_{\ell})$. By (3), $A_{\ell}$ misses $(\bigcup_{j=i+2}^{n}
U_{j})-U_{i+1}$, and so $A_{\ell}\cap U\subset U_{i}\cup U_{i+1}$. Now,
$h_{i+1}h_{i}(U_{i}\cup U_{i+1})\subset U_{i}\cup U_{i+1}$. By (2),
$h_{i+2}(U_{i+1})\subset N_{\delta}([a_{i+1},a_{i+2}])$, and by (1) and (4),
$U_{i}-U_{i+1}$ is not moved by $h_{i+2}$. Consequently $h_{i+2}(U_{i}\cup
U_{i+1})\subset N_{\delta}([a_{i},a_{i+2}])$, and $h_{i+2}(U_{i}\cup U_{i+1})$
misses $\bigcup_{j=i+3}^{n}U_{j}$. So $h(A_{\ell}\cap U)\subset h_{n}\ldots
h_{i+2}(U_{i}\cup U_{i+1})=h_{i+2}(U_{i}\cup U_{i+1})\subset N_{\delta}
([a_{i},a_{i+2}])$. So $\operatorname*{diam}h(A_{\ell}\cap U)<\epsilon/2$, since
$\operatorname*{diam}N_{\delta}([a_{i},a_{i+2}])\leq\operatorname*{diam}[a_{i},a_{i+1}]
+\operatorname*{diam}[a_{i+1},a_{i+2}]+2\delta<\epsilon/6+\epsilon/6+\epsilon/6=\epsilon/2$. Now
by choice of $\delta$, $\operatorname*{diam}(A_{\ell}-U)<\epsilon/2$. Hence
Lemma \ref{shrinking-an-individual-arc} is established from Lemma
\ref{almost-covering-retraction-property}.
\end{proof}
\begin{proof}
[Proof of Lemma \ref{almost-covering-retraction-property}]Because of the
product and cone structure of $\operatorname*{int}W_{0}$, which in particular
ensures that any subarc $[1,a]$ of $A_{0}$, $a<\infty$, has the almost covering
retraction property toward the 1-end, it is clear that Lemma
\ref{almost-covering-retraction-property} can be deduced from the
\textbf{Claim.} \emph{Given any }$\delta>0$\emph{, there is a homeomorphism
}$h$\emph{ of }$Q$\emph{, supported in }$N_{\delta}(\infty)$\emph{, such that
}$h(A_{0})\subset\operatorname*{int}W_{0}$\emph{ where }$\infty$\emph{ denotes
the }$\infty$\emph{-endpoint of }$A_{0}$\emph{.
}
This claim is established by engulfing. Let $W_{1}\subset(\operatorname*{int}
W_{0})\cup\infty$ be a copy of $W_{0}$ gotten by squeezing $W_{0}$ inwards a
bit, keeping $\infty$ fixed, using an interior collar of $W_{0}$ which is
pinched at $\infty$. Then $\operatorname*{int}W_{0}\cup\operatorname*{ext}
W_{1}=Q^{6}-\infty$. Given $\delta>0$, the idea is to use engulfing to produce
two homeomorphisms $h_{0},h_{1}:Q^{6}\rightarrow Q^{6}$, each supported in
$N_{\delta}(\infty)$, such that $h_{0}(\operatorname*{int}W_{0})\cup
h_{1}(\operatorname*{ext}W_{1})=Q^{6}$. Then $h=h_{0}^{-1}h_{1}$ is the
desired homeomorphism of the Claim.
In order to construct these homeomorphisms using dual-skeleton engulfing
arguments, it suffices as usual to construct certain engulfing homotopies
(compare the following engulfing argument to that, in say, \cite{Se}). Letting
$U$ denote either $\operatorname*{int}W_{0}$ or $\operatorname*{ext}W_{1}$,
one wants establish that for any neighborhood $V_{0}$ of the point $\infty$ in
$Q^{6}$, there is a smaller neighborhood $V_{1}$ of $\infty$ and a homotopy
deformation of $U\cup V_{1}$ into $U$ such that all of the motion takes place
in $V_{0}$. Since $\operatorname*{cl}U$ is an ANR, it suffices to show that
for any $\delta>0$, there is a map $\operatorname*{cl}U\rightarrow U$ which is
$\delta$-close to the identity. The existence of this map is clear for
$U=\operatorname*{int}W_{0}$, because $W_{0}$ has an interior collar. For
$U=\operatorname*{ext}W_{1}$, one must analyze the situation more carefully.
Probably the quickest argument is to pass to the target space under the
cell-like map $\beta:Q^{6}\rightarrow H^{3}\ast S^{2}$. Using the fact that
$U$ is saturated with respect to the point-inverses of $\beta$, and that the
nontrivial point-inverses in $U$ have diameter tending to 0 as they approach
$\operatorname*{fr}U$, and using the map lifting property of cell-like maps,
(see for example \cite[Lemmas 2.3, 3.4]{Lac1}, \cite[Thm 1]{Ko} or \cite[Lemma
3]{Hav}), it suffices to show in the target $H^{3}\ast S^{2}$ that for any
$\delta>0$ there is a map $\beta(\operatorname*{cl}U)\rightarrow\beta(U)$
which is $\delta$-close to the identity. The construction of this map is
fairly clear, using the uniform structure of $H^{3}\ast S^{2}$ and the
explicit description of $\beta(\operatorname*{cl}U)$ available from
definitions, and using the interior collar on $B^{3}\ast S^{2}\subset
H^{3}\ast S^{2}$ to make sets disjoint from $G^{3}\ast S^{2}$ by pushing them
into $\operatorname*{int}(B^{3}\ast S^{2})$. This completes the proof of the
Theorem.
\end{proof}
The above proof does not apply in dimension $5$ for the simple reason that
there it would require the hypothesis that $\Sigma^{2}(H^{3}\#H^{3})\approx
S^{5}$, which is essentially what one is trying to prove. Still, the above
arguments in Lemmas \ref{shrinking-an-individual-arc} and
\ref{almost-covering-retraction-property} seem at first glance almost to work
in dimension $5$ to shrink the nontrivial point-inverses of the original
cell-like map $f:S^{5}\rightarrow\Sigma^{2}H^{3}$ constructed in Part III. But
the difficulty is that the nontrivial point-inverses of $f$, unlike arcs, have
thickness as well as length. Hence the almost covering retraction principle
fails to do the job completely. It would be interesting if such an argument
could be made to work, for that would provide a brief proof of the Double
Suspension Theorem.
\end{document}
\begin{document}
\title{Tilted Sperner Families}
\begin{abstract}
Let $\cal A$ be a family of subsets of an $n$-set such that $\mathcal A$ does not contain distinct sets
$A$ and $B$ with $\vert A \backslash B \vert = 2 \vert B \backslash A \vert$. How large can $\cal A$
be? Our aim in this note is to determine the maximum size of such an $\mathcal A$. This
answers a question of Kalai. We also give some related results and
conjectures.
\end{abstract}
\section{Introduction}
A set system $\mathcal A\subseteq \mathcal {P}[n]=\mathcal {P}(\{1,\ldots ,n\})$ is said to be an \emph{antichain} or \emph{Sperner family} if $A\not\subset B$ for all distinct $A,B\in \mathcal A$. Sperner's theorem \cite{sper} says that any antichain $\mathcal A$ has size at most
$\binom {n}{\lfloor n/2\rfloor }$. (See \cite{com} for general background.)
Kalai \cite{kal} noted that the antichain condition may be restated as: $\mathcal A$ does not
contain $A$ and $B$ such that, in the subcube of the $n$-cube spanned by $A$ and $B$,
they are the top and bottom points. He asked what happens if we `tilt' this
condition. For example, what happens if we instead forbid $A$, $B$ such that $A$ is
1/3 of the way up the subcube spanned by $A$ and $B$? Equivalently, $\mathcal A$ cannot
contain two sets $A$ and $B$ with $|A\backslash B|=2|B\backslash A|$.
An obvious example of such a system is any level set $[n]^{(i)}=\{ A\subset [n]:|A|=i\} $. Thus we may
certainly achieve size $\binom {n}{\lfloor n/2\rfloor }$. The system $[n]^{(\lfloor n/2\rfloor )}$ is not maximal, as
we may for example add to it all sets of size $\lfloor n/4\rfloor -1$ -- but that is a rather
small improvement. Kalai \cite{kal} asked if, as for Sperner families, it is still
true that our family $\mathcal A$ must have size $o(2^n)$.
Our aim in this note is to verify this. We show that the middle layer is
asymptotically best, in the sense that the maximum size of such a family
is $(1+o(1)) \binom {n}{\lfloor n/2\rfloor }$. We also find the exact extremal system, for $n$ even
and sufficiently large. We give similar results for any particular
`forbidden ratio' in the subcube spanned.
What happens if, instead of forbidding a particular ratio, we instead
forbid an absolute distance from the bottom point? For example, for
distance 1 this would correspond to the following: our set system $\mathcal A$ must not contain
sets $A$ and $B$ with $|A\backslash B|=1$. How large can $\mathcal A$ be?
Here the situation is rather different, as for example one cannot take an
entire level. We give a construction that has size about $\frac {1}{n} \binom {n}{\lfloor n/2\rfloor }$,
which is a constant multiple of $2^n/n^{3/2}$. But we are not able to show that this
is optimal: the best upper bound that we are able to give is of order ${2^n}/{n}$.
However, if we strengthen the condition to $\mathcal A$ not having $A$ and $B$ with
$|A\backslash B| \leq 1$ then we are able to show that the greatest family has size $\frac{1}{n} \binom{n}{\lfloor n/2\rfloor }$, up to a multiplicative constant.
\section{Forbidding a fixed ratio}
In this section we consider the problem of finding the maximum size of a family $\mathcal A$ of subsets of $[n]$ which satisfies $p|A\backslash B|\neq q|B\backslash A|$ for all $A,B\in \mathcal A$ where $p:q$ is a fixed ratio. Initially we will focus on the first non-trivial case $1:2$ (note that $1:1$ is trivial as then the condition just forbids two sets of the same size in $\mathcal A$) and then at the end of the section we extend these results to any given ratio.
As mentioned in the Introduction, for the ratio $1:2$ we actually obtain the extremal family when $n$ is even and sufficiently large. This family, which we will denote by $\mathcal B_0$, is a union of level sets: $\mathcal B_0=\cup _{i\in I}[n]^{(i)}$. Here the set $I$ is defined as follows: $I=\{a_i:i\geq 0\}\cup \{b_i:i\geq 0\} $, where $a_0=b_0=\frac {n}{2}$ and $a_i$ and $b_i$ are defined inductively by taking $a_i=\lceil \frac {a_{i-1}}{2}\rceil -1$ and $b_i=\lfloor \frac {b_{i-1}+n}{2}\rfloor +1$ for all $i$. For example, if $n=2^k$ then $I=\{2^{k-1}\}\cup \{2^i-1:0\leq i\leq k-2\}\cup \{2^k-2^i+1:0\leq i\leq k-2\} $. Noting that for any sets $A$ and $B$ with either (i) $|A|=l$ where $l<\frac {n}{2}$ and $|B|>2l$ or (ii) $|A|=l$ where $l>\frac {n}{2}$ and $|B|<2l-n$ we have $|A\backslash B|\neq 2|B\backslash A|$, we see that $\mathcal B_0$ satisfies the required condition. Our main result is the following.
\begin{thm}
\label{main}
Suppose $\mathcal A$ is a set system on ground set $[n]$ such that $|A\backslash B|\neq 2|B\backslash A|$ for all distinct $A,B\in \mathcal A$. Then $|\mathcal A|\leq (1+o(1))\binom {n}{\lfloor n/2 \rfloor }$. Furthermore, if $n$ is even and sufficiently large then $|\mathcal A|\leq |\mathcal B_0|$, with equality
if and only if $\mathcal A=\mathcal B_0$.
\end{thm}
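Before turning to the proof, here is a small computational sketch (not part of the paper) of the family $\mathcal B_0$: it builds the index set $I$ for even $n$ from the recursion above, discarding indices that leave $[0,n]$, and compares the result with the closed form stated for $n=2^k$; the value of $n$ below is just an illustrative choice.
\begin{verbatim}
from math import comb

def index_set(n):
    I, a, b = {n // 2}, n // 2, n // 2
    while True:
        a = -(-a // 2) - 1          # a_i = ceil(a_{i-1}/2) - 1
        if a < 0:
            break
        I.add(a)
    while True:
        b = (b + n) // 2 + 1        # b_i = floor((b_{i-1}+n)/2) + 1
        if b > n:
            break
        I.add(b)
    return I

n, k = 16, 4
closed_form = ({2 ** (k - 1)}
               | {2 ** i - 1 for i in range(k - 1)}
               | {2 ** k - 2 ** i + 1 for i in range(k - 1)})
assert index_set(n) == closed_form
print(sorted(index_set(n)), sum(comb(n, i) for i in index_set(n)))  # levels and |B_0|
\end{verbatim}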
\noindent The main step in the proof of Theorem \ref{main} is given by the following lemma. The proof is a Katona-type (see \cite{katona}) averaging argument.
\begin{lem}
\label{inequality}
Let $\mathcal{A}$ be a set system on $[n]$ such that $|A\backslash B|\neq 2|B\backslash A|$ for all distinct $A,B\in \mathcal A$. Then
\begin{equation*}
\sum _{j=l}^{2l} \frac {|\mathcal {A}_j|}{\binom {n}{j}} \leq 1
\end{equation*}
for all $l\leq \frac {n}{3}$ and
\begin{equation*}
\sum _{j=2k-n}^{k} \frac {|\mathcal {A}_j|}{\binom {n}{j}} \leq 1
\end{equation*}
for all $k\geq \frac {2n}{3}$, where $\mathcal A_j=\mathcal A\cap [n]^{(j)}$.
\end{lem}
\begin{proof}
We only prove the first inequality, as the proof of the second is identical. Pick a random ordering of $[n]$ which we denote by $(a_1,a_2,\ldots ,a_{\lceil \frac {2n}{3} \rceil}, b_1,\ldots ,b_{\lfloor \frac {n}{3} \rfloor})$. Given this ordering, let $C_i=\{a_j:j\in[2i]\}\cup \{b_k:k\in [i+1,l]\} $ and let $\mathcal{C}=\{C_i:i\in [0,l]\}$. Consider the random variable $X=|\mathcal {A}\cap \mathcal {C}|$. Since each set $B\in [n]^{(i)}$ is equally likely to be $C_{i-l}$ we have $\mathbb {P}[B\in \mathcal {C}]= \frac {1}{\binom {n}{i}}$. Thus by linearity of expectation we have
\begin{equation}
\label{ref}
\mathbb{E}(X)=\sum_{i=l}^{2l}\frac {|\mathcal A_i|}{\binom {n}{i}}
\end{equation}
On the other hand, given any $C_i, C_j$ with $i<j$ we have $|C_j\backslash C_i|=2|C_i\backslash C_j|$ and so $\mathcal{A}$ can contain at most one of these sets. This gives $\mathbb{E}(X)\leq 1$. Together with (\ref{ref}) this yields the claimed inequality
\begin{equation*}
\sum_{i=l}^{2l}\frac {|\mathcal A_i|}{\binom {n}{i}} \leq 1
\end{equation*}
\end{proof}
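The following is a brute-force sketch (not from the paper) of the key property of the chain $C_0,\ldots ,C_l$ used above: for $i<j$ one difference is exactly twice the other, so a family avoiding the ratio $1:2$ meets the chain at most once. The particular values of $n$ and $l$ are arbitrary, subject to $l\leq n/3$.
\begin{verbatim}
import random

n, l = 15, 5                                   # any l <= n/3
perm = random.sample(range(1, n + 1), n)       # random ordering of [n]
a, b = perm[:n - n // 3], perm[n - n // 3:]    # a has ceil(2n/3) entries, b has floor(n/3)

def C(i):
    return set(a[:2 * i]) | set(b[i:l])

for i in range(l + 1):
    assert len(C(i)) == l + i                  # C_i lies in level l + i
    for j in range(i + 1, l + 1):
        assert len(C(j) - C(i)) == 2 * len(C(i) - C(j))
print("chain property verified")
\end{verbatim}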
\noindent \emph{Proof of Theorem \ref{main}.} We first show $|\mathcal A| \leq (1+o(1))\binom {n}{\lfloor n/2 \rfloor}$. By standard estimates (see e.g.\ Appendix A of \cite{aands}) we have $|[n]^{(\leq \alpha n)} \cup [n]^{(\geq (1-\alpha)n)}| = o(\binom {n}{\lfloor n/2 \rfloor})$ for any fixed $\alpha \in [0,\frac {1}{2})$, so it suffices to show that $| \bigcup _{i={\frac {2n}{5}}}^{\frac {3n}{5}} \mathcal A_i|\leq \binom {n}{\lfloor n/2 \rfloor}$. But this follows immediately from Lemma \ref{inequality} by taking $l=\lfloor \frac {n}{3} \rfloor $.
We now prove the extremal part of the claim in Theorem \ref{main}. We first show that the maximum of $f(x)=\sum _{i=0}^n x_i$ subject to the inequalities
\begin{equation}
\label{firstineq}
\sum _{j=l}^{2l} \frac {x_j}{\binom {n}{j}} \leq 1, \quad l\in \{ 0,1,\ldots ,\lfloor \frac {n}{3} \rfloor \}
\end{equation}
and
\begin{equation}
\label{secondineq}
\sum _{j=2k-n}^{k} \frac {x_j}{\binom {n}{j}} \leq 1, \quad k\in \{ \lceil \frac {2n}{3} \rceil ,\ldots ,n\}
\end{equation}
from Lemma \ref{inequality} occurs when $x_{n/2}= \binom {n}{\frac {n}{2}}$.
Indeed, suppose otherwise. At least one of these inequalities involving $x_{n/2}$ must occur with equality, as otherwise we can increase $x_{n/2}$ slightly, increase the value of $f(x)$ and still satisfy (\ref{firstineq}) and (\ref{secondineq}).
Pick $j>\frac {n}{2}$ as small as possible such that $x_j>0$. Let
$y_{n/2}=x_{n/2}+\epsilon \binom {n}{n/2}$, $y_j=x_j-\epsilon \binom {n}{j}$ and $y_i=x_i$ for all other $i$. As $f(y)>f(x)$, one of the inequalities (\ref{firstineq}) or (\ref{secondineq}) must fail. If $\epsilon$ is sufficiently small, only the inequalities involving $y_{n/2}$ and not $y_j$ can be violated. Choose
$k<n/2$ maximal such that $y_k>0$ and $y_k$ does not occur in any inequality involving $y_j$. Note that we must have $j-k\geq \frac {n}{4}$. Decrease $y_k$ by $\epsilon \binom {n}{k}$. Since the only increased variable $y_{n/2}$ always occurs with one of $y_j$ or $y_k$, it follows that $y=(y_0,\ldots ,y_n)$ satisfies (\ref{firstineq}) and (\ref{secondineq}).
We claim that $f(y)>f(x)$. Indeed, we must have either $|j-\frac {n}{2}|\geq \frac {n}{8}$ or $|k-\frac{n}{2}|\geq \frac {n}{8}$. Without loss of generality assume that $|k-\frac {n}{2}|\geq \frac {n}{8}$. Then since
$\binom {n}{n/2}> \binom {n}{(n/2)+1} + \binom {n}{3n/8}$ for sufficiently large $n$ we have
\begin{equation*}
f(y)=f(x)+\epsilon \binom {n}{n/2}-\epsilon \binom {n}{j} - \epsilon \binom {n}{k}
>f(x)+\epsilon \binom {n}{n/2}-\epsilon \binom {n}{(n/2)+1} - \epsilon \binom {n}{3n/8}>f(x).
\end{equation*}
Therefore we must have $x_{n/2}=\binom {n}{n/2}$, as claimed.
Now, by the inequalities (\ref{firstineq}) and (\ref{secondineq}) we have
$x_j=0$ for all $\frac {n}{4}\leq j\leq \frac {3n}{4}$ with $j\neq \frac {n}{2}$. From here it is easy to see by a weight transfer argument that $f(x)$ has a unique maximum when $x_i=\binom {n}{i}$ for $i\in I$ and $x_i=0$ otherwise. For a set system $\mathcal A$ these values of $x_i=|\mathcal A_i|$ can only be achieved if $\mathcal A=\mathcal B_0$, as claimed. \hspace{2cm} $\square$\\
\noindent We remark that the statement of Theorem \ref{main} does not hold for all even $n$, as can be seen for example by taking $n=4$ and $\mathcal A= \mathcal P[n]\backslash [n]^{(2)}$.
We now extend Theorem \ref{main} from the ratio $1:2$ to any given ratio $p:q$. Let $p:q$ be in its lowest terms and $p<q$. If $A\in [n]^{(i+a)}$ and $B\in [n]^{(i)}$ satisfy $p|A\backslash B|=q|B\backslash A|$ then we have $p(a+b)=q(b)$ where $b=|B\backslash A|$. But then $pa=(q-p)b$ and since $p$ and $q$ are coprime we must have that $(q-p)|a$. Therefore any family $\mathcal A=\bigcup _{i\in I}[n]^{(i)}$, where $I$ is an interval of length $q-p$, satisfies $p|A\backslash B|\neq q|B\backslash A|$ for all $A,B\in \mathcal A$. Taking $\lfloor \frac {n}{2}\rfloor \in I$ gives $|\mathcal A|=(q-p+o(1))\binom {n}{\lfloor n/2 \rfloor }$. Our next result shows that this is asymptotically best possible.
\begin{thm}
\label{givenratio}
Let $p,q\in \mathbb{N}$ be coprime with $p<q$. Let $\mathcal A$ be a set system on ground set $[n]$ such that $p|A\backslash B|\neq q|B\backslash A|$ for
all distinct $A,B\in \mathcal A$. Then $|\mathcal A|\leq (q-p+o(1))\binom {n}{\lfloor n/2 \rfloor }$.
\end{thm}
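As a quick sanity check (not from the paper) of the construction just described: for coprime $p<q$, a union of $q-p$ consecutive levels contains no ordered pair $A,B$ with $p|A\backslash B|=q|B\backslash A|$. The parameters below are small illustrative choices.
\begin{verbatim}
from itertools import combinations

def violates(A, B, p, q):
    return A != B and p * len(A - B) == q * len(B - A)

n, p, q = 8, 2, 5
levels = range(n // 2 - 1, n // 2 + q - p - 1)    # q - p consecutive levels around n/2
family = [set(c) for i in levels for c in combinations(range(1, n + 1), i)]
assert not any(violates(A, B, p, q) for A in family for B in family)
print(len(family), "sets, no forbidden pair for ratio", p, ":", q)
\end{verbatim}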
The following lemma performs an analogous role to that of Lemma \ref{inequality} in the proof of Theorem \ref{main}.
\begin{lem}
\label{secondinequality}
Let $\mathcal{A}$ be a set system on $[n]$ such that $p|A\backslash B|\neq q|B\backslash A|$ for all distinct $A,B\in \mathcal A$. Then
\begin{equation*}
\sum _{j\in J_k} \frac {|\mathcal {A}_j|}{\binom {n}{j}} \leq 1
\end{equation*}
where $J_k=\{l:\lceil \frac {pn}{p+q}\rceil \leq l \leq \lfloor \frac {qn}{p+q}\rfloor, l\equiv k \pmod {(q-p)}\} $ for $0\leq k\leq q-p-1$.
\end{lem}
\begin{proof}
We only sketch the proof, as it is very similar to the proof of Lemma \ref{inequality}. For convenience we assume $n=(p+q)m$ (this assumption is easily removed). Fix $k\in [0,q-p-1]$ and let $k'\equiv k-pm\pmod {(q-p)}$ where $k'\in [0,q-p-1]$. Pick a random ordering of $[n]$ which we denote by $(a_1,a_2,\ldots ,a_{qm}, b_1,\ldots ,b_{pm})$. Given this ordering let $C_i=\{a_j:j\in[qi+k']\}\cup \{b_j:j\in [pi+1,pm]\} $ and let $\mathcal{C}=\{C_i:i\in [0,m-1]\}$. (Here if $k'=0$ we additionally adjoin $C_m$ to $\mathcal C$.) By choice of $k'$, we have $|C_i|\in J_k$ for all $i\in [0,m-1]$.
Again for any $C_i$ and $C_j$ with $i<j$ we have $q|C_i\backslash C_j|=p|C_j\backslash C_i|$, which implies that $\mathcal A$ contains at most one element of $\mathcal C$. Using this the rest of the proof is as in Lemma \ref{inequality}.
\end{proof}
The proof of Theorem \ref{givenratio} is now identical to the proof of Theorem \ref{main} taking Lemma \ref{secondinequality} in place of Lemma \ref{inequality}.
For simplicity we have given in Lemma \ref{secondinequality} only the inequalities that we needed in order to prove Theorem \ref{givenratio}. Further inequalities involving smaller level sets analogous to those in Lemma \ref{inequality} can also be obtained in a similar fashion. While we have not done so here, we note that it is possible to use these inequalities to again find an exact extremal family for any given ratio $p:q$ as in Theorem \ref{main}, provided $q-p$ and $n$ have the opposite parity and $n$ is sufficiently large.
\section{Forbidding a fixed distance}
In this final section we consider how large a family $\mathcal A$ can be if for all $A,B\in \mathcal A$ we do not allow $A$ to have a constant distance from the bottom of the subcube formed with $B$. For `distance exactly 1' this would mean that we exclude $\vert A \backslash B\vert= 1$ for $A,B\in \mathcal A$. Here the following family $\mathcal A^*$ provides a lower bound: let $\mathcal A^*$ consist of all sets $A$ of size $\lfloor n/2\rfloor $ such that $\sum _{i\in A} i \equiv r \pmod {n}$, where $r\in \{0,\ldots ,n-1\}$ is chosen to maximise $|\mathcal A^*|$. Such a choice of $r$ gives $| \mathcal A^* |\geq {\frac {1}{n}} {\binom{n} {\lfloor n/2\rfloor }}$. Note that if we had $|A\backslash B|=1$ for some $A,B\in \mathcal A^*$ then, since $|A|=|B|$, we would also have $|B\backslash A|=1$. Letting $A\backslash B=\{i\}$ and $B\backslash A=\{j\}$ we then have $i-j\equiv 0 \pmod {n}$, giving $i=j$, a contradiction.
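A small brute-force sketch (not from the paper) of this construction: group the $\lfloor n/2\rfloor$-sets by the residue of their element sum mod $n$, keep a largest class, and check both the forbidden-distance property and the pigeonhole bound; the value of $n$ below is an arbitrary small choice.
\begin{verbatim}
from itertools import combinations
from math import comb

n = 10
by_residue = {r: [] for r in range(n)}
for c in combinations(range(1, n + 1), n // 2):
    by_residue[sum(c) % n].append(set(c))
A_star = max(by_residue.values(), key=len)

assert all(len(A - B) != 1 for A in A_star for B in A_star if A != B)
assert len(A_star) * n >= comb(n, n // 2)      # |A*| >= (1/n) binom(n, n/2)
print(len(A_star), comb(n, n // 2) / n)
\end{verbatim}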
We suspect that this bound is best.
\begin{conjecture}
\label{conject}
Let $\mathcal{A}\subset \mathcal{P}[n]$ be a family which satisfies $|A\backslash B|\neq 1$ for all $A,B\in \mathcal{A}$. Then $|\mathcal A|\leq (1+o(1))\frac {1}{n}\binom {n}{\lfloor n/2\rfloor }$.
\end{conjecture}
\noindent The following gives an upper bound that is a factor $n^{1/2}$ larger than this.
\begin{thm}
\label{exact}
Let $\mathcal{A}\subset \mathcal{P}[n]$ be a family such that $|A\backslash B|\neq 1$ for all $A,B\in \mathcal{A}$. Then there exists a constant $C$ independent of $n$ such that $|\mathcal{A}|\leq \frac{C}{n}2^n$.
\end{thm}
\begin{proof}
An easy estimate gives that the number of sets of $\mathcal{A}$ lying in
$[n]^{(\leq n/3)}\cup [n]^{(\geq 2n/3)}$ is at most
$4\binom{n}{n/3}=o(\frac{2^n}{n})$. Therefore it suffices to show that $|\mathcal{A}_i|\leq \frac{C}{n}\binom{n}{i}$ for all $i\in [\frac{n}{3},\frac{2n}{3}]$.
To see this, note that since $|A\backslash A'|\neq 1$ for all $A,A'\in \mathcal{A}$, each $B\in [n]^{(i+1)}$ contains at most one $A\in \mathcal{A}_i$. Double counting, we have
\begin{equation*}
\begin{split}
\frac {n}{3} | {\mathcal {A}} _i| \leq (n-i)| {\mathcal {A} }_i| &= | \lbrace (A,B): A\in \mathcal {A}_i, B \in [n]^{(i+1)}, A\subset B\rbrace | \\
& \leq \binom {n}{i+1} \leq 3\binom {n}{i}
\end{split}
\end{equation*}
as required.
\end{proof}
Our final result gives an upper bound on the size of a family $\mathcal A$ in which we forbid `distance at most 1' instead of `distance exactly 1', i.e.
where we have $|A\backslash B|> 1$ for all $A,B\in \mathcal A$. Again, the family $\mathcal A^*$ constructed above gives a lower bound for this problem. In general, if we forbid `distance at most $k$' then it is easily seen that the following family $\mathcal A_k^*$ gives a lower bound of $\frac {1}{n^k}\binom {n}{\lfloor n/2\rfloor }$: supposing $n$ is prime, let $\mathcal A_k^*$ consist of all sets $A$ of size $\lfloor n/2 \rfloor $ which satisfy $\sum _{i\in A}i^d\equiv 0\pmod {n}$ for all $1\leq d\leq k$.
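The following brute-force sketch (not from the paper) is in the spirit of $\mathcal A_k^*$ for a small prime $n$ and $k=2$, except that it keeps a largest class of residue vectors of the first $k$ power sums rather than the all-zero class; the same pigeonhole bound and the same distance property apply.
\begin{verbatim}
from itertools import combinations
from math import comb

n, k = 13, 2
classes = {}
for c in combinations(range(1, n + 1), n // 2):
    key = tuple(sum(x ** d for x in c) % n for d in range(1, k + 1))
    classes.setdefault(key, []).append(set(c))
family = max(classes.values(), key=len)

assert all(len(A - B) > k for A in family for B in family if A != B)
assert len(family) * n ** k >= comb(n, n // 2)   # pigeonhole lower bound
print(len(family), "sets; pigeonhole bound", comb(n, n // 2) / n ** k)
\end{verbatim}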
Our last result provides an upper bound which matches this up to a multiplicative constant. The proof is again a Katona-type argument. Here the condition $|A\backslash B|>k$ rather than $|A\backslash B|\neq k$ seems to be crucial.
\begin{thm}
\label{atmost}
Let $k\in \mathbb {N}$. Suppose $\mathcal {A}$ is a set system on $[n]$ such that $|A\backslash B|>k$ for all distinct $A,B \in \mathcal {A}$. Then $|\mathcal {A}|\leq \frac {(2^k-o(1))}{n^k}\binom {n}{\lfloor n/2 \rfloor}$.
\end{thm}
\begin{proof}
Consider the family $\partial ^{(k)} \mathcal A$, the $k$-shadow of $\mathcal A$, where
\begin{equation*}
\partial ^{(k)}\mathcal{A} = \{B\in \mathcal{P}[n]: B=A\backslash C
\mbox{ for some } A\in \mathcal {A} \mbox{ and }C\subset A \mbox { with } |C|=k\}.
\end{equation*}
Since $\mathcal{A}$ does not contain $A,B$ with $|A\backslash B|\leq k$, every element of $\partial ^{(k)}\mathcal{A}$ is contained in at most one element of $\mathcal{A}$. Therefore we have
\begin{equation}
\label{firstref}
|\partial ^{(k)}\mathcal{A}|=\sum_{i=0}^n (i)_k|\mathcal{A}_i|
\end{equation}
where $(i)_k=i(i-1)\cdots (i-k+1)$. Now, since $\mathcal{A}$ does not contain $A,B$ with $|A\backslash B|\leq k$, it follows that $\partial ^{(k)}\mathcal{A}$ is an antichain, and so by Sperner's theorem we have
\begin{equation}
\label{secondref}
|\partial ^{(k)}\mathcal{A}| \leq \binom{n}{\lfloor n/2 \rfloor}
\end{equation}
Finally, an estimate of the sum of binomial coefficients (Appendix A of \cite{aands}) gives
\begin{equation}
\label{thirdref}
\sum_{i=0}^{\frac{n}{2}-n^{2/3}}|\mathcal{A}_i| \leq \sum_{i=0}^{\frac{n}{2}-n^{2/3}} \binom{n}{i} \leq e^{-n^{1/3}}2^n.
\end{equation}
Combining (\ref{firstref}), (\ref{secondref}) and (\ref{thirdref}) we obtain
\begin{equation*}
\begin{split}
\binom {n}{\lfloor n/2 \rfloor} &\geq \sum_{i=0}^{\frac{n}{2}-n^{2/3}} (i)_k |\mathcal {A}_i| + \sum_{i=\frac{n}{2}-n^{2/3}}^n (i)_k|\mathcal {A}_i| \\
&\geq \sum_{i=0}^{\frac{n}{2}-n^{2/3}} (\frac {n}{2}-n^{2/3})_k|\mathcal {A}_i| - (\frac {n}{2}-n^{2/3})_ke^{-n^{1/3}}2^n + \sum_{i={\frac{n}{2}-n^{2/3}}}^n (\frac{n}{2}-n^{2/3})_k|\mathcal {A}_i| \\
&= (\frac {n}{2}- o(n))^k|\mathcal {A}| - o( \binom {n}{\lfloor n/2 \rfloor} )
\end{split}
\end{equation*}
which gives the desired result.
\end{proof}
Taking $k=1$ in Theorem \ref{atmost} we obtain an upper bound which differs by a factor of 2 from the lower bound given by the family $\mathcal A^*$. It would be interesting to close this gap.
\end{document}
|
\begin{document}
\title{Linear Stochastic Approximation Algorithms and Group Consensus over Random Signed Networks: A Technical Report with All Proofs}
\author{Ge~Chen,~\IEEEmembership{Member,~IEEE}, Xiaoming Duan, Wenjun Mei, Francesco Bullo,~\IEEEmembership{Fellow,~IEEE}
\IEEEcompsocitemizethanks{\IEEEcompsocthanksitem This material is
based upon work supported by, or in part by, the U.S.\ Army
Research Laboratory and the U.S.\ Army Research Office under grant
number W911NF-15-1-0577. The research of G.\ Chen was supported in part
by the National Natural Science Foundation of China under grants 91427304,
61673373 and 11688101, the National Key Basic Research Program of China
(973 program) under grant 2014CB845301/2/3, and the Leading research
projects of Chinese Academy of Sciences under grant QYZDJ-SSW-JSC003.
\IEEEcompsocthanksitem
Ge Chen is with the National Center for Mathematics and Interdisciplinary Sciences \& Key Laboratory of Systems and
Control, Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190,
China, {\tt [email protected]}
\IEEEcompsocthanksitem Xiaoming Duan, Wenjun Mei, and Francesco Bullo
are with the Department of Mechanical Engineering and the Center of
Control, Dynamical-Systems and Computation, University of California
at Santa Barbara, CA 93106-5070, USA. {\tt
[email protected]; [email protected];
[email protected]}
}
}
\IEEEtitleabstractindextext{
\begin{abstract}
This paper studies linear stochastic approximation (SA) algorithms
and their application to multi-agent systems in engineering and
sociology.
As our main contribution, we provide necessary and sufficient
conditions for convergence of linear SA algorithms to a deterministic or random final vector. We
also characterize the convergence rate when the system is
convergent. Moreover, in contrast to the non-negative gain functions
of traditional SA algorithms, this paper also considers the case
in which the gain functions are allowed to take arbitrary real
values.
Using our general treatment, we provide necessary and sufficient
conditions to reach consensus and group consensus for first-order
discrete-time multi-agent system over random signed networks and
with state-dependent noise.
Finally, we extend our results to the setting of multi-dimensional
linear SA algorithms and characterize the behavior of the
multi-dimensional Friedkin-Johnsen model over random interaction
networks.
\end{abstract}
\begin{IEEEkeywords}
stochastic approximation, linear systems, multi-agent systems,
consensus, signed network
\end{IEEEkeywords}}
\maketitle
\IEEEdisplaynontitleabstractindextext
\IEEEpeerreviewmaketitle
\renewcommand{\thesection}{\Roman{section}}
\section{Introduction}
\renewcommand{\thesection}{\arabic{section}}
Distributed coordination of multi-agent systems has drawn much
attention from various fields over the past decades. For example,
engineers control the formations of mobile robots, satellites,
unmanned aircraft, and automated highway systems
\cite{JAF-RMM:04,WR-RWB:08}; physicists and computer scientists model
the collective behavior of animals \cite{TV-AC-EBJ-IC-OS:95,CWR:87};
sociologists investigate the evolution of opinion, belief and social
power over social networks
\cite{MHDG:74,PJ-AM-NEF-FB:13d,NEF-AVP-RT-SEP:16}. Many models for
distributed coordination have been proposed and analyzed; a common
thread in all these works is the study of a group of interacting
agents trying to achieve a collective behavior by using neighborhood
information allowed by the network topology.
Linear dynamical systems are a class of basic first-order dynamics with
application to many practical problems in multi-agent systems,
including distributed consensus of multi-agent systems, computation of
PageRank, sensor localization of wireless networks, opinion dynamics, and belief
evolution on social networks
\cite{CR-PF-RT-HI:15,AVP-RT:17,NEF-AVP-RT-SEP:16}. If the operator in a linear dynamical system is time-invariant, then the study of this system is
straightforward.
However, practical systems are very often subject to random
fluctuations, so that the operator in a linear dynamical system is
time-varying and the system may not converge. To overcome
this deficiency and eliminate the effects of fluctuation, a feasible
approach is to adopt models based on the stochastic approximation (SA)
algorithm \cite{MYH-JHM:09,RC-GC-PF-FG:11,NEL-AO:14,TL-JFZ:10,MH:12,HT-TL:15,GL-CG:17}.
The main idea of the SA algorithm is as follows: each agent has a
memory of its current state. At each time step, each agent updates
its state according to a convex combination of its current state and
the information received from its neighbors. Critically, the weight
accorded to its own state tends to $1$ as time grows (as a way to
model the accumulation of experience). The earliest SA algorithms
were proposed by Robbins and Monro \cite{VB-SM:51} who aimed to solve
root finding problems. SA algorithms have then attracted much
interest due to many applications such as the study of reinforcement
learning~\cite{JNT:94}, consensus protocols in multi-agent
systems~\cite{GL-CG:17}, and fictitious play in game
theory~\cite{JH-WHS:02}. A main tool in the study of SA algorithms
(see~\cite[Chapter 5]{HJK-GGY:97}) is the ordinary differential
equations (ODE) method, which transforms the analysis of asymptotic
properties of a discrete-time stochastic process into the analysis of
a continuous-time deterministic process.
In this paper, we consider linear SA algorithms
with random linear operators; these models are basic
first-order protocols with numerous applications in engineering and
sociology.
Currently, there are two main threads on the
theoretical research of linear SA algorithms. One thread is based on assumptions that guarantee the state of the system
converges to a deterministic point \cite{HFC:96,MAK:96,EC-IW-SK:99,VBT:04,MAK-SS:15}. Another thread is the research on consensus of multi-agent systems, where the system matrices are assumed to be row-stochastic \cite{TL-JFZ:10,MH:12,GL-CG:17}.
These two threads cover only special classes of linear operators, and the critical condition for convergence has remained unknown.
This paper
develops appropriate analysis methods for linear SA algorithms and provides necessary and sufficient conditions for convergence, which
include the critical cases of the linear operators.
It is shown that under the critical convergence conditions the state of
the system converges to a random vector; this result is applied to consensus algorithms
over signed networks.
Moreover, an additional
restriction of traditional SA algorithms is that only non-negative
gain schedules are allowed. This paper relaxes this
requirement and provides necessary and sufficient conditions for
convergence of linear SA algorithms under
arbitrary gains. In addition, we analyze the convergence rate of the
system when it is convergent.
Our general theoretical results are directly applicable to certain
multi-agent systems. The first application is to the study of
consensus problems in multi-agent systems. As it is well known,
numerous works provide sufficient conditions for consensus in
time-varying multi-agent systems with row-stochastic interaction
matrices; an incomplete list of references is
\cite{LM:05,FF-SZ:08a,ATS-AJ:08,TL-JFZ:10,GC-ZL-LG:14,GL-CG:17}; see
also the classic works~\cite{SC-ES:77,RC:84,JNT-DPB-MA:86}.
Recently, motivated by the study of antagonistic interactions in
social networks, novel concepts of bipartite, group, and cluster
consensus have been studied over signed networks (mainly focusing on
continuous-time dynamical models); see
\cite{CA:13,JY-LW:10,JQ-CY:13,ZG-KMY-KHJ-MC-YH:16}. In this paper, we
apply and extend our results on linear SA algorithms to the setting of
first-order discrete-time multi-agent system over random signed
networks and with state-dependent noise; for such models, we provide
novel necessary and sufficient conditions to reach consensus and group
consensus.
As the second application of our results, we study the
Friedkin-Johnsen (FJ) model of opinion dynamics in social
networks. The FJ model was first proposed in \cite{NEF-ECJ:99}, where
each agent is assumed to be susceptible to other agents' opinions but
also to be anchored to his own initial opinion with a certain level of
stubbornness. Ravazzi \emph{et al.} proposed a gossip version of the
FJ model in \cite{CR-PF-RT-HI:15}, whereby each link in the network is
sampled uniformly and the agents associated with the link meet and
update their opinions. The agents' opinions were proven to converge in
mean square. Frasca \emph{et al.} considered a symmetric pairwise
randomization of FJ in \cite{PF-HI-CR-RT:15}, whereby a pair of agents
are chosen to update their opinions. Our work, by exploiting
stochastic approximation, largely relaxes the conditions for
convergence when applied to FJ model over random interaction
networks. The sociological interpretation of the stochastic-approximation FJ model
is that agents have cumulative memory of their previous
opinions. SA models of human behavior are
widely adopted in game theory and economics; e.g.,
see~\cite{JH-WHS:02}.
The main contributions of this paper are summarized as follows.
\begin{enumerate}
\item
For linear SA systems, we provide some necessary and sufficient conditions to
guarantee convergence by developing appropriate methods different from previous works.
We derive some critical convergence conditions for linear operators for the first time.
The convergence rate is also obtained when the system is convergent. Moreover, we consider the convergence of linear SA systems whose gain functions can take arbitrary real numbers.
\item Using our results, we obtain, for the first time, necessary and sufficient
conditions to reach consensus and group consensus for first-order
discrete-time multi-agent systems over random signed networks and
with state-dependent noise.
\item We extend our results to the multi-dimensional linear SA algorithms and provide applications to the multi-dimensional FJ model
over random interaction networks.
\end{enumerate}
\paragraph*{Organization}
The remainder of this paper is organized as follows. We briefly review
the time-varying linear dynamical systems and propose a stochastic
approximation version of it in Section \ref{Protocol1}. The main
results are presented in Section \ref{Main_results}. In particular, we
introduce some preliminaries and assumptions in Subsection
\ref{sec:assumption}. Sufficient conditions that guarantee the
convergence of linear SA algorithms are obtained in Subsection
\ref{sf_2}. We provide the results on convergence rate in the same
subsection. In Subsection \ref{Necessary_Conditions}, we prove that
the sufficient condition is also necessary. The necessary and
sufficient conditions for convergence are then summarized in
Subsection \ref{NScondition}. We generalize the results to
multi-dimensional models and discuss their application to group
consensus and the FJ model in Section \ref{sec:application}. Section
\ref{sec:conclusion} concludes the paper.
\renewcommand{\thesection}{\Roman{section}}
\section{Linear Dynamical Systems}\label{Protocol1}
\renewcommand{\thesection}{\arabic{section}}
\subsection{Review of a time-varying linear dynamical system}
In \cite{CR-PF-RT-HI:15,YH-WL-TC:13} a time-varying linear dynamical system was
considered as follows:
\begin{eqnarray}\label{AF}
x(s+1)=P(s) x(s)+u(s),~~s=0,1,\ldots,
\end{eqnarray}
where $P(s)\in\mathbb{R}^{n\times n}$ is a matrix associated to the
communication network between agents, and $u(s)\in\mathbb{R}^{n}$ is an
input vector. Given a matrix $A\in\mathbb{R}^{n\times n}$, let $\rho(A)$
denote its spectral radius, i.e., $\rho(A)=\max_i |\lambda_i(A)|$,
where $\lambda_i(A)$ is an eigenvalue of $A$. For system (\ref{AF}),
if $P(s)\equiv P$, $u(s) \equiv u$, and $\rho(P)<1$, then it is
immediate to see that $x(s)$ converges to $(I_{n}-P)^{-1} u$.
In this paper we will consider the case when $\{P(s)\}$ and $\{u(s)\}$ are sequences of random matrices and random vectors, respectively.
We define the $\sigma$-algebra generated by $\{P(s)\}$ and $\{u(s)\}$ as
$\mathcal{F}_t=\sigma((P(s),u(s)),0\leq s\leq t).$ The probability space is
$(\Omega,\mathcal{F}_{\infty},P)$.
Since the system (\ref{AF}) does not
necessarily converge when $\{P(s)\}$ and $\{u(s)\}$ are random, as an alternative, Ravazzi \emph{et
al.}~\cite{CR-PF-RT-HI:15} investigate the ergodicity of system
(\ref{AF}) as follows.
\begin{proposition}[Theorem 1 in \cite{CR-PF-RT-HI:15}]\label{Ravazzi_theorem}
Consider system (\ref{AF}) and assume $\{P(s)\}$ and $\{u(s)\}$ are
sequences of
independent identically distributed (i.i.d.) random matrices and vectors with finite first moments. Assume there exists a
constant $\alpha\in(0,1]$, a matrix $P\in\mathbb{R}^{n\times n}$ and a vector $u\in\mathbb{R}^{n}$ such that
\begin{eqnarray*}
\mathbb{E}[P(s)]=(1-\alpha)I_{n}+\alpha P,~~\mathbb{E}[u(s)]=\alpha u,~~\forall s\geq 0.
\end{eqnarray*}
If $\rho(P)<1$, then $x(s)$ converges to a random variable in
distribution, and $\frac{1}{s} \sum_{k=0}^{s-1} x(k)$ converges to
$(I_{n}-P)^{-1} u$ almost surely.
\end{proposition}
In this paper we adopt the stochastic approximation method to average
the effect of the stochastic $P(s)$ and $u(s)$ to the state $x(s)$.
In this case we study the sufficient and necessary conditions for
convergence of $x(s)$, and also obtain a convergence rate.
\subsection{Linear SA algorithms over random networks}
In this subsection we consider the stochastic-approximation version of
system (\ref{AF}), formulated as:
\begin{multline}\label{SAF_m1}
x(s+1)=(1-a(s))x(s) \\ +a(s) [P(s) x(s)+u(s)], \quad s=0,1,\ldots,
\end{multline}
where $a(s)\in\mathbb{R}$ is the gain function. System (\ref{SAF_m1}) is
known as a linear SA algorithm
\cite{HFC:96,EC-IW-SK:99,VBT:04,TL-JFZ:10,MH:12,GL-CG:17}. Compared
to system (\ref{AF}), each agent in system (\ref{SAF_m1}) updates its
state depending not only on the linear map $P(s) x(s)+u(s)$ but also
on its own current state. If $a(s)=\frac{1}{s+1}$, then $x(s+1)$
equals the approximate average value of the previous $s$ linear maps
because $x(s)$ carries the information of the previous $s-1$ linear
maps. Intuitively, in this case $x(s)$ approximately
equals $\frac{1}{s} \sum_{k=0}^{s-1} x(k)$ in system (\ref{AF}), so
that it should have the same limit as in Proposition
\ref{Ravazzi_theorem}. In fact, this result can be deduced by the
following Proposition \ref{SAF_sc}. Of course, this paper considers
the more general case of $\{a(s)\}$ and $\{P(s)\}$.
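A minimal simulation sketch of this intuition (not part of the paper, with made-up $P$ and $u$): with $a(s)=\frac{1}{s+1}$ the SA iterate tracks the running average of the raw recursion (\ref{AF}), and both approach $(I_2-P)^{-1}u$ since $\rho(P)<1$ here.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
P = np.array([[0.2, 0.3], [0.1, 0.4]])      # rho(P) = 0.5
u = np.array([1.0, 2.0])
target = np.linalg.solve(np.eye(2) - P, u)  # (I - P)^{-1} u

x_sa = np.zeros(2)      # iterate of the SA recursion
x_raw = np.zeros(2)     # iterate of the raw recursion (AF)
cesaro = np.zeros(2)    # running average of the raw iterates
for s in range(20000):
    Ps = P + rng.uniform(-0.2, 0.2, size=(2, 2))   # E[P(s)] = P
    us = u + rng.uniform(-0.2, 0.2, size=2)        # E[u(s)] = u
    a = 1.0 / (s + 1)
    x_sa = (1 - a) * x_sa + a * (Ps @ x_sa + us)
    cesaro += (x_raw - cesaro) / (s + 1)
    x_raw = Ps @ x_raw + us
print(target, x_sa, cesaro)
\end{verbatim}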
The system (\ref{SAF_m1}) is a basic first-order discrete-time
multi-agent system with much prior theoretical analysis.
A main thread in the research of such a system is to
study the setting in which $x(s)$ converges to a deterministic
point. In \cite{HFC:96,EC-IW-SK:99}, convergence and convergence
rates are studied for bounded linear operators with the assumption
that there exists a matrix $P\in\mathbb{R}^{n\times n}$ whose
eigenvalues' real parts are all less than $1$ such that
\begin{eqnarray}\label{HFC_con}
\lim_{s\rightarrow\infty}\Big(\sup_{s\leq t \leq m(s,T)} \Big\|\sum_{i=s}^t a(i)(P(i)-P) \Big\|_2\Big)=0,
\end{eqnarray}
where $m(s,T):=\max\{k:a(s)+\cdots+a(k)\leq T\}$ with $T$ being an
arbitrary positive constant, and $\|\cdot\|_2$ denotes the Euclidean
norm. Later, Tadi\'{c} relaxed the boundedness condition on $P(s)$ and
provided some convergence rates based on (\ref{HFC_con}) and the
assumption that the real parts of the eigenvalues of $P+\alpha I_n$
are all less than $1$, where $\alpha$ is a positive constant
\cite{VBT:04}. Additionally, there are results on convergence rates
by assuming that $\{I_n-P(s)\}_{s\geq 0}$ is a sequence of positive
semi-definite matrices and $I_n-P$ is a positive definite matrix
\cite{MAK:96,MAK-SS:15}. Another thread in the theoretical research
on system (\ref{SAF_m1}) is to consider its consensus behavior where
$\{P(s)\}$ and $\{u(s)\}$ are assumed to be row-stochastic matrices
and zero-mean noises respectively \cite{TL-JFZ:10,MH:12,GL-CG:17}. In
addition, system~(\ref{SAF_m1}) has many applications like computation of
PageRank \cite{WXZ-HFC-HTF:13}, sensor localization of wireless
networks \cite{UAK-SK-JMFM:09}, distributed consensus of multi-agent
systems, and belief evolution on social networks.
Despite all this prior theoretical research on system (\ref{SAF_m1}),
a key problem remains unsolved: What is the necessary and sufficient
condition for convergence in terms of $\{P(s)\}$ and $\{u(s)\}$? Previous
works focused on the case when the real parts of the eigenvalues of
$P$ are all assumed to be less than $1$
\cite{HFC:96,EC-IW-SK:99,VBT:04,TL-JFZ:10,MH:12,GL-CG:17}, but it is
not known what happens when this condition is not satisfied. Also,
traditional SA algorithms consider only non-negative gains, so another
interesting problem is to investigate what happens if the gain
function $a(s)$ can take arbitrary real numbers. This paper considers
these two problems and studies the mean-square convergence of $x(s)$,
whose definition is given as follows:
\begin{definition}\label{MS_def}
For an $n$-dimensional random vector $x$, we say $x(s)$ converges to
$x$ in mean square if
\begin{eqnarray}\label{MS_def_1}
\mathbb{E}\|x\|_2^2<\infty~~\mbox{and}~~\lim_{s\to\infty}\mathbb{E}\|x(s)-x\|_2^2=0.
\end{eqnarray}
Also, we say $\{x(s)\}$ is mean-square convergent if there exists an
$n$-dimensional random vector $x$ such that (\ref{MS_def_1}) holds.
\end{definition}
\renewcommand{\thesection}{\Roman{section}}
\section{Main results}\label{Main_results}
\renewcommand{\thesection}{\arabic{section}}
\renewcommand{\thesection}{\Roman{section}}
\subsection{Informal statement of main results}\label{sec:assumption}
\renewcommand{\thesection}{\arabic{section}}
We start with some notation. Given a matrix $A\in\mathbb{R}^{n\times
n}$, define
$\widetilde{\rho}_{\max}(A):=\max_{i}\mbox{Re}(\lambda_i(A))$ and
$\widetilde{\rho}_{\min}(A):=\min_{i}\mbox{Re}(\lambda_i(A))$ to be
the maximum and minimum values of the real parts of the eigenvalues of
$A$ respectively. It is easy to show that
$|\widetilde{\rho}_{\max}(A)|\leq \rho(A)$.
For $\{P(s)\}$ and $\{u(s)\}$, we relax the i.i.d. condition in
\cite{CR-PF-RT-HI:15} to the following assumption:
\textbf{(A1)} Suppose there exist a matrix $P\in\mathbb{R}^{n\times
n}$ and a vector $u\in\mathbb{R}^n$ such that $\mathbb{E}[P(s)\,|\,x(s)]=P$ and
$\mathbb{E}[u(s)\,|\,x(s)]=u$ for any $s\geq 0$ and $x(s)\in\mathbb{R}^n$. Also,
assume $\mathbb{E}[\|P(s)\|_2^2\,|\,x(s)]$ and $\mathbb{E}[\|u(s)\|_2^2\,|\,x(s)]$ are uniformly
bounded.
For $\{a(s)\}$, generally SA algorithms use the following assumption:
\textbf{(A2)} Assume $\{a(s)\}$ are non-negative real numbers independent with $\{x(s)\}$, and satisfying $\sum_{s=0}^{\infty} a(s)=\infty$ and $\sum_{s=0}^{\infty} a^2(s)<\infty$.
We will also consider the following alternative assumption.
\textbf{(A2')} Assume $\{a(s)\}$ are non-positive real numbers independent with $\{x(s)\}$, and satisfying $\sum_{s=0}^{\infty} a(s)=-\infty$ and $\sum_{s=0}^{\infty} a^2(s)<\infty$.
Under the assumptions (A1) and (A2), previous works have investigated the cases when $\widetilde{\rho}_{\max}(P)<1$ and $P(s)x+u(s)$ is a bounded linear operator for all
$s\geq 0$ \cite{HFC:96,EC-IW-SK:99}, or
$\widetilde{\rho}_{\max}(P+\alpha I_n)<1$ \cite{VBT:04}, or $\{P(s)\}$ are row-stochastic matrices and $u=\bf{0}$ \cite{TL-JFZ:10,MH:12,GL-CG:17}. This paper will consider all cases of $P$ and $u$, and show that
the necessary and
sufficient condition for the convergence of $x(s)$ in system
(\ref{SAF_m1}) is $\widetilde{\rho}_{\max}(P)<1$, or
$\widetilde{\rho}_{\max}(P)=1$ together with the following
condition for $P$ and $u$:
\textbf{(A3)} Assume that every eigenvalue of $P$ whose real part is $1$ is equal to $1$,
that the eigenvalue $1$ has the same algebraic and geometric multiplicities, and that $\xi^T u=0$ for every left eigenvector $\xi^T$ of $P$ corresponding to the eigenvalue $1$.
Similarly, under (A1) and (A2') the necessary and sufficient condition
for the convergence of $x(s)$ is $\widetilde{\rho}_{\min}(P)>1$, or
$\widetilde{\rho}_{\min}(P)=1$ with (A3).
Also, we will study the convergence rates when $x(s)$ is convergent,
and the convergence conditions when $\{a(s)\}$ are arbitrary real
numbers.
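A small numerical sketch (not part of the paper) of how one might check the above criterion for a given pair $(P,u)$ under non-negative gains; the matrix and vector below are made-up examples, and the rank- and eigenvalue-based multiplicity tests are only reliable up to the chosen numerical tolerances.
\begin{verbatim}
import numpy as np

def criterion_nonneg_gains(P, u, tol=1e-9):
    eig = np.linalg.eigvals(P)
    if eig.real.max() < 1 - tol:
        return True                       # tilde-rho_max(P) < 1
    if eig.real.max() > 1 + tol:
        return False
    # critical case: check (A3)
    on_line = eig[np.abs(eig.real - 1) <= tol]
    if np.any(np.abs(on_line - 1) > tol):
        return False                      # eigenvalue with Re = 1 but not equal to 1
    alg_mult = int(np.sum(np.abs(eig - 1) <= tol))
    geo_mult = P.shape[0] - np.linalg.matrix_rank(P - np.eye(P.shape[0]), tol=1e-7)
    if alg_mult != geo_mult:
        return False
    w, V = np.linalg.eig(P.T)             # columns of V: left eigenvectors of P
    left = V[:, np.abs(w - 1) <= 1e-7]
    return bool(np.all(np.abs(left.T @ u) <= 1e-7))

P = np.array([[1.0, 0.0], [0.0, 0.5]])
u = np.array([0.0, 1.0])
print(criterion_nonneg_gains(P, u))       # True: critical case covered by (A3)
\end{verbatim}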
\renewcommand{\thesection}{\Roman{section}}
\subsection{Sufficient convergence conditions and convergence rates}\label{sf_2}
\renewcommand{\thesection}{\arabic{section}}
Recall that $P$ and $u$ are the expectations of $P(s)$ and $u(s)$ respectively.
Let
\begin{eqnarray}\label{Jordan}
P=H^{-1}\mbox{diag}(J_1,\ldots,J_K)H:=H^{-1} D H,
\end{eqnarray}
where $H\in\mathbb{C}^{n\times n}$ is an invertible matrix, and $D$ is the Jordan normal form of $P$
with
\begin{eqnarray*}\label{Jordan_1}
J_i=
\begin{bmatrix}
\lambda_{i'}(P) & 1 & & \\
& \lambda_{i'}(P) & \ddots & \\
& & \ddots & 1 \\
& & & \lambda_{i'}(P)
\end{bmatrix}_{m_i\times m_i}
\end{eqnarray*}
for $1\leq i\leq K$, where $\lambda_{i'}(P)$ is the eigenvalue of $P$ corresponding to the Jordan block $J_i$.
Let $r$ be the algebraic multiplicity of the eigenvalue $1$ of $P$.
We first consider the case $\widetilde{\rho}_{\max}(P)=1$ (or
$\widetilde{\rho}_{\min}(P)=1$) with (A3), which implies that $r\geq
1$ and that the geometric multiplicity of the eigenvalue $1$ is
equal to $r$. We choose a suitable $H$ such that
$\lambda_1(P)=\cdots=\lambda_r(P)=1$. Then the Jordan normal form $D$
can be written as
\begin{eqnarray}\label{Jordan_temp}
D=
\begin{bmatrix}
I_r & {\mathbf{0}}_{r\times(n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & \underline{D}_{(n-r)\times(n-r)} \\
\end{bmatrix}\in\mathbb{C}^{n\times n},
\end{eqnarray}
where $\underline{D}:=\mbox{diag}(J_{r+1},\ldots,J_K)\in
\mathbb{C}^{(n-r)\times (n-r)}$. For any vector $y\in\mathbb{C}^n$,
throughout this subsection we set $\bar{y}:=(y_1,\ldots,y_r)^\top$ and
$\underline{y}:=(y_{r+1},\ldots,y_n)^\top$.
\begin{theorem}\label{SAF_c1}(Convergence of linear SA algorithms at critical point)
Consider the system (\ref{SAF_m1}) satisfying (A1), (A2), and (A3)
with $\widetilde{\rho}_{\max}(P)=1$, or satisfying (A1), (A2'), and
(A3) with $\widetilde{\rho}_{\min}(P)=1$. Let $H$ be the matrix
defined by (\ref{Jordan}) such that the Jordan normal form $D$ has
the form of (\ref{Jordan_temp}). Then, for any initial state,
$x(s)$ converges to $H^{-1} y$ in mean square, where $\bar{y}$ is a
random vector satisfying $\mathbb{E}\bar{y}=\overline{Hx(0)}$ and
$\mathbb{E}\|\bar{y}\|_2^2<\infty$, and
$\underline{y}=(I_{n-r}-\underline{D})^{-1} \underline{Hu}$.
\end{theorem}
From Theorem \ref{SAF_c1}, $x(s)$ converges to a random vector under the critical condition $\widetilde{\rho}_{\max}(P)=1$ (or $\widetilde{\rho}_{\min}(P)=1$). This differs from the previous works, where $x(s)$ converges to a deterministic vector under non-critical conditions \cite{HFC:96,EC-IW-SK:99,VBT:04,TL-JFZ:10,MH:12,GL-CG:17}.
Due to this difference, the traditional method cannot be used in the proof of Theorem \ref{SAF_c1}. We propose a new method to prove this theorem as follows.
\begin{IEEEproof}[Proof of Theorem \ref{SAF_c1}]
Let $y(s):=H x(s)$, $v(s):=H u(s)$ and $D(s):=H P(s) H^{-1}$, then by (\ref{SAF_m1}) we have
\begin{multline}\label{SAF_c1_4}
H^{-1} y(s+1)\\=(1-a(s))H^{-1} y(s)+a(s)[P(s) H^{-1} y(s)+ u(s)],
\end{multline}
which implies
\begin{eqnarray}\label{SAF_c1_5}
y(s+1)=y(s)+a(s)[(D(s)-I_n)y(s)+v(s)].
\end{eqnarray}
Let $v:=\mathbb{E}[v(s)]=Hu$. From (\ref{Jordan}) we have $HP=DH$, which implies $H_i P= H_i$ for $1\leq i\leq r$, where $H_i$ is the $i$-th row of the matrix $H$.
Thus, $H_i$, $1\leq i\leq r$, is a left eigenvector corresponding to the eigenvalue $1$. By (A3) we have
\begin{eqnarray}\label{SAF_c1_5_1}
v_i=H_i u=0, ~~~~\forall 1\leq i\leq r.
\end{eqnarray}
Recall that $\underline{v}=(v_{r+1},\ldots,v_n)^\top$. Also, $I_{n-r}-\underline{D}$ is an invertible matrix, so we can set
\begin{eqnarray*}\label{SAF_c1_6}
z:=
\begin{bmatrix}
\mathbf{0}_{r\times 1} \\
(I_{n-r}-\underline{D})^{-1}\underline{v} \\
\end{bmatrix}\in \mathbb{C}^n.
\end{eqnarray*}
From (\ref{Jordan_temp}) and (\ref{SAF_c1_5_1}) we have
\begin{eqnarray}\label{SAF_c1_6_1}
(D-I_n)z+v=\mathbf{0}_{n\times 1}.
\end{eqnarray}
Set $\theta(s):=y(s)-z$. From (\ref{SAF_c1_5}) we obtain
\begin{eqnarray}\label{SAF_c1_7}
\theta(s+1)=\theta(s)+a(s)[(D(s)-I_n)(\theta(s)+z)+v(s)].
\end{eqnarray}
We first consider the case when $\widetilde{\rho}_{\max}(P)=1$, which
implies that $\underline{D}-I_{n-r}$ is a Hurwitz matrix. Thus, by the
stability theory of continuous Lyapunov equation
(see~\cite[Corollary~2.2.4]{RAH-CRJ:94}), there exists a Hermitian
positive definite matrix $A\in\mathbb{C}^{(n-r)\times (n-r)}$ such that
\begin{eqnarray}\label{SAF_c1_8}
\begin{aligned}
(\underline{D}-I_{n-r}) ^* A+ A(\underline{D}-I_{n-r})=-I_{n-r},
\end{aligned}
\end{eqnarray}
where $(\cdot)^*$ denotes the conjugate transpose of the matrix or vector.
Set
\begin{eqnarray*}\label{SAF_c1_9}
A_1:=
\begin{bmatrix}
I_r & {\mathbf{0}}_{r\times(n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & A_{(n-r)\times(n-r)} \\
\end{bmatrix}\in \mathbb{C}^{n\times n},
\end{eqnarray*}
then $A_1$ is still a Hermitian positive definite matrix.
Define the Lyapunov function $V_1(\theta):=\theta^* A_1 \theta$.
By (\ref{SAF_c1_7}), (A1) and (\ref{SAF_c1_8}), for any $\theta(s)$ we have
\begin{align}\label{SAF_c1_10}
\mathbb{E} [ &V_1(\theta(s+1))|\theta(s)] \\
&\leq V_1(\theta(s))+a(s)\theta^* (s)\big[ (D-I_n)^* A_1+A_1 (D-I_n) \big] \theta(s)\nonumber \\
&\quad+O\big(a^2(s)(\|\theta(s)\|_2^2+1)\big)\footnotemark.\nonumber
\end{align}
\footnotetext{Given two sequences of positive numbers $\{g_1(s)\}_{s=0}^{\infty}$ and
$\{g_2(s)\}_{s=0}^{\infty}$, we say $g_1(s) = O(g_2(s))$ if there exists a
constant $c>0$ such that $g_1(s)\leq c g_2(s)$ for
all $s\geq 0$.}
From (\ref{Jordan_temp}) and (\ref{SAF_c1_8}), we obtain
\begin{align}
(&D-I_n)^* A_1 + A_1 (D-I_n)\nonumber \\
&=\begin{bmatrix}
{\mathbf{0}}_{r\times r} & {\mathbf{0}}_{r\times (n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & (\underline{D}-I_{n-r})^* A+A(\underline{D}-I_{n-r}) \\
\end{bmatrix}\nonumber\\
&=\begin{bmatrix}
{\mathbf{0}}_{r\times r} & {\mathbf{0}}_{r\times (n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & -I_{n-r} \\
\end{bmatrix},\label{SAF_c1_11}
\end{align}
so (\ref{SAF_c1_10}) implies
\begin{eqnarray}\label{SAF_c1_12}
\mathbb{E}[V_1(\theta(s+1))]\leq [1+c_1 a^2(s)] \mathbb{E}[V_1(\theta(s))]+c_2 a^2(s),
\end{eqnarray}
where $c_1$ and $c_2$ are two positive constants.
Using (\ref{SAF_c1_12}) repeatedly we get
\begin{align}\label{SAF_cl_13}
\mathbb{E}[&V_1(\theta(s+1))]\\
&\leq \mathbb{E}[V_1(\theta(0))]\prod_{i=0}^s [1+c_1 a^2(i)] +\sum_{i=0}^s c_2 a^2(i)\prod_{j=i+1}^s [1+c_1 a^2(j)]\nonumber\\
&<\infty ~~~~\mbox{as}~~s\to\infty,\nonumber
\end{align}
where the last inequality uses the condition that $\sum_{s=0}^{\infty} a^2(s)<\infty$.
Also, because $A_1$ is a Hermitian positive definite matrix,
\begin{eqnarray}\label{SAF_c1_13_1}
\frac{1}{\rho(A_1)} V_1(\theta(s))\leq \|\theta(s)\|_2^2\leq \frac{1}{\lambda_{\min}(A_1)}V_1(\theta(s)).
\end{eqnarray}
Combining (\ref{SAF_cl_13}) and
(\ref{SAF_c1_13_1}) yields
\begin{eqnarray}\label{SAF_cl_14}
\sup_{s} \mathbb{E}\|\theta(s)\|_2^2\leq \sup_s \frac{\mathbb{E}[V_1(\theta(s))]}{\lambda_{\min}(A_1)}<\infty.
\end{eqnarray}
Inequality (\ref{SAF_cl_14}) shows that $\theta(s)$ does not diverge; however, we still need to prove its convergence.
We first consider the convergence of $\underline{\theta}(s)$.
Set
\begin{eqnarray*}\label{SAF_c1_15}
A_2:=
\begin{bmatrix}
{\mathbf{0}}_{r\times r} & {\mathbf{0}}_{r\times (n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & A_{(n-r)\times(n-r)} \\
\end{bmatrix}\in \mathbb{C}^{n\times n}
\end{eqnarray*}
and define $V_2(\theta):=\theta^* A_2 \theta=\underline{\theta}^* A
\underline{\theta}$. Similar to (\ref{SAF_c1_10}), we have
\begin{align}\label{SAF_c1_16}
\mathbb{E}[&V_2(\theta(s+1))]\\
&\leq \mathbb{E}\Big[V_2(\theta(s))+a(s)\theta^*(s) (D-I_n)^* A_2 \theta(s)\nonumber \\
&\quad+a(s) \theta^*(s) A_2 (D-I_n)\theta(s)+O\big(a^2(s)(\|\theta(s)\|_2^2+1)\big)\Big]\nonumber\\
&=\mathbb{E}\Big[V_2(\theta(s))+a(s)\theta^*(s)
\begin{bmatrix}
{\mathbf{0}}_{r\times r} & {\mathbf{0}}_{r\times (n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & -I_{n-r} \\
\end{bmatrix}
\theta(s)\Big]\nonumber\\
&\quad+O(a^2(s)) \nonumber\\
&\leq \Big(1-\frac{a(s)}{\rho(A)}\Big) \mathbb{E}[V_2(\theta(s))] +O\big(a^2(s)\big),\nonumber
\end{align}
where the fourth line uses (\ref{SAF_c1_11}) and (\ref{SAF_cl_14}), and
the last inequality uses a computation similar to (\ref{SAF_c1_13_1}).
By (\ref{SAF_c1_16}) and Lemma \ref{lguo} in Appendix
\ref{App_lemmas}, we obtain $\lim_{s\to\infty} \mathbb{E}[V_2(\theta(s))]=0$,
which implies
\begin{eqnarray}\label{SAF_c1_17}
\lim_{s\to\infty}\mathbb{E}\|\underline{\theta}(s)\|_2^2=0.
\end{eqnarray}
It remains to consider the convergence of $\bar{\theta}(s)$.
Set
\begin{eqnarray*}\label{SAF_c1_18}
A_3:=
\begin{bmatrix}
I_r & {\mathbf{0}}_{r\times(n-r)} \\
{\mathbf{0}}_{(n-r)\times r} & {\mathbf{0}}_{(n-r)\times(n-r)} \\
\end{bmatrix}\in \mathbb{R}^{n\times n},
\end{eqnarray*}
and define $V_3(\theta)=\theta^* A_3 \theta= \bar{\theta}^* \bar{\theta}$. By (\ref{Jordan_temp}) and (\ref{SAF_c1_5_1}) we get
$A_3 (D-I_n)=\mathbf{0}_{n\times n}$ and $A_3 v=\mathbf{0}_{n\times 1}$, thus by (A1) for any $i<j$ we have
\begin{eqnarray}\label{SAF_c1_19}
\begin{aligned}
&E\Big[[(D(i)-I_n)(\theta(i)+z)+v(i)]^* A_3 \\
&\qquad\times [(D(j)-I_n)(\theta(j)+z)+v(j)]\Big]\\
&=E\Big[[(D(i)-I_n)(\theta(i)+z)+v(i)]^* A_3 \\
&\qquad\times [(D-I_n)(\theta(j)+z)+v]\Big]=0.
\end{aligned}
\end{eqnarray}
Similarly, the equation (\ref{SAF_c1_19}) still holds for $i>j$.
From these and (\ref{SAF_c1_7}) we get for any $s_2> s\geq 0$,
\begin{align}\label{SAF_c1_20}
\mathbb{E}[V_3(\theta&(s_2)-\theta(s))] \\
&=\mathbb{E}\Big[V_3\Big(\sum_{i=s}^{s_2-1}[\theta(i+1)-\theta(i)]\Big)\Big]\nonumber\\
&=\mathbb{E}\bigg[ \Big(\sum_{i=s}^{s_2-1} a(i)[(D(i)-I_n)(\theta(i)+z)+v(i)]\Big)^* A_3\nonumber \\
&\quad\times \Big(\sum_{i=s}^{s_2-1} a(i)[(D(i)-I_n)(\theta(i)+z)+v(i)]\Big) \bigg]\nonumber\\
&=\sum_{i=s}^{s_2-1} a^2(i) \mathbb{E}\big[ [(D(i)-I_n)(\theta(i)+z)+v(i)]^* \nonumber\\
&\quad\times A_3 [(D(i)-I_n)(\theta(i)+z)+v(i)] \big]\nonumber\\
&= O\Big(\sum_{i=s}^{s_2-1}a^2(i)\Big),\nonumber
\end{align}
where the last line uses (A1) and (\ref{SAF_cl_14}). Since $\sum_{i=0}^{\infty} a^2(i)<\infty$, from
(\ref{SAF_c1_20}) we have
\begin{multline}\label{SAF_c1_21}
\lim_{s\to\infty}\lim_{s_2\to\infty} \mathbb{E}\|\bar{\theta}(s_2)-\bar{\theta}(s)\|_2^2\\
=\lim_{s\to\infty} \lim_{s_2\to\infty} \mathbb{E}[V_3(\theta(s_2)-\theta(s))]=0.
\end{multline}
By the Cauchy criterion (see~\cite[page~58]{JAH:07}),
$\bar{\theta}(s)$ has a mean square limit $\bar{\theta}(\infty)$.
Also, from (\ref{SAF_c1_7}), (A1) and (\ref{SAF_c1_5_1}) we have
\begin{align}\label{SAF_c1_22}
\mathbb{E}[A_3 \theta&(s+1)]\\
&=\mathbb{E}\big[\mathbb{E}[A_3 \theta(s+1)\,|\,\theta(s)]\big]\nonumber\\
&=\mathbb{E}\big[A_3 \theta(s)+a(s)A_3[(D-I_n)(\theta(s)+z)+v]\big]\nonumber\\
&=\mathbb{E}[A_3 \theta(s)]=\cdots= A_3 \theta(0),\nonumber
\end{align}
which implies
\begin{eqnarray}\label{SAF_c1_23}
\mathbb{E}\bar{\theta}(\infty)=\bar{\theta}(0)=\bar{y}(0)=\overline{Hx}(0).
\end{eqnarray}
We remark that $x(s)=H^{-1}[\theta(s)+z]$. Let $y$ be a vector
satisfying
$\underline{y}=\underline{z}=(I_{n-r}-\underline{D})^{-1}\underline{v}$ and
$\bar{y}=\bar{\theta}(\infty)+\bar{z}=\bar{\theta}(\infty).$ By
(\ref{SAF_c1_17}) and (\ref{SAF_c1_21}) we have that $x(s)$ converges to
$H^{-1}y$ in mean square. By (\ref{SAF_c1_23}) and (\ref{SAF_cl_14})
we get $\mathbb{E} \bar{y}=\overline{Hx}(0)$ and $\mathbb{E}\|\bar{y}\|_2^2<\infty$.
Consider now the case $\widetilde{\rho}_{\min}(P)=1$, which implies that
$I_{n-r}-\underline{D}$ is a Hurwitz matrix. Setting $b(s):=-a(s)\geq 0$ and substituting it into (\ref{SAF_c1_7}), we obtain
\begin{eqnarray*}\label{SAF_c1_24}
\theta(s+1)=\theta(s)+b(s)[(I_n-D(s))(\theta(s)+z)-v(s)].
\end{eqnarray*}
Finally, a process similar to that from (\ref{SAF_c1_8}) to
(\ref{SAF_c1_23}) yields our result.
\end{IEEEproof}
For the case when $\widetilde{\rho}_{\max}(P)<1$ or $\widetilde{\rho}_{\min}(P)>1$,
from the proof of Theorem \ref{SAF_c1}
we have the following proposition:
\begin{proposition}\label{SAF_sc}
Consider the system (\ref{SAF_m1}) satisfying (A1), (A2) and $\widetilde{\rho}_{\max}(P)<1$, or satisfying
(A1), (A2') and $\widetilde{\rho}_{\min}(P)>1$.
Then, for any initial state, $x(s)$ converges to $(I_n-P)^{-1} u$ in mean square.
\end{proposition}
\begin{IEEEproof}
We can set $r=0$ in the proof of Theorem \ref{SAF_c1}, then we obtain that $x(s)$ converges to $H^{-1}(I_n-D)^{-1}Hu=(I_n-P)^{-1}u$ in mean square.
\end{IEEEproof}
Next, we give the convergence rate when $x(s)$ is mean-square convergent.
\begin{theorem}\label{SAF_r1}(Convergence rates of linear SA algorithms)
Consider the system (\ref{SAF_m1}) satisfying (A1)
and one of the following four cases: i) $\widetilde{\rho}_{\max}(P)<1$; ii) $\widetilde{\rho}_{\min}(P)>1$; iii) $\widetilde{\rho}_{\max}(P)=1$ with (A3);
and iv) $\widetilde{\rho}_{\min}(P)=1$ with (A3).
Let $\beta>0$, $\gamma\in (\frac{1}{2},1]$, and let $\alpha$ be a sufficiently large positive constant.
Choose $a(s)=\frac{\alpha}{(s+\beta)^{\gamma}}$ if $\widetilde{\rho}_{\max}(P)\leq 1$,
and $a(s)=\frac{-\alpha}{(s+\beta)^{\gamma}}$ if $\widetilde{\rho}_{\min}(P)\geq 1$.
Then for any initial state,
\begin{multline*}
\mathbb{E}\big\|x(s)-x\big\|_2^2\\
= \begin{cases}
O(s^{-\gamma}), &\mbox{ if } \widetilde{\rho}_{\max}(P)<1 \mbox{ or } \widetilde{\rho}_{\min}(P)>1\\
O(s^{1-2\gamma}), &\mbox{ if } \widetilde{\rho}_{\max}(P)=1 \mbox{ or } \widetilde{\rho}_{\min}(P)=1
\end{cases}
\end{multline*}
where $x$ is a mean square limit of $x(s)$ whose expression is
provided by Theorem \ref{SAF_c1} and Proposition \ref{SAF_sc}.
\end{theorem}
The proof of this theorem is postponed to Appendix \ref{proof_SAF_r1}.
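A rough Monte Carlo sketch (not part of the paper) of the non-critical rate: with $\gamma=1$, made-up $P$ and $u$ as below, and $a(s)=\alpha/(s+\beta)$ with $\alpha=\beta=4$, the averaged squared error at horizon $400$ should be several times smaller than at horizon $100$, consistent with the $O(s^{-1})$ bound.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
P = np.array([[0.2, 0.3], [0.1, 0.4]])
u = np.array([1.0, 2.0])
x_star = np.linalg.solve(np.eye(2) - P, u)

def mse(horizon, trials=500, alpha=4.0):
    err = 0.0
    for _ in range(trials):
        x = np.zeros(2)
        for s in range(horizon):
            Ps = P + rng.uniform(-0.2, 0.2, size=(2, 2))
            us = u + rng.uniform(-0.2, 0.2, size=2)
            a = alpha / (s + alpha)        # a(s) = alpha/(s + beta), beta = alpha
            x = (1 - a) * x + a * (Ps @ x + us)
        err += np.sum((x - x_star) ** 2)
    return err / trials

print(mse(100), mse(400))
\end{verbatim}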
\begin{remark}
For the case when $\widetilde{\rho}_{\max}(P)<1$, there
exist results on the convergence and convergence rates of $x(s)$
provided some additional conditions hold besides (A1)-(A2). For
example, if $\lim_{s\rightarrow\infty}\frac{1}{s}\sum_{k=0}^{s-1}
\|P(k)\|_2$ exists a.s. and $\widetilde{\rho}_{\max}(P+\alpha
I_n)<1$ with $\alpha$ being a positive constant, then Theorem 2 in
\cite{VBT:04} provides sufficient and necessary conditions for the
convergence rate of $x(s)$; if $\|x(s)\|_2$ is uniformly bounded
a.s., then by the ODE method in SA theory (Theorem 5.2.1 in
\cite{HJK-GGY:97} or Theorem 2.2 in \cite{VSB-SPM:00}) we have
$x(s)$ converges to $(I_n-P)^{-1} u$ a.s. However, to the best of
our knowledge, our results in Proposition \ref{SAF_sc} and Theorem
\ref{SAF_r1} cannot be deduced from existing results without
additional conditions.
\end{remark}
\renewcommand{\thesection}{\Roman{section}}
\subsection{Necessary conditions for convergence}\label{Necessary_Conditions}
\renewcommand{\thesection}{\arabic{section}}
We first consider necessary conditions of convergence under the assumptions (A1) and (A2) or (A2'):
\begin{theorem}\label{SAF_nca}
Consider the system (\ref{SAF_m1}) satisfying (A1). Then:\\
i) If $\widetilde{\rho}_{\max}(P)>1$, or
$\widetilde{\rho}_{\max}(P)=1$ but (A3) does not hold,
there exist some initial states such that $x(s)$ is not mean-square convergent for any
$\{a(s)\}$ satisfying (A2).\\
ii) If $\widetilde{\rho}_{\min}(P)<1$, or
$\widetilde{\rho}_{\min}(P)=1$ but (A3) does not hold,
there exist some initial states such that $x(s)$ is not mean-square convergent for any
$\{a(s)\}$ satisfying (A2').
\end{theorem}
The proof of this theorem is postponed to Appendix \ref{proof_nca}.
The necessary condition of convergence in Theorem \ref{SAF_nca} has a
constraint that the gain function $\{a(s)\}$ must satisfy the
assumption (A2) or (A2'). An interesting problem is to understand what
happens if $\{a(s)\}$ are chosen as arbitrary real numbers. Obviously,
from protocol (\ref{SAF_m1}), if $\{a(s)\}$ has only finitely many non-zero
elements, then $x(s)$ converges to a random vector. Thus, for
arbitrary gains we consider only whether $x(s)$ can converge to a
deterministic vector.
Recall that
\begin{eqnarray*}
P=H^{-1} \mbox{diag}(J_1,\ldots,J_K)H=H^{-1} D H,
\end{eqnarray*}
where $H\in\mathbb{C}^{n\times n}$ is an invertible matrix, and $D$ is
the Jordan normal form of $P$. For $1\leq i\leq K$, define
\begin{eqnarray}\label{Jordan_2}
\widetilde{I}_i=\mbox{diag}(0,\ldots, I_{m_i},\ldots,0)\in\mathbb{R}^{n\times n},
\end{eqnarray}
which corresponds to the Jordan block $J_i$ and then $D\widetilde{I}_i=\mbox{diag}(0,\ldots,J_i,\ldots,0)$.
To study the necessary condition
for convergence of system (\ref{SAF_m1}), we need the following two assumptions:
\textbf{(A4)} Assume there is a Jordan block $J_{j}$ in $D$ associated with the eigenvalue $\lambda_{j'}(P)$
such that $\mbox{Re}(\lambda_{j'}(P))=1$ and
\begin{equation}\label{Assump_ag_1}
\mathbb{E}\big[\| \widetilde{I}_j H[(P(s)-P)x(s)+u(s)-u]\|_2^2\,|\,x(s) \big]
\geq c_1 \|x(s)\|_2^2+c_2
\end{equation}
for any $s\geq 0$ and $x(s)\in\mathbb{R}^n$,
where $P$, $u$, $H$, $D$ and $\widetilde{I}_j$ are defined by (A1), (\ref{Jordan}), and (\ref{Jordan_2}), and
$c_1$ and $c_2$ are constants satisfying $c_1\geq 0$, $c_2\geq 0$, and $c_1+c_2>0$.
\textbf{(A4')} Assume there are two Jordan blocks $J_{j_1}$ and $J_{j_2}$ associated with the eigenvalues
$\lambda_{j_1'}(P)$ and $\lambda_{j_2'}(P)$ respectively
such that $\mbox{Re}(\lambda_{j_1'}(P))<1<\mbox{Re}(\lambda_{j_2'}(P))$ and
(\ref{Assump_ag_1}) holds for $j=j_1, j_2$.
\begin{theorem}\label{SAF_r2}
Consider the system (\ref{SAF_m1}) satisfying (A1) and (A4) or (A4'). In addition, assume there exists a constant $c_3>0$ such that
for any $s\geq 0$ and $x(s)\in\mathbb{R}^n$,
\begin{eqnarray}\label{SAF_r2_01}
\begin{aligned}
&\mathbb{E}\big[\|(P(s)-P)x(s)+u(s)-u\|_2^2|x(s)\big]\geq c_3.
\end{aligned}
\end{eqnarray}
Then for any deterministic vector $b\in\mathbb{R}^n$, any initial state $x(0)\neq b$, and any real number sequence $\{a(s)\}_{s\geq 0}$ independent with
$\{x(s)\}_{s\geq 0}$, $x(s)$ cannot converge to $b$ in mean square.
\end{theorem}
The proof of this theorem is postponed to Appendix \ref{Proof_SAF_r2}.
If $u(s)$ is a degenerate random vector, i.e., $\mathbb{E}\|u(s)-u\|_2^2=0$, then the condition (\ref{SAF_r2_01}) may not be satisfied.
\begin{theorem}\label{SAF_r3}
Consider the system (\ref{SAF_m1}) satisfying (A1), and $\mathbb{E}[\|u(s)-u\|_2^2\,|\,x(s)]=0$
for any $s\geq 0$ and $x(s)\in\mathbb{R}^n$. Assume (A4) or (A4') holds but using
\begin{eqnarray}\label{SAF_r3_01}
\begin{aligned}
\mathbb{E}\big[\| \widetilde{I}_j H(P(s)-P)x(s)\|_2^2\,|\,x(s)\big]\geq c_1 \|x(s)\|_2^2
\end{aligned}
\end{eqnarray}
instead of (\ref{Assump_ag_1}).
For any deterministic vector $b\in\mathbb{R}^n$ and any initial state $x(0)\neq b$,
if one of the following three conditions holds:\\
i) $u\neq {\mathbf{0}}_{n\times 1}$ and $x(0)\neq {\mathbf{0}}_{n\times 1}$;\\
ii) $u\neq {\mathbf{0}}_{n\times 1}$, $x(0)={\mathbf{0}}_{n\times 1}$, and $b \neq \alpha u $ for any $\alpha\in\mathbb{R}$; or\\
iii) $u={\mathbf{0}}_{n\times 1}$, and the eigenvalues $\lambda_{j'}(P)$ in (A4), or $\lambda_{j_1'}(P)$ and $\lambda_{j_2'}(P)$ in (A4') are not real numbers,\\
then $x(s)$ cannot converge to $b$ in mean square for any real number sequence $\{a(s)\}_{s\geq 0}$ independent of
$\{x(s)\}_{s\geq 0}$.
\end{theorem}
The proof of this theorem is postponed to Appendix \ref{Proof_SAF_r3}.
\renewcommand{\thesection}{\Roman{section}}
\subsection{Necessary and sufficient conditions for convergence}\label{NScondition}
\renewcommand{\thesection}{\arabic{section}}
From Theorems \ref{SAF_c1} and \ref{SAF_nca} and Proposition \ref{SAF_sc}, the following
necessary and sufficient condition for convergence with non-negative gains is obtained immediately.
\begin{theorem}\label{SAF_sn}(Necessary and sufficient condition for convergence of linear SA algorithms with non-negative gains)
Consider the system (\ref{SAF_m1}) satisfying (A1) and (A2).
Then $x(s)$ is mean-square convergent for any initial state
if and only if $\widetilde{\rho}_{\max}(P)<1$, or $\widetilde{\rho}_{\max}(P)=1$ with (A3).
\end{theorem}
\begin{remark}
We remark that Theorem \ref{SAF_sn} is quite different from previous necessary and sufficient convergence conditions
for linear SA algorithms, which consider only the case $\widetilde{\rho}_{\max}(P)<1$ and rely on assumptions different from (A2) (Theorem 2 in \cite{HFC:96}; Theorem 1 in \cite{EC-IW-SK:99}; Theorems 1 and 2 in \cite{VBT:04}). In fact, the convergence of $x(s)$ at the critical point $\widetilde{\rho}_{\max}(P)=1$ has applications such as group consensus
over random signed networks; see Subsection \ref{subsec_gc}.
\end{remark}
Similarly, from Theorems \ref{SAF_c1} and \ref{SAF_nca} and Proposition \ref{SAF_sc}, the following
necessary and sufficient condition for convergence with non-positive gains is obtained immediately.
\begin{theorem}\label{SAF_sn2}(Necessary and sufficient condition for convergence of linear SA algorithms with non-positive gains)
Consider the system (\ref{SAF_m1}) satisfying (A1) and (A2').
Then $x(s)$ is mean-square convergent for any initial state
if and only if $\widetilde{\rho}_{\min}(P)>1$, or $\widetilde{\rho}_{\min}(P)=1$ with (A3).
\end{theorem}
\begin{remark}
Compared to Theorem 1 in \cite{CR-PF-RT-HI:15}, Theorem \ref{SAF_sn}
extends the convergence condition from $\rho(P)<1$ to a necessary
and sufficient condition. In fact, for the basic linear dynamical system
$x(s+1)=P x(s)+u$, $x(s)$ converges if and only if $\rho(P)<1$.
However, if we consider the time-varying linear dynamical system and adopt the SA
method to eliminate the effect of fluctuations, then the convergence
condition can be substantially weakened.
\end{remark}
Theorems \ref{SAF_sn} and \ref{SAF_sn2} require that the gain
function $\{a(s)\}$ satisfy assumption (A2) or (A2').
Without this constraint we can still obtain the following necessary and
sufficient condition for convergence to a deterministic vector,
at the price of some additional conditions on $\{u(s)\}$ or $\{P(s)\}$.
\begin{theorem}[Necessary and sufficient condition for convergence of linear SA algorithms with arbitrary gains]\label{SAF_r4}
Consider the system (\ref{SAF_m1}) which satisfies (A1).
Suppose there exists a constant $c\in(0,1)$ such that for any $s\geq 0$, $x(s)\in\mathbb{R}^n$,
$\xi_1,\ldots,\xi_m \in \{P_{ij}(s),1\leq i,j\leq n; u_i(s), 1\leq i\leq n\}$
and $c_1,\ldots,c_m\in\mathbb{C}$,
\begin{equation}\label{SAF_r4_01}
\mathbb{E}\Big[\Big|\sum_{i=1}^m c_i (\xi_i-\mathbb{E} \xi_i) \Big|^2\,|\,x(s)\Big]
\geq c\sum_{i=1}^m |c_i|^2 \mathbb{E}\big[(\xi_i-\mathbb{E} \xi_i)^2\,|\,x(s)\big].
\end{equation}
In addition, assume one of the following two conditions holds:\\
i) $\inf_{k,s} \mathbb{E}[(u_k(s)-u_k)^2\,|\,x(s)]>0$.\\
ii) $\mathbb{E}[\|u(s)-u\|^2\,|\,x(s)]=0$, $u\neq {\mathbf{0}}_{n\times 1}$, $x(0)\neq {\mathbf{0}}_{n\times 1}$, and $\inf_{i,j,s} \mathbb{E}[(P_{ij}(s)-P_{ij})^2\,|\,x(s)]>0$.\\
Then we can choose a real number sequence $\{a(s)\}_{s\geq 0}$ independent of
$\{x(s)\}_{s\geq 0}$ such that $x(s)$ converges to a deterministic vector different from $x(0)$ in mean square
if and only if $\widetilde{\rho}_{\max}(P)<1$ or $\widetilde{\rho}_{\min}(P)>1$.
\end{theorem}
\begin{IEEEproof}
If $\widetilde{\rho}_{\max}(P)<1$ or $\widetilde{\rho}_{\min}(P)>1$, by Proposition \ref{SAF_sc} we obtain that
$x(s)$ converges to $(I_n-P)^{-1}u$ in mean square.
For $\widetilde{\rho}_{\min}(P)\leq 1 \leq \widetilde{\rho}_{\max}(P)$, we
set $\widetilde{P}(s):=P(s)-P$ and $\widetilde{u}(s):=u(s)-u$. Define $H$ and $K$ by
(\ref{Jordan}), and define $\widetilde{I}_i$ by (\ref{Jordan_2}).
For any $j\in\{1,\ldots,K\}$,
since $H$ is an invertible matrix, $\widetilde{I}_j H$ contains at least one non-zero row $H_{j'}$.
Thus, for any
$x(s)\in\mathbb{R}^n$ we have
\begin{align}\label{SAF_r4_1}
\mathbb{E}\big[&\| \widetilde{I}_j H[\widetilde{P}(s)x(s)+\widetilde{u}(s)]\|_2^2\,|\,x(s)\big]\nonumber\\
&\geq \mathbb{E}\big[| H_{j'} [\widetilde{P}(s)x(s)+\widetilde{u}(s)]|^2\,|\,x(s)\big]\nonumber\\
&= \mathbb{E}\Big[\Big|\sum_{i,k} H_{j'i} \widetilde{P}_{ik}(s)x_k(s)+\sum_{i} H_{j' i}\widetilde{u}_i(s)\Big|^2\,|\,x(s)\Big]\nonumber\\
&\geq c\sum_{i,k} |H_{j'i}|^2 \mathbb{E}\big[\widetilde{P}_{ik}^2(s)\,|\,x(s)\big] x_k^2(s)\nonumber\\
&\quad +c\sum_{i} |H_{j' i}|^2 \mathbb{E}\big[\widetilde{u}_i^2(s)\,|\,x(s)\big],
\end{align}
where the last inequality uses (\ref{SAF_r4_01}).
If Condition i) holds, then there exists a constant $d_1>0$ such that
$\mathbb{E}\big[\widetilde{u}_i^2(s)\,|\,x(s)\big]\geq d_1$ for $s\geq 0$ and $1\leq i\leq n$.
Combining this with (\ref{SAF_r4_1}) and the assumption $\widetilde{\rho}_{\min}(P)\leq 1 \leq \widetilde{\rho}_{\max}(P)$, we obtain that (\ref{SAF_r2_01}) and (A4) or (A4') hold.
By Theorem \ref{SAF_r2}, $x(s)$ cannot converge to a deterministic vector different from $x(0)$ in mean square.
If Condition ii) holds, we have $\mathbb{E}[\|\widetilde{u}(s)\|_2^2\,|\,x(s)]=0$ and
there exists a constant $d_2>0$ such that
$\mathbb{E}\big[\widetilde{P}_{ik}^2(s)\,|\,x(s)\big]\geq d_2$ for $s\geq 0$ and $1\leq i,k\leq n$.
By (\ref{SAF_r4_1}) we obtain
\begin{align*}
\mathbb{E}\big[&\| \widetilde{I}_j H[\widetilde{P}(s)x(s)]\|_2^2\,|\,x(s)\big]\\
&\geq c d_2\sum_{i,k} |H_{j'i}|^2 x_k^2(s)= c d_2 \|x(s)\|_2^2 \sum_{i} |H_{j'i}|^2,
\end{align*}
which implies (\ref{SAF_r3_01}). By Theorem \ref{SAF_r3}\,i), $x(s)$ cannot converge to a deterministic vector different from $x(0)$ in mean square.
\end{IEEEproof}
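Condition (\ref{SAF_r4_01}) is a uniform non-degeneracy requirement on the conditional covariance of the entries of $(P(s),u(s))$: writing $\Sigma$ for that covariance matrix and $D=\mbox{diag}(\Sigma)$, it holds with constant $c$ precisely when $\Sigma\succeq cD$, i.e., when $D$ is positive definite, when the smallest eigenvalue of $D^{-1/2}\Sigma D^{-1/2}$ is at least $c$. The following Python sketch is not part of the paper; it assumes state-independent noise and i.i.d. samples of the stacked entries, and the function name and numerical values are illustrative only.
\begin{verbatim}
import numpy as np

def estimate_c(samples, eps=1e-12):
    """Estimate the best constant c in (SAF_r4_01) from i.i.d. draws.

    `samples` is an (N, d) array; each row stacks the entries of
    (P(s), u(s)) for one draw (conditioning on x(s) is dropped, i.e.
    state-independent noise is assumed).  The condition holds with
    constant c iff Sigma >= c * diag(Sigma) in the Loewner order,
    i.e. iff the normalized matrix below has smallest eigenvalue >= c.
    """
    centered = samples - samples.mean(axis=0)
    sigma = centered.T @ centered / len(samples)     # covariance estimate
    d = np.sqrt(np.clip(np.diag(sigma), eps, None))  # standard deviations
    corr = sigma / np.outer(d, d)                    # D^{-1/2} Sigma D^{-1/2}
    return np.linalg.eigvalsh(corr).min()

# Independent entries give c close to 1 (up to sampling error).
rng = np.random.default_rng(0)
print(estimate_c(rng.normal(size=(20000, 6))))
\end{verbatim}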
\renewcommand{\thesection}{\Roman{section}}
\section{Some Applications and Extension}\label{sec:application}
\renewcommand{\thesection}{\arabic{section}}
\renewcommand{\thesection}{\Roman{section}}
\subsection{Necessary and sufficient conditions for group consensus
over random signed networks and with state-dependent noise}\label{subsec_gc}
\renewcommand{\thesection}{\arabic{section}}
As discussed in the Introduction, consensus problems in multi-agent
systems have drawn a lot of attention from various fields, including
physics, biology, engineering, and mathematics, in the past two decades.
Typically, a general assumption is adopted that the interaction matrix
associated with the network is row-stochastic at every time. Recently,
motivated by the possible antagonistic interactions in social networks,
bipartite/group/cluster consensus problems have been studied over
signed networks (focusing on continuous-time dynamic models), e.g.,
see \cite{CA:13,JY-LW:10,JQ-CY:13,ZG-KMY-KHJ-MC-YH:16}.
On the other hand, SA has become an effective tool for distributed consensus to
eliminate the effects of fluctuations \cite{MYH-JHM:09,RC-GC-PF-FG:11,NEL-AO:14,TL-JFZ:10,MH:12,HT-TL:15,GL-CG:17}.
Interestingly, if we consider linear SA algorithms over random signed networks with state-dependent noise,
then from Proposition \ref{SAF_sc} and Theorems \ref{SAF_sn} and \ref{SAF_sn2} we can
obtain results on consensus and group consensus.
Assume the system contains $n$
agents. Each agent $i$ has a state $x_i(s)\in\mathbb{R}$ at time $s$,
which can represent an opinion, social power, or another quantity, and is
updated according to the current state and the interactions with the
others. Specifically, for $1\leq i\leq n$ and $s\geq 0$, the state of
agent $i$ is updated by
\begin{multline}\label{model_consensus}
x_i(s+1)=(1-a(s))x_i(s)\\
+a(s)\sum_{j\in\mathcal{N}_i(s)} P_{ij}(s)\left[x_j(s)+f_{ji}(x(s))w_{ji}(s)\right],
\end{multline}
where $a(s)\geq 0$ is the gain at time $s$, $\mathcal{N}_i(s)$ is the set of neighbors of node $i$ at time $s$, $P_{ij}(s)$ is the weight of the edge
$(j,i)$ at time $s$, and $f_{ji}(x(s)) w_{ji}(s)$ is the noise affecting the information agent $i$ receives from agent $j$ at time $s$. Here
the noise may be state-dependent, which means that $f_{ji}(x(s))$ is a function of the state vector $x(s)$.
Let $P_{ij}(s)=0$ if $j\notin\mathcal{N}_i(s)$, and set
$$u_i(s):=\sum_{j\in\mathcal{N}_i(s)} P_{ij}(s)f_{ji}(x(s))w_{ji}(s),$$
then system (\ref{model_consensus}) can be rewritten as
\begin{eqnarray*}
x(s+1)=(1-a(s))x(s)+a(s) \left[ P(s)x(s)+u(s)\right].
\end{eqnarray*}
If $P_{ij}(s)$ is a stationary stochastic process with uniformly bounded variance, and
$w_{ji}(s)$ is a zero-mean noise with uniformly bounded variance for any $x(s)$, $P_{ji}(s)$, and $j\in\mathcal{N}_i(s)$, then
(A1) is satisfied with $u={\mathbf{0}}_{n\times 1}$.
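For illustration, the following Python sketch (not part of the paper) implements one update of (\ref{model_consensus}) through the equivalent matrix form $(1-a(s))x(s)+a(s)[P(s)x(s)+u(s)]$; the particular noise model $f_{ji}(x)=0.1|x_j|$ and all numerical values in the usage example are assumptions made only for this illustration.
\begin{verbatim}
import numpy as np

def consensus_step(x, a, P_s, f, w):
    """One update of protocol (model_consensus).

    x   : current state, shape (n,)
    a   : gain a(s) >= 0
    P_s : weight matrix P(s); P_s[i, j] = 0 if j is not a neighbor of i
    f   : state-dependent noise gains, f[j, i] = f_{ji}(x)
    w   : zero-mean noise samples,      w[j, i] = w_{ji}(s)
    """
    # u_i(s) = sum_j P_ij(s) f_ji(x(s)) w_ji(s)
    u = (P_s * (f * w).T).sum(axis=1)
    return (1 - a) * x + a * (P_s @ x + u)

rng = np.random.default_rng(1)
n = 4
x = rng.normal(size=n)
P_s = rng.normal(loc=0.25, scale=0.05, size=(n, n))
f = 0.1 * np.abs(x)[:, None] * np.ones((n, n))  # assumed f_{ji}(x) = 0.1|x_j|
w = rng.normal(size=(n, n))
print(consensus_step(x, 0.5, P_s, f, w))
\end{verbatim}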
We say that the subsets $S_1,\ldots,S_{r'}$ $(r'\geq 1)$ form \emph{a partition
of} $\{1,\ldots,n\}$ if $\emptyset\subset S_i \subseteq
\{1,\ldots,n\}$ for $1\leq i\leq r'$, $S_i \cap S_j=\emptyset$ for
$i\neq j$, and $\cup_{i=1}^{r'} S_i=\{1,\ldots,n\}$. Following
\cite{JY-LW:10}, with some modifications, we introduce the definition
of group consensus:
\begin{definition}\label{def_consensus}
Let the subsets $S_1,\ldots,S_{r'}$ be a partition of
$\{1,\ldots,n\}$. If $x(s)$ is mean-square convergent, and
$\lim_{s\to\infty} \mathbb{E}|x_i(s)-x_j(s)|=0$ whenever $i$ and $j$ belong to the
same subset, then we say that $x(s)$ asymptotically reaches
$\{S_i\}_{i=1}^{r'}$-group consensus in mean square.
\end{definition}
Group consensus becomes cluster consensus if different groups have different limit values \cite{YH-WL-TC:13}.
From Definition \ref{def_consensus} it follows that consensus is the
special case of $\{S_i\}_{i=1}^{r'}$-group consensus with $r'=1$.
Before stating our results, we introduce some
notation and an assumption:
For a partition $S_1,\ldots,S_{r'}$ of $\{1,\ldots,n\}$, let $\mathds{1}^i\in\mathbb{R}^n (1\leq i\leq r')$ denote the column vector satisfying
$\mathds{1}_k^i=1$ if $k\in S_i$ and $\mathds{1}_k^i=0$ otherwise. A linear combination of $\{\mathds{1}^i\}_{i=1}^{r'}$ is $c_1 \mathds{1}^1+\ldots
+c_{r'} \mathds{1}^{r'}$ with $c_1,\ldots,c_{r'}\in\mathbb{C}$ being constants.
\textbf{(A5)}
Assume that every eigenvalue of $P$ whose real part is $1$ equals $1$,
that the algebraic and geometric multiplicities of the eigenvalue $1$ both equal $r\in[1,r']$, and that every right eigenvector of $P$ corresponding to the eigenvalue $1$
can be written as a linear combination of $\{\mathds{1}^i\}_{i=1}^{r'}$.
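Assumption (A5) can be checked numerically for a given matrix $P$ and partition. The following Python sketch is not from the paper; the function name and tolerances are illustrative, and the check is reliable mainly when the eigenvalue $1$ is semisimple, which is exactly what (A5) requires.
\begin{verbatim}
import numpy as np

def check_A5(P, groups, tol=1e-8):
    """Check (A5) up to numerical tolerance.

    groups : list of index lists S_1, ..., S_{r'} partitioning
             {0, ..., n-1}.
    """
    n = P.shape[0]
    eigvals = np.linalg.eigvals(P)
    crit = eigvals[np.abs(eigvals.real - 1) < tol]
    if np.any(np.abs(crit - 1) > tol):   # some eigenvalue with Re = 1 is not 1
        return False
    alg = len(crit)                       # algebraic multiplicity of 1
    ones = np.zeros((n, len(groups)))     # indicator vectors of the groups
    for k, S in enumerate(groups):
        ones[S, k] = 1.0
    # eigenvectors for eigenvalue 1 = null space of P - I (via SVD)
    u, s, vh = np.linalg.svd(P - np.eye(n))
    eigvecs = vh[s < tol].conj().T
    geo = eigvecs.shape[1]                # geometric multiplicity of 1
    if alg != geo or not (1 <= alg <= len(groups)):
        return False
    # do the eigenvectors lie in span{1^1, ..., 1^{r'}}?
    proj = ones @ np.linalg.lstsq(ones, eigvecs, rcond=None)[0]
    return np.allclose(proj, eigvecs, atol=1e-6)
\end{verbatim}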
With Theorems \ref{SAF_c1} and \ref{SAF_sn} and Proposition \ref{SAF_sc} we obtain the following result:
\begin{theorem}\label{SAF_con_sn1}(Necessary and sufficient condition for group consensus with non-negative gains)
Consider the system (\ref{SAF_m1}) or (\ref{model_consensus}) satisfying (A1) with $u={\mathbf{0}}_{n\times 1}$ and (A2). Let $S_1,\ldots,S_{r'}$ be a partition of $\{1,\ldots,n\}$.
Then $x(s)$ asymptotically reaches $\{S_i\}_{i=1}^{r'}$-group consensus in mean square for any initial state
if and only if $\widetilde{\rho}_{\max}(P)<1$, or (A5) holds with $\widetilde{\rho}_{\max}(P)=1$.
\end{theorem}
\begin{IEEEproof}
Before proving the result, we first introduce some notation.
For any matrix $A\in\mathbb{C}^{n\times n}$, let $A_i$ and $A^{i}$ denote the $i$-th row and $i$-th column of $A$ respectively.
Set $A^{[i,j]}=(A^i,A^{i+1},\ldots,A^j)\in\mathbb{C}^{n\times (j-i+1)}$.
We first prove the sufficiency. If $\widetilde{\rho}_{\max}(P)<1$, by Proposition \ref{SAF_sc} and the fact that $u={\mathbf{0}}_{n\times 1}$ we obtain that $x(s)$ converges to ${\mathbf{0}}_{n\times 1}$ in mean square for all initial states. Hence, the $\{S_i\}$-group consensus is reached.
Now suppose (A5) holds with $\widetilde{\rho}_{\max}(P)=1$; together with the fact that $u={\mathbf{0}}_{n\times 1}$, this implies that (A3) holds.
Let $P=H^{-1} D H$, where $H$ is an invertible matrix, and $D$ is the Jordan normal form of $P$ with the same expression as (\ref{Jordan_temp}).
Then, by Theorem \ref{SAF_c1}, for any initial state there exist random variables $y_1,\ldots,y_r$ such that
in mean square
\begin{eqnarray}\label{con_sn1_1}
x(s) \rightarrow y_1 [H^{-1}]^1+\cdots+y_r [H^{-1}]^{r}~~\mbox{as}~~s\to\infty.
\end{eqnarray}
Also, from $P H^{-1}=H^{-1} D$ and (\ref{Jordan_temp}) we have
\begin{eqnarray}\label{con_sn1_2}
P [H^{-1}]^i = [H^{-1}]^i,~~1\leq i\leq r.
\end{eqnarray}
Hence, by (\ref{con_sn1_1}) and (A5), there exist random variables $z_1,\ldots,z_{r'}$ such that in mean square
\begin{eqnarray*}\label{con_sn1_3}
x(s) \rightarrow z_1 \mathds{1}^1+\cdots+z_{r'} \mathds{1}^{r'}~~\mbox{as}~~s\to\infty,
\end{eqnarray*}
which implies that $x(s)$ asymptotically reaches $\{S_i\}_{i=1}^{r'}$-group consensus in mean square for any initial state.
Next we prove the necessity.
Since $x(s)$ asymptotically reaches $\{S_i\}_{i=1}^{r'}$-group consensus in mean square for any initial state,
by Definition \ref{def_consensus}, $x(s)$ is mean-square convergent for any initial state. Hence, by Theorem \ref{SAF_sn},
we obtain that $\widetilde{\rho}_{\max}(P)<1$, or (A3) holds with $\widetilde{\rho}_{\max}(P)=1$.
It remains to show that (A5) holds in the latter case.
For any complex right eigenvector $\mathbf{a}+\mathbf{b}i\in\mathbb{C}^n$ of $P$ corresponding to eigenvalue $1$, we have
$P \mathbf{a}=\mathbf{a}$ and $P \mathbf{b}=\mathbf{b}$, which implies that $\mathbf{a}$ and $\mathbf{b}$ are real right eigenvectors
of $P$ corresponding to eigenvalue $1$. Thus, any complex right eigenvector of $P$ corresponding to the eigenvalue $1$ can be written as a linear combination of real right eigenvectors corresponding to the eigenvalue $1$. Also, from (\ref{Jordan_temp}) we have $PH^{-1}=H^{-1} D$ if and only if
(\ref{con_sn1_2}) and $P [H^{-1}]^{[r+1,n]}=[H^{-1}]^{[r+1,n]} \underline{D}$ hold.
Thus, we can choose suitable $H$ such that $P=H^{-1} D H$ and $[H^{-1}]^1,\ldots,[H^{-1}]^{r}$ are real vectors.
By Theorem \ref{SAF_c1}, we have
\begin{eqnarray}\label{con_sn1_4}
\lim_{s\to\infty} \mathbb{E} x(s)=\sum_{i=1}^{r}H_i x(0) \cdot [H^{-1}]^i
\end{eqnarray}
Also, from $H H^{-1}=I_n$ we have $H_i [H^{-1}]^j$ equals $1$ if $i=j$ and $0$ otherwise. If we choose
$x(0)=[H^{-1}]^i$ ($1\leq i\leq r$), by (\ref{con_sn1_4}) we have
$\lim_{s\to\infty} \mathbb{E} x(s)=[H^{-1}]^i$.
Because $x(s)$ asymptotically reaches $\{S_i\}_{i=1}^{r'}$-group consensus in mean square for any initial state,
$\mathbb{E} x(s)$ also asymptotically reaches $\{S_i\}_{i=1}^{r'}$-group consensus; hence each $[H^{-1}]^i$ $(1\leq i\leq r)$ can be written as a linear combination of
$\{\mathds{1}^j\}_{j=1}^{r'}$. From the linear independence of $[H^{-1}]^1,\ldots,[H^{-1}]^{r}$
we have $r\leq r'$. Moreover, $[H^{-1}]^1,\ldots,[H^{-1}]^{r}$ is a basis of the eigenspace $\mathcal{R}^1$ consisting of all right eigenvectors of $P$ corresponding to the eigenvalue $1$ together with the zero vector. Hence, any vector in $\mathcal{R}^1$ can be written
as a linear combination of $[H^{-1}]^1,\ldots,[H^{-1}]^{r}$, and thus as a linear combination of $\{\mathds{1}^j\}_{j=1}^{r'}$.
\end{IEEEproof}
Similar to Theorem \ref{SAF_con_sn1} we have the following theorem:
\begin{theorem}\label{SAF_con_sn2}(Necessary and sufficient condition for group consensus with non-positive gains)
Consider the system (\ref{SAF_m1}) or (\ref{model_consensus}) satisfying (A1) with $u={\mathbf{0}}_{n\times 1}$ and (A2').
Let $S_1,\ldots,S_{r'}$ be a partition of $\{1,\ldots,n\}$.
Then $x(s)$ asymptotically reaches $\{S_i\}_{i=1}^{r'}$-group consensus in mean square for any initial state
if and only if $\widetilde{\rho}_{\min}(P)>1$, or (A5) holds with $\widetilde{\rho}_{\min}(P)=1$.
\end{theorem}
By Theorems \ref{SAF_con_sn1} and \ref{SAF_con_sn2} with $r'=1$, we immediately obtain the following two corollaries for consensus:
\begin{corollary}\label{SAF_con_sn3}
Consider the system (\ref{SAF_m1}) or (\ref{model_consensus}) satisfying (A1) with $u={\mathbf{0}}_{n\times 1}$ and (A2).
Then $x(s)$ asymptotically reaches consensus in mean square for any initial state
if and only if one of the following conditions holds:\\
i) $\widetilde{\rho}_{\max}(P)<1$;\\
ii) The sum of each row of $P$ equals $1$, and $P$
has $n-1$ eigenvalues whose real parts are all less than $1$.
\end{corollary}
\begin{corollary}\label{SAF_con_sn4}
Consider the system (\ref{SAF_m1}) or (\ref{model_consensus}) satisfying (A1) with $u={\mathbf{0}}_{n\times 1}$ and (A2').
Then $x(s)$ asymptotically reaches consensus in mean square for any initial state
if and only if one of the following conditions holds:\\
i) $\widetilde{\rho}_{\min}(P)>1$;\\
ii) The sum of each row of $P$ equals $1$, and $P$
has $n-1$ eigenvalues whose real parts are all greater than $1$.
\end{corollary}
The communication topology is an important aspect in the study of consensus in multi-agent systems.
In fact, our results also yield topology conditions for consensus for some special $P$.
We first introduce some definitions concerning graphs.
For a matrix $A\in \mathbb{R}^{n\times n}$ with $A_{ij}\geq 0$ for $j\neq i$,
let $\mathcal{V}=\{1,2,\ldots, n\}$ denote the set of nodes, and let $\mathcal{E}$ denote the set of edges,
where an ordered pair $(j,i)\in \mathcal{E}$ if and only if $A_{ij}>0$. The digraph associated with $A$ is defined by
$\mathcal{G}=\{\mathcal{V},\mathcal{E}\}$.
A sequence $(i_1, i_2), (i_2, i_3), \ldots, (i_{k-1}, i_k)$ of edges is called
a directed path from node $i_1$ to node $i_k$. We say that $\mathcal{G}$ contains a directed spanning tree if
there exists a root node $i$ that has a directed path to every node $j\neq i$.
We need the following lemma in our results.
\begin{lemma}[Lemma 3.3 in \cite{WR-RWB:05}]\label{Ren05}
Let $A\in \mathbb{R}^{n\times n}$ be a matrix such that, for any $i\in\mathcal{V}$, $A_{ii}\leq 0$, $A_{ij}\geq 0$ for $j\neq i$, and $\sum_{j=1}^n A_{ij}=0$. Then $A$ has at least
one zero eigenvalue and all of its non-zero eigenvalues have negative real parts. Furthermore, $A$ has exactly one zero eigenvalue
if and only if the directed graph associated with $A$ contains a directed spanning
tree.
\end{lemma}
From Corollary \ref{SAF_con_sn3} and Lemma \ref{Ren05} we have the following result.
\begin{corollary}\label{SAF_con_sn5}
Consider the system (\ref{SAF_m1}) or (\ref{model_consensus}) satisfying (A1) and (A2).
Assume that $P$ is a row-stochastic matrix and $u={\mathbf{0}}_{n\times 1}$. Then $x(s)$ asymptotically reaches consensus in mean square for any initial state
if and only if the digraph associated with $P$ contains a directed spanning
tree.
\end{corollary}
\begin{IEEEproof}
Let $A=P-I_n$ and `$\leftrightarrow$' denote the `if and only if'. The digraph associated with $P$ contains a directed spanning
tree $\leftrightarrow$ the digraph associated with $A$ contains a directed spanning
tree $\stackrel{\underleftrightarrow{Lemma ~\ref{Ren05}}}{ }$
$A$ has exactly one zero eigenvalue, and all the non-zero
eigenvalues have negative real parts $\leftrightarrow$
$P$
has $n-1$ eigenvalues whose real parts are all less than $1$ $\stackrel{\underleftrightarrow{Corollary~\ref{SAF_con_sn3}}}{ }$
$x(s)$ asymptotically reaches consensus in mean square for any initial state, where the last two `$\leftrightarrow$'
uses the hypothesis that $P$ is a row-stochastic matrix which has at least one eigenvalue that is equal to $1$.
\end{IEEEproof}
Corollary \ref{SAF_con_sn5} coincides with the consensus condition for the continuous-time consensus protocol with time-invariant interaction topology
(Theorem 3.8 in \cite{WR-RWB:05}).
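The spectral characterization used in the proof above is easy to check numerically. The sketch below (not part of the paper; names and tolerances are illustrative) tests, for a row-stochastic $P$, whether $P-I_n$ has exactly one zero eigenvalue, which by Lemma~\ref{Ren05} is equivalent to the digraph of $P$ containing a directed spanning tree.
\begin{verbatim}
import numpy as np

def has_directed_spanning_tree(P, tol=1e-8):
    """For a row-stochastic P, the digraph of P contains a directed
    spanning tree iff P - I has exactly one zero eigenvalue
    (equivalently, P has n-1 eigenvalues with real part < 1)."""
    eigvals = np.linalg.eigvals(P - np.eye(P.shape[0]))
    return np.sum(np.abs(eigvals) < tol) == 1

# A directed cycle on 4 nodes: strongly connected, so the condition holds.
P = np.roll(np.eye(4), 1, axis=1)
print(has_directed_spanning_tree(P))   # True
\end{verbatim}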
If $P$ is not a row-stochastic matrix, consensus may still be reached.
For example, let
\begin{eqnarray}\label{eq:consensusmatrix}
P:=
\begin{bmatrix}
0.5 &0.3 &0 &0.3& -0.1 \\
-0.1& 0.3 &0.3 &0 &0.5 \\
0& 0.2 &0.4& 0.5& -0.1 \\
0.1& 0& 0.6 &0.4 &-0.1 \\
0.1& -0.1& 0.1& 0.3& 0.6
\end{bmatrix}.
\end{eqnarray}
The eigenvalues of $P$ are $1$, $0.5708$, $-0.2346$, $0.4319+0.3270i$, and $0.4319-0.3270i$. By Corollary~\ref{SAF_con_sn3}, $x(s)$ asymptotically reaches consensus in mean square for any initial state.
Unlike consensus, group consensus does not require that the sum of each row of $P$ equal $1$. For example,
if
\begin{eqnarray}\label{eq:gconsensusmatrix}
P=
\begin{bmatrix}
0.3 & 0.5 & 0.5 & -0.4 \\
0.5 & 0.3 & -0.4 & 0.5 \\
-0.1 & 0.5 & 0.4 & 0.4\\
0.5 & -0.1 & 0.4 & 0.4
\end{bmatrix},
\end{eqnarray}
then $P[1,1,2,2]^\top=[1,1,2,2]^\top$, and the eigenvalues of $P$ are $1$, $0.6$, $-0.1+0.728i$, and $-0.1-0.728i$.
Let $S_1=\{1,2\}$ and $S_2=\{3,4\}$; by Theorem \ref{SAF_con_sn1},
$x(s)$ asymptotically reaches $\{S_1,S_2\}$-group consensus in mean square for any initial state.
In the following, we simulate system (\ref{SAF_m1}) to illustrate consensus and group consensus using the $P$ matrices in \eqref{eq:consensusmatrix} and \eqref{eq:gconsensusmatrix}, respectively. For $s\geq 0$, $P(s)$ and $u(s)$ are generated as i.i.d. random matrices and vectors
with means $P$ and ${\mathbf{0}}_{n\times 1}$, respectively.
We set the gain function $a(s)=\frac{1}{s}$. From Fig. \ref{fig:consensusunderSA}, we can see that consensus and group consensus are reached, as guaranteed by Corollary \ref{SAF_con_sn3} and Theorem \ref{SAF_con_sn1}, respectively.
\begin{figure}
\caption{Consensus and group consensus under the linear SA protocol (\ref{SAF_m1}).}
\label{fig:consensusunderSA}
\end{figure}
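A minimal, self-contained version of this experiment is sketched below in Python. It is not the code used for Fig.~\ref{fig:consensusunderSA}: the noise model, its scale, the horizon, the random seed, and the choice $a(s)=1/(s+1)$ (which avoids the division by zero that $a(s)=1/s$ would cause at $s=0$) are all assumptions made for this illustration.
\begin{verbatim}
import numpy as np

def simulate(P, T=20000, noise=0.1, seed=0):
    """Run protocol (SAF_m1) with i.i.d. perturbations of mean P,
    zero-mean u(s), and gains a(s) = 1/(s+1)."""
    rng = np.random.default_rng(seed)
    n = P.shape[0]
    x = rng.normal(size=n)                      # arbitrary initial state
    for s in range(T):
        a = 1.0 / (s + 1)
        P_s = P + noise * rng.normal(size=(n, n))
        u_s = noise * rng.normal(size=n)
        x = (1 - a) * x + a * (P_s @ x + u_s)
    return x

P1 = np.array([[ 0.5,  0.3,  0.0,  0.3, -0.1],
               [-0.1,  0.3,  0.3,  0.0,  0.5],
               [ 0.0,  0.2,  0.4,  0.5, -0.1],
               [ 0.1,  0.0,  0.6,  0.4, -0.1],
               [ 0.1, -0.1,  0.1,  0.3,  0.6]])
P2 = np.array([[ 0.3,  0.5,  0.5, -0.4],
               [ 0.5,  0.3, -0.4,  0.5],
               [-0.1,  0.5,  0.4,  0.4],
               [ 0.5, -0.1,  0.4,  0.4]])
print(simulate(P1))   # all entries expected to be (nearly) equal
print(simulate(P2))   # entries {1,2} and {3,4} expected to pair up
\end{verbatim}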
\subsection{An extension to multidimensional linear SA algorithms}
Our results in Section \ref{Main_results} can be extended to multidimensional linear SA algorithms in which
the state of each agent is an $m$-dimensional vector.
The dynamics are given, for all $s\geq 0$, by
\begin{equation}\label{MSAF}
X(s+1)=(1-a(s))X(s)+a(s)\big[P(s)X(s)C^\top(s)+U(s)\big],
\end{equation}
where $X(s)\in\mathbb{R}^{n\times m}$ is the state matrix, $P(s)\in\mathbb{R}^{n\times n}$ is still an interaction matrix,
$C(s)\in\mathbb{R}^{m\times m}$ is an interdependency matrix,
and $U(s)\in\mathbb{R}^{n\times m}$ is
an input matrix.
The system (\ref{MSAF}) can be transformed into the one-dimensional system (\ref{SAF_m1}) in the following way.
Given a pair of matrices $A\in\mathbb{R}^{n\times m}$, $B\in\mathbb{R}^{p\times q}$,
their Kronecker product is defined by
\begin{eqnarray*}
A\otimes B=
\begin{bmatrix}
A_{11}B & \cdots & A_{1m} B \\
\vdots & \ddots & \vdots \\
A_{n1}B & \cdots & A_{nm} B
\end{bmatrix}\in\mathbb{R}^{np\times mq}.
\end{eqnarray*}
Let $Q(s):=P(s)\otimes C(s)$.
From (\ref{MSAF}) we have
\begin{eqnarray}\label{MSAF_sc_1}
&&X_{ij}(s+1)=(1-a(s))X_{ij}(s)\\
&&~~+a(s)\Big[\sum_{k_1,k_2} P_{ik_1}(s)X_{k_1,k_2}(s)C_{jk_2}(s)+U_{ij}(s)\Big]\nonumber
\end{eqnarray}
for any $s\geq 0$, $1\leq i\leq n$, and $1\leq j\leq m$.
Let
\begin{eqnarray*}\label{MSAF_sc_2}
y(s):=(X_{11}(s),\ldots,X_{1m}(s),\ldots,X_{n1}(s),\ldots,X_{nm}(s))^\top
\end{eqnarray*}
and
\begin{eqnarray*}\label{MSAF_sc_3}
v(s):=~(U_{11}(s),\ldots,U_{1m}(s),\ldots,U_{n1}(s),\ldots,U_{nm}(s))^\top
\end{eqnarray*}
be the vectors in $\mathbb{R}^{nm}$ obtained from the matrices $X(s)$ and $U(s)$, respectively.
By (\ref{MSAF_sc_1}) we have
\begin{align*}
y&_{(i-1)m+j}(s+1)=X_{ij}(s+1)\\
&=(1-a(s))X_{ij}(s)\\
&\quad+a(s)\Big[\sum_{k_1,k_2} P_{ik_1}(s)X_{k_1,k_2}(s)C_{jk_2}(s)+U_{ij}(s)\Big]\\
&=(1-a(s))y_{(i-1)m+j}(s)+\\
&\quad a(s)\Big[\sum_{k_1,k_2}Q_{(i-1)m+j,(k_1-1)m+k_2}(s)y_{(k_1-1)m+k_2}(s)\\
&\quad\quad\quad+v_{(i-1)m+j}(s)\Big],
\end{align*}
which implies
\begin{eqnarray}\label{MSAF_sc_5}
y(s+1)=(1-a(s))y(s)+a(s)[Q(s)y(s)+v(s)].
\end{eqnarray}
The system (\ref{MSAF_sc_5}) has the same form as the system (\ref{SAF_m1}), so the results in
Section \ref{Main_results} can be applied to the multidimensional linear SA algorithms.
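The vectorization argument above can be verified numerically. The following sketch (not from the paper, with arbitrary random data) checks that the row-major vectorization of $P X C^\top+U$ equals $(P\otimes C)y+v$, which is exactly the identity behind (\ref{MSAF_sc_5}).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, m = 3, 2
P, C = rng.normal(size=(n, n)), rng.normal(size=(m, m))
X, U = rng.normal(size=(n, m)), rng.normal(size=(n, m))

lhs = (P @ X @ C.T + U).reshape(-1)              # row-major vec of the update
rhs = np.kron(P, C) @ X.reshape(-1) + U.reshape(-1)
print(np.allclose(lhs, rhs))                     # True
\end{verbatim}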
\subsection{SA Friedkin-Johnsen model over time-varying interaction network}
The Friedkin-Johnsen (FJ) model proposed by \cite{NEF-ECJ:99}
considers a community of $n$ social actors (or agents) whose opinion
column vector is $x(s)=(x_1(s),\dots, x_n(s))^\top \in\mathbb{R}^{n}$ at time
$s$. The FJ model also contains a row-stochastic matrix of
interpersonal influences $P\in\mathbb{R}^{n\times n}$ and a diagonal matrix
of actors' susceptibilities to the social influence
$\Lambda\in\mathbb{R}^{n\times n}$ with ${\mathbf{0}}_{n\times n}\leq \Lambda \leq I_n$. The state
of the FJ model is updated by
\begin{eqnarray}\label{FJ}
x(s+1)=\Lambda P x(s)+(I_n-\Lambda)x(0),~~s=0,1,\ldots.
\end{eqnarray}
By \cite{SEP-AVP-RT-NEF:17}, if ${\mathbf{0}}_{n\times n}\leq \Lambda<I_n$, then
\begin{eqnarray}\label{FJ_r1}
\lim_{s\to\infty}x(s)=(I_n-\Lambda P)^{-1}(I_n-\Lambda)x(0).
\end{eqnarray}
However, if the interpersonal influences are affected by noise, then the system (\ref{FJ}) may not converge.
The FJ model (\ref{FJ}) was extended to the multidimensional case in
\cite{SEP-AVP-RT-NEF:17,NEF-AVP-RT-SEP:16}. The multidimensional FJ model still
contains $n$ individuals, but each individual has beliefs on $m$ truth
statements. Let $X(s)\in\mathbb{R}^{n\times m}$ be the matrix of $n$
individuals' beliefs on $m$ truth statements at time $s$. Following
\cite{NEF-AVP-RT-SEP:16}, it is updated by
\begin{eqnarray}\label{MFJ}
X(s+1)=\Lambda P X(s) C^\top +(I_n-\Lambda)X(0)
\end{eqnarray}
for $s=0,1,\ldots,$ where $\Lambda,P\in\mathbb{R}^{n\times n}$ are the same
matrices in (\ref{FJ}), and $C\in\mathbb{R}^{m\times m}$ is a
row-stochastic matrix of interdependencies among the $m$ truth
statements. The convergence of system (\ref{MFJ}) has been analyzed in
\cite{SEP-AVP-RT-NEF:17}. Similar to (\ref{FJ}), it is easy to see that
if system (\ref{MFJ}) is affected by noise, then in general it will not
converge. We will adopt the stochastic-approximation method to smooth
the effects of the noise.
\begin{proposition}\label{SFJ_r2}
Consider the system
\begin{multline}\label{SFJ_r2_00}
X(s+1)=(1-a(s))X(s)+a(s)[\Lambda(s) P(s) X(s)C(s)^\top \\
+(I_n-\Lambda(s))X(0)],
\end{multline}
for $s=0,1,\ldots$, where $\Lambda(s)\in\mathbb{R}^{n\times n}$, $P(s)\in \mathbb{R}^{n\times n}$ and $C(s)\in \mathbb{R}^{m\times m}$ are independent matrix sequences
with constant expectations $\Lambda$, $P$, and $C$, respectively. Assume $\mathbb{E}\|\Lambda(s)\|_2^2$, $\mathbb{E}\|P(s)\|_2^2$, and $\mathbb{E}\|C(s)\|_2^2$ are uniformly bounded.
Suppose
$P$ and $C$ are row-stochastic matrices, ${\mathbf{0}}_{n\times n}\leq \Lambda<I_n$, and
the gain function $a(s)$ satisfies (A2).
Then for any initial state, $X(s)$ converges to $X^*$ in mean square,
where $X^*$ is the unique solution of the equation
\begin{eqnarray}\label{SFJ_r2_01}
X=\Lambda P X C^\top+(I_n-\Lambda)X(0).
\end{eqnarray}
\end{proposition}
\begin{IEEEproof}
Since $P$ and $C$ are row-stochastic matrices, $P\otimes C$ is still a row-stochastic matrix. Together with the condition that ${\mathbf{0}}_{n\times n}\leq \Lambda <I_n$,
we have that the sum of each row of $(\Lambda P)\otimes C$ is less than $1$. Thus, using the Ger\v{s}gorin Disk Theorem we obtain
$\widetilde{\rho}_{\max}((\Lambda P)\otimes C)<1$.
Let $Q:=(\Lambda P)\otimes C$, $U(s):=(I_n-\Lambda(s))X(0)$,
\begin{eqnarray*}
y(s):=(X_{11}(s),\ldots,X_{1m}(s),\ldots,X_{n1}(s),\ldots,X_{nm}(s))^\top,
\end{eqnarray*}
\begin{eqnarray*}
v(s):=(U_{11}(s),\ldots,U_{1m}(s),\ldots,U_{n1}(s),\ldots,U_{nm}(s))^\top,
\end{eqnarray*}
and $v:=\mathbb{E} v(s)$.
By Proposition \ref{SAF_sc} and the transformation from (\ref{MSAF}) to (\ref{MSAF_sc_5}), we obtain that
$y(s)$ converges to $(I_{mn}-Q)^{-1}v$ in mean square.
It remains to discuss the relation between $(I_{mn}-Q)^{-1} v$ and $X^*$.
Let
\begin{eqnarray*}\label{MSAF_sc_6}
y^*:=(X_{11}^*,\ldots,X_{1m}^*,\ldots,X_{n1}^*,\ldots,X_{nm}^*)^\top\in\mathbb{R}^{nm}.
\end{eqnarray*}
By (\ref{SFJ_r2_01}), similar to (\ref{MSAF_sc_5}) we have $y^*=Qy^*+v$, which has a unique solution
$y^*=(I_{mn}-Q)^{-1} v$ since $I_{mn}-Q$ is an invertible matrix by $\widetilde{\rho}_{\max}(Q)<1$.
Thus, with the fact that $y(s)$ converges to $(I_{mn}-Q)^{-1} v$ in mean square we obtain that $X(s)$ converges to $X^*$ in mean square.
\end{IEEEproof}
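For illustration, the following Python sketch (not part of the paper; the matrices, noise scale, horizon, seed, and the gains $a(s)=1/(s+1)$ are assumptions made only for this example) runs the SA Friedkin--Johnsen iteration (\ref{SFJ_r2_00}) and compares the result with the fixed point $X^*$ of (\ref{SFJ_r2_01}) computed by vectorization; the reported deviation is expected to be small.
\begin{verbatim}
import numpy as np

def sa_fj(Lam, P, C, X0, T=20000, noise=0.05, seed=0):
    """SA Friedkin-Johnsen iteration (SFJ_r2_00) with i.i.d. zero-mean
    perturbations of Lambda, P, C and gains a(s) = 1/(s+1)."""
    rng = np.random.default_rng(seed)
    n, m = X0.shape
    X = X0.copy()
    for s in range(T):
        a = 1.0 / (s + 1)
        Lam_s = Lam + noise * rng.normal(size=(n, n))
        P_s = P + noise * rng.normal(size=(n, n))
        C_s = C + noise * rng.normal(size=(m, m))
        X = (1 - a) * X + a * (Lam_s @ P_s @ X @ C_s.T
                               + (np.eye(n) - Lam_s) @ X0)
    return X

def fj_fixed_point(Lam, P, C, X0):
    """Unique solution of X = Lam P X C^T + (I - Lam) X0 (row-major vec)."""
    n, m = X0.shape
    Q = np.kron(Lam @ P, C)
    v = ((np.eye(n) - Lam) @ X0).reshape(-1)
    return np.linalg.solve(np.eye(n * m) - Q, v).reshape(n, m)

rng = np.random.default_rng(1)
n, m = 4, 2
P = np.full((n, n), 1.0 / n)                  # row-stochastic
C = np.full((m, m), 1.0 / m)                  # row-stochastic
Lam = np.diag(rng.uniform(0.2, 0.8, size=n))  # 0 <= Lambda < I
X0 = rng.normal(size=(n, m))
print(np.max(np.abs(sa_fj(Lam, P, C, X0) - fj_fixed_point(Lam, P, C, X0))))
\end{verbatim}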
\begin{remark}
According to Theorem \ref{SAF_c1} and Proposition \ref{SAF_sc}, the conditions on $\Lambda(s)$, $P(s)$ and $C(s)$ in Proposition \ref{SFJ_r2} can be further relaxed while preserving convergence: for example, $P$ and $C$ need not be row-stochastic, and the condition ${\mathbf{0}}_{n\times n}\leq \Lambda<I_n$ may be relaxed to allow $\Lambda<{\mathbf{0}}_{n\times n}$ or $\Lambda\geq I_n$.
\end{remark}
\renewcommand{\thesection}{\Roman{section}}
\section{Conclusion}\label{sec:conclusion}
\renewcommand{\thesection}{\arabic{section}}
In this paper, we study a time-varying linear dynamical system
whose state exhibits persistent oscillation and does
not converge. We consider a stochastic approximation-based approach and obtain necessary and sufficient conditions that guarantee
mean-square convergence. Our theoretical results largely extend the
conditions on the spectrum of the expectation of the system matrix and
thus apply to a much broader range of applications. We also
derive the convergence rate of the system. To illustrate the
theoretical results, we apply them to two different applications:
group consensus in multi-agent systems and the FJ model with
time-varying interactions in social networks.
This work leaves various problems for future research. First, the
system matrix and input are assumed to have constant expectations in
this paper. However, it would be more interesting, yet challenging, to
study systems with time-varying expectations of the system matrix and
input. Second, we only consider linear dynamical systems in this
paper. How and whether the proposed framework can be extended to
non-linear systems are important and intriguing questions. Finally, we
have illustrated our results in two different application scenarios;
there are other possible applications, such as gossip algorithms for
consensus.
\appendices
\section{}\label{App_lemmas}
\begin{lemma}\label{lguo}
Suppose the non-negative real number sequence $\{y_s\}_{s\geq 1}$ satisfies
\begin{equation}\label{lguo_00}
y_{s+1}\leq (1-a_s)y_s+b_s,
\end{equation}
where $b_s\geq 0$ and $a_s\in[0,1)$ are real numbers.
If $\sum_{s=1}^{\infty}a_s =\infty$ and $\lim_{s\to\infty} b_s/a_s=0$, then
$\lim_{s\to\infty}y_s=0$ for any $y_1\geq 0$.
\end{lemma}
\begin{IEEEproof}
Iterating (\ref{lguo_00}) we obtain
\begin{eqnarray*}\label{lguo_1}
\begin{aligned}
y_{s+1}&\leq y_1 \prod_{t=1}^{s}(1-a_t)+\sum_{i=1}^s b_i \prod_{t=i+1}^s (1-a_t).
\end{aligned}
\end{eqnarray*}
Here we define $\prod_{t=i}^s (\cdot):=1$ when $i>s$. From the hypothesis $\sum_{t=1}^{\infty} a_t=\infty$
we have $\prod_{t=1}^{\infty}(1-a_t)=0$. Thus,
to obtain $\lim_{s\to\infty}y_s=0$ we just need to prove that
\begin{eqnarray}\label{lguo_2}
\lim_{s\to\infty}\sum_{i=1}^s b_i \prod_{t=i+1}^s (1-a_t)=0.
\end{eqnarray}
Since $\lim_{s\to\infty} b_s/a_s=0$, for any real number $\varepsilon>0$, there exists an integer $s^*>0$ such that
$b_s\leq \varepsilon a_s$ when $s\geq s^*$. Thus,
\begin{align}\label{lguo_3}
\sum_{i=1}^s &b_i \prod_{t=i+1}^s (1-a_t)\\
&\leq \sum_{i=1}^{s^*-1} b_i \prod_{t=i+1}^s (1-a_t)+\sum_{i=s^*}^{s} \varepsilon a_i \prod_{t=i+1}^s (1-a_t)\nonumber\\
&=\sum_{i=1}^{s^*-1} b_i \prod_{t=i+1}^s (1-a_t)+ \varepsilon\bigg( 1-\prod_{t=s^*}^s (1-a_t)\bigg)\nonumber\\
&\rightarrow \varepsilon ~~\mbox{as}~~s\to\infty,\nonumber
\end{align}
where the equality uses the classical identity
\begin{eqnarray}\label{lguo_4}
\sum_{t=s^*}^{s} c_t \prod_{k=t+1}^{s} (1-c_k)=1-\prod_{t=s^*}^{s} (1-c_t)
\end{eqnarray}
with $\{c_t\}$ being any complex numbers, which can be obtained by induction. Here we define $\prod_{k=s_1}^{s_2}(\cdot)=1$ if $s_2<s_1$.
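For completeness, we note that (\ref{lguo_4}) also follows at once from the telescoping identity
\begin{align*}
c_t \prod_{k=t+1}^{s} (1-c_k)&=\big[1-(1-c_t)\big]\prod_{k=t+1}^{s} (1-c_k)\\
&=\prod_{k=t+1}^{s} (1-c_k)-\prod_{k=t}^{s} (1-c_k),
\end{align*}
since summing over $t=s^*,\ldots,s$ leaves $1-\prod_{t=s^*}^{s}(1-c_t)$.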
Letting $\varepsilon$ decrease to $0$, we conclude that (\ref{lguo_3}) implies (\ref{lguo_2}).
\end{IEEEproof}
\section{Proof of Theorem \ref{SAF_r1}}\label{proof_SAF_r1}
We prove this theorem under the following three cases:
\textbf{Case I:} $\widetilde{\rho}_{\max}(P)<1$. Define $\theta(s)$,
$A$ and $A_2$ as in the proof of Theorem \ref{SAF_c1} but with $r=0$.
Set $V(\theta):=\theta^* A \theta$ for any $\theta\in\mathbb{C}^n$,
where $\theta^*$ denotes the conjugate transpose of $\theta$. We
remark that $A_2=A\in\mathbb{C}^{n\times n}$ under the case $r=0$, so
that, by (\ref{SAF_c1_16}), we have
\begin{align}\label{SAF_cr_1}
\mathbb{E}[&V(\theta(s+1))]\\
&\leq \Big(1-\frac{\alpha}{\rho(A)(s+\beta)^{\gamma}}\Big) \mathbb{E}[V(\theta(s))]+O\Big(\frac{1}{(s+\beta)^{2\gamma}} \Big).\nonumber
\end{align}
Set
\begin{eqnarray*}\label{SAF_cr_2}
\Phi(s,i):=\prod_{k=i}^s \Big(1-\frac{\alpha}{\rho(A)(k+\beta)^{\gamma}}\Big)
\end{eqnarray*}
and define $\prod_{k=i}^s (\cdot):=1$ if $s<i$. We compute
\begin{align}\label{SAF_cr_3}
\Phi(s,i)& =O \bigg( \exp\Big[ -\sum_{k=i}^s \frac{\alpha}{\rho(A)(k+\beta)^{\gamma}} \Big]\bigg)\\
&=O \bigg(\exp\Big( -\int_{i}^s \frac{\alpha}{\rho(A)(k+\beta)^{\gamma}}\,dk \Big) \bigg)\nonumber\\
&=
\left\{
\begin{array}{ll}
O \Big(\big(\frac{s+\beta}{i+\beta}\big)^{-\alpha/\rho(A)}\Big), & \mbox{ if } \gamma=1, \\
O\Big(\exp\big(\frac{-\alpha}{(1-\gamma)\rho(A)}[(s+\beta)^{1-\gamma}-(i+\beta)^{1-\gamma}]\big)\Big), & \mbox{ if } \frac{1}{2}<\gamma<1
\end{array}
\right.\nonumber
\end{align}
Also, using (\ref{SAF_cr_1}) repeatedly we obtain
\begin{align}\label{SAF_cr_4}
\mathbb{E}[&V(\theta(s+1))]\\
&\leq \Phi(s,0)\mathbb{E}[V(\theta(0))]+\sum_{i=0}^s \Phi(s,i+1)O\Big( \frac{1}{(i+\beta)^{2\gamma}}\Big).\nonumber
\end{align}
Assume $\alpha\geq \rho(A)$. We first consider the case that
$\gamma=1$. From (\ref{SAF_cr_3}) and (\ref{SAF_cr_4}) we have
\begin{equation}\label{SAF_cr_5}
\mathbb{E}[V(\theta(s+1))]
= o\Big(\frac{1}{s}\Big)+O\bigg(\sum_{i=0}^s \frac{(s+\beta)^{-\alpha/\rho(A)}}{(i+\beta)^{2-\frac{\alpha}{\rho(A)}}}\bigg)=O\Big(\frac{1}{s}\Big).
\end{equation}
For the case when $\gamma\in(\frac{1}{2},1)$, we take
$b=\frac{\alpha}{(1-\gamma)\rho(A)}$, and from (\ref{SAF_cr_3}) and
(\ref{SAF_cr_4}) we can obtain
\begin{align}\label{SAF_cr_6}
\mathbb{E}[&V(\theta(s+1))]\nonumber\\
&=e^{-b(s+\beta)^{1-\gamma}}\cdot O\bigg(1+ \sum_{i=0}^s \frac{e^{b(i+\beta)^{1-\gamma}}}{(i+\beta)^{2\gamma}}\bigg)\nonumber\\
&=e^{-b(s+\beta)^{1-\gamma}}\cdot O\bigg(\sum_{i=0}^s \sum_{k=0}^{\infty} \frac{b^k (i+\beta)^{(1-\gamma)k-2\gamma}}{k!}\bigg)\nonumber\\
&=e^{-b(s+\beta)^{1-\gamma}}\cdot O\bigg(\sum_{k=0}^{\infty} \frac{b^k}{k!} \sum_{i=0}^s (i+\beta)^{(1-\gamma)k-2\gamma}\bigg)\nonumber\\
&=e^{-b(s+\beta)^{1-\gamma}}\cdot O\bigg(\sum_{k=0}^{\infty} \frac{b^k (s+\beta)^{(1-\gamma)k-2\gamma+1}}{k![(1-\gamma)k-2\gamma+1]} \bigg)\nonumber\\
&=\frac{e^{-b(s+\beta)^{1-\gamma}}}{(s+\beta)^{\gamma}}\cdot O\bigg(\sum_{k=0}^{\infty} \frac{b^{k+1} (s+\beta)^{(1-\gamma)(k+1)}}{(k+1)!} \bigg)\nonumber\\
&=O\big(s^{-\gamma} \big).
\end{align}
By (\ref{SAF_cr_5}) and (\ref{SAF_cr_6}), we have
$\mathbb{E}[V(\theta(s))]=O(s^{-\gamma})$ for $\frac{1}{2}<\gamma\leq
1$. Combining this with the definition of $\theta(s)$ yields our result.
\textbf{Case II:} $\widetilde{\rho}_{\max}(P)=1$. Let $\theta(s)$,
$\underline{\theta}(s)$, $\bar{\theta}(s)$, $\bar{\theta}(\infty)$,
$H$, $y$ and $z$ be the same variables as in the proof of Theorem
\ref{SAF_c1}. With (\ref{SAF_c1_16}) and following the similar
process from (\ref{SAF_cr_1}) to (\ref{SAF_cr_6}), we have
$\mathbb{E}\|\underline{\theta}(s)\|_2^2=O(s^{-\gamma})$. Also, from
(\ref{SAF_c1_20}) we have
\begin{align*}
\mathbb{E}\|\bar{\theta}(\infty)-\bar{\theta}(s)\|_2^2&=O\Big(\sum_{k=s}^{\infty}a^2(k)\Big) \\
&=O\Big(\sum_{k=s}^{\infty}\frac{1}{(k+\beta)^{2\gamma}}\Big)=O\Big(\frac{1}{s^{2\gamma-1}}\Big).
\end{align*}
Since $x(s)=H^{-1}[\theta(s)+z]$ and $H^{-1}y$ is a mean square limit
of $x(s)$, the arguments above imply
\begin{eqnarray*}\label{SAF_cr_8}
\begin{aligned}
\mathbb{E}\|x(s)-H^{-1}y\|_2^2&=\max\big\{O\big(s^{-\gamma}\big), O\big(s^{1-2\gamma}\big) \big\}\\
&= O\big(s^{1-2\gamma}\big).
\end{aligned}
\end{eqnarray*}
\textbf{Case III:} $\widetilde{\rho}_{\min}(P)\geq 1$. The protocol
(\ref{SAF_m1}) can be written as
\begin{eqnarray*}\label{SAF_cr_9}
x(s+1)=x(s)+\frac{\alpha}{(s+\beta)^{\gamma}}[(I_n-P(s)) x(s)-u(s)].
\end{eqnarray*}
Because $\widetilde{\rho}_{\max}(I_n-P)\leq 0$, arguments similar to
those for Cases I and II yield our result.
\section{Proof of Theorem \ref{SAF_nca}}\label{proof_nca}
i) As in Subsection \ref{sf_2}, the Jordan normal
form of $P$ is
$$D=\mbox{diag}(J_1,\ldots,J_K)=H P H^{-1}.$$ We also set $y(s):=H
x(s)$, $v(s):=H u(s)$, $D(s):=H P(s) H^{-1}$, $D=\mathbb{E} D(s)=HPH^{-1}$, and
$v=\mathbb{E} v(s)=Hu$. By (\ref{SAF_c1_5}) and (A1) we have
\begin{align}\label{SAF_nn_1}
\mathbb{E} y(s+1)&=\mathbb{E}\big[\mathbb{E}[y(s+1)\,|\,y(s)]\big]\nonumber\\
&=\mathbb{E} y(s)+a(s)[(D-I_n)Ey(s)+v].
\end{align}
Let $B(s):=I_n+a(s)(D-I_n)$. Using (\ref{SAF_nn_1}) repeatedly we obtain
\begin{equation}\label{SAF_nn_2}
\mathbb{E}[y(s+1)]
= B(s)\cdots B(0)y(0) +\sum_{t=0}^s a(t) B(s)\cdots B(t+1)v.
\end{equation}
We will continue the proof under the following two
cases:\\ \textbf{Case I}: $\widetilde{\rho}_{\max}(P)>1$. Without
loss of generality we assume $\mbox{Re}(\lambda_{1}(P))>1$. Let $J_1$
be a Jordan block in $D$ corresponding to $\lambda_{1}(P)$. Let $m_1$
be the row index of $D$ corresponding to the last line of $J_1$, i.e.,
\begin{eqnarray}\label{SAF_nn_3}
D_{m_1}=(0,\ldots,0,\lambda_{1}(P),0,\ldots,0).
\end{eqnarray}
Then by (\ref{SAF_nn_2})
\begin{align}\label{SAF_nn_4}
\mathbb{E}[&y_{m_1}(s+1)]\nonumber\\
&= y_{m_1}(0)\prod_{t=0}^s \left(1-a(t)[1-\lambda_{1}(P)]\right)+\frac{v_{m_1}}{1-\lambda_{1}(P)}\nonumber\\
&\quad \times \sum_{t=0}^s a(t)[1-\lambda_{1}(P)] \prod_{k=t+1}^s (1-a(k)[1-\lambda_{1}(P)])\nonumber\\
&= y_{m_1}(0)\prod_{t=0}^s \left(1-a(t)[1-\lambda_{1}(P)]\right)+\frac{v_{m_1}}{1-\lambda_{1}(P)}\nonumber\\
&\quad \times \Big(1- \prod_{t=0}^s \left(1-a(t)[1-\lambda_{1}(P)]\right)\Big),
\end{align}
where the last equality uses the equality (\ref{lguo_4}).
Since $\sum_{s} a(s)=\infty$,
\begin{multline}\nonumber
\prod_{t=0}^{\infty} |1-a(t)[1-\lambda_{1}(P)]|^2\\
\geq \prod_{t=0}^{\infty} \big\{1+2a(t)[\mbox{Re}(\lambda_{1}(P))-1]\big\}=\infty.
\end{multline}
Hence, from (\ref{SAF_nn_4}), if $y_{m_1}(0)\neq \frac{v_{m_1}}{1-\lambda_{1}(P)}$, then
\begin{eqnarray}\label{SAF_nn_5_1}
\lim_{s\to\infty} |\mathbb{E}[y_{m_1}(s)]|=\infty,
\end{eqnarray}
which implies $\lim_{s\to\infty}\mathbb{E}\|x(s)\|_2^2=\infty$.\\
\textbf{Case II}: $\widetilde{\rho}_{\max}(P)=1$. Under this case we consider the following three
situations:\\
(a) There is an eigenvalue $\lambda_j(P)=1+\mbox{Im}(\lambda_{j}(P))i$ with $\mbox{Im}(\lambda_{j}(P))\neq 0$, where
$\mbox{Im}(\lambda_{j}(P))$ denotes the imaginary part of $\lambda_{j}(P)$.
Similar to (\ref{SAF_nn_3}), we can choose a row $D_{j'}$ of $D$ which is equal to
$(0,\ldots,0,\lambda_{j}(P),0,\ldots,0)$. Similar to (\ref{SAF_nn_4}), we have
\begin{align}
\mathbb{E}[y_{j'}(s+1)]&= y_{j'}(0)\prod_{t=0}^s [1-a(t)[1-\lambda_{j}(P)]]\nonumber\\
& +\frac{v_{j'}}{1-\lambda_{j}(P)}\cdot \Big(1- \prod_{t=0}^s [1-a(t)[1-\lambda_{j}(P)]]\Big)\label{SAF_nn_6}.
\end{align}
We write
\begin{align*}
1-a(t)[1-\lambda_{j}(P)]&=1+a(t)\mbox{Im}(\lambda_{j}(P))i \\
&=r_t e^{i \varphi_t}=r_t(\cos \varphi_t+i\sin\varphi_t),
\end{align*}
where $r_t=\sqrt{1+a^2(t)\mbox{Im}^2(\lambda_{j}(P))}$ and
\begin{eqnarray}\label{SAF_nn_8}
\begin{aligned}
\varphi_t&=\arctan [a(t)\mbox{Im}(\lambda_{j}(P))]\\
&=a(t)\mbox{Im}(\lambda_{j}(P))+\sum_{k=1}^{\infty} \frac{(-1)^k}{2k+1}[a(t)\mbox{Im}(\lambda_{j}(P))]^{2k+1},
\end{aligned}
\end{eqnarray}
so
\begin{eqnarray}\label{SAF_nn_9}
\prod_{t=0}^s \left(1-a(t)[1-\lambda_{j}(P)]\right)=\exp\Big(i\sum_{t=0}^{s} \varphi_t\Big) \prod_{t=0}^s r_t.
\end{eqnarray}
Assume $y_{j'}(0)\neq \frac{v_{j'}}{1-\lambda_{j}(P)}$. Since
$\sum_{t=0}^{\infty}a(t)=\infty$, equations (\ref{SAF_nn_6}),
(\ref{SAF_nn_9}), and (\ref{SAF_nn_8}) imply
\begin{eqnarray}\label{SAF_nn_10}
\overline{\lim}_{s\to\infty}\overline{\lim}_{s_2\to\infty} |\mathbb{E}[y_{j'}(s_2)-y_{j'}(s)]|>0.
\end{eqnarray}
Next we consider the convergence of $x(s)$. Because $x(s)=H^{-1}
y(s)$, using Jensen's inequality we have
\begin{align}
\mathbb{E}\|x(s_2)-x(s)\|_2^2&=\mathbb{E}\|H^{-1}[y(s_2)-y(s)]\|_2^2 \nonumber\\
&\geq \sigma_n^2(H^{-1}) \mathbb{E}\|y(s_2)-y(s)\|_2^2 \nonumber\\
&\geq \sigma_n^2(H^{-1}) \mathbb{E}|y_{j'}(s_2)-y_{j'}(s)|^2 \nonumber\\
&\geq \sigma_n^2(H^{-1}) |\mathbb{E}[y_{j'}(s_2)-y_{j'}(s)]|^2\label{SAF_nn_11},
\end{align}
where $\sigma_n(H^{-1})=\inf_{\|x\|_2=1} \|H^{-1} x\|_2$ denotes the
least singular value of $H^{-1}$. Because $H^{-1}$ is invertible, we
have $\sigma_n(H^{-1})>0$. Hence, by (\ref{SAF_nn_10}) and
(\ref{SAF_nn_11}), we obtain
\begin{eqnarray*}\label{SAF_nn_12}
\overline{\lim}_{s\to\infty}\overline{\lim}_{s_2\to\infty} \mathbb{E}\|x(s_2)-x(s)\|_2^2>0.
\end{eqnarray*}
By the Cauchy criterion (see~\cite[page~58]{JAH:07}), $x(s)$ is not mean
square convergent.\\ (b) The geometric multiplicity of the eigenvalue
$1$ is less than its algebraic multiplicity. By (a), we only need to
consider the case when any eigenvalue of $P$ with $1$ as real part has
zero imaginary part. Thus, the Jordan normal form $D$ contains a
Jordan block
\begin{eqnarray*}\label{SAF_nn_13}
J_j=
\begin{bmatrix}
1 & 1 & & \\
& 1 & \ddots & \\
& & \ddots & 1 \\
& & & 1
\end{bmatrix}_{m_j\times m_j}
\end{eqnarray*}
with $m_j\geq 2$. Let $j'$ be the row index of $D$ corresponding to
the second line from the bottom of $J_{j}$. It can be computed that
\begin{eqnarray*}\label{SAF_nn_14}
[B(s)\cdots B(t)]_{j',j'+1}=\sum_{k=t}^s a(k).
\end{eqnarray*}
Since $\sum_{k=0}^{\infty}a(k)=\infty$, from (\ref{SAF_nn_2}), there
are some initial states such that $\lim_{s\to\infty}
|\mathbb{E}[y_{j'}(s)]|=\infty,$ which implies
$\lim_{s\to\infty}\mathbb{E}\|x(s)\|_2^2=\infty$.\\ (c) There is a left
eigenvector $\xi^T$ of $P$ corresponding to the eigenvalue
$1$ such that $\xi^T u\neq 0$. By (\ref{SAF_m1}) and (A1) we have
\begin{align*}
\xi^T \mathbb{E} x(s+1)&=(1-a(s))\xi^T \mathbb{E} x(s)+a(s)[ \xi^T P \mathbb{E} x(s)+\xi^T u]\\
&=\xi^T \mathbb{E} x(s)+a(s)\xi^T u\\
&=\cdots=\xi^T x(0)+ \sum_{k=0}^s a(k) \xi^T u ,
\end{align*}
which implies $\lim_{s\to\infty} \mathbb{E}\|x(s)\|_2^2=\infty$ by $\sum_{k=0}^{\infty} a(k)=\infty$.\\
ii) This can be obtained by arguments similar to those in i).
\section{Proof of Theorem \ref{SAF_r2}}\label{Proof_SAF_r2}
We prove our result by contradiction: Suppose that there exists a real
number sequence $\{a(s)\}_{s\geq 0}$ independent of $\{x(s)\}$ such
that
\begin{eqnarray}\label{SAF_r2_1}
\lim_{s\to\infty} \mathbb{E}\big\|x(s)-b\big\|_2^2=0.
\end{eqnarray}
We assert that $\lim_{s\to\infty} a(s)=0$. This assertion is also proved by contradiction:
Assume that there exists a subsequence $\{a(s_k)\}_{k\geq 0}$ which does not converge to zero.
Let
$\widetilde{P}(s):=P(s)-P$ and $\widetilde{u}(s)=u(s)-u$ for any $s\geq 0$, then by (\ref{SAF_m1}), (A1) and (\ref{SAF_r2_01}) we have
\begin{align}
\mathbb{E}\Big[&\big\|x(s_k+1)-b\big\|_2^2\,|\,x(s_k)\Big]\nonumber\\
&=\mathbb{E}\Big[\big\|\xi+a(s_k)\big(\widetilde{P}(s_k) x(s_k)+\widetilde{u}(s_k) \big)\big\|_2^2\,|\,x(s_k)\Big]\nonumber\\
&=\|\xi\|_2^2 +a^2(s_k)\mathbb{E}\Big[\big\|\widetilde{P}(s_k) x(s_k)+\widetilde{u}(s_k)\big\|_2^2\,|\,x(s_k)\Big]\nonumber\\
&\geq a^2(s_k) c_3\label{SAF_r2_2},
\end{align}
where $$\xi:=(1-a(s_k))x(s_k)+a(s_k)(Px(s_k)+u)-b.$$ From
(\ref{SAF_r2_2}) we know that $\mathbb{E}\|x(s_k+1)-b\|_2^2$ will not converge
to $0$ as $k$ grows to infinity, which is in contradiction with
(\ref{SAF_r2_1}).
Since $x(0)\neq b$, to guarantee the convergence of $x(s)$, the gain
sequence $\{a(s)\}_{s\geq 0}$ must contain at least one non-zero element. Moreover,
from (\ref{SAF_r2_2}), the number of non-zero
elements in the sequence $\{a(s)\}_{s\geq 0}$ must be infinite. Thus,
together with the assertion that $\lim_{s\to\infty} a(s)=0$,
there exists an integer $s^*> 0$ such that $a(s^*-1)\neq 0$,
$\{a(i)\}_{i=0}^{s^*-2}$ contains at least one non-zero element, and
\begin{eqnarray}\label{SAF_r2_3_1}
2|a(s)[1-\mbox{Re}(\lambda_j(P))]|<1, ~\forall s\geq s^*, 1\leq j\leq n.
\end{eqnarray}
Let $A(s):=(1-a(s))I_n+ a(s)P(s)$. By (\ref{SAF_m1}) we have
\begin{eqnarray*}\label{SAF_r2_3_2}
x(s+1)=A(s) x(s)+a(s)u(s),~~s\geq s^*.
\end{eqnarray*}
By (A1), we obtain
\begin{align*}
\mathbb{E}[&x(s+1)\,|\,x(s^*)]-(I_n-P)^{-1} u\\
&=\big[I_n- a(s)(I_n-P)\big] \mathbb{E}[x(s)\,|\,x(s^*)]\\
&\quad+a(s) u -(I_n-P)^{-1} u\nonumber\\
&=\big[I_n-a(s)(I_n-P)\big] \big(\mathbb{E}[x(s)\,|\,x(s^*)]-(I_n-P)^{-1} u\big)\nonumber\\
&=\cdots=\Big(\prod_{k=s^*}^s \mathbb{E}[A(k)]\Big) \big(x(s^*)-(I_n-P)^{-1} u\big),\nonumber
\end{align*}
which implies
\begin{align}\label{SAF_r2_4}
\mathbb{E}[x(s+1)|x(s^*)]&=H^{-1} \Big(\prod_{i=s^*}^s [I_n -a(i)(I_n-D)]\Big) \\
&\quad \times H \big(x(s^*)-(I_n-P)^{-1} u\big)+(I_n-P)^{-1} u\nonumber
\end{align}
from (\ref{Jordan}). Set
\begin{align}
z(s):= &\Big(\prod_{i=s^*}^s [I_n -a(i)(I_n-D)]\Big) H\nonumber\\
&\cdot\big(x(s^*)-(I_n-P)^{-1} u\big)+H(I_n-P)^{-1}u-H b.\label{SAF_r2_5}
\end{align}
Using Jensen's inequality and (\ref{SAF_r2_4}) we have
\begin{align}
\mathbb{E}\big[ \|x(s+1)-b\|_2^2 \,|\,x(s^*)\big]&\geq \big\| \mathbb{E}\big[(x(s+1)-b)\,|\,x(s^*)\big] \big\|_2^2 \nonumber\\
&= \big\| \mathbb{E}[x(s+1)\,|\,x(s^*)]-b \big\|_2^2\label{SAF_r2_6}\\
&=\| H^{-1} z(s) \|_2^2 \geq \sigma_n^2(H^{-1}) \| z(s) \|_2^2\nonumber,
\end{align}
where $\sigma_n(H^{-1})=\inf_{\|x\|_2=1} \|H^{-1} x\|_2$ denotes the least singular value of $H^{-1}$. Because $H^{-1}$ is invertible,
$\sigma_n(H^{-1})>0$.
Define
\begin{equation}\label{SAF_r2_6_1}
w_j(s):=\prod_{i=s^*}^s (1-a(i)[1-\lambda_j(P)])
\end{equation}
and
\begin{equation}\label{SAF_r2_6_2}
M_{j}:=\prod_{i=s^*}^{\infty} [I_{m_j} -a(i)(I_{m_j}-J_{j})].
\end{equation}
We can compute that
\begin{align*}
|w_j(s)|^2&=\prod_{i=s^*}^s |1-a(i)[1-\lambda_j(P)]|^2\\
&=\prod_{i=s^*}^s \big\{1-2a(i)[1-\mbox{Re}(\lambda_j(P))]\\
&\quad+a^2(i)[1-2\mbox{Re}(\lambda_j(P))+|\lambda_j(P)|^2] \big\}.
\end{align*}
From this and (\ref{SAF_r2_3_1}) we have $w_j(s)\neq 0$ for any finite $s$. Also, if
$w_j(\infty)=0$, then $[1-\mbox{Re}(\lambda_j(P))]\sum_{i=s^*}^{\infty} a(i)=\infty$.
Hence, by (A4) or (A4'), there exists a Jordan block $J_{j_1}$ associated with the eigenvalue
$\lambda_{j_1'}(P)$ such that
$w_{j_1'}(\infty)\neq 0$ and (\ref{Assump_ag_1}) holds.
Because $M_{j_1}$ is an upper triangular matrix whose diagonal elements are all $w_{j_1'}(\infty)\neq 0$,
we can obtain the least singular value
\begin{equation*}\label{SAF_r2_9}
\sigma_{m_{j_1}}(M_{j_1})>0.
\end{equation*}
Also, by (\ref{SAF_r2_6}) and (\ref{Jordan_2}), we obtain
\begin{align}
\mathbb{E}\|x(\infty)-b\|_2^2&=\mathbb{E}\big[\mathbb{E}[ \|x(\infty)-b\|_2^2 \,|\,x(s^*)]\big]\nonumber\\
&\geq \sigma_n^2(H^{-1}) \mathbb{E} \| z(\infty) \|_2^2\nonumber\\
&\geq \sigma_n^2(H^{-1}) \mathbb{E} \| z(\infty)-\mathbb{E} z(\infty) \|_2^2\nonumber\\
&\geq \sigma_n^2(H^{-1}) \mathbb{E} \big\|\widetilde{I}_{j_1}[ z(\infty)-\mathbb{E} z(\infty)] \big\|_2^2\nonumber\\
&= \sigma_n^2(H^{-1}) \mathbb{E} \Big\|\widetilde{I}_{j_1}\Big(\prod_{i=s^*}^{\infty} [I_n -a(i)(I_n-D)]\Big)\nonumber\\
&\quad\times H\big(x(s^*)- \mathbb{E} x(s^*)\big) \Big\|_2^2\nonumber\\
&= \sigma_n^2(H^{-1}) \mathbb{E} \big\|M_{j_1} \widetilde{I}_{j_1} H\big(x(s^*)- \mathbb{E} x(s^*)\big) \big\|_2^2\nonumber\\
&\geq \sigma_n^2(H^{-1}) \sigma_{m_{j_1}}^2(M_{j_1}) \nonumber\\
&\quad\times\mathbb{E} \big\|\widetilde{I}_{j_1} H\big(x(s^*)- \mathbb{E} x(s^*)\big) \big\|_2^2.\label{SAF_r2_10}
\end{align}
Using (\ref{SAF_m1}) and (\ref{Assump_ag_1}) we have
\begin{align}
\mathbb{E} &\left\{ \big\| \widetilde{I}_{j_1} H (x(s^*)-\mathbb{E} x(s^*))\big\|_2^2 \,|\,x(s^*-1)\right\}\nonumber\\
&\geq a^2(s^*-1) \mathbb{E} \Big\{\big\|\widetilde{I}_{j_1} H\widetilde{P}(s^*-1)x(s^*-1)\nonumber\\
&\quad+\widetilde{I}_{j_1} H \widetilde{u}(s^*-1) \big\|_2^2\,|\,x(s^*-1)\Big\} \nonumber\\
&\geq a^2(s^*-1)\left(c_1 \|x(s^*-1)\|_2^2 + c_2\right).\label{SAF_r2_11}
\end{align}
Because $c_1$ and $c_2$ cannot be zero at the same time, we consider the case when $c_2>0$ first.
With the fact that $a(s^*-1)\neq 0$ and (\ref{SAF_r2_11}) we obtain
\begin{align*}
\mathbb{E} &\big\| \widetilde{I}_{j_1} H (x(s^*)-\mathbb{E} x(s^*))\big\|_2^2\\
&=\mathbb{E} \left\{ \mathbb{E} \big\| \widetilde{I}_{j_1} H (x(s^*)-\mathbb{E} x(s^*))\big\|_2^2 \,|\,x(s^*-1)\right\}>0.
\end{align*}
Substituting this into (\ref{SAF_r2_10}) yields
$\mathbb{E}\|x(\infty)-b\|_2^2>0$, which contradicts
(\ref{SAF_r2_1}).
For the case when $c_1>0$, by (\ref{SAF_r2_10}) and (\ref{SAF_r2_11}),
we have
\begin{align}
\mathbb{E}&\|x(\infty)-b\|_2^2\nonumber\\
&\geq \sigma_n^2(H^{-1}) \sigma_{m_{j_1}}^2(M_{j_1})\,
\mathbb{E} \big\|\widetilde{I}_{j_1} H\big(x(s^*)- \mathbb{E} x(s^*)\big) \big\|_2^2\nonumber\\
&\geq \sigma_n^2(H^{-1}) \sigma_{m_{j_1}}^2(M_{j_1}) a^2(s^*-1) c_1 \mathbb{E}\|x(s^*-1)\|_2^2.\label{SAF_r2_13}
\end{align}
Because $\{a(i)\}_{i=0}^{s^*-2}$ contains non-zero elements, we let $s'$ be the largest index such that $s'\leq s^*-2$
and $a(s')\neq 0$. By (\ref{SAF_r2_2}) we have
\begin{align*}
\mathbb{E}\big\|x(s^*-1)\big\|_2^2&=\mathbb{E}\big\|x(s'+1)\big\|_2^2 \nonumber\\
&\geq a^2(s')\mathbb{E}\Big[\mathbb{E}\big[\big\|\widetilde{P}(s') x(s')+\widetilde{u}(s')\big\|_2^2\,|\,x(s')\big]\Big]\nonumber\\
&\geq a^2(s') c_3>0.
\end{align*}
Substituting this into (\ref{SAF_r2_13}), we get $\mathbb{E}\|x(\infty)-b\|_2^2>0$, which
contradicts (\ref{SAF_r2_1}).
\renewcommand{\thesection}{\Roman{section}}
\section{Proof of Theorem \ref{SAF_r3}}\label{Proof_SAF_r3}
\renewcommand{\thesection}{\arabic{section}}
Similar to the proof of Theorem \ref{SAF_r2}, we prove our result by contradiction: Suppose that there exists a real number sequence $\{a(s)\}_{s\geq 0}$ independent of $\{x(s)\}$ such that (\ref{SAF_r2_1}) holds. Since $x(0)\neq b$, by (\ref{SAF_r2_1}) $\{a(s)\}_{s\geq 0}$ must contain non-zero elements.
We consider the following three cases respectively to deduce the contradiction:\\
\textbf{Case I}: The condition i) is satisfied. Similar to the proof of Theorem \ref{SAF_r2}, we
first prove $\lim_{s\to\infty} a(s)=0$ by contradiction: Suppose there exists a subsequence $\{a(s_k)\}$ that does not converge to zero.
For the case when $b\neq {\mathbf{0}}_{n\times 1}$, by (\ref{SAF_r2_1}), there exists a time $s_1\geq 0$ such that
\begin{equation}\label{SAF_r3_1}
\mathbb{E}\big\|x(s)-b\big\|_2^2 \leq \frac{1}{4}\|b \|_2^2,~~\forall s>s_1.
\end{equation}
Because for any $x(s_k)$,
\begin{align*}
\|b\|_2^2 & \leq \big(\|x(s_k)\|_2+ \big\|b-x(s_k)\big\|_2\big)^2\\
& \leq 2\big(\|x(s_k)\|_2^2+ \big\|b-x(s_k)\big\|_2^2\big),
\end{align*}
(\ref{SAF_r3_1}) is followed by
\begin{equation}\label{SAF_r3_3}
\mathbb{E}\|x(s_k)\|_2^2 \geq \frac{1}{2}\|b\|_2^2- \mathbb{E}\big\|x(s_k)-b\big\|_2^2 \geq \frac{1}{4}\|b\|_2^2
\end{equation}
for large $k$. By (\ref{SAF_r2_2}), (\ref{SAF_r3_01}) and (\ref{SAF_r3_3}) we obtain
\begin{align}
\mathbb{E}\big\|x(s_k+1)-b\big\|_2^2 &\geq a^2(s_k) \mathbb{E}\| \widetilde{P}(s_k) x(s_k)\|_2^2\nonumber\\
&= a^2(s_k) \mathbb{E}\|H^{-1} H \widetilde{P}(s_k) x(s_k)\|_2^2\nonumber\\
&\geq a^2(s_k) \sigma_n^2(H^{-1}) \mathbb{E}\| H \widetilde{P}(s_k) x(s_k)\|_2^2\nonumber\\
&\geq a^2(s_k) \sigma_n^2(H^{-1}) c_1 \mathbb{E}\|x(s_k)\|_2^2\nonumber\\
&\geq \frac{1}{4}a^2(s_k) \sigma_n^2(H^{-1}) c_1\|b\|_2^2,\label{SAF_r3_4}
\end{align}
which is contradictory with (\ref{SAF_r2_1}).
For the case when $b={\mathbf{0}}_{n\times 1}$, by (\ref{SAF_r2_2}) and (\ref{SAF_r3_4}), we have
\begin{small}
\begin{align}
\mathbb{E}\big\|x(s_k+1)\big\|_2^2&\geq \mathbb{E}\|(1-a(s_k))x(s_k)+a(s_k)(Px(s_k)+u)\|_2^2 \nonumber\\
&\quad+a^2(s_k) \sigma_n^2(H^{-1}) c_1 \mathbb{E}\|x(s_k)\|_2^2\label{SAF_r3_5}.
\end{align}
\end{small}
If $\|(1-a(s_k))I_n+a(s_k)P\|_2 \mathbb{E}\|x(s_k)\|_2 > \frac{1}{2}\|a(s_k)u\|_2$, by (\ref{SAF_r3_5}) and Jensen's inequality we have
\begin{small}
\begin{align}\label{SAF_r3_6}
\mathbb{E}\big\|x(s_k+1)\big\|_2^2&\geq a^2(s_k) \sigma_n^2(H^{-1}) c_1 (\mathbb{E}\|x(s_k)\|_2)^2\nonumber\\
&\geq \frac{a^4(s_k) \sigma_n^2(H^{-1}) c_1 \|u\|_2^2}{4 \|(1-a(s_k))I_n+a(s_k)P\|_2^2}\nonumber\\
& \nrightarrow 0 ~\mbox{if}~ a(s_k)\nrightarrow 0.
\end{align}
\end{small}
Otherwise,
\begin{align*}
\mathbb{E}&\|(1-a(s_k))x(s_k)+a(s_k)(Px(s_k)+u)\|_2\nonumber\\
&\geq \|a(s_k)u\|_2-\mathbb{E}\|(1-a(s_k))x(s_k)+a(s_k)Px(s_k)\|_2 \nonumber\\
&\geq \|a(s_k)u\|_2-\|(1-a(s_k))I_n+a(s_k)P\|_2\, \mathbb{E}\|x(s_k)\|_2\nonumber\\
&\geq \frac{1}{2}\|a(s_k)u\|_2.
\end{align*}
Hence, using (\ref{SAF_r3_5}) and Jensen's inequality again, we obtain
\begin{small}
\begin{align}\label{SAF_r3_8}
\mathbb{E}\big\|x(s_k+1)\big\|_2^2&\geq \big(\mathbb{E}\|(1-a(s_k))x(s_k)+a(s_k)(Px(s_k)+u)\|_2\big)^2 \nonumber\\
&\geq \|a(s_k)u\|_2^2/4.
\end{align}
\end{small}
Combining (\ref{SAF_r3_6}) and (\ref{SAF_r3_8}) shows that
$\mathbb{E}\big\|x(s_k+1)\big\|_2^2$ does not converge to zero,
which contradicts (\ref{SAF_r2_1}). Summarizing the
arguments above, we have proved the assertion that
$\lim_{s\to\infty}a(s)=0$.
Because $\lim_{s\to\infty}a(s)=0$ and because $\{a(s)\}_{s\geq 0}$
contains non-zero elements, there exists an integer $s^*>0$ such that
$a(s^*-1)\neq 0$ and (\ref{SAF_r2_3_1}) holds. Define $w_j(s)$ and
$M_{j}$ by (\ref{SAF_r2_6_1}) and (\ref{SAF_r2_6_2}) respectively.
By arguments similar to those in the proof of Theorem \ref{SAF_r2}, we can
find a Jordan block $J_{j_1}$ associated with the eigenvalue
$\lambda_{j_1'}(P)$ such that $w_{j_1'}(\infty)\neq 0$ and
(\ref{SAF_r3_01}) holds. Similar to (\ref{SAF_r2_13}) we obtain
\begin{small}
\begin{equation}\label{SAF_r3_9}
\mathbb{E}\|x(\infty)-b\|_2^2\geq \sigma_n^2(H^{-1}) \sigma_{m_{j_1}}^2(M_{j_1}) a^2(s^*-1) c_1 \mathbb{E}\|x(s^*-1)\|_2^2.
\end{equation}
\end{small}
By (\ref{SAF_r3_5}) we have that if $\mathbb{E}\|x(s)\|_2^2>0$, then
$\mathbb{E}\|x(s+1)\|_2^2>0$ for any $a(s)\in\mathbb{R}$. Then with the condition
$x(0)\neq {\mathbf{0}}_{n\times 1}$, we have $\mathbb{E}\|x(s^*-1)\|_2^2>0$. Using this and
(\ref{SAF_r3_9}) we get $\mathbb{E}\|x(\infty)-b\|_2^2>0$, which is
contradictory with (\ref{SAF_r2_1}).
\textbf{Case II}: The condition ii) is satisfied. Since
$\{a(s)\}_{s\geq 0}$ contains non-zero elements, we define $s_1$ to be
the first $s$ such that $a(s)\neq 0$. Then $x(s_1+1)=a(s_1)u\neq b$
almost surely. Let $s_1+1$ be the initial time and by the same
arguments as in Case I we obtain $\mathbb{E}\|x(\infty)-b\|_2^2>0$.
\textbf{Case III}: The condition iii) is satisfied. If $x(0)={\mathbf{0}}_{n\times 1}$,
we obtain $\mathbb{E}\|x(s)\|_2^2=0$ for any $s\geq 0$, which is contradictory
with (\ref{SAF_r2_1}). Thus, we just need to consider the case when
$x(0)\neq {\mathbf{0}}_{n\times 1}$. Since $\{a(s)\}_{s\geq 0}$ contains non-zero
elements, we define $s_1$ to be the first $s$ such that $a(s)\neq 0$.
Set $s^*:=s_1+1$. Define $w_j(s)$ and $M_{j}$ by (\ref{SAF_r2_6_1})
and (\ref{SAF_r2_6_2}) respectively. If $\lambda_j(P)$ is not a real
number, then $w_{j}(s)$ cannot be equal to $0$ for any finite $s$. By the
similar arguments as in the proof of Theorem \ref{SAF_r2}, there exists a
Jordan block $J_{j_1}$ associated with the eigenvalue
$\lambda_{j_1'}(P)$ such that $w_{j_1'}(\infty)\neq 0$ and
(\ref{SAF_r3_01}) holds. By (\ref{SAF_r3_9}) we have
\begin{small}
\begin{align*}
\mathbb{E}\|x(\infty)-b\|_2^2
&\geq \sigma_n^2(H^{-1}) \sigma_{m_{j_1}}^2(M_{j_1}) a^2(s^*-1) c_1 \mathbb{E}\|x(s^*-1)\|_2^2\\
&=\sigma_n^2(H^{-1}) \sigma_{m_{j_1}}^2(M_{j_1}) a^2(s_1) c_1 \|x(0)\|_2^2>0,
\end{align*}
\end{small}
which is contradictory with (\ref{SAF_r2_1}).
\ifCLASSOPTIONcaptionsoff
\fi
\end{document}
\begin{document}
\title{A weighted bilateral shift with cyclic square is supercyclic}
\author{Stanislav Shkarin}
\date{}
\maketitle
\begin{abstract} It is shown that for a bounded weighted bilateral
shift $T$ acting on $\ell_p({\mathbb Z})$ for $1\leqslant p\leqslant 2$, supercyclicity
of $T$, weak supercyclicity of $T$, cyclicity of $T\oplus T$, and
cyclicity of $T^2$ are equivalent. A new sufficient condition for
cyclicity of a weighted bilateral shift is proved, which implies, in
particular, that any compact weighted bilateral shift is cyclic.
\end{abstract}
\small \noindent{\bf MSC:} \ \ 47A16, 37A25
\noindent{\bf Keywords:} \ \ Cyclicity, Supercyclicity,
Hypercyclicity, Quasisimilarity, Weighted bilateral shifts
\normalsize
\section{Introduction \label{s1}}\rm
Throughout\symbolfootnote[0]{Partially supported by Plan Nacional
I+D+I grant no. MTM2006-09060 and Junta de Andaluc\'{\i}a FQM-260
and British Engineering and Physical Research Council Grant
GR/T25552/01.} this article all vector spaces are supposed to be
over the field ${\mathbb C}$ of complex numbers, ${\mathbb Z}$ is the set of integers,
${\mathbb Z}_+$ is the set of non-negative integers and ${\mathbb N}$ is the set of
positive integers. As usual, symbol $L({\cal B})$ stands for the space of
bounded linear operators on a Banach space ${\cal B}$ and ${\cal B}^*$ is the
space of continuous linear functionals on ${\cal B}$.
For $w\in\ell_\infty({\mathbb Z})$ and $1\leqslant p\leqslant\infty$, $T_{w,p}$ stands
for the bounded linear operator acting on $\ell_p({\mathbb Z})$ if $1\leqslant
p<\infty$ or $c_0({\mathbb Z})$ if $p=\infty$, defined on the canonical basis
$\{e_n\}_{n\in{\mathbb Z}}$ by
$$
T_{w,p}e_n=w_ne_{n-1}\qquad \text{for\ \ }n\in{\mathbb Z}.
$$
If additionally $w_n\neq 0$ for each $n\in{\mathbb Z}$, the operator
$T_{w,p}$ is called the {\it weighted bilateral shift} with the
weight sequence $w$. In the particular case $w_n\equiv1$ we have the
{\it unweighted bilateral shift}.
Recall that a bounded linear operator $T$ on a Banach space ${\cal B}$ is
called {\it cyclic} if there exists $x\in {\cal B}$ such that
$\hbox{\tt span}\,\{T^nx:n\in{\mathbb Z}_+\}$ is dense in ${\cal B}$. $T$ is called {\it
supercyclic} if there is $x\in {\cal B}$ for which $\{\lambda
T^nx:\lambda\in{\mathbb C},\ n\in{\mathbb Z}_+\}$ is dense in ${\cal B}$. Similarly $T$ is
called {\it hypercyclic} if there is $x\in {\cal B}$ such that the orbit
$\{T^nx:n\in{\mathbb Z}_+\}$ is dense in ${\cal B}$. Finally $T$ is called {\it
weakly supercyclic} or {\it weakly hypercyclic} if the density is
required with respect to the weak topology. We refer to surveys
\cite{ge1,ge2,msa} for additional information on hypercyclicity and
supercyclicity. One of the attractive features of weakly supercyclic
operators is that all their powers are again weakly supercyclic and
therefore cyclic. For norm supercyclicity this statement was proved
by Ansari \cite{ansa} and the same proof works for weak
supercyclicity.
Cyclicity properties of weighted bilateral shifts have been
intensely studied. Hypercyclicity and supercyclicity of weighted
bilateral shifts were characterized by Salas \cite{sal,sal1} in
terms of the weight sequences. It was observed in
\cite[Proposition~5.1]{shk1} that the Salas conditions admit the
following simpler equivalent form.
\begin{thmsas}
For $1\leqslant p\leqslant \infty$, a weighted bilateral shift $T=T_{w,p}$ is
hypercyclic if and only if for any $m\in{\mathbb Z}_+$,
\begin{equation}
\mathop{\hbox{$\underline{\hbox{\rm lim}}$}}\limits\limits_{n\to\infty} \max\{\widetilde{w}(m-n+1,m),
(\widetilde{w}(m+1,m+n))^{-1}\}=0 \label{sal3}
\end{equation}
and $T$ is supercyclic if and only if for any $m\in{\mathbb Z}_+$,
\begin{equation}
\mathop{\hbox{$\underline{\hbox{\rm lim}}$}}\limits\limits_{n\to+\infty}
\widetilde{w}(m-n+1,m)\widetilde{w}(m+1,m+n)^{-1}=0, \label{sal4}
\end{equation}
where
\begin{equation}
\widetilde{w}(a,b)=\prod_{j=a}^b\,|w_j|\quad \ \ \text{for}\ \
a,b\in{\mathbb Z}\ \text { with } \ a\leqslant b. \label{beta}
\end{equation}
\end{thmsas}
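For illustration, the quantities entering (\ref{sal3}) and (\ref{sal4}) are easily probed numerically; the short sketch below does this for one illustrative weight sequence and a finite truncation (both chosen by us for illustration only; a finite truncation can suggest, but of course not prove, that a lower limit vanishes).
\begin{verbatim}
import math

def w(n):
    # illustrative weights: w_n = 1/2 for n <= 0 and w_n = 2 for n > 0
    return 0.5 if n <= 0 else 2.0

def w_tilde(a, b):
    # product of |w_j| over a <= j <= b (the quantity written above as w~(a,b))
    return math.prod(abs(w(j)) for j in range(a, b + 1))

def salas_terms(m, n_max=40):
    # finite truncations of the expressions whose lower limit over n is taken
    hyper = min(max(w_tilde(m - n + 1, m), 1.0 / w_tilde(m + 1, m + n))
                for n in range(1, n_max + 1))
    sup = min(w_tilde(m - n + 1, m) / w_tilde(m + 1, m + n)
              for n in range(1, n_max + 1))
    return hyper, sup

for m in range(3):
    print(m, salas_terms(m))  # both values shrink towards 0 as n_max grows
\end{verbatim}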
On the other hand, cyclicity of a weighted bilateral shift turns out
to be a much more subtle issue, see, for instance,
\cite{her0,her2,nik,shields}. It is worth noting that unlike for
hypercyclicity or supercyclicity, cyclicity of a weighted bilateral
shift depends on $p$. For instance, the unweighted bilateral shift
is cyclic on $\ell_2({\mathbb Z})$ and non-cyclic on $\ell_1({\mathbb Z})$. There
exist several necessary and several sufficient conditions of
cyclicity of a weighted bilateral shift, see for instance, the works
of Herrero \cite{her0,her2}. Prominent among these conditions is the
observation that if the adjoint of a weighted bilateral shift $T$ on
$\ell_p({\mathbb Z})$ for $1\leqslant p<\infty$ has non-empty point spectrum then
$T$ is non-cyclic. This implies, in particular, that the weighted
bilateral shift $T_{w,p}$ with the weight sequence $w_n=a$ for
$n\leqslant 0$ and $w_n=b$ for $n>0$ with $0<|b|<|a|$ is non-cyclic for
any $p\in[1,\infty]$. This is precisely the shape of the first
example of a non-cyclic weighted bilateral shift, obtained by
Beauzamy \cite{boze}.
Recall that a bounded linear operator $T$ on a Banach space ${\cal B}$ is
said to satisfy the {\it Supercyclicity Criterion} \cite{msa} if
there exist a strictly increasing sequence $\{n_k\}_{k\in{\mathbb Z}_+}$ of
positive integers, dense subsets $E$ and $F$ of ${\cal B}$ and a map
$S:F\to F$ such that $TSy=y$ for each $y\in F$ and
$\|T^{n_k}x\|\|S^{n_k}y\|\to 0$ as $k\to \infty$ for any $x\in E$
and $y\in F$. The following two results are proved in \cite{msa}.
\begin{thmsc}\it Any operator satisfying the Supercyclicity
Criterion is supercyclic. \end{thmsc}\rm
\begin{thmms}A weighted bilateral shift on $\ell_p({\mathbb Z})$ for $1\leqslant p<\infty$ or
on $c_0({\mathbb Z})$ is supercyclic if and only if it satisfies the
Supercyclicity Criterion. \end{thmms}\rm
There is no great mystery about the last theorem. One just has to
take $E=F$ being the space of sequences with finite support, $S$
being the inverse of the restriction of $T$ to $F$ and use Theorem~S
to find an appropriate sequence $\{n_k\}$. Note also that weak
hypercyclicity and weak supercyclicity of weighted bilateral shifts
were studied in \cite{bm,cs,san1,shk1}. In \cite{shk1} it is shown
that for $p\leqslant 2$ any weighted bilateral shift on $\ell_p({\mathbb Z})$ is
either supercyclic or is not weakly supercyclic. We extend this
dichotomy.
\begin{theorem}\label{dich}Let $1\leqslant p\leqslant 2$ and $T$ be a weighted bilateral
shift on $\ell_p({\mathbb Z})$. Then the following statements are equivalent:
\noindent{\rm(C1)} \ $T$ satisfies the Supercyclicity Criterion$;$
\noindent{\rm(C2)} \ $T$ is supercyclic$;$
\noindent{\rm(C3)} \ $T$ is weakly supercyclic$;$
\noindent{\rm(C4)} \ $T\oplus T$ is cyclic$;$
\noindent{\rm(C5)} \ there is $n\geqslant 2$ for which $T^n$ is cyclic$;$
\noindent{\rm(C6)} \ for any $n\in{\mathbb N}$, $T^n$ is cyclic.
\end{theorem}
We would like to stress that, as it is shown in \cite{shk1}, for
each $p>2$ there is a weakly supercyclic non-supercyclic weighted
bilateral shift $T$ on $\ell_p({\mathbb Z})$. Since powers of a weakly
supercyclic operator are cyclic, we see that (C6) does not imply
(C2) when $p>2$. From this observation and equivalence of (C5), (C6)
and (C2) for $p\leqslant 2$, we immediately obtain the following
corollary.
\begin{corollary}\label{square}A weighted bilateral shift $T$ acting
on $\ell_p({\mathbb Z})$ with $1\leqslant p\leqslant 2$ is supercyclic if and only if
$T^2$ is cyclic. On the other hand for each $p>2$ there exists a
non-supercyclic weighted bilateral shift on $\ell_p({\mathbb Z})$ whose
powers are all cyclic.
\end{corollary}
It is also worth noting that a sufficient condition of weak
supercyclicity of weighted bilateral shifts $T$ in \cite{shk1}
automatically gives weak supercyclicity and therefore cyclicity of
$T\oplus T$. On the other hand, weakly supercyclic non-supercyclic
operators $T$ constructed in \cite{san1,bm} have the property that
$T\oplus T$ is not cyclic. In {\rm \cite{unicell}} a sufficient
condition for a weighted bilateral shift to be unicellular (and
therefore cyclic) is given. This result together with Theorem~S
imply that there are cyclic non-supercyclic weighted bilateral
shifts on $\ell_p({\mathbb Z})$ for $1\leqslant p<\infty$ and on $c_0({\mathbb Z})$. Thus
the condition $n\geqslant 2$ in (C5) is essential. From the proof of
Theorem~\ref{dich} below it will be clear which relations between
the conditions (C1--C6) hold for any bounded linear operator on a
separable Banach space. We shall also show that the implication
${\rm (C5)}\,\Longrightarrow\,{\rm (C4)}$ is satisfied for any
weighted bilateral shift $T=T_{w,p}$ with $1\leqslant p\leqslant \infty$. The
last implication though is not satisfied for general operators. For
instance, the Volterra operator $Vf(x)=\int_0^x f(t)\,dt$, acting on
$L_2[0,1]$, satisfies (C6) and does not satisfy (C4), see, for
instance, \cite{mos}.
Finally, we shall prove yet another sufficient condition for
cyclicity of a weighted bilateral shift. It does not follow from any
known sufficient condition including the following most recent one
due to Abakumov, Atzmon and Grivaux \cite{aba}.
\begin{thmaag} \it Let $w=\{w_n\}_{n\in{\mathbb Z}}$ be a bounded
sequence of non-zero complex numbers, $\alpha_0=1$,
$\alpha_n=(\widetilde w(1,n))^{-1}$ for $n>0$ and
$\alpha_n=\widetilde w(1+n,0)$ for $n<0$, where the numbers
$\widetilde w(a,b)$ are defined in $(\ref{beta})$. Assume also that
there exist $k\in{\mathbb N}$ and a submultiplicative sequence
$\{\rho_n\}_{n\in{\mathbb Z}_+}$ of positive numbers such that
$\ln^+(\rho_n)=o(\sqrt{n})$, $\alpha_{-n}=O(n^k)$ and
$\alpha_n=O(\rho_n)$ as $n\to+\infty$. Then the weighted bilateral
shift $T=T_{w,p}$ is cyclic if and only if the sequence
$\{\alpha_n^{-1}\}_{n\in{\mathbb Z}}$ does not belong to $\ell_q({\mathbb Z})$, where
$\frac1p+\frac1q=1$.
\end{thmaag} \rm
This highly non-trivial result does not give a characterization of
cyclicity for weighted bilateral shifts. For instance, the
conditions imposed on the weight sequence rule out compact weighted
bilateral shifts. The next theorem can be applied to a wider
variety of weight sequences, although it yields a weaker statement
when applied to weights satisfying the conditions of
Theorem~A\!$^2$\nobreak\hskip-1pt\nobreak G.
\begin{theorem} \label{suco} Let $w=\{w_n\}_{n\in{\mathbb Z}}$ be a bounded
sequence of non-zero complex numbers such that for any $a\in{\mathbb N}$,
\begin{equation}\label{a123}
\inf\{\widetilde w(1,m)^{-1}\widetilde w(-j(m-a),0)^{1/j}:j\in{\mathbb N},\
m\geqslant a\}=0.
\end{equation}
Then the weighted bilateral shift $T_{w,p}$ is cyclic for $1\leqslant
p\leqslant \infty$.
\end{theorem}
Substituting $m=a+1$ into (\ref{a123}), we immediately obtain the
following corollary.
\begin{corollary} \label{suco1} Let $w=\{w_n\}_{n\in{\mathbb Z}}$ be a bounded
sequence of non-zero complex numbers such that
\begin{equation}\label{b123}
\mathop{\hbox{$\underline{\hbox{\rm lim}}$}}\limits_{n\to+\infty}\widetilde w(1-n,0)^{1/n}=0.
\end{equation}
Then the weighted bilateral shift $T_{w,p}$ is cyclic for $1\leqslant
p\leqslant\infty$.
\end{corollary}
Since $\|T_{w,p}^n\|\geqslant \|T_{w,p}^ne_0\|=\widetilde w(1-n,0)$ for
each $n\in{\mathbb Z}_+$, the spectral radius formula implies that any
quasinilpotent weighted bilateral shift satisfies (\ref{b123}). Note
also that any compact weighted bilateral shift is quasinilpotent.
Thus, the following corollary holds true.
\begin{corollary} \label{sucoco} Any quasinilpotent weighted bilateral
shift is cyclic. In particular, any compact weighted bilateral shift
is cyclic. \end{corollary}
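For a concrete instance (an illustrative weight sequence of our own choosing), take $w_n=e^{-|n|}$ for $n\in{\mathbb Z}$. Then
$\widetilde w(1-n,0)=\exp\bigl(-\sum_{j=0}^{n-1}j\bigr)=e^{-n(n-1)/2}$, so $\widetilde w(1-n,0)^{1/n}=e^{-(n-1)/2}\to 0$ and (\ref{b123}) holds; hence $T_{w,p}$ is cyclic for every $p\in[1,\infty]$ by Corollary~\ref{suco1}.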
If we fix $j\in{\mathbb N}$ in (\ref{a123}), we immediately obtain the
following corollary.
\begin{corollary} \label{suco2} Let $w=\{w_n\}_{n\in{\mathbb Z}}$ be a bounded
sequence of non-zero complex numbers for which there exists $j\in{\mathbb N}$
such that
\begin{equation}\label{c123}
\mathop{\hbox{$\underline{\hbox{\rm lim}}$}}\limits_{m\to+\infty}\frac{\widetilde w(a-jm,0)}{(\widetilde
w(1,m))^j}=0\quad\text{for each}\quad a\in{\mathbb N}.
\end{equation}
Then the weighted bilateral shift $T_{w,p}$ is cyclic for $1\leqslant
p\leqslant\infty$.
\end{corollary}
\begin{example} \rm Let $a,b>0$, $0<\alpha\leqslant 1$ and
$w=\{w_n\}_{n\in{\mathbb Z}}$ be a sequence of positive numbers such that
$1-w_n\sim an^{-\alpha}$ as $n\to+\infty$ and $1-w_n\sim
b(-n)^{-\alpha}$ as $n\to -\infty$. From Corollary~\ref{suco2} it
follows easily that the weighted bilateral shift $T_{w,p}$ is cyclic
for $1\leqslant p\leqslant \infty$. On the other hand,
Theorem~A\!$^2$\nobreak\hskip-1pt\nobreak G is applicable only if
$\alpha>1/2$. Note also that by Theorem~S, $T$ is supercyclic if
$b>a$ and is non-supercyclic if $b<a$.
\end{example}
\section{Proof of Theorem~\ref{dich} \label{s2}}
We start with the following three easy and known but nice
observations.
\begin{lemma}\label{quasi} Let ${\cal B}_1$ and ${\cal B}_2$ be Banach spaces
and $T_1\in L({\cal B}_1)$, $T_2\in L({\cal B}_2)$ be such that there exists a
bounded linear operator $J:{\cal B}_1\to {\cal B}_2$ with dense range satisfying
$T_2J=JT_1$. Then cyclicity of $T_1$ implies cyclicity of $T_2$.
\end{lemma}
\begin{proof} Observe that
$\hbox{\tt span}\,\{T^n_2 Jx:n\in{\mathbb Z}_+\}=J\bigl(\hbox{\tt span}\,\{T_1^nx:n\in{\mathbb Z}_+\}\bigr)$
for each $x\in{\cal B}_1$. Hence $Jx$ is a cyclic vector for $T_2$ for
each cyclic vector $x$ for $T_1$. \end{proof}
\noindent{\bf Remark.} \ The same argument shows that
Lemma~\ref{quasi} remains true if cyclicity is replaced by
hypercyclicity, supercyclicity, weak hypercyclicity or weak
supercyclicity.
\begin{lemma}\label{adj} Let ${\cal B}$ be a Banach space and $T\in
L({\cal B})$. Then the operator $T\oplus T^*$ acting on ${\cal B}\times {\cal B}^*$ is
non-cyclic. \end{lemma}
\begin{proof}Let $(x,f)\in {\cal B}\times {\cal B}^*$ be different from zero.
Then the continuous linear functional $F$ on ${\cal B}\oplus{\cal B}^*$ defined
by $F(y,g)=f(y)-g(x)$ is non-zero. We have,
\begin{equation*}
F(T^nx,T^{*n}f)=f(T^nx)-T^{*n}f(x)=f(T^nx)-f(T^nx)=0\quad\text{for
any}\quad n\in{\mathbb Z}_+.
\end{equation*}
Thus, the orbit of any non-zero vector under $T\oplus T^* $ is
contained in the kernel of a non-zero continuous linear functional.
Therefore, $T\oplus T^*$ is not cyclic. \end{proof}
\begin{corollary}\label{tplust} Let ${\cal B}$ be a Banach space and
$T\in L({\cal B})$ be such that there exists a bounded linear operator
$J:{\cal B}\to{\cal B}^*$ with dense range satisfying $T^*J=JT$. Then the
operator $T\oplus T$ acting on ${\cal B}\oplus {\cal B}$ is non-cyclic.
\end{corollary}
\begin{proof} Since $T^*J=JT$, we have $(T\oplus T^*)(I\oplus J)=
(I\oplus J)(T\oplus T)$. Assume that $T\oplus T$ is cyclic. Since
$I\oplus J:{\cal B}\times {\cal B}\to {\cal B}\times {\cal B}^*$ is bounded and has dense
range, Lemma~\ref{quasi} implies that $T\oplus T^*$ is cyclic, which
is impossible according to Lemma~\ref{adj}. \end{proof}
\begin{lemma} \label{pow} Let $j\in{\mathbb N}$ and $T$ be a bounded linear
operator with dense range on a Banach space ${\cal B}$ such that $T^j$ is
cyclic. Let also $z=e^{2\pi i/j}$. Then the operator
$$
S=T\oplus zT\oplus z^2 T\oplus {\dots} \oplus z^{j-1}T,
$$
acting on ${\cal B}^j$, is cyclic. \end{lemma}
\begin{proof} Let $x$ be a cyclic vector for $T^j$. Then
$L=\{r(T^j)x:r\in{\cal P}\}$ is dense in ${\cal B}$, where ${\cal P}={\mathbb C}[t]$ is the
space of polynomials in one variable with complex coefficients.
Since $T$ has dense range, the spaces $T(L),\dots,T^{j-1}(L)$ are
also dense in ${\cal B}$. It suffices to verify that $u=(x,x,\dots,x)\in
{\cal B}^j$ is a cyclic vector for $S$. Let $M$ be the closed linear span
of the orbit of $u$ under $S$, $0\leqslant k\leqslant j-1$ and $r\in {\cal P}$. Then
$$
S^kr(S^j)u=(T^kr(T^j)x,z^kT^kr(T^j)x,\dots,z^{k(j-1)}T^kr(T^j)x)\in
M.
$$
Thus, $M$ contains the vectors of the shape
$(a,z^ka,\dots,z^{k(j-1)}a)$ for $a\in T^k(L)$ and $0\leqslant k\leqslant
j-1$. Since $M$ is closed and $T^k(L)$ is dense in ${\cal B}$, we see that
$$
M\supseteq N_k=\{(a,z^ka,\dots,z^{k(j-1)}a):a\in{\cal B}\}\quad\text{for
$0\leqslant k\leqslant j-1$.}
$$
Finally, the matrix $\{z^{kl}\}_{k,l=0}^{j-1}$ is invertible since
its determinant is a Vandermonde determinant. Invertibility of the latter
matrix implies that the union of $N_k$ for $0\leqslant k\leqslant j-1$ spans
${\cal B}^j$. Hence $M={\cal B}^j$ and therefore $u$ is a cyclic vector for $S$.
\end{proof}
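To illustrate the last step (an added example with $j=2$ and $z=-1$): here $N_0=\{(a,a):a\in{\cal B}\}$ and $N_1=\{(a,-a):a\in{\cal B}\}$, and every $(u,v)\in{\cal B}^2$ can be written as
$$
(u,v)=\frac12\,(u+v,u+v)+\frac12\,(u-v,-(u-v)),
$$
so $N_0\cup N_1$ indeed spans ${\cal B}^2$; the general case replaces this $2\times2$ computation by the invertibility of the Vandermonde matrix.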
For weighted bilateral shifts, the last lemma can be written in a
nicer form. Recall that if $|w_n|=|u_n|$ for any $n\in{\mathbb Z}$, then the
weighted bilateral shifts $T_{w,p}$ and $T_{u,p}$ are isometrically
similar for each $p\in[1,\infty]$. Indeed, consider the sequence
$\{d_n\}_{n\in{\mathbb Z}}$ defined as $d_0=1$,
$d_n=\widetilde{w}(1,n)/\widetilde{u}(1,n)$ for $n\geqslant1$ and
$d_n=\widetilde{u}(n+1,0)/\widetilde{w}(n+1,0)$ for $n<0$. Then
$|d_n|=1$ for each $n\in{\mathbb Z}$ and therefore the diagonal operator
$D$, which acts on the basic vectors according to the formula
$De_n=d_ne_n$ for $n\in{\mathbb Z}$, is an invertible isometry. One can
easily verify that $T_{w,p}=D^{-1}T_{u,p} D$. That is, $T_{w,p}$ and
$T_{u,p}$ are isometrically similar. In particular, any $T_{w,p}$ is
similar to $zT_{w,p}$ if $z\in{\mathbb C}$ and $|z|=1$. This observation
together with the above lemma lead to the following corollary.
\begin{corollary}\label{pow1} Let $T=T_{w,p}$ be a weighted bilateral
shift for which there exists $j\geqslant 2$ such that $T^j$ is cyclic.
Then $T\oplus T$ is cyclic.
\end{corollary}
\begin{proof} By Lemma~\ref{pow} the operator
$T\oplus zT\oplus{\dots}\oplus z^{j-1}T$ is cyclic, where $z=e^{2\pi
i/j}$. From the above observation it follows that $T$ is similar to
$z^kT$ for $0\leqslant k\leqslant j-1$. Hence the direct sum of $j$ copies of
$T$ is cyclic and therefore $T\oplus T$ is cyclic since $j\geqslant 2$.
\end{proof}
The next lemma provides a sufficient condition for a direct sum of
two weighted bilateral shifts to be non-cyclic.
\begin{lemma}\label{sumbws}Let $w$ be a bounded sequence of non-zero
complex numbers, $p_1,p_2\in[1,\infty]$ and
\begin{equation} \label{qq}
q=q(p_1,p_2)=\left\{\begin{array}{ll}\frac{p_1p_2}{p_1p_2-p_1-p_2}&\text{if
$p_1+p_2<p_1p_2$,}\\ \infty&\text{otherwise.}\end{array}\right.
\end{equation}
Assume also that there exists $m\in{\mathbb Z}_+$ such that
$a=\{a_n\}_{n\in{\mathbb Z}_+}\in\ell_q$, where
\begin{equation}\label{aa}
a_n=\frac{\widetilde{w}(m+1,m+n)}{\widetilde{w}(m-n+1,m)}\quad\text{for
$n\in{\mathbb Z}_+$.}
\end{equation}
Then $T_{w,p_1}\oplus T_{w,p_2}$ is non-cyclic.
\end{lemma}
\begin{proof} For brevity, let ${\cal B}_p=\ell_p({\mathbb Z})$ if $1\leqslant
p<\infty$ and ${\cal B}_\infty=c_0({\mathbb Z})$. Consider the bilateral sequence
$\{d_n\}_{n\in{\mathbb Z}}$ defined by
$$
d_0=1,\ \ d_n=\prod_{j=1}^n\frac{w_j}{w_{2m+1-j}}\ \ \text{if $n>0$\
\ and}\ \ d_n=\prod_{j=1}^{|n|}\frac{w_{2m+j}}{w_{1-j}}\ \ \text{if
$n<0$.}
$$
It is straightforward to verify that
$d_{n+m}=d_{m-n}=(\widetilde{w}(m+1,2m))^{-1} \widetilde{w}(1,m)a_n$
for each $n>m$. Since $a\in\ell_q$, we have $d\in\ell_q({\mathbb Z})$. Let
$p_1'\in[1,\infty]$ be defined by the formula
$\frac1{p_1}+\frac1{p_1'}=1$. From the definition of $q$, we have
$\frac1{p_1'}\leqslant \frac1q+\frac1{p_2}$. Since $d\in\ell_q({\mathbb Z})$, the
H\"older inequality provides a bounded linear operator
$J:{\cal B}_{p_2}\to {\cal B}_{p_1'}$ defined on the canonical basis as
$Je_n=d_ne_{2m-n}$. It is straightforward to verify, by computing
the values of the operators on the basic vectors $(e_k,e_n)$, that
$(T_{w,p_1}\oplus S)(I\oplus J)=(I\oplus J)(T_{w,p_1}\oplus
T_{w,p_2})$, where $S$ is the bounded linear operator on ${\cal B}_{p_1'}$
defined as $Se_n=w_{n+1}e_{n+1}$ for $n\in{\mathbb Z}$.
Assume that $T_{w,p_1}\oplus T_{w,p_2}$ is cyclic. Since $I\oplus J$
has dense range, Lemma~\ref{quasi} implies that $T_{w,p_1}\oplus S$
is cyclic, which is impossible according to Lemma~\ref{adj}. Indeed,
if $1<p_1\leqslant\infty$, then $S=T^*_{w,p_1}$ and if $1\leqslant
p_1<\infty$, then $T_{w,p_1}=S^*$. Thus, in any case,
$T_{w,p_1}\oplus S$ is a direct sum of an operator with its dual.
\end{proof}
The following corollary is the particular case $p_1=p_2$ of the
above lemma.
\begin{corollary}\label{sumbws1}Let $w$ be a bounded sequence of non-zero
complex numbers, $p\in[1,\infty]$, and let $q=\infty$ if $p\leqslant 2$ and $q=p/(p-2)$ if
$p>2$. Assume also that there exists $m\in{\mathbb Z}_+$ such that
$a=\{a_n\}_{n\in{\mathbb Z}_+}\in\ell_q$, where $a$ is defined in
$(\ref{aa})$. Then $T_{w,p}\oplus T_{w,p}$ is non-cyclic.
\end{corollary}
In order to prove the next proposition, we apply Lemma~\ref{sumbws}
in the case $\frac1{p_1}+\frac1{p_2}=1$.
\begin{proposition}\label{ttbws} Let $w=\{w_n\}_{n\in{\mathbb Z}}$ be a
bounded sequence of non-zero complex numbers. Then $T_{w,p}$ is
supercyclic if and only if $T_{w,p}\oplus T_{w,p'}$ is cyclic, where
$\frac1p+\frac1{p'}=1$.
\end{proposition}
\begin{proof} By Theorem~S, supercyclicity of $T_{w,p}$ does not depend on
$p$. In particular, $T_{w,p}$ is supercyclic if and only if
$T_{w,p'}$ is supercyclic. Thus, without loss of generality, we can
assume that $p\leqslant p'$. If $T_{w,p}$ is supercyclic, then by
Theorem~MS, $T_{w,p}$ satisfies the Supercyclicity Criterion. Since
an operator $T$ satisfies the Supercyclicity Criterion if and only
if $T\oplus T$ does, we have that $T_{w,p}\oplus T_{w,p}$ satisfies
the Supercyclicity Criterion and therefore $T_{w,p}\oplus T_{w,p}$
is cyclic. Since $\ell_p({\mathbb Z})\times \ell_p({\mathbb Z})$ is densely and
continuously embedded into $\ell_p({\mathbb Z})\times \ell_{p'}({\mathbb Z})$ if
$p'<\infty$ and into $\ell_p({\mathbb Z})\times c_0({\mathbb Z})$ if $p'=\infty$, we
see that cyclicity of $T_{w,p}\oplus T_{w,p}$ implies cyclicity of
$T_{w,p}\oplus T_{w,p'}$. Thus, $T_{w,p}\oplus T_{w,p'}$ is cyclic.
Assume now that $T_{w,p}$ is non-supercyclic. Theorem~S implies the
existence of $m\in{\mathbb Z}_+$ such that (\ref{sal4}) is not satisfied.
Then $a=\{a_n\}_{n\in{\mathbb Z}_+}\in\ell_\infty$, where $a$ is defined in
(\ref{aa}). It is easy to see from (\ref{qq}) that
$q=q(p,p')=\infty$. By Lemma~\ref{sumbws}, $T_{w,p}\oplus T_{w,p'}$
is non-cyclic.
\end{proof}
\begin{proposition}\label{pp1} Let $T$ be a bounded linear operator on a separable Banach
space ${\cal B}$. Then the conditions {\rm (C1--C6)} of
Theorem~{\rm\ref{dich}} are related in the following way:
$$
{\rm (C4)}\,\Longleftarrow\,{\rm (C1)}\,\Longrightarrow\,{\rm
(C2)}\,\Longrightarrow\,{\rm (C3)}\,\Longrightarrow\,{\rm
(C6)}\,\Longrightarrow\,{\rm (C5)}.
$$
\end{proposition}
\begin{proof} By Theorem~SC, (C1) implies (C2). The implication ${\rm
(C1)}\,\Longrightarrow\,{\rm (C4)}$ follows from the same theorem
and the fact that $T$ satisfies the Supercyclicity Criterion if and
only if $T\oplus T$ does. Since the powers of a weakly supercyclic
operator are weakly supercyclic, we see that (C3) implies (C6). The
implications ${\rm (C2)}\,\Longrightarrow\,{\rm (C3)}$ and ${\rm
(C6)}\,\Longrightarrow\,{\rm (C5)}$ are trivial.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{dich}] According to
Corollary~\ref{pow1} (C5) implies (C4). By Theorem~MS (C2) implies
(C1). Taking into account Proposition~\ref{pp1}, we see that it
suffices to show that (C4) implies (C2). If $T\oplus T$ is cyclic on
$\ell_p({\mathbb Z})\oplus \ell_p({\mathbb Z})$ then, since $p\leqslant 2$, it is cyclic on
$\ell_p({\mathbb Z})\oplus \ell_{p'}({\mathbb Z})$, where $\frac1p+\frac1{p'}=1$. By Proposition~\ref{ttbws} $T$ is
supercyclic and therefore (C4) implies (C2). \end{proof}
\section{Proof of Theorem~\ref{suco}} \rm
Recall the following general result on universal families, see
\cite[p.~348--349]{ge1}. Let ${\cal F}=\{f_\alpha:\alpha\in A\}$ be
a family of continuous maps from a complete metric space $X$ to a
separable metric space $Y$. Then the set $\bigl\{x\in
X:\{f_a(x):a\in A\}\ \ \text{is dense in $Y$}\bigr\}$ of universal
elements for $\cal F$ is dense in $X$ if and only if the set
$\{(x,f_a(x)):x\in X,\ a\in A\}$ is dense in $X\times Y$.
A direct application of this result to the family $\{r(T):r\in{\cal P}\}$,
where $T$ is a bounded linear operator on a Banach space, gives us
the following theorem.
\begin{thmdc} \it Let ${\cal B}$ be a separable Banach space and $T:{\cal B}\to {\cal B}$ be a bounded
linear operator. Then the set of cyclic vectors for $T$ is dense in
${\cal B}$ if and only if the set $\{(x,r(T)x):x\in {\cal B},\ r\in {\cal P}\}$ is
dense in ${\cal B}\times {\cal B}$.\rm
\end{thmdc}
We say that a subset $A$ of a Banach space ${\cal B}$ is {\it cyclic} for
a bounded linear operator $T$ acting on ${\cal B}$ if
$\bigcup\limits_{r\in{\cal P}}r(T)(A)$ is dense in ${\cal B}$. Theorem~DC admits
the following refinement.
\begin{corollary} \label{dc} Let ${\cal B}$ be a separable Banach space,
$T:{\cal B}\to {\cal B}$ be a bounded linear operator and $A,B$ be two cyclic
subsets for $T$. Assume also that the point spectrum $\sigma_p(T^*)$
of the dual operator $T^*$ has empty interior. Then the set of
cyclic vectors for $T$ is dense in ${\cal B}$ if and only if for any $x\in
A$, $y\in B$ and $\varepsilon>0$, there exist $u\in{\cal B}$ and $r\in {\cal P}$
such that $\|x-u\|<\varepsilon$ and $\|y-r(T)u\|<\varepsilon$.
\end{corollary}
\begin{proof} The 'only if' part follows immediately from
Theorem~DC. It remains to prove the 'if' part. Clearly
$$
B\subseteq M_\delta=\overline{\bigcup\limits_{r\in{\cal P},\ x\in
A}r(T)(x+\delta U)}\quad \text{for any $\delta>0$},\ \text{where
$U=\{x\in {\cal B}:\|x\|<1\}$.}
$$
For any $\delta>0$ and $q\in{\cal P}$, we have $q(T)(M_\delta)\subseteq
M_\delta$ and therefore $q(T)(B)\subseteq M_\delta$. Since each
$M_\delta$ is closed and $B$ is cyclic for $T$, we have
$M_\delta={\cal B}$ for any $\delta>0$. Let ${\cal P}^\dagger$ be the set of the
polynomials $q\in{\cal P}$, whose zeros are all in
${\mathbb C}\setminus\sigma_p(T^*)$. Then $q(T)$ has dense range for any
$q\in{\cal P}^\dagger$. In particular, we see that the set
$$
q(T)\biggl(\ \bigcup\limits_{r\in{\cal P},\ x\in A}r(T)(x+\delta
U)\biggr)=\bigcup\limits_{r\in{\cal P},\ x\in A}r(T)(q(T)x+\delta q(T)(U))
$$
is dense in ${\cal B}$ for any $\delta>0$ and $q\in{\cal P}^\dagger$. Finally,
since $q(T)(U)\subseteq \|q(T)\|U$, we have
$$
\bigcup\limits_{r\in{\cal P},\ x\in q(T)(A)}r(T)(x+\varepsilon U)\quad
\text{is dense in ${\cal B}$ for any $\varepsilon>0$ and $q\in{\cal P}^\dagger$.}
$$
Since $\sigma_p(T^*)$ has empty interior, using the definition of
${\cal P}^\dagger$ and cyclicity of $A$ for $T$, we obtain
$$
\bigcup_{q\in{\cal P}^\dagger} q(T)(A)\quad \text{is dense in ${\cal B}$}.
$$
The last two displays immediately imply that the set
$\{(x,r(T)x):x\in {\cal B},\ r\in {\cal P}\}$ is dense in ${\cal B}\times{\cal B}$. By
Theorem~DC, the set of cyclic vectors for $T$ is dense in ${\cal B}$.
\end{proof}
We shall apply the above corollary to weighted bilateral shifts. The
following statement is a particular case of Corollary~\ref{dc}.
\begin{corollary}\label{dcs} Let $T$ be a bounded linear operator on
a Banach space ${\cal B}$ and $\{f_j\}_{j\in{\mathbb Z}}$ be a sequence of elements
of ${\cal B}$ such that $\hbox{\tt span}\,\{f_j:j\in{\mathbb Z}\}$ is dense in ${\cal B}$ and
$Tf_j=f_{j+1}$ for each $j\in{\mathbb Z}$. Then the set of cyclic vectors for
$T$ is dense if and only if for any $n,k\in{\mathbb N}$, $n>k$ and any
$\varepsilon>0$, there exist $r\in{\cal P}$ and $u\in{\cal B}$ such that
$\|f_{-k}-u\|\leqslant\varepsilon$ and $\|f_{-n}-r(T)u\|\leqslant\varepsilon$.
\end{corollary}
\begin{proof} The 'only if' part is a trivial consequence of
Theorem~DC. It remains to prove the 'if' part. Let $A=\{f_m:m<0\}$.
Since $Tf_j=f_{j+1}$ for each $j\in{\mathbb Z}$, we have
$$
\bigcup_{r\in{\cal P}}r(T)(A)=\hbox{\tt span}\,\{f_j:j\in{\mathbb Z}\} \ \ \text{is dense in
${\cal B}$.}
$$
Hence $A$ is a cyclic set for $T$. Let $x,y\in A$. Then $x=f_{-k}$
and $y=f_{-n}$ for some $k,n\in{\mathbb N}$. If $n\leqslant k$, then there is a
constant $c\in{\mathbb C}$ such that $r(T)x=y$, where $r(z)=cz^{k-n}$. In
particular $\|x-u\|=0<\varepsilon$ and $\|y-r(T)u\|=0<\varepsilon$ for
$u=x$ for any $\varepsilon>0$. If $n>k$ and $\varepsilon>0$, then by the
assumptions, there exist $r\in{\cal P}$ and $u\in{\cal B}$ such that
$\|x-u\|<\varepsilon$ and $\|y-r(T)u\|<\varepsilon$. It remains to apply
Corollary~\ref{dc}.
\end{proof}
\begin{proposition} \label{suco3} Let $T$ be a bounded linear operator on
a Banach space ${\cal B}$ and $\{f_j\}_{j\in{\mathbb Z}}$ be a sequence of elements
of ${\cal B}$ such that $\hbox{\tt span}\,\{f_j:j\in{\mathbb Z}\}$ is dense in ${\cal B}$ and
$Tf_j=f_{j+1}$ for each $j\in{\mathbb Z}$. Assume also that
\begin{equation}\label{u123}
\inf\{\|f_{-m}\|\|f_{(m-a)j}\|^{1/j}:j\in{\mathbb N},\ m\geqslant a\}=0\quad
\text{for any $a\in{\mathbb N}$}.
\end{equation}
Then $T$ has a dense set of cyclic vectors. \end{proposition}
\begin{proof} Let $\varepsilon>0$ and $n,k\in{\mathbb N}$ be such that $n>k$.
For any $j\in{\mathbb N}$ and $m\geqslant n>k$ we consider $x_m\in{\cal B}$ and a
polynomial $q_{j,m}$ defined by
$$
x_m=f_{-k}-\frac{\varepsilon}{\|f_{-m}\|}f_{-m}\quad \text{and}\quad
q_{j,m}(z)=-\frac{\|f_{-m}\|z^{m-n}}{\varepsilon}\sum_{l=0}^{j-1}
\biggl(\frac{\|f_{-m}\|z^{m-k}}{\varepsilon}\biggr)^l.
$$
Using the fact that $Tf_l=f_{l+1}$ for each $l\in{\mathbb Z}$ and the
summation formula for a finite geometric progression, one can easily
verify that
$q_{j,m}(T)x_m=f_{-n}-(\|f_{-m}\|/\varepsilon)^jf_{(m-k)j-n}$. Hence
$$
\|q_{j,m}(T)x_m-f_{-n}\|=(\|f_{-m}\|/\varepsilon)^j\|f_{(m-k)j-n}\|.
$$
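For the reader's convenience, the identity used above can be checked directly. Writing $c:=\|f_{-m}\|/\varepsilon$ and using $T^sf_{-k}=f_{s-k}$ and $T^sf_{-m}=f_{s-m}$, we have $T^{(m-n)+l(m-k)}f_{-k}=f_{(l+1)(m-k)-n}$ and $T^{(m-n)+l(m-k)}f_{-m}=f_{l(m-k)-n}$, so the sum telescopes:
$$
q_{j,m}(T)x_m=\sum_{l=0}^{j-1}\bigl(c^{l}f_{l(m-k)-n}-c^{l+1}f_{(l+1)(m-k)-n}\bigr)
=f_{-n}-c^{j}f_{(m-k)j-n}.
$$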
Now let $a=n+k$ and assume that $m\geqslant a$. Then
$$
\|f_{(m-k)j-n}\|=\|f_{(m-a)j+n(j-1)}\|=\|T^{n(j-1)}f_{(m-a)j}\|\leqslant
\|T^n\|^{j-1}\|f_{(m-a)j}\|.
$$
From the last two displays, we obtain
$$
\|q_{j,m}(T)x_m-f_{-n}\|\leqslant(\|f_{-m}\|/\varepsilon)^j\|T^n\|^{j-1}\|f_{(m-a)j}\|.
$$
Now, from (\ref{u123}) it follows that $j\in{\mathbb N}$ and $m\geqslant a$ can be
chosen in such a way that the right hand side of the above
inequality does not exceed $\varepsilon$. In this case
$\|q_{j,m}(T)x_m-f_{-n}\|\leqslant\varepsilon$. Since from the definition of
$x_m$ it follows that $\|x_m-f_{-k}\|=\varepsilon$, Corollary~\ref{dcs}
implies that $T$ has a dense set of cyclic vectors.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{suco}]
As we have already mentioned, if $|w_n|=|u_n|$ for any $n\in{\mathbb Z}$,
then the weighted bilateral shifts $T_{w,p}$ and $T_{u,p}$ are
isometrically similar for each $p\in[1,\infty]$. Thus, we can,
without loss of generality, assume that $w_n>0$ for each $n\in{\mathbb Z}$.
Let $f_n=c_ne_{-n}$ for $n\in{\mathbb Z}$, where $c_n=1$ if $n=0$,
$c_n=\widetilde w(1-n,0)$ if $n>0$ and $c_n=(\widetilde
w(1,-n))^{-1}$ if $n<0$. It is straightforward to see that
$Tf_n=f_{n+1}$ for each $n\in{\mathbb Z}$. Clearly
$\hbox{\tt span}\,\{f_n:n\in{\mathbb Z}\}=\hbox{\tt span}\,\{e_n:n\in{\mathbb Z}\}$ is dense. Now, since
$\|f_n\|=c_n$, (\ref{u123}) is equivalent to (\ref{a123}). It
remains to apply Proposition~\ref{suco3}.
\end{proof}
\section{Concluding remarks \label{s6}} \rm
It is worth mentioning another dichotomy for weighted bilateral
shifts, provided in \cite{shk2}. Namely if $T$ is a weighted
bilateral shift on $\ell_p({\mathbb Z})$ with $1<p<\infty$ then either $T$ is
supercyclic or $T^nx/\|T^nx\|$ is weakly convergent to zero as
$n\to\infty$ for each non-zero $x\in\ell_p({\mathbb Z})$. The same holds true
for weighted bilateral shifts on $c_0({\mathbb Z})$ and fails for weighted
bilateral shifts on $\ell_1({\mathbb Z})$. We would like to stress that the
following problem remains open.
\begin{problem}\label{p1} Characterize cyclic weighted bilateral
shifts on $\ell_p({\mathbb Z})$ for $1\leqslant p<\infty$ and on $c_0({\mathbb Z})$.
\end{problem}
As mentioned in \cite{aba}, it is not known whether certain specific
weighted bilateral shifts are cyclic. For instance, let
$0<\alpha\leqslant 1$, $w_n=1$ if $n\leqslant 1$ and $w_n=1-n^{-\alpha}$ if
$n\geqslant 2$. It is easy to see that the point spectrum of $T^*_{w,1}$
is non-empty (it coincides with the unit circle) and therefore,
according to Herrero, $T_{w,1}$ is non-cyclic for any $\alpha$. From
Theorem~A\!$^2$\nobreak\hskip-1pt\nobreak G it follows that
$T_{w,p}$ is cyclic for $1<p\leqslant \infty$ when $\alpha>1/2$.
\begin{problem}\label{p2} Let $w$ be the above weight sequence with
$\alpha=1/2$. For which $p\in(1,\infty]$ is $T_{w,p}$ cyclic?
\end{problem}
We conjecture that the answer to the following question must be
affirmative.
\begin{problem}\label{p3} Let $2<p\leqslant\infty$. Does there exist a
weighted bilateral shift $T=T_{w,p}$ such that $T^n$ is cyclic for
any $n\in{\mathbb N}$ and $T$ is not weakly supercyclic?
\end{problem}
Note that Salas \cite{sal} proved that $I+T$ is hypercyclic for any
(unilateral) backward weighted shift $T$. We would like to raise the
following problem.
\begin{problem}\label{p4} Characterize weighted bilateral shifts $T_{w,p}$
for which $I+T$ is hypercyclic. What about supercyclicity?
\end{problem}
What the author has been able to observe so far is that for $1\leqslant
p\leqslant 2$, supercyclicity of $I+T$ implies supercyclicity of $T$ for
any weighted bilateral shift $T$ on $\ell_p({\mathbb Z})$.
Finally, we would like to raise the following problem.
\begin{problem}\label{p5} Does there exist a bounded linear
operator $T$ on a separable Banach space such that $T\oplus T$ is
cyclic and $T^2$ is non-cyclic?
\end{problem}
{\bf Acknowledgements.} The author would like to thank the referee
for numerous helpful remarks and suggestions.
\break
\small\rm
\vskip1truecm
\scshape
\noindent Stanislav Shkarin
\noindent Queen's University Belfast
\noindent Department of Pure Mathematics
\noindent University road, Belfast BT7 1NN, UK
\noindent E-mail address: \qquad {\tt [email protected]}
\end{document}
|
\begin{document}
\title{Distance to boundary and minimum-error discrimination}
\author{Erkka Haapasalo}
\affiliation{Turku Centre for Quantum Physics,~Department of Physics and Astronomy,~University of Turku,~FI-20014 Turku,~Finland}
\author{Michal Sedl\'ak}
\affiliation{Department of Optics, Palack\'y University, 17. listopadu 1192/12, CZ-77146 Olomouc, Czech Republic }
\affiliation{RCQI,~Institute of Physics,~Slovak Academy of Sciences,~D\'ubravsk\'a cesta 9,~84511 Bratislava,~Slovakia}
\author{M\'ario Ziman}
\affiliation{RCQI,~Institute of Physics,~Slovak Academy of Sciences,~D\'ubravsk\'a cesta 9,~84511 Bratislava,~Slovakia}
\affiliation{Faculty of Informatics,~Masaryk University,~Botanick\'a 68a,~60200 Brno,~Czech Republic}
\begin{abstract}
We introduce the concept of boundariness capturing the most efficient way
of expressing a given element of a convex set as a probability
mixture of its boundary elements. In other words, this number
measures (without the need of any explicit topology) how far the given element
is from the boundary. It is shown that one of the elements from the
boundary can be always chosen to be an extremal element.
We focus on evaluation of this quantity for quantum sets of states,
channels and observables. We show that boundariness is intimately
related to (semi)norms that provide an operational
interpretation of this quantity. In particular, the minimum error
probability for discrimination of a pair of quantum devices is lower
bounded by the boundariness of each of them. We proved that
for states and observables this bound is saturated and conjectured this
feature for channels. The boundariness is zero for infinite-dimensional
quantum objects as in this case all the elements are boundary elements.
\end{abstract}
\pacs{03.67.-a}
\maketitle
\section{Introduction}
The experimental ability to switch randomly between physical apparatuses
of the same type naturally endows mathematical representatives of
physical objects with a convex structure. This makes the convexity
(and intimately related concept of probability) one of the key
mathematical features of any physical theory. Even more,
the particular ``convexity flavor'' plays a crucial role in the differences
not only between the types of physical objects, but also between the theories.
For example, the existence of non-unique convex decomposition of density
operators is the property distinguishing quantum theory from the classical one \cite{Holevo}.
Our goal is to study the convex structures that naturally appear in quantum theory and to illustrate the operational meaning of the concepts directly linked to the convex structure. However, most of our findings are applicable to any convex set. The main goal of this paper is to introduce and investigate the concept of boundariness quantifying how far the individual elements of the convex set are from its boundary. Intuitively, the boundariness determines the most non-uniform (binary) convex decomposition into boundary elements, hence, it quantifies how mixed the element is. We will show that this concept is operationally related to the specification of the most distinguishable element (in the sense of minimum-error discrimination probability). For instance, for states the evaluation of boundariness coincides with the specification of the state best distinguishable from the given one, hence it is proportional to the trace distance \cite{Helstrom}.
The paper is organized as follows:
Section II introduces the concept of boundariness and related results
in general convex sets, the boundariness for quantum sets is evaluated
in Section III and the relation to minimum-error discrimination is described
in Section IV. Section V shortly summarizes the main results. The appendices
contain mathematical details concerning the properties of weight function,
characterization of the boundary elements of all considered quantum sets
and numerical details of the case study.
\section{Convex structure and boundariness}
\label{sect:generalZ}
In any convex set $Z$ we may define a convex preorder $\leq_C$. We say $x\leq_C y$ if $x$ may appear in a convex decomposition of $y$ with a non-zero weight, i.e. there exists
$z\in Z$ such that $y=t x +(1-t)z$ with $0<t\leq 1$. If $x\leq_C y$, then $y$ has $x$ in its convex decomposition, hence, (loosely speaking) $y$ is ``more'' mixed than $x$. The
value of $t$ (optimized over $z$) can be used to quantify this relation. Namely, for any element $y\in Z$ we define the {\it weight function} $t_y:Z \to [0,1]$ assigning for every
$x\in Z$ the supremum of possible weights $t$ of the point $x$ in the convex
decomposition of $y$, i.e.
$$
t_y(x)=\sup\Big\{0\leq t < 1\,\Big|\,z=\frac{y-tx}{1-t}\in Z\Big\}\,.
$$
Obviously, $t_y(y)=1$ and $t_y(x)=0$ whenever $x \nleq_C y$. In order to understand the geometry of the optimal $z$ for a given pair of elements $x,y$, it is useful to express the
element $z$ in the form $z=y+\frac{t}{1-t}(y-x)$. As $t$ increases, $z$ moves in the direction of $y-x$ until (for value $t=t_y(x)$) it leaves the set $Z$ (see Fig. \ref{fig:ilfdefty}
for illustration). If the element $z$ associated with $t_y(x)$ is an element of $Z$, then it can be identified as a boundary element of $Z$.
\begin{figure}
\caption{Illustration of elements $z$ and $x^\prime$ emerging in the definition of the weight function $t_y(x)$ and in the property $t_y(x^\prime)\leq t_y(x)$, respectively.}
\label{fig:ilfdefty}
\end{figure}
The (algebraic) boundary $\partial Z$ contains all elements $y$ for which there exists $x$ such that $x\nleq_C y$ (let us stress this is equivalent with the definition used in Ref.~\cite{dlp}).
Hence, for each boundary element $y$ the weight function $t_y(x)=0$ for some $x$
and also the opposite claim holds, i.e.,\ if there exists $x\in Z: t_y(x)=0$ then $y\in \partial Z$. As a consequence, $t_y(x)>0$ $\forall x\in Z$ for all inner points $y\in Z\setminus \partial Z$.
This motivates the following definition of \emph{boundariness}
$$
b(y)=\inf_{x\in Z} t_y(x)
$$
measuring how far the given element of $Z$ is from the boundary $\partial Z$.
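As a toy illustration (added here for orientation and not needed later), let $Z=[0,1]\subset\mb R$. For an inner point $y\in(0,1)$ and $x\in Z$, the condition $(y-tx)/(1-t)\in[0,1]$ gives $t_y(x)=\min\{y/x,(1-y)/(1-x)\}$ (with the convention that a ratio with vanishing denominator is $+\infty$), so that
$$
b(y)=\inf_{x\in[0,1]}t_y(x)=\min\{y,1-y\}\,,
$$
the infimum being attained at the extreme points $x=1$ and $x=0$, respectively.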
Suppose $x^\prime$ belongs to the line generated by $x$ and $y$, i.e. $x^\prime = y-k(y-x)$ ($x^\prime=x$ for $k=1$ and $x^\prime=y$ for $k=0$). Then
$t_y(x^\prime)\leq t_y(x)$ whenever $k\geq 1$ (see Fig. \ref{fig:ilfdefty}). Hence, the infimum can be approximated again by some boundary element of $Z$. In other words, the
value of boundariness is determined by the most non-uniform convex decomposition of $y$ into boundary elements of $Z$, i.e. $y$ can be, in a sense, approximated by expressions
$b(y)x+(1-b(y))z$ with $x,z\in\partial Z$. Therefore, $b(y)\leq 1/2$.
See Fig. \ref{fig:figure} for illustration of boundariness for simple convex sets.
\begin{figure}
\caption{(Color online) Contour plots of boundariness for simple convex sets. Let us note that the maximal value of boundariness is not the same in all of them.}
\label{fig:figure}
\end{figure}
\begin{lemma}\label{lemma:tconvex}
Let $y\in Z$. The inverse $x\mapsto 1/t_y(x)$ of the weight function $t_y$ is convex, i.e.,
\begin{align}
\label{eq:tconvex}
\frac{1}{t_y\big(sx_1+(1-s)x_2\big)}\leq\frac{s}{t_y(x_1)}+\frac{1-s}{t_y(x_2)} \nonumber
\end{align}
for all $x_1,\,x_2\leq_C y$ and $0\leq s\leq 1$.
\end{lemma}
\par\noindent{\bf Proof. }
For every $0<t_i<t_y(x_i)$ $i=1,2$ we define $z_i=y-\frac{t_i}{1-t_i}(x_i-y)\in Z$. Further, we define
$x=sx_1+(1-s)x_2$ and $z=uz_1+(1-u)z_2$, where $x,\,z\in Z$ because $s\in[0,1]$ and
\begin{equation}
u=\frac{s\frac{1-t_1}{t_1}}{s\frac{1-t_1}{t_1}+(1-s)\frac{1-t_2}{t_2}}\;\in[0,1].
\end{equation}
See Fig. \ref{fig:ilflemma1} for illustration.
Straightforward calculation shows that we may write $y=tx+(1-t)z$, where $t^{-1}=st_1^{-1}+(1-s)t_2^{-1}$.
From the definition of the weight function, we have $t\leq t_y(x)$. Since this holds for all $0<t_i<t_y(x_i)$ $i=1,2$ we get
$(\frac{s}{t_y(x_1)}+\frac{1-s}{t_y(x_2)})^{-1}\leq t_y(x)$, which concludes the proof.
$\,\blacksquare$\par
\begin{figure}
\caption{Illustration of the proof of Lemma \ref{lemma:tconvex}.}
\label{fig:ilflemma1}
\end{figure}
The following proposition is one of the key results of this section. It guarantees that one of the elements of the optimal decomposition (determining the boundariness) can be chosen
to be an extreme point of $Z$.
It is shown in Appendix \ref{sec:appropty} that, whenever $Z\subset\mb R^n$ for some $n\in\mb N$, the weight function $t_y$ is continuous if (and only if)
$y\in Z\setminus\partial Z$. Continuity of $t_y$ is studied in the appendices also in a slightly more general context.
\begin{proposition}\label{prop:extremal_deco}
Suppose that $Z\subset\mb R^n$ is a convex and compact set. For every $y\in Z\setminus\partial Z$ there exists an extreme point $x\in Z$ such that $b(y)=t_y(x)$.
\end{proposition}
\par\noindent{\bf Proof. }
The continuity implies that $t_y$ acquires its lowest value on
the compact set $Z$, i.e.,\ $b(y)=\inf_{x\in Z}t_y(x)=\min_{x\in Z}t_y(x)$.
Since $y\in Z\setminus\partial Z$, we have $t_y(x)>0$.
Moreover, because of the convexity of $x\mapsto 1/t_y(x)$ proven in Lemma \ref{lemma:tconvex},
it follows that
\begin{eqnarray}
\nonumber
\min_{x\in Z}t_y(x)&=&\big(\max_{x\in Z}1/t_y(x)\big)^{-1}\\\nonumber
&=&\big(\max_{x\in\mr{ext}\,Z}1/t_y(x)\big)^{-1}=\min_{x\in\mr{ext}\,Z}t_y(x)\,,
\end{eqnarray}
where $\mr{ext}\,Z$ denotes the set of extreme points of $Z$.
$\,\blacksquare$\par
The convex sets appearing in quantum theory are typically compact and convex subsets of $\mb R^n$, meaning that the above proposition is applicable in our
subsequent analysis. It is easy to show that, in the context of Proposition \ref{prop:extremal_deco}, for any $y\in Z\setminus\partial Z$ and $x\in Z$ there exists an element
$z\in\partial Z$ such that $y=t_y(x)x+\big(1-t_y(x)\big)z$. This, combined with Proposition \ref{prop:extremal_deco}, yields that for any $y\in Z\setminus\partial Z$ there is
$x\in\mr{ext}\,Z$ and $z\in\partial Z$ such that $y=b(y)x+\big(1-b(y)\big)z$ when $Z$ is a convex and compact subset of $\mb R^n$.
Suppose that $y\in Z\setminus\partial Z$, where $Z\subset\mb R^n$ is a convex and compact set. Let $x\in\mr{ext}\,Z$ be an element, whose existence is guaranteed by
Proposition \ref{prop:extremal_deco}, such that $b(y)=t_y(x)$. If one had $b(y)=0$, this would mean that $t_y(x)=0$ implying that $x$ does not appear in any convex
decomposition of $y$. This yields the contradiction $y\in\partial Z$. Hence, $b(y)>0$ for any non-boundary element $y\in Z$, and we see that, in the context of Proposition
\ref{prop:extremal_deco}, $b(y)=0$ if and only if $y\in\partial Z$. Compactness is an essential requirement for this property. Consider, e.g.,\ a convex set $Z\subset\mb R^n$ that
has a direction, i.e.,\ there is a vector $v\in\mb R^n$ and a point $y\in Z\setminus\partial Z$ such that $y+\alpha v\in Z$ for all $\alpha>0$. Such a set is not compact and one easily
sees that $b(y)=0$.
\begin{remark}{\rm (Evaluation of boundariness)}
\label{prop:algorithm}
\newline
In practice, it is useful to have a numerical way to evaluate
the boundariness. It follows from the definition of boundariness
that for any element $y\in Z$ written as a convex combination $y=tx+(1-t)z$ with $z\in\partial Z$
the value of $t$ (being $t_y(x)$ in this case) provides an upper bound on the
boundariness, hence $t\equiv t_y(x)\geq b(y)$. Suppose we are
given $y$ and choose some value of $t$. Recall that for a fixed $y\in Z$
and for every $x\in Z$ the element $z_t(x)=(y-tx)/(1-t)$ leaves the set $Z$
for $t=t_y(x)$. Therefore, if we choose $t\leq b(y)$ implying $t\leq t_y(x)$,
then $z_t(x)\in Z$ for all $x\in Z$. However, if it happens that $t>b(y)$,
then for some $x$ we find $t>t_y(x)$ and consequently $z_t(x)\notin Z$. Even more, according to
Proposition \ref{prop:extremal_deco} such $x$ (determining the element
$z_t(x)$ out of $Z$) can be chosen to be extremal. In conclusion,
if $t>b(y)$, then there exist $x\in\mr{ext}Z$ such that
$z_t(x)=(y-tx)/(1-t)\notin Z$.
This observation provides the basics of the numerical method we used to
test whether a given value of $t$ coincides with $b(y)$ or not.
In particular, for any $y$
we start with the maximal value of $t=1/2$ (if we do not have a better
estimate) and decrease it until we reach the value of $t$ for which
$z_t(x)\in Z$ for all $x\in\mr{ext}Z$. Equivalently, we may start with $t=0$
and increase its value until we find $t$ for which $z_{t+\varepsilon}(x)\notin Z$ for some
$x\in\mr{ext}Z$ and $\forall \varepsilon>0$.
\end{remark}
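As a concrete (and purely illustrative) realization of this recipe, consider the set of qubit density operators: membership of $z_t(x)$ in $Z$ amounts to positive semidefiniteness, the extreme points can be sampled as random pure states, and $t$ can be located by bisection. The exact value for states, derived in the next section, is the minimal eigenvalue of $y$, which the sketch below reproduces approximately; all parameter choices are our own.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def random_pure_state(d=2):
    # Haar-random pure state |psi><psi|, used as a sampled extreme point
    v = rng.normal(size=d) + 1j * rng.normal(size=d)
    v /= np.linalg.norm(v)
    return np.outer(v, v.conj())

def in_state_space(z, tol=1e-9):
    # membership test for Z = density operators: z is Hermitian with unit
    # trace by construction, so only positive semidefiniteness is checked
    return np.min(np.linalg.eigvalsh((z + z.conj().T) / 2)) >= -tol

def boundariness_estimate(y, n_extremals=2000, iters=40):
    xs = [random_pure_state(y.shape[0]) for _ in range(n_extremals)]
    lo, hi = 0.0, 0.5              # b(y) <= 1/2 always
    for _ in range(iters):         # bisection on t
        t = (lo + hi) / 2
        if all(in_state_space((y - t * x) / (1 - t)) for x in xs):
            lo = t                 # t <= t_y(x) for every sampled x
        else:
            hi = t                 # z_t(x) left Z for some x, so t > b(y) estimate
    return lo

y = np.diag([0.3, 0.7]).astype(complex)
print(boundariness_estimate(y))    # close to the minimal eigenvalue 0.3
\end{verbatim}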
In what follows we will formulate a proposition that relates the value
of boundariness to any (bounded) seminorm defined on the (real) vector
space $V$ containing the convex set $Z$.
\begin{proposition}\label{prop:normbound}
Consider a (semi)norm $p:V\to[0,\infty)$ such that $p(x)\leq a$
for all $x\in Z$ with some $a\geq 0$. Then
\begin{equation}\label{eq:normbound}
p(x-y)\leq2a\big(1-t_y(x)\big)\leq2a\big(1-b(y)\big)
\end{equation}
for all $x,\,y\in Z$.
\end{proposition}
\par\noindent{\bf Proof. }
Pick $x,\,y\in Z$. The last inequality in (\ref{eq:normbound}) follows immediately from the definition of boundariness so we concentrate on the first inequality. If $t_y(x)=0$ then the
claim is trivial and follows from the triangle inequality for the seminorm. Let us assume that $t_y(x)>0$ and pick $t\in[0,t_y(x))$. According to the definition of the weight function,
we have $z(t)=(1-t)^{-1}(y-tx)\in Z$. It follows that $x-y=(1-t)\big(x-z(t)\big)$ yielding
\begin{eqnarray*}
p(x-y)&=&(1-t)p\big(x-z(t)\big)\leq(1-t)\big(p(x)+p\big(z(t)\big)\big)\\
&\leq&2a(1-t).
\end{eqnarray*}
As we let $t$ approach $t_y(x)$ from below, we obtain the first inequality of (\ref{eq:normbound}).
$\,\blacksquare$\par
In Section \ref{sec:reldiscr} we will employ this proposition to
relate the concept of boundariness to error rate of
minimum-error discrimination in case of quantum convex sets of states,
channels and observables. Shortly, the optimal values of error
probabilities are associated with the so-called
{\it base norms}~\cite{jencova2013,reeb_etal2011}, thus setting $p(x-y)=\|x-y\|_Z$
in Eq.~\eqref{eq:normbound} we obtain an operational meaning
of boundariness. Let us stress that the base norm $\|x-y\|_Z$ can be introduced
only if certain conditions are met.
In particular, let us assume that the real vector space $V$ is equipped
with a {\it cone} $C\subset V$, i.e., $C$ is a convex set such that
$\alpha v\in C$ for any $v\in C$ and $\alpha\geq0$.
Moreover, we assume that $C$ is {\it pointed},
i.e.,\ $C\cap(-C)=\{0\}$ and {\it generating}, i.e.,\ $C-C=V$. Further,
suppose $Z\subset C$ is a {\it base} for $C$, i.e.,\ $Z$ is convex and
for any $v\in C$ there are unique $x\in Z$ and $\alpha\geq0$ with
$v=\alpha x$. Especially when $x\in Z$, there is no non-negative factor
$\alpha\neq 1$ such that $\alpha x\in Z$. Moreover, it follows that
$0\notin Z$.
Let us note that all quantum convex sets are bases for generating cones for
their ambient spaces. For example, the set of density operators $\mc S(\mc H)$
on a Hilbert space $\mc H$ is the base for the cone of
positive trace-class operators which, in turn, generates the real vector
space of selfadjoint trace-class operators. This is the natural ambient space
for $\mc S(\mc H)$ rather than the entire space of selfadjoint bounded
operators, although the value for the boundariness of an individual state
does not change if the considered ambient space is larger than the space of selfadjoint
trace-class operators.
Whenever $Z$ is a base of a generating cone in $V$ one can define the
base norm $\|\cdot\|_Z:V\rightarrow [0,\infty)$. In particular, for each $v\in V$
$$
\|v\|_Z=\inf_{\lambda,\mu\geq 0}\{\lambda+\mu\,|\,v=\lambda x-\mu y
{\rm\ for\ some\ } x,y\in Z\}\,.
$$
By definition $\|x\|_Z\leq 1$ for all $x\in Z$, hence,
according to Proposition \ref{prop:normbound}
\begin{align}\label{eq:base-norm-boundariness}
\|x-y\|_Z\leq 2(1-b(x))\,.
\end{align}
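Let us recall, for orientation, a standard special case: if $Z=\mc S(\mc H)$ sits inside the real space of selfadjoint trace-class operators, the base norm is the trace norm, $\|v\|_Z={\rm tr}|v|$, so inequality (\ref{eq:base-norm-boundariness}) becomes $ {\rm tr}|\varrho-\sigma|\leq 2\bigl(1-b(\varrho)\bigr)$ for all states $\varrho,\sigma$.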
If $Z$ defines a base of a generating pointed cone in $V$ the weight function $t_y(x)$ has a relation to Hilbert's projective metric. Details of this relation are discussed in Appendix \ref{sec:apphpm}.
Since members of a base $Z$ can be seen as representatives of the projective space $\mb PV$, the projective metric also defines a way to compare elements of $Z$
which can be used to relate this metric to distinguishability measures \cite{reeb_etal2011}.
\section{Quantum convex sets}
There are three elementary types of quantum devices: sources (states),
measurements (observables) and transformations (channels).
They are represented by density operators, positive-operator valued measures,
and completely positive trace-preserving linear maps, respectively (for more details see for instance \cite{heinosaari12}).
\subsection{States}
Let us illustrate the concept of boundariness for the convex set of
{\it quantum states}, i.e. for the set of {\it density operators}
$$
\mc{S}(\mc{H}_d)=\{\varrho: \varrho\geq O,\tr{\varrho}=1\}\, ,
$$
where $\varrho\geq O$ stands for the positive semidefiniteness of the operator $\varrho$.
Suppose that the Hilbert space $\mc{H}_d$
is finite dimensional with the dimension $d$.
The boundariness $b(\varrho)$ determines a decomposition (it need not be unique)
of the state $\varrho$ into boundary elements $\xi$ and $\zeta$
$$
\varrho=b(\varrho)\xi+(1-b(\varrho))\zeta\,.
$$
A density operator belongs to the boundary if and only if it has
a nontrivial kernel (i.e. it has $0$ among its eigenvalues,
for details see appendix \ref{sec:appbfstates}). In other words there exist
vectors $\ket{\varphi}$ and $\ket{\psi}$ such that
$\xi\ket{\varphi}=0=\zeta\ket{\psi}$, respectively. Therefore,
\begin{align}
\lambda_{\min}\leq\bra{\psi}\varrho\ket{\psi}&=
b(\varrho)\bra{\psi}\xi\ket{\psi}\,, \nonumber \\
\lambda_{\min}\leq\bra{\varphi}\varrho\ket{\varphi}&=
[1-b(\varrho)]\bra{\varphi}\zeta\ket{\varphi}\,, \nonumber
\end{align}
where $\lambda_{\min}$ is the minimal eigenvalue of $\varrho$. Moreover, since
$\bra{\varphi}\zeta\ket{\varphi}\leq 1$ and $\bra{\psi}\xi\ket{\psi}\leq 1$
(because $\xi,\zeta\leq I$) it follows that the boundariness is bounded in
the following way
\begin{align}
\label{eq:boundstates}
\lambda_{\min}\leq b(\varrho)\leq 1-\lambda_{\min}\,.
\end{align}
The upper bound in (\ref{eq:boundstates}) holds trivially,
because, in general, the boundariness is smaller than or equal to $1/2$.
On the other hand, the tightness of the lower bound in (\ref{eq:boundstates}) is exactly what we are interested in.
Based on our general consideration (Proposition~\ref{prop:extremal_deco})
we know we may choose $\xi$ to be an extremal element, i.e. a
one-dimensional projection. Set $\xi=\ket{\psi}\bra{\psi}$, where
$\ket{\psi}$ is an eigenvector of $\varrho$ associated with the
minimal eigenvalue $\lambda_{\rm min}$. Then
$$
\varrho=\lambda_{\min} \ket{\psi}\bra{\psi}+(1-\lambda_{\min})
\frac{\varrho-\lambda_{\min}\ket{\psi}\bra{\psi}}{1-\lambda_{\min}}
$$
is the convex decomposition of $\varrho$ into boundary elements
saturating the above lower bound, hence we have just proved the
following proposition.
\begin{proposition}
The boundariness of a state $\varrho$ of a finite-dimensional quantum system is given by
$$
b(\varrho)=\lambda_{\min}\,,
$$
where $\lambda_{\min}$ is the minimal eigenvalue of the density operator
$\varrho$.
\end{proposition}
Thus,\ the minimal eigenvalue possesses a direct operational interpretation
as a measure of the mixedness of the density operator. Indeed, the maximum $b(\varrho)=1/d$
is achieved only for the maximally mixed state $\varrho=\frac{1}{d}I$.
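As a simple illustration (an example of our own choosing): for the qubit state with spectral decomposition $\varrho=0.3\,\ket{0}\bra{0}+0.7\,\ket{1}\bra{1}$ we get $b(\varrho)=0.3$, realized by the boundary decomposition
$$
\varrho=0.3\,\xi+0.7\,\zeta\,,\qquad \xi=\ket{0}\bra{0}\,,\quad
\zeta=\frac{\varrho-0.3\,\ket{0}\bra{0}}{0.7}=\ket{1}\bra{1}\,,
$$
where $\xi$ is extremal and both $\xi$ and $\zeta$ have a nontrivial kernel, hence lie on the boundary.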
The infinite-dimensional case is somewhat trivial, because, according to Proposition \ref{prop:boundarystate} in the appendices, all infinite-dimensional states are on the boundary,
i.e.,\ $\partial\mc{S}(\mc{H}_\infty)=\mc{S}(\mc{H}_\infty)$. Consequently,
the boundariness of any state in this case is zero.
\subsection{Observables}
In quantum theory, the statistics of measurements is fully captured by
{\it quantum observables} which are mathematically represented by
{\it positive-operator valued measures} (POVM). Any observable $\ms C$ with
finite number of outcomes labeled as $1,\dots,n$ is represented
by positive operators (called effects) $C_1,\dots,C_n\in\mc{L}(\mc{H})$
such that $\sum_j C_j=I$. Suppose the system is prepared in a state $\varrho$.
Then, in the measurement of $\ms C$, the outcome $j$ occurs with probability
$p_j=\tr{\varrho C_j}$. The set of all observables with the fixed number $n$
of outcomes is clearly convex. We interpret $\ms C=t\ms A+(1-t)\ms B$
as an $n$-outcome measurement with effects $C_j=t A_j+(1-t)B_j$.
Let us concentrate on the finite-dimensional case $\mc H=\mc H_d$ and denote by $\sigma(\ms C)$ the union of all eigenvalues (spectra)
of all effects $C_j$ of a POVM $\ms C$ and denote by $\lambda_{\min}$ the smallest
number in $\sigma(\ms C)$. An observable $\ms C$ belongs to the boundary if and only if \cite{dlp}
$\lambda_{\min}=0$; this is also proved in appendix \ref{sec:appbfobs}. Using the same argumentation
as in the case of states we find that
\begin{align}
\label{eq:obsbineq}
\lambda_{\min}\leq b(\ms C)\,.
\end{align}
Suppose $\ket{\psi}$ is the eigenvector associated with
the eigenvalue $\lambda_{\min}$ of the effect $C_k$ for some
value of $k\in\{1,\dots,n\}$. Define an extremal (and projective) $n$-valued
observable $\ms A$ (in accordance with Proposition~\ref{prop:extremal_deco})
\begin{align}
\label{eq:observable_A}
A_j=\left\{
\begin{array}{lcl}
\ket{\psi}\bra{\psi} &\qquad& {\rm if\quad} j=k\\
I-\ket{\psi}\bra{\psi} &\qquad& {\rm for\ unique\quad} j\neq k\\
O &\qquad& {\rm otherwise}
\end{array}
\right.
\end{align}
The observable $\ms B$ with effects
\begin{align}
B_j=\frac{1}{1-\lambda_{\min}}(C_j-\lambda_{\min} A_j) \nonumber
\end{align}
belongs to the boundary, because
$$(1-\lambda_{\min})B_k\ket{\psi}=C_k\ket{\psi}-\lambda_{\min}
A_k\ket{\psi}=0\,,$$ hence $0\in\sigma(\ms B)$.
Using these two boundary elements of the set of $n$-valued
observables we may write $\ms C=\lambda_{\min}\ms A+(1-\lambda_{\min})\ms B$, hence
the lower bound (\ref{eq:obsbineq})
can be saturated and we can formulate the following proposition:
\begin{proposition}
\label{prop:boundariness_observables}
Given an $n$-valued observable $\ms C$ of a finite-dimensional quantum system, the boundariness equals
$$
b(\ms C)=\lambda_{\min}\,,
$$
where $\lambda_{\min}$ is the minimal eigenvalue of all effects
$C_1,\dots,C_n$ forming the POVM of the observable $\ms C$.
\end{proposition}
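As an illustration (an example of our own choosing), consider the two-outcome qubit observable $\ms C$ with effects $C_1={\rm diag}(0.2,0.6)$ and $C_2={\rm diag}(0.8,0.4)$ in a fixed orthonormal basis $\ket{0},\ket{1}$. Here $\lambda_{\min}=0.2$, attained by $C_1$ at $\ket{\psi}=\ket{0}$, so the construction above yields $A_1=\ket{0}\bra{0}$, $A_2=\ket{1}\bra{1}$ and
$$
B_1=\frac{1}{0.8}\bigl(C_1-0.2\,A_1\bigr)={\rm diag}(0,0.75)\,,\qquad
B_2=\frac{1}{0.8}\bigl(C_2-0.2\,A_2\bigr)={\rm diag}(1,0.25)\,,
$$
which form a boundary observable; consequently $b(\ms C)=0.2$.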
\subsection{Channels}
\label{sec:channels}
The transformation of a quantum system over some time interval is described
by a {\it quantum channel}, mathematically represented as a trace-preserving completely
positive linear map. It is shown in the appendix \ref{sec:appbfchannels} that for infinite-dimensional
quantum systems the boundary of the set of channels coincides with the whole
set of channels, hence, the boundariness (just like for states)
vanishes. Therefore, we will focus on finite-dimensional quantum systems,
for which the channels can be isomorphically represented by so-called
Choi-Jamiolkowski operators. In particular, for a channel $\mc{E}$
on a $d$-dimensional quantum system its Choi-Jamiolkowski operator
is the unique positive operator $E=(\mc{E}\otimes \mc{I})(P_+)$, where
$P_+=\ket{\psi_+}\bra{\psi_+}$ and
$\ket{\psi_+}=\varphirac{1}{\sqrt{d}}\sum_{j=1}^d \ket{j}\otimes\ket{j}$.
By definition, $E$ belongs to a subset of density operators
on $\mc{H}_d\otimes\mc{H}_d$ satisfying the normalization
${\rm tr}_1 E=\varphirac{1}{d}I$, where ${\rm tr}_1$ denotes the partial
trace over the first system (on which the channel acts).
While the extremality of channels is a bit more complicated
than for the states, the boundary elements of the set of channels
can be characterized in exactly the same way as for states. In fact,
${\cal E}$ is a boundary element if and only if the associated Choi-Jamiolkowski
operator $E$ contains zero in its spectrum
(see Appendix \ref{sec:appbfchannels}). Given a channel ${\cal E}$ we may
use the result (\ref{eq:boundstates}) derived for density operators
to lower bound the boundariness
\begin{align}
\label{eq:chbound}
\lambda_{\min}\leq b({\cal E})\; ,
\end{align}
where $\lambda_{\min}$ is the minimal eigenvalue
of the Choi-Jamiolkowski operator $E$. However, since the structures
of extremal elements for channels and states are different, the
tightness of the lower bound (\ref{eq:chbound}) does not follow from
the consideration of states. Indeed, the following example shows
that the bound is in general not tight for channels.
\newline\newline
\noindent
Case study: Erasure channels.
Consider a qubit ``erasure'' channel ${\cal E}_p$ transforming an arbitrary
input state $\varrho$ into a fixed output state
$\xi_p=p\ket{0}\bra{0}+(1-p)\ket{1}\bra{1}$, $0<p<1/2$,
with the induced Choi-Jamiolkowski operator $E_p=\xi_p\otimes \frac{1}{2}I$.
In order to evaluate the boundariness of the channel ${\cal E}_p$, according to Proposition \ref{prop:extremal_deco}, it suffices to inspect convex decompositions
\begin{align}
\label{eq:ccdecomp1}
E_p=t F + (1-t)G,
\end{align}
where $F$ corresponds to an extremal qubit channel and $G$ to a channel from the boundary. Our goal is to minimize the value of $t\equiv t_{{\cal E}_p}(\mathcal{F})$ over extremal channels $\mathcal{F}$ in order to determine the value of the boundariness.
The extremality condition (linear independence of the set
$\{A_j^\dagger A_k\}_{jk}$) implies that extremal qubit channels
can be expressed via at most two Kraus operators $A_j$. Consequently,
the corresponding Choi-Jamiolkowski operators are either rank-one
(unitary channels), or rank-two operators. In what follows we will
discuss only the analysis of rank-one extremal channels, because it turns
out that they minimize the value of the weight function
$t_{{\cal E}_p}(\mathcal{F})$. The details concerning the analysis of
rank-two extremal channels (showing that they cannot attain the boundariness)
are given in Appendix \ref{sec:appccchannel}.
Any qubit unitary channel $\mathcal{F}(\rho)=U\rho U^\dagger$ is represented by a Choi-Jamiolkowski operator $F=\ket{U}\bra{U}$, where $\ket{U}=\frac{1}{\sqrt{2}}
(\ket{u}\otimes\ket{0}+\ket{u^\perp}\otimes\ket{1})$ is a maximally entangled state and $\ket{u}\equiv U\ket{0}$, $\ket{u^\perp}\equiv U\ket{1}$. Our goal is to evaluate the value of $t$
for which the operator $G$ specified in Eq.~(\ref{eq:ccdecomp1}) describes a channel $\mathcal{G}$ from the boundary. This reduces to an analysis of the eigenvalues of $(1-t)G$, which
read $\{p,1-p,\frac{1}{2}(1-2t-\sqrt{D}), \frac{1}{2}(1-2t+\sqrt{D})\}$, where $D=(1-2p)^2 + 4 t^2$. It is straightforward to observe that they are all strictly positive for
$t<p(1-p)$; thus, the identity $t=p(1-p)$ characterizes the cases when the channel $\mathcal{G}$ belongs to the boundary of the set of channels, independently of the particular choice of
the unitary channel $\mathcal{F}$. In conclusion, all unitary channels determine the same value $t=p(1-p)$, hence the boundariness of erasure channels equals
$b({\cal E}_p)=p(1-p)$.
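The case study can be checked numerically. The following Python sketch (our illustration; the sampled unitaries and variable names are ours) verifies that for $t=p(1-p)$ the operator $G=(E_p-tF)/(1-t)$ is positive semidefinite with a vanishing eigenvalue for randomly chosen unitary channels, in agreement with the value $b({\cal E}_p)=p(1-p)$.
\begin{verbatim}
# Numerical check of the erasure-channel case study: for t = p(1-p) and any
# unitary U, E_p - t*F is positive semidefinite with a zero eigenvalue,
# so G is a boundary channel.
import numpy as np

def choi_unitary(U):
    """F = |U><U| with |U> = (U|0> (x) |0> + U|1> (x) |1>) / sqrt(2)."""
    ket_U = (np.kron(U[:, 0], [1, 0]) + np.kron(U[:, 1], [0, 1])) / np.sqrt(2)
    return np.outer(ket_U, ket_U.conj())

p = 0.3
xi = np.diag([p, 1 - p])
E_p = np.kron(xi, np.eye(2) / 2)      # Choi operator of the erasure channel

rng = np.random.default_rng(0)
t = p * (1 - p)
for _ in range(5):                    # a few random unitaries U
    A = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))
    U, _ = np.linalg.qr(A)
    G = (E_p - t * choi_unitary(U)) / (1 - t)
    print(np.round(np.linalg.eigvalsh(G).min(), 10))   # ~0 for every U
\end{verbatim}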
\begin{figure}
\caption{(Color online) The strict difference between the boundariness $b$ (upper line)
and the minimal eigenvalue $\lambda_{\min}$ of the Choi-Jamiolkowski operator for the qubit ``erasure'' channels ${\cal E}_p$.}
\label{fig:boundarinesscc}
\end{figure}
The example of a qubit ``erasure'' channel ${\cal E}_p$ illustrates (see Figure \ref{fig:boundarinesscc}) that, unlike for states and observables, the boundariness of a channel ${\cal E}$
may differ from the lower bound (\ref{eq:chbound}) given by the minimal eigenvalue of the Choi operator $E$. This finding is summarized in the following proposition.
\begin{proposition}
\label{prop:erasure}
For qubit ``erasure'' channels ${\cal E}_p$ with $0<p<1/2$ the boundariness
is strictly larger than the minimal eigenvalue of the Choi-Jamiolkowski
operator. In particular, $b({\cal E}_p)=p(1-p)>\lambda_{\min}=p/2$.
\end{proposition}
Further, we investigate for which channels (if any)
the lower bound on the boundariness is tight, i.e. when
$b({\cal E})=\lambda_{\min}$. A trivial example is provided by
channels from the boundary, for which $b({\cal E})=\lambda_{\min}=0$, but
are there any other examples? Consider a channel ${\cal E}$ such that
the minimal eigenvalue subspace of the associated Choi-Jamiolkowski
operator $E$ contains a maximally entangled state. Then a decomposition
with $t =\lambda_{\min}$ exists and it corresponds to a mixture of a unitary
channel (extremal element) and some other channel from the boundary.
On the other hand, if the subspace of the minimal eigenvalue of $E$
does not contain any maximally entangled state, it is natural to conjecture
that the boundariness will be strictly greater than $\lambda_{\min}$.
The following proposition proves that this conjecture is valid.
\begin{proposition}
\label{prop:nolmindecomp}
Consider an inner element ${\cal E}$ of the set of channels such that
the minimal eigenvalue subspace of its Choi-Jamiolkowski operator
$E$ does not contain any maximally entangled state. Then its boundariness
is strictly larger than the minimal eigenvalue, i.e.
$b(\mathcal{E})>\lambda_{\min}$.
\end{proposition}
\par\noindent{\bf Proof. }
We split the proof into two parts. First, we prove $t_{{\cal E}}(\mathcal{F})>\lambda_{\min}$ for any unitary channel $\mathcal{F}$, and then we prove it for any other channel
$\mathcal{F}$. Let us write the spectral decomposition of the operator $E$ as
\begin{align}
E&=\sum_{k=1}^r \lambda_k P_k ,
\end{align}
where the eigenvalues $\lambda_k > 0$ are non-decreasing in $k$ (i.e. $\lambda_1=\lambda_{\min}$), $P_k$ are the projectors onto the eigensubspaces corresponding to $\lambda_k$, and $\sum_k P_k=I$ is the identity operator on $\mc{H}_d\otimes\mc{H}_d$. Since ${\cal E}$ is an inner point, $\lambda_1\neq 0$. The Choi-Jamiolkowski operators associated with unitary channels $\mathcal{F}$ have the form $F=\ket{\varphi}\bra{\varphi}$, where $\ket{\varphi}$ is a maximally entangled state. The assumption of the proposition implies that $P_1\ket{\varphi}\neq \ket{\varphi}$. In order to prove that $t_{{\cal E}}(\mathcal{F})>\lambda_{\min}$ it suffices to show that there exists $t>\lambda_{\min}$ such that $E-tF\geq 0$ (implying that $G=(E-tF)/(1-t)$ describes a quantum channel $\mathcal{G}$). It is useful to write
\begin{align}
\ket{\varphi}=\sqrt{\alpha}\ket{v}+\sqrt{1-\alpha}\ket{v_\perp},
\end{align}
where $0\leq \alpha < 1$, $P_1\ket{v}=\ket{v}$ and $P_1\ket{v_{\perp}}=0$.
Define the positive operator $X=\lambda_1 \ket{v}\bra{v} + \lambda_2 \ket{v_\perp}\bra{v_\perp}$
and write $E-tF=E-X+X-tF$. The operator $E-X$ is clearly positive. Further, we will show that $X-tF$ is positive
when we set $t=\lambda_1\lambda_2 / [\lambda_1+(\lambda_2-\lambda_1)\alpha] > \lambda_{\min}$
and, as a consequence, $E-tF\geq 0$.
By definition, $X-tF$ acts nontrivially only on the two-dimensional subspace spanned by the
vectors $\ket{v}$ and $\ket{v_\perp}$. Within this subspace it has eigenvalues
$0$ and $\lambda_2+\lambda_1-t>0$; hence, it is positive. This concludes the
first part of the proof concerning decompositions with unitary channels.
Now, let us assume that the channel $\mathcal{F}$ is not unitary.
Since the Choi-Jamiolkowski operator $F$ associated with the channel
$\mathcal{F}$ is a density operator, it follows that its maximal
eigenvalue $\mu_{\max}\leq 1$ (saturated only for unitary channels).
Set $t=\lambda_{\min}/ \mu_{\max}$. Then for non-unitary channels
$t>\lambda_{\min}$,
and since
$0<\lambda_{\min}\leq 1/d^2\leq\mu_{\max}$ it follows that $0<t\leq 1$.
For all vectors $\ket{\varphi}$
\begin{align}
\label{eq:expGlb}
\bra{\varphi}E-t F\ket{\varphi}\geq \lambda_{\min}
-\frac{\lambda_{\min}}{\mu_{\max}} \mu_{\max}=0,
\end{align}
and, therefore, $G=(E-tF)/(1-t)\geq 0$, too. As in the first part of the proof,
this means that $t_{\mathcal{E}}(\mathcal{F})>\lambda_{\min}$ for all
non-unitary boundary channels $\mathcal{F}$, because we found a decomposition
$\mathcal{E}=t\mathcal{F}+(1-t)\mathcal{G}$ with $t>\lambda_{\min}$.
The above two parts of the proof show that $t_{\mc E}(\mc F)>\lambda_{\min}$ for the channel $\mc E$ of the claim and for any channel $\mc F$. The claim follows from the
observation that, according to Proposition \ref{prop:extremal_deco}, $b(\mc E)=t_{\mc E}(\mc F)$ for some (extremal) channel $\mc F$ and, in particular for this optimal channel,
$t_{\mc E}(\mc F)>\lambda_{\min}$.
$\,\blacksquare$\par
\section{Relation to minimum-error discrimination}
\label{sec:reldiscr}
Quantum theory is known to be probabilistic; hence, individual outcomes
of experiments typically have a very limited (if any) operational interpretation.
One example of this type is the question of {\it discrimination} among
a limited number of quantum devices. In its simplest form
the setting is the following. We are given an unknown quantum device,
which is with equal prior probability
either $A$, or $B$ ($A$ and $B$ are known to us). Our task is to design an
experiment in which we are allowed to use the given device only once
and we are asked to conclude the identity of the device.
Clearly, this cannot be done in all cases unless some imperfections
are allowed. There are various ways to formulate the discrimination task.
The most traditional one \cite{Helstrom, Holevo} aims to minimize
the average probability of error of our conclusions. Surprisingly,
the success is quantified by norm-induced distances \cite{chiribella2009},
hence, the discrimination problem provides a clear operational interpretation
of these norms.
We may express the optimal error probability of minimum-error
discrimination as follows
\begin{align}
\label{eq:pevnorm}
p_{\rm error}(A,B)=\frac{1}{2}\left(1-\frac{1}{2}\|A-B\|\right)\, ,
\end{align}
where the type of the norm $\|A-B\|$ depends on the considered problem.
Recently, it was shown in Ref.~\cite{jencova2013}
that in general convex settings the so-called base norms are solutions to minimum-error discrimination problems. In particular, it was also shown that base norms coincide with the completely
bounded (CB) norms in the case of quantum channels, states and observables, thus, according to Proposition \ref{prop:normbound} and
Eq.~\eqref{eq:base-norm-boundariness} the following inequality
holds
$$
\|A-B\|_Z\equiv\|A-B\|_{\rm cb}\leq 2(1-b(A))\,.
$$
In the rest of this section we will illustrate that for quantum structures the
base norms (being completely bounded norms) and boundariness are
intimately related. We will investigate how tight the above
inequalities are for particular quantum convex sets.
\subsection{States}
Let us start with the case of quantum states, for which
the CB norm coincides with the trace-norm (see for instance
\cite{jencova2013,chiribella2009}),
i.e. $\|A\|_{\rm tr}=\tr{|A|}$.
Recall that the conclusion of Proposition \ref{prop:normbound},
when applied for states, is
\begin{align}
\label{eq:prop1fstates}
\|\varrho-\xi\|_{\rm tr}\leq 2[1-b(\varrho)]\,.
\end{align}
Since the norm distance is symmetric in its arguments, the roles of $\varrho$ and $\xi$ can be exchanged, and from (\ref{eq:pevnorm}) and (\ref{eq:prop1fstates}) it follows that
$$
p_{\rm error}(\varrho,\xi)\geq\frac{1}{2}\max\{b(\varrho),b(\xi)\}\,,
$$
i.e. the mixedness of states measured by their boundariness lower bounds
the optimal error probability of discrimination between them. Moreover, for a given
state $\varrho$ we may write
$$
\min_\xi p_{\rm error}(\varrho,\xi) \geq \frac{1}{2}b(\varrho)\,,
$$
hence interpreting the boundariness as the limiting value of
the best distinguishability of the state $\varrho$ from any
other state. In other words, the boundariness determines
the information potential of the state, since the distinguishability
of states is a key figure of merit for quantum communication
protocols \cite{wilde2013}.
As before, let $\ket{\psi}$ be a state for which
$\varrho\ket{\psi}=\lambda_{\min}\ket{\psi}$. It is straightforward to
see that
$$
\|\varrho - \ket{\psi}\bra{\psi}\|_{\rm tr}=2(1-\lambda_{\min})\,.
$$
Hence, the upper
bound (\ref{eq:prop1fstates}) can be saturated and we have proven the
following proposition.
\begin{proposition}
For a given state $\varrho$
$$
\sup_\xi \|\varrho - \xi\|_{\rm tr}=2(1-b(\varrho))\,.
$$
\end{proposition}
In particular, this implies that the states from the boundary
(with $b(\varrho)=0$) can be used as noiseless carriers of bits
of information, since for each of them one can find a perfectly
distinguishable ``partner'' state.
\subsection{Observables}
For observables we may formulate an analogous result:
\begin{proposition}
Suppose that $\ms C$ is an $n$-valued observable. Then
$$
\sup_{\ms A} \|\ms C-\ms A\|=2(1-b(\ms C))\,,
$$
where $\|\cdot\|$ is the base norm (identified with completely
bounded norm) for observables.
\end{proposition}
\par\noindent{\bf Proof. }
We will prove that $\ms A$ defined in Eq. (\ref{eq:observable_A})
yields the supremum of the claim.
Let us recall that $\ket{\psi}$ (used in the definition of $\ms A$)
is the vector defined by the relation $C_k\ket{\psi}=\lambda_{\min}\ket{\psi}$
for some $k$. According to Proposition \ref{prop:normbound}
\begin{align}
\label{eq:prop1fobs}
\|\ms C-\ms A\|\leq 2(1-\lambda_{\min})\,,
\end{align}
where the norm $\|\ms C-\ms A\|$ (the base norm $=$ completely bounded norm $=$ diamond norm) can be evaluated as \cite{jencova2013}
\begin{eqnarray*}
\|\ms C-\ms A\|
&=&\sup_\varrho \sum_j|\tr{\varrho(C_j-A_j)}|\,.
\end{eqnarray*}
Choosing $\varrho=\ket{\psi}\bra{\psi}$ we obtain
$$
\|\ms C-\ms A\|\geq 1-\lambda_{\min}+\sum_{j\neq k} \bra{\psi}C_j\ket{\psi}
$$
because $\bra{\psi}A_j\ket{\psi}=0$ for $j\neq k$,
$\bra{\psi}A_k\ket{\psi}=1$ and $\bra{\psi}C_k\ket{\psi}=\lambda_{\min}$.
Moreover, since $\sum_{j\neq k}\bra{\psi}C_j\ket{\psi}=1-\bra{\psi}C_k\ket{\psi}=
1-\lambda_{\min}$, we find that for the chosen observables $\ms C$, $\ms A$ we have
$\|\ms C-\ms A\|\geq 2(1-\lambda_{\min})$. Combining this with the
upper bound (\ref{eq:prop1fobs}), valid for any observable $\ms A$, we have proven the proposition.
$\,\blacksquare$\par
\subsection{Channels}
For channels the boundariness is not given by the minimal eigenvalue of
the Choi-Jamiolkowski operator. In fact, we are missing an analytical form
of the boundariness of channels.
Hence, in general
the saturation of the inequality
\begin{align}
\label{eq:channelbound1}
\sup_{\mc{F}}\|\mc{E}-\mc{F}\|_{\rm cb}\leq 2(1-b(\mc{E}))
\end{align}
remains an open question, and we
test the saturation of the bound for the examples
of quantum channels that we studied in Section \ref{sec:channels}. Let us
stress that analytical expressions of the completely bounded norm are
rather rare, but there exist efficient numerical methods for its
evaluation \cite{watrous2009}.
For the qubit ``erasure'' channel ${\cal E}_p$ that transforms an arbitrary input state $\varrho$ into a fixed output state $\xi_p=p\ket{0}\bra{0}+(1-p)\ket{1}\bra{1}$ the completely
bounded norm $\|{\cal E}_p-\mc{F}\|_{\rm cb}$ can be expressed as
\begin{align}
\|{\cal E}_p-\mc{F}\|_{\rm cb}=\sup_{\|\psi\|=1} \|({\cal E}_p-\mc{F})\otimes \mathcal{I} (\ket{\psi}\bra{\psi})\|_{\rm tr} ,
\end{align}
where $\mathcal{I}$ is the qubit identity channel and $\ket{\psi}$ is a two-qubit state.
The choice $\mathcal{F}=\mathcal{I}$ and
\begin{align}
\ket{\psi}=\sqrt{1-p}\ket{0}\otimes\ket{0}+\sqrt{p}\ket{1}\otimes\ket{1}
\end{align}
lower bounds the norm in (\ref{eq:channelbound1}) by $2(1-p(1-p))$ as can be seen by direct calculation.
Due to the result $b({\cal E}_p)=p(1-p)$ from Section \ref{sec:channels} this can be equivalently written as
$2(1-b({\cal E}_p))\leq \sup_{\mathcal{F}} \|{\cal E}_p-\mc{F}\|_{\rm cb}$, which implies that the bound (\ref{eq:channelbound1}) is tight for the channel ${\cal E}_p$.
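The lower bound used in this argument can be reproduced numerically. The following sketch (our illustration, with the input state $\ket{\psi}$ taken from the text) evaluates the trace norm of $({\cal E}_p-\mathcal{I})\otimes\mathcal{I}(\ket{\psi}\bra{\psi})$ and recovers the value $2(1-p(1-p))$.
\begin{verbatim}
# Lower bound on the completely bounded norm for the erasure channel:
# the specific two-qubit input |psi> already gives
# ||(E_p - I) x I (|psi><psi|)||_tr = 2(1 - p(1-p)) = 2(1 - b(E_p)).
import numpy as np

p = 0.3
ket0, ket1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
psi = np.sqrt(1 - p) * np.kron(ket0, ket0) + np.sqrt(p) * np.kron(ket1, ket1)
rho_in = np.outer(psi, psi)

# (E_p x I)(rho_in) = xi_p (x) tr_A(rho_in);  (I x I)(rho_in) = rho_in
xi_p = np.diag([p, 1 - p])
rho_B = np.diag([1 - p, p])            # reduced state on the idler qubit
delta = np.kron(xi_p, rho_B) - rho_in

trace_norm = np.abs(np.linalg.eigvalsh(delta)).sum()
print(trace_norm, 2 * (1 - p * (1 - p)))   # both equal 1.58
\end{verbatim}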
Let us, further, consider the class of channels whose Choi operator $E$
contains some maximally entangled state $\ket{\phi}$ in its minimal
eigenvalue subspace. For these channels $b({\cal E})=\lambda_{\min}$
(see Section \ref{sec:channels}). Choose $\mathcal{F}$ to be a unitary channel,
i.e. $F=\ket{\phi}\bra{\phi}$ and set
$\ket{\psi}=1/\sqrt{2}(\ket{0}\otimes\ket{0}+\ket{1}\otimes\ket{1})$ (maximally
entangled state). Then
\begin{align}
\nonumber
\|E-F\|_{\rm tr}=\|({\cal E}-\mc{F})\otimes \mathcal{I} (\ket{\psi}\bra{\psi})\|_{\rm tr} \leq \|{\cal E}-\mc{F}\|_{\rm cb},
\end{align}
and a direct calculation gives $\|E-F\|_{\rm tr}=2(1-\lambda_{\min})=2(1-b({\cal E}))$.
Altogether, we have shown
\begin{align}
2(1-b({\cal E})) \leq \sup_{\mc{F}}\|{\cal E}-\mc{F}\|_{\rm cb},
\end{align}
which means that for this type of channels the bound
(\ref{eq:channelbound1}) is tight.
\section{Summary}
Convexity is a central mathematical feature of modern statistical theories, and it is natural to ask how physical concepts and structures are interlinked with the underlying convex structure. Using only convexity, we introduced the concept of boundariness and investigated its physical meaning in statistical theories such as quantum mechanics. Intuitively, the boundariness quantifies how far an element of a convex set is from its boundary. The definition of the boundary is based solely on convexity; no other mathematical structure of the set is assumed.
We have shown that the value of the boundariness $b(y)$ identifies the most
non-uniform convex decomposition of an inner element $y$ into a pair of
boundary elements. Further, we showed (Proposition \ref{prop:extremal_deco})
that for compact convex sets such an optimal decomposition is achieved
when one of the boundary points is also extremal. This surprising property
simplifies significantly our analysis of quantum convex sets and allows us
to evaluate the value of the boundariness.
In particular, we have found that,
in contrast to the case of states
and observables, for channels the general lower bound on the boundariness
($b\geq \lambda_{\min}$) given by the minimal eigenvalue of the Choi-Jamiolkowski
representation is not saturated (see Section III). We illustrated this
feature explicitly for the class of qubit ``erasure'' channels ${\cal E}_p$
mapping the whole state space into a fixed state $\xi_p=p\ket{0}\bra{0}+(1-p)\ket{1}\bra{1}$ ($0<p<1/2$). The boundariness of these channels was found to be
$b({\cal E}_p)=p(1-p)>\lambda_{\min}=p/2$ (Proposition \ref{prop:erasure}). We showed that the saturation of the bound is equivalent to the existence of a maximally entangled state in
the minimal eigenvalue subspace of the channel's Choi-Jamiolkowski operator. Let us stress that the boundariness vanishes for infinite-dimensional systems, because the associated convex
sets contain no interior points (discussed in Appendix B).
Concerning the operational meaning of the boundariness, we first demonstrated that the boundariness can be used to upper bound any (semi)norm-induced distance, provided the
(semi)norm is bounded on the convex set. An example of such a norm is the base norm, which is induced solely by the convex structure of the set. Recently, it was shown in
Ref.~\cite{jencova2013} that for the sets of quantum states, measurements and evolutions the base norms coincide with the so-called completely bounded norms. These norms are
known \cite{chiribella2009, gutoski2012} to appear naturally in quantum minimum-error discrimination tasks. As a result, this connection provides a clear operational interpretation for
the boundariness as described in Section IV.
More precisely, if we want to determine which of the two known (equally likely) possibilities $A$ or $B$ an unknown state (or measurement, or channel) given to us was prepared in, the probability of making an erroneous conclusion is at least one half times the boundariness of either of the elements $A$ and $B$. For a generic pair of possibilities $A$ and $B$ this bound is not necessarily tight; however, if we keep $A$ fixed, then the boundariness of $A$ is proportional to the minimum error probability of discriminating $A$ from the quantum device most distinguishable from $A$. To be precise, this was shown only for states and observables (in which case the analytic formula for the boundariness was derived), but we conjecture that this feature holds also for quantum channels. We verified this conjecture for erasure channels and for the class of channels containing a maximally entangled state in the minimal eigenvalue subspace of their Choi-Jamiolkowski operators.
In conclusion, let us mention a rather intriguing observation. In all the cases
we have encountered, the ``optimal'' decompositions (determining the value of the boundariness)
contain pure states, sharp observables and unitary
channels. In other words, only special subsets of extremal elements
(for observables and channels) are needed. This is true for all states and
for all observables. The case of channels is open, but no counter-example
is known. This observation suggests that the concept of boundariness
could provide some operational meaning to the sharpness of observables
and the unitarity of evolution.
\acknowledgments
The authors would like to thank Teiko Heinosaari for stimulating this work
and insightful discussions. This work was supported by COST Action MP1006
and VEGA 2/0125/13 (QUICOST). E.H. acknowledges financial support from the
Alfred Kordelin foundation.
M.S. acknowledges support by the Operational Program Education for Competitiveness - European Social Fund
(project No. CZ.1.07/2.3.00/30.0004) of the Ministry of Education, Youth and Sports of the Czech Republic.
M.Z. acknowledges the
support of projects GACR P202/12/1142 and RAQUEL.
\appendix
\section{Properties of the weight function}
\label{sec:appropty}
The purpose of this appendix is to prove results that are needed for Proposition \ref{prop:extremal_deco}.
Let us first recall a few basic definitions in linear analysis. Suppose that $V$ is a real vector space.
For a subset $X\subset V$ we denote by $V_X$ the smallest affine subspace of $V$
containing $X$. For any $x\in X$, the linear subspace $V_X-x$ is just the linear hull of $X-x$,
where we introduced the notation $X-x \equiv \{y-x\,|\,y\in X\}$.
We say that $U\subset V$ is {\it absorbing} if for every $v\in V$ there
is $\alpha>0$ such that $\alpha^{-1}v\in U$; in particular, $0\in U$.
The following lemma gives another characterization for the boundary of a convex set $Z$, which is useful for studying the continuity properties of the weight function.
\begin{lemma}\label{prop:innerabsorb}
Suppose that $Z$ is a convex subset of a real vector space $V$. An element $y\in Z$ is inner, i.e.,\ $y\in Z\setminus\partial Z$ if and only if $Z-y$ is absorbing in the subspace
$V_Z-y$.
\end{lemma}
\par\noindent{\bf Proof. }
Let us assume that $y$ is an inner point of $Z$ and suppose that $v\in V_Z-y$. For simplicity, let us assume that $v\neq0$. The convexity of $Z-y$ and the definition of $V_Z$ yield
that there are $d_+,\,d_- \in Z-y$ and $\lambda_+,\,\lambda_-\geq0$, where $\lambda_+>0$ or $\lambda_->0$, such that $v=\lambda_+d_+-\lambda_-d_-$. The fact that $y$ is an
inner point implies that for $d_-\in Z-y$ there is $q>0$ such that $-q d_-\in Z-y$. Hence, $v=\alpha d$, where $\alpha=\lambda_++\lambda_-/q>0$ and
$d=\frac{\lambda_+}{\alpha}d_++\frac{\lambda_-}{q\alpha}(-q d_-)\in Z-y$, which proves that $Z-y$ is absorbing in $V_Z-y$. Suppose now that $Z-y$ is absorbing in $V_Z-y$
and $x\in Z$, so that $x-y=d\in Z-y$. Also $-d\in V_Z-y$ and, because $Z-y$ is absorbing, there is $\alpha>0$ such that $-\alpha^{-1}d\in Z-y$, i.e.,\
$y-\alpha^{-1}d=z\in Z$ and
$$
y=\frac1{1+\alpha}x+\frac{\alpha}{1+\alpha}z.
$$
This means that for all $x\in Z$, $x\leq_Cy$, i.e.,\ $y\notin\partial Z$.
$\,\blacksquare$\par
The weight function can be associated with a function called the Minkowski gauge. This connection gives more insight into the properties of the weight function in the
infinite-dimensional case. When $A$ is an absorbing subset of a real vector space $W$, we may define a function $P_A:W\to\mb R$,
$$
P_A(w)=\inf\{\alpha\geq0\,|\,\alpha^{-1}w\in A\},\quad w\in W.
$$
$P_A$ is called the {\it Minkowski gauge of $A$}. For basic properties of this function, we refer to \cite{infdimanalysis}. If $A$ is convex, then $P_A$ is a convex function, and
$$
\{v\in W\,|\,P_A(v)<1\}\subset A\subset\{v\in W\,|\,P_A(v)\leq1\}.
$$
When $A$ is an absorbing convex balanced subset, $P_A$ has many properties reminiscent of a norm whose unit ball is $A$. When $W$ is a (locally convex) topological vector space,
the Minkowski gauge $P_A$ is continuous if and only if $A$ is a neighbourhood of the origin.
Suppose that $Z$ is a convex subset of a real vector space $V$ and $y\in Z$. The basis for connecting a Minkowski gauge to the weight function $t_y$ is provided by the following
observation: Consider a vector $y-x\in V_Z-y$, where $x\in Z$. As can be seen from Fig.~\ref{fig:bdreltoMG}, the scaling factor $\alpha$ that shrinks or extends this vector to the
border of the set $Z-y$ defines a point $z(t)$, which determines the value of the weight function $t_y$. These considerations can be formulated mathematically as follows:
Pick $t\in[0,t_y(x))$ and define $z(t)=(1-t)^{-1}(y-tx)\in Z$. Now $z(t)-y=t(1-t)^{-1}(y-x)\in Z-y$. As $t$ approaches $t_y(x)$ from below, $\alpha(t)=(1-t)/t$ decreases, and
from this we see that $\big(1-t_y(x)\big)/t_y(x)=P_{Z-y}(y-x)$ or, when we denote the Minkowski gauge $P_{Z-y}:V_Z-y\to[0,\infty)$ of $Z-y$ evaluated at $y-x$ by $p_y(x)\equiv P_{Z-y}(y-x)$,
\begin{equation}\label{eq:t_yp_y}
t_y(x)=\frac1{1+p_y(x)}.
\end{equation}
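A concrete finite-dimensional example may help to visualize Eq.~(\ref{eq:t_yp_y}). In the following Python sketch (ours; the explicit simplex formulas are elementary but not taken from the text) $Z$ is the probability simplex in $\mb R^3$, and the weight $t_y(x)$ computed from its definition coincides with $1/(1+p_y(x))$.
\begin{verbatim}
# Z = probability simplex in R^3, y an inner point, x in Z.
import numpy as np

y = np.array([0.5, 0.3, 0.2])
x = np.array([0.1, 0.2, 0.7])

# t_y(x) = sup{ t in [0,1) : y - t x lies in (1-t) Z }, i.e. y - t x >= 0
t_direct = min(y[i] / x[i] for i in range(3) if x[i] > 0)

# Minkowski gauge of Z - y evaluated at y - x:
# p_y(x) = inf{ a >= 0 : y + (y - x)/a in Z } = max_{x_i > y_i} (x_i - y_i)/y_i
p_y = max((x[i] - y[i]) / y[i] for i in range(3) if x[i] > y[i])

print(t_direct, 1.0 / (1.0 + p_y))   # both ~0.2857
\end{verbatim}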
\begin{figure}
\caption{
The scalar $\alpha$ extending $y-x$ from the starting point $y$ to the boundary coincides with the Minkowski gauge $P_{Z-y}(y-x)$.}
\label{fig:bdreltoMG}
\end{figure}
According to Lemma \ref{prop:innerabsorb} the gauge $p_y$ is well defined, when $y\in Z\setminus\partial Z$. From the convexity of the Minkowski gauge we again see that
$x\mapsto1/t_y(x)=1+p_y(x)$ is convex on $Z$ whenever $y\in Z\setminus\partial Z$. We immediately see that, in the case of a topological vector space $V$, whenever
$y\in Z\setminus\partial Z$, the weight function $t_y$ is continuous if and only if the Minkowski gauge $p_y$ is continuous, i.e.,\ $Z-y$ is a neighbourhood of the origin of $V_Z-y$.
In finite-dimensional settings, any convex absorbing set is a neighbourhood of the origin (as one may easily check). Thus we obtain the following result needed for proving Proposition
\ref{prop:extremal_deco}.
\begin{proposition}
Suppose that $Z\subset\mb R^n$ for some $n\in\mb N$. The weight function $t_y$ is continuous if and only if $y\in Z\setminus\partial Z$.
\end{proposition}
The quantum physical sets of states, POVMs and channels are all compact (even in the infinite-dimensional case with respect to suitable topologies), implying that, e.g.,\ Proposition
\ref{prop:extremal_deco} is applicable to the sets of (finite-dimensional) quantum devices.
\section{Relation to Hilbert's projective metric}
\label{sec:apphpm}
The weight function is also related to Hilbert's projective metric.
Suppose $C\subset V$ is a pointed generating
cone of a real vector space $V$ (see definition
in Section II).
We may define the functions
\begin{eqnarray*}
\inf(v/w)&=&\sup\{\lambda\in\mb R\,|\,v-\lambda w\in C\},\\
\sup(v/w)&=&\inf\{\lambda\in\mb R\,|\,\lambda w-v\in C\},
\end{eqnarray*}
$v,\,w\in V$. Through these functions, one can define {\it Hilbert's projective metric} $\mf h:V\times V\to[0,\infty]$,
$\mf h(v,w)=\ln{\big(\sup(v/w)/\inf(v/w)\big)}$
that can be
lifted into a well-defined metric on the projective space $\mb PV$; for more on this subject, see \cite{eveson1990,reeb_etal2011,gaubert2013}.
When $Z$ is a base for $C$, one can easily show that, for $x,\,y\in Z$, $\inf(y/x)=\sup\{t\in[0,1)\,|\,y-tx\in C\}$. Moreover, if $x,\,y\in Z$ and $y-tx\in C$ for some $t\in[0,1)$, then $y-tx=sz$ for some (unique) $s\geq0$ and $z\in Z$. If $s\neq1-t$, then both $y\in Z$ and
\begin{align}
\frac{1}{s+t}\; y=\frac{t}{s+t}\; x+\frac{s}{s+t}\;z
\end{align}
belong to $Z$,
contradicting the fact that $Z$ is a base. Hence $s=1-t$ and
$$
\inf(y/x)=\sup\{t\in[0,1)\,|\,y-tx\in(1-t)Z\}=t_y(x).
$$
Similarly, the convex function $x\mapsto1/t_y(x)$ is associated with the $\sup$-function.
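For the positive orthant in $\mb R^3$ with the probability simplex as a base, the functions above reduce to componentwise ratios, and the identities of this appendix can be checked directly. The following sketch (ours) does so; the last printed identity reflects our reading of the remark about the $\sup$-function.
\begin{verbatim}
# Hilbert's projective metric for the positive orthant cone C in R^3 with
# base Z = probability simplex, and the identity inf(y/x) = t_y(x).
import numpy as np

def inf_ratio(v, w):   # sup{ lam : v - lam*w in C } = min_i v_i / w_i
    return np.min(v / w)

def sup_ratio(v, w):   # inf{ lam : lam*w - v in C } = max_i v_i / w_i
    return np.max(v / w)

y = np.array([0.5, 0.3, 0.2])
x = np.array([0.1, 0.2, 0.7])

h = np.log(sup_ratio(y, x) / inf_ratio(y, x))   # Hilbert's projective metric
t_y_x = min(y[i] / x[i] for i in range(3) if x[i] > 0)

print(inf_ratio(y, x), t_y_x)        # both ~0.2857, i.e. inf(y/x) = t_y(x)
print(sup_ratio(x, y), 1 / t_y_x)    # both 3.5, i.e. sup(x/y) = 1/t_y(x)
print(h)
\end{verbatim}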
\section{Boundary of quantum convex sets}\label{sec:boundaryQCS}
The question of the boundary elements for states, observables and channels can be treated in a unified way as all these objects can be understood as transformations represented
by completely positive linear maps. In this section, we give conditions for being on the boundary for all relevant quantum devices. For the sake of brevity, we characterize the
boundary for all relevant quantum convex sets in one go. This, however, necessitates the use of the Heisenberg picture, which is
used only in this section.
Let us fix a Hilbert space $\mc H$ and a unital $C^* $-algebra $\mc A$. We say that a linear map $\Phi:\mc A\to\mc L(\mc H)$ is {\it completely positive} (CP) if for any $n=1,\,2,\ldots$ and
$a_1,\ldots,\,a_n\in\mc A$ and $|v_1\rangle,\ldots,\,|v_n\rangle\in\mc H$
$$
\sum_{j,k=1}^n\sis{v_j}{\Phi(a_j^\dagger a_k)|v_k}\geq0.
$$
For any CP map $\Phi$ there is a Hilbert space $\mc M$, a linear map $J:\mc H\to\mc M$ and a linear map $\pi:\mc A\to\mc L(\mc M)$ such that
$\pi(1)=I_{\mc M}$, $\pi(a^\dagger )=\pi(a)^\dagger $ and $\pi(ab)=\pi(a)\pi(b)$ for all $a,\,b\in\mc A$ (i.e.,\ $\pi$ is a unital *-representation of $\mc A$ on $\mc M$) that constitute a
{\it minimal Stinespring dilation} for $\Phi$. This means that $\Phi(a)=J^\dagger \pi(a)J$ for all $a\in\mc A$ and the subspace of $\mc M$ generated by the vectors $\pi(a)J|v\rangle$, $a\in\mc A$,
$|v\rangle\in\mc H$, is dense in $\mc M$.
In what follows, we only study unital CP maps, i.e.,\ $\Phi(1_{\mc A})=I_\mc H$. We denote the set of all unital CP maps $\Phi:\mc A\to\mc L(\mc H)$ by $\mc{CP}(\mc A;\mc H)$. Since the set
$\mc{CP}(\mc A;\mc H)$ is convex, it is equipped with the preorder $\leq_C$. We denote $\Phi=_C\Psi$ if $\Phi\leq_C\Psi$ and $\Psi\leq_C\Phi$. For any $\Psi\in\mc{CP}(\mc A;\mc H)$ we may define
the set
$$
\mc F(\Psi)=\{\Phi\in\mc{CP}(\mc A;\mc H)\,|\,\Phi\leq_C\Psi\}.
$$
Let us fix a minimal dilation $(\mc M,\pi,J)$ for $\Psi$. Let us define $F(\Psi)$ as the set of positive operators $E\in\mc L(\mc M)$ such that $E\pi(a)=\pi(a)E$ for all $a\in\mc A$ and
$J^\dagger EJ=I$. The following proposition is essentially due to \cite{raginsky2003}.
\begin{proposition}\label{prop:FPsiEPsi}
Suppose that $\Psi\in\mc{CP}(\mc A;\mc H)$ is equipped with the minimal dilation $(\mc M,\pi,J)$. The sets $\mc F(\Psi)$ and $F(\Psi)$ are in one-to-one correspondence set up by
\begin{equation}\label{eq:FPsiEPsi}
\Phi(a)=J^\dagger \pi(a)EJ,\qquad\Phi\in\mc F(\Psi),\quad E\in F(\Psi)
\end{equation}
for all $a\in\mc A$.
\end{proposition}
\begin{lemma}\label{lemma:PsieqPhi}
Suppose that $\Phi,\,\Psi\in\mc{CP}(\mc A;\mc H)$ and fix the minimal dilation $(\mc M,\pi,J)$ for $\Psi$. Now $\Phi=_C\Psi$ if and only if there is $E\in F(\Psi)$ with bounded inverse such that
$\Phi(a)=J^\dagger \pi(a)EJ$ for all $a\in\mc A$.
\end{lemma}
\par\noindent{\bf Proof. }
The case $\Phi=\Psi$ is obvious. Let us concentrate on the case $\Phi\neq\Psi$.
Let us assume that $\Phi=_C\Psi$. Because, especially, $\Phi\leq_C\Psi$, there is an operator $E\in F(\Psi)$ such that $\Phi(a)=J^\dagger \pi(a)EJ$ for all $a\in\mc A$. Denote the closure of the
range of $\sqrt E$ by $\mc M_E$ and the projection of $\mc M$ onto this subspace by $P_E$. Since $E$ commutes with $\pi$, also $P_E$ commutes with $\pi$, and we may define the map
$\pi_E:\mc A\to\mc L(\mc M_E)$, $\pi_E(a)=P_E\pi(a)|_{\mc M_E}$. Also define $J_E=\sqrt EJ$. It is straightforward to check that the triple $(\mc M_E,\pi_E,J_E)$ constitutes a minimal dilation
of $\Phi$. Since also $\Psi\leq_C\Phi$ and $\Phi\neq\Psi$, it follows that there is $t\in(0,1)$ and $\Psi'\in\mc{CP}(\mc A;\mc H)$ such that $\Phi=t\Psi+(1-t)\Psi'$. In other words, there is a number
$t\in(0,1)$ such that the map $\Psi'$,
$$
\Psi'(a)=\frac1{1-t}\big(\Phi(a)-t\Psi(a)\big)=\frac1{1-t}J^\dagger \pi(a)(E-tI)J,\qquad a\in\mc A,
$$
is completely positive or, equivalently, $E\geq tI$. Hence $E$ has a bounded inverse.
Suppose that $E\in F(\Psi)$ is as in the first part of the proof and $E^{-1}\in\mc L(\mc M)$. From Proposition \ref{prop:FPsiEPsi} it follows immediately that $\Phi\leq_C\Psi$. Denote
$E'=P_EE^{-1}|_{\mc M_E}$. We have $E'\geq0$, $J_E^\dagger E'J_E=J^\dagger J=I$ and
\begin{eqnarray*}
E'\pi_E(a)&=&P_EE^{-1}\pi(a)|_{\mc M_E}=P_EE^{-1}\pi(a)EE^{-1}|_{\mc M_E}\\
&=&P_EE^{-1}E\pi(a)E^{-1}|_{\mc M_E}=P_E\pi(a)E^{-1}|_{\mc M_E}\\
&=&\pi_E(a)E'
\end{eqnarray*}
for all $a\in\mc A$, so that $E'\in F(\Phi)$ when we fix the dilation $(\mc M_E,\pi_E,J_E)$ for $\Phi$. Furthermore
$$
J_E^\dagger \pi_E(a)E'J_E=J^\dagger \pi(a)\sqrt EE^{-1}\sqrt EJ=J^\dagger \pi(a)J=\Psi(a)
$$
for all $a\in\mc A$. According to Proposition \ref{prop:FPsiEPsi} this means that $\Psi\leq_C\Phi$.
$\,\blacksquare$\par
We denote the spectrum of an operator $E\in\mc L(\mc M)$ on a Hilbert space $\mc M$ by $\mr{sp}(E)$. The following proposition, which is an immediate corollary of the previous lemma,
characterizes the boundary elements of the set of unital CP maps.
\begin{proposition}\label{prop:boundaryCP}
Suppose that $\Phi\in\mc{CP}(\mc A;\mc H)$. The map $\Phi$ is on the boundary of $\mc{CP}(\mc A;\mc H)$ if and only if there is $\Psi\in\mc{CP}(\mc A;\mc H)$ with a minimal dilation
$(\mc M,\pi,J)$ such that $\Phi\leq_C\Psi$ and $\Phi$ corresponds to an operator $E\in F(\Psi)$ with $0\in\mr{sp}(E)$.
\end{proposition}
\par\noindent{\bf Proof. }
The condition $\Phi\in\partial\mc{CP}(\mc A;\mc H)$ is equivalent to the existence of $\Psi\in\mc{CP}(\mc A;\mc H)$ such that $\Phi\leq_C\Psi$ but $\Psi\not\leq_C\Phi$. Indeed, if
$\Psi'\in\mc{CP}(\mc A;\mc H)$ is such that $\Psi'\not\leq_C\Phi$, we may define $\Psi=\frac12\Phi+\frac12\Psi'$ so that $\Phi\leq_C\Psi$. Moreover, if $\Psi\leq_C\Phi$, it would follow that
$\Psi'\leq_C\Psi\leq_C\Phi$, yielding $\Psi'\leq_C\Phi$, a contradiction. Suppose that $\Psi\in\mc{CP}(\mc A;\mc H)$ is such that $\Phi\leq_C\Psi$ and $\Psi\not\leq_C\Phi$, that $\Psi$ has the
minimal dilation $(\mc M,\pi,J)$, and that $\Phi$ corresponds to the operator $E\in F(\Psi)$ according to Equation (\ref{eq:FPsiEPsi}). According to Lemma \ref{lemma:PsieqPhi}, the condition
$\Psi\not\leq_C\Phi$ is equivalent to $E$ not having a bounded inverse or, in other words, $0\in\mr{sp}(E)$.
$\,\blacksquare$\par
The CP maps of quantum physics are normal. This is because in this section we have described our quantum devices jointly in the Heisenberg picture and, in order to pass to the Schr\"odinger
picture, we generally need normality. However, when $\mc H$ and $\mc A$ are finite dimensional the maps $\Phi\in\mc{CP}(\mc A;\mc H)$ are automatically normal. The results of this section also
hold for the restricted class of normal elements in $\mc{CP}(\mc A;\mc H)$ because this class is a face of $\mc{CP}(\mc A;\mc H)$, i.e.,\ if $\Phi$ is normal and $\Phi'\leq_C\Phi$ then also $\Phi'$ is
normal.
\subsection{States}
\label{sec:appbfstates}
Suppose that $\mc K$ is a Hilbert space. We denote by $\mc S(\mc K)$ the set of states, i.e.,\ positive trace-class operators on $\mc K$ with trace 1. The states are in one-to-one
correspondence with the normal (completely) positive unital maps $\varphi:\mc L(\mc K)\to\mb C$, i.e.\ with the set of normal elements in $\mc{CP}\big(\mc L(\mc K);\mb C\big)$.
\begin{proposition}\label{prop:boundarystate}
A state $\varrho$ belongs to $\partial\mc S(\mc K)$ if and only if $\varrho$ has 0 in its spectrum.
\end{proposition}
\par\noindent{\bf Proof. }
First, let us assume that $\dim{\mc K}<\infty$. Suppose that $\varrho\in\mc S(\mc K)$ is such that there is a unit vector $|v\rangle\in\mc K$ such that $\varrho|v\rangle=0$. Let us define the operator
$D=\ktb{v}{v}-(d-1)^{-1}(I-\ktb{v}{v})$. Denote the smallest non-zero eigenvalue of $\varrho$ by $\lambda_{\min}$. It is easy to see that whenever $\varepsilon\leq\lambda_{\min}$,
$\varrho+\varepsilon D\in\mc S(\mc K)$ but $\varrho-\varepsilon D$ is not positive for any $\varepsilon>0$. Hence $\varrho\in\partial\mc S(\mc K)$.
Suppose now $\varrho\in\partial\mc S(\mc K)$, i.e.,\ there is a state $\sigma\in\mc S(\mc K)$ such that when we denote $D=\sigma-\varrho$, then $\varrho-\varepsilon D$ is not positive for any
$\varepsilon>0$. We may write $\mc K=\mc K_+\oplus\mc K_0\oplus\mc K_-$, where $\mc K_+$ is the direct sum of the eigenspaces corresponding to the positive eigenvalues of $D$ and $\mc K_0$ is
the kernel of $D$. We infer that $\mc K_+\cap\ker{\varrho}$ is non-trivial and hence also $\ker{\varrho}$ is non-trivial. This means that $0$ is an eigenvalue of $\varrho$.
Now, let us assume that $\mc K$ is infinite dimensional. Assume that $\varrho\in\mc S(\mc K)$ were in the interior, i.e.,\ $\varrho\notin\partial\mc S(\mc K)$. Then, especially,
$\ktb{v}{v}\leq_C\varrho$ for all unit vectors $|v\rangle\in\mc K$. Whenever $\lambda\ktb{v}{v}\leq A$ for some $\lambda>0$ and some positive $A\in\mc L(\mc K)$, it follows \cite{buschgudder}
that $|v\rangle\in\mr{ran}(\sqrt A)$ or, in other words, $|v\rangle=\sqrt A|w\rangle$ for some $|w\rangle\in\mc K$. In the case where $A$ is a state operator, this result was already proven in
\cite{hadjisavvas}. Hence, $\mr{ran}(\sqrt\varrho)=\mc K$, i.e.,\ $\sqrt\varrho$ is surjective. If $\varrho$ had a non-trivial kernel, it could not be in the interior, for then
$\ktb{v}{v}\not\leq_C\varrho$ for any unit vector $|v\rangle\in\mr{ker}(\varrho)$. Hence, $\varrho$ is injective and so $\sqrt\varrho$ is injective as well. All this implies that
$\sqrt\varrho:\mc K\to\mc K$ is a bijection and the open mapping theorem yields that there is a continuous inverse $\sqrt\varrho^{-1}:\mc K\to\mc K$. Hence, there is a bounded inverse
$\varrho^{-1}=\sqrt\varrho^{-1}\sqrt\varrho^{-1}$. However, this is impossible, since in the infinite-dimensional case all state operators have 0 in their spectra.
$\,\blacksquare$\par
The previous proposition tells us that the boundary of the set of states depends dramatically on the dimensionality of the Hilbert space: if the space is finite-dimensional, the boundary states are
exactly those whose kernel is non-trivial. In the infinite-dimensional case, the set of states coincides with its boundary.
\subsection{Effects and finite outcome observables}\label{sec:appbfobs}
Denote $\Omega=\{1,\ldots,\,N\}$ and define $\mc O^N(\mc H)$ as the set of {\it positive-operator-valued measures on $\mc H$ and taking values in $\Omega$} ($N$-outcome observables), i.e.,
$\ms M\in\mc O^N(\mc H)$ is a collection $\ms M=\{M_j\}_{j=1}^N$ of positive operators on $\mc H$ such that $\sum_{j=1}^NM_j=I$. It should be noted that whenever $\ms M\in\mc O^N(\mc H)$
then $\ms M\leq_C\ms E^N$, where $\ms E^N=\{E_j^N\}_{j=1}^N$, $E_j^N=N^{-1}I$ for all $j=1,\ldots,\,N$. Note that we may identify $\mc O^N(\mc H)$ with the set of normal elements in
$\mc{CP}(\mc A^N;\mc H)$, where $\mc A^N$ is just the algebra $\mb C^N$ with componentwise operations.
\begin{proposition}\label{prop:boundaryeffect}
The boundary $\partial\mc O^N(\mc H)$ consists of the POVMs $\ms M=\{M_j\}$ with $0\in\mr{sp}(M_j)$ for some $j=1,\ldots,\,N$.
\end{proposition}
\par\noindent{\bf Proof. }
Endow $\mb C^N$ with an orthonormal basis $\{|1\rangle,\ldots,\,|N\rangle\}$ and denote $P_r=\ktb{r}{r}$, $r=1,\ldots,\,N$. Define the PVM $\ms Q\in\mc O^N(\mc H\otimes\mb C^N)$,
$Q_r=I\otimes P_r$, $r=1,\ldots,\,N$, and the isometry $J:\mc H\to\mc H\otimes\mb C^N$, $J|v\rangle=N^{-1/2}|v\rangle\otimes(|1\rangle+\cdots+|N\rangle)$. It is immediately seen that
$(\mc H\otimes\mb C^N,\ms Q,J)$ is a minimal dilation of $\ms E^N=\{N^{-1}I,\ldots,\,N^{-1}I\}$, i.e.,\ $J^\dagger Q_rJ=N^{-1}I$, $r=1,\ldots,\,N$. Let $F(\ms E^N)$ be the set of positive
operators $E$ on $\mc H\otimes\mb C^N$ that commute with $\ms Q$ and satisfy $J^\dagger EJ=I$, so that $\mc O^N(\mc H)$ is in one-to-one affine correspondence with $F(\ms E^N)$. It follows that
$F(\ms E^N)$ consists of operators of the form $\sum_{j=1}^NA_j\otimes P_j$, where $A_j\in\mc L(\mc H)$ are positive operators with $A_j\leq NI$. Any $\ms M\in\mc O^N(\mc H)$ corresponds to
such an operator, where $A_j=NM_j$. A POVM $\ms M$ is thus on the boundary if and only if the corresponding operator $N\sum_{j=1}^NM_j\otimes P_j$ has 0 in its spectrum. This happens
exactly when $0\in\mr{sp}(M_j)$ for some $j$.
$\,\blacksquare$\par
One often denotes $\mc O^2(\mc H)=\mc E(\mc H)$, and the elements $\ms E\in\mc E(\mc H)$ are called {\it effects}. An effect $\ms E=\{E_1,E_2\}\in\mc E(\mc H)$ is usually identified with its value $E_1$, and hence
effects are characterized as positive operators $E\in\mc L(\mc H)$ with $E\leq I$. One easily sees from the previous proposition that an effect $E$ is on the boundary if and only if $0\in\mr{sp}(E)$
or $1\in\mr{sp}(E)$.
\subsection{Channels}
\label{sec:appbfchannels}
In this subsection, we assume that $\mc H$ and $\mc K$ are (separable) Hilbert spaces. We denote by $\mc C(\mc K;\mc H)$ the set of (normal) unital CP maps $\mc E:\mc L(\mc K)\to\mc L(\mc H)$
and call these maps {\it channels}. Note that the physical input space of these channels is $\mc H$ and the output space is $\mc K$. The minimal Stinespring dilation $(\mc M,\pi,J)$ of a channel
$\mc E\in\mc C(\mc K;\mc H)$ can be chosen so that $\mc M$ is separable and $\pi:\mc L(\mc K)\to\mc L(\mc H)$ is a normal unital *-representation. This means that there is a separable Hilbert
space $\mc K'$ such that we may choose $\mc M=\mc K\otimes\mc K'$ and $\pi(B)=B\otimes I_{\mc K'}$ for all $B\in\mc L(\mc K)$. Hence we usually denote a minimal Stinespring dilation of a
channel $\mc E$ in the form $(\mc K',J)$ where $J:\mc H\to\mc K\otimes\mc K'$ is an isometry such that
$$
\mc E(B)=J^\dagger (B\otimes I_{\mc K'})J,\qquad B\in\mc L(\mc K).
$$
Suppose that $\mc K$ is infinite-dimensional and $\mc E\in\mc C(\mc K;\mc H)\setminus\partial\mc C(\mc K;\mc H)$. For each unit vector $|v\rangle\in\mc K$ define the channel
$\mc F^{|v\rangle}\in\mc C(\mc K;\mc H)$ by $\mc F^{|v\rangle}(B)=\sis{v}{B|v}I$. The predual map $\mc F_*^{|v\rangle}:\mc T(\mc H)\to\mc T(\mc K)$ of $\mc F^{|v\rangle}$ is given by
$\mc F_*^{|v\rangle}(T)=\tr{T}\ktb{v}{v}$ for all trace-class operators $T\in\mc T(\mc H)$. It follows that $\mc F^{|v\rangle}\leq_C\mc E$ for all unit vectors $|v\rangle\in\mc K$ which means that for all
unit vectors $|v\rangle\in\mc K$ there is a number $t_{|v\rangle}\in(0,1]$ such that for all positive $T\in\mc T(\mc H)$ and $B\in\mc L(\mc K)$ one has
$$
\tr{T(\mc E-t_{|v\rangle}\mc F^{|v\rangle})(B)}=\tr{B(\mc E_*-t_{|v\rangle}\mc F_*^{|v\rangle})(T)}\geq0
$$
yielding $t_{|v\rangle}\mc F_*^{|v\rangle}(T)\leq\mc E_*(T)$. By picking a positive operator $T$ of trace one, we find that $\ktb{v}{v}\leq_C\mc E_*(T)$ for all unit vectors $|v\rangle\in\mc K$ when
$\mc E_*(T)$ is considered as a state. As in the proof of Proposition \ref{prop:boundarystate}, one can show that this leads to a contradiction. This means that if $\mc K$ is infinite
dimensional, $\mc C(\mc K;\mc H)$ coincides with its boundary.
Suppose that $\dim{\mc K}=d<\infty$ and fix an orthonormal basis $\{|n\rangle\}_{n=1}^d\subset\mc K$. Define for each $\mc F\in\mc C(\mc K;\mc H)$ the {\it Choi operator}
$$
E(\mc F)=d\sum_{m,n=1}^d\ktb{m}{n}\otimes\mc F(\ktb{m}{n})\in\mc L(\mc K\otimes\mc H).
$$
Define the vector $|\psi_d\rangle=d^{-1/2}(|1,1\rangle+\cdots+|d,d\rangle)\in\mc K\otimes\mc K$ and the isometry $J:\mc H\to\mc K\otimes\mc K\otimes\mc H$ with $J|v\rangle=|\psi_d\rangle\otimes|v\rangle$ for all
$|v\rangle\in\mc H$. One can easily check that the pair $(\mc K\otimes\mc H,J)$ constitutes a minimal dilation for the channel $\mc E\in\mc C(\mc K;\mc H)$, $\mc E(B)=d^{-1}\tr{B}I_\mc H$. Suppose that
$\mc F\in\mc C(\mc K;\mc H)$. We find
\begin{eqnarray*}
J^\dagger \big(B\otimes E(\mc F)\big)J&=&d\sum_{m,n=1}^d\sis{\psi_d}{B\otimes\ktb{m}{n}|\psi_d}\mc F(\ktb{m}{n})\\
&=&\sum_{m,n,r,s=1}^d\sis{r}{B|s}\sis{r}{m}\sis{n}{s}\mc F(\ktb{m}{n})\\
&=&\sum_{m,n=1}^d\sis{m}{B|n}\mc F(\ktb{m}{n})=\mc F(B)
\end{eqnarray*}
for all $B\in\mc L(\mc K)$. This means that $\mc C(\mc K;\mc H)=\mc F(\mc E)$ when $\mc K$ is finite-dimensional and the operator on the dilation space of $\mc E$ corresponding to a channel
$\mc F\in\mc C(\mc K;\mc H)$ is the Choi operator. Hence we can give the following characterization for boundary channels:
\begin{proposition}\langlebel{prop:boundarychannelfinite}
Suppose that $\dim{(\mc K)}<\infty$. A channel $\mc F\in\mc C(\mc K;\mc H)$ is on the boundary $\partial\mc C(\mc K;\mc H)$ if and only if the Choi operator $E(\mc F)$ has 0 in its spectrum.
\end{proposition}
In the case when both $\dim{\mc K}=d_{\mc K}$ and $\dim\mc H=d_\mc H$ are finite, the above result means that a channel is on the boundary if and only if its Kraus rank is strictly less than
$d_{\mc K}d_\mc H$. Suppose now that $\{|m\rangle\}_{m=1}^{d_\mc H}\subset\mc H$ is an orthonormal basis. Since $E(\mc F)$ is positive for any channel $\mc F$, we may give it the
spectral decomposition $E(\mc F)=d_{\mc K}\sum_{j=1}^r\ktb{L_j}{L_j}$. Let us define the operators
$L_j=\sum_{m=1}^{d_\mc H}\sum_{n=1}^{d_{\mc K}}\sis{n,m}{L_j}\ktb{m}{n}$. One may check that the operators $K_j=L_j^\dag$ constitute a minimal set of Kraus operators for
$\mc F$, i.e.,\ $\mc F(B)=\sum_{j=1}^rK_j^\dag BK_j$. Moreover, the more familiar Choi operator associated with the Schr\"odinger (predual) version of $\mc F$ is given by
$$
C(\mc F_*)=\sum_{m,n=1}^{d_\mc H}\ktb{m}{n}\otimes\mc F_*(\ktb{m}{n})=\sum_{j=1}^r\ktb{K_j}{K_j},
$$
where
$|K_j\rangle=\sum_{m=1}^{d_\mc H}\sum_{n=1}^{d_{\mc K}} \bra{n}K_j\ket{m} \ket{m,n}=\sum_{m=1}^{d_\mc H}\sum_{n=1}^{d_{\mc K}} \bra{L_j}n,m\rangle\ \ket{m,n}$,
$|K_j\rangle\in\mc H\otimes\mc K$.
Let us note that the orthogonality of the vectors $\ket{L_j}$ implies the orthogonality of the vectors $\ket{K_j}$, while their norms coincide, $\langle L_j|L_j\rangle^{1/2}=\langle K_j|K_j\rangle^{1/2}$.
Hence, we have demonstrated the following.
\begin{proposition}\label{prop:schrodinger}
Suppose that $\dim{\mc K}=d_{\mc K}$ and $\dim\mc H=d_\mc H$ are finite. A completely positive trace-preserving map (i.e. a channel in the Schr\"odinger picture)
is on the boundary of the set of channels
if and only if the rank of its Choi operator is strictly less than $d_{\mc K}d_\mc H$.
\end{proposition}
Thus, also in the Schr\"odinger picture a channel is on the boundary exactly when zero is in the spectrum of its Choi operator.
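The Schr\"odinger-picture criterion can be illustrated with a short numerical sketch (ours; the amplitude-damping channel and the helper names are our own choices): the Choi operator is assembled from a Kraus representation, its rank is compared with $d_{\mc K}d_{\mc H}$, and an equivalent Kraus set is recovered from its eigenvectors.
\begin{verbatim}
# The Choi operator of a channel built from r Kraus operators has rank r,
# and the channel is a boundary element exactly when r < d_K * d_H.
import numpy as np

def choi_from_kraus(kraus, d_in):
    """C = sum_{m,n} |m><n| (x) sum_j K_j |m><n| K_j^dag  (input dim d_in)."""
    d_out = kraus[0].shape[0]
    C = np.zeros((d_in * d_out, d_in * d_out), dtype=complex)
    for m in range(d_in):
        for n in range(d_in):
            Emn = np.zeros((d_in, d_in)); Emn[m, n] = 1.0
            out = sum(K @ Emn @ K.conj().T for K in kraus)
            C += np.kron(Emn, out)
    return C

def kraus_from_choi(C, d_in, d_out, tol=1e-10):
    """Recover Kraus operators by reshaping the eigenvectors |K_j> of C."""
    evals, evecs = np.linalg.eigh(C)
    return [np.sqrt(v) * evecs[:, i].reshape(d_in, d_out).T
            for i, v in enumerate(evals) if v > tol]

# Qubit amplitude-damping channel: two Kraus operators -> Choi rank 2 < 4,
# hence a boundary element of the set of qubit channels.
g = 0.4
K = [np.array([[1, 0], [0, np.sqrt(1 - g)]]), np.array([[0, np.sqrt(g)], [0, 0]])]
C = choi_from_kraus(K, d_in=2)
print(np.linalg.matrix_rank(C, tol=1e-10))     # 2  -> boundary channel
K2 = kraus_from_choi(C, 2, 2)
print(np.allclose(choi_from_kraus(K2, 2), C))  # True: same channel recovered
\end{verbatim}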
\section{Evaluation of boundariness for a qubit ``erasure'' channel}
\label{sec:appccchannel}
The aim of this appendix is to study two-element convex decompositions of the channel ${\cal E}_p$ into extremal rank-two qubit channels $\mathcal{F}$ and channels $\mathcal{G}$.
Any such channel $\mathcal{F}$ has a Choi matrix, which can be written in the spectral form:
\begin{align}
\label{eq:ccdeff}
F=\frac{1}{2}(1+q)\ket{\psi}\bra{\psi}+\frac{1}{2}(1-q)\ket{\phi}\bra{\phi}\,,
\end{align}
where $\ket{\psi},\ket{\phi}$ are mutually orthogonal unit vectors
on $\mc{H}_2\otimes\mc{H}_2$ and $0\leq q < 1$, hence $\tr{F}=1$.
The vectors $\ket{\psi}$, $\ket{\phi}$ can be written in the Schmidt form
\begin{align}
\label{eq:ccschmidt}
\ket{\psi}&=\sqrt{s}\ket{u}\ket{v}+\sqrt{1-s}\ket{u_\perp}\ket{v_\perp}, \\
\ket{\phi}&=\sqrt{r}\ket{w}\ket{v^\prime}+\sqrt{1-r} \ket{w_\perp}\ket{v^\prime_\perp}
\end{align}
with $1/2< s\leq 1$ and
$0 \leq r\leq 1$. Let us note that $s=1/2$ does not correspond to an extremal channel, but to a mixture of unitary channels (i.e. it leads to $r=1/2$).
The condition ${\rm tr}_1 F = \frac{1}{2}I$
requires that $\ket{v^\prime}=\ket{v}$ and
\begin{align}
\label{eq:relqsr}
r=\frac{1-(1+q)s}{1-q}.
\end{align}
The orthogonality $\langle{\psi}|{\phi}\rangle=0$ gives
\begin{align}
\label{eq:ccortho}
0=\sqrt{sr}\langle{u}|{w}\rangle+\sqrt{(1-s)(1-r)}\langle{u_\perp}|{w_\perp}\rangle .
\end{align}
For any two states of a qubit it holds that $|\langle{u}|{w}\rangle|=|\langle{u_\perp}|{w_\perp}\rangle |$.
Thus, Eq. (\ref{eq:ccortho}) can be satisfied only in two ways: i) $\langle{u}|{w}\rangle=-\langle{u_\perp}|{w_\perp}\rangle$ and $rs=(1-s)(1-r)$, which is, according to Eq. (\ref{eq:relqsr}),
equivalent to $q=0$; ii) both overlaps in Eq. (\ref{eq:ccortho}) vanish.
Let us start with case i),
i.e.\ both nonzero eigenvalues of the Choi operator $F$ are equal to $1/2$
and the scalar products $\langle{u}|{w}\rangle$ and $\langle{u_\perp}|{w_\perp}\rangle$ have
opposite signs.
Since channel $\mathcal{G}$ must belong to the boundary of the set of channels, there exists a normalized vector $\ket{\varphi}$ from the kernel of $G$, i.e. $\bra{\varphi}G\ket{\varphi}=0$.
We compute the expectation value of $E_p$ along the vector $\ket{\varphi}$. Using Eq. (\ref{eq:ccdecomp1}) we get
\begin{align}
\label{eq:chr2lb1}
\frac{p}{2}\leq \bra{\varphi}E_p\ket{\varphi}=t \bra{\varphi}F\ket{\varphi}=t c,
\end{align}
where the lower bound on the left follows from the eigenvalues of $E_p$ being greater than or equal to $p/2$, and we denoted $c\equiv\bra{\varphi}F\ket{\varphi}$.
We notice that $0<c \leq 1/2$, because $F$ is positive semidefinite and its nonzero eigenvalues equal $1/2$.
From Eq. (\ref{eq:chr2lb1}) we get the lower bound $t\geq p/(2c) \geq p >p(1-p)$.
In other words, the weight function $t_{{\cal E}_p}(\mathcal{F})$ attains, on these channels $\mathcal{F}$, values higher than $p(1-p)$.
Thus, we conclude that the convex decompositions (\ref{eq:ccdecomp1}) with rank-two
channels $\mathcal{F}$ having $\langle{u}|{w}\rangle\neq 0$ cannot achieve values of $t$ as small as those achieved by the unitary channels.
So let us investigate the case ii) and assume $\langle{u}|{w}\rangle=0$.
Our aim is to show that also in this case $t>p(1-p)$.
Unfortunately, we were
not able to solve this part of the problem completely analytically, and we had to rely on the numerical approach outlined in Remark \ref{prop:algorithm}.
Thus, the test whether the Choi operators $G$ generated by operators $F$ and the weight $p(1-p)$ correspond to channels was done numerically.
More precisely, for $t=p(1-p)$ we calculated the smallest eigenvalue of $G$
for many choices of $F$ from the current subclass of extremal rank-two qubit
channels and we confirmed that in all cases the obtained value is non-negative, i.e. $G$ always corresponded to a channel. Below are some details on how the actual test was done.
Without loss of generality we can write
\begin{align}
\label{eq:ccmc1}
\ket{\phi}&=\sqrt{r}\ket{u_\perp}\ket{v}+e^{i\alpha}\sqrt{1-r} \ket{u}\ket{v_\perp} .
\end{align}
The Choi-operator $E_p$ is invariant under the unitary transformations $I\otimes V$ on the input Hilbert space. These transformations do not change eigenvalues, so to investigate
eigenvalues of $G$ we can equivalently investigate $(I\otimes V) \, G \, (I\otimes V^\dagger)$, which is for $V\ket{v}=\ket{0}$ the same as choosing $\ket{v}=\ket{0}$ in Eqs.
(\ref{eq:ccschmidt}), (\ref{eq:ccmc1}) and working directly with $G$. Moreover, we parameterize the vectors $\ket{u}$, $\ket{u_\perp}$ as:
\begin{align}
\label{eq:ccdefog}
&\ket{u}=\cos{\frac{\theta}{2}}\ket{0}+e^{i\beta}\sin{\frac{\theta}{2}} \ket{1} \nonumber \\
&\ket{u_\perp}=e^{i\gamma}\sin{\frac{\theta}{2}}\ket{0}-e^{i(\gamma+\beta)} \cos{\frac{\theta}{2}}\ket{1}.
\end{align}
In this way the operator
\begin{align}
G&=\frac{1}{1-p(1-p)} \left(E_p - p(1-p)F \right)
\end{align}
further specified by Eqs.
(\ref{eq:ccdeff}-\ref{eq:ccschmidt}), (\ref{eq:relqsr}), (\ref{eq:ccmc1}-\ref{eq:ccdefog})
and $\ket{v}=\ket{0}$ becomes a function of parameters $q,s,\alpha,\beta,\gamma,\theta$.
Let us note that Eq. (\ref{eq:relqsr}) requires the parameters $q$ and $s$ to fulfill $s \leq 1/(1+q)$, since one must have $r\geq 0$. In particular, $q\rightarrow 1$ requires
$s\rightarrow 1/2$, and the operator $F$ converges to the Choi operator of a unitary channel. In such a case we expect that $\lambda_G$, the minimal eigenvalue of $G$, will converge
to zero, because $\mathcal{G}$ must converge to the boundary of the set of channels.
\begin{figure}
\caption{(Color online) Illustration of the dependence of the minimal eigenvalue $\lambda_G$ of the operator $G$ on the parameter $q$, depicted for different values of the remaining parameters $s,\alpha,\beta,\gamma,\theta$ and $p$.}
\label{fig:numresults}
\end{figure}
For this reason it is useful to plot $\lambda_G$ as a function of $q$ for some choice of the remaining parameters (see Fig. \ref{fig:numresults}). By numerically analyzing the actual
dependence of the graphs on the parameters $s,\alpha,\beta,\gamma,\theta$ we observed that, for a fixed $q$, the minimum and the maximum value of $\lambda_G$ can be achieved
only when $s=1/(1+q)$ and $\theta=0$ or $\theta=\pi/2$, respectively. In such a case the parameters $\alpha,\beta$ and $\gamma$ do not influence $\lambda_G$,
and it can be calculated analytically. The obtained dependencies $G_{\min}(p,q)$ and $G_{\max}(p,q)$ are visualized in Fig. \ref{fig:numresults} as red lines, which form the boundary
of the area where $\lambda_G$, the minimal eigenvalue of $G$, lies for any possible choice of its parameters. We can show that the minimum of $G_{\min}(p,q)$ is zero and it is
achieved only for $q=1$, corresponding to a unitary channel $\mathcal{F}$. Similarly, all the blue points in Fig. \ref{fig:numresults}, corresponding to the minimal eigenvalue of $G$
for some choice of its parameters, satisfy $\lambda_G>0$, which proves that $G\geq 0$ in the considered range of parameters $q,s,\alpha,\beta,\gamma,\theta$. In conclusion, we proved that the boundariness is indeed achieved for
decompositions containing at least one unitary channel; thus, it reads
$b({\cal E}_p)=p(1-p)$.
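For completeness, the numerical test described above can be reproduced with the following Python sketch (ours; the sampling ranges and the number of samples are our own choices).
\begin{verbatim}
# For t = p(1-p), sample the parameters q, s, alpha, beta, gamma, theta of a
# rank-two extremal F with <u|w>=0 and verify that G = (E_p - t F)/(1 - t)
# stays positive semidefinite, i.e. G is a valid (boundary) channel.
import numpy as np

def ket_u(theta, beta, gamma):
    u = np.array([np.cos(theta / 2), np.exp(1j * beta) * np.sin(theta / 2)])
    u_perp = np.array([np.exp(1j * gamma) * np.sin(theta / 2),
                       -np.exp(1j * (gamma + beta)) * np.cos(theta / 2)])
    return u, u_perp

def min_eig_G(p, q, s, alpha, beta, gamma, theta):
    u, up = ket_u(theta, beta, gamma)
    e0, e1 = np.array([1.0, 0]), np.array([0, 1.0])
    r = (1 - (1 + q) * s) / (1 - q)                      # Eq. (eq:relqsr)
    psi = np.sqrt(s) * np.kron(u, e0) + np.sqrt(1 - s) * np.kron(up, e1)
    phi = np.sqrt(r) * np.kron(up, e0) \
        + np.exp(1j * alpha) * np.sqrt(1 - r) * np.kron(u, e1)
    F = 0.5 * (1 + q) * np.outer(psi, psi.conj()) \
      + 0.5 * (1 - q) * np.outer(phi, phi.conj())
    E_p = np.kron(np.diag([p, 1 - p]), np.eye(2) / 2)
    t = p * (1 - p)
    G = (E_p - t * F) / (1 - t)
    return np.linalg.eigvalsh(G).min()

rng = np.random.default_rng(1)
worst = np.inf
for _ in range(2000):
    p = rng.uniform(0.01, 0.49)
    q = rng.uniform(0.0, 0.99)
    s = rng.uniform(0.5, 1.0 / (1 + q))
    worst = min(worst, min_eig_G(p, q, s, *rng.uniform(0, 2 * np.pi, 3),
                                 rng.uniform(0, np.pi)))
print(worst)   # >= 0 (up to numerical precision), confirming G >= 0
\end{verbatim}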
\end{document}
\begin{document}
\title{Using Cascade in Quantum Key Distribution}
\author{Devashish Tupkary}
\email{[email protected]}
\affiliation{Institute for Quantum Computing and Department of Physics and Astronomy, University of Waterloo, Waterloo, Ontario, Canada, N2L 3G1}
\author{Norbert L\"utkenhaus}
\email{[email protected]}
\affiliation{Institute for Quantum Computing and Department of Physics and Astronomy, University of Waterloo, Waterloo, Ontario, Canada, N2L 3G1}
\begin{abstract}
We point out a critical flaw in the analysis of Quantum Key Distribution (QKD) protocols that employ the two-way error correction protocol Cascade. Specifically, this flaw stems from an incomplete consideration of all two-way communication that occurs during the Cascade protocol. We present a straightforward and elegant alternative approach that addresses this flaw and produces valid key rates. We exemplify our new approach by comparing its key rates with those generated using older, incorrect approaches, for Qubit BB84 and Decoy-State BB84 protocols. We show that in many practically relevant situations, our rectified approach produces the same key rate as older, incorrect approaches. However, in other scenarios, our approach produces valid key rates that are lower, highlighting the importance of properly accounting for all two-way communication during Cascade.
\end{abstract}
\maketitle
\section{Introduction}
Quantum Key Distribution (QKD) \cite{bennett2014Quantum,bruss1998Optimal,bennett1992Quantum} can provide information-theoretic security of secret keys between two communicating parties, Alice and Bob. Since the quantum channel connecting Alice and Bob is not perfect in any practical realization, QKD protocols implement an error-correction step to correct errors in the measurement data collected by Alice and Bob. This involves classical communication between the two parties, and leaks additional information to the eavesdropper Eve, which must be accounted for when calculating the achievable secret key rate. Cascade \cite{brassard1994Secretkey} is one of the most widely used two-way error-correction protocols for QKD. A lot of work has been done optimizing various parameters of the Cascade protocol, such as its block sizes, number of rounds, efficiency, etc.\ \cite{reis2019Quantum,pedersen2015High,martinez-mateo2015Demystifying,elkouss2009Efficient,calver2011empirical,mao2021High,pacher2015information,erven2008Entangled}. Cascade has also been used in a large number of QKD experiments \cite{dixon2017Quantum,su2009Continuous,gobby2004Quantum,tentrup2019Largealphabet,lorenz2004Continuousvariable}.
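The sketch below (ours) illustrates a single Cascade pass: block parities are exchanged over the public classical channel and a binary search locates one error per mismatching block. The full protocol additionally backtracks through earlier passes, which is omitted here, and the block size is an arbitrary illustrative choice.
\begin{verbatim}
# Minimal sketch of one Cascade pass (no backtracking through earlier passes).
import random

def binary_search_error(alice_block, bob_block, leaked):
    """Locate one error position via parity bisection; count leaked parities."""
    lo, hi = 0, len(alice_block)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        leaked[0] += 1                      # one parity bit announced
        if sum(alice_block[lo:mid]) % 2 != sum(bob_block[lo:mid]) % 2:
            hi = mid
        else:
            lo = mid
    return lo

def cascade_first_pass(alice, bob, block_size=8):
    leaked = [0]
    bob = bob[:]                            # work on a copy of Bob's string
    for start in range(0, len(alice), block_size):
        a_blk = alice[start:start + block_size]
        b_blk = bob[start:start + block_size]
        leaked[0] += 1                      # top-level block parity announced
        if sum(a_blk) % 2 != sum(b_blk) % 2:
            pos = binary_search_error(a_blk, b_blk, leaked)
            bob[start + pos] ^= 1           # Bob flips the located bit
    return bob, leaked[0]

random.seed(0)
alice = [random.randint(0, 1) for _ in range(64)]
bob = [b ^ (random.random() < 0.05) for b in alice]     # ~5% channel errors
corrected, parities = cascade_first_pass(alice, bob)
print(sum(a != b for a, b in zip(alice, corrected)), parities)
\end{verbatim}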
Our main result is to rectify a flaw in the analysis of QKD protocols using Cascade, which stems from an incomplete consideration of the two-way classical communication during Cascade. We observe that in past literature, only the communication from Alice to Bob has been accounted for when considering information leakage about the key to Eve. For a rigorous security proof, the communication from Bob to Alice must also be included when bounding the information leaked to Eve.
\begin{comment}One valid approach is to simply add the number of bits leaked during the Bob to Alice communication to the information leakage term. However, we show that such an approach results in low key rates, since it almost doubles the information leaked to Eve during error correction. A better approach is required to make Cascade fit for use in QKD protocols.
\end{comment}
We propose a straightforward and elegant alternative approach that produces valid key rates. The main idea is to compute key rates for a protocol that leaks all the communication from Alice to Bob during Cascade, along with the locations of all errors in Alice's and Bob's raw data, to Eve in the information reconciliation step. We show that this leaks more information to Eve than Cascade, and thus any key rate for such a protocol is a valid key rate for the original protocol that uses Cascade.
We apply our solution to the qubit-based BB84 protocol, and the polarization-encoded weak coherent pulse (WCP) BB84 with decoy intensities, for a variety of channel models and constraints. We use the numerical framework from \cite{Winick2018} for our calculations. In this work, we restrict our attention to the asymptotic regime for simplicity, where one can assume an IID collective attack without loss of generality \cite{Renner2016, christandl2009Postselection}. However, our solution can be directly adapted to the analysis of finite-size protocols. This is because many such analyses ultimately involve the optimization of the same objective function \cite{George2021, valerio_2008, valeriotwoway, Tomamichel_2017} (with different constraints), and our approach only modifies the objective function.
This paper is organized as follows. In Sec. \ref{sec:background} we explain the steps in a generic QKD protocol and explain the Cascade protocol briefly. In Sec. \ref{sec:usingcascade} we explain the problem with past analysis of QKD protocols using Cascade, and present our arguments for correcting it. We also review the numerical framework that we used to compute key rates in this work. In Sec. \ref{sec:qubit_bb84} and Sec. \ref{sec:WCP_Decoy} we apply our solution to the BB84 protocol implemented using qubits, and WCP states with decoy intensities respectively. In Sec. \ref{sec:conclusion} we present concluding remarks.
\section{Background}
\label{sec:background}
\subsection{Protocol Description}
In this subsection, we give a description of the asymptotic formulation of a typical QKD protocol that can use Cascade in the information reconciliation step.
\begin{enumerate}
\item \textbf{Quantum Phase:} In an entanglement-based protocol, Alice and Bob receive states from a source and perform measurements on them. In a prepare-and-measure protocol, Alice prepares and sends signals to Bob, who measures them. The security analysis of a prepare-and-measure scheme can be reduced to that of an entanglement-based scheme using the source replacement scheme \cite{curty2004Entanglement}.
\item \textbf{Acceptance Test (Parameter Estimation):} Alice and Bob announce the measurements obtained, and signals sent, for a small fraction of signals. They then perform a test to decide whether to abort or continue the protocol. This step is modelled as Alice and Bob performing some measurements given by POVMs $\{ \Gamma_k\}$, obtaining expectation values $\{ \gamma_k \}$. The POVMs and expectation values depend on whether the protocol implements ``fine-graining'' or ``coarse-graining'' during the acceptance test \cite{wang2021Numerical}, and the exact nature of the coarse-graining.
\item \textbf{Classical processing:} For the remaining signals, Alice and Bob perform some blockwise processing of data. This involves operations such as public announcements and sifting to remove unwanted signals. Alice then implements a key map that maps her local data and the information exchanged in the blockwise processing, to her raw key.
\item \textbf{Error correction and verification:} Alice and Bob implement error correction by exchanging classical information. Cascade can be used in this step. Alice and Bob then compare a randomly chosen hash of their raw keys for error verification, and abort the protocol if the hashes do not match.
\item \textbf{Privacy Amplification :} Alice and Bob choose a common two-universal hash function and apply it to their raw keys to generate their final secret key.
\end{enumerate}
If $\tilde{A}, \tilde{B}$ denote announcements made by Alice and Bob in the blockwise processing step, and $Z$ denotes the result of the key map implemented by Alice, and $E$ is Eve's quantum system, then the key rate is given by \cite{Renner2016,Devetak2005}
\begin{equation} \label{eq:basickey rate}
R = \min_{\rho \in S(\vec{\gamma})} S(Z| E \tilde{A} \tilde{B} ) - p_{\text{pass}} \times \delta_\text{leak},
\end{equation}
where the minimization is over all states $\rho$ belonging to $S(\vec{\gamma}) = \{ \rho \in H_+ | \text{Tr}(\Gamma_k \rho)= \gamma_k \}$ and $H_+$ denotes positive semidefinite operators, $p_\text{pass}$ denotes the probability of the signal to pass sifting, and $\delta_{\text{leak}}$ is the number of bits used during error correction, per bit of raw key.
\subsection{Cascade}
In this subsection we briefly describe the error correction protocol Cascade \cite{brassard1994Secretkey}. Cascade is a simple and efficient error-correction protocol, and its principal limitation is the requirement for highly interactive communications, as compared to approaches such as LDPC codes (which suffer from a high computational cost in iterative decoding) \cite{xu2020Secure}. To understand Cascade we first look at a subprotocol called BINARY, which corrects a single error in bit strings that contain an odd number of errors.
\begin{itemize}
\item \textbf{BINARY: } If bit strings $X$ and $Y$ have an odd number of errors, then Alice divides her string into halves and sends the parity of the first half to Bob. Bob divides his string the same way, and announces whether the parity of the first half or of the second half is wrong. Alice and Bob repeat the operation on the half whose parity was wrong.
\item The process terminates when Alice reveals the single bit which contains an error, and Bob corrects that error.
\item This process involves sending $\approx \log(k)$ bits from Alice to Bob, and $\approx \log(k)$ bits from Bob to Alice, where $k$ is the length of the strings $X$ and $Y$. The process corrects one error (a short illustrative sketch of this exchange is given after this list).
\end{itemize}
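To make the bookkeeping of exchanged parities concrete, the following Python sketch simulates BINARY on a toy block containing a single error. It is an illustration only: the strings, the error position, and the function names are our own placeholders and not part of the Cascade specification.
\begin{verbatim}
# Illustrative BINARY run on a block with a single error.
# Counts the parity bits sent in each direction.
def binary(x, y):
    lo, hi = 0, len(x)            # current block is x[lo:hi]
    bits_a = bits_b = 0           # parity bits sent by Alice / Bob
    while hi - lo > 1:
        mid = (lo + hi) // 2
        bits_a += 1               # Alice: parity of the first half
        bits_b += 1               # Bob: which half has the wrong parity
        if sum(x[lo:mid]) % 2 != sum(y[lo:mid]) % 2:
            hi = mid              # error is in the first half
        else:
            lo = mid              # error is in the second half
    bits_a += 1                   # Alice reveals the erroneous bit
    y[lo] = x[lo]                 # Bob corrects it; no reply is needed
    return lo, bits_a, bits_b

x = [0, 1, 1, 0, 1, 0, 0, 1]
y = list(x)
y[5] ^= 1                         # introduce a single error
print(binary(x, y))               # (5, 4, 3) for k = 8
\end{verbatim}
As the counts show, Alice sends one more bit than Bob: the final bit she reveals needs no reply, which is the asymmetry used in Sec.~\ref{sec:usingcascade}.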
\textbf{Cascade:} The Cascade protocol consists of several passes and proceeds as follows.
\begin{enumerate}
\item Alice and Bob divide their bit strings $X_1...X_N$ and $Y_1...Y_N$, where $N$ is the total number of sifted bits, into blocks of size $k_1$. In pass 1, Alice and Bob reveal the parity of each block to determine the blocks with an odd number of errors. For each block with an odd number of errors, Alice and Bob run BINARY to correct one error. At the end of pass 1, all blocks have an even number of errors.
\item In any pass $i \geq 2$, Alice and Bob choose a blocksize $k_i$ and a random function $f_i : [1...N] \rightarrow [1 ... N/k_i]$, which assigns each bit to a block in pass $i$. The bits whose positions belong to $K^i_j = \{ l \mid f_i (l)=j\}$ form the $j$th block in the $i$th pass.
\item Alice sends the parity of each block $P_{(A,i,j)} = \oplus_{l \in K^i_j} X_l $ to Bob, who computes his parity for the same block and announces it. For each block where $P_{(A,i,j)} \neq P_{(B,i,j)}$, Alice and Bob perform the following operations.
\begin{enumerate}
\item Alice and Bob perform BINARY on the block defined by $K^i_j$ and correct one error, say at position $l$. Now, all blocks from previous passes that contain position $l$ have an odd number of errors. In this way, a single error corrected in a later pass leads to the identification of several error-containing blocks from earlier passes. Let the set of such blocks be $\mathcal{K}$.
\item Alice and Bob choose the smallest block from $\mathcal{K}$ and run BINARY to correct one error. They then recompute the set $\mathcal{K}$ of blocks containing an odd number of errors. This process is repeated until $\mathcal{K}$ is empty.
\end{enumerate}
\item At the end of pass $i$, all blocks generated in all passes contain an even number of errors. Alice and Bob then move to the next pass.
\end{enumerate}
\begin{remark} The main ingredient of Cascade that we will use is the fact that for every parity bit Alice sends to Bob, Bob sends the corresponding parity bit to Alice (except for the last bit of the BINARY protocol, where Bob simply corrects his bit and sends no reply). There are several variants of the Cascade protocol, which vary in the manner in which blocks are created, the blocksizes used, and the number of passes. Such variations do not change the fact that Bob announces the same set of parities as Alice, and thus our claims hold for all such variants. \label{remark_cascade} \end{remark}
We note that the details of the blocks generated in a given pass have to be communicated between Alice and Bob. However, the blocks are generated randomly and independent of the QKD protocol. Therefore, the act of communicating the details of these blocks does not provide any additional information to Eve about the key \cite{Scarani2009}.
\section{Using Cascade in QKD protocols}
\label{sec:usingcascade}
\subsection{The Problem}
In the original proposal for Cascade \cite{brassard1994Secretkey}, an analytical upper bound $\delta^\text{A}_\text{leak}$ on the number of bits sent from Alice to Bob per bit of raw key is obtained. In an actual experiment, an upper bound $\delta^\text{A}_{\text{leak}}$ can also be chosen empirically by running multiple iterations of Cascade for the expected error rate.
For the purposes of this work, it does not matter how $\delta^\text{A}_{\text{leak}}$ is obtained. For convenience, we will denote the upper bound as $\delta^\text{A}_{\text{leak}} = f h(e)$, where $e$ is the error-rate in the raw key, $h$ is the binary entropy function, and $f$ is a number that denotes the efficiency. Typical values of $f$ for Cascade are between $1$ and $1.5$, and can be found in \cite{brassard1994Secretkey,mao2021High,elkouss2009Efficient,martinez-mateo2015Demystifying}.
The original Cascade paper \cite{brassard1994Secretkey} only provides an upper bound on the number of bits sent from Alice to Bob, i.e.\ $\delta^\text{A}_\text{leak}$, and defines the `efficiency' of Cascade as the ratio of the actual number of bits per signal sent from Alice to Bob to $h(e)$, where $e$ is the error rate and $h$ is the binary entropy function.
Therefore, it has been erroneously assumed that $\delta^\text{A}_\text{leak}$ is the true value of $\delta_{\text{leak}}$ in Eq.~\eqref{eq:basickey rate} when Cascade is used in QKD. It is assumed incorrectly that
\begin{equation}
\label{eq:incorrect_cascade}
R_{\text{incorrect}} = \min_{\rho \in S(\vec{\gamma})} S(Z| E \tilde{A} \tilde{B} ) - p_{\text{pass}} \times \delta^\text{A}_\text{leak}
\end{equation}
is the expression for the key rate. However, \textit{all classical communication} must be assumed to be known to Eve, and the above equation does not account for the communication from Bob to Alice during Cascade. In fact, since Bob's data is correlated with that of Alice, it is entirely possible for Bob's communication to leak additional information about Alice's raw key to Eve.
\subsection{A Naive Approach}
One naive approach to fix Eq.~\eqref{eq:incorrect_cascade} is to include $\delta^\text{B}_{\text{leak}}$, an upper bound on the number of bits leaked during the Bob to Alice communication, in $\delta_{\text{leak}}$ in Eq.~\eqref{eq:basickey rate}. Therefore, a naive, but correct expression for key rate would be
\begin{equation}
\label{eq:naive_cascade}
R_{\text{naive}} = \min_{\rho \in S(\vec{\gamma})} S(Z| E \tilde{A} \tilde{B} ) - p_{\text{pass}} \times (\delta^\text{A}_\text{leak} + \delta^\text{B}_\text{leak}).
\end{equation}
Here $\delta^\text{A}_\text{leak}$ can be replaced with $f h(e)$. Cascade requires Bob to send a bit to Alice for every bit Alice sends to Bob, unless the bit corresponds to the last step of the BINARY protocol. The last step of the BINARY protocol corrects a single bit, and every bit corrected is the last step of some BINARY run. Therefore $\delta^\text{B}_\text{leak}=\delta^\text{A}_\text{leak} - e$, since the last step of each BINARY run corrects exactly one error and receives no reply.
Therefore, $(\delta^\text{A}_\text{leak} + \delta^\text{B}_\text{leak}) = 2 f h(e) - e $, which almost doubles the cost of error correction. Using this value in Eq.~\eqref{eq:naive_cascade} yields valid key rates. However, the values obtained are far worse than those obtained for any one-way error-correction protocol, which would typically make Cascade unsuitable for QKD.
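As a rough numerical illustration of this penalty, the short Python sketch below compares $f h(e)$ with $2 f h(e) - e$ for a few error rates; the efficiency value $f=1.16$ is chosen here only as a representative number within the typical range quoted above.
\begin{verbatim}
# Compare one-way leakage f*h(e) with the naive two-way
# accounting 2*f*h(e) - e; f = 1.16 is an illustrative value.
from math import log2

def h(e):
    if e in (0.0, 1.0):
        return 0.0
    return -e * log2(e) - (1 - e) * log2(1 - e)

f = 1.16
for e in (0.01, 0.03, 0.05, 0.10):
    print(e, round(f * h(e), 3), round(2 * f * h(e) - e, 3))
\end{verbatim}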
\subsection{Our Solution}
\label{subsec:our_solution}
We show that one can do better than Eq.~\eqref{eq:naive_cascade}. Recalling Remark \ref{remark_cascade}, we note that the communication from Bob to Alice can be computed from two pieces of information: (1) the communication from Alice to Bob, and (2) the knowledge of the location of errors $W_i=X_i\oplus Y_i$ for each bit of Alice and Bob's data. This is because for any parity bit $P_{(A,i,j)}$ sent by Alice during Cascade, Bob sends a bit $P_{(B,i,j)}$ that is the parity of the \textit{same set of bits} of his data. Therefore $P_{(A,i,j)} \oplus P_{(B,i,j)} = \sum_{l \in K^i_j} X_l \oplus Y_l = \sum_{l \in K^i_j} W_l$, where we recall that $K^i_j$ is the set of positions of the bits that made up the $j$th block in the $i$th pass of Cascade.
This property implies that a modified protocol that leaks all communication from Alice to Bob, and additionally leaks $W$, \textit{can only leak more (or equal) information} to Eve than Cascade. Thus, to lower bound the key rate for a QKD protocol using Cascade, we can lower bound the key rate for the QKD protocol that announces $W$ and only involves the Alice-to-Bob part of the communication from Cascade. Therefore, we can compute
\begin{equation}
\label{eq:improved_cascade}
R = \min_{\rho \in S(\vec{\gamma})} S(Z| E W \tilde{A} \tilde{B} ) - p_{\text{pass}} \times \delta^\text{A}_\text{leak}
\end{equation}
as a valid key rate for any QKD protocol using Cascade.
We note that Eq.~\eqref{eq:incorrect_cascade} will always produce a key rate that is greater than or equal to the one produced from Eq.~\eqref{eq:improved_cascade}, since $S(Z|E\tilde{A} \tilde{B} ) \geq S(Z|EW\tilde{A} \tilde{B} )$ by subadditivity. We show that both the equality and the strict inequality can occur, thereby proving that using Eq.~\eqref{eq:incorrect_cascade} can produce key rates that are not justified. In some cases, we obtain equality, which indicates that announcing $W$ gives Eve no new information. In such cases, although the valid key rate does not change, the argument that properly addresses the communication from Bob to Alice is lacking in the literature and is provided by this work.
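The reconstruction argument above can also be checked mechanically. The following Python sketch uses a toy raw key and a hypothetical block assignment (both are placeholders of our own choosing) to verify that Bob's Cascade parities follow from Alice's parities and the error string $W$.
\begin{verbatim}
# Bob's Cascade parities are a function of Alice's parities and
# the error string W = X xor Y. Block sets below are placeholders.
import random

random.seed(0)
N = 16
X = [random.randint(0, 1) for _ in range(N)]
W = [1 if i in (3, 9) else 0 for i in range(N)]   # toy error positions
Y = [x ^ w for x, w in zip(X, W)]

blocks = [list(range(0, 8)), list(range(8, 16))]  # one toy "pass"
for K in blocks:
    p_alice = sum(X[l] for l in K) % 2
    p_bob = sum(Y[l] for l in K) % 2
    assert p_bob == p_alice ^ (sum(W[l] for l in K) % 2)
print("Bob's parities follow from Alice's parities and W")
\end{verbatim}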
\subsection{Computing Key rates}
We are interested in the difference between the incorrect key rate from Eq.~\eqref{eq:incorrect_cascade} and the key rate from our proposed approach from Eq.~\eqref{eq:improved_cascade}. Therefore we compute two quantities, $F =\min_{\rho \in S(\vec{\gamma})} S(Z| E \tilde{A} \tilde{B} )$ and $F^\prime = \min_{\rho \in S(\vec{\gamma})} S(Z| E W \tilde{A} \tilde{B} )$.
We use the numerical framework from \cite{Winick2018} to perform these optimizations. This framework equivalently describes the steps in the QKD protocol via Kraus operators $\{K_i\}$, which represent measurements, announcements and sifting done by Alice and Bob, and $\{Z_j\}$ which implement a pinching channel on the key register. Our solution, which requires the computation of $F^\prime$ instead of $F$ can be easily implemented by a suitable change in the Kraus operators for the optimization problem for $F$.
The numerical framework equivalently describes the optimization problem for $F$ as
\begin{equation}
F = \min_{\rho \in S(\vec{\gamma})} f(\rho),
\end{equation}
where
\begin{equation}
\begin{aligned}
f(\rho) &= D (\mathcal{G} (\rho) || \mathcal{Z} (\mathcal{G}(\rho) ) ), \\
\mathcal{G}(\rho) &= \sum_i K_i \rho K_i^\dagger, \\
\mathcal{Z} (\mathcal{G}(\rho) ) &= \sum_j Z_j \mathcal{G} (\rho) Z_j^\dagger,
\end{aligned}
\end{equation}
and where $D(X || Y) = \text{Tr} (X \log (X) ) - \text{Tr} (X \log (Y))$ is the quantum relative entropy with $\log$ as the matrix logarithm.
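For readers who prefer code to notation, a minimal transcription of this objective function might look as follows. The Kraus operator lists \texttt{K\_ops} and \texttt{Z\_ops} are placeholders to be supplied per protocol, and the small perturbation is our own addition, there only to keep the matrix logarithms finite; this sketch is not the solver of Ref.~\cite{Winick2018}.
\begin{verbatim}
# Sketch of the objective f(rho) = D(G(rho) || Z(G(rho))).
# K_ops and Z_ops are placeholder lists of Kraus operators.
import numpy as np
from scipy.linalg import logm

def G(rho, K_ops):
    return sum(K @ rho @ K.conj().T for K in K_ops)

def Z_pinch(sigma, Z_ops):
    return sum(Zj @ sigma @ Zj.conj().T for Zj in Z_ops)

def rel_entropy(X, Y, eps=1e-12):
    # D(X||Y) in bits; mixing in a little identity keeps logm finite
    d = X.shape[0]
    Xp = (1 - eps) * X + eps * np.eye(d) / d
    Yp = (1 - eps) * Y + eps * np.eye(d) / d
    return np.real(np.trace(Xp @ (logm(Xp) - logm(Yp)))) / np.log(2)

def f(rho, K_ops, Z_ops):
    sigma = G(rho, K_ops)
    return rel_entropy(sigma, Z_pinch(sigma, Z_ops))
\end{verbatim}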
Let $\alpha$, $\beta$ be announcements (such as basis choice) made by Alice and Bob, and let Alice and Bob's POVMs be given by $P^A = \{P^A_{(\alpha,x)}\}$, and $P^B = \{P^B_{(\beta,y)}\}$, where $x,y$ denote bits that represent measurement outcomes. Let $\mathbf{A}$ be the set of announcements $(\alpha,\beta)$ that are kept after sifting. Furthermore, let $r(\alpha,\beta,x)$ be the keymap Alice implements. Then, the Kraus operators for Eq.~\eqref{eq:incorrect_cascade} are given by
\begin{equation} \label{eq:generic_kraus}
\begin{aligned}
K_{\alpha,\beta}& =\sum_{x,y} \ket{r(\alpha,\beta,x) }_Z \otimes \sqrt{P^A_{(\alpha,x)} \otimes P^B_{(\beta,y)} }\\
& \otimes \ket{x}_{X} \otimes \ket{y}_{Y} \otimes \ket{\alpha,\beta}_{\tilde{A} \tilde{B}},
\end{aligned}
\end{equation}
and the set of operators generating the $\mathcal{G}$ map is given by $\{K_i\} = \{ K_{\alpha,\beta} | (\alpha,\beta) \in \mathbf{A} \}$ \cite{Winick2018}.
The $\mathcal{Z}$ map is implemented by operators $\{ Z_i\} $ given by $Z_i = \ket{i} \bra{i}_Z \otimes I_{ABX Y \tilde{A} \tilde{B}}$. Notice that the output state $\mathcal{G}(\rho)$ is classical in $\alpha,\beta$, which reflects the fact that the basis choices are announced and known to Eve.
To compute $F^\prime$, we must include an additional announcement that announces $w=x \oplus y$. This is implemented by
\begin{equation} \label{eq:newkraus}
\begin{aligned}
K^\prime_{\alpha,\beta, w} &= \sum_{\substack{x,y \\ x\oplus y = w}} \ket{r(\alpha,\beta,x) }_Z \otimes \sqrt{P^A_{(\alpha,x)} \otimes P^B_{(\beta,y)} }\\
& \otimes \ket{x}_X \otimes \ket{y}_Y \otimes \ket{\alpha,\beta}_{\tilde{A} \tilde{B} } \otimes \ket{w}_{W},
\end{aligned}
\end{equation}
where the set of operators generating the new $\mathcal{G}^\prime$ map can be given by $\{K^\prime_i\} = \{ K^\prime_{\alpha,\beta, w} \mid (\alpha,\beta) \in \mathbf{A},\, w \in \{0,1\} \}$. The $\mathcal{Z}^\prime$ map is implemented by $\{ Z^\prime_i\} $ given by $Z^\prime_i = \ket{i} \bra{i}_Z \otimes I_{ABX Y \tilde{A} \tilde{B}W}$.
In the remainder of this paper, we compute both $F= \min_{\rho \in S(\vec{\gamma})} f(\rho)$ and $F^\prime = \min_{\rho \in S(\vec{\gamma})} f^\prime(\rho)$ for the various implementations of the BB84 protocol. If we find that $F=F^\prime$, then the previous analysis of Cascade, although not properly justified, happens to give the correct key rate. In this case, Eqs.~\eqref{eq:incorrect_cascade} and \eqref{eq:improved_cascade} give identical key rates.
If we find that $F > F^\prime$, then this indicates that the previous analysis was wrong and gave incorrect answers. The difference between the key rates obtained from Eqs.~\eqref{eq:incorrect_cascade} and ~\eqref{eq:improved_cascade} is equal to $F-F^\prime$.
Note that the above formulation applies to situations where Alice and Bob generate a bit string from their measurements ($x,y,x \oplus y$ are bits). Events such as no-detection either need to be discarded during sifting, or should be mapped to bits. This assumption is necessary to use Cascade, since it is a protocol that corrects errors in \textit{bit strings}. We also remark that since many finite-size key rate analyses involve the optimization of the same objective function ($F$) over different constraints, our solution can be easily applied to such finite-size analyses as well, by simply changing $F$ to $F^\prime$.
\section{Qubit BB84}
\label{sec:qubit_bb84}
\subsection{Protocol Description}
In this section, we present our results for the standard qubit-based BB84 protocol, where Alice prepares each of the four signal states $\{ \ket{0}, \ket{1}, \ket{+}, \ket{-} \}$ with equal probability, and Bob chooses the $Z$ or $X$ basis with equal probability.
Alice and Bob then implement an acceptance test on their observed statistics.
If the protocol accepts, Alice and Bob announce their bases, and throw away signals where they measured in different bases. Alice maps her measurement outcome to the raw key, and then proceeds to perform error correction (Cascade) and privacy amplification. For the descriptions of the exact Kraus operators of the protocol, we refer the reader to Appendix \ref{appendix:qubitBB84}.
\begin{table}
\centering
\begin{tabular}{|c|ccccc|}
\hline
\multicolumn{6}{|c|}{Bob Measures} \\
\hline
& & H & V & + & - \\ \hline
& H & \mk{$ \gamma_{HH}$ } & \mk{$\gamma_{HV}$} & $\gamma_{H+}$ & $\gamma_{H-}$ \\
Alice& V & \mk{$\gamma_{VH}$} & \mk{$\gamma_{VV}$} & $\gamma_{V+}$ & $\gamma_{V-}$ \\
Sends&+& $\gamma_{+H}$ & $\gamma_{+V}$ & \mk{$\gamma_{++}$ } & \mk{$\gamma_{+-}$} \\
& -& $\gamma_{-H}$ & $\gamma_{-V}$ & \mk{$\gamma_{-+}$} & \mk{$\gamma_{--}$ }\\
\hline
\end{tabular}
\caption{Format of fine-grained statistics obtained for qubit BB84. The rows denote the state sent by Alice, and the columns denote the measurement outcome measured by Bob. $H$ and $+$ correspond to measurement outcome $0$, while $V$ and $-$ correspond to measurement outcome $1$. $\gamma_{x,y}$ denotes the probability of Alice obtaining outcome $x$ and Bob obtaining outcome $y$.}
\label{table:qubit_constraints}
\end{table}
Alice and Bob obtain statistics shown in Table \ref{table:qubit_constraints} during the acceptance test. There are a variety of ways they can use these statistics to perform the acceptance test. We use the phrase ``fine-grained constraints'' to refer to the case where all the entries in Table \ref{table:qubit_constraints} are used for the acceptance test, and therefore in the constraints for $S(\vec{\gamma})$. We use ``sifted fine-grained'' to refer to the case where only the entries marked in red are used. We use ``coarse-grained'' constraints to refer to the case where only the (unnormalized) QBER and Gain constraints for each basis (given by $\text{Q}_Z = \gamma_{HV}+ \gamma_{VH}$, $\text{Q}_X = \gamma_{+-} + \gamma_{-+}$, $\text{gain}_Z = \gamma_{HH}+ \gamma_{HV}+ \gamma_{VH}+ \gamma_{VV}$, and $\text{gain}_X = \gamma_{++}+ \gamma_{+-}+ \gamma_{-+}+ \gamma_{--}$) are used. The gain sets the constraints on the probability of choosing each basis for measurement, while the QBER in each basis sets the constraints on the observed error-rate. We note that this is a departure from the nomenclature of \cite{wang2021Numerical}, where the ``coarse-grained'' case refers to the ``sifted fine-grained'' case as defined above. Additionally, we use the constraints from source-replacement that characterize Alice's system for prepare and measure protocols.
We consider a channel with misalignment and depolarization to compute statistics in Table \ref{table:qubit_constraints}, which is described in Appendix \ref{appendix:qubitBB84}. We also consider a channel model that includes a ``replacement'' channel $\Phi_{\text{replace}}$, that replaces the state leaving Alice's lab with the fixed signal state $\ket{0}$ ($\ket{H}$) with probability $\lambda=0.2$. The output of the replacement channel is then sent through a channel with misalignment and depolarization. The replacement channel is interesting because it breaks symmetries in the observed statistics.
If the replacement channel is also included in the channel model, then the new statistics can be obtained by replacing each row $\vec{\gamma}_i$ of Table \ref{table:qubit_constraints} by $(1-\lambda)\vec{\gamma}_i + \lambda\vec{\gamma}_H$ (since Alice sends each state with equal probability).
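As a one-line sanity check of this update rule, the sketch below applies it to a placeholder $4\times 4$ table of statistics (random numbers with each row summing to $1/4$; not simulated channel data) with the rows ordered $(H,V,+,-)$.
\begin{verbatim}
# Apply the replacement channel to a placeholder statistics table.
# Rows are ordered (H, V, +, -); each row sums to 1/4 here.
import numpy as np

lam = 0.2
rng = np.random.default_rng(1)
gamma = 0.25 * rng.dirichlet(np.ones(4), size=4)   # placeholder data
gamma_new = (1 - lam) * gamma + lam * gamma[0]     # mix in the H row
assert np.allclose(gamma_new.sum(axis=1), 0.25)
\end{verbatim}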
\subsection{Reduction to Bell-diagonal states}
\label{subsec:reduction}
In certain cases, the optimization of $f(\rho)$ over all states $\rho$ in $S(\vec{\gamma})$ can be reduced to that over all Bell-diagonal states in $S(\vec{\gamma})$, denoted by $S_\text{bell} (\vec{\gamma})$. That is, it can be shown that
\begin{equation} \label{eq:reduction}
\min_{\rho \in S_\text{bell}(\vec{\gamma}) } f(\rho) = \min_{\rho \in S (\vec{\gamma})} f(\rho).
\end{equation}
For Bell-diagonal states shared between Alice and Bob, Eve's state $E$ is always block-diagonal in the parities $W$ (see Appendix \ref{appendix:bell}), and therefore the additional announcement $W$ gives Eve no new information. In such cases, $F=F^\prime$.
There are several such arguments in the literature, which are identical at their core, but differ only in details of the protocols (such as the type of constraints, the number of bases used for key generation, and the type of classical processing). Ref.~\cite{ferenczi2011Symmetries} proves Eq.~\eqref{eq:reduction} for the case where $f(\rho)$ is the total key rate including the information leakage term, and the states are constrained in $S$ only by the average QBER over all bases. The analysis is done for $d$-dimensional systems in general. Ref.~\cite{Watanabe2007} proves Eq.~\eqref{eq:reduction} for the BB84 and six-state protocols, where $f(\rho)$ is the uncertainty of Eve about the raw key (with a modified classical processing), and the states are constrained in $S$ by each individual QBER, but only the Z basis is used for key generation. Ref.~\cite{tupkary2022Improved} generalizes this to a wider variety of classical processing in $f(\rho)$, while still constraining $S$ with separate QBERs, but using only the Z basis for key generation. In this work, we attempt to present a coherent unified picture of all such arguments for the convenience of the reader. We also point out how symmetry in \textit{observed values} can help in proving the reduction to Bell-diagonal states.
We start by defining the ``twirling map" \cite{bennett1996Mixed} as
\begin{equation}
\mathcal{T}(\rho)= \frac{1}{4} \sum_{i=1}^4 \rho_i = \frac{1}{4} \sum_{i=1}^4 (\sigma_i \otimes \sigma_i) \rho (\sigma_i \otimes \sigma_i)^\dagger,
\end{equation}
where the $\sigma_i$ denote the identity and the Pauli $X$, $Y$, and $Z$ operators. $\mathcal{T}(\rho)$ is often referred to as the ``twirled'' state, and can be shown to be always Bell-diagonal \cite{ferenczi2011Symmetries,bennett1996Mixed}. The proof of Eq.~\eqref{eq:reduction} now proceeds in two steps.
\noindent
\textit{Step 1 :} It is shown that
\begin{equation} \label{eq:first_condition}
f(\mathcal{T}(\rho)) \leq f(\rho) \quad \forall \rho.
\end{equation}
\textit{Step 2 :} It is shown that
\begin{equation} \label{eq:second_condition}
\rho \in S \implies \mathcal{T}(\rho) \in S_{\text{bell}}.
\end{equation}
The proof of Eq.~\eqref{eq:reduction} is then as follows: Clearly $\min_{\rho \in S_\text{bell} (\vec{\gamma}) } f(\rho) \geq \min_{\rho \in S(\vec{\gamma})} f(\rho)$, since $S_\text{bell} (\vec{\gamma}) \subseteq S (\vec{\gamma}) $. To show the other direction of the inequality, let $\rho^*$ be the state that achieves the minimization on the right hand side of Eq.~\eqref{eq:reduction}. Then, from Eq.~\eqref{eq:first_condition}, we obtain $f(\mathcal{T}(\rho^*)) \leq f(\rho^*)$. From Eq.~\eqref{eq:second_condition}, we know that $\mathcal{T}(\rho^*) \in S_\text{bell} (\vec{\gamma})$. Therefore, $ \min_{\rho \in S_\text{bell} (\vec{\gamma}) } f(\rho) \leq f(\mathcal{T}(\rho^*)) \leq \min_{\rho \in S (\vec{\gamma})} f(\rho)$.
Thus, to obtain Eq.~\eqref{eq:reduction} for a protocol of interest, one must show the validity of Eqs.~\eqref{eq:first_condition} and \eqref{eq:second_condition}. We prove in Appendix \ref{appendix:twirling} that Eq.~\eqref{eq:first_condition} holds for qubit protocols where key generation is done in all of the $X$, $Z$ (and, if applicable, $Y$) bases. Thus, to reduce the optimization to Bell-diagonal states and obtain $F=F^\prime$, we only need to check the validity of Eq.~\eqref{eq:second_condition}. This has to be considered separately for every choice of constraints and observed values, and is done in the next section.
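Both steps can also be spot-checked numerically for a given set of POVMs. The sketch below (an illustration with a random state, not part of our key rate computation) twirls a random two-qubit state, confirms that the result is Bell-diagonal, and confirms that a coarse-grained observable such as the $Z$-basis QBER element is unchanged.
\begin{verbatim}
# Twirl a random two-qubit state: the result is Bell-diagonal and
# coarse-grained observables such as the Z-basis QBER are unchanged.
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)
paulis = [I2, X, Y, Z]

def twirl(rho):
    return sum(np.kron(s, s) @ rho @ np.kron(s, s).conj().T
               for s in paulis) / 4

rng = np.random.default_rng(0)
M = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
rho = M @ M.conj().T
rho /= np.trace(rho)                     # random two-qubit state

k0, k1 = np.eye(2)[:, [0]], np.eye(2)[:, [1]]
bell = np.hstack([(np.kron(k0, k0) + np.kron(k1, k1)) / 2**0.5,
                  (np.kron(k0, k0) - np.kron(k1, k1)) / 2**0.5,
                  (np.kron(k0, k1) + np.kron(k1, k0)) / 2**0.5,
                  (np.kron(k0, k1) - np.kron(k1, k0)) / 2**0.5])

rho_t = twirl(rho)
D = bell.conj().T @ rho_t @ bell
assert np.allclose(D, np.diag(np.diag(D)), atol=1e-10)

# Z-basis QBER element with p_z = 1/2 absorbed into Gamma
G_QZ = 0.25 * (np.kron(k0 @ k0.T, k1 @ k1.T)
               + np.kron(k1 @ k1.T, k0 @ k0.T))
assert np.isclose(np.trace(G_QZ @ rho), np.trace(G_QZ @ rho_t))
\end{verbatim}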
\subsection{Results}
We numerically check the difference between $F$ and $F^\prime$. The results are summarized in Table \ref{table:qubitbb84_results}. Since the numerical method is capable of producing both an upper bound and lower bound for $F$ and $F^\prime$, it is straightforward to determine when $F>F^\prime$. We claim $F=F^\prime$ when the bounds for $F$ and $F^\prime$ overlap. In some cases, $F=F^\prime$ can be analytically argued, by proving the validity of Eq.~\eqref{eq:second_condition} (see Sec. \ref{subsec:reduction}), as we do below.
To check the validity of Eq.~\eqref{eq:second_condition}, we need to look at the constraints that define $S(\vec{\gamma})$. There are two types of constraints. First, we have the constraint obtained from the source-replacement scheme, which is of the form $\text{Tr}_B(\rho) = \sigma_A$ and represents the fact that in prepare-and-measure protocols, Alice's state is known and never leaves her lab. For the qubit BB84 protocol, one can take $\sigma_A = I_A /2$ (see Appendix \ref{appendix:qubitBB84}). Since $\text{Tr}_B(\rho_{AB}) = I_A / 2$ is always true for a Bell-diagonal state, this constraint is always satisfied by $\mathcal{T}(\rho)$.
The remaining constraints are obtained from the acceptance test, and are of the form $\text{Tr}(\Gamma_k \rho ) = \gamma_k$. Thus, to check the validity of Eq.~\eqref{eq:second_condition}, we check whether $\text{Tr}(\Gamma_k \mathcal{T}(\rho)) = \text{Tr}(\mathcal{T}^\dagger(\Gamma_k) \rho ) =\gamma_k \quad \forall \rho$.
\begin{table*}[t!]
\begin{tabular}{|c|c|c|c|c|c|}
\hline
\multicolumn{6}{|c|}{Channel} \\
\hline
& & Misalignment & Depolarization & Misalignment + & Misalignment+ \\
& & & & Depolarization & Depolarization+ \\
& & & & & $\Phi_\text{replace}$ \\
\hline
& Coarse-grained & $=$ & $=$ & $= $& $=$ \\
Constraints& Sifted fine-grained & $=$ & $=$ & $=$& $>$ \\
& Fine-grained & $=$ & $=$ & $>$ & $>$ \\
\hline
\end{tabular}
\caption{Relation between $F$ and $F^\prime$ for Qubit BB84 protocol. Results are based upon the upper and lower bound to the optimization obtained from the numerical method \cite{Winick2018}. All equality cases can be explained by arguing a reduction to Bell-diagonal states.}
\label{table:qubitbb84_results}
\end{table*}
\begin{itemize}
\item \textbf{Coarse-grained statistics :} In this case, $\Gamma_k$ is the POVM element corresponding to QBER and gain in each basis. It is therefore easy to check with a simple calculation that $\mathcal{T}^\dagger (\Gamma_k) = \Gamma_k$ which implies $\text{Tr}(\Gamma_k \mathcal{T}(\rho)) = \text{Tr}(\Gamma_k \rho)$. Thus, for coarse-grained constraints the state shared between Alice and Bob can be assumed to be Bell-diagonal, and $F=F^\prime$. This explains the coarse-grained row in Table \ref{table:qubitbb84_results}.
\item \textbf{Sifted Fine-grained statistics:} Let us turn to the case of ``sifted fine-grained'' constraints. For the $Z$ basis, let the POVMs that make up the constraints be given by $\Gamma_{HH},\Gamma_{HV}, \Gamma_{VH}, \Gamma_{VV}$ (with similar expressions for the $X$ basis). In general, these POVMs are not invariant under $\mathcal{T}^\dagger$, and thus $\text{Tr}(\Gamma_k \mathcal{T}(\rho)) \neq \text{Tr}(\Gamma_k \rho)$. However, one can find that
\begin{comment}
\begin{equation} \label{eq:symmetry_POVMS}
\begin{aligned}
(\sigma_i \otimes \sigma_i)^\dagger \{ \Gamma_{HH}, \Gamma_{VV} \} (\sigma_i \otimes \sigma_i) \in \{ \Gamma_{HH}, \Gamma_{VV} \} \\
(\sigma_i \otimes \sigma_i)^\dagger \{ \Gamma_{HV}, \Gamma_{VH} \} (\sigma_i \otimes \sigma_i) \in \{ \Gamma_{VH}, \Gamma_{HV} \} \\
(\sigma_i \otimes \sigma_i)^\dagger \{ \Gamma_{++}, \Gamma_{--} \} (\sigma_i \otimes \sigma_i) \in \{ \Gamma_{++}, \Gamma_{--} \} \\
(\sigma_i \otimes \sigma_i)^\dagger \{ \Gamma_{+-}, \Gamma_{-+} \} (\sigma_i \otimes \sigma_i) \in \{ \Gamma_{+-}, \Gamma_{-+} \}
\end{aligned}
\end{equation}
\end{comment}
\begin{equation} \label{eq:symmetry_POVMS}
\begin{aligned}
\mathcal{T}^\dagger ( \Gamma_{HH}) = \mathcal{T}^\dagger( \Gamma_{VV} ) &= \frac{1}{2} (\Gamma_{HH}+\Gamma_{VV}), \\
\mathcal{T}^\dagger( \Gamma_{HV}) = \mathcal{T}^\dagger ( \Gamma_{VH} ) &= \frac{1}{2} ( \Gamma_{VH}+ \Gamma_{HV}), \\
\mathcal{T}^\dagger ( \Gamma_{++} ) = \mathcal{T}^\dagger( \Gamma_{--} ) &= \frac{1}{2}( \Gamma_{++} + \Gamma_{--} ), \\
\mathcal{T}^\dagger ( \Gamma_{+-} ) = \mathcal{T}^\dagger( \Gamma_{-+} ) &= \frac{1}{2}( \Gamma_{+-}+ \Gamma_{-+} ).
\end{aligned}
\end{equation}
Therefore, in this case, one can claim a reduction to Bell-diagonal as long as the statistics obey certain symmetries. That is, if one obtains statistics satisfying $\gamma_{HH}=\gamma_{VV}, \gamma_{HV}=\gamma_{VH}, \gamma_{++}=\gamma_{--}, \gamma_{+-}=\gamma_{-+}$, then
\begin{equation}
\begin{aligned}
\text{Tr}(\Gamma_k \rho) = \gamma_k \implies &\text{Tr}(\Gamma_k \mathcal{T}(\rho) ) = \gamma_k, \\
\text{even when } & \mathcal{T}^\dagger (\Gamma_k) \neq \Gamma_k.
\end{aligned}
\end{equation}
The statistics obey this symmetry when the channel consists of any combination of misalignment and depolarization, and therefore for these channel models we again obtain $F=F^\prime$ due to the reduction to Bell-diagonal states. Introducing the additional replacement channel $\Phi_\text{replace}$ destroys this symmetry, and we obtain $F\neq F^\prime$. This explains the sifted fine-grained row in Table \ref{table:qubitbb84_results}.
\item \textbf{Fine-grained statistics :} In this case, in addition to Eq.~\eqref{eq:symmetry_POVMS}, it is possible to show that each POVM in the off-diagonal block of Table \ref{table:qubit_constraints} is mapped to the same off-diagonal block by $\mathcal{T}^\dagger$. That is,
\begin{equation}
\begin{aligned}
\mathcal{T}^\dagger( \Gamma_{H+}) &= \mathcal{T}^\dagger(\Gamma_{H-}) = \mathcal{T}^\dagger(\Gamma_{V+}) = \mathcal{T}^\dagger(\Gamma_{V-}) \\
&= \frac{1}{4} (\Gamma_{H+}+\Gamma_{H-}+\Gamma_{V+}+\Gamma_{V-}) \\
\mathcal{T}^\dagger( \Gamma_{+H}) &= \mathcal{T}^\dagger(\Gamma_{+V}) = \mathcal{T}^\dagger(\Gamma_{-H}) = \mathcal{T}^\dagger(\Gamma_{-V}) \\
&= \frac{1}{4} (\Gamma_{+H}+\Gamma_{+V}+\Gamma_{-H}+\Gamma_{-V})
\end{aligned}
\end{equation}
That is, if one obtains statistics satisfying $\gamma_{HH}=\gamma_{VV}, \gamma_{HV}=\gamma_{VH}, \gamma_{++}=\gamma_{--}, \gamma_{+-}=\gamma_{-+}$ along with $\gamma_{H+} = \gamma_{H-} = \gamma_{V+} = \gamma_{V-}$, and $\gamma_{+H} = \gamma_{+V} = \gamma_{-H} = \gamma_{-V}$, then we again can claim that
\begin{equation}
\begin{aligned}
\text{Tr}(\Gamma_k \rho) = \gamma_k \implies &\text{Tr}(\Gamma_k \mathcal{T}(\rho) ) = \gamma_k, \\
\text{even when } &
\mathcal{T}^\dagger (\Gamma_k) \neq \Gamma_k.
\end{aligned}
\end{equation}
This is the case when the channel only contains depolarization.
For misalignment only, it has already been shown that the fine-grained constraints imply that Eve's system factors off, i.e.\ she holds a state that is independent of the Alice-Bob state \cite{wang2021Numerical}. Since Eve's quantum system factors off, $F=F^\prime$ follows from the fact that $W$ and $Z$ are independent random variables for each basis, i.e.\ $S(Z | \tilde{A} \tilde{B} W) = S(Z | \tilde{A} \tilde{B} )$. For the remaining two cases, we find that the optimal values of the two objective functions are unequal, and no reduction to the Bell-diagonal case is possible. This explains the fine-grained row of Table \ref{table:qubitbb84_results}.
\end{itemize}
We plot $F, F^\prime$ corresponding to the last two columns of Table \ref{table:qubitbb84_results} in Figs. \ref{fig:misalign_depol} and \ref{fig:misalign_depol_perturb}.
\begin{figure}
\caption{$F,F^\prime$ for a channel with misalignment and depolarization. We find that $F=F^\prime$ for coarse-grained and sifted fine-grained constraints, while $F>F^\prime$ for fine-grained constraints. The plot corresponds to depolarization probability $q=0.1$, and is plotted against the misalignment angle $\theta$.}
\label{fig:misalign_depol}
\end{figure}
\begin{figure}
\caption{$F,F^\prime$ for a channel with misalignment, depolarization and replacement channel. We find that $F=F^\prime$ for coarse-grained constraints, while $F>F^\prime$ for sifted fine-grained and fine-grained constraints. The state leaving Alice's lab is replaced with the signal state corresponding to $H$, with probability $\lambda = 0.2$. The plot corresponds to depolarization probability $q=0.1$, and is plotted against the misalignment angle $\theta$. }
\label{fig:misalign_depol_perturb}
\end{figure}
\section{WCP Decoy State BB84}
\label{sec:WCP_Decoy}
In this section, we present results for the WCP decoy-state BB84 protocol \cite{lim2014Concise,lo2005Decoy,rusca2018Finitekey,rice2009Numerical,hwang2003Quantum,ma2005Practical} in the same manner. The key rate calculations, protocol description, channel simulation, and decoy analysis are identical to those of \cite{wang2021Numerical}. Therefore, these aspects will be only briefly described in this work. The only difference lies in the modification of the Kraus operators according to Eq.~\eqref{eq:newkraus}, and the inclusion of the replacement channel $\Phi_{\text{replace}}$ in the channel simulation.
\subsection{Protocol Specification}
Alice prepares and sends a phase-randomized weak coherent pulse (WCP) in one of the polarization modes $H,V,A,D$ with equal probability, choosing the ``signal intensity'' with probability close to one, and otherwise one of the ``decoy intensities''. Bob implements a passive basis choice with equal probability. We use a squashing model on Bob's side \cite{Gittsovich2014} to describe his measurements, and Bob's squashed POVMs can be found in Appendix \ref{appendix:decoyprotocol}. Alice and Bob announce a small fraction of their data, and perform the acceptance test. If the protocol accepts, Alice and Bob announce their bases, and throw away the signals where they measured in different bases, or where Bob got a no-detection event. Alice then maps her measurement outcomes to the raw key, followed by error correction (Cascade) and privacy amplification. For the descriptions of the exact Kraus operators of the protocol, we refer the reader to Appendix \ref{appendix:decoyprotocol}.
\begin{table}[h!]
\centering
\begin{tabular}{|c|c| ccccc|}
\hline
\multicolumn{7}{|c|}{Bob Measures} \\
\hline
& & H & V & + & - & $\emptyset$ \\ \hline
& H & \mk{$ \gamma^{\mu_i}_{HH}$ } & \mk{$\gamma^{\mu_i}_{HV}$} & $\gamma^{\mu_i}_{H+}$ & $\gamma^{\mu_i}_{H-}$ & $\gamma^{\mu_i}_{H \emptyset}$ \\
Alice& V & \mk{$\gamma^{\mu_i}_{VH}$} & \mk{$\gamma^{\mu_i}_{VV}$} & $\gamma^{\mu_i}_{V+}$ & $\gamma^{\mu_i}_{V-}$ & $\gamma^{\mu_i}_{V \emptyset}$ \\
Sends&+& $\gamma^{\mu_i}_{+H}$ & $\gamma^{\mu_i}_{+V}$ & \mk{$\gamma^{\mu_i}_{++}$ } & \mk{$\gamma^{\mu_i}_{+-}$} & $\gamma^{\mu_i}_{+ \emptyset}$ \\
& -& $\gamma^{\mu_i}_{-H}$ & $\gamma^{\mu_i}_{-V}$ & \mk{$\gamma^{\mu_i}_{-+}$} & \mk{$\gamma^{\mu_i}_{--}$ } & $\gamma^{\mu_i}_{- \emptyset}$ \\
\hline
\end{tabular}
\caption{Format of fine-grained statistics obtained for the decoy-state BB84 protocol. The rows denote the state sent by Alice, and the columns denote the measurement outcome measured by Bob. One such table is obtained for each intensity used by Alice. $H$ and $A$ correspond to measurement outcome $0$, while $V$ and $D$ correspond to measurement outcome $1$. $\gamma^{\mu_i}_{x,y}$ denotes the probability of Alice obtaining outcome $x$ and Bob obtaining outcome $y$, given intensity $\mu_i$ was used.}
\label{table:decoybb84_constraints}
\end{table}
The fine-grained statistics obtained by Alice and Bob are given by Table \ref{table:decoybb84_constraints}. Again, as in Sec. \ref{sec:qubit_bb84}, we use the phrase ``fine-grained constraints'' to refer to the case where all the entries in Table \ref{table:decoybb84_constraints} are used for the acceptance test, ``sifted fine-grained'' when only the entries marked in red are used, and ``coarse-grained'' constraints when only the (unnormalized) QBER and Gain constraints for each basis are used. Additionally, we use the constraints from source-replacement that characterize Alice's system for prepare and measure protocols.
The exact manner in which statistics from a channel consisting of misalignment and loss are computed is identical to the procedure described in Ref.~\cite[Appendix C]{wang2021Numerical}. We will not repeat those calculations here. We include an additional ``replacement channel'' $\Phi_{\text{replace}}$ which replaces the state leaving Alice's lab with the signal state corresponding to $H$ with probability $\lambda = 0.2$. This is interesting since it breaks symmetries in observed statistics. If we include the replacement channel, then each row $\vec{\gamma}^{\mu_i}_j$ of Table \ref{table:decoybb84_constraints} (computed for loss and misalignment), is replaced by $(1-\lambda)\vec{\gamma}^{\mu_i}_j + \lambda\vec{\gamma}^{\mu_i}_H$ (since Alice sends each state with equal probability).
\subsection{Results}
\begin{table*}
\centering
\begin{tabular}{|c|c|c|c|c|c|}
\hline
\multicolumn{6}{|c|}{Channel} \\
\hline
& & Loss & Misalignment & Loss+Misalignment & Loss+Misalignment+ \\
& & & & & $\Phi_\text{replace}$ \\
\hline
& Coarse-grained & $=$ & $=$ & $= $& $=$ \\
Constraints& Sifted fine-grained & $=$ & $=$ & $=$& $>$ \\
& Fine-grained & $=$ & $=$ & $>$ & $>$ \\
\hline
\end{tabular}
\caption{Relation between $F$ and $F^\prime$ for Decoy BB84 protocol. Results are based upon the upper and lower bound to the optimization obtained from the numerical method \cite{Winick2018}. Note that this table is similar to Table \ref{table:qubitbb84_results} obtained for qubit BB84, suggesting the fact that similar arguments can be made for understanding this table in both cases.}
\label{table:decoybb84_results}
\end{table*}
The optimization problem for decoy protocols is solved by obtaining bounds on the single photon yields as
\begin{equation}
\gamma^{1,L}_{y | x} \leq \gamma^1_{y|x} \leq \gamma^{1,U}_{y|x}, \quad \forall x,y
\end{equation}
where $\gamma^1_{y|x}$ denotes the probability of Bob obtaining outcome $y$, given Alice sent signal $x$ and $1$ photon. One can then compute lower and upper bounds on $\gamma^1_{x,y}$, by using $\gamma^1_{x,y} = \Pr(x) \gamma^1_{y|x}$, where $\Pr(x)$ denotes the probability of Alice sending signal $x$.
The optimization problem \cite{ma2005Practical,lo2005Decoy,rice2009Numerical,wang2021Numerical} is then given by (see Appendix \ref{appendix:decoy})
\begin{equation} \label{eq:decoy_opt}
\begin{aligned}
F &= \min_{\rho \in S_1^\prime } f(\rho), \\
S_1^\prime & = \{ \rho \in H_+ | \gamma^{1,L}_k \leq \text{Tr}(\Gamma_k \rho) \leq \gamma^{1,U}_k, \forall k\}
\end{aligned}
\end{equation}
where $H_+$ denotes the set of positive semidefinite operators, $S^\prime_1$ is the set of density operators compatible with the observed statistics, and $k$ depends on the exact nature of the coarse-graining.
We numerically compute the difference between $F= \min_{\rho \in S_1^\prime } f(\rho)$ and $F^\prime = \min_{\rho \in S_1^\prime } f^\prime(\rho)$ for all our channel models, and various types of constraints. The results are summarized in Table \ref{table:decoybb84_results}.
Since, after squashing, the single-photon contribution to the objective function involves both Alice and Bob holding qubits (or vacuum), our intuition from the qubit BB84 picture can be used to understand the results in Table \ref{table:decoybb84_results}. We believe a more rigorous justification can be made along the same lines as for the qubit case; however, we do not carry this out in this work. For coarse-grained constraints, we expect symmetry arguments to allow us to restrict to Bell-diagonal states, in which case announcing $W$ provides no new information to Eve. For the ``sifted fine-grained'' and ``fine-grained'' cases, we expect symmetry arguments to not work in general, but to allow a restriction to Bell-diagonal states if the observations are also symmetric, as seen in Sec. \ref{sec:qubit_bb84}. We plot $F, F^\prime$ corresponding to the last two columns of Table \ref{table:decoybb84_results} in Figs. \ref{fig:misalign_loss} and \ref{fig:misalign_loss_perturb}.
\begin{figure}
\caption{$F,F^\prime$ for a channel with misalignment and loss. We find that $F=F^\prime$ for coarse-grained and sifted fine-grained constraints, while $F>F^\prime$ for fine-grained constraints. The plot corresponds to a misalignment angle $\theta$ given by $\sin^2(\theta)=0.06$, and three intensities $\mu_1=0.5, \mu_2=0.1, \mu_3= 0.001$, with the first intensity used to generate the key.}
\label{fig:misalign_loss}
\end{figure}
\begin{figure}
\caption{$F,F^\prime$ for a channel with misalignment, loss and replacement channel. We find that $F=F^\prime$ for coarse-grained, while $F>F^\prime$ for sifted fine-grained and fine-grained constraints. The state leaving Alice's lab is replaced with the signal state corresponding to $H$ with probability $\lambda=0.2$.
The plot corresponds to a misalignment angle $\theta$ given by $\sin^2(\theta)=0.06$, and three intensities $\mu_1=0.5, \mu_2=0.1, \mu_3= 0.001$, with the first intensity used to generate the key.}
\label{fig:misalign_loss_perturb}
\end{figure}
\textbf{Effect of zero-photon contribution: } The above analysis is done for the case where we only keep the single-photon contribution to the key in Eq.~\eqref{eq:decoy_opt} (Eq.~\eqref{eq:objfunc_diffn}). Let us consider the zero-photon contribution to the key. In this case, note that since no signal left Alice's lab, Eve cannot know anything about Alice's key bit. Therefore, the zero-photon contribution to $F$ is equal to $p^{(0)}_{\text{pass}}$, where $p^{(0)}_{\text{pass}}$ is the probability of zero-photon event leading to detection and passing sifting.
Moreover, if Alice sends zero photons, the state giving rise to Bob's detection must be assumed to be known to Eve. Therefore Eve has perfect knowledge of Bob's data. In this case, if $W$ is announced, Eve has perfect knowledge of Alice's data as well. Therefore, the zero-photon contribution to $F^\prime$ is \textit{always zero}.
Therefore, in this case $F > F^\prime$ always, regardless of the type of constraints used.
\section{Conclusion}
\label{sec:conclusion}
In this work, we pointed out a critical flaw in the analysis of QKD protocols using Cascade, that stems from an improper consideration of the classical communication during Cascade. This leads to the computation of secret key rates that are not justified. We proposed a simple and elegant fix, involving the construction of a convenient virtual protocol that cannot leak less information to Eve than the one using Cascade. Therefore, its key rate can be safely used in any protocol using Cascade. Our approach is easy and straightforward to implement in the numerical framework of \cite{wang2021Numerical}. We applied our solution to various implementations of the BB84 protocol, and compared our results with those of earlier, incorrect approaches. In many cases, we found that the numerical value of the key rate does not change, indicating that the communication from Bob to Alice does not leak additional information to Eve. A number of such cases were shown to arise due to symmetries in the protocol, and in the observed statistics.
All code used in this work will be made available soon.
\appendix
\section{Protocol Descriptions}
\label{appendix:protocol_descriptions}
\subsection{Qubit BB84 }
\label{appendix:qubitBB84}
Using the source-replacement scheme \cite{curty2004Entanglement}, the protocol can be equivalently described as Alice creating the Bell-state $\ket{\psi}_{AA^\prime} = \ket{\phi_+} = \frac{\ket{00}+\ket{11}}{\sqrt{2}}$, and sending $A^\prime$ to Bob. We model misalignment as a rotation of angle $\theta$ about the $Y$ axis on $A^\prime$, with
\begin{equation}
\begin{aligned}
U(\theta) &= I_A \otimes \begin{pmatrix}
\cos(\theta) & -\sin(\theta) \\
\sin(\theta) & \cos(\theta)
\end{pmatrix}, \\
\mathcal{E}_{\text{misalign}} (\rho) &= U(\theta) \rho U(\theta)^\dagger.
\end{aligned}
\end{equation}
Depolarization is modelled as a map
\begin{equation}
\mathcal{E}_\text{depol} (\rho)= (1-q) \rho+q \text{Tr}_{A^\prime} (\rho) \otimes \frac{I_B}{2} .
\end{equation}
The state on which statistics are computed is given by $\rho_{AB} = \mathcal{E}_\text{depol} ( \mathcal{E}_{\text{misalign}}(\ket{\phi_+} \bra{\phi_+}) )$. The entries in Table \ref{table:qubit_constraints} can be computed via $\gamma_i = \text{Tr} (\Gamma_i \rho_{AB})$.
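For concreteness, the following Python sketch reproduces this construction for illustrative parameter values ($\theta$ and $q$ chosen arbitrarily here) and evaluates one example entry of Table \ref{table:qubit_constraints}; it is not the code used for our key rate computations.
\begin{verbatim}
# Channel simulation for qubit BB84: misalignment on A' followed
# by depolarization; theta and q are illustrative values.
import numpy as np

theta, q = 0.1, 0.1
k0, k1 = np.eye(2)[:, [0]], np.eye(2)[:, [1]]
phi = (np.kron(k0, k0) + np.kron(k1, k1)) / 2**0.5
rho0 = phi @ phi.T

R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
U = np.kron(np.eye(2), R)
rho_mis = U @ rho0 @ U.T

# trace out A', then tensor with the maximally mixed state
rho_A = rho_mis.reshape(2, 2, 2, 2).trace(axis1=1, axis2=3)
rho_AB = (1 - q) * rho_mis + q * np.kron(rho_A, np.eye(2) / 2)

# example table entry: gamma_HV = Tr(Gamma_HV rho_AB)
P_H, P_V = 0.5 * (k0 @ k0.T), 0.5 * (k1 @ k1.T)
print(np.trace(np.kron(P_H, P_V) @ rho_AB))
\end{verbatim}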
Both Alice and Bob perform measurements on qubit systems, and their POVMs are given by $\{ P_{(Z,0)} = p_z \ket{0} \bra{0}, P_{(Z,1)} = p_z \ket{1}\bra{1}, P_{(X,0)} = p_x \ket{+} \bra{+} , P_{(X,1)} = p_x \ket{-} \bra{-} \}$, with $p_z=p_x=\frac{1}{2}$. In addition, Alice implements the keymap by simply copying the measurement outcome to the key register.
From the discussion in Appendix A of \cite{Lin2019}, we can remove certain registers created by the generic form of the Kraus operators in Eq.~\eqref{eq:generic_kraus}. In particular, we do not need to consider the registers that store Alice and Bob's outcome, and we only need one copy of the announcement register.
In this case, the general form for the Kraus operators from Eq.~\eqref{eq:generic_kraus} now becomes
\begin{equation} \label{eq:new_generic_kraus}
\begin{aligned}
K_{\alpha}& = \sum_x \ket{r(\alpha,\alpha,x) }_Z \otimes \sqrt{ \sum_{y} P^A_{(\alpha,x)} \otimes P^B_{(\alpha,y)} } \otimes \ket{\alpha}_{\tilde{A}},
\end{aligned}
\end{equation}
while Eq.~\eqref{eq:newkraus} becomes
\begin{equation} \label{eq:new_generic_newkraus}
\begin{aligned}
K^\prime_{\alpha,w}& = \sum_x \ket{r(\alpha,\alpha,x) }_Z \otimes \sqrt{ \sum_{ \substack{y \\ x\oplus y = w}} P^A_{(\alpha,x)} \otimes P^B_{(\alpha,y)} } \\
& \otimes \ket{\alpha}_{\tilde{A}} \otimes \ket{w}_W,
\end{aligned}
\end{equation}
where $\alpha,\beta$ denotes basis choice, and $x,y$ denotes measurement outcomes. Alice and Bob's POVMs are given by $P^A = \{P^A_{(\alpha,x)}\}$, and $P^B = \{P^B_{(\alpha,y)}\}$. Since Alice and Bob throw away all signals that have basis mismatch, the set of operators generating the $\mathcal{G}$ map can be given by $\{ K_{\alpha} \} $, and the set of operators generating $\mathcal{G}^\prime$ is given by $\{K^\prime_{\alpha,w}\}$.
The $\mathcal{Z}$ map has Kraus operators $\{ Z_i\} $ given by $Z_i = \ket{i} \bra{i}_Z \otimes I_{AB \tilde{A} }$.
Therefore, the final Kraus operators for $F$ are given by
\begin{equation}
\begin{aligned}
K_Z &= \left[ \begin{pmatrix} 1 \\ 0 \end{pmatrix}_Z \otimes \sqrt{p_z} \begin{pmatrix} 1 & \\ & 0 \end{pmatrix}_A + \begin{pmatrix} 0 \\ 1 \end{pmatrix}_Z \otimes \sqrt{p_z} \begin{pmatrix} 0 & \\ & 1 \end{pmatrix}_A \right] \\
& \otimes \sqrt{p_z} \begin{pmatrix} 1 & \\ & 1 \end{pmatrix}_B \otimes \begin{pmatrix} 1 \\ 0 \end{pmatrix}_{\tilde{A}}, \\
K_X &= \left[ \begin{pmatrix} 1 \\ 0 \end{pmatrix}_Z \otimes \sqrt{\frac{p_x}{2} } \begin{pmatrix} 1 & 1 \\ 1 & 1 \end{pmatrix}_A + \begin{pmatrix} 0 \\ 1 \end{pmatrix}_Z \otimes\sqrt{ \frac{p_x}{2} } \begin{pmatrix} 1 & -1 \\ -1 & 1 \end{pmatrix}_A \right] \\
& \otimes \sqrt{p_x} \begin{pmatrix} 1 & \\ & 1 \end{pmatrix}_B \otimes \begin{pmatrix} 0 \\ 1 \end{pmatrix}_{\tilde{A}},
\end{aligned}
\end{equation}
and
\begin{equation}
\begin{aligned}
Z_1 &= \begin{pmatrix} 1 & \\ & 0 \end{pmatrix} \otimes \mathbb{I}_{\dim_A \times\dim_B \times 2}, \\
Z_2 &= \begin{pmatrix} 0 & \\ & 1 \end{pmatrix} \otimes \mathbb{I}_{\dim_A \times\dim_B \times 2}.
\end{aligned}
\end{equation}
The operators for $F^\prime$ can be constructed from Eq.~\eqref{eq:new_generic_newkraus}.
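As an illustration of how Eq.~\eqref{eq:new_generic_newkraus} is turned into matrices, the Python sketch below builds the operators $K^\prime_{\alpha,w}$ for the qubit protocol, assuming the register ordering $Z \otimes AB \otimes \tilde{A} \otimes W$ and the key map $r(\alpha,\alpha,x)=x$; it is a sketch of the construction, not the solver's internal code.
\begin{verbatim}
# Build K'_{alpha,w} of Eq. (new_generic_newkraus) for qubit BB84,
# register ordering Z (x) AB (x) Atilde (x) W, key map r = x.
import numpy as np

pz = px = 0.5
ket = lambda i, d: np.eye(d)[:, [i]]     # |i> as a column vector

PA = {('Z', 0): pz * np.diag([1.0, 0.0]),
      ('Z', 1): pz * np.diag([0.0, 1.0]),
      ('X', 0): 0.5 * px * np.array([[1.0, 1.0], [1.0, 1.0]]),
      ('X', 1): 0.5 * px * np.array([[1.0, -1.0], [-1.0, 1.0]])}
PB = dict(PA)                            # same qubit POVM for Bob
idx = {'Z': 0, 'X': 1}

def psd_sqrt(M):
    vals, vecs = np.linalg.eigh(M)
    return (vecs * np.sqrt(np.clip(vals, 0, None))) @ vecs.T

def K_prime(alpha, w):
    op = np.zeros((32, 4))
    for x in (0, 1):
        y = x ^ w                        # y is fixed by x xor y = w
        block = psd_sqrt(np.kron(PA[(alpha, x)], PB[(alpha, y)]))
        op += np.kron(np.kron(np.kron(ket(x, 2), block),
                              ket(idx[alpha], 2)), ket(w, 2))
    return op

kraus = [K_prime(a, w) for a in ('Z', 'X') for w in (0, 1)]
# sums to 0.5 * I_4: basis-mismatch events are sifted out
print(sum(K.T @ K for K in kraus).round(6))
\end{verbatim}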
\subsection{WCP Decoy BB84}
\label{appendix:decoyprotocol}
Along with source-replacement, we use the squashing model from \cite{Gittsovich2014} to squash Bob's system to three dimensions. Since we only generate key from the single-photon pulses, Alice's POVMs are given by $\{ P_{(Z,0)} = p_z \ket{0} \bra{0}, P_{(Z,1)} = p_z \ket{1}\bra{1}, P_{(X,0)} = p_x \ket{+} \bra{+} , P_{(X,1)} = p_x \ket{-} \bra{-} \}$, with $p_z=p_x=\frac{1}{2}$. Bob's POVMs are given by
\begin{equation}
\begin{aligned}
P^B_{(Z,0)} &= p_z \begin{pmatrix} 0 & 0 &0 \\ 0& 1 & 0\\ 0&0 & 0 \end{pmatrix} , \quad P^B_{(Z,1)} = p_z \begin{pmatrix} 0 & 0& 0\\ 0& 0 & 0\\ 0&0 & 1 \end{pmatrix}, \\
P^B_{(X,0)} &= \frac{p_x}{2} \begin{pmatrix} 0&0 & 0 \\ 0&1 & 1 \\ 0& 1 & 1 \end{pmatrix} , \quad P^B_{(X,1)} = \frac{p_x}{2} \begin{pmatrix} 0&0 &0 \\ 0&1 & -1 \\ 0 & -1 & 1 \end{pmatrix}, \\
P^B_{\bot} &= \begin{pmatrix} 1 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix},
\end{aligned}
\end{equation}
with $p_x=p_z= \frac{1}{2}$.
Here, the first column corresponds to the vacuum subspace, while the second and third column make up the qubit subspace.
Again, from the discussion in \cite{Lin2019}, we can remove certain registers created by the generic form of the Kraus operators in Eq.~\eqref{eq:generic_kraus}. After removing these registers, the form of the Kraus operators is given by Eq.~\eqref{eq:new_generic_kraus}.
Therefore, the final Kraus operators for $F$ are given by
\begin{comment}
\begin{equation}
\begin{aligned}
K_Z &= \left[ \begin{pmatrix} 1 \\ 0 \end{pmatrix}_Z \otimes \begin{pmatrix} 1 & & & \\ & 0 & & \\
& & 0 & \\
& & & 0 \end{pmatrix}_A + \begin{pmatrix} 0 \\ 1 \end{pmatrix}_Z \otimes \begin{pmatrix} 0 & & & \\ & 1 & & \\
& & 0 & \\
& & & 0 \end{pmatrix}_A \right] \\
&\otimes \sqrt{p_z} \begin{pmatrix} 0 & & \\ & 1 & \\& & 1 \end{pmatrix}_B \otimes \begin{pmatrix} 1 \\ 0 \end{pmatrix}_{\tilde{A}}, \\
K_X &= \left[ \begin{pmatrix} 1 \\ 0 \end{pmatrix}_Z \otimes \begin{pmatrix} 0 & & & \\ & 0 & & \\
& & 1 & \\
& & & 0 \end{pmatrix}_A + \begin{pmatrix} 0 \\ 1 \end{pmatrix}_Z \otimes \begin{pmatrix} 0 & & & \\ & 0 & & \\
& & 0 & \\
& & & 1 \end{pmatrix}_A \right] \\
&\otimes \sqrt{p_x} \begin{pmatrix} 0 & & \\ & 1 & \\& & 1 \end{pmatrix}_B \otimes \begin{pmatrix} 0 \\ 1 \end{pmatrix}_{\tilde{A}},
\end{aligned}
\end{comment}
\begin{equation}
\begin{aligned}
K_Z &= \left[ \begin{pmatrix} 1 \\ 0 \end{pmatrix}_Z \otimes \sqrt{p_z} \begin{pmatrix} 1 & \\ & 0 \end{pmatrix}_A + \begin{pmatrix} 0 \\ 1 \end{pmatrix}_Z \otimes \sqrt{p_z} \begin{pmatrix} 0 & \\ & 1 \end{pmatrix}_A\right] \\
&\otimes \sqrt{p_z} \begin{pmatrix} 0 & & \\ & 1 & \\& & 1 \end{pmatrix}_B \otimes \begin{pmatrix} 1 \\ 0 \end{pmatrix}_{\tilde{A}}, \\
K_X &= \left[ \begin{pmatrix} 1 \\ 0 \end{pmatrix}_Z \otimes \sqrt{\frac{p_x}{2} } \begin{pmatrix} 1 & 1 \\ 1 & 1 \end{pmatrix}_A + \begin{pmatrix} 0 \\ 1 \end{pmatrix}_Z \otimes\sqrt{ \frac{p_x}{2} } \begin{pmatrix} 1 & -1 \\ -1 & 1 \end{pmatrix}_A \right] \\
&\otimes \sqrt{p_x} \begin{pmatrix} 0 & & \\ & 1 & \\& & 1 \end{pmatrix}_B \otimes \begin{pmatrix} 0 \\ 1 \end{pmatrix}_{\tilde{A}},
\end{aligned}
\end{equation}
and
\begin{equation}
\begin{aligned}
Z_1 &= \begin{pmatrix} 1 & \\ & 0 \end{pmatrix} \otimes \mathbb{I}_{\dim_A \times\dim_B \times 2}, \\
Z_2 &= \begin{pmatrix} 0 & \\ & 1 \end{pmatrix} \otimes \mathbb{I}_{\dim_A \times\dim_B \times 2}.
\end{aligned}
\end{equation}
The operators for $F^\prime$ can be constructed from Eq.~\eqref{eq:new_generic_newkraus}.
\section{Bell-diagonal States} \label{appendix:bell}
For Bell-diagonal states, we can show that the announcement of the location of errors $W$ leaks no new information to Eve, by showing that Eve's state is block-diagonal in $W$ anyway.
In the Bell-diagonal case, the state shared between Alice and Bob can be written as
\begin{equation}
\begin{aligned}
\rho_{AB}&=\lambda_0 \ket{\phi_+} \bra{\phi_+} + \lambda_1 \ket{\phi_-}\bra{\phi_-}\\
&+ \lambda_2 \ket{\psi_+}\bra{\psi_+}+\lambda_3 \ket{\psi_-}\bra{\psi_-},
\end{aligned}
\end{equation}
where $\ket{\phi_{+/-}}, \ket{\psi_{+/-}}$ are the Bell states, and the $\lambda_i$ are related to the quantum bit error rates (QBERs) via $Q_Z=\lambda_2+\lambda_3$, $Q_X=\lambda_1+\lambda_3$, and $Q_Y=\lambda_1+\lambda_2$. We can assume Eve holds a purification of the form
\begin{equation}
\begin{aligned}
\ket{\psi}_{ABE}&=\sqrt{\lambda_0} \ket{\phi_+} \ket{e_0} + \sqrt{\lambda_1} \ket{\phi_-} \ket{e_1} \\
&+ \sqrt{\lambda_2} \ket{\psi_+} \ket{e_2}+\sqrt{\lambda_3 }\ket{\psi_-} \ket{e_3},
\end{aligned}
\end{equation}
where $\ket{e_i}$ are orthonormal basis vectors for Eve's system.
Suppose Alice and Bob measure in the basis $\alpha\in \{X,Z\}$; the (unnormalized) state after the measurement is given by
\begin{equation}
\rho^{(\alpha)}_{XYE}=\sum_{x,y \in\{0,1\}} \ket{x} \bra{x} \otimes \ket{y} \bra{y} \otimes \rho^{(\alpha),x,y}_E ,
\end{equation}
where $\rho^{(\alpha),x,y}_E=\operatorname{Tr}_{AB} [(P^A_{(\alpha,x)} \otimes P^B_{(\alpha,y)} \otimes I_E ) \ket{\psi}\bra{\psi}_{ABE}]$.
A simple calculation shows that the joint support of $(\rho^{(\alpha),0,0}_E, \rho^{(\alpha),1,1}_E)$ is orthogonal to that of $(\rho^{(\alpha),1,0}_E, \rho^{(\alpha),0,1}_E)$. Thus, we can conclude that Eve can be assumed to always know the value of $x \oplus y$ for the entire raw key whenever the state shared between Alice and Bob is Bell-diagonal. In fact, the above discussion also holds when Alice and Bob measure in the $Y$ basis, and it is therefore also applicable to the six-state protocol.
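As a rough numerical illustration of this orthogonality argument (a sketch of our own; the weights $\lambda_i$ below are arbitrary and play no role in the security analysis), one can build the purification explicitly and check that the two supports are indeed orthogonal:
\begin{verbatim}
import numpy as np

# Sketch: Eve's conditional states for matching vs. mismatching outcomes
# have orthogonal supports when rho_AB is Bell-diagonal (arbitrary weights).
k0, k1 = np.eye(2)
bells = [(np.kron(k0, k0) + np.kron(k1, k1)) / np.sqrt(2),
         (np.kron(k0, k0) - np.kron(k1, k1)) / np.sqrt(2),
         (np.kron(k0, k1) + np.kron(k1, k0)) / np.sqrt(2),
         (np.kron(k0, k1) - np.kron(k1, k0)) / np.sqrt(2)]
lam = [0.85, 0.05, 0.05, 0.05]                      # arbitrary, sums to 1

# purification |psi>_ABE with a four-dimensional register for Eve
psi_ABE = sum(np.sqrt(l) * np.kron(b, np.eye(4)[i])
              for i, (l, b) in enumerate(zip(lam, bells)))

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
bases = {'Z': np.eye(2), 'X': H}                    # columns = basis vectors

def eve_state(B, x, y):
    # unnormalized rho_E given Alice got x and Bob got y in basis B
    P = np.kron(np.outer(B[:, x], B[:, x]), np.outer(B[:, y], B[:, y]))
    M = (np.kron(P, np.eye(4)) @ psi_ABE).reshape(4, 4)  # rows AB, cols E
    return M.T @ M.conj()                           # partial trace over AB

for name, B in bases.items():
    same = eve_state(B, 0, 0) + eve_state(B, 1, 1)
    diff = eve_state(B, 0, 1) + eve_state(B, 1, 0)
    assert np.allclose(same @ diff, 0), name        # orthogonal supports
\end{verbatim}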
\section{Twirling reduces key rate}
\label{appendix:twirling}
For our protocol, $f(\rho) = S(Z|E \tilde{A} \tilde{B})_{\rho}$. One can always expand
\begin{equation} \label{eq:S_expansion1}
\begin{aligned}
S(Z|E \tilde{A} \tilde{B}) &= \sum_{\alpha,\beta} \text{Prob}(\alpha,\beta) S(Z|E,\tilde{A}=\alpha, \tilde{B}=\beta) \\
& = \sum_\alpha \text{Prob}(\alpha) S(Z|E,\tilde{A}=\alpha),
\end{aligned}
\end{equation}
where we used the fact that $Z=\bot$ for basis mismatch, and those signals are thrown away. Now, let
\begin{equation}
\mathcal{T}(\rho) = \frac{1}{4} \sum_{i=1}^4 \rho_i = \frac{1}{4} \sum_{i=1}^4 (\sigma_i \otimes \sigma_i) \rho (\sigma_i \otimes \sigma_i)^\dagger .
\end{equation}
Then,
\begin{equation} \label{eq:S_expansion2}
\begin{aligned}
S(Z|E,\tilde{A}=\alpha)_{ \mathcal{T} (\rho)} &\leq \frac{1}{4} \sum_{i=1}^4 S(Z|E,\tilde{A}=\alpha)_{ \rho_i} \\
&= \frac{1}{4} \sum_{i=1}^4 S(Z|E,\tilde{A}=\alpha)_{\rho} \\
& = S(Z|E,\tilde{A}=\alpha)_{\rho},
\end{aligned}
\end{equation}
where we have used the linearity of $\mathcal{T}$ and the concavity of the conditional entropy in the first inequality. The second line follows from the fact that the action of the Pauli operators on $\rho$ either leaves the measurements performed ($X,Y,$ or $Z$) unchanged or flips the outcomes, neither of which can affect the entropy.
Combining Eqs.~\eqref{eq:S_expansion1} and \eqref{eq:S_expansion2}, we obtain Eq.~\eqref{eq:first_condition}, which is required for the reduction to Bell-diagonal states.
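A small numerical sketch of this reduction (our own illustration, assuming $\sigma_1,\ldots,\sigma_4$ denote the identity and the three Pauli matrices) confirms that $\mathcal{T}$ maps an arbitrary two-qubit state to a Bell-diagonal one:
\begin{verbatim}
import numpy as np

# Sketch: the twirl T(rho) = (1/4) sum_i (s_i x s_i) rho (s_i x s_i)^dag,
# with s_i in {I, X, Y, Z}, sends any two-qubit state to a Bell-diagonal one.
I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1., -1.]).astype(complex)
paulis = [I2, X, Y, Z]

def twirl(rho):
    return sum(np.kron(s, s) @ rho @ np.kron(s, s).conj().T
               for s in paulis) / 4

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
rho = A @ A.conj().T
rho /= np.trace(rho)                                # random two-qubit state

k0, k1 = np.eye(2)
bell = np.column_stack([                            # Bell basis as columns
    (np.kron(k0, k0) + np.kron(k1, k1)) / np.sqrt(2),
    (np.kron(k0, k0) - np.kron(k1, k1)) / np.sqrt(2),
    (np.kron(k0, k1) + np.kron(k1, k0)) / np.sqrt(2),
    (np.kron(k0, k1) - np.kron(k1, k0)) / np.sqrt(2)])

rho_t = bell.conj().T @ twirl(rho) @ bell           # twirled state, Bell basis
assert np.allclose(rho_t, np.diag(np.diag(rho_t)))  # diagonal
\end{verbatim}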
\section{Decoy Analysis}
\label{appendix:decoy}
The decoy analysis in this work is similar to that of \cite{wang2021Numerical}, with small changes in notation, and is included here for the sake of completeness. For a phase-randomized weak coherent pulse (WCP), the state is diagonal in photon number and follows the Poissonian probability distribution
\begin{equation}
p_{\mu_i} (n) = \frac{\mu_i^n}{n !} e^{-\mu_i}.
\end{equation}
For any statistic $\gamma_{y|x}$, one can then write
\begin{equation}
\gamma^{\mu_i}_{y|x} = \sum _n p_{\mu_i} (n) \gamma^n_{y|x} ,
\end{equation}
where $\gamma^{\mu_i}_{y|x}$ denotes the probability of Bob obtaining outcome $y$ given Alice sent signal $x$ and intensity $\mu_i$.
If one uses multiple intensities, then one can use the following set of inequalities,
\begin{equation}
\begin{aligned}
\gamma^{\mu_i}_{y|x} &\leq \sum_{n \leq N} p_{\mu_i}(n) \gamma^n_{y|x} + (1- \sum_{n \leq N} p_{\mu_i}(n) ), \\
\gamma^{\mu_i}_{y|x} &\geq \sum_{n \leq N} p_{\mu_i}(n) \gamma^n_{y|x},
\end{aligned}
\end{equation}
to obtain upper and lower bounds on $\gamma^1_{y|x}$:
\begin{equation}
\gamma^{1,L}_{y | x} \leq \gamma^1_{y|x} \leq \gamma^{1,U}_{y|x}, \quad \forall x,y.
\end{equation}
Noting that we can now compute bounds on $\gamma^{1}_{x,y} = \Pr(x) \gamma^{1}_{y|x}$, we obtain bounds on all the single-photon statistics for any particular coarse-graining, which we collect in the set
\begin{equation}
S_1^\prime = \{ \rho \in H_+ | \gamma^{1,L}_k \leq \text{Tr}(\Gamma_k \rho) \leq \gamma^{1,U}_k, \forall k\},
\end{equation}
where $\gamma^{1}_k$ denotes the $k$th statistic obtained from single-photon signals, and the range of $k$ depends on the exact nature of the coarse-graining.
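For concreteness, the bounds above can be obtained from a small linear program. The following sketch uses made-up intensities and a toy channel model (single-photon transmittance $\eta$), so the numbers are purely illustrative and are not taken from this work:
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog
from scipy.stats import poisson

mus = [0.5, 0.1, 0.001]          # signal + two decoy intensities (assumed)
eta = 0.05                       # assumed single-photon yield gamma^1
N_cut = 10                       # photon-number cutoff
# simulated observations gamma^{mu_i}_{y|x} for one fixed pair (x, y)
gamma_obs = np.array([1 - np.exp(-mu * eta) for mu in mus])

P = np.array([[poisson.pmf(n, mu) for n in range(N_cut + 1)] for mu in mus])
tail = 1.0 - P.sum(axis=1)       # 1 - sum_{n<=N} p_mu(n)

# variables gamma^0,...,gamma^N in [0,1]; the two inequalities above read
#   P g <= gamma_obs   and   gamma_obs - tail <= P g
A_ub = np.vstack([P, -P])
b_ub = np.concatenate([gamma_obs, tail - gamma_obs])
bounds = [(0, 1)] * (N_cut + 1)

c = np.zeros(N_cut + 1); c[1] = 1.0            # objective picks out gamma^1
lower = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds).fun
upper = -linprog(-c, A_ub=A_ub, b_ub=b_ub, bounds=bounds).fun
print(f"gamma^1 in [{lower:.4f}, {upper:.4f}] (true value {eta})")
\end{verbatim}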
\textbf{Objective function:} The state shared between Alice and Bob after source-replacement can be assumed to be block-diagonal in the photon number of Alice's signal, given by
\begin{equation}
\rho_{AA_SB} = \sum_n p_n \ket{n}\bra{n}_{A_S} \otimes \rho^{(n)}_{AB},
\end{equation} where $A$ and $B$ are Alice and Bob's systems, and $A_S$ is a shield system.
In such cases, the objective function can be shown to satisfy \cite{li2020Improving}
\begin{equation}
\min_{\rho \in S} f(\rho) = \sum_n p_n \min_{\rho^{(n)}_{AB} \in S^\prime_n} f(\rho^{(n)}_{AB}).
\end{equation}
For polarization encoded phase-randomized pulses, Eve can perform a photon-number-splitting attack \cite{lutkenhaus2002Quantum}. This implies that no key can be generated for $n > 1$ in the above equation. Therefore, we have
\begin{equation}
\begin{aligned} \label{eq:objfunc_diffn}
\min_{\rho \in S} f(\rho) &= p_0 \min_{\rho^{(0)}_{AB} \in S^\prime_0} f(\rho^{(0)}_{AB}) + p_1 \min_{\rho^{(1)}_{AB} \in S^\prime_1} f(\rho^{(1)}_{AB}) \\
&\geq p_1 \min_{\rho^{(1)}_{AB} \in S^\prime_1} f(\rho^{(1)}_{AB}).
\end{aligned}
\end{equation}
In this work, we will use the lower bound in the second line above.
Since one does not know the exact single-photon statistics, but only bounds on them from the decoy analysis, the optimization problem is given by
\begin{equation}
\begin{aligned}
F &= \min_{\rho \in S_1^\prime (G)} f(\rho), \\
S_1^\prime & = \{ \rho \in H_+ | \gamma^{1,L}_k \leq \text{Tr}(\Gamma_k \rho) \leq \gamma^{1,U}_k, \forall k\}
\end{aligned}
\end{equation}
\end{document}
\begin{document}
\begin{frontmatter}
\begin{fmbox}
\dochead{Research}
\title{Minimal ancilla mediated quantum computation}
\author[
addressref={aff1},
corref={aff1},
email={[email protected]}
]{\inits{TJ}\fnm{Timothy J} \snm{Proctor}}
\author[
addressref={aff1},
corref={aff1},
email={[email protected]}
]{\inits{V}\fnm{Viv} \snm{Kendon}}
\address[id=aff1]{
\orgname{School of Physics and Astronomy, E C Stoner Building, University of Leeds}
\street{},
\postcode{LS2 9JT}
\city{Leeds},
\cny{UK}
}
\end{fmbox}
\begin{abstractbox}
\begin{abstract}
Schemes of universal quantum computation in which the interactions between the computational elements, in a computational register, are mediated by some ancillary system are of interest due to their relevance to the physical implementation of a quantum computer. Furthermore, reducing the level of control required over both the ancillary and register systems has the potential to simplify any experimental implementation. In this paper we consider how to minimise the control needed to implement universal quantum computation in an ancilla-mediated fashion. Considering computational schemes which require no measurements and hence evolve by unitary dynamics for the global system, we show that when employing an ancilla qubit there are certain fixed-time ancilla-register interactions which, along with ancilla initialisation in the computational basis, are universal for quantum computation with no additional control of either the ancilla or the register. We develop two distinct models based on locally inequivalent interactions and we then discuss the relationship between these unitary models and the measurement-based ancilla-mediated models known as ancilla-driven quantum computation.
\end{abstract}
\begin{keyword}
\kwd{ancilla}
\kwd{universal gates}
\kwd{minimal control}
\kwd{quantum computation}
\kwd{quantum bus}
\kwd{ancilla-driven}
\kwd{ancilla-controlled}
\end{keyword}
\end{abstractbox}
\end{frontmatter}
\section{Introduction}
The original theoretical setting for quantum computation is the gate model \cite{feynman1985quantum} in which a global unitary that acts on a register of qubits, which computes the solution to some problem, is decomposed into a sequence of fundamental gates that are applied to the register. As in classical computation, it is desirable for these fundamental gates to be members of some \emph{finite} and \emph{universal} gate set, from which any global unitary can be composed up to arbitrary accuracy. There has been extensive research on such universal sets, and a significant example is the set composed of any entangling gate in conjunction with any universal set of single-qubit unitaries \cite{brylinski2002universal,bremner2002practical}. Furthermore, almost any two-qubit entangling gate is universal on its own provided that it can be applied to arbitrary pairs of qubits \cite{lloyd1995almost,barenco1995universal}. These results are of significant theoretical importance for the understanding of quantum computation.
\newline
\indent
However, the physical implementation of these models requires direct interactions between arbitrary pairs of register qubits and, often, direct application of single-qubit rotations and measurements. This is a huge practical challenge and most experimentally implemented or proposed schemes mediate the required multi-qubit interactions using some ancillary system. An example of such an ancilla-mediated scheme is the original Cirac-Zoller ion-trap gate \cite{cirac1995quantum}, where the ancilla in this case is the collective quantized motion of the ions. Further examples include superconducting qubits coupled to nitrogen-vacancy (NV) centres \cite{marcos2010coupling,lu2013quantum,qiu2014coupling,zhu2011coherent} or transmission line resonators \cite{wang2009coupling,wallraff2004strong}, spin qubits coupled via ancillary photonic qubits \cite{carter2013quantum,luxmoore2013interfacing} and the coupling of a Cooper-pair box to a micro-mechanical resonator \cite{armour2002entanglement}. It is therefore of both practical and theoretical interest to study the effect of incorporating the ancillary system into the computational model. Indeed, those gates that have been shown to be universal in a direct implementation of the circuit model cannot in general be utilised to implement quantum computations entirely mediated via an ancilla. We will refer to schemes in which all the multi-qubit interactions are mediated via some ancillary system as \emph{ancilla-mediated quantum computation} (AMQC).
\newline
\indent
An extensively researched model of AMQC is \emph{quantum bus} (qubus) computation \cite{milburn1999simulating,wang2002simulation,spiller2006quantum, brown2011ancilla, louis2007efficiencies,munro2005efficient}. This model employs a field-mode ancilla to mediate two-qubit gates on pairs of register qubits with the interaction between the ancilla and a register qubit being a controlled displacement of the field-mode. Recently, we have developed an analogous model which employs a $d$-dimensional \emph{qudit} ancilla \cite{proctor2014quantum} with a displacement operator defined in the discrete phase space of the qudit \cite{wootters1987wigner,vourdas2004quantum}. These models have been shown to require a lower number of operations to implement certain gate sequences than a direct implementation of the circuit model \cite{brown2011ancilla,proctor2014quantum,noteE}. However, neither of these models can implement a universal gate set on the register using \emph{only} this ancilla-register interaction and so, although no interactions between register qubits are required, some further direct access is needed to the register qubits to implement some basis-changing single-qubit unitary \cite{proctor2014quantum,brown2011using}.
\newline
\indent
In order to implement useful quantum computations, it will be necessary to have register qubits with as long a coherence time as possible. However, if it is necessary to access each register qubit to implement multiple forms of control this will potentially introduce many sources of decoherence. Limiting the forms of access required to the register qubits may help to isolate the register more effectively, and so, motivated by this, the measurement-based \emph{ancilla-driven quantum computation} (ADQC) \cite{kashefi2009twisted,anders2010ancilla,shah2013ancilla} and more recently the globally unitary \emph{ancilla-controlled quantum computation} (ACQC) \cite{proctor2013universal} schemes were developed. In both of these models the access to the register qubits is limited to one \emph{fixed-time} interaction between a single register qubit (at a time) and an ancilla qubit, where the qubits are not necessarily of the same physical type. The additional direct access to the register that was required in the qubus architecture, and analogous qudit model, is replaced by local unitary operations on the ancilla and, in the case of ADQC, ancilla measurements.
\newline
\indent
Although both the ancilla-driven and ancilla-controlled models require the minimum possible access to the computational register, they replace the local control of the register with local control of the ancillary system and so still require more than one fixed quantum gate to implement universal computation. Halil-Shah and Oi \cite{halil2014minimum} have recently shown that the measurement-based ancilla-driven model can be adapted so that no local control, beyond ancilla preparation in a fixed state, is required of either the ancilla or the register. In this model, the computation can be achieved using a fixed interaction and ancilla measurement in the computational basis alone. However, this requires a stochastic repeat-until-success style gate scheme \cite{paetznick2013repeat}, whereby one has to wait until a random walk through the set of unitaries is within the required precision of the desired unitary. In this paper we will show that it is possible to develop deterministic models that require only a single fixed ancilla-register interaction and ancilla preparation in the computational basis with no ancilla measurements necessary. Such schemes require a minimal level of control of both the ancillary and register systems whilst allowing for universal quantum computation. Hence, we will refer to such models as \emph{minimal control} models of ancilla-mediated quantum computation, and we will often drop the reference to ancilla-mediation for brevity. In Section~\ref{min1} we introduce two such schemes based on locally inequivalent interactions. The first of these models requires multiple ancillas to implement entangling two-qubit gates on the register and so, although it requires minimal control, it has an overhead in terms of ancilla use. We then develop an alternative minimal control model which does not have this ancilla overhead and requires only three ancilla-register interactions per two-qubit entangling gate, the minimum possible in any unitary scheme \cite{lamata2008sequential}. We briefly discuss the physical implementation of these models before concluding in Section~\ref{con}. We begin in Section~\ref{def} with some essential definitions.
\section{Definitions \label{def}}
We denote the Pauli operators acting on the $j^{th}$ qubit by $X_j$, $Y_j$ and $Z_j$ and take $\ket{0}$ and $\ket{1}$ to be the positive and negative eigenstates of the Pauli $Z$ operator respectively. Using standard definitions, we take the Hadamard gate $H$ to be
\begin{equation} H := \frac{1}{\sqrt{2}}\left(\ket{0}\bra{0} + \ket{0}\bra{1} + \ket{1}\bra{0} - \ket{1}\bra{1} \right),\label{Had} \end{equation}
and the single-qubit phase gate to be
\begin{equation}R(\theta):=\ket{0}\bra{0} +e^{i\theta}\ket{1}\bra{1}. \label{Rtheta}\end{equation}
We furthermore define $T:=R\left(\pi/4\right)$ and the two-qubit SWAP gate
\begin{equation} \text{SWAP}:=\ket{00}\bra{00} + \ket{01}\bra{10} + \ket{10}\bra{01} +\ket{11}\bra{11}. \end{equation}
Except for those gates defined above and the identity operator $\mathbb{I}$, where standard notation is used, all single-qubit gates will be denoted by lower case roman letters.
We define a general controlled gate, with a control qubit $j$ and a target qubit $k$, by
\begin{equation} C^j_{k}(u,v):=\ket{0}\bra{0}_j \otimes u_{k} + \ket{1}\bra{1}_j \otimes v_{k}, \end{equation} where $u,v \in U(2)$. The subscripts $j$ and $k$ will be dropped from the notation when no ambiguity will arise and we let $Cu:=C(\mathbb{I},u)$ and $SCu:=\text{SWAP} \cdot Cu$. Two operators $U,V \in U(4)$ are called \emph{locally equivalent} \cite{makhlin2002nonlocal} with respect to a decomposition into qubit subsystems $j$ and $k$ if
\begin{equation} U=u_j \otimes v_k \cdot V \cdot p_j \otimes q_k, \end{equation}
for some $u,v,p,q \in U(2)$.
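For later reference, the gates defined above are easy to write down numerically; the following helper snippet (our own illustration in NumPy, not part of the formal development) fixes the matrices and the conventions $C(u,v)$, $Cu$ and $SCu$ used below:
\begin{verbatim}
import numpy as np

I2 = np.eye(2, dtype=complex)
H = np.array([[1, 1], [1, -1]], dtype=complex) / np.sqrt(2)
def R(theta):                      # single-qubit phase gate R(theta)
    return np.diag([1, np.exp(1j * theta)])
T = R(np.pi / 4)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.diag([1, -1]).astype(complex)
SWAP = np.array([[1, 0, 0, 0],
                 [0, 0, 1, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1]], dtype=complex)
P0, P1 = np.diag([1., 0.]), np.diag([0., 1.])

def C(u, v):                       # control on the first factor
    return np.kron(P0, u) + np.kron(P1, v)

def Cu(u):                         # C(I, u)
    return C(I2, u)

def SCu(u):                        # SWAP . Cu
    return SWAP @ Cu(u)

# sanity check: C(I, Z) is the usual controlled-Z, symmetric under SWAP
assert np.allclose(SWAP @ Cu(Z) @ SWAP, Cu(Z))
\end{verbatim}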
\section{Minimal control ancilla-mediated quantum computation \label{min1}}
We now present two schemes of ancilla-mediated quantum computation that require only a single fixed-time ancilla-register interaction and ancilla preparation in the computational basis and hence are minimal control models.
\subsection{A first minimal control model}
We introduce our first model by giving a general form for an ancilla-register interaction which under certain conditions can implement a universal gate set on a register of qubits within the constraints of minimal control. We give an explicit construction for the application of a universal gate set on the register before comparing this model to the measurement-based scheme of Halil-Shah and Oi \cite{halil2014minimum} and giving a simple example of an interaction that obeys the required constraints.
\subsubsection{A general interaction}
We consider a general fixed ancilla-register interaction of the form
\begin{equation} K^j_{a} := u_{j} \otimes H_a \cdot CZ \cdot v_{j} \otimes \mathbb{I}_a,\end{equation}
where $u, v \in U(2)$. This interaction is shown in Fig.~\ref{minint}a and it is locally equivalent to $CZ$. We define $u_{0}:=uv$ and $u_1:=uZv$ and note that we may also write the interaction in the form $K^j_a=\mathbb{I} \otimes H \cdot C^a_j(u_0,u_1)$. We will show that for any $K$ such that $\{ u_0, u_1\}$ is a universal single-qubit gate set we may implement a minimal control model.
\begin{figure}
\caption{(a) The fixed ancilla-register interaction $K^j_a$. (b) Circuit for implementing the single-qubit gates $u_0$ and $u_1$ on a register qubit using $K$ and an ancilla prepared in the computational basis.}
\label{minint}
\end{figure}
We do this by showing how we may implement a universal gate set on the register qubits. It follows directly from the definition of the ancilla-register interaction $K^j_a$ that
\begin{equation} K^j_{a} \ket{\psi}_j \ket{i}_a = u_{i} \ket{\psi}_j \otimes H\ket{i}_a, \end{equation}
where $i=0,1$. Hence, we can deterministically apply the elements of $\{u_0,u_1\}$ on any register qubit, which we assume is a universal set for $SU(2)$, and so we may simulate any gate in $SU(2)$ up to arbitrary accuracy using only $K$ and the initialisation of ancilla in the computational basis. This gate method is depicted in the circuit diagram of Fig.~\ref{minint}b.
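This identity is straightforward to check numerically; in the sketch below $u$ and $v$ are arbitrary randomly generated unitaries, since the argument only uses the structure of $K$:
\begin{verbatim}
import numpy as np

# Check of K |psi>_j |i>_a = u_i |psi>_j (x) H|i>_a for sample u, v.
I2 = np.eye(2); H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
Z = np.diag([1., -1.])
CZ = np.diag([1., 1., 1., -1.])              # register (x) ancilla ordering

def rand_u(seed):
    rng = np.random.default_rng(seed)
    q, _ = np.linalg.qr(rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2)))
    return q

u, v = rand_u(1), rand_u(2)
K = np.kron(u, H) @ CZ @ np.kron(v, I2)      # K = (u x H) . CZ . (v x I)
u0, u1 = u @ v, u @ Z @ v

psi = rand_u(3)[:, 0]                        # random register state
for i, ket in enumerate(np.eye(2)):
    lhs = K @ np.kron(psi, ket)
    rhs = np.kron([u0, u1][i] @ psi, H @ ket)
    assert np.allclose(lhs, rhs)
\end{verbatim}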
\newline
\indent
We now show how to implement a maximally entangling gate between two register qubits, $j$ and $k$, using only $K$ and ancillas prepared in the computational basis. A straightforward explicit calculation, utilising the identities $ vu^{\dagger}_0u= \mathbb{I}$, $HZH=X$, $XX=ZZ=\mathbb{I}$ and $XZXZ=-\mathbb{I}$, shows that
\begin{equation} K^k_{a} K^j_{a} \cdot u_{0_j}^{\dagger} \otimes u_{0_k}^{\dagger} \otimes \mathbb{I} \cdot K^k_{a} K^j_{a} = M^j_k \otimes \mathbb{I}_a, \label{minacqcseq} \end{equation}
where the induced entangling gate on the register qubits $j$ and $k$ is
\begin{equation} M^j_k= u_j \otimes u_k \cdot CZ \cdot v_j \otimes v_k.\label{minM} \end{equation}
Although Eq.~(\ref{minacqcseq}) is an ancilla-mediated sequence which implements an entangling two-qubit gate on the register, as written it requires local unitaries on the register qubits and it is not decomposed into only $K$ gates. However, we may decompose the $u_0^{\dagger}$ gate on each register qubit into further $K$ gates. This is because $u_0$ and $u_1$ are a universal set for $SU(2)$ and hence there is a choice of $k_1,...,k_n=0,1$ such that
\begin{equation}
\left(\prod_{i=1}^n K^{j}_{a_i}\right) \ket{\psi}_j\ket{k_{1}}_{a_1}...\ket{k_{n}}_{a_n}=\tilde{u}^{\dagger}_{0}\ket{\psi}_j H\ket{k_1}_{a_1}...H\ket{k_n}_{a_n},\end{equation}
with $\tilde{u}^{\dagger}_0=u_{k_n}...u_{k_1}$ approximating $u^{\dagger}_0$ up to arbitrary accuracy with finite $n$. In certain cases $u^{\dagger}_0$ may be implemented exactly.
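The operator identity of Eq.~(\ref{minacqcseq}) can likewise be verified directly; the following sketch (again with arbitrary test unitaries $u$ and $v$, on the ordering register $j$, register $k$, ancilla $a$) checks that the four-interaction sequence acts as $M^j_k\otimes\mathbb{I}_a$:
\begin{verbatim}
import numpy as np

I2 = np.eye(2); H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
Z = np.diag([1., -1.]); P0, P1 = np.diag([1., 0.]), np.diag([0., 1.])
CZ2 = np.diag([1., 1., 1., -1.])

def rand_u(seed):
    rng = np.random.default_rng(seed)
    q, _ = np.linalg.qr(rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2)))
    return q

def kron(*ops):
    out = ops[0]
    for op in ops[1:]:
        out = np.kron(out, op)
    return out

u, v = rand_u(1), rand_u(2)
u0 = u @ v

CZ_ja = kron(I2, I2, P0) + kron(Z, I2, P1)   # CZ between j and a
CZ_ka = kron(I2, I2, P0) + kron(I2, Z, P1)   # CZ between k and a
K_ja = kron(u, I2, H) @ CZ_ja @ kron(v, I2, I2)
K_ka = kron(I2, u, H) @ CZ_ka @ kron(I2, v, I2)

seq = K_ka @ K_ja @ kron(u0.conj().T, u0.conj().T, I2) @ K_ka @ K_ja
M = kron(u, u) @ CZ2 @ kron(v, v)            # Eq. (minM)
assert np.allclose(seq, kron(M, I2))
\end{verbatim}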
\begin{figure}
\caption{Circuit diagram for the entangling two-qubit gate of Eq.~(\ref{minacqcseq}) on register qubits $j$ and $k$, mediated entirely by $K$ interactions and ancillas prepared in the computational basis.}
\label{mintwo}
\end{figure}
Hence we may implement an entangling gate which is locally equivalent to $CZ$ between pairs of register qubits using only $K$ gates and ancillas initialised in the computational basis\textsuperscript{a}. The circuit diagram for this two-qubit gate on the register is given in Fig.~\ref{mintwo}. Therefore, under the assumption that $u_0$ and $u_1$ are universal for $SU(2)$, we have shown that $K$, along with ancillas prepared in the computational basis, can implement a minimal control model of ancilla-mediated quantum computation.
\subsubsection{Discussion and comparison with a measurement-based model}
We compare this minimal unitary model to the recently proposed measurement-based minimal scheme of Halil-Shah and Oi \cite{halil2014minimum}. In their model the computation is achieved using only a fixed interaction, ancilla preparation in a fixed state, and measurement in the computational basis. The fixed initial ancilla state is compensated for by the measurement which projects the ancilla onto states in the computational basis. Only one and two ancilla-register interactions are required to implement the single and two-qubit gates respectively. However this model results in a probabilistic repeat-until-success style gate scheme \cite{paetznick2013repeat}, whereby one has to wait until a random walk through the set of unitaries is within the required precision of the desired unitary. Although the model presented here is entirely deterministic, there is an overhead, that in general may be large, to implement the two-qubit gates. We note that this overhead does not appear if an ancilla measurement is performed after the first two interactions in Fig.~\ref{mintwo}, for an appropriately initialised ancilla and measurement basis, and in this case both models are of a similar form.
\newline
\indent
We note that if $vu$ is diagonal in the computational basis the additional ancillas are not required; however, in this case $u_0$ and $u_1$ commute and so cannot form a universal set for $SU(2)$. In general, the additional ancillas required for the two-qubit gates may create an impractical overhead. However, we now demonstrate that there exists an appropriate form for $K$ that is universal and has a low overhead of additional ancillas for each entangling gate.
\subsubsection{A specific interaction}
We can parametrise a general unitary operator $p \in U(2)$ by the matrix in the computational basis
\begin{equation} p(\eta,\phi,\psi,\theta) = e^{i \eta}\begin{pmatrix} e^{i \phi}\cos \theta & e^{-i \psi} \sin \theta \\ e^{i \psi}\sin \theta & - e^{-i \phi} \cos \theta \end{pmatrix}. \end{equation}
A specific suitable choice for the ancilla-register interaction $K$ is given by taking $u=p(\eta, \zeta,\zeta, \frac{\pi}{8})$ and $v=p(\frac{\pi}{8} - \eta, -\zeta-\frac{\pi}{8} ,\zeta - \frac{\pi}{8} , \frac{\pi}{8})$. It is straightforward to show that this gives $u_0 = T$ and $u_1=HT$. We have that $T^{7}=T^{\dagger}$ and so $u_1u_0^7=H$. It then follows that $u_0$ and $u_1$ form a universal set for $SU(2)$ as $H$ and $T$ are a universal set for single-qubit unitaries \cite{boykin2000new}. It is necessary to implement $u_0^{\dagger}$ on each register qubit to implement the sequence of Eq.~(\ref{minacqcseq}) and Fig.~\ref{mintwo}. We have that $u_0^{\dagger}=u_0^7$ and so the sequence of Eq.~(\ref{minacqcseq}) and Fig.~\ref{mintwo} can be implemented using 14 ancillas prepared in the state $\ket{0}$ and one `entangling' ancilla, that mediates the gate, prepared in any state.
\subsection{A second minimal control model \label{min2}}
We now present an alternative minimal control model which does not require additional ancillas. As before, we will introduce the model with a general interaction which with certain restrictions can be used to implement a minimal control model and then give a specific example of a simple suitable interaction.
\subsubsection{A general interaction}
Take an ancilla-register interaction of the form
\begin{equation} L^j_{a} := \mathbb{I}_j \otimes u_a \cdot SCR(\theta) \cdot R(\theta_r)_j \otimes R(\theta_a)_a. \end{equation}
This is decomposed into local and non-local parts in Fig.~\ref{minacqc}a. We note that this can also be expressed as $L^j_a=SC^a_j(uR(\theta_r),uR(\theta+\theta_r))\cdot\mathbb{I}_j \otimes R(\theta_a)_a$. We show that an interaction of this form, along with ancillas prepared in the computational basis, can implement universal quantum computation on the register if $\theta$ is such that $CR(\theta)$ is entangling (all non-trivial $\theta$) and $\{v_0,v_1\}$ is a universal set for $SU(2)$ where $v_i:=R(i\theta+\theta_a)uR(i\theta+\theta_r)$. As before, we do this by showing how we may implement a two-qubit entangling gate and a universal set for single-qubit unitaries on the register. We note that it is possible to set $\theta_r$ and $\theta_a$ to zero and obtain a universal interaction; these local rotations are included only to increase the generality of the interaction.
\newline
\indent
We may implement an entangling two-qubit gate between register qubits $j$ and $k$ using an ancilla initialised in the state $\ket{0}$ by interacting the ancilla sequentially with qubits $j$ and $k$ before completing the gate with a second interaction with the $j$ qubit. This is the interaction sequence
\begin{equation}L^j_{a} L^k_{a}L^j_{a} \ket{\psi}_{jk} \ket{0}_a = N^j_k \ket{\psi}_{jk} \otimes u\ket{0}_a ,\label{2qmin}\end{equation}
where $N^j_k$ is entangling for non-trivial $\theta$ and is given by
\begin{equation} N^j_k = R(\theta_a)u_j \otimes \mathbb{I}_k \cdot SCR(\theta) \cdot R(\theta_a)uR(\theta_r)_j \otimes R(\theta_r)_k. \label{njk}\end{equation}
This is represented in the circuit diagram of Fig.~\ref{minacqc}b and can be shown with a simple explicit calculation.
We may decompose any single-qubit gate on a register qubit into only ancilla-register interactions $L$ and ancilla state-preparation in the computational basis. This is because
\begin{equation} L^j_aL^j_a \ket{\psi}_j \ket{i}_a = v_i\ket{\psi}_j \otimes u\ket{i}_a,\end{equation}
where $i=0,1$ and we assume that $\{v_0,v_1\}$ is a universal set for $SU(2)$. This is represented in the circuit diagram of Fig.~\ref{minacqc}c. Hence, as we have shown how to implement a two-qubit entangling gate and a universal set for $SU(2)$ on the register then this is a minimal control model of ancilla-mediated quantum computation.
\begin{figure}
\caption{(a) Decomposition of the ancilla-register interaction $L^j_a$ into local and non-local parts. (b) The entangling two-qubit gate of Eq.~(\ref{2qmin}). (c) Implementation of the single-qubit gates $v_0$ and $v_1$ on a register qubit.}
\label{minacqc}
\end{figure}
\subsubsection{Discussion and comparison with other models}
This model requires three interactions for each entangling two-qubit gate on the register which, although greater than the two needed with the aid of ancilla measurement in ADQC \cite{anders2010ancilla} and the minimal extension of Halil-Shah and Oi \cite{halil2014minimum}, is the minimum possible in any measurement-free scheme \cite{lamata2008sequential}. Furthermore, in contrast to the first minimal control model, there is no requirement for multiple ancilla qubits to implement the entangling gates. Finally, we note that the two-qubit gates on the register are implemented in an identical fashion to those in the ACQC model \cite{proctor2013universal} and $L$ obeys the required conditions to be universal for that model.
\subsubsection{A specific interaction}
A simple example of a specific form for the interaction $L^j_a$ such that $v_0$ and $v_1$ form a universal set for $SU(2)$ (and hence $L^j_a$ may implement this minimal control model) is given by taking $u=H$, $\theta=\frac{\pi}{4}$ and $\theta_r=\theta_a=0$. We then have that $L^j_{a} = \mathbb{I}_j \otimes H_a \cdot SCT$ and hence
$v_0=H$ and $v_1=THT$. A proof of the universality of $\{H,THT\}$ for $SU(2)$ is given in Appendix~\ref{AppA}. The entangling gate induced on a pair of register qubits from the sequence of Eq.~(\ref{2qmin}) is then $N= H \otimes \mathbb{I} \cdot SCT \cdot H \otimes \mathbb{I}$ and this can easily simulate $\text{CNOT}:=CX$ as in this case $(N^j_k)^4=C^k_jX$.
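Both claims are easy to confirm numerically; the sketch below checks that two interactions implement $v_0=H$ and $v_1=THT$ on the register and that the induced gate satisfies $(N^j_k)^4=C^k_jX$:
\begin{verbatim}
import numpy as np

I2 = np.eye(2); H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
X = np.array([[0., 1.], [1., 0.]])
T = np.diag([1., np.exp(1j * np.pi / 4)])
P0, P1 = np.diag([1., 0.]), np.diag([0., 1.])
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
CT = np.diag([1., 1., 1., np.exp(1j * np.pi / 4)])  # controlled-T (symmetric)

L = np.kron(I2, H) @ SWAP @ CT                      # register (x) ancilla

# single-qubit gates: L L |psi>|i> = v_i|psi> (x) H|i>
rng = np.random.default_rng(0)
psi = rng.normal(size=2) + 1j * rng.normal(size=2); psi /= np.linalg.norm(psi)
for ket, v_i in zip(np.eye(2), [H, T @ H @ T]):
    assert np.allclose(L @ L @ np.kron(psi, ket), np.kron(v_i @ psi, H @ ket))

# induced two-qubit gate N = (H x I) . SCT . (H x I); N^4 is a CNOT
N = np.kron(H, I2) @ SWAP @ CT @ np.kron(H, I2)
CX_kj = np.kron(I2, P0) + np.kron(X, P1)            # control k, target j
assert np.allclose(np.linalg.matrix_power(N, 4), CX_kj)
\end{verbatim}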
\subsection{The physical implementation of minimal control models}
The models introduced herein are motivated by the physical challenges of implementing the multiple forms of control required in most universal models of quantum computation and hence it is interesting to briefly consider systems that may be appropriate for a physical realisation. We initially concentrate on a simple Hamiltonian for implementing the second minimal control model presented in Section~\ref{min2}. We set $\hbar=1$ and consider the two-qubit interaction Hamiltonian
\begin{equation} \boldsymbol{H}(\theta) = \pi (X \otimes X + Y \otimes Y) + (\pi - \theta) Z \otimes Z, \label{ham} \end{equation}
which, applied for a time $t=1/4$, implements (up to an irrelevant global phase) the unitary operator
\begin{equation} U(\theta)=e^{- i \boldsymbol{H}(\theta) /4} = SCR(\theta) \cdot R(-\theta/2) \otimes R(-\theta/2). \end{equation}
If we consider the second minimal control model and take the fixed ancilla-register gate $L^j_a$ to be the unitary implemented by applying $\boldsymbol{H}(\theta)$ to the ancilla and register qubit for a time $t=1/4$, i.e. $U(\theta)$, followed by a fixed ancilla rotation of the form $R(\theta/2)HR(\theta/2)$, we have that $L^j_a = \mathbb{I}_j \otimes R(\theta/2)HR(\theta/2)_a \cdot U(\theta)$. Hence, this $L^j_a$ gives $v_0=H$ and $v_1=R(\theta)HR(\theta)$ which we have shown to be a universal set for $SU(2)$ when $\theta=\pi/4$ and so this form for $L^j_a$ is appropriate for implementing the second minimal control model. With this simple interaction Hamiltonian, $\boldsymbol{H}(\pi/4)$, local control of the ancilla is required. However, we see that this is a \emph{fixed} gate on the ancilla after every ancilla-register interaction via $\boldsymbol{H}(\pi/4)$ and hence this can be a fixed element in an experimental setup or incorporated into the natural evolution of the ancilla between interactions\textsuperscript{b}. For example, if the ancillary qubit is photonic the local operation can be performed by fixed linear optics \cite{kok2007linear} after each ancilla-register interaction. Indeed, the use of ancillary photons to mediate gates has been demonstrated in many experimental setups, for example with atomic \cite{reiserer2014quantum,tiecke2014nanophotonic} or spin \cite{carter2013quantum,luxmoore2013interfacing} qubits.
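As a quick sanity check (a sketch only, using SciPy's matrix exponential), one can verify that evolving under $\boldsymbol{H}(\theta)$ for $t=1/4$ reproduces $SCR(\theta)\cdot R(-\theta/2)\otimes R(-\theta/2)$ up to a global phase:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

theta = np.pi / 4                               # any theta works here
X = np.array([[0., 1.], [1., 0.]]); Y = np.array([[0., -1j], [1j, 0.]])
Z = np.diag([1., -1.]); I2 = np.eye(2)
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])

Ham = np.pi * (np.kron(X, X) + np.kron(Y, Y)) + (np.pi - theta) * np.kron(Z, Z)
U = expm(-1j * Ham / 4)

def R(t):
    return np.diag([1., np.exp(1j * t)])

target = SWAP @ np.diag([1., 1., 1., np.exp(1j * theta)]) \
         @ np.kron(R(-theta / 2), R(-theta / 2))

phase = U[0, 0] / target[0, 0]                  # global phase between the two
assert np.isclose(abs(phase), 1)
assert np.allclose(U, phase * target)
\end{verbatim}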
\newline
\indent
Interactions of the form $\boldsymbol{H}(\theta)$ arise naturally in spin systems, with one example of an implementation of $\boldsymbol{H}(\pi/4)$ given by the coupling between quantum dot resonant exchange qubits \cite{doherty2013two}. A physical system particularly relevant to ancilla-mediated models is the coupling of nuclear spins via ancillary electronic spins in nitrogen-vacancy (NV) defects in diamond \cite{taminiau2014universal,robledo2011high,waldherr2014quantum} and in such setups it may be possible to engineer the Hamiltonian $\boldsymbol{H}(\pi/4)$ \cite{borhani2005cluster}. Although in some physical realisations, such as the photonic case discussed above, the fixed local operation on the ancillary qubit after each interaction is convenient or natural, in others it may be problematic and negate the benefits of the models introduced herein. However, it is also possible to find Hamiltonians that directly implement suitable interactions for either of the models proposed in Section~\ref{min1}. In certain systems the Hamiltonian is highly tuneable, with an example being those involving superconducting qubits \cite{niskanen2007quantum}, and due to the long coherence times of ensembles of electron spins in NV centers \cite{stanwix2010coherence} a particularly promising physical system for ancilla-mediated quantum computing is arrays of spin ensembles in diamond (each spin ensemble is an effective qubit) coupled by ancillary flux qubits \cite{qiu2014coupling,lu2013quantum,marcos2010coupling}. Indeed, coherent coupling in such a system has been demonstrated \cite{zhu2011coherent}. It would be interesting to consider which physical systems have Hamiltonians that are naturally suited to generating appropriate interactions for the models introduced herein and we leave a more detailed study of this for future work.
\section{Conclusions \label{con}}
We have presented two unitary models of ancilla-mediated quantum computation that require only minimal control of both the ancillary and register systems. The only control necessary in these models to implement universal quantum computation on a register of qubits is a single fixed-time ancilla-register interaction between one ancilla qubit and one register qubit (at a time) and ancilla preparation in the computational basis. The first of these models is based on maximally entangling interactions that are locally equivalent to $CZ$ and requires multiple ancilla qubits to mediate two-qubit entangling gates on the register. This model is similar in many respects to the minimal measurement-based ancilla-mediated model of Halil-Shah and Oi \cite{halil2014minimum}, in which the requirement for ancilla preparation is replaced with the need for ancilla measurements in the computational basis, but is deterministic rather than stochastic. The second of these models removes the need for multiple ancillas to mediate each entangling gate by employing interactions that utilise the SWAP gate in a similar manner to the model known as ancilla-controlled quantum computation \cite{proctor2013universal}. As in the ancilla-controlled model, only three ancilla-register interactions are required to implement a two-qubit entangling gate on the register, which is the minimum possible in any scheme that does not include measurements \cite{lamata2008sequential}, and two for a single-qubit gate. Again, due to the global unitarity of the model the computation is deterministic and is based on a finite gate set composed of one two-qubit entangling gate and two single-qubit gates that form a universal set for single-qubit unitaries. We conjecture that these models require the minimal possible level of control for a unitary ancilla-mediated scheme.
\begin{backmatter}
\section*{Competing interests}
The authors declare that they have no competing interests.
\section*{Endnotes}
\textsuperscript{a} The two-qubit gate in this first minimal control model can be seen to employ essentially the same geometric methods as the qudit ancilla model we introduced in Ref. \cite{proctor2014quantum} and hence also the qubus model. In the qudit ancilla model the ancilla interacts with the register via displacements of the ancilla (with the displacements defined in the discrete lattice phase space of a qudit) controlled by a register qubit. In this model with a qudit of dimension $2$, i.e. a qubit ancilla, controlled displacements are the $C^j_aX$ and $C^j_aZ$ gates. The two-qubit gates between register qubits $j$ and $k$ are then mediated by a sequence of the form $C^k_aXC^j_aZC^k_aXC^j_aZ=C^j_kZ$. This requires two different interactions between the ancilla and register. The model presented here essentially uses this gate method but removes the need for two different interactions by including a Hadamard gate on the ancilla in the interaction definition as $HZH=X$. It is the additional local gates $u$ and $v$ (needed to make the gate universal without additional single-qubit gates) that then result in the need for additional ancillas to mediate the two-qubit gates in the first minimal control model. Finally, this gate method can be considered geometric, as $XZXZ$ creates a closed loop in the discrete lattice phase space of a qubit; the details are given in Ref.~\cite{proctor2014quantum}.
\newline
\textsuperscript{b} Note that this is different to ADQC and ACQC in which the required rotations on the ancilla depend on the gate that is to be implemented (and previous measurement outcomes in the case of ADQC).
\end{backmatter}
\appendix
\section{Universality of $\{H,THT\}$ \label{AppA}}
Here we prove that $v_0=H$ and $v_1=THT$ are a universal set for $SU(2)$. Using similar notation to Boykin \emph{et al.} \cite{boykin2000new}, we denote the $n^{th}$ roots of the $X$ and $Z$ operators by $X^{\frac{1}{n}}$ and $Z^{\frac{1}{n}}$. Any $u \in SU(2)$ can be written as
\begin{equation} u=\exp \left( i \varphi \hat{n} \cdot \vec{\sigma} \right), \end{equation}
where $\vec{\sigma}=(X,Y,Z)$ is the vector of Pauli operators, $\hat{n}=(n_x,n_y,n_z)$ is some unit vector in $\mathbb{R}^3$, $\hat{n} \cdot \vec{\sigma} = n_x X + n_y Y +n_z Z$ and $\varphi \in \mathbb{R}$ is some rotation angle. We have that
\begin{equation} \exp \left( i \varphi \hat{n} \cdot \vec{\sigma} \right) = \cos \varphi \mathbb{I} + i \sin \varphi ( \hat{n} \cdot \vec{\sigma} ).\end{equation}
Up to irrelevant global phases, which we ignore from now on, $Z=\exp \left( i \frac{\pi}{2} Z \right)$ and $X=\exp \left( i \frac{\pi}{2} X \right)$ and hence $Z^{\frac{1}{n}}=\exp \left( i \frac{\pi}{2n} Z \right)$ and $X^{\frac{1}{n}}=\exp \left( i \frac{\pi}{2n} X \right)$. Using these, and the identity $HZH=X$, we have that $X^{\frac{1}{n}}=HZ^{\frac{1}{n}}H$. It is straightforward to confirm that $T=Z^{-\frac{1}{4}}$ and so $v_+:=v_0v_1=X^{-\frac{1}{4}}Z^{-\frac{1}{4}}$ and $v_-:=v_1v_0=Z^{-\frac{1}{4}}X^{-\frac{1}{4}}$. From a simple explicit calculation, we have that
\begin{equation} v_{\pm} = \cos^2 \frac{\pi}{8}\,\mathbb{I} - i \sin^2 \frac{\pi}{8} \left(\cot \frac{\pi}{8}(Z+X) \mp Y\right). \end{equation}
Therefore, for both $v_+$ and $v_-$ we have that $\cos \varphi = \cos^2 \frac{\pi}{8}$, and hence $\varphi$ is an irrational multiple of $\pi$ \cite{boykin2000new}, and $\hat{n}_{\pm}=n_{\pm}/\|n_{\pm}\|$ where $n_{\pm} = -(\cot\frac{\pi}{8},\mp1,\cot\frac{\pi}{8})$. As $\varphi$ is an irrational multiple of $\pi$, we can approximate to arbitrary accuracy any rotation around the $n_{\pm}$ axis by $m$ applications of $v_{\pm}$, with $m$ a finite integer. As these axes of rotation are not parallel, any rotation can be decomposed into rotations around these axes \cite{mladenova2011vector}. This proves that $v_+$ and $v_-$, and hence $v_0$ and $v_1$, form a universal set for $SU(2)$.
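The key numerical fact used above, $\cos\varphi=\cos^2\frac{\pi}{8}$ for both $v_+$ and $v_-$, can also be checked directly (a sketch; global phases are removed by taking the modulus of the trace):
\begin{verbatim}
import numpy as np

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
T = np.diag([1., np.exp(1j * np.pi / 4)])
v_plus = H @ (T @ H @ T)        # v_0 v_1
v_minus = (T @ H @ T) @ H       # v_1 v_0
for v in (v_plus, v_minus):
    assert np.isclose(abs(np.trace(v)) / 2, np.cos(np.pi / 8) ** 2)
\end{verbatim}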
\end{document}
\begin{document}
\title{Geometric aspects of the Daugavet property.}
\author{R. V. Shvidkoy \\
Department of Mathematics\\
Mathematical Sciences Building\\
Columbia, Missouri, 65211\\
USA\\
\textit{e-mail: }[email protected]}
\date{November, 1998}
\maketitle
\begin{abstract}
Let $X$ be a closed subspace of a Banach space $Y$ and $J$ be the inclusion
map. We say that the pair $(X,Y)$ has the Daugavet property if for every
rank one bounded linear operator $T$ from $X$ to $Y$ the following equality
\begin{equation}
\Vert J+T\Vert =1+\Vert T\Vert \label{ade}
\end{equation}
holds. A new characterization of the Daugavet property in terms of weak open
sets is given. It is shown that the operators not fixing copies of $\ell _1$
on a Daugavet pair satisfy (\ref{ade}).
Some hereditary properties are found: if $X$ is a Daugavet space and $Y$ is
its subspace, then $Y$ is also a Daugavet space provided $X/Y$ has the
Radon-Nikod\'ym property; if $Y$ is reflexive, then $X/Y$ is a Daugavet
space. Besides, we prove that if $(X,Y)$ has the Daugavet property and $
Y\subset Z$, then $Z$ can be renormed so that $(X,Z)$ possesses the Daugavet
property and the equivalent norm coincides with the original one on $Y$.
\end{abstract}
\section{Introduction.}
Let $X$ be a closed subspace of a Banach space $Y$ and $J:X\rightarrow Y$ be
the inclusion map. We say that the pair $(X,Y)$ has the Daugavet property
(or is a Daugavet pair) if for every rank one bounded linear operator $T$
from $X$ to $Y$ the following identity
\begin{equation}
\Vert J+T\Vert =1+\Vert T\Vert , \label{DE}
\end{equation}
which is called the Daugavet equation, holds. If (\ref{DE}) is satisfied by
operators from some class $\mathcal{M}$ we say that $(X,Y)$ has the Daugavet
property with respect to this class.
Equation (\ref{DE}) was first established for compact operators on $C[0,1]$ by
Daugavet in 1963 (see \cite{Daug}). Subsequently it became a subject of extensive
study, mostly directed at finding new Daugavet spaces and classes of
operators satisfying (\ref{DE}). In particular, it was proved that all
non-atomic $C(K)$ and $L_1(\mu )$-spaces possess the Daugavet property even
for weakly compact operators (see \cite{fs, h1, h2}). Until recently the
investigation of general properties of Daugavet spaces remained somewhat
aside. As far as we can trace, the first results in this direction appeared
in the works of Wojtaszczyk \cite{woj} and Kadets \cite{k}. Some of the most
far-reaching ones were the following:
i) The unit sphere of a Daugavet space does not have a strongly exposed
point. Thus, a Daugavet space cannot have the Radon-Nikod\'ym property (see
\cite{woj}).
ii) $\ell _1$ and $\ell _\infty $-sums of Daugavet spaces have the Daugavet
property (see \cite{woj} and \cite{kssw}).
iii) A Daugavet space does not have an unconditional basis (see \cite{k}).
A more intensive and systematic study of the general theory was initiated in
\cite{kssw}. The authors gave a characterization of the Daugavet property in
terms of slices of the unit ball. This allowed them to obtain a great deal of information
about the isomorphic structure of Daugavet spaces.
The present paper is a natural continuation of \cite{kssw}. We give
affirmative answers to many questions posed there and provide alternative
proofs of some earlier known results.
In Section 2 another characterization of the Daugavet property in terms of
weak open sets intersecting the unit ball is given. Using this tool we prove
that all operators not fixing a copy of $\ell _1$ on a Daugavet pair satisfy
the Daugavet equation (Theorem \ref{oper}). Note that the analogous result
for strong Radon-Nikod\'ym operators was already obtained in \cite{kssw}. We
also present some new hereditary properties (Theorem \ref{her2}). In
particular, a pair $(X,Y)$ has the Daugavet property, provided $Y$ is a
Daugavet space and $Y/X$ has the Radon-Nikod\'ym property.
Section 3 is entirely devoted to pairs of the form $(X,C(K))$, where $K$ is
a compact Hausdorff space. It is shown that in some natural cases, e.g.,
when $K$ is the unit ball of $X^{*}$, such a pair possesses the Daugavet
property whenever $X$ does. We will see that this is also the case for some
bigger $C(K)$-spaces containing $X$. In Section 4 one of them is shown to
be, in a sense, universal: a Banach space $Y$ can be isomorphically embedded
into it, whenever $X\subset Y$ and $Y/X$ is separable.
At the end of Section 4 we prove the following renorming theorem: if $(X,Y)$
has the Daugavet property and $Z$ is a Banach space containing $Y$, then $Z$
can be renormed so that $(X,Z)$ possesses the Daugavet property and the
equivalent norm remains unchanged on $Y$. A consequence of this result and
the aforementioned Theorem \ref{oper} is that a Daugavet space does not
embed into an unconditional sum of Banach spaces without copies of $\ell _1$.
This is a generalization of the well-known theorem of Pelczy\'nski on the
impossibility of embedding $C[0,1]$ and $L_1[0,1]$ into a space with an
unconditional basis.
Throughout the text $\mathcal{L}(X,Y)$ denotes the space of all bounded
linear operators from $X$ into $Y$; $B(X)$ ($S(X)$) stands for the unit ball
(unit sphere) of a Banach space $X$; by $\overline{\limfunc{ext}}B(X^{*})$
we denote the weak$^{*}$ closure of the set of all extreme points of the
dual unit ball $B(X^{*}).$ For a subset $A$ of a Banach space, $\overline{A}$
denotes the norm-closure of $A$.
The author wishes to thank Professors V. Kadets, N. Kalton and D. Werner for
fruitful discussions, valuable remarks and constant interest in this work.
\section{Some characterizations and direct consequences.}
The notion of a slice plays the central role in this section.
\begin{definition}
Let $X$ be a Banach space. A \emph{slice} of $B(X)$ is a set of the
form
\[
S(x^{*},\varepsilon )=\{x\in B(X):x^{*}(x)>1-\varepsilon \},
\]
where $x^{*}\in X^{*}$ and $\varepsilon >0$. We always assume that $x^{*}\in
S(X^{*})$. If $X$ is a dual space and $x^{*}$ is taken from the predual,
then $S(x^{*},\varepsilon )$ is called a weak$^{*}$ slice.
\end{definition}
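For instance, if $X=C[0,1]$ and $x^{*}=\delta _{t_0}$ is the evaluation functional at a point $t_0\in [0,1]$, then $S(\delta _{t_0},\varepsilon )=\{f\in B(C[0,1]):f(t_0)>1-\varepsilon \}$; this standard example is given here only for orientation.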
In the paper \cite{kssw} the following characterization of the Daugavet property
in terms of slices was obtained.
\begin{lemma}
\label{chl1}The following are equivalent:
\begin{enumerate}
\item[(a)] The pair $(X,Y)$ has the Daugavet property;
\item[(b)] For every $y_0\in S(Y)$ and for every slice $S(x_0^{*},
\varepsilon _0)$ of $B(X)$ there is another slice $S(x_1^{*},\varepsilon
_1)\subset S(x_0^{*},\varepsilon _0)$ of $B(X)$ such that for every $x\in
S(x_1^{*},\varepsilon _1)$ the inequality $\Vert x+y_0\Vert \ge
2-\varepsilon _0$ holds;
\item[(c)] For every $x_0^{*}\in S(X^{*})$ and for every weak$^{\,*}$ slice
$S(y_0,\varepsilon _0)$ of $B(Y^{*})$ there is another weak$^{\,*}$ slice $
S(y_1,\varepsilon _1)\subset S(y_0,\varepsilon _0)$ of $B(Y^{*})$ such that
for every $y^{*}\in S(y_1,\varepsilon _1)$ the inequality $\Vert
x_0^{*}+y_{\mid X}^{*}\Vert \ge 2-\varepsilon _0$ holds.
\end{enumerate}
\end{lemma}
For the sake of completeness we present the proof here.
\emph{Proof.}
(a)$\Rightarrow $(b). Define $T{:}\allowbreak \ X\to Y$ by $Tx=x_0^{*}(x)y_0$
. Then $\Vert J^{*}+T^{*}\Vert =\Vert J+T\Vert =2$, so there is a functional
$y^{*}\in S(Y^{*})$ such that $\Vert J^{*}y^{*}+T^{*}y^{*}\Vert \ge
2-\varepsilon _0$ and $y^{*}(y_0)\ge 0$. Put
\[
x_1^{*}=\frac{J^{*}y^{*}+T^{*}y^{*}}{\Vert J^{*}y^{*}+T^{*}y^{*}\Vert }
,\quad \varepsilon _1=1-\frac{2-\varepsilon _0}{\Vert
J^{*}y^{*}+T^{*}y^{*}\Vert }.
\]
Then for all $x\in S(x_1^{*},\varepsilon _1)$ we have
\[
\langle (J^{*}+T^{*})y^{*},x\rangle \ge (1-\varepsilon _1)\Vert
J^{*}y^{*}+T^{*}y^{*}\Vert =2-\varepsilon _0,
\]
therefore
\begin{equation}
y^{*}(x)+y^{*}(y_0)x_0^{*}(x)\ge 2-\varepsilon _0, \label{eq2}
\end{equation}
which implies that $x_0^{*}(x)\ge 1-\varepsilon _0$, i.e., $x\in
S(x_0^{*},\varepsilon _0)$. Moreover, by (\ref{eq2}) we have $
y^{*}(x)+y^{*}(y_0)\ge 2-\varepsilon _0$ and hence $\Vert x+y_0\Vert \ge
2-\varepsilon _0$.
(b)$\Rightarrow $(a). Let $T\in \mathcal{L}(X,Y)$, $Tx=x_0^{*}(x)y_0$ be a
rank one operator. We can assume that $\Vert T\Vert =1$ (see, for example,
\cite{AbraAB}) and $\Vert x_0^{*}\Vert =\Vert y_0\Vert =1$. Fix any $
\varepsilon >0$. Then there is an $x\in S(x_0^{*},\frac \varepsilon 2)$ such
that $\Vert x+y_0\Vert >2-\frac \varepsilon 2$. So,
\[
\Vert J+T\Vert \geq \Vert x+x_0^{*}(x)y_0\Vert \geq \Vert x+y_0\Vert
-|1-x_0^{*}(x)|>2-\varepsilon \text{.}
\]
Let $\varepsilon $ go to zero.
The proof of equivalence (a)$\Leftrightarrow $(c) is analogous.\nopagebreak
$\Box $
One can see that the slices $S(x_1^{*},\varepsilon _1)$ and $
S(y_1,\varepsilon _1)$ in the statement of Lemma \ref{chl1} can be replaced
by vectors $x$ and $y^{*}$. We will often refer to Lemma \ref{chl1} in this
form.
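For orientation, let us sketch the simplest instance of (b) in this vector form (this illustration is ours and is not needed later): let $X=Y=C[0,1]$, $x_0^{*}=\delta _{t_0}$ and $y_0\in S(C[0,1])$. Choose $t_1\neq t_0$ with $|y_0(t_1)|>1-\varepsilon _0$ (possible by continuity, since $[0,1]$ has no isolated points) and a norm-one $f\in C[0,1]$ with $f(t_0)=1$ and $f(t_1)=\limfunc{sign}y_0(t_1)$. Then $f\in S(\delta _{t_0},\varepsilon _0)$ and $\Vert f+y_0\Vert \geq |f(t_1)+y_0(t_1)|>2-\varepsilon _0$.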
We mention some remarkable consequences of Lemma \ref{chl1} (the proofs can
be found in \cite{kssw}). First, if $X$ has the Daugavet property then $X$
(and $X^{*}$) contains an isomorphic copy of $\ell _1$, and moreover,
vectors equivalent to the canonical basis of $\ell _1$ can be chosen in
arbitrary slices of $B(X)$ (and weak$^{*}$ slices of $B(X^{*})$). Hence,
neither $X$ nor $X^{*}$ possess the Radon-Nikod\'ym property provided $X$
has the Daugavet property (see also \cite{woj} and \cite{wer}). Second, all
strong Radon-Nikod\'ym operators and, in particular, all weakly compact
operators on a Daugavet pair satisfy the Daugavet equation. Below we isolate
another such class of operators, namely those not fixing copies of $\ell
_1 $, but first we need the following modification of Lemma \ref{chl1},
which shows that we can operate with weak open sets as well as with slices.
\begin{lemma}
\label{chl2}The following are equivalent:
\begin{enumerate}
\item[(a)] The pair $(X,Y)$ has the Daugavet property;
\item[(b)] For any given $\varepsilon >0$, $y\in S(Y)$ and weak open set $U$
in $X$ with $U\cap B(X)\neq \emptyset $ there is a weak open set $V$ in $X$
with $V\cap B(X)\neq \emptyset $ and $V\cap B(X)\subset U\cap B(X)$ such
that $\Vert v+y\Vert >2-\varepsilon $, whenever $v\in V\cap B(X)$;
\item[(c)] For any given $\varepsilon >0$, $x^{*}\in S(X^{*})$ and weak$
^{*} $ open set $U$ in $Y^{*}$ with $U\cap B(Y^{*})\neq \emptyset $ there is
a weak$^{*}$ open set $V$ in $Y^{*}$ with $V\cap B(Y^{*})\neq \emptyset $
and $V\cap B(Y^{*})\subset U\cap B(Y^{*})$ such that $\Vert v_{\mid
X}+x^{*}\Vert >2-\varepsilon $, whenever $v\in V\cap B(Y^{*})$.
\end{enumerate}
\end{lemma}
\emph{Proof.}\textbf{\ }Let us prove (a)$\Rightarrow $(b).
First we consider the weak$^{*}$ open set $U^{**}$ in $X^{**}$ that induces $
U$ on $X$, i.e. $U^{**}\cap X=U$. By the Krein-Milman Theorem, there is a
convex combination of extreme points of $B(X^{**})$, $\stackrel{n}{
\stackunder{i=1}{\sum }}\lambda _ix_i^{**}$, such that $\stackrel{n}{
\stackunder{i=1}{\sum }}\lambda _ix_i^{**}\in U^{**}$. Clearly, we can find
weak$^{*}$ open neighborhoods $\{U_i^{**}\}_{i=1}^n$ of the points $
\{x_i^{**}\}_{i=1}^n$ respectively, for which the following inclusion holds:
\begin{equation}
\stackrel{n}{\stackunder{i=1}{\sum }}\lambda _i(U_i^{**}\cap
B(X^{**}))\subset U^{**}. \label{incl1}
\end{equation}
Now by the Choquet Lemma (weak$^{*}$ slices containing an extreme point form
a basis of its weak$^{*}$ neighborhoods, \cite[p.49]{hhz}), we can assume
that the sets $\{U_i^{**}\cap B(X^{**})\}_{i=1}^n$ are weak$^{*}$ slices.
Thus, inclusion (\ref{incl1}) restricted to $X$ reads as follows: $\stackrel{
n}{\stackunder{i=1}{\sum }}\lambda _iS_i\subset U$, where $S_i=U_i^{**}\cap
B(X^{**})\cap X$ are slices for all $i=1,2,\ldots ,n$.
Employing Lemma \ref{chl1}(b) we find a vector $x_1\in S_1$ with $\Vert
\lambda _1x_1+y\Vert >(\lambda _1+1-\varepsilon )$. Analogously, there is an
$x_2\in S_2$ with $\Vert \lambda _2x_2+\lambda _1x_1+y\Vert >(\lambda
_2+\lambda _1+1-\varepsilon )$. Continuing in the same way we finally find $
x_n\in S_n$ with $\Vert \lambda _nx_n+\lambda _{n-1}x_{n-1}+\ldots +\lambda
_1x_1+y\Vert >(\lambda _n+\lambda _{n-1}+\ldots +\lambda _1+1-\varepsilon
)=2-\varepsilon $, and $\stackrel{n}{\stackunder{i=1}{\sum }}\lambda
_ix_i\in U$. It remains only to use the lower weak semicontinuity of the norm
to get the required weak open set $V$.
This completes the proof of implication (a)$\Rightarrow $(b).
The implication (a)$\Leftarrow $(b) follows from Lemma \ref{chl1} and the
equivalence (a)$\Leftrightarrow $(c) is proved in the same way.\nopagebreak
$\Box $
\begin{theorem}
\label{oper}If the pair $(X,Y)$ has the Daugavet property, then every
operator from $\mathcal{L}(X,Y)$ not fixing copies of $\ell _1$ satisfies
the Daugavet equation.
\end{theorem}
\emph{Proof.} Let $T\in \mathcal{L}(X,Y)$, $\Vert T\Vert =1$, be such an
operator and $\varepsilon >0$ be arbitrary.
Our considerations will rely on the following ``releasing principle'':
suppose for some finite set of vectors $\{x_i\}_{i=1}^n\subset B(X)$ and
some $\varepsilon >0$ the inequalities
\begin{equation}
\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\theta _ix_i\right\|
>n-\varepsilon , \label{oper2}
\end{equation}
and
\begin{equation}
\left\| \stackunder{i\in I_1}{\sum }a_ix_i+\stackunder{i\in I_2}{\sum }
a_iTx_i\right\| >\left( \stackunder{i\in I_1\cup I_2}{\sum a_i}\right)
(1-\varepsilon ) \label{oper4}
\end{equation}
hold for all non-negative reals $a_i$, signs $\theta _i$, and some disjoint
sets $I_1$, $I_2\subset \{1,2,\ldots ,n\}$. Then there is a weak open set $
U\subset X$ such that (\ref{oper2}) and (\ref{oper4}) remain true for all $
x_n\in U\cap B(X)$.
Let us prove this. By a compactness argument, there is a $\delta >0$ such
that
\begin{equation}
\left\| \stackunder{i\in I_1}{\sum }a_ix_i+\stackunder{i\in I_2}{\sum }
a_iTx_i\right\| >1-\varepsilon +\delta , \label{oper3}
\end{equation}
whenever $\stackunder{i\in I_1\cup I_2}{\sum a_i}=1$ and $I_1$, $I_2$ as
above. Fix a finite $\frac \delta 2$-net $\left\{ (a_{k,1},a_{k,2},\ldots
,a_{k,n})\right\} _{k=1}^K$ in the set $\left\{ (a_1,a_2,\ldots ,a_n):~
\stackrel{n}{\stackunder{i=1}{\sum }}a_i=1,~a_i\geq 0\right\} $ equipped
with the $\ell _1$-metric. Using the lower weak semicontinuity of the norm and
the weak continuity of a bounded linear operator we conclude that there is a
weak open set $U$ such that both (\ref{oper2}) and (\ref{oper3}) hold for $
a_i=a_{k,i}$, $i=1,2,\ldots ,n$, $k=1,2,\ldots ,K$, whenever $x_n$ is replaced by any $z_n\in U\cap
B(X)$. It is not hard to see that $U$ is as desired.
Now we construct a sequence $\{x_i\}_{i=1}^\infty \subset B(X)$ which
satisfies (\ref{oper2}) and (\ref{oper4}) for all non-negative reals $a_i$,
signs $\theta _i$ and all disjoint finite sets $I_1$, $I_2\subset \Bbb{N}$ .
Assume that we have constructed such a sequence $\{x_i\}_{i=1}^n$ of length $
n.$ We now show that, altering only the last term $x_n$, one can find
a further vector $x_{n+1}$ such that the resulting sequence of length $n+1$
satisfies (\ref{oper2}) and (\ref{oper4}). Arguing in this way, we produce
the desired infinite sequence, provided we take $x_1\in S(X)$ with $\Vert
Tx_1\Vert >1-\varepsilon $ in the first step.
Let us put $x_{n+1}^{\prime }=x_n$ for a moment. Clearly, (\ref{oper4})
remains true for the sequence $x_1,x_2,\ldots ,x_n,x_{n+1}^{\prime }$ and
all $I_1,I_2$ with the additional restriction that if one of them contains $n$ then
the other does not contain $n+1$. We get rid of this restriction by
altering $x_n$ and $x_{n+1}^{\prime }$. To this end, we use the
``releasing principle'' for $x_{n+1}^{\prime }$ and find the corresponding
weak open set $U\subset X$. Applying Lemma \ref{chl2}(b) several times
yields a vector $x_{n+1}\in U\cap B(X)$ such that (\ref{oper2}) is valid for
the sequence $x_1,x_2,\ldots ,x_n,x_{n+1}$ and (\ref{oper4}) holds without
the restriction in the case when $I_1$ contains $n+1$ and $I_2$ contains $n$.
Then we use the ``releasing principle'' to release $x_n$ so that both (\ref
{oper2}) and (\ref{oper4}) remain true. Appealing to Lemma \ref{chl2}(b) we
finally get an $x_n^{\prime }$ such that (\ref{oper4}) holds for the
sequence $x_1,x_2,\ldots ,x_n^{\prime },x_{n+1}$ without any restrictions on
$I_1$ and $I_2$. Inequality (\ref{oper2}) is satisfied automatically.
The constructed sequence is $(1-\varepsilon )$-equivalent to the canonical
basis of $\ell _1$, for if $\stackrel{n}{\stackunder{i=1}{\sum }}|\lambda
_i|=1$, then by (\ref{oper2}) we have
\begin{eqnarray*}
\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\lambda _ix_i\right\|
&=&\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\limfunc{sign}\lambda
_i\cdot x_i+\stackrel{n}{\stackunder{i=1}{\sum }}(\lambda _i-\limfunc{sign}
\lambda _i)\cdot x_i\right\| \\
&>&n-\varepsilon -\stackrel{n}{\stackunder{i=1}{\sum }}|\lambda _i-\limfunc{
sign}\lambda _i|=n-\varepsilon -\stackrel{n}{\stackunder{i=1}{\sum }}
|1-|\lambda _i|| \\
&=&n-\varepsilon -n+1=1-\varepsilon .
\end{eqnarray*}
Since $T$ fixes no copies of $\ell _1$, by Rosenthal's Lemma we may assume
that the sequence $(Tx_n)_{n=1}^\infty $ is weakly Cauchy. Thus, $
(Tx_{2n+1}-Tx_{2n})_{n=1}^\infty $ is weakly null. By Mazur's Theorem there
are two finite disjoint sets $I_1$, $I_2\subset \Bbb{N}$ such that for some $p\in
\limfunc{conv}\{x_i:i\in I_1\}$ and $q\in \limfunc{conv}\{x_i:i\in I_2\}$ we
have $\Vert Tp-Tq\Vert <\varepsilon $. From this and (\ref{oper4}) we
finally obtain
\[
\Vert p+Tp\Vert >\Vert p+Tq\Vert -\varepsilon >2(1-\varepsilon )-\varepsilon
=2-3\varepsilon ,
\]
which implies $\Vert J+T\Vert =2$ in view of the arbitrariness of $\varepsilon $.
This finishes the proof.\nopagebreak
$\Box $
It is known that $C(K)$ has the Daugavet property (see \cite{fs} or \cite{h2}
) if $K$ is a compact Hausdorff space without isolated points. Besides, due
to a result of Rosenthal \cite{ros} and by Lemma 2.4 from \cite{ww} it
follows that operators on $C(K)$ not fixing copies of $C[0,1]$ are precisely
those not fixing copies of $\ell _1$. So, from the previous theorem we
obtain that all such operators satisfy the Daugavet equation. This result
was first established by Weis and Werner in their paper \cite{ww}. By
Theorem \ref{oper} we also solve a problem posed in \cite{kssw}.
\begin{corollary}
Suppose $X$ is a Daugavet space and $Y$ is a complemented subspace in $X$
such that $X/Y$ contains no copies of $\ell _1$, then the norm of every
projection from $X$ onto $Y$ is at least $2$.
\end{corollary}
\emph{Proof.} Let $P:X\rightarrow X$ be any projection onto $Y$. Then $-Id+P$
fixes no copies of $\ell _1$ and hence, by Theorem \ref{oper}, satisfies the
Daugavet equation. So, we have $\Vert P\Vert =\Vert Id+(-Id+P)\Vert =1+\Vert
P-Id\Vert \geq 2$.\nopagebreak
$\Box $
\noindent \textbf{Problem 1. }It remains open whether every
Dunford-Pettis operator on a Daugavet pair satisfies the Daugavet equation.
\noindent \textbf{Problem 2. }One of the remarkable
characterizations of Banach spaces not containing isomorphic copies of $\ell
_1$ is that the duals of such spaces possess the weak Radon-Nikod\'ym
property. Thus, no dual to a Daugavet space has this property. It is not
known, however, if the same is true for a Daugavet space itself.
Now we discuss the following question: suppose $X$ has the Daugavet
property; what classes of subspaces of $X$ possess the same property?
It was shown in \cite{kssw} that all subspaces with separable
annihilator do. Such an effect could be attributed to the extreme ``spreadness''
of the unit ball of a Daugavet space (see Lemmas \ref{chl1} and \ref{chl2}). We will
repeatedly use this idea later on.
\begin{theorem}
\label{her2}Let $X$ have the Daugavet property and $Y$ be a subspace of $X$.
\begin{enumerate}
\item[(a)] If $X/Y$ has the Radon-Nikod\'ym property, then the pair $(Y,X)$
has the Daugavet property;
\item[(b)] If $Y$ is reflexive, then $X/Y$ has the Daugavet property.
\end{enumerate}
\end{theorem}
In the particular case when $X=L_1[0,1]$ part (b) of~Theorem \ref{her2} was
proved in \cite{kssw}.
\emph{Proof.} Part (a). According to Lemma \ref{chl1}(b) it is
sufficient to prove that given any $\delta >0$, $S(y^{*},\varepsilon )$ and $
x\in B_X$, there is a $y\in S(y^{*},\varepsilon )$ such that $\Vert x+y\Vert
>2-\delta $.
Denote by $j$ the quotient map $j:X\rightarrow X/Y$. Keeping the same notation for the
functional $y^{*}$, we extend it to all of $X$ by the Hahn-Banach Theorem.
The set $A=j(S(y^{*},\varepsilon ))$ is convex and contains the origin.
Since $X/Y$ has the Radon-Nikod\'ym property, the Phelps Theorem (see for
example \cite{Die-LNM}) yields a convex combination $\stackrel{n}{
\stackunder{i=1}{\sum }}\lambda _ia_i$ of strongly exposed points $
\{a_i\}_{i=1}^n$ of the set $\overline{A}$ for which
\begin{equation}
\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\lambda _ia_i\right\| <\frac
\delta 2. \label{hereq1}
\end{equation}
Let $\{a_i^{*}\}_{i=1}^n\subset (X/Y)^{*}$ be functionals exposing $
\{a_i\}_{i=1}^n$ respectively and let positive numbers $\{\varepsilon
_i\}_{i=1}^n$ be such that
\begin{equation}
\limfunc{diam}\left\{ S(a_i^{*},\varepsilon _i)\cap \overline{A}\right\}
<\frac \delta 4\text{, }i=1,2,...,n. \label{hereq2}
\end{equation}
Since $S(a_i^{*},\varepsilon _i)\cap A\neq \emptyset $, we have $
S(j^{*}a_i^{*},\varepsilon _i)\cap S(y^{*},\varepsilon )\neq \emptyset $.
Applying Lemma \ref{chl2}(b) we find $x_i\in S(j^{*}a_i^{*},\varepsilon
_i)\cap S(y^{*},\varepsilon )$ such that
\[
\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\lambda _ix_i+x\right\|
>2-\frac \delta 4
\]
Now taking into account (\ref{hereq1}) and (\ref{hereq2}) we obtain the
following estimate:
\[
\left\| j(\stackrel{n}{\stackunder{i=1}{\sum }}\lambda _ix_i)\right\|
<\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\lambda _ia_i\right\| +\frac
\delta 4<\frac 34\delta .
\]
It means that there is a $y\in B_Y$ for which
\[
\left\| \stackrel{n}{\stackunder{i=1}{\sum }}\lambda _ix_i-y\right\| <\delta
.
\]
Then by (\ref{hereq2}) we finally get
\[
\Vert x+y\Vert >2-\frac 32\delta .
\]
Clearly, $y\in S(y^{*},\varepsilon +\delta )$.
Because of arbitrariness of $\varepsilon $ and $\delta $, part (a) is proved.
The proof of part (b) is analogous (we have only to use the weak$^{*}$
topology and apply Lemma \ref{chl2}(c)). \nopagebreak
$\Box $
\noindent \textbf{Problem 3.} Under the conditions of Theorem \ref
{her2},
(a) does $Y$ have the Daugavet property if $X/Y$ is an Asplund space
(equivalently, $(X/Y)^{*}$ has the Radon-Nikod\'ym property) or, more
generally, if $X/Y$ fails to contain isomorphic copies of $\ell _1$?
(b) does $X/Y$ have the Daugavet property if either $Y$ or $Y^{*}$ (or both)
has the Radon-Nikod\'ym property or fails to contain isomorphic copies of $
\ell _1$?
\section{Subspaces of C(K)-spaces.}
Now we study the case when in a pair $(X,Y)$ the space $Y$ is a $C(K)$-space
for some compact Hausdorff space $K$. As was shown in various works (see
\cite{fs} or \cite{ww}) and as also follows from our Lemma \ref{chl1}, $C(K)$
has the Daugavet property if and only if $K$ has no isolated points.
Moreover, we can assert that if for some $X\subset C(K)$ the pair $(X,C(K))$
has the Daugavet property, then $K$ has no isolated points, for
otherwise, for an isolated point $k$, the rank one operator $Tx=-\chi _{\{k\}}\cdot x(k)$ does not
satisfy the Daugavet equation. So, when investigating pairs of the form $(X,C(K))$
it is natural to require that $K$ have no isolated points.
We begin with a characterization of those subspaces $X\subset C(K)$
for which the pair $(X,C(K))$ has the Daugavet property. In the sequel, $\delta
_k^{*}$, $k\in K$, stands for the functional on $C(K)$ acting by the rule $
\delta _k^{*}(f)=f(k)$, $f\in C(K)$.
\begin{lemma}
\label{chC(K)}Let $X$ be a subspace of $C(K)$, where $K$ is a compact
Hausdorff space without isolated points. The following conditions are
equivalent:
\begin{enumerate}
\item[(a)] The pair $(X,C(K))$ has the Daugavet property;
\item[(b)] For every $\varepsilon >0$, $x^{*}\in S(X^{*})$ and open set $U$
in $K$ there exists a point $u\in U$ such that $\Vert x^{*}+\delta _{u\mid
X}^{*}\Vert >2-\varepsilon $;
\item[(c)] For every $x^{*}\in S(X^{*})$ and open set $U$ in $K$ there
exists a (closed) $G_\delta $-set $G$ in $U$ such that $\Vert x^{*}+\delta
_{u\mid X}^{*}\Vert =2$, whenever $u\in G$.
\end{enumerate}
\end{lemma}
\emph{Proof.} (a)$\Rightarrow $(b). Let $f\in S(C(K))$ be a function
vanishing outside $U$. By Lemma \ref{chl1}(c), there is a slice $S\subset
S(f,\frac 12)$ such that $\Vert x^{*}+\mu \Vert >2-\varepsilon $, for all $
\mu \in S$. Pick any $\delta _u^{*}\in S$. Clearly, $\delta
_u^{*}(f)=f(u)>\frac 12$ and hence, $u\in U$. So, $u$ is the required point.
(b)$\Rightarrow $(c). Apply part (b) countably many times and use the weak$
^{*}$ lower semicontinuity of the dual norm and the regularity of a compact
Hausdorff space.
(c)$\Rightarrow $(a). We apply Lemma \ref{chl1} again. Pick arbitrary $
x^{*}\in S(X^{*})$ and weak$^{*}$ slice $S(f,\varepsilon )$ in $B(C^{*}(K))$
. Let $U=\{k\in K:f(k)>1-\varepsilon \}$. By condition (c), we can find a
point $u\in U$ such that $\Vert x^{*}+\delta _{u\mid X}^{*}\Vert =2$.
Moreover, we have $\delta _u^{*}(f)=f(u)>1-\varepsilon $ and hence, $\delta
_u^{*}\in S(f,\varepsilon ).$ This completes the proof.\nopagebreak
$
\Box $
Of course, not every pair $(X,C(K))$ has the Daugavet property
provided $X$ does; consider, e.g., the pair $(C[0,1],C([0,1]\cup [2,3]))$. However, as
the following proposition shows, in some natural and useful cases this is true.
\begin{proposition}
\label{prC(K)1}If the pair $(X,Y)$ has the Daugavet property and $K$ is
either $B(Y^{*})$ or $\overline{\limfunc{ext}}B(Y^{*})$, then the pair $
(X,C(K))$ also has the Daugavet property.
\end{proposition}
\emph{Proof.} In both cases we use condition (b) of Lemma \ref{chC(K)}.
First, consider $K=B(Y^{*})$. Fix an arbitrary $\varepsilon >0$, an open set $
U\subset K$ and $x^{*}\in S(X^{*})$. By Lemma \ref{chl2}(c) there is $
y^{*}\in U$ such that $\Vert x^{*}+y_{\mid X}^{*}\Vert >2-\varepsilon $. We
denote by $u$ the functional $y^{*}$ regarded as a point of the topological
space $K$. It remains to notice that $\delta _{u\mid X}^{*}=y_{\mid X}^{*}$.
Let $K=\overline{\limfunc{ext}}B(Y^{*})$. Fix $\varepsilon $, $U$ and $x^{*}$
as above. By the Choquet Lemma we may assume that $U$ is induced by a slice $
S$. By Lemma \ref{chl1}(c) there is a slice $S_1\subset S$, and hence, there
is a $y^{*}\in S\cap K$ such that $\Vert x^{*}+y_{\mid X}^{*}\Vert
>2-\varepsilon $. So, as above, the point $u=y^{*}$ is as required.\nopagebreak
$\Box $
In the case $K=B(Y^{*})$ this proposition solves a problem posed in
\cite{kssw}. The result was proved there for $K=\overline{\limfunc{ext}}
B(Y^{*})$. However, we include both cases to emphasize their common origin.
Let $K$ be a compact Hausdorff space without isolated points. We
introduce the following spaces:
\begin{eqnarray*}
l_\infty (K) &=&\left\{ f:K\rightarrow {\Bbb R},\quad \Vert f\Vert _\infty =\sup
_{s\in K}|f(s)|<{\infty }\right\} , \\
m(K) &=&\left\{ f\in l_\infty (K):\limfunc{supp}(f)\ \text{is a first
category set}\right\} , \\
m_0(K) &=&l_\infty (K)/m(K).
\end{eqnarray*}
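To illustrate the factor-norm, note for instance that for $K=[0,1]$ the
characteristic function of ${\Bbb Q}\cap [0,1]$ belongs to $m(K)$ (its
support is countable, hence of first category), so its class in $m_0(K)$ is
zero although its sup-norm equals $1$.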
In what follows we investigate Daugavet properties of the space $m_0(K)$. In
the next section we use them to prove some general results on renormings.
$m_0(K)$ equipped with the factor-norm is a real $C^{*}$-algebra, and hence,
is a $C(Q)$-space. The appropriate compact set $Q=Q_K$ can be defined as the
set of all real homomorphisms on $m_0(K)$ endowed with the induced weak$^{*}$
topology. These are precisely the limits along ultrafilters on $K$ which do not
contain first category sets. Let $\frak{U}$ be such an ultrafilter. We
denote by $\lim \frak{U}$ the point in $K$ to which it converges and by $
\frak{U}\_\lim $ the real homomorphism on $m_0(K)$ it generates ($\frak{U}
\_\lim \in Q_K$).
\begin{lemma}
\label{al}Suppose $U$ is an open set in $Q_K$, then there is an open set $V$
in $K$ such that for every $v\in V$ one can find an ultrafilter $\frak{U}_v$
on $K$ with $\lim \frak{U}_v=v$ and $\frak{U}_v\_\lim \in U$.
\end{lemma}
\emph{Proof.} By the construction of $Q_K$ we may assume that there are a finite
set $(f_i)_{i=1}^n\subset m_0(K)$, $\varepsilon >0$ and an ultrafilter $\frak{U}
_0$ on $K$ such that $U=\{\varphi \in Q_K:|\varphi (f_i)-\frak{U}_0\_\lim
(f_i)|<\varepsilon ,\ i=1,2,\ldots ,n\}$. Denote $a_i=\frak{U}_0\_\lim (f_i)$. We fix a second
category set $A\in \frak{U}_0$ with the following property:
\begin{equation}
f_i(A)\subset (a_i-\varepsilon \;,\;a_i+\varepsilon )\text{,\quad }
i=1,2,\ldots ,n. \label{incl}
\end{equation}
Then we find an open set $V$ in $K$ such that for any open $W\subset V$, $
W\cap A$ is a second category set (see \cite{Kelley}). It remains to show
that $V$ is as required.
Indeed, let $v\in V$. Consider an ultrafilter $\frak{U}_v$ containing $
\{W\cap A:W$ is an open neighborhood of $v\}$. Plainly, $\lim \frak{U}_v=v$.
On the other hand, in view of (\ref{incl}) we have $\frak{U}_v\_\lim
(f_i)\in (a_i-\varepsilon \;,\;a_i+\varepsilon )$, $i=1,2,\ldots ,n$. This
means that $\frak{U}_v\_\lim \in U$. This finishes the proof.\nopagebreak
$\Box $
It is easy to see that $C(K)$ is isometrically embedded into $
m_0(K)$ by the quotient map.
\begin{proposition}
\label{prC(K)2}If the pair $(X,C(K))$ has the Daugavet property, then the
pair $(X,m_0(K))$ also has the Daugavet property.
\end{proposition}
\emph{Proof.} We apply Lemma \ref{chC(K)} again using the interpretation of $
m_0(K)$ as a $C(Q)$-space. To this end, we fix $\varepsilon >0$, open set $
U\subset Q_K$ and $x^{*}\in S(X^{*})$. Applying Lemma \ref{al} to $U$ we
find the corresponding open set $V\subset K$. Lemma \ref{chC(K)} applied to
the pair $(X,C(K))$ yields $v\in V$ such that $\Vert x^{*}+\delta _{v\mid
X}^{*}\Vert >2-\varepsilon $. Consider the ultrafilter $\frak{U}_v$ with $
\lim \frak{U}_v=v$ and $\frak{U}_v\_\lim \in U$, and denote $u=\frak{U}
_v\_\lim $. So, $\delta _{v\mid X}^{*}=\delta _{u\mid X}^{*}$ and $u\in U$.
Hence, the point $u$ is desired.\nopagebreak
$\Box $
\begin{corollary}
The pair $(C(K),m_0(K))$ has the Daugavet property.
$\Box $
\end{corollary}
\begin{corollary}
\label{corm(K)}If the pair $(X,Y)$ has the Daugavet property and $K$ is
either $B(Y^{*})$ or $\overline{\limfunc{ext}}B(Y^{*})$, then the pair $
(X,m_0(K))$ has the Daugavet property too.
\end{corollary}
\emph{Proof.} Combine Propositions \ref{prC(K)1} and \ref{prC(K)2}.
\nopagebreak
$\Box $
\section{Renorming theorem.}
The main goal of this section is to prove the following result.
\begin{theorem}
\label{renth}Let $X$, $Y$, $Z$ be Banach spaces such that $X\subset Y\subset
Z$. If the pair $(X,Y)$ has the Daugavet property, then $Z$ can be renormed
so that $(X,Z)$ possesses the Daugavet property and the equivalent norm
coincides with the original one on $Y$.
\end{theorem}
In the separable case this theorem was proved in \cite{kssw}. The general case,
however, requires more detailed consideration. Therefore we present the
complete proof here.
First we prove a theorem which establishes, in some sense, a property of
universality of $m_0(K)$-spaces, where $K$ is the unit ball of a dual space.
Since in the sequel we often deal with the density character of a Banach space $
X $ (the minimal cardinality of a dense subset of $X$), we denote it by $
\limfunc{dens}(X)$.
\begin{theorem}
\label{embth}Let $Y$ be a closed subspace of Banach spaces $Z$ and $W$. Let
also $\limfunc{dens}(Z/Y)=\beta $, where $\beta $ is an ordinal. Suppose $
B(W^{*})$ contains a family $\{B_\alpha \}_{\alpha <\beta }$ of disjoint
second category sets such that if $B^{\prime }=\stackunder{\alpha <\beta }{
\cup }B_\alpha $, then $B^{\prime }\cap -B^{\prime }=\emptyset $. Then there
is an isomorphic embedding $E:Z\rightarrow m_0(B(W^{*}))$, which coincides
with the natural one on $Y$.
\end{theorem}
\emph{Proof.} Let us fix a dense set $([z_\alpha ])_{\alpha <\beta }\subset
B(Z/Y)$ with $\Vert z_\alpha \Vert \leq 1$, and for every $\alpha <\beta $
find a functional $\varphi _\alpha \in S(Y^{\bot })$ so that $\varphi
_\alpha (z_\alpha )=\Vert [z_\alpha ]\Vert $. Also, to every $w^{*}\in B(W^{*})$ we
assign a functional $\widetilde{w}^{*}$ obtained by restricting $w^{*}$
to $Y$ and then extending it to all of $Z$ by the Hahn-Banach Theorem.
Now we want to embed $Z$ into $\ell _\infty (B(W^{*}))$ so that every
element from the image of $B(Z)$ takes values greater than $\frac 18$ on a
second category set. To this end, for each $z\in Z$ we define a function $
f_z\in \ell _\infty (B(W^{*}))$ as follows:
\[
f_z(w^{*})=\left\{
\begin{array}{ll}
\widetilde{w}^{*}(z), & w^{*}\in B(W^{*})\backslash B^{\prime } \\
\widetilde{w}^{*}(z)+8\varphi _\alpha (z), & w^{*}\in B_\alpha
\end{array}
\right. .
\]
Clearly the mapping $F:z\rightarrow f_z$ is linear and bounded. Moreover, $
f_z(w^{*})=w^{*}(z)$, if $z\in Y$. So, $F_{\mid Y}$ is the natural embedding
of $Y$ into $\ell _\infty (B(W^{*}))$ (even into $C(B(W^{*}))$).
Suppose now $\Vert z\Vert =1$. Then either $\Vert [z]\Vert \leq \frac 14$ or
$\Vert [z]\Vert >\frac 14$. In the former case there is a $y_0\in Y$ such
that $\Vert z-y_0\Vert <\frac 38$ (so that $\Vert y_0\Vert >\frac 58$). Because of the condition imposed on $
B^{\prime }$, the set $\{w^{*}\in B(W^{*})\backslash B^{\prime
}:w^{*}(y_0)>\Vert y_0\Vert -\frac 18\}$ is of second category, and for
each of its elements we have
\[
|f_z(w^{*})|=|\widetilde{w}^{*}(z)|>|\widetilde{w}^{*}(y_0)|-\frac
38=|w^{*}(y_0)|-\frac 38>\Vert y_0\Vert -\frac 12>\frac 18.
\]
So, $|f_z(w^{*})|>\frac 18$ for all $w^{*}$ from some second category set.
In the case $\Vert [z]\Vert >\frac 14$, there is an ordinal $\alpha $, $
\alpha <\beta $, and $y\in Y$ such that $\Vert [z_\alpha ]\Vert >\frac 14$
and $\Vert z-z_\alpha -y\Vert <\frac 1{16}$. From this we get for all $
w^{*}\in B_\alpha $
\begin{eqnarray*}
|f_z(w^{*})| &=&|\widetilde{w}^{*}(z)+8\varphi _\alpha (z)| \\
&>&|8\varphi _\alpha (z_\alpha -y)|-\frac 12-|\widetilde{w}
^{*}(z)|\geq 8\Vert [z_\alpha ]\Vert -\frac 32 \\
&>&\frac 84-\frac 32=\frac 12.
\end{eqnarray*}
To define the desired isomorphic embedding $E:Z\rightarrow m_0(B(W^{*}))$ we
just put $Ez=[Fz]$, $z\in Z$.\nopagebreak
$\Box $
It is not hard to construct a countable number of second category sets
satisfying the condition of the previous theorem. So, in the special case
when $Z/Y$ is separable, we obtain the following corollary.
\begin{corollary}
Let $Y$ be a closed subspace of $Z$ such that $Z/Y$ is separable. Then there
exists an isomorphic embedding of $Z$ into $m_0(B(Y^{*}))$, which coincides
with the natural one on $Y$.
\end{corollary}
\emph{Proof of Theorem \ref{renth}.}
Suppose $(X,Y)$ is a Daugavet pair and $Z$ is some Banach space containing $
Y $. If $B(Y^{*})$ were sufficiently ``rich'' in disjoint second category sets,
i.e. rich enough to satisfy the condition of Theorem \ref{embth} (with $
W=Y$), there would exist an isomorphic embedding $E$ of $Z$ into $
m_0(B(Y^{*}))$. Appealing to Corollary \ref{corm(K)}, the equivalent norm $
|||z|||=\Vert Ez\Vert $ would be the desired one.
That, however, may not be the case, for example, when $\limfunc{dens}(Z)>
\limfunc{dens}(m_0(B(Y^{*})))$. So, we should replace $Y$ by a bigger space,
say $W$, which meets the condition of Theorem \ref{embth} and at the same
time has the Daugavet property in the pair $(X,W)$. If we can do this,
the norm introduced in the previous case satisfies our requirements, and we
are done.
Let $\beta $ be as in Theorem \ref{embth}. We define $W$ to be the $\ell
_\infty $-sum of $\beta $ copies of $C(B(Y^{*}))$, i.e. $W=\left\{ (f_\alpha
)_{\alpha <\beta }:f_\alpha \in C(B(Y^{*}))\text{ and }\Vert (f_\alpha
)\Vert =\stackunder{\alpha <\beta }{\sup }\Vert f_\alpha \Vert <\infty
\right\} $. $Y$ embeds into $W$ as follows:
\begin{eqnarray*}
y &\rightarrow &(y_\alpha )_{\alpha <\beta }\text{,\quad }y\in Y\text{;} \\
y_\alpha (s) &=&s(y)\text{,\quad }s\in B(Y^{*})\text{.}
\end{eqnarray*}
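Note that this embedding is isometric: for every $y\in Y$,
\[
\Vert (y_\alpha )_{\alpha <\beta }\Vert =\stackunder{s\in B(Y^{*})}{\sup }
|s(y)|=\Vert y\Vert .
\]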
So, $Y$ can be regarded as a subspace of $W$. Using Proposition \ref{prC(K)1}
, it is not difficult to prove that the pair $(X,W)$ has the Daugavet
property.
Now fix $f\in C(B(Y^{*}))$, $\Vert f\Vert =1$, and for every $\alpha $, $
\alpha <\beta $, define the vector $w_\alpha =(f_{\alpha ^{\prime
}})_{\alpha ^{\prime }<\beta }$ so that $f_{\alpha ^{\prime }}=f$, if $
\alpha ^{\prime }=\alpha $, and $f_{\alpha ^{\prime }}=0$ otherwise. Put $
B_\alpha =S(w_\alpha ,\frac 13)$. Since every $B_\alpha $ is weak$^{*}$
open, it is a second category set. Next, $B_{\alpha ^{\prime }}\cap
B_{\alpha ^{\prime \prime }}=\emptyset $, $\alpha ^{\prime }\neq \alpha
^{\prime \prime }$, for otherwise every $w^{*}\in B_{\alpha ^{\prime }}\cap
B_{\alpha ^{\prime \prime }}$ would have norm bigger than 1. For the same
reason, $B^{\prime }=\stackunder{\alpha <\beta }{\cup }B_\alpha $ is
disjoint with $-B^{\prime }$.
So, we have constructed a space satisfying all our requirements. This
finishes the proof. \nopagebreak
$\Box $
\begin{corollary}
\label{corunc}A Daugavet space does not isomorphically embed into an
unconditional sum of Banach spaces without copies of $\ell _1$.
\end{corollary}
The proof is the same as that of Corollary 2.7 in \cite{kssw}. We only have
to use our Theorem \ref{oper} and the fact that the sum of a finite number of
operators not fixing copies of $\ell _1$ is again an operator not fixing copies of
$\ell _1$.
It is worthwhile to remark that the previous result is a direct
generalization of the well-known theorem of Pelczy\'nski for the spaces $C[0,1]$ and $
L_1[0,1]$ (for more about that see \cite{En-St}, \cite{ks} and \cite
{Kalton}).
\noindent \textbf{Problem 4.} It would be interesting to find an answer
to the following question: if $(X,Y)$ is a Daugavet pair, can $Y$ be
renormed to have the Daugavet property? We may, however, assert that such a
renorming cannot be accomplished while leaving the norm on $X$ unchanged. In fact,
consider the space $L_\infty [0,1]$. It is 1-complemented in every Banach space
containing it. Since every 1-codimensional subspace of a Daugavet space is at
least 2-complemented, $L_\infty [0,1]\oplus {\Bbb R}$ cannot be renormed
to have the Daugavet property so that the equivalent norm remains the same
on $L_\infty [0,1]$.
\end{document}
\begin{document}
\title{Vertex Algebroids I}
\author{Paul Bressler}
\address{}
\email{bressler@@math.arizona.edu}
\date{\today}
\maketitle
\section{Introduction}
The purpose of this note is to give a ``coordinate free'' construction
and prove the uniqueness of the vertex algebroid which gives rise to
the chiral de Rham complex of \cite{GMS}. In order to do that, we adapt
the strategy of \cite{BD} to the setting of vertex algebroids.
To this end we show that the stack ${\mathcal EVA}_X$ of exact vertex algebroids on $X$
is a torsor under the stack in Picard groupoids ${\mathcal ECA}_X$ of exact Courant algebroids
on $X$ (and, in particular, is locally non-empty). Moreover, we show that
${\mathcal ECA}_X$ is naturally equivalent to the stack of
$\Omega^2_X @>d>> \Omega^{3,cl}_X$-torsors. These facts are proven for $X$ a manifold
($C^\infty$, (complex) analytic, smooth algebraic variety over ${\Bbb C}$). We leave
to the reader the obvious extension to differential graded manifolds.
Given a manifold $X$, let $X^\sharp$ denote the differential graded manifold with the
underlying space $X$ and the structure sheaf ${\cal O}_{X^\sharp}$ the de Rham complex of
$X$. We show that ${\mathcal ECA}_{X^\sharp}$ is, in fact, trivial. Together with local existence
of vertex ${\cal O}_{X^\sharp}$-algebroids this implies that there exists a vertex algebroid
over the de Rham complex which is unique up to a unique isomorphism. We give a ``coordinate-free''
description of the unique vertex ${\cal O}_{X^\sharp}$-algebroid in terms of
generators and relations.
\section{Vertex operator algebras}
Throughout this section we follow the notations of \cite{GMS}. The following
definitions are lifted from loc. cit.
\subsection{}
A {\it ${\Bbb Z}_{\geq 0}$-graded vertex algebra} is a
${\Bbb Z}_{\geq 0}$-graded $k$-module $V=\oplus\ V_i$, equipped with
a distinguished vector ${\mathbf{1}}\in V_0$ ({\it vacuum vector})
and a family of bilinear operations
\[
_{(n)}:\ V\times V @>>> V,\ (a, b)\mapsto a_{(n)}b
\]
of degree $-n-1$, $n\in {\Bbb Z}$, such that
\[
{\mathbf{1}}_{(n)}a=\delta_{n,-1}a;\ a_{(-1)}{\mathbf{1}}=a;\ a_{(n)}{\mathbf{1}}=0\
\text{\ if\ }n\geq 0,
\]
and
\begin{multline*}
\sum_{j=0}^\infty\ \binom{m}{j}(a_{(n+j)}b)_{(m+l-j)}c= \\
\sum_{j=0}^\infty\ (-1)^j\binom{n}{j}
\bigl\{a_{(m+n-j)}b_{(l+j)}c-(-1)^nb_{(n+l-j)}a_{(m+j)}c\bigr\}
\end{multline*}
for all $a,b,c\in V,\ m,n,l\in {\Bbb Z}$.
\subsection{}
A morphism of vertex algebras is a map of graded $k$-modules (of degree zero)
which maps the vacuum vector to the vacuum vector and commutes with all of
the operations.
Let ${\cal V} ert$ denote the category of vertex algebras.
\subsection{}
Let
\[
\partial^{(j)}a:=a_{(-1-j)}{\mathbf{1}},\ \ j\in{\Bbb Z}_{\geq 0} \ .
\]
Then, $\partial^{(j)}$ is an endomorphism of $V$ of degree $j$ which satisfies
(see \cite{GMS})
\begin{itemize}
\item $\partial^{(j)}{\mathbf{1}}=\delta_{j,0}{\mathbf{1}}$,
\item $\partial^{(0)}=Id$,
\item $\partial^{(i)}\cdot\partial^{(j)}=\binom{i+j}{i}\partial^{(i+j)}$,
\item $(\partial^{(j)}a)_{(n)}b=(-1)^j\binom{n}{j}a_{(n-j)}b$, and
\item $\partial^{(j)}(a_{(n)}b)=\sum_{p=0}^j\ (\partial^{(p)}a)_{(n)}\partial^{(j-p)}b$
\end{itemize}
for all $n\in{\Bbb Z}$.
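For example, specializing the fourth identity above to $j=1$ and writing
$T:=\partial^{(1)}$, one recovers the relation for what is usually called the
translation operator:
\[
(Ta)_{(n)}b=-na_{(n-1)}b,\ \ n\in{\Bbb Z} \ .
\]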
\subsection{}
The subject of the definition below is the restriction of the structure of a vertex
algebra to the graded components of degrees zero and one.
A {\it $1$-truncated vertex algebra} is a
septuple $v=(V_0,V_1,{\mathbf{1}},\partial,_{(-1)},_{(0)},_{(1)})$ where
\begin{itemize}
\item $V_0, V_1$ are $k$-modules,
\item ${\mathbf{1}}$ an element of $V_0$ ({\it vacuum vector}),
\item $\partial:\ V_0 @>>> V_1$ a $k$-linear map,
\item $_{(i)}:\ (V_0\oplus V_1)\times (V_0\oplus V_1) @>>> V_0\oplus V_1$
(where $(i=-1,0,1)$) are $k$-bilinear operations of degree $-i-1$.
\end{itemize}
Elements of $V_0$
(resp., $V_1$) will be denoted $a,b,c$ (resp., $x,y,z$). There are
seven operations: $a_{(-1)}b, a_{(-1)}x, x_{(-1)}a, a_{(0)}x$, $
x_{(0)}a, x_{(0)}y$ and $x_{(1)}y$. These satisfy the following axioms:
\begin{itemize}
\item
(Vacuum)
\[
a_{(-1)}{\mathbf{1}}=a;\ x_{(-1)}{\mathbf{1}}=x;\ x_{(0)}{\mathbf{1}}=0
\]
\item
(Derivation)
\begin{itemize}
\item[$Deriv_1\ \ $]
$(\partial a)_{(0)}b=0;\ (\partial a)_{(0)}x=0;\
(\partial a)_{(1)}x=-a_{(0)}x$
\item[$Deriv_2\ \ $]
$\partial(a_{(-1)}b)=(\partial a)_{(-1)}b+a_{(-1)}\partial b;\
\partial(x_{(0)}a)=x_{(0)}\partial a$
\end{itemize}
\item
(Commutativity)
\begin{itemize}
\item[$Comm_{-1}$]
$a_{(-1)}b=b_{(-1)}a;\ a_{(-1)}x=x_{(-1)}a-\partial(x_{(0)}a)$
\item[$Comm_0\ \ $]
$x_{(0)}a=-a_{(0)}x;\ x_{(0)}y=-y_{(0)}x+\partial(y_{(1)}x)$
\item[$Comm_1\ \ $]
$x_{(1)}y=y_{(1)}x$
\end{itemize}
\item
(Associativity)
\begin{itemize}
\item[$Assoc_{-1}$] $(a_{(-1)}b)_{(-1)}c=a_{(-1)}b_{(-1)}c$
\item[$Assoc_0\ \ $]
$\alpha_{(0)}\beta_{(i)}\gamma=
(\alpha_{(0)}\beta)_{(i)}\gamma+\beta_{(i)}\alpha_{(0)}\gamma,\ (\alpha,
\beta, \gamma\in V_0\oplus V_1)$
whenever both sides are defined, i.e. the operation $_{(0)}$ is a derivation
of all of the operations $_{(i)}$.
\item[$Assoc_1\ \ $]
$(a_{(-1)}x)_{(0)}b=a_{(-1)}x_{(0)}b$
\item[$Assoc_2\ \ $]
$(a_{(-1)}b)_{(-1)}x=a_{(-1)}b_{(-1)}x+(\partial a)_{(-1)}b_{(0)}x+
(\partial b)_{(-1)}a_{(0)}x$
\item[$Assoc_3\ \ $]
$(a_{(-1)}x)_{(1)}y=a_{(-1)}x_{(1)}y-x_{(0)}y_{(0)}a$
\end{itemize}
\end{itemize}
\subsection{}
A {\it morphism} between two $1$-truncated vertex algebras
$f:\ v=(V_0,V_1,\ldots) @>>> v'=(V'_0,V'_1,\ldots)$ is a pair of maps of $k$-modules
$f=(f_0,f_1),\ f_i:\ V_i @>>> V'_i$ such that $f_0({\mathbf{1}})={\mathbf{1}}',\ f_1(\partial a)=
\partial f_0(a)$ and $f(\alpha_{(i)}\beta)=f(\alpha)_{(i)}f(\beta)$,
whenever both sides are defined.
Let ${\cal V} ert_{\leq 1}$ denote the category of $1$-truncated vertex algebras.
We have an obvious truncation functor
$$
t:\ {\cal V} ert @>>> {\cal V} ert_{\leq 1}
\eqno{(3.1.1)}
$$
which assigns to a vertex algebra $V=\oplus V_i$ the truncated algebra
$tV:=(V_0,V_1,\ldots)$.
\subsection{Remark}\label{rem:comm-alg}
It follows easily that the operation $_{(-1)}\ :V_0\times V_0 @>>> V_0$
endows $V_0$ with a structure of a commutative $k$-algebra.
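Indeed, $Comm_{-1}$ and $Assoc_{-1}$ give commutativity and associativity of
the operation $_{(-1)}$ on $V_0$, while the vacuum axiom together with
$Comm_{-1}$ shows that ${\mathbf{1}}$ is a unit:
\[
{\mathbf{1}}_{(-1)}a=a_{(-1)}{\mathbf{1}}=a \ .
\]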
\section{Vertex algebroids}
Suppose that $X$ is a smooth variety over ${\Bbb C}$ (a complex manifold,
a $C^\infty$-manifold). In either case we will denote by ${\cal O}_X$
(${\cal T}_X$, $\Omega^i_X$) the corresponding structure sheaf (the sheaf
of vector fields, the sheaf of differential $i$-forms).
A vertex ${\cal O}_X$-algebroid, as defined in this section, is, essentially,
a sheaf of $1$-truncated vertex algebras, whose degree zero component (which is
a sheaf of algebras by \ref{rem:comm-alg}) is identified with ${\cal O}_X$.
\subsection{Leibniz algebras}
A {\em Leibniz $k$-algebra} is a $k$-vector space $\frak g$ equipped with
a bilinear operation $[\ ,\ ]:\frak g\otimes \frak g @>>> \frak g$ (the Leibniz bracket)
which satisfies the Jacobi type identity $[a,[b,c]] = [[a,b],c] +
[b,[a,c]]$.
A morphism of Leibniz $k$-algebras is a $k$-linear map which commutes with
the respective Leibniz brackets.
We denote the category of Leibniz $k$-algebras by $\operatorname{Leib}(k)$.
Let $\operatorname{Lie}(k)$ denote the category of Lie $k$-algebras.
Since any Lie algebra is a Leibniz algebra there is a full
embedding
\begin{equation}\label{lie-leib}
\operatorname{Lie}(k) @>>> \operatorname{Leib}(k) \ .
\end{equation}
\subsection{Vertex algebroids}
A {\em vertex ${\cal O}_X$-algebroid} is a sheaf of ${\Bbb C}$-vector
spaces ${\cal V}$ with a pairing
\begin{eqnarray*}
{\cal O}_X\otimes_{\Bbb C}{\cal V} & @>>> & {\cal V} \\
f\otimes v & \mapsto & f*v
\end{eqnarray*}
such that $1* v = v$ (i.e. a ``non-associative unital
${\cal O}_X$-module'') equipped with
\begin{enumerate}
\item
a structure of a Leibniz ${\Bbb C}$-algebra $[\ ,\ ] :
{\cal V}\otimes_{\Bbb C}{\cal V} @>>> {\cal V}$
\item
a ${\Bbb C}$-linear map of Leibniz algebras $\pi : {\cal V} @>>> {\cal T}_X$
(the {\em anchor})
\item
a symmetric ${\Bbb C}$-bilinear pairing $\langle\ ,\ \rangle :
{\cal V}\otimes_{\Bbb C}{\cal V} @>>> {\cal O}_X$
\item
a ${\Bbb C}$-linear map $\partial : {\cal O}_X @>>> {\cal V}$ such that
$\pi\circ\partial = 0$
\end{enumerate}
which satisfy
\begin{eqnarray}
f*(g*v) - (fg)*v & = & - \pi(v)(f)*\partial(g) -
\pi(v)(g)*\partial(f)\label{assoc} \\
\left[v_1,f*v_2\right] & = & \pi(v_1)(f)*v_2 + f*[v_1,v_2] \label{leib}
\\
\left[v_1,v_2\right] + [v_2,v_1] & = & \partial(\langle v_1,v_2\rangle)
\label{symm-bracket}\\
\pi(f*v) & = & f\pi(v) \label{anchor-lin} \\
\langle f*v_1, v_2\rangle & = & f\langle v_1,v_2\rangle -
\pi(v_1)(\pi(v_2)(f)) \label{pairing}\\
\pi(v)(\langle v_1, v_2\rangle) & = & \langle[v,v_1],v_2\rangle +
\langle v_1,[v,v_2]\rangle \label{pairing-inv} \\
\partial(fg) & = & f*\partial(g) + g*\partial(f) \label{deriv} \\
\left[v,\partial(f)\right] & = & \partial(\pi(v)(f)) \label{bracket-o}\\
\langle v,\partial(f)\rangle & = & \pi(v)(f)\label{pairing-o}
\end{eqnarray}
for $v,v_1,v_2\in{\cal V}$, $f,g\in{\cal O}_X$.
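As a quick illustration of how these identities interact, \eqref{symm-bracket},
\eqref{pairing-o} and \eqref{bracket-o} combine to give
\[
[\partial(f),v]=\partial(\langle v,\partial(f)\rangle)-[v,\partial(f)]=
\partial(\pi(v)(f))-\partial(\pi(v)(f))=0
\]
for all $f\in{\cal O}_X$ and $v\in{\cal V}$, i.e. elements of $\partial({\cal O}_X)$
act trivially on the left with respect to the Leibniz bracket.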
\subsubsection{}
A morphism of vertex ${\cal O}_X$-algebroids is a ${\Bbb C}$-linear map of
sheaves which preserves all of the structures.
We denote the category of vertex ${\cal O}_X$-algebroids by
${\mathcal VA}_{{\cal O}_X}(X)$. It is clear that the notion of a vertex ${\cal O}_X$-algebroid
is local, i.e. vertex ${\cal O}_X$-algebroids form a stack which we denote
by ${\mathcal VA}_{{\cal O}_X}$.
\subsection{From vertex algebroids to 1-truncated vertex algebras}
Suppose that ${\cal V}$ is a vertex ${\cal O}_X$-algebroid. For $f,g\in{\cal O}_X$,
$v,w\in{\cal V}$ let
\begin{equation}\label{op:-1}
f_{(-1)}g = fg,\ \ f_{(-1)}v = f*v,\ \ v_{(-1)}f = f*v - \partial\pi(v)(f),
\end{equation}
\begin{equation}\label{op:0}
v_{(0)}f = - f_{(0)}v = \pi(v)(f),\ \ v_{(0)}w = [v,w],
\end{equation}
\begin{equation}\label{op:1}
v_{(1)}w = \langle v,w\rangle\ .
\end{equation}
\subsubsection{Lemma}
The septuple $({\cal O}_X,{\cal V},1,\partial, _{(-1)}, _{(0)}, _{(1)})$ is a
sheaf of 1-truncated vertex operator algebras.
Conversely, if the septuple $({\cal O}_X,{\cal V},1,\partial,_{(-1)},_{(0)},_{(1)})$
is a sheaf of 1-truncated vertex operator algebras, then the formulas
\eqref{op:-1}, \eqref{op:0}, \eqref{op:1} define a structure of a vertex
${\cal O}_X$-algebroid on ${\cal V}$.
\subsection{The associated Lie algebroid}
Suppose that ${\cal V}$ is a vertex ${\cal O}_X$-algebroid. Let
\begin{eqnarray*}
\Omega_{\cal V} & \stackrel{def}{=} & {\cal O}_X*\partial({\cal O}_X)\subset{\cal V} \ ,\\
\overline{\cal V} & \stackrel{def}{=} & {\cal V}/\Omega_{\cal V} \ .
\end{eqnarray*}
Note that the symmetrization of the Leibniz bracket takes values
in $\Omega_{\cal V}$.
For $f,g,h\in{\cal O}_X$
\[
f*(g*\partial(h)) - (fg)*\partial(h) =
\pi(\partial(h))(f)*\partial(g) + \pi(\partial(h))(g)*\partial(f)
= 0\ ,
\]
because $\pi\circ\partial = 0$. Therefore, ${\cal O}_X*\Omega_{\cal V} =
\Omega_{\cal V}$, and $\Omega_{\cal V}$ is an ${\cal O}_X$-module. The map
$\partial : {\cal O}_X @>>> \Omega_{\cal V}$ is a derivation, hence induces
the ${\cal O}_X$-linear map $\Omega^1_X @>>> \Omega_{\cal V}$.
Since the associator of the ${\cal O}_X$-action on ${\cal V}$ takes values
in $\Omega_{\cal V}$, $\overline{\cal V}$ is an ${\cal O}_X$-module.
For $f,g,h\in{\cal O}_X$
\[
\pi(f\partial(g))(h) = f\pi(\partial(g))(h) = 0 \ .
\]
Therefore, $\pi$ vanishes on $\Omega_{\cal V}$, hence, factors through the
map
\begin{equation}\label{VA-anchor}
\pi : \overline{\cal V} @>>> {\cal T}_X
\end{equation}
of ${\cal O}_X$-modules.
For $v\in{\cal V}$, $f,g\in{\cal O}_X$
\begin{eqnarray*}
[v,f\partial(g)] & = & \pi(v)(f)\partial(g)+f[v,\partial(g)] \\
& = & \pi(v)(f)\partial(g)+f\partial(\pi(v)(g)) \ .
\end{eqnarray*}
Therefore, $[{\cal V},\Omega_{\cal V}]\subseteq\Omega_{\cal V}$ and the Leibniz bracket on
${\cal V}$ descends to the operation
\begin{equation}\label{VA-bracket}
[\ ,\ ]:\overline{\cal V}\otimes_{\Bbb C}\overline{\cal V} @>>> \overline{\cal V}
\end{equation}
which is skew-symmetric because the symmetrization of the Leibniz
bracket on ${\cal V}$ takes values in $\Omega_{\cal V}$ and satisfies the Jacobi
identity because the Leibniz bracket on ${\cal V}$ does.
\subsubsection{Lemma}
The ${\cal O}_X$-module $\overline{\cal V}$ with the bracket
\eqref{VA-bracket} and the anchor \eqref{VA-anchor} is a Lie
${\cal O}_X$-algebroid.
\subsection{Transitive vertex algebroids}
A vertex ${\cal O}_X$-algebroid is called {\em transitive} if the anchor
map is surjective.
\subsubsection{Remark}
The vertex ${\cal O}_X$-algebroid ${\cal V}$ is transitive if and only if
the Lie ${\cal O}_X$-algebroid $\overline{\cal V}$ is.
\subsubsection{}
Suppose that ${\cal V}$ is a transitive vertex ${\cal O}_X$-algebroid. The
derivation $\partial$ induces the map
\[
i : \Omega^1_X @>>> {\cal V} \ .
\]
For $v\in{\cal V}$, $f,g\in{\cal O}_X$
\begin{eqnarray*}
\langle v,f\partial(g)\rangle & = & f\langle v,\partial(g)\rangle
- \pi(\partial(g))\pi(v)(f) \\
& = & f\pi(v)(g) \\
& = & \iota_{\pi(v)}fdg \ .
\end{eqnarray*}
It follows that the map $i$ is adjoint to the anchor map $\pi$.
The surjectivity of the latter implies the
injectivity of the former. Since, in addition, $\pi\circ i = 0$
the sequence
\[
0 @>>> \Omega^1_X @>i>> {\cal V} @>\pi>> \overline{\cal V} @>>> 0
\]
is exact and $i$ is isotropic.
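Indeed, the isotropy of $i$ follows directly from the axioms: for
$f,g,h,k\in{\cal O}_X$,
\[
\langle f*\partial(g),h*\partial(k)\rangle =
f\langle h*\partial(k),\partial(g)\rangle =
fh\,\pi(\partial(k))(g)=0
\]
by \eqref{pairing}, the symmetry of the pairing, \eqref{pairing-o},
\eqref{anchor-lin} and $\pi\circ\partial = 0$.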
\subsection{Exact vertex algebroids}
A vertex algebroid ${\cal V}$ is called {\em exact} if the map
$\overline{\cal V} @>>> {\cal T}_X$ is an isomorphism.
We denote the stack of exact vertex ${\cal O}_X$-algebroids by
${\mathcal EVA}_X$.
A morphism of exact vertex algebroids induces a morphism of
respective extensions of ${\cal T}_X$ by $\Omega^1_X$, hence is an
isomorphism of sheaves of ${\Bbb C}$-vector spaces. It is clear that
the inverse isomorphism is a morphism of vertex
${\cal O}_X$-algebroids. Hence, ${\mathcal EVA}_X$ is a stack in groupoids.
\subsubsection{Example}\label{example:loc-pic}
Suppose that ${\cal T}_X$ is freely generated as an ${\cal O}_X$-module by
a locally constant subsheaf of Lie ${\Bbb C}$-subalgebras
$\tau\subset{\cal T}_X$, i.e. the canonical map ${\cal O}_X\otimes_{\Bbb C}\tau
@>>> {\cal T}_X$ is an isomorphism.
There is a unique structure of an exact vertex ${\cal O}_X$-algebroid on
${\cal V}=\Omega^1_X\bigoplus{\cal T}_X$ such that
\begin{itemize}
\item $f*(1\otimes t) = f\otimes t$ for $f\in{\cal O}_X$, $t\in\tau$,
\item the anchor map is given by the projection ${\cal V} @>>> {\cal T}_X$,
\item the map $\tau @>>> {\cal V}$ is a morphism of Leibniz algebras,
\item the pairing on ${\cal V}$ restricts to the trivial pairing on
$\tau$,
\item the derivation $\partial : {\cal O}_X @>>> {\cal V}$ is given by the
composition ${\cal O}_X @>d>>\Omega^1_X @>>> {\cal V}$.
\end{itemize}
Indeed, the action of ${\cal O}_X$ is completely determined by \eqref{assoc}:
for $f,g\in{\cal O}_X$, $t\in\tau$,
\[
f*(g\otimes t) = f*(g*(1\otimes t)) = fg\otimes t - t(f)dg - t(g)df \ .
\]
In a similar fashion the bracket is completely determined by \eqref{leib}
and \eqref{symm-bracket}, and the pairing is determined by \eqref{pairing}.
We leave the verification of the identities \eqref{assoc} - \eqref{pairing-o}
to the reader.
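For instance, a computation of the same kind, using \eqref{pairing} twice and
the triviality of the pairing on $\tau$, gives for $f,g\in{\cal O}_X$ and
$t_1,t_2\in\tau$
\[
\langle f\otimes t_1,g\otimes t_2\rangle =
-f\,t_2(t_1(g))-g\,t_1(t_2(f))-t_1(g)\,t_2(f) \ ,
\]
which is manifestly symmetric in $(f,t_1)$ and $(g,t_2)$.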
\section{Courant algebroids}
Courant algebroids are classical limits of vertex algebroids. They are
related to vertex Poisson algebras (coisson algebras in the terminology of
\cite{BD}) in the same way as the vertex algebroids
are related to vertex operators algebras.
\subsection{Courant algebroids}
A {\em Courant ${\cal O}_X$-algebroid} is an ${\cal O}_X$-module ${\cal Q}$
equipped with
\begin{enumerate}
\item a structure of a Leibniz ${\Bbb C}$-algebra
\[
[\ ,\ ] : {\cal Q}\otimes_{\Bbb C}{\cal Q} @>>> {\cal Q} \ ,
\]
\item
an ${\cal O}_X$-linear map of Leibniz algebras (the anchor map)
\[
\pi : {\cal Q} @>>> {\cal T}_X \ ,
\]
\item
a symmetric ${\cal O}_X$-bilinear pairing
\[
\langle\ ,\ \rangle : {\cal Q}\otimes_{{\cal O}_X}{\cal Q} @>>> {\cal O}_X \ ,
\]
\item
a derivation
\[
\partial : {\cal O}_X @>>> {\cal Q}
\]
such that $\pi\circ\partial = 0$
\end{enumerate}
which satisfy
\begin{eqnarray}
[q_1,fq_2] & = & f[q_1,q_2] + \pi(q_1)(f)q_2 \\
\langle [q,q_1],q_2\rangle + \langle q_1,[q,q_2]\rangle & = & \pi(q)(\langle q_1, q_2\rangle) \\
\left[q,\partial(f)\right] & = & \partial(\pi(q)(f)) \\
\langle q,\partial(f)\rangle & = & \pi(q)(f)\label{axiom:courant-ip-with-df} \\
\left[q_1,q_2\right] + [q_2,q_1] & = & \partial(\langle q_1, q_2\rangle)
\end{eqnarray}
for $f\in{\cal O}_X$ and $q,q_1,q_2\in{\cal Q}$.
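Combining the first and the last of these identities with the
${\cal O}_X$-bilinearity of the pairing and the derivation property of
$\partial$, one obtains the companion formula
\[
[fq_1,q_2]=f[q_1,q_2]-\pi(q_2)(f)q_1+\langle q_1,q_2\rangle \partial(f)
\]
for $f\in{\cal O}_X$ and $q_1,q_2\in{\cal Q}$.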
\subsubsection{}
A morphism of Courant ${\cal O}_X$-algebroids is an ${\cal O}_X$-linear map
of Leibniz algebras which commutes with the respective anchor
maps and derivations and preserves the respective pairings.
We denote the category of Courant ${\cal O}_X$-algebroids on $X$ by
${\mathcal CA}_{{\cal O}_X}(X)$. The notion of Courant ${\cal O}_X$-algebroid is local, i.e.
Courant ${\cal O}_X$-algebroids form a stack which we denote ${\mathcal CA}_{{\cal O}_X}$.
\subsection{The associated Lie algebroid}
Suppose that ${\cal Q}$ is a Courant ${\cal O}_X$-algebroid. Let
\begin{eqnarray*}
\Omega_{\cal Q} & \stackrel{def}{=} & {\cal O}_X\partial({\cal O}_X)\subset{\cal Q} \ , \\
\overline{\cal Q} & \stackrel{def}{=} & {\cal Q}/\Omega_{\cal Q} \ .
\end{eqnarray*}
Note that the symmetrization of the Leibniz bracket on ${\cal Q}$ takes
values in $\Omega_{\cal Q}$.
For $q\in{\cal Q}$, $f,g\in{\cal O}_X$
\begin{eqnarray*}
[q,f\partial(g)] & = & f[q,\partial(g)] + \pi(q)(f)\partial(g) \\
& = & f\partial(\pi(q)(g)) + \pi(q)(f)\partial(g)
\end{eqnarray*}
which shows that $[{\cal Q},\Omega_{\cal Q}]\subseteq\Omega_{\cal Q}$. Therefore the
Leibniz bracket on ${\cal Q}$ descends to the Lie bracket
\begin{equation}\label{LAbracket}
[\ ,\ ] : \overline{\cal Q}\otimes_{\Bbb C}\overline{\cal Q} @>>> \overline{\cal Q}\ .
\end{equation}
Because $\pi$ is ${\cal O}_X$-linear and $\pi\circ\partial = 0$, $\pi$
vanishes on $\Omega_{\cal Q}$ and factors through the map
\begin{equation}\label{LAanchor}
\pi : \overline{\cal Q} @>>> {\cal T}_X \ .
\end{equation}
\subsubsection{Lemma}
The bracket \eqref{LAbracket} and the anchor \eqref{LAanchor}
determine the structure of a Lie ${\cal O}_X$-algebroid on
$\overline{\cal Q}$.
\subsection{Transitive Courant algebroids}
A Courant ${\cal O}_X$-algebroid is called {\em transitive} if the
anchor map is surjective.
\subsubsection{Remark}
A Courant ${\cal O}_X$-algebroid ${\cal Q}$ is transitive if and only if the
associated Lie ${\cal O}_X$-algebroid is.
\subsubsection{}
Suppose that ${\cal Q}$ is a transitive Courant ${\cal O}_X$-algebroid. The
derivation $\partial$ induces the ${\cal O}_X$-linear map
\[
i : \Omega^1_X @>>> {\cal Q} \ .
\]
Since $\langle q, \alpha\rangle = \iota_{\pi(q)}\alpha$, it
follows that the map $i$ is adjoint to the anchor map $\pi$. The
surjectivity of the latter implies that $i$ is injective. Since,
in addition, $\pi\circ i = 0$ the sequence
\[
0 @>>> \Omega^1_X @>{i}>> {\cal Q} @>>> \overline{\cal Q} @>>> 0
\]
is exact. Moreover, $i$ is isotropic with respect to the
symmetric pairing.
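Note also that the computation of $[q,f\partial(g)]$ above can be rewritten as
$[q,i(f\,dg)]=i\bigl(L_{\pi(q)}(f\,dg)\bigr)$, i.e. $[q,i(\alpha)]=
i(L_{\pi(q)}\alpha)$ for every $\alpha\in\Omega^1_X$. Combined with the
symmetrization identity, the relation $\langle i(\alpha),q\rangle =
\iota_{\pi(q)}\alpha$ and Cartan's formula, this gives
\[
[i(\alpha),q]=i\bigl(d\iota_{\pi(q)}\alpha -L_{\pi(q)}\alpha\bigr)=
-i\bigl(\iota_{\pi(q)}d\alpha\bigr) \ .
\]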
\subsection{Exact Courant algebroids}
The Courant algebroid ${\cal Q}$ is called {\em exact} if the anchor
map $\pi: \overline{{\cal Q}} @>>> {\cal T}_X$ is an isomorphism.
We denote the stack of exact Courant ${\cal O}_X$-algebroids by
${\mathcal ECA}_X$.
A morphism of exact Courant algebroids induces a morphism of
respective extensions of ${\cal T}_X$ by $\Omega^1_X$, hence is an
isomorphism of ${\cal O}_X$-modules. It is clear that the inverse is a
morphism of Courant ${\cal O}_X$-algebroids. Hence ${\mathcal ECA}_X$ is
a stack in groupoids.
\subsection{Automorphisms of exact Courant algebroids}
Let $\operatorname{Ext}^{\langle\ ,\ \rangle}_{{\cal O}_X}({\cal T}_X,\Omega^1_X)$ denote
the category whose objects are pairs $({\cal E},\langle\ ,\ \rangle)$,
where ${\cal E}$ is an extension
\[
0 @>>> \Omega^1_X @>i>> {\cal E} @>\pi>> {\cal T}_X @>>> 0 \ .
\]
and
\[
\langle\ ,\ \rangle : {\cal E}\otimes_{{\cal O}_X}{\cal E} @>>> {\cal O}_X
\]
is a symmetric pairing such that $i$ is Lagrangian and the induced
pairing between $\Omega^1_X$ and ${\cal T}_X={\cal E}/\Omega^1_X$ is the
canonical duality pairing. A morphism of such pairs is a morphism of the
underlying extensions which preserves the respective pairings.
The map
\begin{equation}\label{Ext-exp}
\exp : \underline{\operatorname{Hom}}_{{\cal O}_X}({\cal T}_X,\Omega^1_X) @>>> \underline{\operatorname{Aut}}_{\operatorname{Ext}}({\cal E})
\end{equation}
defined by $\phi\mapsto (e\mapsto e + \phi(\pi(e)))$ is an
isomorphism. It restricts to the isomorphism
\[
\exp : \Omega^2_X @>>> \underline{\operatorname{Aut}}_{\operatorname{Ext}}^{\langle\ ,\
\rangle}({\cal E},\langle\ ,\ \rangle) \ ,
\]
via the map
\[
\Omega^2_X @>>> \underline{\operatorname{Hom}}_{{\cal O}_X}({\cal T}_X,\Omega^1_X)
\]
defined by $\beta\mapsto(\xi\mapsto\iota_\xi\beta)$.
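Indeed, for $\phi\in\underline{\operatorname{Hom}}_{{\cal O}_X}({\cal T}_X,\Omega^1_X)$
the automorphism $e\mapsto e+\phi(\pi(e))$ satisfies
\[
\langle e_1+\phi(\pi(e_1)),e_2+\phi(\pi(e_2))\rangle =
\langle e_1,e_2\rangle +\iota_{\pi(e_1)}\phi(\pi(e_2))+
\iota_{\pi(e_2)}\phi(\pi(e_1)) \ ,
\]
so it preserves the pairing if and only if the ${\cal O}_X$-bilinear form
$(\xi_1,\xi_2)\mapsto\iota_{\xi_1}\phi(\xi_2)$ is skew-symmetric, i.e.
$\phi(\xi)=\iota_\xi\beta$ for a 2-form $\beta$.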
Suppose that ${\cal Q}$ is an exact Courant ${\cal O}_X$-algebroid. The
automorphism of the underlying extension induced by a 2-form $\beta$
preserves the Leibniz bracket if and only if the form
$\beta$ is closed, i.e. the map \eqref{Ext-exp} restricts to the
isomorphism
\[
\Omega^{2,cl}_X @>>> \underline{\operatorname{Aut}}_{\mathcal ECA}({\cal Q}) \ .
\]
\subsection{The ${\Bbb C}$-vector space structure of ${\mathcal ECA}$}
The category ${\mathcal ECA}_X(X)$ has a natural structure of a
``${\Bbb C}$-vector space in categories'' induced by that of
$\operatorname{Ext}^1_{{\cal O}_X}({\cal T}_X,\Omega^1_X)$.
\subsubsection{Addition}
Suppose that ${\cal Q}_1$ and ${\cal Q}_2$ are two exact Courant
${\cal O}_X$-algebroids. Let ${\cal Q}_1+{\cal Q}_2$ denote the push-out of
${\cal Q}_1\times_{{\cal T}_X}{\cal Q}_2$ by the addition map
$\Omega^1_X\times\Omega^1_X @>>> \Omega^1_X$. A section of
${\cal Q}_1+{\cal Q}_2$ is an equivalence class of pairs $(q_1,q_2)$, where
$q_i\in{\cal Q}_i$ and $\pi(q_1)=\pi(q_2)$. Two pairs are equivalent if
their (componentwise) difference is of the form $(i(\alpha),
-i(\alpha))$ for some $\alpha\in\Omega^1_X$.
The two maps $\Omega^1_X @>>> {\cal Q}_1+{\cal Q}_2$ given by
$\alpha\mapsto(i(\alpha),0)$ and $\alpha\mapsto(0,i(\alpha))$
coincide. We denote their common value by $i$ as well. There is a
short exact sequence
\[
0 @>>> \Omega^1_X @>i>> {\cal Q}_1+{\cal Q}_2 @>\pi>> {\cal T}_X @>>> 0 \ ,
\]
where $\pi((q_1,q_2))$ is defined as the common value of
$\pi(q_1)$ and $\pi(q_2)$.
The map $i$ determines the derivation $\partial :{\cal O}_X @>>>
{\cal Q}_1+{\cal Q}_2$ by $\partial(f) = i(df)$.
For $q_i,q'_i\in{\cal Q}_i$ let
\begin{equation}\label{formula:sum-bracket-pairing}
[(q_1,q_2),(q'_1,q'_2)] = ([q_1,q'_1],[q_2,q'_2]), \ \ \ \langle
(q_1,q_2),(q'_1,q'_2)\rangle = \langle q_1,q'_1\rangle + \langle
q_2,q'_2\rangle \ .
\end{equation}
\subsubsection{Lemma}
The bracket and the symmetric pairing given by
\eqref{formula:sum-bracket-pairing} determine a structure of an
exact Courant algebroid on ${\cal Q}_1+{\cal Q}_2$.
\subsubsection{Scalar multiplication}
Suppose that ${\cal Q}$ is an exact Courant ${\cal O}_X$-algebroid and
$\lambda\in{\Bbb C}$. Let $\lambda{\cal Q}$ denote the push-out of ${\cal Q}$ by the
multiplication by $\lambda$ map $\Omega^1_X
@>{\lambda\cdot}>>\Omega^1_X$. A section of $\lambda{\cal Q}$ is an
equivalence class of pairs $(\alpha,q)$ with $\alpha\in\Omega^1_X$
and $q\in{\cal Q}$. Two pairs as above are equivalent if their
componentwise difference is of the form
$(\lambda\alpha,-i(\alpha))$ for some $\alpha\in\Omega^1_X$.
Let $i:\Omega^1_X @>>> \lambda{\cal Q}$ denote the map
$\alpha\mapsto(\alpha,0)$. There is a short exact sequence
\[
0 @>>> \Omega^1_X @>i>> \lambda{\cal Q} @>\pi>> {\cal T}_X @>>> 0
\]
where $\pi(\alpha,q) = \pi(q)$.
The map $i$ determines the derivation $\partial:{\cal O}_X @>>>
\lambda{\cal Q}$ by $\partial(f) = i(df) = (df,0)$. Note that $\pi\circ
i =0$ holds.
\subsubsection{Lemma}
There is a unique structure of exact Courant algebroid on
$\lambda{\cal Q}$ (with anchor $\pi$ and derivation $\partial$ as
above) such that the map ${\cal Q} @>>> \lambda{\cal Q}: q\mapsto(0,q)$ is a
morphism of Leibniz algebras.
\begin{pf}
For $q_i\in{\cal Q}$ the calculation
\begin{multline*}
(\lambda d\langle q_1,q_2\rangle,0)= (0,\partial\langle
q_1,q_2\rangle) = \\
(0,[q_1,q_2] + [q_2,q_1]) = [(0,q_1),(0,q_2)]
+ [(0,q_2),(0,q_1)] =
\partial\langle(0,q_1),(0,q_2)\rangle = \\
(d\langle(0,q_1),(0,q_2)\rangle,0)
\end{multline*}
together with \eqref{axiom:courant-ip-with-df} determines the
symmetric pairing on $\lambda{\cal Q}$. In particular, $
\langle(0,q_1),(0,q_2)\rangle=\lambda\langle q_1,q_2\rangle$.
\end{pf}
\subsubsection{}
Let ${\cal Q}_0$ denote the exact Courant algebroid with the underlying
${\cal O}_X$-module the trivial extension $\Omega^1_X\oplus{\cal T}_X$,
self-evident derivation and anchor map, the symmetric pairing the
obvious extension of the natural duality pairing and the Leibniz
bracket uniquely determined by the requirement that the natural
inclusion of ${\cal T}_X$ is a map of Leibniz algebras.
Note that ${\cal Q}_0$ is equipped with a flat connection. For any
exact Courant algebroid ${\cal Q}$ a flat connection on ${\cal Q}$ is the
same thing as a morphism ${\cal Q}_0 @>>> {\cal Q}$.
\subsubsection{Lemma}
For any exact Courant algebroid ${\cal Q}$ there is a canonical
isomorphism ${\cal Q}\cong{\cal Q}+{\cal Q}_0$.
\subsubsection{Remark}
The natural morphism (of Leibniz algebras) ${\cal Q} @>>> \lambda{\cal Q}$
factors through ${\cal T}_X$ if $\lambda=0$ (and is an isomorphism of
the underlying ${\cal O}_X$-modules otherwise) and the composition
${\cal T}_X @>>> 0{\cal Q} @>>> {\cal T}_X$ is clearly equal to the identity.
Thus, $0{\cal Q}$ carries a canonical flat connection, or,
equivalently, is canonically isomorphic to ${\cal Q}_0$.
\subsubsection{Remark}
If $\lambda\neq 0$ the natural map ${\cal Q} @>>> \lambda{\cal Q}$ is an isomorphism
of the underlying ${\cal O}_X$-modules with the inverse given by
$(\alpha,q)\mapsto q+\lambda^{-1}\alpha$. The Courant algebroid structure
on ${\cal Q}$ induced via this isomorphism, denoted by $\partial_{\lambda{\cal Q}}$,
$\langle\ ,\ \rangle_{\lambda{\cal Q}}$, etc., is given by
\[
\partial_{\lambda{\cal Q}}(f) = \lambda^{-1}\partial(f),
\ \ \ \langle q_1,q_2\rangle_{\lambda{\cal Q}} = \lambda\langle q_1,q_2\rangle,
\ \ \ [q_1,q_2]_{\lambda{\cal Q}}=[q_1,q_2]
\]
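One checks directly that these formulas satisfy the axioms; for instance,
\[
\langle q,\partial_{\lambda{\cal Q}}(f)\rangle_{\lambda{\cal Q}}=
\lambda\langle q,\lambda^{-1}\partial(f)\rangle =
\langle q,\partial(f)\rangle =\pi(q)(f) \ ,
\]
and $[q_1,q_2]_{\lambda{\cal Q}}+[q_2,q_1]_{\lambda{\cal Q}}=
\partial(\langle q_1,q_2\rangle)=
\partial_{\lambda{\cal Q}}(\langle q_1,q_2\rangle_{\lambda{\cal Q}})$.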
\subsection{Classification of exact Courant algebroids}
\subsubsection{Connections}
A {\em connection} on an exact Courant ${\cal O}_X$-algebroid ${\cal Q}$ is
a Lagrangian section of the anchor map.
\subsubsection{Curvature}\label{ECAcurv}
Suppose that ${\cal Q}$ is an exact Courant ${\cal O}_X$-algebroid and
$\nabla$ is a connection on ${\cal Q}$. The formula
\[
(\xi,\xi_1,\xi_2)\mapsto\iota_\xi\bigl([\nabla(\xi_1),\nabla(\xi_2)] -
\nabla([\xi_1,\xi_2])\bigr)
\]
defines a differential 3-form $c(\nabla)$ on $X$ which is easily
seen to be closed.
The differential form $c(\nabla)$ is
called {\em the curvature} of the connection $\nabla$.
\subsubsection{}
Suppose that ${\cal Q}$ is an exact Courant ${\cal O}_X$-algebroid, $\nabla$
is a connection on ${\cal Q}$ and
$\phi\in\operatorname{Hom}_{{\cal O}_X}({\cal T}_X,\Omega^1_X)$. Then, $\nabla+\phi$ is
another section of the anchor map. The section $\nabla+\phi$ is
Lagrangian if and only if $\phi$ is derived from a differential
2-form, i.e. there exists $\beta\in\Omega^2_X(X)$ such that
$\phi(\xi) = \iota_\xi\beta$ for all $\xi$.
Let ${\cal C}({\cal Q})$ denote the sheaf of (locally defined) connections
on ${\cal Q}$. It is an $\Omega^2_X$-torsor. The correspondence
$\nabla\mapsto c(\nabla)$ defines the map
\[
c : {\cal C}({\cal Q}) @>>> \Omega^{3,cl}_X
\]
which satisfies
\[
c(\nabla + \widetilde\beta) = c(\nabla) + d\beta \ ,
\]
where $\widetilde{(\ )} : \Omega^i_X @>>>
\underline{\operatorname{Hom}}_{{\cal O}_X}({\cal T}_X,\Omega^{i-1}_X)$ is defined by
$\widetilde\beta(\xi) = \iota_\xi\beta$.
Therefore, the pair $({\cal C}(\ ),c)$ defines the functor
\begin{equation}\label{ECA-to-tors}
{\mathcal ECA}_X(X) @>>> (\Omega^2_X @>d>> \Omega^{3,cl}_X)-tors \ .
\end{equation}
\subsubsection{Lemma}
The functor \eqref{ECA-to-tors} is an equivalence of ${\Bbb C}$-vector
spaces in categories.
\subsubsection{Corollary}
The ${\Bbb C}$-vector space of isomorphism classes of exact Courant
${\cal O}_X$-algebroids is naturally isomorphic to
$H^1(X;\Omega^2_X @>d>> \Omega^{3,cl}_X)$.
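We note in passing that in the $C^\infty$ case the sheaves $\Omega^2_X$ and
$\Omega^3_X$ are soft and the Poincar\'e lemma holds, so the complex
$\Omega^2_X @>d>> \Omega^{3,cl}_X$ is quasi-isomorphic to $\Omega^{2,cl}_X$ and
\[
H^1(X;\Omega^2_X @>d>> \Omega^{3,cl}_X)\cong H^1(X;\Omega^{2,cl}_X)\cong
H^3(X;{\Bbb R}) \ ,
\]
i.e. an exact Courant algebroid on a $C^\infty$-manifold is determined up to
isomorphism by a closed 3-form modulo exact ones.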
\subsection{The action on ${\mathcal EVA}$}
The groupoid ${\mathcal EVA}_X(X)$, if non-empty, is an ``affine space in
categories'' (under the action of the ``${\Bbb C}$-vector space in categories''
${\mathcal ECA}_X(X)$).
\subsubsection{}
Suppose that ${\cal V}$ (respectively, ${\cal Q}$) is an exact vertex (respectively,
Courant) algebroid. Let ${\cal Q}+{\cal V}$ denote the push-out of
${\cal Q}\times_{{\cal T}_X}{\cal V}$ by the addition map $\Omega^1_X\times\Omega^1_X @>+>>
\Omega^1_X$. A section of ${\cal Q}+{\cal V}$ is an equivalence class of pairs $(q,v)$
with $q\in{\cal Q}$, $v\in{\cal V}$ and $\pi(q)=\pi(v)$. Two pairs are equivalent if
their (componentwise) difference is of the form $(i(\alpha),-i(\alpha))$
for some $\alpha\in\Omega^1_X$.
The two maps $\Omega^1_X @>>> {\cal Q}+{\cal V}$ given by $\alpha\mapsto(i(\alpha),0)$
and $\alpha\mapsto(0,i(\alpha))$ coincide and we denote their common value
by $i$ as well. There is a short exact sequence
\[
0 @>>> \Omega^1_X @>i>> {\cal Q}+{\cal V} @>\pi>> {\cal T}_X @>>> 0 \ ,
\]
where $\pi((q,v))$ is defined to be the common value of $\pi(q)$ and $\pi(v)$.
Let
\begin{multline}\label{formulas:Q-plus-V}
\partial(f)=i(df),\ \ \ f*(q,v)=(fq,f*v), \\
[(q_1,v_1),(q_2,v_2)]= ([q_1,q_2],[v_1,v_2]),
\ \ \ \langle(q_1,v_1),(q_2,v_2)\rangle =
\langle q_1,q_2\rangle+\langle v_1,v_2\rangle
\end{multline}
\subsubsection{Lemma}
The formulas \eqref{formulas:Q-plus-V} determine a structure of
a vertex algebroid on ${\cal Q}+{\cal V}$.
\subsubsection{Proposition}
${\mathcal EVA}_X$ is a torsor under ${\mathcal ECA}_X$.
\begin{pf}
Suppose that ${\cal V}_1$ and ${\cal V}_2$ are two exact vertex ${\cal O}_X$-algebroids.
Let ${\cal V}_2-{\cal V}_1$ denote the push-out of ${\cal V}_2\times_{{\cal T}_X}{\cal V}_1$ by the difference
map $\Omega^1_X\times\Omega^1_X @>->>\Omega^1_X:(\alpha,\beta)\mapsto\alpha-\beta$.
A section of ${\cal V}_2-{\cal V}_1$ is an equivalence class of pairs $(v_2,v_1)$, where
$v_i\in{\cal V}_i$ and $\pi(v_2)=\pi(v_1)$. Two pairs are equivalent if
their (componentwise) difference is of the form $(i(\alpha),i(\alpha))$ for
some $\alpha\in\Omega^1_X$.
The two maps $\Omega^1_X @>>> {\cal V}_2-{\cal V}_1$ given by $\alpha\mapsto(i(\alpha),0)$
and $\alpha\mapsto(0,-i(\alpha))$ coincide and we denote their common value
by $i$ as well. There is a short exact sequence
\[
0 @>>> \Omega^1_X @>i>> {\cal V}_2-{\cal V}_1 @>\pi>> {\cal T}_X @>>> 0 \ ,
\]
where $\pi((v_2,v_1))$ is defined to be the common value of $\pi(v_2)$ and $\pi(v_1)$.
Let
\begin{multline}\label{formulas:V-minus-V}
\partial(f)=i(df),\ \ \ f*(v_2,v_1)=(f*v_2,f*v_1), \\
[(v_2,v_1),(v'_2,v'_1)]= ([v_2,v'_2],[v_1,v'_1]),
\ \ \ \langle(v_2,v_1),(v'_2,v'_1)\rangle =
\langle v_2,v'_2\rangle+\langle v_1,v'_1\rangle
\end{multline}
It is clear that the component-wise $*$-operation of ${\cal O}_X$ is well-defined
on ${\cal V}_2-{\cal V}_1$. Moreover, since the associator of the ${\cal O}_X$-action
on a vertex ${\cal O}_X$-algebroid (i.e. the right hand side of \eqref{assoc})
does not depend on the algebroid, the operation of ${\cal O}_X$ on ${\cal V}_2-{\cal V}_1$
is, in fact, associative. Since the ``anomalies'' present in \eqref{leib}
and \eqref{pairing} are algebroid-independent, ${\cal V}_2-{\cal V}_1$ is, in fact,
an exact Courant ${\cal O}_X$-algebroid. It is clear that $({\cal V}_2-{\cal V}_1)+{\cal V}_1$
is canonically isomorphic to ${\cal V}_2$.
So far we have established that, if non-empty, ${\mathcal EVA}_X(X)$ is a torsor
under ${\mathcal ECA}_X(X)$. It remains to show that ${\mathcal EVA}_X$ is locally
non-empty.
In the analytic or $C^\infty$ case Example \ref{example:loc-pic}
provides a locally defined exact vertex algebroid. Indeed, locally on $X$ there exists
an (abelian) Lie ${\Bbb C}$-subalgebra $\tau\subset{\cal T}_X$
such that ${\cal T}_X\cong{\cal O}_X\otimes_{\Bbb C}\tau$.
In the algebraic setting the same example shows that ${\mathcal EVA}_X$ is
non-empty locally in \'etale topology. Since
$H^2_{\text{\'et}}(X,\Omega^2_X @>d>> \Omega^{3,cl}_X)$ is canonically isomorphic
to $H^2(X,\Omega^2_X @>d>> \Omega^{3,cl}_X)$ it follows that ${\mathcal EVA}_X$
is non-empty Zariski-locally.
\end{pf}
\section{Algebroids over the de Rham complex}
All of the notions of the preceding section generalize in an
obvious way to differential graded manifolds (i.e. manifolds
whose structure sheaves are sheaves of commutative differential graded
algebras).
For a manifold $X$ let $X^\sharp$ denote the differential graded
manifold with the underlying space $X$ and the structure sheaf ${\cal O}_{X^\sharp}$
the de Rham complex $\operatorname{DR}$. In other words, ${\cal O}_{X^\sharp} = \bigoplus_i\Omega^i_X[-i]$
(as a sheaf of graded algebras). We will denote by $\partial_{{\cal O}_{X^\sharp}}$
the derivation given by the de Rham differential.
\subsection{The structure of ${\cal T}_{X^\sharp}$}
The tangent sheaf of $X^\sharp$ (of derivations of ${\cal O}_{X^\sharp}$),
${\cal T}_{X^\sharp}$ is a sheaf of differential graded Lie algebras with the
differential $\partial_{{\cal T}_{X^\sharp}} = [\partial_{{\cal O}_{X^\sharp}},\ \ ]$
(note that $\partial_{{\cal O}_{X^\sharp}}\in{\cal T}_{X^\sharp}^1$).
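In local coordinates this is easy to make explicit: if $x^i$ are local
coordinates on $X$, then ${\cal O}_{X^\sharp}$ is locally generated by the $x^i$
(in degree zero) and the $dx^i$ (in degree one), and ${\cal T}_{X^\sharp}$ is
locally a free ${\cal O}_{X^\sharp}$-module on the Lie derivatives
$L_{\partial/\partial x^i}$ (of degree zero) and the interior products
$\iota_{\partial/\partial x^i}$ (of degree $-1$), with
\[
\partial_{{\cal O}_{X^\sharp}}=\sum_i dx^i\,L_{\partial/\partial x^i}
\ \ \text{and}\ \
\partial_{{\cal T}_{X^\sharp}}(\iota_\xi)=[\partial_{{\cal O}_{X^\sharp}},\iota_\xi]=
L_\xi \ .
\]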
\subsubsection{}
Let $\widetilde{\cal T}_X$ denote the cone of the identity endomorphism
of ${\cal T}_X$. That is, $\widetilde{{\cal T}_X}^i={\cal T}_X$ for $i=-1,0$ and
zero otherwise. The only nontrivial differential is the identity map.
The complex $\widetilde{\cal T}_X$ has the canonical structure of a
sheaf of differential graded Lie algebras.
The natural action of ${\cal T}_X$ (respectively ${\cal T}_X[1]$) on ${\cal O}_{X^\sharp}$
by the Lie derivative (respectively by interior product) gives rise
to the injective map of
DGLA
\begin{equation}\label{action-tau}
\tau : \widetilde{{\cal T}_X} @>>> {\cal T}_{X^\sharp} \ .
\end{equation}
The action $\tau$ extends in the canonical way to a structure
of a Lie ${\cal O}_{X^\sharp}$-algebroid on ${\cal O}_{X^\sharp}\otimes_{\Bbb C}\widetilde{\cal T}_X$
with the anchor map
\begin{equation}\label{tau-DR}
\tau_{{\cal O}_{X^\sharp}} : {\cal O}_{X^\sharp}\otimes_{\Bbb C}\widetilde{\cal T}_X @>>> {\cal T}_{X^\sharp}
\end{equation}
being the canonical extension of $\tau$ to an ${\cal O}_{X^\sharp}$-linear map. Note that
$\tau_{{\cal O}_{X^\sharp}}$ is surjective, i.e. the Lie ${\cal O}_{X^\sharp}$-algebroid
${\cal O}_{X^\sharp}\otimes_{\Bbb C}\widetilde{\cal T}_X$ is transitive. We denote this
algebroid by $\widetilde{\cal T}_{X^\sharp}$.
\subsubsection{}
Let ${\cal T}_{X^\sharp/X}\subset{\cal T}_{X^\sharp}$ denote the normalizer of
${\cal O}_X\subset{\cal O}_{X^\sharp}$. Since the action of ${\cal T}_X[1]$ is
${\cal O}_X$-linear, the map $\tau$ restricts to
\[
\tau : \widetilde{{\cal T}_X}^{-1} = {\cal T}_X[1] @>>> {\cal T}_{X^\sharp/X}
\]
and (the restriction of) $\tau_{{\cal O}_{X^\sharp}}$ factors through the map
\begin{equation}\label{tau-sub}
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X[1] @>>> {\cal T}_{X^\sharp/X}
\end{equation}
which is easily seen to be an isomorphism.
Since the action $\tau$ is ${\cal O}_X$-linear modulo
${\cal T}_{X^\sharp/X}$, $\tau_{{\cal O}_{X^\sharp}}$ induces the map
\begin{equation}\label{tau-fac}
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X @>>> {\cal T}_{X^\sharp}/{\cal T}_{X^\sharp/X}
\end{equation}
which is easily seen to be an isomorphism.
Therefore, there is an exact sequence of graded
${\cal O}_{X^\sharp}$-modules
\begin{equation}\label{ses-der}
0 @>>> {\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X[1] @>>> {\cal T}_{X^\sharp} @>>>
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X @>>> 0
\ .
\end{equation}
The composition
\[
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X[1] @>>> {\cal T}_{X^\sharp} @>{\partial_{{\cal T}_{X^\sharp}}}>>
{\cal T}_{X^\sharp}[1] @>>> {\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X[1]
\]
is the identity map.
\subsection{Exact Courant ${\cal O}_{X^\sharp}$-algebroids}
\subsubsection{Proposition}
Every exact Courant ${\cal O}_{X^\sharp}$-algebroid admits a unique flat
connection.
\begin{pf}
Consider a Courant algebroid
\[
0 @>>> \Omega^1_{X^\sharp} @>i>> {\cal Q} @>{\pi}>> {\cal T}_{X^\sharp} @>>> 0
\]
Note that, since $\Omega^1_{X^\sharp}$ is concentrated in non-negative
degrees, the map $\pi^{-1} : {\cal Q}^{-1} @>>> {\cal T}_{X^\sharp}^{-1}$ is
an isomorphism. Since ${\cal T}_{X^\sharp}^{-1}$ generates ${\cal T}_{X^\sharp}$ as a
DG-module over ${\cal O}_{X^\sharp}$, the splitting is unique if it exists.
To establish the existence it is necessary and sufficient to show
that the restriction of the anchor map to the DG-submodule of
${\cal Q}$ generated by ${\cal Q}^{-1}$ is an isomorphism.
Note that the map $\tau : \widetilde{\cal T}_X @>>> {\cal T}_{X^\sharp}$ lifts in a
unique way to a morphism of complexes $\widetilde\tau :
\widetilde{\cal T}_X @>>> {\cal Q}$. It is easily seen to be a morphism of
DGLA. Let ${\cal Q}'$ denote the ${\cal O}_{X^\sharp}$-submodule of ${\cal Q}$ generated by
the image of $\widetilde\tau$ (i.e. the DG ${\cal O}_{X^\sharp}$-submodule
generated by ${\cal Q}^{-1}$).
Since $\widetilde\tau^{-1}$ is ${\cal O}_X$-linear it extends to the
map
\begin{equation}\label{Q-sub}
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X[1] @>>> {\cal Q}'
\end{equation}
such that the composition
\[
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X[1] @>>> {\cal Q}' @>\pi>> {\cal T}_{X^\sharp}
\]
coincides with the composition of the isomorphism
\eqref{tau-sub} with the inclusion into ${\cal T}_{X^\sharp}$. Therefore,
\eqref{Q-sub} is a monomorphism whose image will be denoted
${\cal Q}''$, and $\pi$ restricts to an isomorphism of ${\cal Q}''$ onto
${\cal T}_{X^\sharp/X}$.
Since $\widetilde\tau^0$ is ${\cal O}_X$-linear modulo ${\cal Q}''$ it
extends to the map
\begin{equation}\label{Q-fac}
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X @>>> {\cal Q}'/{\cal Q}''
\end{equation}
which is surjective (since ${\cal Q}'/{\cal Q}''$ is generated as a
${\cal O}_{X^\sharp}$-module by the image of $\widetilde\tau^0$), and such
that the composition
\[
{\cal O}_{X^\sharp}\otimes_{{\cal O}_X}{\cal T}_X @>>> {\cal Q}'/{\cal Q}'' @>\pi>>
{\cal T}_{X^\sharp}/{\cal T}_{X^\sharp/X}
\]
coincides with the isomorphism \eqref{tau-fac}. Therefore,
\eqref{Q-fac} is an isomorphism.
Now the exact sequence \eqref{ses-der} implies that $\pi$
restricts to an isomorphism ${\cal Q}' @>\cong>> {\cal T}_{X^\sharp}$. The desired
splitting is the inverse isomorphism. It is obviously compatible
with brackets, hence, is a flat connection.
\end{pf}
\subsubsection{Corollary}\label{ECA-DR-final}
${\mathcal ECA}_{{\cal O}_{X^\sharp}}$ is equivalent to the final stack.
\subsubsection{Corollary}\label{corollary:existence-uniqueness}
An exact vertex ${\cal O}_{X^\sharp}$-algebroid exists and is unique up to
canonical isomorphism.
\begin{pf}
Since ${\mathcal EVA}_{{\cal O}_{X^\sharp}}$ is an affine space under ${\mathcal ECA}_{{\cal O}_{X^\sharp}}$
the uniqueness (local and global) follows from Corollary
\ref{ECA-DR-final}. Local existence and uniqueness implies
global existence.
\end{pf}
\subsection{The exact vertex ${\cal O}_{X^\sharp}$-algebroid}\label{subsection:construction}
For a vertex ${\cal O}_{X^\sharp}$-algebroid ${\cal V}$ with anchor map $\pi$ we will
call a $\widetilde{\cal T}_X$-rigidification of ${\cal V}$ a map of DG
Leibniz algebras $\psi : \widetilde{\cal T}_X @>>> {\cal V}$ such that the
composition $\pi\circ\psi$ coincides with the map
\eqref{action-tau}. A rigidified vertex ${\cal O}_{X^\sharp}$-algebroid is a pair
$({\cal V},\psi)$, where ${\cal V}$ is a vertex ${\cal O}_{X^\sharp}$-algebroid and $\psi$
is a $\widetilde{\cal T}_X$-rigidification of ${\cal V}$. A morphism of
rigidified vertex ${\cal O}_{X^\sharp}$-algebroids is a morphism of vertex
${\cal O}_{X^\sharp}$-algebroids which commutes with respective rigidifications.
The functor which assigns to a vertex ${\cal O}_{X^\sharp}$-algebroid ${\cal V}$ the
set of rigidifications of ${\cal V}$ is representable. The initial
$\widetilde{\cal T}_X$-rigidified vertex ${\cal O}_{X^\sharp}$-algebroid
$\widetilde{\cal U}$ may be described as follows.
As a sheaf of ${\Bbb C}$-vector spaces
\[
\widetilde{\cal U} =
\Omega^1_{X^\sharp}\oplus{\cal O}_{X^\sharp}\otimes_{\Bbb C}\widetilde{\cal T}_X \ .
\]
There is a
unique structure of a vertex ${\cal O}_{X^\sharp}$-algebroid on $\widetilde{\cal U}$
such that
\begin{itemize}
\item the derivation $\partial : {\cal O}_{X^\sharp} @>>> \Omega_{\widetilde{\cal U}}$
is given by the composition of the exterior derivative
$d_{X^\sharp} : \operatorname{DR} @>>> \Omega^1_{X^\sharp}$ with the inclusion
of the first summand $\Omega^1_{X^\sharp} @>>> \widetilde{\cal U}$,
\item the projection onto the second summand induces an
isomorphism of Lie ${\cal O}_{X^\sharp}$-algebroids $\overline{\widetilde{\cal U}}
@>\cong>> \operatorname{DR}\otimes_{\Bbb C}\widetilde{\cal T}_X$,
\item the restriction to $1\otimes\widetilde{\cal T}_X$ of the adjoint
action of $\widetilde{\cal U}$ on $\Omega_{\widetilde{\cal U}}$ coincides with the
action of $\widetilde{\cal T}_X$ on $\Omega^1_{X^\sharp}$ by the Lie
derivative and \eqref{action-tau}.
\end{itemize}
Namely, the action of ${\cal O}_{X^\sharp}$ is forced by \eqref{assoc}:
\[
\alpha*(\beta\otimes\xi) = \alpha\wedge\beta\otimes\xi -
\tau(\xi)(\alpha)d_{X^\sharp}\beta - \tau(\xi)(\beta)d_{X^\sharp}\alpha \ .
\]
The symmetric pairing is the unique extension of the zero pairing
on $1\otimes\widetilde{\cal T}_X$ which satisfies \eqref{pairing}:
\[
\langle \beta_1\otimes\xi_1,\beta_2\otimes\xi_2\rangle =
-\beta_1\tau(\xi_2)(\tau(\xi_1)(\beta_2)) -
\beta_2\tau(\xi_1)(\tau(\xi_2)(\beta_1)) -
\tau(\xi_1)(\beta_2)\tau(\xi_2)(\beta_1)
\]
The Leibniz bracket on $\widetilde{\cal U}$ is the unique extension of
the Lie bracket on $\widetilde{\cal T}_X$ which satisfies \eqref{leib}
and \eqref{symm-bracket}.
Consider the following condition on a
$\widetilde{\cal T}_X$-rigidification $\psi$ of a vertex ${\cal O}_{X^\sharp}$-algebroid
${\cal V}$:
\subsubsection{}\label{Lin}
{\em
The component $\psi^{-1}:{\cal T}_X @>>> {\cal V}^{-1}$ is
${\cal O}_X$-linear, i.e. $\psi^{-1}(f\xi) = f*\psi^{-1}(\xi)$ for
$f\in{\cal O}_X$ and $\xi\in{\cal T}_X$.
}
The functor which assigns to a vertex ${\cal O}_{X^\sharp}$-algebroid ${\cal V}$ the
collection of rigidifications of ${\cal V}$ satisfying \ref{Lin} is
represented by the appropriate quotient ${\cal U}$ of $\widetilde{\cal U}$.
We claim that ${\cal U}$ is an exact vertex ${\cal O}_{X^\sharp}$-algebroid.
Let $K$ denote the kernel of the canonical map
$\widetilde{\cal T}_{X^\sharp}^{-1}={\cal O}_X\otimes_{\Bbb C}{\cal T}_X @>>> {\cal T}_X$.
Let
\[
{\cal I} = \operatorname{DR}*K[1] + \operatorname{DR}*dK\subset\widetilde{\cal U} \ .
\]
Then, $\operatorname{DR}*{\cal I}={\cal I}$, $\langle\widetilde{\cal U},{\cal I}\rangle = 0$,
and $[\widetilde{\cal U},{\cal I}]\subseteq{\cal I}$.
Moreover, ${\cal I}$ projects isomorphically onto the kernel of \eqref{tau-DR}.
The essentially unique exact vertex ${\cal O}_{X^\sharp}$-algebroid is ${\cal U} = \widetilde{\cal U}/{\cal I}$.
\end{document}
\begin{document}
\title{A Bayesian Approach for Online Classifier Ensemble}
\author{\name Qinxun Bai \email [email protected] \\
\addr Department of Computer Science\\
Boston University\\
Boston, MA 02215, USA
\AND
\name Henry Lam \email [email protected] \\
\addr Department of Industrial \& Operations Engineering\\
University of Michigan\\
Ann Arbor, MI 48109, USA
\AND
\name Stan Sclaroff \email [email protected] \\
\addr Department of Computer Science\\
Boston University\\
Boston, MA 02215, USA}
\editor{}
\maketitle
\begin{abstract}
We propose a Bayesian approach for recursively estimating the classifier weights in online learning of a classifier ensemble. In contrast with past methods, such as stochastic gradient descent or online boosting, our approach estimates the weights by recursively updating its posterior distribution. For a specified class of loss functions, we show that it is possible to formulate a suitably defined likelihood function and hence use the posterior distribution as an approximation to the global empirical loss minimizer. If the stream of training data is sampled from a stationary process, we can also show that our approach admits a rate of convergence to the expected loss minimizer that is superior to that of standard stochastic gradient descent. In experiments with real-world datasets, our formulation often performs better than state-of-the-art stochastic gradient descent and online boosting algorithms.
\end{abstract}
\begin{keywords}
Online learning, classifier ensembles, Bayesian methods.
\end{keywords}
\section{Introduction}
The basic idea of classifier ensembles is to enhance the performance of individual classifiers by combining them.
In the offline setting, a popular approach to obtain the ensemble weights is to minimize the training error, or a surrogate risk function that approximates the training error. Solving this optimization problem usually calls for various sorts of gradient descent methods. For example, the most successful and popular ensemble technique, boosting, can be viewed in such a way~\citep{FreundSchapire1995,Mason1999,Friedman2001,Telgarsky2012}. Given the success of these ensemble techniques in a variety of batch learning tasks, it is natural to consider extending this idea to the online setting, where the labeled sample pairs $\{\mathbf{x}_t,y_t\}_{t=1}^T$ are presented to and processed by the algorithm sequentially, one at a time.
Indeed, online versions of ensemble methods have been proposed from a spectrum of perspectives. Some of these works focus on close approximation of offline ensemble schemes, such as boosting~\citep{OzaRussell2001,Pelossof2009}. Other methods are based on stochastic gradient descent~\citep{Babenko2009b,Leistner2009,Grbovic2011}. Recently,~\citet{Chen2012} formulated a smoothed boosting algorithm based on the analysis of regret from offline benchmarks. Despite their success in many applications~\citep{Grabner2006, Babenko2009a}, however, there are some common drawbacks of these online ensemble methods, including the lack of a universal framework for theoretical analysis and comparison, and the \emph{ad hoc} tuning of learning parameters such as step size.
In this work, we
propose
an online ensemble classification method that is not based on boosting or gradient descent. The main idea is to recursively estimate a posterior distribution of the ensemble weights in a Bayesian manner. We show that, for a given class of loss functions, we can define a likelihood function on the ensemble weights and, with an appropriately formulated prior distribution, we can
generate a posterior mean that closely approximates the empirical loss minimizer. If the stream of training data is sampled from a stationary process, this posterior mean converges to the expected loss minimizer.
Let us briefly explain the rationale for this scheme, which should be contrasted with the usual Bayesian setup where the likelihood is chosen to describe closely the generating process of the training data. In our framework, we view Bayesian updating as a loss minimization procedure: it provides an approximation to the minimizer of a well-defined risk function. More precisely, this risk minimization interpretation comes from the exploitation of two results in statistical asymptotic theory. The first is that, under mild regularity conditions, a Bayesian posterior distribution tends to peak at the maximum likelihood estimate (MLE) of the same likelihood function, as a consequence of the so-called Laplace method~\citep{mackay2003information}. The second is that the MLE can be viewed as a risk minimizer, where the risk is defined precisely as the expected negative log-likelihood.
Therefore, given a user-defined loss function, one can choose a suitable log-likelihood as a pure artifact, and apply a corresponding Bayesian update to minimize the risk. We will develop the theoretical foundation that justifies the above rationale.
Our proposed online ensemble classifier learning scheme is straightforward, but powerful in two respects. First, whenever our scheme is applicable, it can approximate the global optimal solution, in contrast with local methods such as stochastic gradient descent (SGD).
Second, assuming the training data is sampled from a stationary process, our proposed scheme possesses a rate of convergence to the expected loss minimizer that is at least as fast as standard SGD. In fact, our rate is faster unless the SGD step size is chosen optimally, which cannot be done \emph{a priori} in the online setting.
Furthermore, we also found that our method performs better in experiments with finite datasets compared with the averaging schemes in SGD~\citep{PolyakJuditsky1992,Schmidt2013} that have the same optimal theoretical convergence rate as our method.
In addition to providing a theoretical analysis of our formulation, we also tested our approach on real-world datasets and compared with individual classifiers, a baseline stochastic gradient descent method for learning classifier ensembles, and their averaging variants, as well as state-of-the-art online boosting methods. We found that our scheme consistently achieves superior performance over the baselines and often performs better than state-of-the-art online boosting algorithms, further demonstrating the validity of our theoretical analysis.
\noindent In summary, our contributions are:
\begin{enumerate}
\item
We propose a Bayesian approach to estimate the classifier weights with closed-form updates for online learning of classifier ensembles.
\item
We provide theoretical analyses of both the convergence guarantee and the bound on prediction error.
\item
We compare the asymptotic convergence rate of the proposed framework with that of previous gradient descent frameworks, thereby demonstrating the advantage of the proposed framework.
\end{enumerate}
This paper is organized as follows. We first briefly discuss the related works. We then state in detail our approach and provide theoretical guarantees in Section~\ref{sec:scheme}. A specific example for solving the online ensemble problem is provided in Section~\ref{sec:examples}, and numerical experiments are reported in Section~\ref{sec:experiments}. We discuss the use of other loss functions for online ensemble learning in Section~\ref{sec:ext} and conclude our paper in Section~\ref{sec:future} with future work. Some technical proofs are left to the Appendix.
\section{Related work}
\label{sec:related}
There is considerable past work on online ensemble learning. Many past works have focused on online learning with concept drift~\citep{WangFanYuHan2003,KolterMaloof2005,KolterMaloof2007,Minku2011}, where dynamic strategies of pruning and rebuilding ensemble members are usually considered. Given the technical difficulty, theoretical analysis for concept drift seems to be underdeveloped. \citet{KolterMaloof2005} proved error bounds for their proposed method, which appears to be the first such theoretical analysis, yet such analysis is not easily generalized to other methods in this category. Other works, such as~\citet{Schapire2001}, and~\citet{Cesa2003}, obtained performance bounds from the perspective of iterative games.
Our work is more closely related to methods that operate in a stationary environment, most notably some online boosting methods. One of the first methods
was proposed by~\citet{OzaRussell2001}, who showed asymptotic convergence to batch boosting under certain conditions. However, the convergence result only holds for some simple ``lossless" weak classifiers~\citep{Oza2001}, such as Na\"{\i}ve Bayes. Other variants of online boosting have been proposed, such as methods that employ feature selection~\citep{Grabner2006,Liu2007}, semi-supervised learning~\citep{Grabner2008}, multiple instance learning~\citep{Babenko2009a}, and multi-class learning~\citep{Saffari2010}. However, most of these works consider the design and update of weak classifiers beyond that of~\citet{Oza2001} and, thus, do not bear the convergence guarantee therein.
Other methods employ the gradient descent framework, such as Online GradientBoost~\citep{Leistner2009}, Online Stochastic Boosting~\citep{Babenko2009b} and Incremental Boosting~\citep{Grbovic2011}. There are convergence results given for many of these, which provide a basis for comparison with our framework. In fact, we show that our method compares favorably to gradient descent in terms of asymptotic convergence rate.
Other recent online boosting methods (Chen et al., 2012; Beygelzimer et al., 2015) generalize the weak learning assumption to online learning, and can offer theoretical guarantees on the error rate of the learned strong classifier if certain performance assumptions are satisfied for the weak learners. Our work differs from these approaches, in that our formulation and theoretical analysis focuses on the classes of loss functions, rather than imposing assumptions on the set of weak learners. In particular, we show that the ensemble weights in our algorithm converge asymptotically at an optimal rate to the minimizer of the expected loss.
Our proposed optimization scheme is related to two other lines of work. First is the so-called model-based method for global optimization~\citep{Zlochin2004,RubinsteinKroese2004,Hu2007}. This method iteratively generates an approximately optimal solution as the summary statistic for an evolving probability distribution. It is primarily designed for deterministic optimization, in contrast to the stochastic optimization setting that we consider. Second, our approach is, at least superficially, related to Bayesian model averaging (BMA)~\citep{Hoeting1999}. While BMA is motivated from a model selection viewpoint and aims to combine several candidate models for better description of the data, our approach does not impose any model but instead targets at loss minimization.
The present work builds on an earlier conference paper~\citep{Bai2014}. We make several generalizations here. First, we remove a restrictive, non-standard requirement on the loss function (which requires the loss function to satisfy a certain integral equality; Assumption 2 in~\citealp{Bai2014}). Second, we conduct experiments that compare our formulation with two variants of the SGD baseline in~\citet{Bai2014}, where the ensemble weights are estimated via two averaging schemes of SGD,
namely Polyak-Juditsky averaging~\citep{PolyakJuditsky1992} and
Stochastic Averaging Gradient~\citep{Schmidt2013}. Third, we evaluate two additional loss functions for ensemble learning and compare them with the loss function proposed in~\citet{Bai2014}.
\section{Bayesian Recursive Ensemble}
\label{sec:scheme}
We denote the input feature
by $\mathbf{x}$ and its classification label by $y$ ($1$ or $-1$). We assume that we are given $m$ binary weak classifiers $\{c_i(\mathbf{x})\}_{i=1}^m$, and our goal is to find the best ensemble weights $\boldsymbol\lambda=(\lambda_1,\ldots,\lambda_m)$, where $\lambda_i\geq 0$, to construct an ensemble classifier. For now, we do not impose a particular form of ensemble method (we defer this until Section \ref{sec:examples}), although one example form is $\sum_i\lambda_ic_i(\mathbf{x})$.
We focus on online learning, where training data $(\mathbf{x},y)$ comes in sequentially, one at a time at $t=1,2,3, \ldots$.
\subsection{Loss Minimization Formulation}
We formulate the online ensemble learning problem as a stochastic loss minimization problem. We first introduce a loss function at the weak classifier level.
Given a training pair $(\mathbf{x},y)$ and an arbitrary weak classifier $h$, we denote $g:=g(h(\mathbf{x}),y)$ as a non-negative loss function. Popular choices of $g$ include the logistic loss function, hinge loss, ramp loss, zero-one loss, etc. If $h$ is one of the given weak classifiers $c_i$, we will denote $g(c_i(\mathbf{x}),y)$ as $g_i(\mathbf{x},y)$, or simply $g_i$ for ease of notation. Furthermore, we define $g_i^t:=g(c_i^t(\mathbf x^t),y^t)$ where $(\mathbf x^t,y^t)$ is the training sample and $c_i^t$ the updated $i$-th weak classifier at time $t$. To simplify notation, we use $\mathbf g:=(g_1,\ldots,g_m)$ to denote the vector of losses for the weak classifiers, $\mathbf g^t:=(g_1^t,\ldots,g_m^t)$ to denote the losses at time $t$, and $\mathbf g^{1:T}:=(\mathbf g^1,\ldots,\mathbf g^T)$ to denote the losses
up to time $T$.
With the above notation, we let $\ell(\bm\lambda;\mathbf g^t)$ be some ensemble loss function at time $t$, which depends on the ensemble weights and the individual loss of each weak classifier. Then, ideally, the optimal ensemble weight vector $\bm\lambda^*$ should minimize the expected loss $E[\ell(\bm\lambda,\bm g)]$, where the expectation is taken with respect to the underlying distribution of the training data $p(\mathbf{x},y)$. Since this data distribution is unknown, we use the empirical loss as a surrogate:
\begin{equation}
L_T(\boldsymbol\lambda;\mathbf g^{1:T})=\ell_0(\boldsymbol\lambda)+\sum_{t=1}^T\ell(\boldsymbol\lambda;\mathbf g^t)
\label{eq:cumulated}
\end{equation}
where $\ell_0(\bm\lambda)$ can be regarded as an initial loss and can be omitted.
We make a set of assumptions on $L_T$ that are adapted from \citet{Chen1985}:
\begin{assumption}[Regularity Conditions]
Assume that for each $T$, there exists a $\boldsymbol\lambda_T^*$ that minimizes \eqref{eq:cumulated}, and
\vspace*{-0.5em}
\begin{enumerate}
\item ``local optimality": for each $T$, $\nabla L_T(\bm\lambda_T^*;\mathbf g^{1:T})=0$ and $\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})$ is positive definite,
\item ``steepness": the minimum eigenvalue of $\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})$
approaches
$\infty$ as $T\to\infty$,
\item ``smoothness": For any $\epsilon>0$, there exists a positive integer $N$ and $\delta>0$ such that for any $T>N$ and $\bm\theta\in H_\delta(\bm\lambda_T^*)=\{\bm\theta:\|\bm\theta-\bm\lambda_T^*\|_2\leq\delta\}$, $\nabla^2L_T(\bm\theta;\mathbf g^{1:T})$ exists and satisfies
{\footnotesize
$$I-A(\epsilon)\leq\nabla^2L_T(\bm\theta;\mathbf g^{1:T})\left(\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})\right)^{-1}\leq I+A(\epsilon)$$}
for some positive semidefinite symmetric matrix $A(\epsilon)$ whose largest eigenvalue tends to 0 as $\epsilon\to0$, and the inequalities above are matrix inequalities,
\item ``concentration": for any $\delta>0$, there exists a positive integer $N$ and constants $c,p>0$ such that for any $T>N$ and $\bm\theta\not\in H_\delta(\bm\lambda_T^*)$, we have
\begin{eqnarray*}
&&L_T(\bm\theta;\mathbf g^{1:T})-L_T(\bm\lambda_T^*;\mathbf g^{1:T})\geq \\
&&c\left((\bm\theta-\bm\lambda_T^*)'\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})(\bm\theta-\bm\lambda_T^*)\right)^p,
\end{eqnarray*}
\item ``integrability":
$$\int e^{-L_T(\bm\lambda;\mathbf g^{1:T})}d\bm\lambda<\infty.$$
\end{enumerate} \label{regularity}
\end{assumption}
In the situation where $\ell$ is separable in terms of each component of $\bm\lambda$, i.e.~$\ell(\bm\lambda;\mathbf g)=\sum_{i=1}^mr_i(\lambda_i;\mathbf g)$ and $\ell_0(\bm\lambda)=\sum_{i=1}^ms_i(\lambda_i)$ for some twice differentiable functions $r_i(\cdot;\mathbf g)$ and $s_i(\cdot)$, the assumptions above will depend only on
$f_i(\lambda;\mathbf g^{1:T}):=\sum_{t=1}^Tr_i(\lambda;\mathbf g^t)+s_i(\lambda)$ for each $i$. For example, Condition 3 in Assumption \ref{regularity} reduces to merely checking uniform continuity of each $f_i''(\cdot;\mathbf g^{1:T})$.
Condition 1 in Assumption \ref{regularity} can be interpreted as the standard first and second order conditions for the optimality of $\bm\lambda_T^*$, whereas Condition 3 in essence requires continuity of the Hessian matrix. Conditions 2 and 4 are needed for the use of the Laplace method \citep{mackay2003information}, which, as we will show later, stipulates that the posterior distribution peaks near the optimal solution $\bm\lambda_T^*$ of empirical loss~\eqref{eq:cumulated}.
\subsection{A Bayesian Approach}
\label{sec:framework}
We state our procedure in Algorithm~\ref{alg:framework}. We define $p(\mathbf g|\bm\lambda)=e^{-\ell(\bm\lambda;\mathbf g)}$ and $p(\bm\lambda)=e^{-\ell_0(\bm\lambda)}$.
\begin{algorithm}[htb]
\caption{Bayesian Ensemble}
\label{alg:framework}
\begin{algorithmic}
\STATE {\bfseries Input:} streaming samples $\{(\mathbf{x}^t,y^t)\}_{t=1}^T$
\STATE \qquad\quad online weak classifiers $\{c_i^t(\mathbf{x})\}_{i=1}^m$
\STATE \qquad\quad the functions $p(\mathbf{g}|\bm\lambda)$ and $p(\bm\lambda)$
\STATE {\bfseries Initialize:} hyper-parameters for $p(\mathbf{g}|\bm\lambda)$ and $p(\bm\lambda)$
\FOR{$t=1$ {\bfseries to} $T$}
\STATE \begin{itemize}
\item[] $\forall i$, compute $g_i^t=g(c_i^t(\mathbf{x}^t),y^t)$
\item[] update for the ``posterior distribution" of $\bm\lambda:$\\
$p(\bm\lambda|\mathbf{g}^{1:t})\propto p(\mathbf{g}^{t}|\bm\lambda)p(\bm\lambda|\mathbf{g}^{1:t-1}) \propto\prod\limits_{s=1}^tp(\mathbf{g}^{s}|\bm\lambda)p(\bm\lambda)$
\item[] update the weak classifiers using $(\mathbf{x}^t,y^t)$
\end{itemize}
\ENDFOR
\end{algorithmic}
\end{algorithm}
Algorithm~\ref{alg:framework} requires some further explanation:
\begin{enumerate}
\item Our updated estimate for $\bm\lambda$ at each step is the ``posterior mean" for $\bm\lambda$, given by
$$\frac{\int\bm\lambda\prod\limits_{s=1}^tp(\mathbf{g}^{s}|\bm\lambda)p(\bm\lambda)d\bm\lambda}{\int\prod\limits_{s=1}^tp(\mathbf{g}^{s}|\bm\lambda)p(\bm\lambda)d\bm\lambda}\ .$$
A small numerical sketch of this update is given after this list.
\item When the loss function $\ell$ satisfies
\begin{equation}
\int e^{-\ell(\bm\lambda;\mathbf w)}d\mathbf w=1\label{int t}
\end{equation}
and $\ell_0$ satisfies
\begin{equation*}
\int e^{-\ell_0(\mathbf w)}d\mathbf w=1
\end{equation*}
then $p(\mathbf g|\bm\lambda)$ is a valid likelihood function and $p(\bm\lambda)$ a valid prior distribution, so that $p(\bm\lambda|\mathbf g^{1:t})$ as depicted in Algorithm~\ref{alg:framework} is indeed a posterior distribution for $\bm\lambda$ (i.e. the quotation marks around ``posterior distribution" in the algorithm can be removed). In this context, a good choice of $p(\bm\lambda)=e^{-\ell_0(\bm\lambda)}$, e.g. as a conjugate prior for the likelihood $p(\mathbf g|\bm\lambda)=e^{-\ell(\bm\lambda;\mathbf g)}$, can greatly facilitate the computational effort at each step. On the other hand, we also mention that such a likelihood interpretation is not a necessary requirement for Algorithm~\ref{alg:framework} to work, since its convergence analysis relies on the Laplace method, which is non-probabilistic in nature.
\end{enumerate}
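To make the recursion concrete, the following is a minimal numerical sketch of Algorithm~\ref{alg:framework} for a single ensemble weight, in which the ``posterior" is represented on a grid and summarized by its mean. The particular loss ($\ell(\lambda;g)=\theta\lambda g-\log\lambda$, anticipating Section~\ref{sec:examples}), the prior, the grid, and the toy stream of losses are illustrative choices, not part of the algorithm itself.
\begin{small}
\begin{verbatim}
import numpy as np

theta = 0.1
grid = np.linspace(1e-3, 50.0, 5000)       # grid of candidate values for lambda
log_post = -(grid - np.log(grid))          # -ell_0(lambda); a Gamma(2,1)-type prior

def absorb(log_post, g):
    """Absorb one observed weak-classifier loss g via -ell(lambda; g)."""
    return log_post - (theta * grid * g - np.log(grid))

def posterior_mean(log_post):
    w = np.exp(log_post - log_post.max())  # normalize in a numerically stable way
    return float(np.sum(grid * w) / np.sum(w))

for g_t in [0.2, 0.8, 0.5]:                # a toy stream of losses
    log_post = absorb(log_post, g_t)
print(posterior_mean(log_post))            # current estimate of the weight
\end{verbatim}
\end{small}
For the conjugate choices of Section~\ref{sec:examples} this grid approximation is unnecessary, since the posterior mean is available in closed form.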
Algorithm~\ref{alg:framework} offers the desirable properties characterized by the following theorem.
\begin{thm}
\label{thm:convergence}
Under Assumption \ref{regularity}, the sequence of random vectors $\bm\lambda_T$ with distributions $p_{\scriptscriptstyle T}(\bm\lambda|\mathbf g^{1:T})$ in Algorithm~\ref{alg:framework} satisfies the asymptotic normality property
\begin{equation}
\left(\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})\right)^{1/2}(\bm\lambda_T-\bm\lambda_T^*)\stackrel{d}{\to}N(0,I) \label{AN}
\end{equation}
where $\bm\lambda_T$ is interpreted as
a random variable with distribution $p_{\scriptscriptstyle T}(\bm\lambda|\mathbf g^{1:T})$, and $\stackrel{d}{\to}$ denotes convergence in distribution. Furthermore, under the uniform integrability condition $\sup_{\scriptscriptstyle T}E_{\bm\lambda_T|\mathbf g^{1:T}}\|\bm\lambda_T-\bm\lambda_T^*\|_1^{1+\epsilon}<\infty$ for some $\epsilon>0$, we have
\begin{equation}
|E_{\bm\lambda_T|\mathbf g^{1:T}}[\bm\lambda_T]-\bm\lambda_T^*|=o\left(\frac{1}{\sigma_T^{1/2}}\right)\label{posterior mean}
\end{equation}
where $E_{\bm\lambda_T|\mathbf g^{1:T}}[\cdot]$ denotes the posterior mean and
$\sigma_T$ is the minimum eigenvalue of the matrix $\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})$.
\end{thm}
\begin{proof}
Let
$$\tilde L_T(\bm\lambda;\mathbf g^{1:T})=L_T(\bm\lambda;\mathbf g^{1:T})+\log\int e^{-L_T(\bm\lambda;\mathbf g^{1:T})}d\bm\lambda$$
which is well-defined by Condition 5 in Assumption \ref{regularity}. Note that $e^{-\tilde L_T(\bm\lambda;\mathbf g^{1:T})}$ is a valid probability density in $\bm\lambda$ by definition. Moreover, Conditions 1--4 in Assumption \ref{regularity} all hold when $L_T$ is replaced by $\tilde L_T$ (since they all depend only on the gradient of $L_T(\bm\lambda;\mathbf g^{1:T})$ with respect to $\bm\lambda$ or the difference $L_T(\bm\lambda_1;\mathbf g^{1:T})-L_T(\bm\lambda_2;\mathbf g^{1:T})$).
The convergence in \eqref{AN} then follows from Theorem 2.1 in \citet{Chen1985} applied to the sequence of densities $e^{-\tilde L_T(\bm\lambda;\mathbf g^{1:T})}$ for $T=1,2,\dots$. Condition 1 in Assumption \ref{regularity} is equivalent to conditions (P1) and (P2) therein, while Conditions 2 and 3 in Assumption~\ref{regularity} correspond to (C1) and (C2) in~\citet{Chen1985}.
Condition 4 is equivalent to (C3.1), which then implies (C3) there to invoke its Theorem 2.1 to conclude \eqref{AN}.
To show the bound \eqref{posterior mean} we take the expectation on \eqref{AN} to get
\vspace*{-0.25em}
\begin{equation}
\left(\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})\right)^{\frac{1}{2}}(E_{\bm\lambda_T|\mathbf g^{1:T}}[\bm\lambda_T]-\bm\lambda_T^*)\to0,
\label{interim}
\end{equation}
which is valid because of the uniform integrability condition $\sup_TE_{\bm\lambda_T|\mathbf g^{1:T}}\|\bm\lambda_T-\bm\lambda_T^*\|_1^{1+\epsilon}<\infty$ \citep{durrett10}. Therefore, $E_{\bm\lambda_T|\mathbf g^{1:T}}[\bm\lambda_T]-\bm\lambda_T^*=\left(\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})\right)^{-\frac{1}{2}}\mathbf w_T$ where $\mathbf w_T=o(1)$ by \eqref{interim}. But then
\begin{eqnarray*}
&&\left\|\left(\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})\right)^{-\frac{1}{2}}\mathbf w_T\right\|_1\\
&\leq&\left\|\left(\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})\right)^{-\frac{1}{2}}\right\|_1\|\mathbf w_T\|_1\\
&\leq&\frac{C}{\sigma_T^{1/2}}\|\mathbf w_T\|_1=o\left(\frac{1}{\sigma_T^{1/2}}\right)
\end{eqnarray*}
where $\|\cdot\|_1$, when applied to a matrix, denotes the induced $L_1$-norm.
This shows \eqref{posterior mean}.
\vspace*{-0.2cm}
\end{proof}
The idea behind \eqref{AN} comes from classical Bayesian asymptotics and is an application of the so-called Laplace method \citep{mackay2003information}. Theorem \ref{thm:convergence} states that given the loss structure satisfying Assumption \ref{regularity}, the posterior distribution of $\bm\lambda$ under our update scheme provides an approximation to the minimizer $\bm\lambda_T^*$ of the cumulative loss at time $T$, as $T$ increases, by tending to a normal distribution peaked at $\bm\lambda_T^*$ with shrinking variance ($\bm\lambda_T^*$ here can be interpreted as the maximum a posteriori (MAP) estimate). The bound \eqref{posterior mean} states that this posterior distribution can be summarized using the posterior mean to give a point estimate of $\bm\lambda_T^*$. Moreover, note that $\bm\lambda_T^*$ is the global, not merely local, minimizer of the cumulative loss. This approximation of the global optimum highlights a key advantage of the proposed Bayesian scheme over other methods such as stochastic gradient descent (SGD), which only find a local optimum.
The next theorem states another benefit of our Bayesian scheme over standard SGD. Suppose that SGD does indeed converge to the global optimum. Even so, it turns out that our proposed Bayesian scheme converges faster than standard SGD under the assumption of i.i.d. training samples.
\begin{thm}
\label{thm:rate_general}
Suppose Assumption \ref{regularity} holds. Assume also that $\mathbf g^t$ are i.i.d., with $E[\ell(\bm\lambda;\mathbf g)]<\infty$ and $E[\ell(\bm\lambda;\mathbf g)^2]<\infty$. The Bayesian posterior mean produced by Alg.~\ref{alg:framework} converges to $\text{argmin}_{\bm\lambda}E[\ell(\bm\lambda;\mathbf g)]$ strictly faster than standard SGD (supposing it converges to the global minimum), given by
\begin{equation}
\bm\lambda_{T+1}\leftarrow\bm\lambda_T-\epsilon_{\scriptscriptstyle T}K\nabla\ell(\bm\lambda_T;\mathbf g^T)\label{SGD update}
\end{equation}
in terms of the asymptotic variance, except when the step size $\epsilon_{\scriptscriptstyle T}$ and the matrix $K$ are chosen optimally.
\end{thm}
\noindent In Theorem \ref{thm:rate_general}, by asymptotic variance we mean the following: both the sequence of posterior means and the update sequence from SGD possess versions of the central limit theorem, in the form $\sqrt T(\bm\lambda_T-\bm\lambda^*)\stackrel{d}{\to}N(0,\Sigma)$ where $\bm\lambda^*=\text{argmin}_{\bm\lambda}E[\ell(\bm\lambda;\mathbf g)]$. Our comparison is on the asymptotic covariance matrix $\Sigma$ with respect to matrix inequality: for two update schemes with corresponding asymptotic covariance matrices $\Sigma_1$ and $\Sigma_2$, Scheme 1 converges faster than Scheme 2 if $\Sigma_2-\Sigma_1$ is positive definite.
\begin{proof}
The proof follows by combining \eqref{posterior mean} with established central limit theorems for sample average approximation \citep{pasupathy2011stochastic} and stochastic gradient descent (SGD) algorithms. First, let $z(\bm\lambda):=E[\ell(\bm\lambda;\mathbf g)]$, and $\bm\lambda^*:=\text{argmin}_{\bm\lambda}z(\bm\lambda)$. Note that the quantity $\bm\lambda_T^*$ is the minimizer of $\frac{1}{T}\sum_{t=1}^T\ell(\bm\lambda;\mathbf g^t)+\frac{\ell_0(\bm\lambda)}{T}$. Then, together with the fact that $\frac{\ell_0(\bm\lambda)}{T}$ is asymptotically negligible, Theorem 5.9 in \citet{pasupathy2011stochastic} stipulates that $\sqrt T(\bm\lambda_T^*-\bm\lambda^*)\stackrel{d}{\to}N(0,\Sigma)$, where
\begin{equation}
\Sigma=(\nabla^2 z(\bm\lambda^*))^{-1}Var(\nabla\ell(\bm\lambda^*;\mathbf g))(\nabla^2 z(\bm\lambda^*))^{-1}\label{Sigma}
\end{equation}
and $Var(\cdot)$ denotes the covariance matrix.
Now since $\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})=\sum_{t=1}^T(\nabla^2\ell(\bm\lambda_T^*;\mathbf g^t))$ and $\frac{1}{T}\sum_{t=1}^T(\nabla^2\ell(\bm\lambda_T^*;\mathbf g^t))\to E[\nabla^2\ell(\bm\lambda^*;\mathbf g)]$ by the law of large numbers \citep{durrett10}, we have $\nabla^2L_T(\bm\lambda_T^*;\mathbf g^{1:T})=\Theta(T)$. Then the bound in \eqref{posterior mean} implies that $|E_{\bm\lambda_T|\mathbf g^{1:T}}[\bm\lambda_T]-\bm\lambda_T^*|=o\left(\frac{1}{\sqrt T}\right)$. In other words, the difference between the posterior mean and $\bm\lambda_T^*$ is of smaller scale than $1/\sqrt T$. By Slutsky's theorem \citep{serfling2009approximation}, this implies that $\sqrt T(E_{\bm\lambda_T|\mathbf g^{1:T}}[\bm\lambda_T]-\bm\lambda^*)\stackrel{d}{\to}N(0,\Sigma)$ also.
On the other hand, for SGD \eqref{SGD update}, it is known (e.g.~\citealp{asmussen2007stochastic}) that the optimal step size parameter value is $\epsilon_{\scriptscriptstyle T}=1/T$ and $K=(\nabla^2z(\bm\lambda^*))^{-1}$, in which case the central limit theorem for the update $\bm\lambda_T$ will be given by $\sqrt T(\bm\lambda_T-\bm\lambda^*)\stackrel{d}{\to}N(0,\Sigma)$ where $\Sigma$ is exactly \eqref{Sigma}. For other choices of $\epsilon_{\scriptscriptstyle T}$ and $K$, either the convergence rate is slower than order $1/\sqrt T$ or the asymptotic variance, denoted by $\tilde\Sigma$, is such that $\tilde\Sigma-\Sigma$ is positive definite. Therefore, by comparing the asymptotic variance, the posterior mean always has a faster convergence unless the step size in SGD is chosen optimally.
\end{proof}
To give some intuition from a statistical viewpoint, Theorem~\ref{thm:rate_general} arises from two layers of approximation of our posterior mean to $\bm\lambda^*$. First, thanks to \eqref{posterior mean}, the difference between posterior mean and the minimizer of cumulative loss $\bm\lambda_T^*$ (which can be interpreted as the MAP) decreases at a rate faster than $1/\sqrt T$. Second, $\bm\lambda_T^*$ converges to $\bm\lambda^*$ at a rate of order $1/\sqrt T$ with the optimal multiplicative constant. This is equivalent to the observation that the MAP, much like the maximum likelihood estimator (MLE), is asymptotically efficient as a statistical estimator.
Putting things in perspective, compared with local methods such as SGD, we have made an apparently stronger set of assumptions (i.e. Assumption \ref{regularity}), which pays off by allowing for stronger theoretical guarantees (Theorems \ref{thm:convergence} and \ref{thm:rate_general}). In the next section we describe an example where a meaningful loss function precisely fits into our framework.
\section{A Specific Example}
\label{sec:examples}
We now discuss in depth a simple and natural choice of loss function and its corresponding likelihood function and prior, which are also used in our experiments in Section \ref{sec:experiments}.
Consider
\begin{equation}
\ell(\bm\lambda;\mathbf g)=\theta\sum_{i=1}^m\lambda_ig_i-\sum_{i=1}^m\log\lambda_i\label{loss}
\end{equation}
The motivation for \eqref{loss} is straightforward: the first term is the sum of the individual losses, each weighted by $\lambda_i$. The extra terms $-\log\lambda_i$ prevent the $\lambda_i$ from approaching zero, which would trivially minimize the first term. The parameter $\theta$ specifies the trade-off between the importance of the first and the second term.
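To see the role of the two terms concretely, note that the expected loss decouples across the weights, and setting the derivative of $E[\theta\lambda_ig_i-\log\lambda_i]$ with respect to $\lambda_i$ to zero gives
\begin{equation*}
\theta E[g_i(\mathbf x,y)]-\frac{1}{\lambda_i}=0
\qquad\Longrightarrow\qquad
\lambda_i=\frac{1}{\theta E[g_i(\mathbf x,y)]},
\end{equation*}
so that at the optimum each weak classifier receives a weight inversely proportional to its expected loss; this closed form is also used in the proof of Theorem~\ref{thm:bound} below.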
This loss function satisfies Assumption \ref{regularity}. In particular, the Hessian of $L_T$ turns out not to depend on $\mathbf g^{1:T}$; therefore all conditions of Assumption~\ref{regularity} can be verified easily.
Using the discussion in
Section~\ref{sec:framework}, we choose the exponential likelihood (note that in this definition we add an extra constant term $m\log\theta$ to \eqref{loss}, which does not affect the minimization in any way)
\begin{equation}
p(\mathbf g|\bm\lambda)=\prod_{i=1}^m(\theta\lambda_i)e^{-\theta\lambda_ig_i}\ .
\label{eq:likelihood}
\end{equation}
To facilitate computation, we employ the Gamma prior:
\begin{equation}
p(\bm\lambda)\propto\prod_{i=1}^m\lambda_i^{\alpha-1}e^{-\beta\lambda_i}
\label{eq:prior}
\end{equation}
where $\alpha$ and $\beta$ are the shape and rate hyperparameters. Correspondingly, we pick $\ell_0(\bm\lambda)=\beta\sum_{i=1}^m\lambda_i-(\alpha-1)\sum_{i=1}^m\log\lambda_i$. To be concrete, the cumulative loss in \eqref{eq:cumulated} (disregarding the constant terms) is
$$\beta\sum_{i=1}^m\lambda_i-(\alpha-1)\sum_{i=1}^m\log\lambda_i+\sum_{t=1}^T\left(\theta\sum_{i=1}^m\lambda_ig_i^t-\sum_{i=1}^m\log\lambda_i\right).$$
Now, under conjugacy of \eqref{eq:likelihood} and \eqref{eq:prior}, the posterior distribution of $\bm\lambda$ after $t$ steps is given by the Gamma distribution
$$p(\bm\lambda|\mathbf{g}^{1:t})\propto\prod_{i=1}^m(\lambda_i)^{\alpha+t-1}e^{-(\beta+\theta\sum_{s=1}^tg_i^{s})\lambda_i}\ .$$
Therefore the posterior mean for each $\lambda_i$ is
\begin{equation}
\frac{\alpha+t}{\beta+\theta\sum_{s=1}^tg_i^s}\ .
\label{eq:post_mean}
\end{equation}
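As an illustrative numerical example (with hypothetical values), take $\alpha=\beta=1$ and $\theta=0.1$, and suppose classifier $i$ has incurred the losses $g_i^{1:3}=(0.2,\,0.8,\,0.5)$ after $t=3$ steps; then \eqref{eq:post_mean} gives $\lambda_i=(1+3)/(1+0.1\cdot 1.5)=4/1.15\approx 3.48$, and a classifier with a larger cumulative loss would receive a correspondingly smaller weight.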
We use the following prediction rule at each step:
\begin{equation}
y=
\left\{
\begin{array}{rcl}
1 \ & \mbox{if}\ \sum\limits_{i=1}^m\lambda_ig_i(\mathbf{x},1)\leq \sum\limits_{i=1}^m\lambda_ig_i(\mathbf{x},-1)\\
-1 \ & \mbox{otherwise}
\end{array}
\right.\\
\label{eq:strong}
\end{equation}
where each $\lambda_i$ is the posterior mean given by \eqref{eq:post_mean}. For this setup, Algorithm~\ref{alg:framework} can be cast as Algorithm~\ref{alg:specification} below, which is to be implemented in Section~\ref{sec:experiments}.
\begin{algorithm}[bht]
\caption{Closed-form Bayesian Ensemble}
\label{alg:specification}
\begin{algorithmic}
\STATE {\bfseries Input:} streaming samples $\{(\mathbf{x}^t,y^t)\}_{t=1}^T$
\STATE \qquad\quad online weak classifiers $\{c_i^t(\mathbf{x})\}_{i=1}^m$
\STATE {\bfseries Initialize:} parameters $\theta$ for likelihood~\eqref{eq:likelihood} and parameters $\alpha,\beta$ for prior~\eqref{eq:prior}
\FOR{$t=1$ {\bfseries to} $T$}
\STATE \begin{itemize}
\item[] $\forall i$, compute $g_i^t=g(c_i^t(\mathbf{x}^t),y^t)$, where $g$ is the logistic loss function
\item[] update the posterior mean of $\bm\lambda$ by~\eqref{eq:post_mean}
\item[] update the weak classifiers according to the particular choice of online weak classifier
\item[] make prediction by~\eqref{eq:strong} for the next incoming sample
\end{itemize}
\ENDFOR
\end{algorithmic}
\end{algorithm}
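For reference, the following is a minimal, self-contained Python sketch of Algorithm~\ref{alg:specification}. The weak classifiers are abstracted away: the sketch only assumes that, at each step, we are given the vector of their real-valued scores on the current sample, and it uses the logistic loss for $g$ as in Algorithm~\ref{alg:specification}. The class name and the toy data loop are illustrative choices, not prescribed by the paper.
\begin{small}
\begin{verbatim}
import numpy as np

def logistic_loss(scores, y):
    # g(c_i(x), y) for real-valued scores c_i(x) and a label y in {-1, +1}
    return np.log1p(np.exp(-y * np.asarray(scores)))

class BayesianEnsemble:
    """Closed-form Bayesian ensemble (sketch of Algorithm 2)."""

    def __init__(self, m, alpha=1.0, beta=1.0, theta=0.1):
        self.alpha, self.beta, self.theta = alpha, beta, theta
        self.t = 0
        self.cum_loss = np.zeros(m)   # running sums of g_i^s over s

    def weights(self):
        # posterior mean (alpha + t) / (beta + theta * sum_s g_i^s)
        return (self.alpha + self.t) / (self.beta + self.theta * self.cum_loss)

    def predict(self, scores):
        # prediction rule: pick the label with the smaller weighted loss
        lam = self.weights()
        loss_pos = lam @ logistic_loss(scores, 1)
        loss_neg = lam @ logistic_loss(scores, -1)
        return 1 if loss_pos <= loss_neg else -1

    def update(self, scores, y):
        # absorb the revealed label: g_i^t = g(c_i^t(x^t), y^t)
        self.cum_loss += logistic_loss(scores, y)
        self.t += 1

# toy usage with synthetic weak-classifier scores (illustrative only)
rng = np.random.default_rng(0)
ens = BayesianEnsemble(m=5)
for _ in range(100):
    y = int(rng.choice([-1, 1]))
    scores = y * rng.uniform(-0.2, 1.0, size=5)   # weak, noisy agreement with y
    y_hat = ens.predict(scores)                   # predict before seeing y
    ens.update(scores, y)
\end{verbatim}
\end{small}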
The following bound provides
further understanding
of the loss function \eqref{loss} and the prediction rule \eqref{eq:strong}, by relating their use with a guarantee on the prediction error:
\begin{thm}
\label{thm:bound}
Suppose that $\mathbf g^t$ are i.i.d., so that $\bm\lambda_T^*$ converges to $\bm\lambda^*:=\text{argmin}_{\bm\lambda}E[\ell(\bm\lambda;\mathbf g)]$ for $\ell$ defined in \eqref{loss}. The prediction error using rule \eqref{eq:strong} with $\bm\lambda^*$ is bounded by
\begin{equation}
P_{(\mathbf{x},y)}(\text{error})\leq m^{\frac{1}{p}}\left(E_{(\mathbf{x},y)}
\left[\left(\sum_{i=1}^m\frac{g_i(\mathbf{x},-y)}{E[g_i(\mathbf{x},y)]}\right)^{\frac{-1}{p-1}}\right]\right)
^{\frac{p-1}{p}}
\label{eq:bound_opt}
\end{equation}
for any $p>1$.
\end{thm}
To make sense of this result, note that the quantity $\frac{1}{E[g_i(\mathbf{x},y)]}g_i(\mathbf{x},-y)$ can be interpreted as a performance indicator of each weak classifier, i.e. the larger it is, the better the weak classifier is, since a good classifier should have a small loss $E[g_i(\mathbf{x},y)]$ and correspondingly a large $g_i(\mathbf{x},-y)$. As long as there exist some good weak classifiers among the $m$ choices, $\sum_{i=1}^m\frac{g_i(\mathbf{x},-y)}{E[g_i(\mathbf{x},y)]}$ will be large, which leads to a small error bound in \eqref{eq:bound_opt}.
\vspace*{0.2in}
\begin{proof}
Suppose $\bm\lambda$ is used in the strong classifier~\eqref{eq:strong}. Denote $I(\cdot)$ as the indicator function. Consider
\begin{eqnarray*}
&&E_{(\mathbf{x},y)}\left[\sum_{i=1}^m\lambda_ig_i(\mathbf{x},y)\right]\\
&=&\int\Bigg(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)P(y=1|\mathbf{x}){}
+\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1)P(y=-1|\mathbf{x})\Bigg)dP(\mathbf{x})\\
&\geq&\int\Bigg( I(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)>\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1))
\cdot\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)P(y=1|\mathbf{x}){}\\
&&{}+I(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)<\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1))
\cdot\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1)P(y=-1|\mathbf{x})\Bigg)dP(\mathbf{x})\\
&\geq&\int\Bigg( I(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)>\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1))
\cdot\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1)P(y=1|\mathbf{x}){}\\
&&{}+I(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)<\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-1))
\cdot\sum_{i=1}^m\lambda_ig_i(\mathbf{x},1)P(y=-1|\mathbf{x})\Bigg)dP(\mathbf{x})\\
&=&E_{(\mathbf{x},y)}\left[I(\text{error})\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-y)\right]\\
&\geq&P(\text{error})^p
\left(E_{(\mathbf{x},y)}\left[\left(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-y)\right)^{\frac{-1}{p-1}}\right]\right)^{-(p-1)}
\end{eqnarray*}
where the last inequality holds by the reverse H\"older inequality \citep{Hardy1952}. So
\vspace*{-0.2cm}
\begin{eqnarray*}
P(\text{error})&\leq& \left(E_{(\mathbf{x},y)}\left[\sum_{i=1}^m\lambda_ig_i(\mathbf{x},y)\right]\right)^{\frac{1}{p}}\\
&&\cdot\left(E_{(\mathbf{x},y)}\left[\left(\sum_{i=1}^m\lambda_ig_i(\mathbf{x},-y)\right)^{\frac{-1}{p-1}}\right]\right)
^{\frac{p-1}{p}}
\end{eqnarray*}
and the result~\eqref{eq:bound_opt} follows by plugging in $\lambda_i=\frac{1}{\theta E_{(\mathbf x,y)}[g_i(\mathbf x,y)]}$ for each $i$, the minimizer of $E[\ell(\bm\lambda;\mathbf g)]$, which can be solved directly when $\ell$ is in the form \eqref{loss}.
\vspace*{-0.2cm}
\end{proof}
Finally, in correspondence to Theorem \ref{thm:rate_general}, the standard SGD for \eqref{loss} is written as
\begin{equation}
\lambda_i^{t+1}=\lambda_i^{t}-\frac{\gamma}{t}\left(\theta g_i^t-\frac{1}{\lambda_i^{t}}\right)
\label{eq:SGD}
\end{equation}
where $\gamma$ is a parameter that controls the step size. The following result is a consequence of Theorem~\ref{thm:rate_general} (we give another proof here that reveals more specific details).
\begin{thm}
\label{thm:rate}
Suppose that $\mathbf g^t$ are i.i.d., and $0<E_{(\mathbf x,y)}[g_i(\mathbf x,y)]<\infty$ and $Var_{(\mathbf x,y)}(g_i(\mathbf x,y))<\infty$.
For each $\lambda_i$, the posterior mean given by~\eqref{eq:post_mean} always has a rate of convergence at least as fast as the SGD update \eqref{eq:SGD} in terms of asymptotic variance. In fact, it is strictly better in all situations except when the step size parameter $\gamma$ in \eqref{eq:SGD} is set optimally a priori.
\end{thm}
\begin{proof}
Since for each $i$, $g_i^t$ are i.i.d., the sample mean $(1/T)\sum_{t=1}^Tg_i^t$ follows a central limit theorem. It can be argued using the delta method \citep{serfling2009approximation} that the posterior mean \eqref{eq:post_mean} satisfies
\begin{eqnarray}
&&\sqrt T\left(\frac{\alpha+T}{\beta+\theta\sum_{t=1}^Tg_i^t}-\frac{1}{\theta E[g_i(\mathbf{x},y)]}\right)\notag\\
&\longrightarrow&N\left(0,\frac{Var(g_i(\mathbf{x},y))}{\theta^2(E[g_i(\mathbf{x},y)])^4}\right)\label{asy Bayesian}
\end{eqnarray}
For the stochastic gradient descent scheme \eqref{eq:SGD}, it would be useful to cast the objective function as $z_i(\lambda_i)=E[\theta\lambda_ig_i-\log\lambda_i]$. Let $\lambda_i^*=\text{argmin}_{\lambda}z_i(\lambda)$ which can be directly solved as $\frac{1}{\theta E[g_i]}$. Then $z_i''(\lambda_i^*)=\frac{1}{{\lambda_i^*}^2}=\theta^2(E[g_i(\mathbf{x},y)])^2$. If the step size $\gamma>\frac{1}{2z''(\lambda_i^*)}$, the update scheme \eqref{eq:SGD} will generate $\lambda_i^T$ that satisfies the following central limit theorem \citep{asmussen2007stochastic,kushner2003stochastic}
\begin{equation}
\sqrt T(\lambda_i^T-\lambda_i^*)\stackrel{d}{\rightarrow}N(0,\sigma_i^2)\label{CLT2}
\end{equation}
where
\begin{equation}
\sigma_i^2=\int_0^\infty e^{(1-2\gamma z_i''(\lambda_i^*))s}\gamma^2Var\left(\theta g_i(\mathbf{x},y)-\frac{1}{\lambda_i^*}\right)ds\label{sigma}
\end{equation}
and $\theta g_i(\mathbf{x},y)-\frac{1}{\lambda_i^*}$ is the unbiased estimate of the gradient at the point $\lambda_i^*$. On the other hand, $\lambda_i^T-\lambda_i^*=\omega_p(\frac{1}{\sqrt T})$ if $\gamma\leq\frac{1}{2z''(\lambda_i^*)}$, i.e.~the convergence is slower than \eqref{CLT2} asymptotically and so we can disregard this case~\citep{asmussen2007stochastic}. Now substitute $\lambda_i^*=\frac{1}{\theta E[g_i]}$ into \eqref{sigma} to obtain
\begin{eqnarray*}
\sigma_i^2&=&\theta^2\gamma^2Var(g_i(\mathbf{x},y))\int_0^\infty e^{(1-2\gamma/{\lambda_i^*}^2)s}ds\\
&=&\frac{\theta^2\gamma^2Var(g_i(\mathbf{x},y))}{2\gamma/{\lambda_i^*}^2-1}
=\frac{\theta^2\gamma^2Var(g_i(\mathbf{x},y))}{2\gamma\theta^2(E[g_i(\mathbf{x},y)])^2-1}
\end{eqnarray*}
and letting $\gamma=\tilde\gamma/\theta^2$, we get
\begin{equation}
\sigma_i^2=\frac{\tilde\gamma^2Var(g_i(\mathbf{x},y))}{\theta^2(2\tilde\gamma(E[g_i(\mathbf{x},y)])^2-1)}\label{eq:asy_SGD}
\end{equation}
if $\tilde\gamma>\frac{\theta^2}{2z''(\lambda_i^*)}=\frac{1}{2(E[g_i(\mathbf{x},y)])^2}$.
We are now ready to compare the asymptotic variance in \eqref{asy Bayesian} and \eqref{eq:asy_SGD}, and show that for all $\tilde\gamma$, the one in \eqref{asy Bayesian} is smaller.
Note that this is equivalent to showing that
$$\frac{Var(g_i(\mathbf{x},y))}{\theta^2(E[g_i(\mathbf{x},y)])^4}
\leq\frac{\tilde\gamma^2Var(g_i(\mathbf{x},y))}{\theta^2(2\tilde\gamma(E[g_i(\mathbf{x},y)])^2-1)}$$
Eliminating the common factors, we have
$$\frac{1}{(E[g_i(\mathbf{x},y)])^2}\leq\frac{\tilde\gamma^2}{2\tilde\gamma-1/(E[g_i(\mathbf{x},y)])^2}$$
and by re-arranging the terms, we have
$$(E[g_i(\mathbf{x},y)])^2\left(\tilde\gamma-\frac{1}{(E[g_i(\mathbf{x},y)])^2}\right)^2\geq0$$
which is always true. Equality holds iff $\tilde\gamma=\frac{1}{(E[g_i(\mathbf{x},y)])^2}$, which corresponds to $\gamma=\frac{1}{\theta^2(E[g_i(\mathbf{x},y)])^2}$.
Therefore, the asymptotic variance in \eqref{asy Bayesian} is always smaller than \eqref{eq:asy_SGD}, unless the step size $\gamma$ is chosen optimally.
\end{proof}
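To illustrate the step-size sensitivity discussed in Theorem~\ref{thm:rate} and its proof, the following minimal sketch runs the SGD update \eqref{eq:SGD} and the posterior mean \eqref{eq:post_mean} on the same synthetic i.i.d.\ stream of losses; the uniform loss distribution, the step-size constant $\gamma$, and the initialization are illustrative choices.
\begin{small}
\begin{verbatim}
import numpy as np

theta = 0.1

def sgd_weight(g_stream, gamma=1.0, lam0=1.0):
    # SGD update: lambda <- lambda - (gamma/t) * (theta*g_t - 1/lambda)
    lam = lam0
    for t, g_t in enumerate(g_stream, start=1):
        lam -= (gamma / t) * (theta * g_t - 1.0 / lam)
        lam = max(lam, 1e-8)          # keep the weight positive
    return lam

def bayes_weight(g_stream, alpha=1.0, beta=1.0):
    # closed-form posterior mean on the same stream
    return (alpha + len(g_stream)) / (beta + theta * np.sum(g_stream))

g_stream = np.random.default_rng(1).uniform(0.0, 1.0, size=1000)
print(sgd_weight(g_stream), bayes_weight(g_stream))
# For this stream the expected-loss minimizer is 1/(theta*E[g]) = 20.
# The posterior mean gets close to it without any tuning, while the SGD
# iterate can remain far from it when gamma is not chosen well.
\end{verbatim}
\end{small}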
\section{Experiments}
\label{sec:experiments}
We report two sets of binary classification experiments in the online learning setting.
In the first set of experiments, we evaluate our scheme's performance vs.\ five baseline methods: a single baseline classifier, a uniform voting ensemble, and three SGD based online ensemble learning methods.
In the second set of experiments, we compare with three leading online boosting methods: GradientBoost~\citep{Leistner2009}, Smooth-Boost~\citep{Chen2012}, and the online boosting method of~\citet{OzaRussell2001}.
In all experiments, we follow the experimental setup in~\citet{Chen2012}. Data arrives as a sequence of examples $(\mathbf{x}_1, y_1),\ldots,(\mathbf{x}_T, y_T)$. At each step $t$ the online learner predicts the class label for $\mathbf{x}_t$, then the true label $y_t$ is revealed and used to update the classifier online.
We report the averaged error rate for each evaluated method over five trials of different random orderings of each dataset. The experiments are conducted for two different choices of weak classifiers: Perceptron and Na\"{\i}ve Bayes.
In all experiments, we choose the loss function $g$ of our method to be the ramp loss, and set the hyperparameters of our method as $\alpha=\beta=1$ and $\theta=0.1$.
From the expression of the posterior mean~\eqref{eq:post_mean}, the prediction rule~\eqref{eq:strong} becomes insensitive to the values of $\alpha$, $\beta$ and $\theta$ in the long run: as $t$ grows, each posterior mean approaches $t/(\theta\sum_{s=1}^tg_i^s)$, so $\alpha$ and $\beta$ are washed out, while $\theta$ enters only as a common scale factor that cancels in the comparison in~\eqref{eq:strong}.
We have observed that the classification performance of our method is not very sensitive to changes in the settings of these parameters.
However, the stochastic gradient descent baseline (SGD)~\eqref{eq:SGD} is sensitive to the setting of $\theta$, and since $\theta=0.1$ works best for SGD we also use $\theta=0.1$ for our method.
\subsection{Comparison with Baseline Methods}
\label{sec:exp_baseline}
In the experimental evaluation, we compare our online ensemble method with five baseline methods:
\begin{enumerate}
\item a single weak classifier ({\scshape Perceptron} or {\scshape Na\"{\i}ve Bayes}),
\item a uniform ensemble of weak classifiers ({\scshape Voting}),
\item an ensemble of weak classifiers where the ensemble weights are estimated via standard stochastic gradient descent ({\scshape SGD}),
\item a variant of (3.) where the ensemble weights are estimated via Polyak averaging~\citep{PolyakJuditsky1992} ({\scshape SGD-avg}), and
\item another variant of (3.) where the ensemble weights are estimated via the Stochastic Average Gradient method of~\citet{Schmidt2013} ({\scshape SAG}).
\end{enumerate}
We use ten binary classification benchmark datasets obtained from the LIBSVM repository\footnote{http://www.csie.ntu.edu.tw/{\textasciitilde}cjlin/libsvmtools/datasets/}.
Each dataset is split into training and testing sets for each random trial, where a training set contains no more than $10\%$ of the total amount of data available for that particular benchmark. For each experimental trial, the ordering of items in the testing sequence is selected at random, and each online classifier ensemble learning method is presented with the same testing data sequence for that trial.
In each experimental trial, for all ensemble learning methods, we utilize a set of 100 pre-trained weak classifiers that are kept static during the online learning process.
The training set is used in learning these 100 weak classifiers. The same weak classifiers are then shared by all of the ensemble methods, including our method. In order to make weak classifiers divergent, each weak classifier uses a randomly sampled subset of data features as input for both training and testing. The first baseline (single classifier) is learned using all the features.
For all of the benchmarks we observed that the error rate varies with different orderings of the dataset. Therefore, following~\citet{Chen2012}, we report the average error rate over five random trials of different orders of each sequence. In fact, while the error rate may vary according to different orderings of a dataset, it was observed throughout all our experiments that the ranking of performance among different methods is usually consistent.
\begin{table*}[p]
\caption{Experiments of online classifier ensemble using pre-trained Perceptrons as weak classifiers and keeping them fixed online. Mean error rate over five random trials is shown in the table. We compare with five baseline methods: a single Perceptron classifier ({\scshape Perceptron}), a uniform ensemble scheme of weak classifiers ({\scshape Voting}), an ensemble scheme using SGD for estimating the ensemble weights ({\scshape SGD}), an ensemble scheme using the Polyak averaging scheme of SGD~\citep{PolyakJuditsky1992} to estimate the ensemble weights ({\scshape SGD-avg}), and an ensemble scheme using the Stochastic Average Gradient~\citep{Schmidt2013} to estimate the ensemble weights ({\scshape SAG}). Our method attains the top performance for all testing sequences.}
\begin{center}
\begin{small}
\begin{sc}
\scalebox{0.95}{
\begin{tabular}{|l||c|c|c|c|c|c|c|}
\hline
Dataset & \# Examples & Perceptron & Voting & SGD & SGD-avg & SAG & Ours \\
\hline \hline
Heart & 270 & 0.258 & 0.268 & 0.265 & 0.266 & 0.245 & {\bf 0.239} \\ \hline
Breast-Cancer & 683 & 0.068 & 0.056 & 0.056 & 0.055 & 0.055 & {\bf 0.050} \\ \hline
Australian & 693 & 0.204 & 0.193 & 0.186 & 0.187 & 0.171 & {\bf 0.166} \\ \hline
Diabetes & 768 & 0.389 & 0.373 & 0.371 & 0.372 & 0.364 & {\bf 0.363} \\ \hline
German & 1000 & 0.388 & 0.324 & 0.321 & 0.323 & 0.315 & {\bf 0.309} \\ \hline
Splice & 3175 & 0.410 & 0.349 & 0.335 & 0.338 & 0.301 & {\bf 0.299} \\ \hline
Mushrooms & 8124 & 0.058 & 0.034 & 0.034 & 0.034 & 0.031 & {\bf 0.030} \\ \hline
Ionosphere & 351 & 0.297 & 0.247 & 0.240 & 0.241 & 0.240 & {\bf 0.236} \\ \hline
Sonar & 208 & 0.404 & 0.379 & 0.376 & 0.379 & 0.370 & {\bf 0.369} \\ \hline
SVMguide3 & 1284 & 0.382 & 0.301 & 0.299 & 0.299 & 0.292 & {\bf 0.289} \\
\hline
\end{tabular}
}
\label{tab:P_static}
\end{sc}
\end{small}
\end{center}
\end{table*}
\begin{table*}[p]
\caption{Experiments of online classifier ensemble using pre-trained Na\"{\i}ve Bayes as weak classifiers and keeping them fixed online. Mean error rate over five random trials is shown in the table.
We compare with five baseline methods: a single Na\"{\i}ve Bayes classifier ({\scshape Na\"{\i}ve Bayes}), a uniform ensemble scheme of weak classifiers ({\scshape Voting}), an ensemble scheme using SGD for estimating the ensemble weights ({\scshape SGD}), an ensemble scheme using the Polyak averaging scheme of SGD~\citep{PolyakJuditsky1992} to estimate the ensemble weights ({\scshape SGD-avg}), and an ensemble scheme using the Stochastic Average Gradient~\citep{Schmidt2013} to estimate the ensemble weights ({\scshape SAG}). Our method attains the top performance for all testing sequences.}
\begin{center}
\begin{small}
\begin{sc}
\scalebox{0.95}{
\begin{tabular}{|l||c|c|c|c|c|c|c|}
\hline
dataset & \# Examples & Na\"{\i}ve Bayes & Voting & SGD & SGD-avg & SAG & Ours \\
\hline \hline
Heart & 270 & 0.232 & 0.207 & 0.214 & 0.215 & 0.206 & {\bf 0.202} \\ \hline
Breast-Cancer & 683 & 0.065 & 0.049 & 0.050 & 0.049 & 0.048 & {\bf 0.044} \\ \hline
Australian & 693 & 0.204 & 0.201 & 0.200 & 0.200 & 0.187 & {\bf 0.184} \\ \hline
Diabetes & 768 & 0.259 & 0.258 & 0.256 & 0.256 & 0.254 & {\bf 0.253} \\ \hline
German & 1000 & 0.343 & 0.338 & 0.338 & 0.338 & 0.320 & {\bf 0.315} \\ \hline
Splice & 3175 & 0.155 & 0.156 & 0.155 & 0.155 & {\bf 0.152} & {\bf 0.152} \\ \hline
Mushrooms & 8124 & 0.037 & 0.066 & 0.064 & 0.064 & 0.046 & {\bf 0.031} \\ \hline
Ionosphere & 351 & 0.199 & 0.196 & 0.195 & 0.195 & 0.193 & {\bf 0.192} \\ \hline
Sonar & 208 & 0.338 & 0.337 & 0.337 & 0.337 & 0.337 & {\bf 0.336} \\ \hline
SVMguide3 & 1284 & 0.315 & 0.316 & 0.304 & 0.316 & 0.236 & {\bf 0.215} \\
\hline
\end{tabular}
}
\label{tab:NB_static}
\end{sc}
\end{small}
\end{center}
\end{table*}
\begin{figure}
\caption{Plots of the error rate as online learning progresses for three benchmark datasets: Mushrooms, Breast-Cancer, and Australian. (Plots for other benchmark datasets are provided in the supporting material.) The red curve in each graph shows the error rate for our method, as a function of the number of samples processed in the online learning of ensemble weights; the cyan curves are results from {\scshape SGD}.}
\label{fig:error}
\end{figure}
Classification error rates for this experiment are shown in Tables~\ref{tab:P_static} and~\ref{tab:NB_static}.
Our proposed method consistently performs the best for all datasets.
Its superior performance against {\scshape Voting} is consistent with the asymptotic convergence analysis in Theorem~\ref{thm:convergence}.
Its superior performance against the {\scshape SGD} baseline is consistent with the convergence rate analysis in Theorem~\ref{thm:rate}.
Polyak averaging ({\scshape SGD-avg}) does not improve the performance of basic SGD in general; this is consistent with the analysis in~\citet{Xu2011} which showed that, despite its optimal asymptotic convergence rate, a huge number of samples may be needed for Polyak averaging to reach its asymptotic region for a randomly chosen step size.
{\scshape SAG}~\citep{Schmidt2013} is a close runner-up to our approach, but it has two limitations:
1) it requires knowing the length of the testing sequence \emph{a priori}, and
2) as noted in~\citet{Schmidt2013}, the step size suggested in the theoretical analysis does not usually give the best result in practice, and thus the authors suggest a larger step size instead. In our experiments, we also found that the improvement of~\citet{Schmidt2013} over the {\scshape SGD} baseline relies on tuning the step size to a value that is greater than that given in the theory. The performance of {\scshape SAG} reported here has taken advantage of these two points.
\begin{figure}
\caption{Experiments to evaluate different settings of $\beta$ for our online classifier ensemble method, using pre-trained Perceptrons and Na\"{\i}ve Bayes weak classifiers.}
\label{fig:beta}
\end{figure}
\begin{figure}
\caption{Experiments to evaluate different settings of $\theta$ for our online classifier ensemble method, using pre-trained Perceptrons and Na\"{\i}ve Bayes weak classifiers.}
\label{fig:theta}
\end{figure}
Fig.~\ref{fig:error} shows plots of the convergence of online learning for three of the benchmark datasets. Plots for the other benchmark datasets are provided in the supplementary material. Each plot reports the classification error curves of our method, the {\scshape SGD} baseline, Polyak averaging {\scshape SGD-avg}~\citep{PolyakJuditsky1992}, and Stochastic Average Gradient {\scshape SAG}~\citep{Schmidt2013}. Overall, for all methods, the error rate generally tends to decrease as the online learning process considers more and more samples. As is evident in the graphs, our method tends to attain the lowest error rates among the compared methods throughout each training sequence on these benchmarks.
Ideally, as an algorithm converges, the cumulative error rate should tend to decrease as more samples are processed, approaching the minimal error rate that is achievable for the given set of pre-trained weak classifiers. Given the finite size of the training sample set and the randomness caused by different orderings of the sequences, we may not see such ideal monotonic curves; but in general, the trend of the curves obtained by our method is consistent with the convergence analysis of Theorem~\ref{thm:convergence}.
An online learning algorithm that converges faster should produce curves that go down more quickly. Again, given finite samples and different orderings there is some variance, but, consistent with Theorem~\ref{thm:rate}, the consistently better performance of our formulation over the compared methods is evident.
Fig.~\ref{fig:beta} and Fig.~\ref{fig:theta} show plots for studying the sensitivity of our method to its parameter settings. It is clear from the expression of the posterior mean~\eqref{eq:post_mean} that the numerator term containing $\alpha$ is cancelled out in the prediction rule~\eqref{eq:strong}; therefore we only need to study the effect of $\beta$ and $\theta$. We select a short sequence, ``Heart'', and a long sequence, ``Mushrooms'', as two representative datasets. We plot the classification error rates of our method under different settings of $\beta$ (Fig.~\ref{fig:beta}) and $\theta$ (Fig.~\ref{fig:theta}), averaged over five random trials. The performance of our method is not very sensitive to the settings of $\beta$ and $\theta$, even for a short sequence like ``Heart'' (270 samples), and it is even more stable with respect to these parameters for a longer sequence like ``Mushrooms'' (8124 samples). This observation is consistent with the asymptotic property of our prediction rule~\eqref{eq:strong}.
We observed similar behavior for all the other benchmark datasets we tested.
\subsection{Comparison with Online Boosting Methods}
We further compare our method with a single Perceptron/Na\"{\i}ve Bayes classifier that is updated online, and three representative online boosting methods reported in~\citet{Chen2012}: {\scshape OzaBoost} is the method proposed by~\citet{OzaRussell2001}, {\scshape OGBoost} is the online GradientBoost method proposed by \citet{Leistner2009}, and {\scshape OSBoost} is the online Smooth-Boost method proposed by~\citet{Chen2012}. {\scshape Ours-r} is our proposed Bayesian ensemble method for online updated weak classifiers.
All methods are trained and compared following the setup of~\citet{Chen2012}, where for each experimental trial, a set of 100 weak classifiers are initialized and updated online.
We use ten binary classification benchmark datasets that are also used by~\citet{Chen2012}. We discard the ``Ijcnn1'' and ``Web Page'' datasets from the tables of~\citet{Chen2012} because they are highly imbalanced, with proportions of positive samples around $0.09$ and $0.03$ respectively, so that even a na\"{\i}ve ``always negative'' classifier attains comparably top performance.
\begin{table*}[p]
\caption{Experiments of online classifier ensemble using online Perceptrons as weak classifiers that are updated online. Mean error rate over five trials is shown in the table. We compare with a single online Perceptron classifier ({\scshape Perceptron}) and three representative online boosting methods reported in~\citet{Chen2012}. {\scshape OzaBoost} is the method proposed by~\citet{OzaRussell2001}, {\scshape OGBoost} is the online GradientBoost method proposed by \citet{Leistner2009}, and {\scshape OSBoost} is the online Smooth-Boost method proposed by~\citet{Chen2012}. Our method ({\scshape Ours-R}) attains the top performance for most of the testing sequences.}
\begin{center}
\begin{small}
\begin{sc}
\scalebox{0.95}{
\begin{tabular}{|l|c|c|c|c|c|c|}
\hline
dataset & \# examples & Perceptron & OzaBoost & OGBoost & OSBoost & Ours-r \\
\hline \hline
Heart & 270 & 0.2489 & 0.2356 & 0.2267 & 0.2356 & {\bf 0.2134} \\ \hline
Breast-Cancer & 683 & 0.0592 & 0.0501 & 0.0445 & 0.0466 & {\bf 0.0419} \\ \hline
Australian & 693 & 0.2099 & 0.2012 & 0.1962 & 0.1872 & {\bf 0.1655} \\ \hline
Diabetes & 768 & 0.3216 & 0.3169 & 0.3313 & 0.3185 & {\bf 0.3098} \\ \hline
German & 1000 & 0.3256 & 0.3364 & 0.3142 & 0.3148 & {\bf 0.3105} \\ \hline
Splice & 3175 & 0.2717 & 0.2759 & 0.2625 & 0.2605 & {\bf 0.2584} \\ \hline
Mushrooms & 8124 & 0.0148 & 0.0080 & 0.0068 & {\bf 0.0060} & 0.0062 \\ \hline
Adult & 48842 & 0.2093 & 0.2045 & 0.2080 & 0.1994 & {\bf 0.1682} \\ \hline
Cod-RNA & 488565& 0.2096 & 0.2170 & 0.2241 & 0.2075 & {\bf 0.1934} \\ \hline
Covertype & 581012& 0.3437 & 0.3449 & 0.3482 & 0.3334 & {\bf 0.3115} \\
\hline
\end{tabular}
}
\label{tab:P_online}
\end{sc}
\end{small}
\end{center}
\end{table*}
\begin{table*}[p]
\caption{Experiments of online classifier ensemble using online Na\"{\i}ve Bayes as weak classifiers that are updated online. Mean error rate over five trials is shown in the table. We compare with a single online Na\"{\i}ve Bayes classifier ({\scshape Na\"{\i}ve Bayes}) and three representative online boosting methods reported in~\citet{Chen2012}. {\scshape OzaBoost} is the method proposed by~\citet{OzaRussell2001}, {\scshape OGBoost} is the online GradientBoost method proposed by \citet{Leistner2009}, and {\scshape OSBoost} is the online Smooth-Boost method proposed by~\citet{Chen2012}. Our method ({\scshape Ours-R}) attains the top performance for 7 out of 10 testing sequences. For ``Cod-RNA" our implementation of the Na\"{\i}ve Bayes baseline was unable to duplicate the reported result; ours gave 0.2555 instead.}
\begin{center}
\begin{small}
\begin{sc}
\scalebox{0.95}{
\begin{tabular}{|l|c|c|c|c|c|c|}
\hline
dataset & \# examples & Naive Bayes & OzaBoost & OGBoost & OSBoost & Ours-r \\
\hline \hline
Heart & 270 & 0.1904 & 0.2570 & 0.3037 & 0.2059 & {\bf 0.1755} \\ \hline
Breast-Cancer & 683 & 0.0474 & 0.0635 & 0.1004 & 0.0489 & {\bf 0.0408} \\ \hline
Australian & 693 & 0.1751 & 0.2133 & 0.2826 & 0.1849 & {\bf 0.1611} \\ \hline
Diabetes & 768 & 0.2664 & 0.3091 & 0.3292 & 0.2622 & {\bf 0.2467} \\ \hline
German & 1000 & 0.2988 & 0.3206 & 0.3598 & 0.2730 & {\bf 0.2667} \\ \hline
Splice & 3175 & 0.2520 & 0.1563 & 0.1863 & 0.1370 & {\bf 0.1344} \\ \hline
Mushrooms & 8124 & 0.0076 & 0.0049 & 0.0229 & {\bf 0.0029} & 0.0054 \\ \hline
Adult & 48842 & 0.2001 & 0.1912 & 0.1878 & {\bf 0.1581} & 0.1658 \\ \hline
Cod-RNA & 488565& 0.2206$*$ & 0.0796 & 0.0568 & {\bf 0.0581} & 0.2552 \\ \hline
Covertype & 581012& 0.3518 & 0.3293 & 0.3732 & 0.3634 & {\bf 0.3269} \\
\hline
\end{tabular}
}
\label{tab:NB_online}
\end{sc}\\
\end{small}
\end{center}
\end{table*}
The error rates for this experiment are shown in Tables~\ref{tab:P_online} and~\ref{tab:NB_online}. As can be seen, our method outperforms competing methods using the Perceptron weak classifier in nearly all the benchmarks tested. Moreover, our method performs among the best for the Na\"{\i}ve Bayes weak classifier. It is worth noting that our method is the only one that outperforms the single classifier baseline in all benchmark datasets, which further confirms the effectiveness of the proposed ensemble scheme.
We also note that despite our best efforts to align both the weak classifier construction and experimental setup with competing methods~\citep{Chen2012,Chen2013}, there are inevitably differences in weak classifier construction. Firstly, given that our method only focuses on optimizing the ensemble weights, each incoming sample is treated equally in the update of all weak classifiers, while all three online boosting methods adopt more sophisticated weighted update schemes for the weak classifiers, where the sample weight is dynamically adjusted during each round of update. Secondly, in order to make weak classifiers different from each other, our weak classifiers use only a subset of input features, while weak classifiers of competing methods use all features and are updated differently. As a result, the weak classifiers used by our method are actually weaker than in competing methods. Nevertheless, our method often compares favorably.
\section{Additional Loss Functions for Online Ensemble Learning}\label{sec:ext}
We discuss other loss functions that fit into our Bayesian online ensemble learning framework. Note that the loss function~\eqref{loss} given in Section~\ref{sec:examples} is very simple, to the extent that the surrogate empirical loss~\eqref{eq:cumulated} at each step can be directly minimized in closed-form.
To demonstrate the flexibility of our framework, the empirical losses in the two examples we give below cannot be minimized directly, but they are still effectively solvable using our approach.
\begin{enumerate}
\item Consider the loss function
\begin{eqnarray}
\ell(\bm\lambda;\mathbf g)&=&\sum\limits_{i=1}^m(1-\lambda_i)\log g_i+\theta\sum\limits_{i=1}^m g_i\nonumber\\
&+&\sum\limits_{i=1}^m\log\Gamma(\lambda_i)-(\log\theta)\sum\limits_{i=1}^m\lambda_i
\label{eq:loss1}
\end{eqnarray}
where $\theta>0$ is a fixed parameter. The corresponding likelihood is given by the following product of Gamma distributions
\begin{equation}
p(\mathbf g|\bm\lambda)=\prod_{i=1}^m\frac{\theta^{\lambda_i}}{\Gamma(\lambda_i)}g_i^{\lambda_i-1}e^{-\theta g_i}
\label{eq:likelihood_loss1}
\end{equation}
A conjugate prior for $\bm\lambda$ is available, in the form
$$p(\bm\lambda)\propto\prod_{i=1}^m\frac{a^{\lambda_i-1}\theta^{c\lambda_i}}{\Gamma(\lambda_i)^b}$$
where $a,b,c>0$ are hyperparameters. The posterior distribution of $\bm\lambda$ after $t$ steps is then given by (a short conjugacy check is sketched after this list)
\begin{equation}
p(\bm\lambda|\mathbf{g}^{1:t})\propto
\prod_{i=1}^m\frac{(a\prod\limits_{s=1}^tg_i^{s})^{\lambda_i-1}\theta^{(c+t)\lambda_i}}{\Gamma(\lambda_i)^{(b+t)}}
\label{eq:post_loss1}
\end{equation}
Note that given the posterior~\eqref{eq:post_loss1}, the posterior mean of each $\lambda_i$ is not available in closed form, but it can be computed using standard numerical integration procedures, such as those provided in the Matlab Mathematics Toolbox (only one-dimensional integrals are involved because the components of $\bm\lambda$ are independent under the posterior). The corresponding prediction rule at each step is given by
\begin{equation*}
y=
\left\{
\begin{array}{rcl}
1 \ & \text{if}\ \sum\limits_{i=1}^m(1-\lambda_i)\log \frac{g_i(\mathbf{x},1)}{g_i(\mathbf{x},-1)} + \theta\sum\limits_{i=1}^m (g_i(\mathbf{x},1)-g_i(\mathbf{x},-1))\leq 0\\
-1 \ & \text{otherwise}
\end{array}
\right.\\
\label{eq:strong_loss1}
\end{equation*}
Note that the likelihood function~\eqref{eq:likelihood_loss1} of $g$ is a Gamma distribution, which has support $(0,\infty)$. For computational convenience, instead of choosing the ramp loss for $g$ as in Section~\ref{sec:examples}, we can choose $g$ to be the logistic function.
\item
We can extend the ensemble weights to include two correlated parameters for each weight, i.e., $\lambda_i=(\alpha_i,\beta_i)$. In this case, we may define the loss function as
\begin{eqnarray}
\ell(\bm\alpha,\bm\beta;\mathbf g)&=&\sum\limits_{i=1}^m\beta_i g_i+\sum\limits_{i=1}^m(1-\alpha_i)\log g_i\nonumber\\
&+&\sum\limits_{i=1}^m\log\Gamma(\alpha_i)-\sum\limits_{i=1}^m\alpha_i\log\beta_i
\label{eq:loss2}
\end{eqnarray}
with the corresponding Gamma likelihood
\begin{equation}
p(\mathbf g|\bm\alpha,\bm\beta)=\prod_{i=1}^m\frac{\beta_i^{\alpha_i}}{\Gamma(\alpha_i)}g_i^{\alpha_i-1}e^{-\beta_i g_i}
\label{eq:likelihood_loss2}
\end{equation}
A conjugate prior is available for $\bm\alpha$ and $\bm\beta$ jointly
$$p(\bm\alpha,\bm\beta)\propto\prod_{i=1}^m\frac{p^{\alpha_i-1}e^{-q\beta_i }}{\Gamma(\alpha_i)^r\beta_i^{-\alpha_i s}}$$
where $p,q,r,s$ are hyperparameters. The posterior distribution of $\bm\alpha$ and $\bm\beta$ after $t$ steps is given by the Gamma distribution
\begin{equation}
p(\bm\alpha,\bm\beta|\mathbf g^{1:t})\propto\prod_{i=1}^m\frac{(p\prod\limits_{\tau=1}^tg_i^{\tau})^{\alpha_i-1}e^{-(q+\sum_{\tau=1}^tg_i^{\tau})\beta_i}}{\Gamma(\alpha_i)^{(r+t)}\beta_i^{-\alpha_i(s+t)}}
\label{eq:post_loss2}
\end{equation}
Again, the posterior mean for~\eqref{eq:post_loss2} is not available in closed-form and we can approximate it using numerical methods. The corresponding prediction rule at each step is given by
\begin{equation*}
y=
\left\{
\begin{array}{rcl}
1 \ & \mbox{if}\ \sum\limits_{i=1}^m(1-\alpha_i)\log \frac{g_i(\mathbf{x},1)}{g_i(\mathbf{x},-1)} + \sum\limits_{i=1}^m\beta_i (g_i(\mathbf{x},1)-g_i(\mathbf{x},-1)) \leq 0\\
-1 \ & \mbox{otherwise}
\end{array}
\right.\\
\label{eq:strong_loss2}
\end{equation*}
\end{enumerate}
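For completeness, here is a brief conjugacy check for the first loss (our sketch). Multiplying the prior by the likelihood~\eqref{eq:likelihood_loss1} of the observations $\mathbf g^{1},\ldots,\mathbf g^{t}$ gives
\begin{equation*}
p(\bm\lambda\mid\mathbf g^{1:t})
\propto\prod_{i=1}^m\frac{a^{\lambda_i-1}\theta^{c\lambda_i}}{\Gamma(\lambda_i)^{b}}
\prod_{s=1}^t\frac{\theta^{\lambda_i}}{\Gamma(\lambda_i)}\,(g_i^{s})^{\lambda_i-1}e^{-\theta g_i^{s}}
\propto\prod_{i=1}^m\frac{\bigl(a\prod_{s=1}^t g_i^{s}\bigr)^{\lambda_i-1}\theta^{(c+t)\lambda_i}}{\Gamma(\lambda_i)^{\,b+t}},
\end{equation*}
since the factor $\exp\bigl(-\theta\sum_{i,s}g_i^{s}\bigr)$ does not depend on $\bm\lambda$ and is absorbed into the normalizing constant; this recovers~\eqref{eq:post_loss1}, and an analogous computation applies to the prior and likelihood of the second loss.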
Note that both of these loss functions satisfy Assumption~\ref{regularity}. Similarly to the example proposed in Section~\ref{sec:examples}, the Hessian of $L_T$ does not depend on $g^{1:T}$; therefore all conditions of Assumption~\ref{regularity} can be verified easily.
As a result, applying Algorithm~\ref{alg:framework} on these two loss functions for solving the online ensemble learning problem also possesses the convergence properties given by Theorems~\ref{thm:convergence} and~\ref{thm:rate_general}.
We follow the experimental setup of Section~\ref{sec:exp_baseline} to compare our proposed loss~\eqref{loss} with the additional losses~\eqref{eq:loss1} and~\eqref{eq:loss2} discussed here, using pre-trained Perceptron and Na\"{i}ve Bayes as weak classifiers.
The loss function $g$ for weak classifier $c$ is chosen as a logistic function of $y\cdot c(x)$. According to the posterior update rules given in~\eqref{eq:post_loss1} and~\eqref{eq:post_loss2}, the hyperparameters $b,c$ and $r,s$ keep increasing as online learning proceeds. However, we observe in practice that the numerical integration of the posterior means based on the posterior distributions~\eqref{eq:post_loss1} and~\eqref{eq:post_loss2} does not converge if the values of the hyperparameters $b,c,r,s$ are too large. In our experiments, we therefore set upper bounds for these parameters: the upper bound for $b$ and $c$ is $1000$, and the upper bounds for $r$ and $s$ are $200.5$ and $200$ respectively (since $s$ should be strictly less than $r$, we use the initialization $s=1$, $r=1.5$, as suggested by~\citealp{Fink1997}).
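To make the numerical step concrete, the following minimal Python sketch (our illustration only, not the authors' implementation; the function name, the truncation bound \texttt{lam\_max}, and the stabilizing shift are assumptions) computes the one-dimensional posterior mean of a single weight $\lambda_i$ under posterior~\eqref{eq:post_loss1} by standard quadrature:
\begin{verbatim}
# Minimal sketch: one-dimensional quadrature for the posterior mean of one
# ensemble weight lambda_i under (eq:post_loss1),
#   p(lambda | g^{1:t})  proportional to  P^{lambda-1} theta^{(c+t) lambda} / Gamma(lambda)^{b+t},
# with P = a * prod_s g_i^s.  Truncation and log-shift are illustrative choices.
import numpy as np
from scipy.integrate import quad
from scipy.special import gammaln

def posterior_mean(log_P, theta, a_ct, b_t, lam_max=50.0):
    def log_density(lam):                      # unnormalized log-posterior
        return (lam - 1.0) * log_P + a_ct * lam * np.log(theta) - b_t * gammaln(lam)
    grid = np.linspace(1e-3, lam_max, 500)     # coarse grid for a stabilizing shift
    shift = max(log_density(l) for l in grid)
    density = lambda lam: np.exp(log_density(lam) - shift)
    Z, _ = quad(density, 0.0, lam_max)         # normalizing constant
    M, _ = quad(lambda lam: lam * density(lam), 0.0, lam_max)
    return M / Z
\end{verbatim}
Here \texttt{log\_P} stands for $\log a+\sum_{s=1}^t\log g_i^{s}$, \texttt{a\_ct} for $c+t$, and \texttt{b\_t} for $b+t$.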
The classification error rates for this experiment, averaged over five trials, are shown in Table~\ref{tab:loss1}. Note that the results in this table should not be directly compared with those reported in Tables~\ref{tab:P_static} and~\ref{tab:NB_static}, since the loss function $g$ for the weak classifiers is chosen differently.
We observe that loss~\eqref{eq:loss2} works slightly better than loss~\eqref{eq:loss1}, which is reasonable given the larger number of parameters in~\eqref{eq:loss2}. This advantage also leads to performance superior to that of loss~\eqref{loss} proposed in Section~\ref{sec:examples} on shorter sequences, such as ``Heart'', ``Ionosphere'' and ``Sonar''. However, for longer sequences, loss~\eqref{loss} still has some advantage because of its closed-form posterior mean.
\begin{table*}[htb]
\caption{Experiments of online classifier ensemble using pre-trained Perceptrons/Na\"{\i}ve Bayes as weak classifiers and keeping them fixed online. Mean error rate over five random trials is shown in the table.
We compare our method using the proposed loss function~\eqref{loss} with alternative losses defined by~\eqref{eq:loss1} and~\eqref{eq:loss2}. In general, the loss function~\eqref{loss} that enables closed-form posterior mean performs the best.}
\vskip -0.1in
\begin{center}
\begin{small}
\begin{sc}
\scalebox{0.95}{
\begin{tabular}{|l||c|c|c|c||c|c|c|}
\hline
& & \multicolumn{3}{c||}{Perceptron weak learner} & \multicolumn{3}{c|}{Na\"{i}ve Bayes weak learner} \\
\cline{1-8}
dataset & \# examples & loss~\eqref{loss} & loss~\eqref{eq:loss1} & loss~\eqref{eq:loss2} & loss~\eqref{loss} & loss~\eqref{eq:loss1} & loss~\eqref{eq:loss2} \\
\hline \hline
Heart & 270 & 0.203 & 0.208 & {\bf 0.198} & 0.197 & 0.204 & {\bf 0.196} \\ \hline
Breast-Cancer & 683 & {\bf 0.065} & 0.070 & 0.068 & {\bf 0.045} & 0.050 & 0.046 \\ \hline
Australian & 693 & {\bf 0.183} & 0.207 & 0.200 & {\bf 0.191} & 0.209 & 0.203 \\ \hline
Diabetes & 768 & 0.301 & 0.307 & {\bf 0.300} & 0.285 & 0.287 & {\bf 0.284} \\ \hline
German & 1000 & {\bf 0.338} & 0.347 & 0.348 & {\bf 0.292} & {\bf 0.292} & 0.293 \\ \hline
Splice & 3175 & {\bf 0.390} & 0.418 & 0.418 & {\bf 0.144} & 0.150 & 0.150 \\ \hline
Mushrooms & 8124 & {\bf 0.028} & 0.032 & 0.031 & {\bf 0.025} & 0.047 & 0.046 \\ \hline
Ionosphere & 351 & 0.293 & 0.295 & {\bf 0.259} & {\bf 0.171} & 0.172 & {\bf 0.171} \\ \hline
Sonar & 208 & 0.385 & 0.391 & {\bf 0.380} & {\bf 0.301} & 0.302 & 0.303 \\ \hline
SVMguide3 & 1284 & {\bf 0.265} & 0.278 & 0.276 & {\bf 0.222} & 0.226 & 0.225\\ \hline
\end{tabular}
}
\label{tab:loss1}
\end{sc}
\end{small}
\end{center}
\vskip -0.1in
\end{table*}
\section{Conclusion}
\label{sec:future}
We proposed a Bayesian approach for online estimation of the weights of a classifier ensemble. This approach was based on an empirical risk minimization property of the posterior distribution, and involved suitably choosing the likelihood function based on a user-defined choice of loss function. We developed the theoretical foundation, and identified the class of loss functions, for which the update sequence generated by our approach converged to the stationary risk minimizer. We demonstrated that, unlike standard SGD, the convergence guarantee was global and that the rate was optimal in a well-defined asymptotic sense. Moreover, experiments on real-world datasets demonstrated that our approach compared favorably to state-of-the-art SGD methods and online boosting methods.
In future work, we will study further generalization of the scope of loss functions, and the extension of our framework to non-stationary environments.
\end{document}
|
\begin{document}
\title[ the inverse of the polygamma functions]{Inequalities for the inverses of the polygamma functions}
\author{necdet batir}
\address{department of mathematics\\
faculty of sciences and arts\\
nev{\c{s}}eh{\i}r hac{\i} bekta{\c{s}} veli university, nev{\c{s}}eh\i r, turkey}
\email{[email protected]}
\email{[email protected]}
\subjclass[2000]{Primary: 33B15; Secondary: 26D07.}
\keywords{inverse of digamma function, mean value theorem, gamma function, polygamma functions, inequalities.}
\begin{abstract}
We provide an elementary proof of the left side inequality and improve the right inequality in
\begin{align*}
\bigg[\frac{n!}{x-(x^{-1/n}+\alpha)^{-n}}\bigg]^{\frac{1}{n+1}}&<((-1)^{n-1}\psi^{(n)})^{-1}(x)\nonumber\\
&<\bigg[\frac{n!}{x-(x^{-1/n}+\beta)^{-n}}\bigg]^{\frac{1}{n+1}},
\end{align*}
where $\alpha=[(n-1)!]^{-1/n}$ and $\beta=[n!\zeta(n+1)]^{-1/n}$, which was proved in \cite{6}, and we prove the following inequalities for the inverse of the digamma function $\psi$.
\begin{equation*}
\frac{1}{\log(1+e^{-x})}<\psi^{-1}(x)< e^{x}+\frac{1}{2}, \quad x\in\mathbb{R}.
\end{equation*}
The proofs are based on nice applications of the mean value theorem for differentiation and elementary properties of the polygamma functions.
\end{abstract}
\maketitle
\section{introduction}
As is well known, for a positive real number $x$ the gamma function $\Gamma$ is defined by
$$
\Gamma (x)=\int_0^\infty {u^{x - 1}e^{ - u}du}.
$$
It is common knowledge that the gamma function plays a special role in the theory of special functions. The most important function related to the gamma function is the digamma or psi function $\psi(x)$, which is defined as the logarithmic derivative of the gamma function $\Gamma(x)$, that is,
$$
\psi(x)=\frac{\Gamma'(x)}{\Gamma(x)}.
$$
The digamma function $\psi$ is closely related to the Euler--Mascheroni constant $\gamma$ $(=0.57721...)$ and the harmonic numbers $H_n=1+\frac{1}{2}+\cdots+\frac{1}{n}$. They satisfy $\psi(n+1)=-\gamma+H_n$ $(n\in\mathbb{N})$. In \cite{14} it is proved that the asymptotic relation
$\psi(p_n) \sim \log n$ as $n\to\infty$ is equivalent to the Prime Number Theorem, where $p_n$ is the $n$th prime number. The functions $\psi'(x),\psi''(x),\psi'''(x),\ldots$ are called polygamma functions in the literature. Polygamma functions are also very important functions and appear in the evaluation of many series and integrals \cite{2,9,12,13,16,18}. They are also related to many special functions such as the Riemann zeta function, the Hurwitz zeta function, Clausen's function and generalized harmonic numbers. There exists a huge literature on inequalities for the digamma and polygamma functions, see \cite{3,4,6,7,10,11,15}, but for their inverses almost no inequality exists. The only known such inequality is the following one, which is due to the author \cite[Theorem 2.5]{6}.
\begin{align}\label{e:1}
\bigg[\frac{n!}{x-(x^{-1/n}+\alpha)^{-n}}\bigg]^{\frac{1}{n+1}}&<\left((-1)^{n-1}\psi^{(n)}\right)^{-1}(x)\nonumber\\
&<\bigg[\frac{n!}{x-(x^{-1/n}+\beta)^{-n}}\bigg]^{\frac{1}{n+1}},
\end{align}
where $\alpha=[(n-1)!]^{-1/n}$ and $\beta=[n!\zeta(n+1)]^{-1/n}$.
Our first aim in this paper is to give an elementary proof of the left-hand side of this inequality and to improve its right-hand side.
Our second aim is to establish, through the use of elementary properties of polygamma functions and the mean value theorem, simple bounds for the inverse of the digamma function $\psi$. Numerical experiments show that our bounds are remarkably accurate for all $x\in\mathbb{R}$. In our proofs we make use of the following relations for the gamma and polygamma functions.
The gamma function satisfies the functional equation $\Gamma(x+1)=x\Gamma(x)$, and has the following canonical product representation
\begin{equation*}
\Gamma(x+1)=e^{-\gamma x}\prod_{k=1}^{\infty}\frac{k}{k+x}e^{x/k}\quad x>-1,
\end{equation*}
where $\gamma=0.57721...$ is Euler-Mascheroni constant; see \cite[pg.346]{18}.
Taking logarithm of both sides of this formula, we obtain for $x>-1$
\begin{equation}\label{e:2}
\log\Gamma(x+1)=-\gamma x+\sum\limits_{k=1}^\infty\left[\frac{x}{k}-\log(x+k)+\log k\right].
\end{equation}
Differentiation gives
\begin{equation}\label{e:3}
\psi(x+1)=-\gamma+\sum\limits_{k=1}^\infty\left[\frac{1}{k}-\frac{1}{k+x}\right] \quad x>-1.
\end{equation}
For $x>0$ and $n=1,2,...$
\begin{equation}\label{e:4}
(-1)^{n-1}\psi^{(n)}(x)=\int\limits_{0}^\infty\frac{t^ne^{-xt}dt}{1-e^{-t}}=n!\sum\limits_{k=0}^{\infty}\frac{1}{(x+k)^{n+1}},
\end{equation}
and
\begin{equation}\label{e:5}
\psi^{(n)}(x+1)-\psi^{(n)}(x)=\frac{(-1)^nn!}{x^{n+1}}.
\end{equation}
See \cite[p.260]{1} for these and further properties of these functions.
\section{main result}
We collect our main results in this section.
\begin{thm}For $x>0$ and $n=1,2,3,...$ we have
\begin{align}\label{e:6}
&\bigg[\frac{1}{n}\bigg(\frac{x}{(n-1)!}-\left(\left(\frac{(n-1)!}{x}\right)^{1/n}+1\right)^{-n}\bigg)\bigg]^{-\frac{1}{n+1}}\nonumber\\
&<\left((-1)^{n-1}\psi^{(n)}\right)^{-1}(x)<\left(\frac{(n-1)!}{x}\right)^{\frac{1}{n}}+\frac{1}{2}
\end{align}
\end{thm}
\begin{proof}
By (\ref{e:5}) we have for $n=1,2,3,...$
\begin{equation}\label{e:7}
\frac{(-1)^{n-1}(n-1)!}{x^{n}}=\psi^{(n-1)}(x+1)-\psi^{(n-1)}(x).
\end{equation}
Using (\ref{e:4}), we get for $t>0$
\begin{equation}\label{e:8}
-\frac{1}{t^{n}}=\sum\limits_{k=0}^\infty\bigg[\frac{1}{(k+t+1)^{n}}-\frac{1}{(k+t)^n}\bigg].
\end{equation}
By mean value theorem for differentiation we have
\begin{equation}\label{e:9}
\frac{1}{(k+t+1)^{n}}-\frac{1}{(k+t)^n}=\frac{-n}{(k+\varepsilon(k))^{n+1}},\quad t<\varepsilon(k)<t+1.
\end{equation}
We can therefore rewrite (\ref{e:8}) as
\begin{equation}\label{e:10}
\frac{1}{nt^{n}}=\sum\limits_{k=0}^\infty\frac{1}{(k+\varepsilon(k))^{n+1}}.
\end{equation}
From (\ref{e:9}) we have
\begin{equation}\label{e:11}
\varepsilon(k)=\left[\frac{1}{n}\left(\frac{1}{(k+t)^{n}}-\frac{1}{(k+t+1)^{n}}\right)\right]^{-\frac{1}{n+1}}-k.
\end{equation}
We want to show that $\varepsilon$ is strictly increasing on $(0,\infty)$. For this purpose we define
\begin{equation*}
f(u)=\left[\frac{1}{n}\left(\frac{1}{u^n}-\frac{1}{(u+1)^n}\right)\right]^{-\frac{1}{n+1}}-u.
\end{equation*}
Clearly, $\varepsilon'(k)>0$ is equivalent to $f'(u)>0$.
Differentiation gives
\begin{equation*}
f'(u)=\frac{1}{n+1}\left(u^{-(n+1)}-(u+1)^{-(n+1)}\right)\left(\frac{u^{-n}-(u+1)^{-n}}{n}\right)^{-\frac{n+2}{n+1}}-1.
\end{equation*}
We conclude that $\varepsilon$ is strictly increasing if and only if
\begin{equation}\label{e:12}
\left[\frac{u^{-(n+1)}-(u+1)^{-(n+1)}}{n+1}\right]^{-\frac{1}{n+2}}<\left[\frac{u^{-n}-(u+1)^{-n}}{n}\right]^{-\frac{1}{n+1}},
\end{equation}
which follows from the fact that the generalized mean
$$
S_p(a,b)=\left[\frac{a^p-b^p}{p(a-b)}\right]^{1/(p-1)}
$$
is strictly increasing in $p$; see \cite[pg. 234]{17}. Indeed (\ref{e:12}) is equivalent to $S_{-(n+1)}(u,u+1)<S_{-n}(u,u+1)$.
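For the reader's convenience, we record the computation behind this equivalence (a brief sketch). Taking $(a,b)=(u,u+1)$ and $p=-n$ in the definition of $S_p$ gives
\begin{equation*}
S_{-n}(u,u+1)=\left[\frac{u^{-n}-(u+1)^{-n}}{-n\bigl(u-(u+1)\bigr)}\right]^{\frac{1}{-n-1}}
=\left[\frac{u^{-n}-(u+1)^{-n}}{n}\right]^{-\frac{1}{n+1}},
\end{equation*}
and the analogous computation with $p=-(n+1)$ gives the left-hand side of (\ref{e:12}).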
Using (\ref{e:11}) we get
\begin{equation*}
\lim\limits_{k\to\infty}\varepsilon(k)=t+\lim\limits_{u\to\infty}\left(\bigg[\frac{1}{n}\left(u^{-n}-(u+1)^{-n}\right)\bigg]^{-\frac{1}{n+1}}-u\right).
\end{equation*}
In \cite[Lemma 1.4]{6} (with $(t,s)=(1,0)$) this limit is evaluated and shown to equal $1/2$. Thus,
\begin{equation}\label{e:13}
\varepsilon(\infty):=\lim\limits_{k\to\infty}\varepsilon(k)=t+\frac{1}{2}.
\end{equation}
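For completeness, the value of this limit can also be seen from a direct expansion (a brief sketch): as $u\to\infty$,
\begin{equation*}
\frac{1}{n}\left(u^{-n}-(u+1)^{-n}\right)=u^{-(n+1)}\left(1-\frac{n+1}{2u}+O\!\left(\frac{1}{u^{2}}\right)\right),
\end{equation*}
so that
\begin{equation*}
\bigg[\frac{1}{n}\left(u^{-n}-(u+1)^{-n}\right)\bigg]^{-\frac{1}{n+1}}
=u\left(1+\frac{1}{2u}+O\!\left(\frac{1}{u^{2}}\right)\right)=u+\frac{1}{2}+O\!\left(\frac{1}{u}\right),
\end{equation*}
which gives the value $1/2$ for the limit in (\ref{e:13}).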
By (\ref{e:11}) it is clear that
\begin{equation}\label{e:14}
\varepsilon(0)=\bigg[\frac{1}{n}\left(t^{-n}-(t+1)^{-n}\right)\bigg]^{-\frac{1}{n+1}}.
\end{equation}
Since $\varepsilon$ is strictly increasing, we conclude from (\ref{e:10}) that
\begin{equation*}
\sum\limits_{k=0}^\infty\frac{1}{(k+\varepsilon(\infty))^{n+1}}<\frac{1}{nt^n}<\sum\limits_{k=0}^\infty\frac{1}{(k+\varepsilon(0))^{n+1}}
\end{equation*}
or taking into account (\ref{e:4})
\begin{equation}\label{e:15}
(-1)^{n-1}\psi^{(n)}(\varepsilon(\infty))<\frac{(n-1)!}{t^n}<(-1)^{n-1}\psi^{(n)}(\varepsilon(0)).
\end{equation}
Since the mapping $t\to(-1)^{n-1}\psi^{(n)}(t)$ is strictly decreasing, applying the inverse of this function to both sides of (\ref{e:15}) and using (\ref{e:13}) and (\ref{e:14}) we get
\begin{align}\label{e:16}
\bigg[\frac{1}{n}\left(t^{-n}-(t+1)^{-n}\right)\bigg]^{-\frac{1}{n+1}}
&<\left((-1)^{n-1}\psi^{(n)}\right)^{-1}\left(\frac{(n-1)!}{t^n}\right)<t+\frac{1}{2},
\end{align}
where $\left[(-1)^{n-1}\psi^{(n)}\right]^{-1}$ is the inverse of the mapping $t\to(-1)^{n-1}\psi^{(n)}(t)$.
Setting $x=\frac{(n-1)!}{t^n}$ here we get the desired result (\ref{e:6}).
\end{proof}
\begin{rem}
Numerical computations show that the upper bound given in (\ref{e:6}) is much more accurate than that of (\ref{e:1}).
\end{rem}
\begin{thm} For $x\in\mathbb{R}$ we have
\begin{equation}\label{e:17}
\frac{1}{\log(1+e^{-x})}<\psi^{-1}(x)< e^{x}+\frac{1}{2}.
\end{equation}
\end{thm}
\begin{proof} We want to give two different proofs.\\
\textbf{\textit{First proof.}} Applying the mean value theorem to $\log (\Gamma(t))$ on $[x,x+1]$ and using the functional equation $\Gamma(x+1)=x\Gamma(x)$ for the gamma function, we obtain
\begin{equation}\label{e:18}
\log x=\psi(x+\xi(x)),\quad 0<\xi(x)<1.
\end{equation}
We want to show that $\xi$ is strictly increasing on $(0,\infty)$. Differentiation gives
\begin{equation}\label{e:19}
\frac{1}{x}=(1+\xi'(x))\psi'(x+\xi(x))
\end{equation}
and
\begin{equation}\label{e:20}
-\frac{1}{x^2}=\xi''(x)\psi'(x+\xi(x))+(1+\xi'(x))^2\psi''(x+\xi(x)).
\end{equation}
By (\ref{e:19}) we have
$$
(1+\xi'(x))^2=\frac{1}{x^2\psi'(x+\xi(x))^2}.
$$
Substituting this into (\ref{e:20}) gives
\begin{equation}\label{e:21}
-x^2\xi''(x)\left[\psi'(x+\xi(x))\right]^3=\psi''(x+\xi(x))+\left[\psi'(x+\xi(x))\right]^2.
\end{equation}
We now show that the right hand side of this identity is positive. Let's define for $x>0$
$$
g(x)=\psi''(x)+\left(\psi'(x)\right)^2.
$$
Applying the recurrence relations
\begin{equation*}
\psi'(x+1)-\psi'(x)=-\frac{1}{x^2}\quad \mbox{and}\quad \psi''(x+1)-\psi''(x)=\frac{2}{x^3},
\end{equation*}
which follows from (\ref{e:5}), we obtain for positive $x$
\begin{equation}\label{e:22}
g(x)-g(x+1)=\frac{2}{x^2}\left(\psi'(x)-\frac{1}{x}-\frac{1}{2x^2}\right).
\end{equation}
Using
\begin{equation*}
\frac{1}{x}=\int\limits_{0}^\infty e^{-xt}dt,\quad \frac{1}{x^2}=\int\limits_{0}^\infty te^{-xt}dt\quad \mbox{and}\quad \psi'(x)=\int\limits_{0}^\infty \frac{te^{-xt}}{1-e^{-t}}dt,
\end{equation*}
we find that
\begin{equation}\label{e:23}
\psi'(x)-\frac{1}{x}-\frac{1}{2x^2}=\int\limits_{0}^\infty \frac{\alpha(t)e^{-xt}}{e^t-1}dt
\end{equation}
where $\alpha(t)=t+2+(t-2)e^t$. Since $\alpha(0)=\alpha'(0)=0$ and $\alpha''(t)=te^t>0$, we get $\alpha(t)>0$ for all $t>0$. Thus, the left side of (\ref{e:23}) is positive. This fact and (\ref{e:22}) together imply for $x>0$ that $g(x)-g(x+1)>0$. This reveals that
$$
g(x)>g(x+1)>g(x+2)>\cdots>g(x+n)>\lim\limits_{n\to\infty}g(x+n)=0.
$$
Since $\psi'(x)>0$ for $x>0$, we conclude from (\ref{e:21}) that $\xi''(x)<0$. That is, $\xi'$ is strictly decreasing on $(0,\infty)$. Since $\psi'$ is strictly decreasing and $0<\xi(x)<1$, (\ref{e:19}) yields
\begin{equation}\label{e:24}
\frac{1}{x\psi'(x)}-1<\xi'(x)<\frac{1}{x\psi'(x+1)}-1.
\end{equation}
Using the asymptotic expansion
\begin{equation*}
\psi'(x)\sim\frac{1}{x}+\frac{1}{2x^2}+\frac{1}{6x^3}+\cdots
\end{equation*}
(see \cite[pg.\ 260, 6.4.12]{1}), we see that both bounds in (\ref{e:24}) tend to $0$ as $x$ goes to infinity, that is, $\lim\limits_{x\to\infty}\xi'(x)=0$. Since $\xi'$ is strictly decreasing, we therefore have $\xi'(x)>\lim\limits_{x\to\infty}\xi'(x)=0$, and hence $\xi$ is a strictly increasing function of $x$ on $(0, \infty)$. Replacing $x$ by $x+1$ in (\ref{e:18}), and using the fact that both $\psi$ and $\xi$ are strictly increasing on $(0,\infty)$, we get
\begin{equation}\label{e:25}
\log (x+1)=\psi(x+\xi(x+1)+1)>\psi(x+\xi(x)+1).
\end{equation}
Employing the well known recurrence relation
\begin{equation}\label{e:26}
\psi(x+1)-\psi(x)=\frac{1}{x},
\end{equation}
we obtain from (\ref{e:25}) and (\ref{e:18})
\begin{equation*}
\log(x+1)>\frac{1}{x+\xi(x)}+\psi(x+\xi(x))=\frac{1}{x+\xi(x)}+\log x
\end{equation*}
or
\begin{equation*}
x+\xi(x)>\frac{1}{\log(1+1/x)}.
\end{equation*}
Applying $\psi$ to both sides, this becomes, by (\ref{e:18}),
\begin{equation}\label{e:27}
\log x=\psi(x+\xi(x))>\psi\left(\frac{1}{\log(1+1/x)}\right).
\end{equation}
Replacing $x$ by $x+1$ in (\ref{e:18}) and using the relation (\ref{e:26}) we get that
\begin{equation}\label{e:28}
\xi(x+1)=\frac{1}{\log(x+1)-\psi(x+\xi(x+1))}-x.
\end{equation}
Since $\xi$ is bounded and strictly increasing, it has a limit as $x$ approaches infinity. We therefore conclude from (\ref{e:18}) and (\ref{e:28}) that
\begin{align*}
\lim\limits_{x\to\infty}\xi(x+1)=&\lim\limits_{x\to\infty}\left[\frac{1}{\log(x+1)-\psi(x+\xi(x+1))}-x\right]\nonumber\\
=&\lim\limits_{x\to\infty}\left[\frac{1}{\log(x+1)-\psi(x+\xi(x))}-x\right]\nonumber\\
=&\lim\limits_{x\to\infty}\left[\frac{1}{\log(1+1/x)}-x\right]=\frac{1}{2}.
\end{align*}
So the monotonic increase of $\xi$ and $\psi$, together with the limit computed from (\ref{e:28}), implies that
\begin{equation}\label{e:29}
\log x=\psi(x+\xi(x))<\psi\left(x+\lim\limits_{t\to\infty}\xi(t)\right)=\psi\left(x+\frac{1}{2}\right).
\end{equation}
Combining (\ref{e:27}) and (\ref{e:29}) it follows that
\begin{equation*}
\psi\left(\frac{1}{\log(1+1/x)}\right)<\log x<\psi\left(x+\frac{1}{2}\right).
\end{equation*}
The desired inequality (\ref{e:17}) now follows from replacing $x$ by $e^x$ here, after applying $\psi^{-1}$ to both sides.\\
\textbf{\textit{Second proof.}} Utilizing the functional equation $\Gamma(x+1)=x\Gamma(x)$ and using (\ref{e:2}), we obtain
\begin{align}\label{e:30}
\log x&=\log\Gamma(x+1)-\log\Gamma(x)\nonumber\\
&=-\gamma x+\sum\limits_{k=1}^\infty\bigg[\frac{x}{k}-\log(x+k)+\log k\bigg]\nonumber \\
&+\gamma (x-1)-\sum\limits_{k=1}^\infty\bigg[\frac{x-1}{k}-\log(x+k-1)+\log k\bigg]\nonumber\\
&=-\gamma+\sum\limits_{k=1}^\infty\bigg[\frac{1}{k}-\log(x+k)+\log(x+k-1)\bigg].
\end{align}
By mean value theorem we get
\begin{equation}\label{e:31}
\log(x+k)-\log(x+k-1)=\frac{1}{k+\phi(k)},\quad x-1<\phi(k)<x.
\end{equation}
So (\ref{e:30}) becomes
\begin{equation}\label{e:32}
\log x=-\gamma +\sum\limits_{k=1}^\infty\bigg[\frac{1}{k}-\frac{1}{k+\phi(k)}\bigg]
\end{equation}
It is clear from (\ref{e:31}) that
\begin{equation}\label{e:33}
\phi(k)=\frac{1}{\log\bigg(1+\frac{1}{x+k-1}\bigg)}-k,
\end{equation}
and
\begin{equation}\label{e:34}
\phi(1)=\frac{1}{\log\bigg(1+\frac{1}{x}\bigg)}-1.
\end{equation}
We can easily compute that
\begin{equation}\label{e:35}
\phi(\infty):=\lim\limits_{k\to\infty}\phi(k)=x-\frac{1}{2}.
\end{equation}
We want to show that $\phi$ is strictly increasing on $[1,\infty)$. Setting $u=x+k-1$ in (\ref{e:33}), its right-hand side becomes
\begin{equation*}
x-1+\frac{1}{\log\bigg(1+\frac{1}{u}\bigg)}-u=g(u),\,\mbox{say}.
\end{equation*}
So in order to prove that $\phi$ is strictly increasing on $[1,\infty)$, it suffices to show that $g$ is strictly increasing on $(0,\infty)$. If we differentiate $g$ and apply the well-known geometric-logarithmic mean inequality $G\leq L$, we see that $g'(u)>0$, which implies that $\phi$ is strictly increasing on $[1,\infty)$. Hence we conclude from (\ref{e:32}) that
\begin{equation*}
-\gamma+\sum\limits_{k=1}^\infty\left(\frac{1}{k}-\frac{1}{k+\phi(1)}\right)\leq\log x \leq-\gamma+\sum\limits_{k=1}^\infty\left(\frac{1}{k}-\frac{1}{k+\phi(\infty)}\right).
\end{equation*}
Using (\ref{e:3}), (\ref{e:34}) and (\ref{e:35}) we obtain
\begin{equation*}
\psi\left(\frac{1}{\log(1+1/x)}\right)\leq\log x\leq \psi\left(x+\frac{1}{2}\right).
\end{equation*}
Since $\psi^{-1}$ is strictly increasing on $(-\infty,\infty)$, applying $\psi^{-1}$ to both sides of these inequalities we get
\begin{equation*}
\frac{1}{\log(1+1/x)}\leq\psi^{-1}(\log x)\leq x+\frac{1}{2}.
\end{equation*}
Replacing $x$ by $e^x$ here completes the proof of the theorem.
\end{proof}
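The bounds in (\ref{e:17}) are also easy to check numerically. Since $\psi$ is strictly increasing on $(0,\infty)$, the inequality (\ref{e:17}) is equivalent to $\psi\bigl(1/\log(1+e^{-x})\bigr)<x<\psi\bigl(e^{x}+\tfrac{1}{2}\bigr)$, which the following short Python sketch (our illustration; the grid of test points is arbitrary) verifies on a grid:
\begin{verbatim}
import numpy as np
from scipy.special import digamma

# psi is strictly increasing on (0, inf), so the bounds of the theorem are
# equivalent to  psi(1/log(1+e^{-x})) < x < psi(e^x + 1/2).
x = np.linspace(-10.0, 10.0, 2001)
lower = digamma(1.0 / np.log1p(np.exp(-x)))   # psi at the lower bound
upper = digamma(np.exp(x) + 0.5)              # psi at the upper bound
assert np.all(lower < x) and np.all(x < upper)
print("smallest margins:", np.min(x - lower), np.min(upper - x))
\end{verbatim}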
\end{document}
|
\begin{document}
\title{Approximation to Singular Quadratic Collision Model in
Fokker-Planck-Landau Equation}
\begin{abstract}
We propose a Hermite-Galerkin spectral method to numerically solve
the spatially homogeneous Fokker-Planck-Landau equation with
singular quadratic collision model. To compute the collision model,
we adopt a novel approximation formulated by a combination of a
simple linear term and a quadratic term very expensive to
evaluate. Using the Hermite expansion, the quadratic term is
evaluated exactly by calculating the spectral coefficients. To deal
with singularities, we make use of Burnett polynomials so that even
very singular collision model can be handled smoothly. Numerical
examples demonstrate that our method can capture low-order moments
with satisfactory accuracy and performance.
\vspace*{4mm}
\noindent {\bf Keywords:} Fokker-Planck-Landau equation;
Hermite-Galerkin spectral method; Burnett polynomials; Quadratic collision
operator; Super singularity
\end{abstract}
\section{Introduction}
The Fokker-Planck-Landau (FPL) equation is a common kinetic model in
plasma physics, accelerator physics and astrophysics. It describes
binary collision between charged particles with long-range Coulomb
interaction, and is represented by a nonlinear partial
integro-differential equation.
As a classical result, the FPL operator is the limit of the Boltzmann
operator for a sequence of scattering cross sections which converge, in a
convenient sense, to a delta function at zero scattering angle
\cite{comparisonBuet1999}. The original derivation of the equation
based on this idea is due to Landau \cite{Landau1965}, and since then
several works have been devoted to this problem, such as
\cite{ArsenBuryak, DegondLucquinDesreux, Rosenbluth}. Recently Villani
\cite{Villaniweak1998} has obtained a rigorous proof of this
asymptotic problem in the space homogeneous scenario. For the
mathematical properties of the FPL equation, such as the existence of
the solutions, we refer the reader to Villani \cite{Villanireview} and
the reference therein.
The numerical solution of nonlinear kinetic equations, such as the FPL
equation, represents a real challenge for numerical methods. This
is essentially due to the non-linearity, as well as the high dimension
of variables, which is seven for the full problem. Moreover, the
complex three dimensional integro-differential stiff
advection-diffusion operator in velocity space is also remarkably
difficult to deal with due to the high singularity. Besides, this
integration has to be handled carefully since it is closely related to
the macroscopic properties, for example the collision term does not
change the total mass, momentum and energy. Several numerical
approaches have been brought up to solve FPL equation. Generally
speaking, there are two kinds of methods, stochastic methods and the
deterministic methods. For the stochastic methods, the DSMC method, which is
widely used in the simulation of the Boltzmann equation~\cite{bird}, has been
adopted to solve the FPL equation. A detailed discussion of the
stochastic method is beyond the scope of this paper, and we refer the
reader to \cite{DirectDimarco2010, BobylevMonte2013} for a much more
complete treatment. For the deterministic methods, due to the complex
form of the FPL operator, several numerical approaches are devoted to
the simpler diffusive Fokker-Planck model \cite{GuoTang2000,
ZhangWei1997}, the space homogeneous situations in the isotropic
case \cite{BuetConservative1998} or cylindrically symmetric problems
\cite{ConservativePekker1984}. Moreover, Villani \cite{Villani1998on}
has brought up a linear collision model for the Maxwell molecules. The
construction of conservative and entropy schemes for the space
homogeneous case has been proposed in \cite{Degond1994,
BuetNumerical1999}, where the main physical properties are all
satisfied. However, the direct implementation of such schemes is quite
expensive. Several fast approximate algorithms to reduce the
complexity of these methods, based on multipole expansion
\cite{Lemou1998} or multigrid techniques \cite{BuetConservative1998}
have been proposed. A fast spectral method based on Fourier spectral
approximation of the collision operator is introduced in
\cite{PARESCHI2000}, and it is then also utilized to solve
inhomogeneous FPL equation \cite{Deterministiczhang2016,
Filbet2002numerical}. For the numerical stiffness of the
Fokker-Planck collision operator, the implicit time scheme is also
studied \cite{ImplicitLemou2005, massTaitano2015}. There is a certain
kind of asymptotic-preserving method that seeks to accelerate the
solution of the FPL equation by the so-called penalization techniques
\cite{JinYan2011}.
As another kind of spectral method, the Hermite spectral method has also been
utilized to solve the FPL equation. The Hermite method, in which basis functions
with weighted orthogonality in $\bbR^3$ are employed,
dates back to Grad's work \cite{Grad}, where it was used to solve the
Boltzmann equation, and has been known as the moment method ever since.
Besides, the expansion with respect to Burnett polynomials was proposed in
\cite{Burnett1936, Gamba2018} to find the coefficients of the collision term
of the expansion in the Hermite basis. Using the
Hermite expansion, it is still a tough job to evaluate the exact
coefficients in the expansion of the collision operator, since the
computational cost for the quadratic form is hardly bearable
and novel models need to be introduced. In a recent
work \cite{QuadraticCol}, the explicit expressions of all the
coefficients in the Hermite spectral method for the quadratic
Boltzmann collision operator are presented, and the new collision
model which can preserve the physical properties and reduce
computational cost at the same time was brought up using these
coefficients. It is much harder to evaluate these coefficients for the
quadratic FPL collision operator compared to the Boltzmann equation,
because of the high singularity and the partial derivative
operator. In \cite{ExactPfefferle2017}, the coefficients for the
Coulombian case were evaluated numerically, and the explicit form was
listed for the first few moments.
Inspired by these works, in this paper we develop a numerical
method for the FPL equation with the quadratic collision model, which may be
very singular. Following the approach in \cite{QuadraticCol}, we
approximate the collision model as the combination of a simple linear
term and a quadratic term. The idea is to take only a portion in the
truncated series expansion to be treated ``quadratically'', and the
remaining part is approximated by the linear collision operator
brought up by Villani \cite{Villani1998on}. This may greatly reduce
the computational cost and we can still capture the evolution of
physical variables accurately. The linear term can be handled easily,
while the difficulty imposed by the singularity in the quadratic
collision model remains. We reveal that by making use of Burnett
polynomials, the singular part of the integral in the collision
operator can be handled smoothly. For the typical case that the
repulsive force between molecules is proportional to a negative power
of their distance, our method can handle problems where the index for
the power of distance is as small as $-5$, in comparison to the index
fixed at $-3$ in \cite{ExactPfefferle2017}. To deal with the remaining
part in the quadratic term without singularity, the Hermite-Galerkin
spectral method is then adopted. We derive the explicit formulae for
all the coefficients in the Hermite expansion of the collision
operator, and these formulae can all be evaluated exactly offline for
immediate applications. Thus eventually the quadratic term is able to
be evaluated efficiently.
The rest of this paper is organized as follows. In Section
\ref{sec:Bol_Her}, we briefly review the FPL equation and the Hermite
expansion of the distribution function. In Section \ref{sec:general},
we first give an explicit expression of the series expansion of the
quadratic collision operator and then introduce precisely how to deal
with the singularity by Burnett polynomials. The construction of the
approximated collision model is presented in Section
\ref{sec:construction}. Some numerical experiments verifying the
effectiveness of our methods are carried out in Section
\ref{sec:numerical}. The concluding remarks and detailed derivation of
the expansions are given in Section \ref{sec:conclusion} and
\ref{sec:appendix} respectively.
\section{FPL equation and Hermite expansion}
\label{sec:Bol_Her}
We will first give a brief review of the FPL equation, and then
introduce the Hermite spectral method for the expansion of the distribution function.
\subsection{FPL equation}
\label{sec:FPL}
The Fokker-Planck-Landau equation is a prevalent kinetic model in
plasma physics, describing the state of the particles in terms of a
distribution function $f(t,\bx,\bv)$, where $t$ is the time
coordinate, $\bx$ represents the spatial coordinates, and $\bv$ stands
for the velocity of particles. The governing equation of $f$ is
\begin{equation}
\frac{\partial f}{\partial t} + \nabla_{\bx} \cdot (\bv f) = \mQ[f],
\qquad t\in \mathbb{R}^+, \quad \bx \in \mathbb{R}^3,
\quad \bv \in \mathbb{R}^3,
\end{equation}
where $\mQ[f]$ is the collision operator with a quadratic form
\begin{equation} \label{eq:quad_col}
\mQ[f](t,\bx,\bv) = \nabla_{\bv} \cdot \int_{\mathbb{R}^3}
A(\bv - \bv_{\ast})(f(\bv_{\ast})\nabla_{\bv}f(\bv) -
f(\bv)\nabla_{\bv_{\ast}}f(\bv_{\ast})) \dd \bv_{\ast},
\end{equation}
where $A$ depends on the interaction between particles and is a
$3\times 3$ nonnegative (positive semi-definite) and symmetric matrix of the form
\cite{PARESCHI2000}
\begin{equation}
\label{eq:A}
A(\bv) = \Psi(|\bv|) \Pi (\bv),
\end{equation}
where $\Psi$ is a non-negative radial function, and $\Pi(\bv)$ is the
orthogonal projection upon the space orthogonal to $\bv$, as
$\Pi_{ij}(\bv) = \delta_{ij} - \frac{v_iv_j}{|\bv|^2}$.
We are primarily concerned with the IPL model, for which the force
between two molecules is always repulsive and proportional to a
negative power of their distance. In this case, the function
$\Psi(\bv)$ has the form
\begin{equation}
\label{eq:IPL}
\Psi(\bv) := \Lambda |\bv|^{\gamma + 2},
\end{equation}
where $\Lambda >0$ is a constant and $\gamma$ is the index of the
power of distance. This equation is obtained as a limit of the
Boltzmann equation, when all the collisions become grazing
\cite{Desvillettes1992}. In the case of the Boltzmann equation,
different $\gamma$ lead to different models. The case $\gamma > 0$
corresponds to the ``hard potential'' case, whereas for $\gamma < 0$,
it corresponds to the case of ``soft potential''. In the critical case
$\gamma = 0$, the gas molecules are referred to as ``Maxwell
molecules''. Another case of interest is when $\gamma = -3$ of the
Coulombian case, which is a very important model for applications in
plasma.
We shall focus on the numerical approximation of $\mQ[f]$, especially
when $\gamma$ is very small. Our model of approximating the collision operator
is best illustrated in the spatially homogeneous FPL equation case, namely
\begin{equation}
\label{eq:homo}
\frac{\partial f}{\partial t} = \mQ[f],
\qquad t\in \mathbb{R}^+, \quad \bv \in \mathbb{R}^3.
\end{equation}
As a classical result in kinetic equations, the steady state solution of this equation takes
the form of the Maxwellian:
\begin{equation}
\label{eq:general_Maxwellian}
\mathcal{M}_{\rho,\bu,\theta}(\bv)
:= \frac{\rho}{(2\pi\theta)^{3/2}}
\exp \left( -\frac{|\bv - \bu|^2}{2\theta} \right),
\end{equation}
where the density $\rho$, velocity $\bu$ and temperature $\theta$
are defined as follows
\begin{equation}
\label{eq:macro_var}
\rho = \int_{\bbR^3} f(t,\bv) \dd \bv, \quad \bu =
\frac{1}{\rho}\int_{\bbR^3}\bv f(t,\bv) \dd \bv, \quad \theta =
\frac{1}{3\rho}\int_{\bbR^3}|\bv -\bu|^2 f(t,\bv) \dd \bv.
\end{equation}
Moreover, the physical variables such as the heat flux $q_i$ and the
stress tensor $\sigma_{ij}$ are also of interest. They are defined as
\begin{gather*}
q_i = \frac{1}{2}\int_{\bbR^3}|\bv -\bu|^2(v_i-u_i)f\dd \bv, \qquad i = 1, 2, 3, \\
\sigma_{ij} = \int_{\bbR^3}
\left( (v_i-u_i)(v_j -u_j) - \frac{1}{3} \delta_{ij} |\bv-\bu|^2 \right) f
\dd \bv, \quad i,j = 1,2,3.
\end{gather*}
Similar to the Boltzmann equation, the collision operator preserves in
time the macroscopic quantities mass, momentum and energy. Therefore,
those are invariant quantities under evolution, and
\eqref{eq:macro_var} holds for any $t$. Thus we can
obtain
\begin{equation}
\label{eq:u_theta}
\rho = 1, \quad \bu = 0, \quad \theta = 1,
\end{equation}
by selecting a proper frame of
reference and applying an appropriate non-dimensionalization.
Now the Maxwellian \eqref{eq:general_Maxwellian} is simply reduced to
\begin{equation}
\label{eq:Maxwellian}
\mathcal{M}(\bv) := \frac{1}{(2\pi)^{3/2}}
\exp \left( -\frac{|\bv|^2}{2} \right).
\end{equation}
The heat flux and stress tensor are reduced into
\begin{gather*}
q_i = \frac{1}{2}\int_{\bbR^3}|\bv|^2v_if\dd \bv, \qquad \sigma_{ij}
=\int_{\bbR^3} \left( v_iv_j - \frac{1}{3} \delta_{ij} |\bv|^2
\right) f \dd \bv, \quad i,j = 1,2,3.
\end{gather*}
The normalization \eqref{eq:u_theta} shall always be
assumed in the following context.
In the literature, the complicated
form of the collision operator $\mQ[f]$ is handled by introducing
approximations of less complexity. For instance, for the Maxwell molecules
with $\Lambda=1$, if the distribution function $f$ is radially
symmetric, which is a property to be preserved under time evolution,
the collision operator can be rewritten as
\begin{equation} \label{eq:Linear}
\mQ^{\mathrm{linear}}[f] =
(D - 1) \nabla_{\bv} \cdot (\nabla f + f \bv),
\end{equation}
which was proposed by C. Villani \cite{Villani1998on}. Here $D$ is the
dimension of the velocity space, and we always set $D=3$ in this
context. In this case, the FPL equation reduces to the linear
Fokker-Planck (FP) equation, which can be used to describe the
relaxation of Brownian molecules in a gas.
Due to the complex form of the FPL operator, several numerical
approaches are devoted to the simpler diffusive Fokker-Planck model or
to reduced collision models \cite{POTAPENKO1999115,
BEREZIN1987163}. Hence it is highly necessary to develop efficient numerical methods for
the original FPL equation with quadratic collision operator.
\subsection{Series expansion of distribution function}
\label{sec:expansion}
Our numerical discretization shall be based on the series
expansion in the weighted $L^2$ space of the distribution function
$\mF = L^2(\bbR^3; \mM^{-1}\dd\bv)$:
\begin{equation}
\label{eq:expansion}
f(t, \bv) = \sum_{|\balpha|=0}^{+\infty}f_{\balpha}(t)H^{\balpha}(\bv)\mM(\bv),
\end{equation}
where $\mM(\bv)$ is the Maxwellian, and $\balpha=(\alpha_1, \alpha_2,
\alpha_3)^T$ is a three-dimensional multi-index, and $|\balpha| = \alpha_1
+ \alpha_2 + \alpha_3$.
In \eqref{eq:expansion}, $H^{\balpha}(\bv)$ are the Hermite
polynomials defined as follows:
\begin{definition}[Hermite polynomials]
For $\alpha_i \in \mathbb{N}, i = 1,2, 3$, the Hermite polynomial
$H^{\balpha}(\bv)$ is defined as
\begin{equation}
\label{eq:basis}
H^{\balpha}(\bv) = \frac{(-1)^{|\balpha|}}{\mathcal{M}(\bv)}
\frac{\partial^{|\balpha|}}{\partial v_1^{\alpha_1} \partial
v_2^{\alpha_2} \partial v_3^{\alpha_3}} \mathcal{M}(\bv),
\end{equation}
where $\mathcal{M}(\bv)$ is given in \eqref{eq:Maxwellian}.
\end{definition}
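For instance, the lowest-order polynomials obtained directly from \eqref{eq:basis} are
\begin{displaymath}
H^{(0,0,0)}(\bv)=1,\qquad H^{(1,0,0)}(\bv)=v_1,\qquad
H^{(2,0,0)}(\bv)=v_1^2-1,\qquad H^{(1,1,0)}(\bv)=v_1v_2,
\end{displaymath}
and similarly for the other components.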
The expansion \eqref{eq:expansion} was introduced to solve the Boltzmann
equation in \cite{Grad}, where such an expansion was invoked to
deploy moment methods. We can derive moments
based on the coefficients $f_{\balpha}$ from the orthogonality
of Hermite polynomials
\begin{equation}
\label{eq:Her_orth}
\int_{\bbR^3}H^{\balpha}(\bv)H^{\bbeta}(\bv)\mM(\bv) \dd \bv
= \delta_{\balpha,\bbeta} \balpha!,
\end{equation}
where $\delta_{\balpha, \bbeta}$ is defined as
$\delta_{\balpha,\bbeta} =
\prod\limits_{i=1}^3\delta_{\alpha_i,\beta_i}$
and $\balpha ! = \prod\limits_{i=1}^3\alpha_i!$. For example, by the
orthogonality aforementioned, we can insert the expansion
\eqref{eq:expansion} into the definition of $\rho$ in
\eqref{eq:macro_var} to get $f_{\boldsymbol{0}} = \rho$, where
$\boldsymbol{0} = (0, 0,0)$. In our case, the normalization
\eqref{eq:u_theta} gives us $f_{{\boldsymbol{0}}} = 1$. In a similar manner, we can see
from the other two equations in \eqref{eq:macro_var}
and \eqref{eq:u_theta} that
\begin{equation}
f_{\be_i} = 0, \quad i = 1, 2,3, \qquad \sum_{i=1}^3f_{2\be_i}= 0,
\end{equation}
where $\be_i$ is a three dimensional index whose $i$-th entry equals
$1$ and other entries equal zero. The heat flux and stress tensor are
related to the coefficients by
\begin{displaymath}
q_j = 2 f_{3\be_j} + \sum_{k=1}^3f_{\be_j + 2\be_k}, \qquad \sigma_{ij} =
(1+\delta_{ij})f_{\be_i + \be_j}, \qquad i, j = 1, 2,3.
\end{displaymath}
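To illustrate how such relations follow from the orthogonality \eqref{eq:Her_orth}, consider the identity $f_{\boldsymbol 0}=\rho$: since $H^{\boldsymbol 0}\equiv 1$, inserting \eqref{eq:expansion} into the definition of $\rho$ gives
\begin{displaymath}
\rho=\int_{\bbR^3} f(t,\bv)\dd \bv
=\sum_{|\balpha|=0}^{+\infty}f_{\balpha}(t)\int_{\bbR^3}H^{\balpha}(\bv)H^{\boldsymbol 0}(\bv)\mM(\bv)\dd \bv
=f_{\boldsymbol 0}(t),
\end{displaymath}
and the relations for $q_j$ and $\sigma_{ij}$ are obtained in the same way from the low-order Hermite polynomials.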
\section{Approximation of quadratic collision term}
\label{sec:general}
In order to investigate the evolution of the coefficients
$f_{\balpha}$ in the expansion \eqref{eq:expansion}, we shall expand
the collision term under the same function space. The expansion of
collision operator of the linear type is rather straightforward. As
an example, the explicit form of expansion of \eqref{eq:Linear} in
three dimensional case is
\begin{equation}
\label{eq:linear_exp}
\mathcal{Q}^{\mathrm{linear}}[f] =
\sum_{|\balpha|=0}^{+\infty}Q_{\balpha}^{\mathrm{linear}} H^{\balpha}(\bv)\mM(\bv),
\qquad Q_{\balpha}^{\mathrm{linear}} = -(D-1)|\balpha| f_{\balpha}.
\end{equation}
which comes as a consequence of the property that Hermite polynomials
can diagonalize the linear FP operator. It is also intrinsically
implied by the fact that the Fokker-Planck equation can be used in the
context of stochastic processes, while Hermite polynomials play a crucial
role in Brownian motion, but we shall not take the stochastic
perspective here.
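As a simple illustration, for the space homogeneous linear model $\partial_t f=\mQ^{\mathrm{linear}}[f]$ the expansion \eqref{eq:linear_exp} decouples the evolution of the coefficients,
\begin{displaymath}
\frac{\mathrm{d} f_{\balpha}}{\mathrm{d} t}=-(D-1)|\balpha|\, f_{\balpha},
\qquad\mbox{so that}\qquad
f_{\balpha}(t)=e^{-(D-1)|\balpha| t}f_{\balpha}(0),
\end{displaymath}
i.e.\ all higher-order coefficients decay exponentially and the distribution function relaxes to the Maxwellian $\mM$.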
We shall first discuss the series expansion of the quadratic collision
term $\mQ[f]$ defined in \eqref{eq:quad_col}, and then combine the
quadratic result with the linear-type collision operators to construct
collision models with better accuracy and less computational
complexity.
\subsection{Series expansions of quadratic collision terms}
Suppose the binary collision term $\mQ[f]$ is expanded into the following form
\begin{equation}
\label{eq:S_expan}
\mQ[f](\bv) = \sum_{|\balpha|=0}^{+\infty}
Q_{\balpha}H^{\balpha}(\bv)\mM(\bv).
\end{equation}
Due to the orthogonality of Hermite polynomials, we get
\begin{equation}
\label{eq:S_k1k2k3}
Q_{\balpha}
= \frac{1}{\balpha!}\int H^{\balpha}(\bv)\mQ[f](\bv) \dd \bv
=\sum\limits_{|\blambda|=0}^{+\infty}\sum\limits_{|\bkappa|=0}^{+\infty}
A_{\balpha}^{\blambda, \bkappa}
f_{\blambda}f_{\bkappa},
\end{equation}
where the last equality can be derived by inserting
\eqref{eq:expansion} into \eqref{eq:quad_col}, and
\begin{equation}
\label{eq:coeA_detail}
\begin{aligned}
& A_{\balpha}^{\blambda,\bkappa} = \frac{1}{\balpha!}
\int_{\mathbb{R}^3} H^{\balpha}(\bv) \nabla_{\bv}\cdot
\int_{\mathbb{R}^3} A(\bv -
\bv_{\ast}) \\
&\qquad \Big(H^{\blambda}(\bv_{\ast})
\mM(\bv_{\ast})\nabla_{\bv}\left(H^{\bkappa}(\bv)\mM(\bv)\right)
-H^{\blambda}(\bv)\mM(\bv)\nabla_{\bv_{\ast}}
\left(H^{\bkappa}(\bv_{\ast})\mM(\bv_{\ast})\right)\Big) \dd
\bv_{\ast} \,\mathrm{d}\bv.
\end{aligned}
\end{equation}
The above formula is of an extremely complex form, with the evaluation
of every single coefficient requiring a six-dimensional integration as well as
differential operations. Although this could be computed by numerical quadrature,
the computational cost of obtaining all these coefficients would be unbearable.
Recently, in \cite{ExactPfefferle2017}, a strategy to simplify the
above integral is introduced for the Coulombian case
$\gamma = -3$, and explicit values are given for
small indices. In order to deal with this integral, we give the explicit
expressions of all the coefficients $A_{\balpha}^{\blambda, \bkappa}$
and enlarge the applicable region of these expressions to
$\gamma > -5$ for the quadratic collision kernel, which incorporates
the domain of definition for $\gamma$ in the IPL model. The main
results are summarized in the following theorem:
\begin{theorem}
\label{thm:coeA}
The expansion coefficients of the collision operator $\mQ[f](\bv)$
defined in \eqref{eq:S_k1k2k3} have the following form:
\begin{equation}
\label{eq:coeA}
\begin{aligned}
A_{\balpha}^{\blambda,\bkappa} =& 2^{(\gamma + 3 -
|\balpha|)/2}\sum\limits_{s,t = 1}^3
\sum\limits_{|{p}|=0}^{|\balpha|-1}
\frac{\Lambda}{{q^{[s]}}!}
\left(a_{p, r^{[t]}}^{\bkappa+\be_t,\blambda} -
a_{p, r^{[t]}}^{\blambda, \bkappa+\be_t}\right)
B_{r^{[t]}}^{q^{[s]}}(\gamma, s, t),
\end{aligned}
\end{equation}
where $p = (p_1, p_2, p_3)^T$ is a three-dimensional multi-index and
\begin{equation}
\label{eq:ijl_A}
q^{[s]} = \balpha - \be_s - p,
\quad r^{[t]}= \blambda + \bkappa + \be_{t} -
p, \quad a_{p,q}^{\blambda, \bkappa}
= \prod_{i=1}^3a_{p_i q_i}^{\lambda_i\kappa_i}, \qquad s, t = 1, 2, 3.
\end{equation}
The sums are taken over all indices in the indicated ranges for which each
subindex is non-negative.
The coefficients $a_{pq}^{\lambda\kappa}$ and
$B_{p}^{q}(\gamma, s, t)$ are defined by
\begin{equation}
\label{eq:coea}
a_{pq}^{\lambda\kappa} = 2^{-(p+q)/2} \lambda! \kappa!
\sum_{s=\max(0,p-\kappa)}^{\min(p,\lambda)}
\frac{(-1)^{q-\lambda+s}}{s!(\lambda-s)!(p-s)!(q-\lambda+s)!},
\end{equation}
and
\begin{equation}
\label{eq:coe_gamma}
B_{p}^{q}(\gamma, s, t) :=
-G_{st}(\gamma, p, q) +
\delta_{st}\sum_{r = 1}^3G_{rr}(\gamma, p, q),
\end{equation}
where
\begin{equation}
\label{eq:eta}
G_{st}(\gamma, p, q) = \int_{\bg \in
\bbR^3} |\bg|^{\gamma}
g_sg_tH^{p}(\bg)H^{q}(\bg)\mM(\bg) \dd \bg,
\qquad s,t = 1, 2,3.
\end{equation}
\end{theorem}
The proof of Theorem \ref{thm:coeA} can be found in Appendix
\ref{sec:Appendix_coeA}. Hence, we only have to compute
\eqref{eq:eta}. When $\gamma > -3$, it can be computed directly by the
recursive formula of the Hermite polynomials, following the method in
\cite{QuadraticCol}. However, for the Coulombian case $\gamma = -3$,
the recursive formula cannot be adopted directly due to the
singularity induced by the small value of $\gamma$. In
\cite{ExactPfefferle2017}, the Coulombian case $\gamma = -3$ is
evaluated by adopting the special form of the quadratic collision term
there. In the next section, we will introduce a new
method to deal with the super singularity for a large region of
$\gamma$.
\subsection{Derivation of exact coefficients in the super singular integral}
In this subsection, we introduce a different method to calculate these
coefficients exactly, which also enlarges the applicable range of
$\gamma$. To deal with the singularity, Burnett polynomials,
i.e.\ products of Sonine polynomials and solid spherical harmonics
\cite{Ikenberry}, are utilized here. Burnett polynomials were
introduced in \cite{Burnett1936} to approximate the distribution
function of the Boltzmann equation, and were adopted in \cite{BurnettCol,
Gamba2018} to reduce the quadratic collision operator. To be
concrete, the normalized form of the Burnett polynomials is
\begin{displaymath}
B_{\hat{\balpha}}(\bv) = \sqrt{\frac{2^{1-\hat{\alpha}_1} \pi^{3/2} \hat{\alpha}_3!}
{\Gamma(\hat{\alpha}_3+\hat{\alpha}_1+3/2)}}
L_{\hat{\alpha}_3}^{(\hat{\alpha}_1+1/2)} \left( \frac{|\bv|^2}{2} \right) |\bv|^{\hat{\alpha}_1}
Y_{\hat{\alpha}_1}^{\hat{\alpha}_2} \left( \frac{\bv}{|\bv|} \right),
\end{displaymath}
where the index $\hat{\balpha}$ is defined as
$$\hat{\balpha} = (\hat{\alpha}_1, \halpha_2, \halpha_3)^T,
\quad \halpha_1,\halpha_3 \in \bbN, \quad \halpha_2 =
-\halpha_1,\cdots,\halpha_1.$$
Here $L_n^{(\beta)}(x)$ is the generalized Laguerre polynomial
\begin{displaymath}
L_n^{(\beta)}(x) = \frac{x^{-\beta} \exp(x)}{n!}
\frac{\mathrm{d}^n}{\mathrm{d}x^n}
\left[ x^{n+\beta} \exp(-x) \right],
\end{displaymath}
and $Y_l^m(\bn)$ is the spherical harmonic
\begin{displaymath}
Y_l^m(\bn) = \sqrt{\frac{2l+1}{4\pi} \frac{(l-m)!}{(l+m)!}}
P_l^m(\cos \theta) \exp(\mathrm{i} m \phi), \qquad
\bn = (\sin \theta \cos \phi, \sin \theta \sin \phi, \cos \theta)^T
\end{displaymath}
with $P_l^m$ the associated Legendre polynomial
\begin{displaymath}
P_l^m(x) = \frac{(-1)^m}{2^l l!} (1-x^2)^{m/2}
\frac{\mathrm{d}^{l+m}}{\mathrm{d}x^{l+m}} \left[ (x^2-1)^l \right].
\end{displaymath}
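For reference, the following minimal sketch (not part of the paper) evaluates $B_{\hat{\balpha}}(\bv)$ directly from this definition using SciPy's generalized Laguerre polynomials and complex spherical harmonics; the conventions of \texttt{scipy.special.sph\_harm} are assumed to match the formulas above, and the function name is ours.
\begin{verbatim}
# Hedged sketch: evaluate the normalized Burnett polynomial B_{(l,m,n)}(v)
# for a single nonzero 3-vector v, assuming SciPy's spherical-harmonic
# convention matches the definition in the text.
import numpy as np
from scipy.special import eval_genlaguerre, sph_harm, gamma

def burnett(l, m, n, v):
    # l = hat-alpha_1, m = hat-alpha_2, n = hat-alpha_3; v != 0 assumed
    r = np.linalg.norm(v)
    theta = np.arccos(v[2] / r)          # polar angle
    phi = np.arctan2(v[1], v[0])         # azimuthal angle
    norm = np.sqrt(2.0**(1 - l) * np.pi**1.5 * gamma(n + 1)
                   / gamma(n + l + 1.5))
    return (norm * eval_genlaguerre(n, l + 0.5, r**2 / 2.0) * r**l
            * sph_harm(m, l, phi, theta))   # sph_harm(m, l, azimuth, polar)
\end{verbatim}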
By the orthogonality of Laguerre polynomials and spherical harmonics,
one can find that
\begin{equation}
\label{eq:orthogonality_Burnett}
\int_{\bbR^3} \overline{B_{\hat{\balpha}}(\bv)} B_{\hat{\bbeta}}(\bv)
\mathcal{M}(\bv) \dd\bv =
\delta_{\hat{\balpha}, \hat{\bbeta}}.
\end{equation}
Before treating the singular integral itself, the symmetry of
$G_{st}(\gamma, \bp, \bq)$, which is stated in Lemma \ref{thm:G}, is
exploited to reduce the cost of computation and storage.
\begin{lemma}
\label{thm:G}
For the expressions $G_{st}(\gamma, \bp, \bq)$, it holds that
\begin{equation}
\label{eq:G_1}
G_{st}(\gamma, \bp, \bq)=G_{ts}(\gamma, \bp, \bq), \qquad s, t = 1,
2, 3,
\end{equation}
and
\begin{equation}
\begin{split}
G_{11}(\gamma, \bp, \bq)= G_{22}(\gamma, \Pi_{2}^1\bp,
\Pi_{2}^1\bq) = G_{33}(\gamma, \Pi_{3}^1\bp,
\Pi_{3}^1\bq), \\
G_{12}(\gamma, \bp, \bq)= G_{13}(\gamma, \Pi_{3}^2\bp,
\Pi_{3}^2\bq) = G_{23}(\gamma, \Pi_{3}^1\bp,
\Pi_{3}^1\bq).
\end{split}
\end{equation}
Here $\Pi_{i}^j\bp$ denotes the permutation operator that exchanges the
$i$-th and $j$-th entries of $\bp$.
\end{lemma}
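As an illustration only (not taken from the paper), the symmetries of Lemma \ref{thm:G} can be used to store a single representative per equivalence class; the helper below maps any $(s,t,\bp,\bq)$ to an equivalent key with $(s,t)\in\{(3,3),(1,3)\}$. The function and its storage convention are our own sketch.
\begin{verbatim}
def _swap(idx, i, j):
    # exchange the i-th and j-th entries (1-based) of a length-3 multi-index
    a = list(idx)
    a[i - 1], a[j - 1] = a[j - 1], a[i - 1]
    return tuple(a)

def canonical_G_key(s, t, p, q):
    # reduce (s, t, p, q) to an equivalent key with (s, t) = (3, 3) or (1, 3),
    # using G_st = G_ts and the coordinate permutations of Lemma thm:G
    if s > t:
        s, t = t, s
    if (s, t) == (1, 1):
        return (3, 3, _swap(p, 1, 3), _swap(q, 1, 3))
    if (s, t) == (2, 2):
        return (3, 3, _swap(p, 2, 3), _swap(q, 2, 3))
    if (s, t) == (1, 2):
        return (1, 3, _swap(p, 2, 3), _swap(q, 2, 3))
    if (s, t) == (2, 3):
        return (1, 3, _swap(p, 1, 2), _swap(q, 1, 2))
    return (s, t, tuple(p), tuple(q))    # already (3, 3) or (1, 3)
\end{verbatim}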
Based on Lemma \ref{thm:G}, we only have to compute two cases
$G_{33}(\gamma, \bp, \bq)$ and $G_{13}(\gamma, \bp, \bq)$. In order
to handle the singularity in $G_{st}(\gamma, \bp, \bq)$, the Hermite
polynomials in \eqref{eq:eta} are expressed as linear combinations of
the Burnett polynomials, precisely
\begin{equation}
\label{eq:Her_Bur}
H^{\balpha}(\bv) = \sum_{|\hat{\balpha}|_B = |\balpha|}
C_{\hat{\balpha}}^{\balpha}B_{\hat{\balpha}}(\bv), \qquad C_{\hat{\balpha}}^{\balpha} =
\int_{\bbR^3}B_{\hat{\balpha}}(\bv)H^{\balpha}(\bv)\mM(\bv) \dd \bv,
\end{equation}
where $|\hat{\balpha}|_B = \halpha_1 + 2\halpha_3$. Since both the Hermite
and the Burnett polynomials are orthogonal polynomials associated with the
same weight function, the coefficients
$C_{\hat{\balpha}}^{\balpha}$ defined in \eqref{eq:Her_Bur} are
nonzero only when the degrees of $H^{\balpha}$ and $B_{\hat{\balpha}}$
are equal, precisely when $|\hat{\balpha}|_B = |\balpha|$. The detailed
algorithm to compute the coefficients $C_{\hat{\balpha}}^{\balpha}$ can
be found in \cite{BurnettCol}; we also explain it briefly in
Appendix \ref{sec:Appendix_CoeC}. With the help of the Burnett
polynomials, we can finally get the exact value of
$G_{st}(\gamma, \bp, \bq).$
\begin{proposition}
When $\gamma > -5$, $G_{st}(\gamma, \bp, \bq)$ defined in
\eqref{eq:eta} can be simplified as
\begin{equation}
\label{eq:detail_D}
\begin{aligned}
G_{st}(\gamma, \bp, \bq) & = 2^{(\gamma + 2)/2}\sum_{|\hat{\bp}|_B
= |\bp|}\sum_{|\hat{\bq}|_B =
|\bq|}C_{\hat{\bp}}^{\bp}C_{\hat{\bq}}^{\bq}
D_{\hat{p}_3,\hat{q}_3}^{\hat{p}_1\hat{q}_1} \\
& K\left(\frac{\gamma + \hat{p}_1 + \hat{q}_1+3}{2}, \hat{p}_1 +
\frac{1}{2}, \hat{q}_1 + \frac{1}{2}, \hat{p}_3,
\hat{q}_3\right) F_{st}(\hat{p}_1, \hat{p}_2, \hat{q}_1,
\hat{q}_2),
\end{aligned}
\end{equation}
where
\begin{equation}
\label{eq:F}
F_{st}(\hat{p}_1, \hat{p}_2, \hat{q}_1, \hat{q}_2) =
\int_{\bbS^2}n_sn_tY_{\hat{p}_1}^{\hat{p}_2}(\bn)Y_{\hat{q}_1}^{\hat{q}_2}(\bn) \dd \bn,
\qquad s, t = 1, 2, 3,
\end{equation}
Here $n_s$ and $n_t$ are the $s$-th and $t$-th entries of the unit
vector $\bn$ in spherical coordinates,
$\bn = (\sin \theta \cos \phi, \sin \theta \sin \phi, \cos \theta)^T$.
The parameters in \eqref{eq:detail_D} are defined as
$$D_{n_1n_2}^{l_1l_2} = \sqrt{\frac{n_1! n_2!}{\Gamma(n_1+l_1+3/2)
\Gamma(n_2+l_2 + 3/2)}}$$ and
\begin{equation}
\begin{split}
K(\mu, \alpha, \kappa, m, n) =
(-1)^{m+n}\Gamma(\mu+1)\sum_{i=0}^{\min(m,n)} \binom{\mu -
\alpha}{m - i} \binom{\mu - \kappa}{n - i} \binom{i + \mu}{i}.
\end{split}
\end{equation}
\end{proposition}
\begin{proof}
Substituting \eqref{eq:Her_Bur} into \eqref{eq:eta} and applying
the formula introduced in \cite[eq.~(10)]{Srivastava},
\begin{equation} \label{eq:no_hg}
\begin{split}
\int_0^{+\infty} L_{m}^{(\alpha)} (s) L_{n}^{(\kappa)} (s) s^{\mu}
\exp(-s) \dd s = (-1)^{m+n}\Gamma(\mu+1)\sum_{i=0}^{\min(m,n)}
\binom{\mu - \alpha}{m - i} \binom{\mu - \kappa}{n - i} \binom{i +
\mu}{i},
\end{split}
\end{equation}
we obtain the stated expression, which proves the proposition.
\end{proof}
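The closed form \eqref{eq:no_hg} is easy to check numerically; the short script below (not part of the paper, with arbitrarily chosen parameter values) compares it with direct quadrature, assuming $\mu>-1$ and integer $m,n\geq 0$.
\begin{verbatim}
# Hedged sketch: numerical sanity check of the Laguerre integral (eq:no_hg).
import numpy as np
from scipy.integrate import quad
from scipy.special import eval_genlaguerre, gamma

def binom_real(x, k):
    # generalized binomial coefficient for real x and integer k >= 0
    out = 1.0
    for i in range(k):
        out *= (x - i) / (i + 1)
    return out

def rhs(m, n, alpha, kappa, mu):
    s = sum(binom_real(mu - alpha, m - i) * binom_real(mu - kappa, n - i)
            * binom_real(i + mu, i) for i in range(min(m, n) + 1))
    return (-1.0)**(m + n) * gamma(mu + 1.0) * s

def lhs(m, n, alpha, kappa, mu):
    f = lambda s: (eval_genlaguerre(m, alpha, s) * eval_genlaguerre(n, kappa, s)
                   * s**mu * np.exp(-s))
    val, _ = quad(f, 0.0, np.inf)
    return val

print(lhs(2, 3, 0.5, 1.5, 0.75), rhs(2, 3, 0.5, 1.5, 0.75))  # should agree
\end{verbatim}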
Finally, as stated previously in Lemma \ref{thm:G}, we only need to
compute $G_{33}(\gamma, \bp, \bq)$ and $G_{13}(\gamma, \bp, \bq)$.
Therefore, only these two corresponding cases of \eqref{eq:F} are
discussed in the following theorem and the proof is presented in
Appendix \ref{sec:Appendix_coeF}.
\begin{theorem}
\label{thm:coeF}
Define $\eta_{lm}^{\mu}$ as
\begin{equation}
\label{eq:coe_eta}
\eta_{lm}^{\mu} = \sqrt{\frac{[l + (2\delta_{1, \mu} - 1)m +
\delta_{1, \mu}] [l - (2\delta_{-1, \mu} - 1)m + \delta_{-1,
\mu}]} {2^{|\mu|}(2l -1)(2l+1)}}.
\end{equation}
Then the coefficients $F_{13}(l_1, m_1, l_2, m_2)$ and
$F_{33}(l_1, m_1, l_2, m_2)$ have the following explicit form
\begin{equation}
\begin{aligned}
& F_{13}(l_1, m_1, l_2, m_2) =
\frac{(-1)^{m_2+1}}{\sqrt{2}}\sum_{k,j,l=0,1}
(-1)^{l+j}\eta_{\delta_{0k}+(-1)^kl_2, m_2}^0
\eta_{(-1)^jl_1+\delta_{0j}, m_1}^{(-1)^l}
\delta_{l_1 + \delta_{1k} - \delta_{1j} ,
l_2 -\delta_{1k} +\delta_{1j}}^{m_1+(-1)^l, -m_2}, \\
& F_{33}(l_1, m_1, l_2, m_2) =(-1)^{m_2} \sum_{k, j=0,1} \eta_{\delta_{0k}+(-1)^kl_2, m_2}^0
\eta_{(-1)^jl_1+\delta_{0j}, m_1}^0 \delta_{l_1 + \delta_{1k} - \delta_{1j} ,
l_2 -\delta_{1k} +\delta_{1j}}^{m_1, -m_2}.
\end{aligned}
\end{equation}
\end{theorem}
The above analysis shows that for the FPL collision operator, the
coefficients $A_{\balpha}^{\blambda,\bkappa}$ can be calculated
exactly for $\gamma > -5$, which makes it much easier to build
high order schemes for solving the FPL equation numerically. Moreover, the
algorithm for these coefficients is readily applicable to offline
numerical evaluation; its effectiveness is corroborated by
our numerical examples.
\section{Construction of a novel collision model}
\label{sec:construction}
By now, we have a complete algorithm to calculate the
coefficients $A_{\balpha}^{\blambda,\bkappa}$, which can
be utilized either to discretize the quadratic collision term or to construct new
collision models. We now discuss both topics.
\subsection{Discretization of the homogeneous FPL equation}
We discretize the homogeneous FPL equation
by the Galerkin spectral method, based on the expansion \eqref{eq:expansion}
of the distribution function. For any positive integer $M$, we
define the functional space of the numerical solution as
\begin{equation}
\label{eq:set}
\mathcal{F}_M=\text{span}\{H^{\balpha}(\bv)\mM(\bv)\,|\,\balpha\in I_M\}
\subset \mathcal{F}=L^2(\mathbb{R}^3;\mM^{-1}\dd\bv),
\end{equation}
where
$I_M=\{(\alpha_1, \alpha_2, \alpha_3)| 0 \leqslant |\balpha| \leqslant
M, \alpha_i \in \mathbb{N},i=1,2,3\}$.
Then the semi-discrete function $f_M(t, \cdot)\in \mF_M$
satisfies
\begin{equation} \label{eq:var}
\int_{\mathbb{R}^3} \frac{\partial f_M}{\partial t} \varphi
\mathcal{M}^{-1} \,\mathrm{d}\bv =
\int_{\mathbb{R}^3} \mQ(f_M, f_M) \varphi \mathcal{M}^{-1} \dd \bv,
\qquad \forall \varphi \in \mF_M.
\end{equation}
Suppose
\begin{equation}
\label{eq:f_h}
f_M(t,\bv) = \sum\limits_{\balpha \in I_M}
f_{\balpha}(t)H^{\balpha}(\bv)\mM(\bv) \in \mF_M.
\end{equation}
The equations \eqref{eq:S_expan} and \eqref{eq:S_k1k2k3} imply that the
variational form \eqref{eq:var} is equivalent to the ODE
system below:
\begin{equation} \label{eq:ODE}
\frac{\mathrm{d}f_{\balpha}}{\mathrm{d}t} =
\sum\limits_{\blambda \in I_M} \sum\limits_{\bkappa \in I_M}
A_{\balpha}^{\blambda,\bkappa}f_{\blambda}f_{\bkappa},
\qquad \balpha \in I_M.
\end{equation}
We thus obtain the ODE system in its full form \eqref{eq:ODE},
with the help of the exact coefficients $A_{\balpha}^{\blambda, \bkappa}$ for all
$\balpha, \blambda, \bkappa \in I_M$.
With $M$ fixed, these coefficients need to be computed only
once and can then be reused for multiple numerical examples.
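For concreteness, a minimal sketch (not from the paper) of evaluating the right-hand side of \eqref{eq:ODE} is given below; the dense storage layout of the coefficients is an assumption made only for illustration.
\begin{verbatim}
# Hedged sketch: right-hand side of the Galerkin ODE system (eq:ODE),
# assuming A[a, l, k] = A_alpha^{lambda,kappa} over a fixed ordering of I_M.
import numpy as np

def collision_rhs(f, A):
    # f: Hermite coefficients f_alpha, shape (K,), K = #I_M
    # A: precomputed coefficient tensor, shape (K, K, K)
    return np.einsum("alk,l,k->a", A, f, f)
\end{verbatim}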
\subsection{Approximation of the general collision model}
In practical computations, the storage cost of the
coefficients $A_{\balpha}^{\blambda, \bkappa}$ becomes formidable,
since the number of coefficients grows rapidly as $M$ increases.
Moreover, the $O(M^9)$ computational cost is an issue,
especially when solving spatially non-homogeneous problems.
To overcome this difficulty, the method in \cite{QuadraticCol} is
adopted to reduce the computational cost: the coefficients
$A_{\balpha}^{\blambda, \bkappa}$ are computed and stored only up to a
small number $M_0$, for which the cost of solving
\eqref{eq:ODE} is acceptable. For $\balpha \not\in I_{M_0}$, we apply
the linear model \eqref{eq:Linear} proposed by Villani and set
\begin{equation}
\label{eq:linear_ode}
\frac{\dd f_{\balpha}}{\dd t} = -(D-1)|\balpha| f_{\balpha}, \qquad
\balpha \not\in I_{M_0}.
\end{equation}
Combining \eqref{eq:ODE} and \eqref{eq:linear_ode}, we obtain a
novel collision operator
\begin{equation}
\label{eq:collision_new}
\mQ^{M_0}[f] = P_{M_0}\mQ[P_{M_0}f] - \mQ^{\rm linear}[(I - P_{M_0})f], \qquad
\forall f \in \mF,
\end{equation}
where $P_{M_0}$ is the orthogonal projection from $\mF$ onto
$\mF_{M_0}$. After applying the spectral method
to this collision operator in the functional space $\mF_M$,
where $M$ is chosen to be larger than $M_0$,
the final ODE system for the new model is
\begin{equation}
\label{eq:new_collision}
\frac{\dd f_{\balpha}}{\dd t} = \mQ^{M, M_0}_{\balpha},
\end{equation}
where
\begin{equation}
\label{eq:coe_newcollision}
\mQ^{M, M_0}_{\balpha} = \left\{
\begin{array}{ll}
\sum\limits_{\blambda \in I_{M_0}} \sum\limits_{\bkappa \in I_{M_0}}
A_{\balpha}^{\blambda,\bkappa}f_{\blambda}f_{\bkappa},
& \balpha \in I_{M_0}, \\
-(D-1)|\balpha| f_{\balpha},
& \balpha \in I_M \setminus I_{M_0}.
\end{array}
\right.
\end{equation}
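A hedged sketch (not from the paper) of evaluating \eqref{eq:coe_newcollision} is given below; the index bookkeeping (the array \texttt{low} of positions of $I_{M_0}$ inside $I_M$ and the array \texttt{degree} of values $|\balpha|$) is an assumption made for illustration.
\begin{verbatim}
# Hedged sketch: right-hand side of the new model (eq:coe_newcollision).
import numpy as np

def mixed_collision_rhs(f, A_low, low, degree, D=3):
    # f: coefficients over I_M; A_low: tensor over I_{M_0};
    # low: indices of I_{M_0} inside I_M; degree[a] = |alpha|.
    dfdt = -(D - 1) * degree * f                              # linear decay
    f_low = f[low]
    dfdt[low] = np.einsum("alk,l,k->a", A_low, f_low, f_low)  # quadratic part
    return dfdt
\end{verbatim}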
By now, we have obtained a family of new collision models
\eqref{eq:collision_new}. Such a combination can be expected to reduce
the computational time significantly, owing to the simple form of the
linear FP collision operator in the Hermite basis, while at the same
time maintaining a high level of accuracy, since the quadratic part
already captures the most crucial information contained in the
lower-order coefficients and the linear part provides a satisfactory
approximation for the remaining ones. This will be observed in the
numerical examples.
\section{Numerical examples}
\label{sec:numerical}
In this section, we present several numerical results. In all of the
numerical experiments, we adopt the newly
proposed collision operator \eqref{eq:collision_new} and solve the
equation
\begin{displaymath}
\frac{\partial f}{\partial t} = \mQ^{M_0}[f],
\end{displaymath}
numerically for some positive integer $M_0$. The equation is solved
by the Galerkin spectral method, with the solution sought in the functional space
$\mF_M$ and $M$ chosen to be greater than $M_0$; that is, we solve the
ODE system \eqref{eq:new_collision} with coefficients \eqref{eq:coe_newcollision}.
For the discretization in time, we use the fourth-order Runge-Kutta method in
the examples, with time step $\Delta t = 0.01$. In
all examples, we set $\Lambda = 1$.
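For completeness, the classical fourth-order Runge-Kutta step used here is sketched below (standard textbook form, not code from the paper); \texttt{rhs} stands for any of the right-hand-side routines sketched above.
\begin{verbatim}
# Classical RK4 step with step size dt (dt = 0.01 in the examples).
def rk4_step(f, dt, rhs):
    k1 = rhs(f)
    k2 = rhs(f + 0.5 * dt * k1)
    k3 = rhs(f + 0.5 * dt * k2)
    k4 = rhs(f + dt * k3)
    return f + dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
\end{verbatim}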
Finally, we would like to mention that the expansion coefficients
in the Hermite basis are in each case obtained exactly by analytic derivation
instead of numerical integration, in order to achieve high accuracy.
\subsection{BKW solution}
For the Maxwell gas $\gamma = 0$, the original
FPL equation admits an exact solution with the following expression:
\begin{displaymath}
f(t,\bv) = (2\pi \tau(t))^{-3/2} \exp \left( -\frac{|\bv|^2}{2\tau(t)} \right)
\left[ 1 + \frac{1-\tau(t)}{\tau(t)}
\left( \frac{|\bv|^2}{2\tau(t)} - \frac{3}{2} \right) \right],
\end{displaymath}
where $\tau(t) = 1 - 0.4\exp\left(-4t\right)$. To obtain a good
approximation of the initial distribution function, we use $M = 15$
($816$ degrees of freedom) in our simulation. For visualization
purposes, we define the marginal distribution functions (MDFs)
\begin{displaymath}
g(t,v_1) = \int_{\mathbb{R}^2} f(t,\bv) \,\mathrm{d}v_2 \,\mathrm{d}v_3, \qquad
h(t,v_1,v_2) = \int_{\mathbb{R}} f(t,\bv) \,\mathrm{d}v_3.
\end{displaymath}
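The reference curves used for comparison can be reproduced directly from the exact BKW solution; the following small sketch (not part of the paper) evaluates $f(t,\bv)$ and obtains $g(t,v_1)$ by two-dimensional quadrature over a truncated domain.
\begin{verbatim}
# Hedged sketch: BKW solution and its 1D marginal by quadrature.
import numpy as np
from scipy.integrate import dblquad

def bkw(t, v1, v2, v3):
    tau = 1.0 - 0.4 * np.exp(-4.0 * t)
    r2 = v1**2 + v2**2 + v3**2
    return ((2.0 * np.pi * tau)**-1.5 * np.exp(-r2 / (2.0 * tau))
            * (1.0 + (1.0 - tau) / tau * (r2 / (2.0 * tau) - 1.5)))

def g_marginal(t, v1, L=10.0):
    val, _ = dblquad(lambda v3, v2: bkw(t, v1, v2, v3),
                     -L, L, lambda v2: -L, lambda v2: L)
    return val
\end{verbatim}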
The initial MDFs are plotted in Figure \ref{fig:ex1_init}, in which
the lines for exact functions and their numerical approximation are
hardly distinguishable.
\begin{figure}
\caption{Figure (a) is the initial marginal distribution functions
$g(0, v_1)$. The red solid line corresponds to the exact solution,
and the blue dashed line corresponds to the numerical approximation.
Figure (b) is the initial marginal distribution functions
$h(0, v_1, v_2)$. The blue solid lines correspond to the exact
solution, and the red dashed lines correspond to the numerical
approximation. Figure (c) shows only the numerical approximation.}
\label{fig:ex1_init_1d}
\label{fig:ex1_init_2d_contour}
\label{fig:ex1_init_2d}
\label{fig:ex1_init}
\end{figure}
Numerical results for $t = 0.01$, $0.02$ and $0.06$ are given in
Figures \ref{fig:ex1_1d} and \ref{fig:ex1_2d} for the
marginal distribution functions $g(t, v_1)$ and $h(t, v_1, v_2)$, respectively.
Here we take $M_0 = 5$ and $M_0 = 15$. For $M_0 = 5$, the numerical
solution provides a reasonable approximation, but still with
noticeable deviations, while for $M_0 = 15$, the two solutions match
perfectly in all cases.
\begin{figure}
\caption{Marginal distribution functions $g(t, v_1)$ for $M_0 = 5$ and
$15$ at $t = 0.01$, $0.02$ and $0.06$. The red solid lines
correspond to the exact solution, and the blue dot dashed and black
dashed lines correspond to the numerical solutions with $M_0 = 15$
and $5$ respectively.}
\label{fig:ex1_1d}
\end{figure}
\begin{figure}
\caption{Marginal distribution functions $h(t, v_1, v_2)$ for
$M_0 = 5$ and $15$ at $t = 0.01$, $0.02$ and $0.06$. The red dashed lines
correspond to the exact solution, and the blue solid lines at different
columns correspond to the numerical solutions $M_0 = 5$ and
$M_0 = 15$. }
\label{fig:ex1_2d}
\end{figure}
Now we consider the time evolution of the expansion coefficients. By
expanding the exact solution into a Hermite series, we obtain the exact
expressions for the coefficients:
\begin{equation}
\label{eq:ex1_true}
f_{\alpha}(t) = \left\{\begin{array}{ll}
\left[
-0.2\exp \left( -4 t \right)
\right]^{\frac{|\alpha|}{2}}
\dfrac{1 - |\alpha|/2}{(\alpha_1/2)!(\alpha_2/2)!(\alpha_3/2)!}, &
\text{if } \alpha_1, \alpha_2, \alpha_3 \text{ are even}, \\[13pt]
0, & \text{ otherwise}.
\end{array} \right.
\end{equation}
From \eqref{eq:ex1_true}, we can find that the coefficients
$f_{\alpha}$ are zero for any $t$ if
$1 \leqslant |\alpha| \leqslant 3$. Hence we will focus on the
coefficients $f_{400}$ and $f_{220}$ here.
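These reference curves follow directly from \eqref{eq:ex1_true}; a minimal sketch (not from the paper) is given below.
\begin{verbatim}
# Hedged sketch: exact coefficients (eq:ex1_true), e.g. f_400(t), f_220(t).
import numpy as np
from math import factorial

def f_exact(alpha, t):
    a1, a2, a3 = alpha
    if any(a % 2 for a in alpha):
        return np.zeros_like(np.asarray(t, dtype=float))
    k = (a1 + a2 + a3) // 2
    return ((-0.2 * np.exp(-4.0 * t))**k * (1.0 - k)
            / (factorial(a1 // 2) * factorial(a2 // 2) * factorial(a3 // 2)))

t = np.linspace(0.0, 1.0, 101)
f400, f220 = f_exact((4, 0, 0), t), f_exact((2, 2, 0), t)
\end{verbatim}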
Figure \ref{fig:ex1_moments} gives the comparison between the
numerical solution and the exact solution for these two coefficients.
In both plots, all the three lines coincide perfectly.
\begin{figure}
\caption{The evolution of the coefficients. The red lines correspond
to the reference solution, and the blue dot dashed and black dashed
lines correspond to the numerical solutions of $M_0=15$ and $M_0=5$
respectively.}
\label{fig:ex1_moments}
\end{figure}
\subsection{Bi-Gaussian initial data}
\label{sec:ex2}
In this example, we perform the numerical test for the Bi-Gaussian
problem. The Coulombian case $\gamma = -3$ is tested first. The initial
distribution function is
\begin{displaymath}
  f(0,\bv) = \frac{1}{2\pi^{3/2}} \left[
    \exp \Big( -\big((v_1 + \sqrt{3/2})^2 + v_2^2 + v_3^2\big) \Big) +
    \exp \Big( -\big((v_1 - \sqrt{3/2})^2 + v_2^2 + v_3^2\big) \Big)
  \right].
\end{displaymath}
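As a sanity check of this initial datum (with the exponent grouped as above, which is our reading of the formula), the sketch below verifies numerically that it has unit density and unit temperature; it is not part of the paper.
\begin{verbatim}
# Hedged sketch: moments of the Bi-Gaussian initial datum on a truncated grid.
import numpy as np

c = np.sqrt(1.5)
def f0(v1, v2, v3):
    return (np.exp(-((v1 + c)**2 + v2**2 + v3**2))
            + np.exp(-((v1 - c)**2 + v2**2 + v3**2))) / (2.0 * np.pi**1.5)

x = np.linspace(-8.0, 8.0, 161)
dx = x[1] - x[0]
V1, V2, V3 = np.meshgrid(x, x, x, indexing="ij")
F = f0(V1, V2, V3)
rho = F.sum() * dx**3                                     # ~ 1 (density)
T = ((V1**2 + V2**2 + V3**2) * F).sum() * dx**3 / 3.0     # ~ 1 (temperature)
print(rho, T)
\end{verbatim}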
In this numerical test, we use $M = 20$ which gives a good
approximation of the initial distribution function (see Figure
\ref{fig:ex2_init}).
\begin{figure}
\caption{Figure (a) is the initial marginal distribution functions
$g(0, v_1)$. The red solid line corresponds to the exact solution,
and the blue dashed line corresponds to the numerical approximation.
Figure (b) is the initial marginal distribution functions
$h(0, v_1, v_2)$. The blue solid lines correspond to the exact
solution, and the red dashed lines correspond to the numerical
approximation. Figure (c) shows only the numerical approximation.}
\label{fig:ex2_init_1d}
\label{fig:ex2_init_2d_contour}
\label{fig:ex2_init_2d}
\label{fig:ex2_init}
\end{figure}
For this example, we consider the three cases $M_0 = 5, 10, 15$, and
the corresponding one-dimensional marginal distribution functions at
$t = 0.4$, $1$ and $3$ are given in Figure \ref{fig:ex2_1d}. In all
cases, the numerical results converge to the reference
solution with $M_0 = 15$, and the lines for $M_0 = 10$ and $M_0 = 15$
are very close to each other. To get a clearer picture, similar
comparisons of two-dimensional results are also provided in Figure
\ref{fig:ex2_2d}.
\begin{figure}
\caption{The Coulombian case $\gamma = -3$. Marginal distribution
functions at different times.}
\label{fig:ex2_1d}
\end{figure}
\begin{figure}
\caption{The Coulombian case $\gamma = -3$. Comparison of numerical
results with the reference solution. The red dashed contours are the
reference solution as $M_0 = 15$. The blue solid contours at
different columns are respectively the results for $M_0 = 5$ and
$M_0 = 10$.}
\label{fig:ex2_2d}
\end{figure}
Now we consider the evolution of the stress tensor and the heat flux. In this
example, we always have $\sigma_{11} = -2\sigma_{22} = -2\sigma_{33}$
and $q_i = 0$, $i = 1, 2, 3$. Therefore we focus only on the evolution of
$\sigma_{11}$, which is plotted in Figure \ref{fig:ex2_sigma11}. It
can be seen that even for $M_0 = 5$, where the distribution function
itself is not approximated very well, the evolution of the stress
tensor is almost exact: the three lines all lie on top of each other.
\begin{figure}
\caption{The Coulombian case $\gamma = -3$. Evolution of
$\sigma_{11}$.}
\label{fig:ex2_sigma11}
\end{figure}
To test the capability of our new model, the same
example is also run with the very small value $\gamma = -4.9$. Here
we again set $M=20$, and choose the numerical results with $M_0=15$ as
the reference solution. Figure \ref{fig:ex2_4p9_2d} shows the marginal
distribution $h(t, v_1, v_2)$ at $t = 0.4$, $1$ and $3$ with $M_0= 5$
and $10$. It illustrates that for $\gamma = -4.9$ the
numerical solutions converge to the reference solution with
$M_0=15$, and that the solution with $M_0=10$ is almost identical to
the reference solution. The time evolution of $\sigma_{11}$ is plotted
in Figure \ref{fig:ex2_4p9_sigma11}, where the three results again lie
on top of each other, even for $M_0=5$. Moreover, from Figures
\ref{fig:ex2_4p9_2d} and \ref{fig:ex2_4p9_sigma11}, we find that
the time evolution of the distribution function with $\gamma = -4.9$
is slower than that with $\gamma=-3$, which is consistent with
the form of the FPL collision operator.
\begin{figure}
\caption{The case $\gamma = -4.9$. Comparison of numerical solutions
and the reference solution. The red dashed contours are the
reference solutions $M_0 = 15$. The blue solid contours at different
columns are respectively the numerical solutions $M_0 = 5$ and
$M_0=10$.}
\label{fig:ex2_4p9_2d}
\end{figure}
\begin{figure}
\caption{The case $\gamma = -4.9$. Evolution of
$\sigma_{11}$.}
\label{fig:ex2_4p9_sigma11}
\end{figure}
\subsection{Rosenbluth problem}
In this example, the Rosenbluth problem is tested, again for both the
Coulombian case $\gamma=-3$ and the case $\gamma = -4.9$. The initial
condition is taken from \cite{PARESCHI2000} as
\begin{equation}
\label{eq:ex3_ini}
f(0, \bv) = A \exp(-(B|\bv| -1)^2).
\end{equation}
The parameters $A$ and $B$ are normalized so that
the initial density and temperature both equal $1$, precisely
\begin{equation}
\label{eq:ex3_coe}
A = \frac{(b/3)^{3/2}}{a^{5/2}}, \qquad B =
\frac{(b/3)^{1/2}}{a^{1/2}},
\end{equation}
where $a = \pi\big(3\sqrt{\pi}({\rm erf}(1) + 1) + 2/e\big)$ and
$b = \pi\big(9.5\sqrt{\pi}({\rm erf}(1)+ 1) + 7/e\big)$. Here $e$ is Euler's
number and ${\rm erf}(x) = \frac{1}{\sqrt{\pi}}\int_{0}^{x}e^{-t^2}\dd
t$ is the error function.
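For reference, a short sketch (not part of the paper) evaluating these constants is given below; the parenthesization of $a$ is reconstructed by analogy with $b$, and the error-function convention of the text (prefactor $1/\sqrt{\pi}$) is implemented explicitly, so both should be treated as assumptions.
\begin{verbatim}
# Hedged sketch: the normalization constants A and B of (eq:ex3_coe).
import numpy as np
from scipy.special import erf as erf_std

# text's convention: (1/sqrt(pi)) * int_0^x exp(-t^2) dt = erf_std(x)/2
erf_text = lambda x: 0.5 * erf_std(x)
e = np.e
a = np.pi * (3.0 * np.sqrt(np.pi) * (erf_text(1.0) + 1.0) + 2.0 / e)  # grouping assumed
b = np.pi * (9.5 * np.sqrt(np.pi) * (erf_text(1.0) + 1.0) + 7.0 / e)
A = (b / 3.0)**1.5 / a**2.5
B = np.sqrt(b / 3.0) / np.sqrt(a)
print(A, B)
\end{verbatim}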
To approximate the initial distribution function well, we set $M=40$
in this numerical test. The initial MDFs are plotted in
Figure \ref{fig:ex3_init}, which shows that the numerical approximation
of the initial distribution function is essentially exact.
For this example, the three cases $M_0 = 5, 10, 15$ are again
tested, and the numerical solution with $M_0=15$ is treated as the
reference solution. The corresponding one- and two-dimensional
marginal distribution functions for the Coulombian case $\gamma = -3$ are
shown in Figures \ref{fig:ex3_1d} and \ref{fig:ex3_2d}; the
numerical solutions converge to the reference solution, and the one
with $M_0 = 10$ is almost identical to it.
Moreover, our new model also approximates the $\gamma = -4.9$ case
well. The two-dimensional marginal distribution functions for
$\gamma = -4.9$ are presented in Figure \ref{fig:ex3_4p9}, with the
numerical solution for $M_0=15$ again chosen as the reference
solution. As in the example of Section \ref{sec:ex2}, the time
evolution of the distribution function with $\gamma = -4.9$ is
slower than that with $\gamma = -3$. Furthermore, although there are
visible differences between the solutions with $M_0=5$ and
$M_0=15$, the numerical solutions converge to the reference
solution as $M_0$ increases.
\begin{figure}
\caption{Figure (a) is the initial marginal distribution function
$g(0, v_1)$. The red solid line corresponds to the exact solution,
and the blue dashed line corresponds to the numerical approximation.
Figure (b) is the initial marginal distribution functions
$h(0, v_1, v_2)$. The blue solid lines correspond to the exact
solution, and the red dashed lines correspond to the numerical
approximation. Figure (c) shows only the numerical approximation.}
\label{fig:ex3_init_1d}
\label{fig:ex3_init_2d_contour}
\label{fig:ex3_init_2d}
\label{fig:ex3_init}
\end{figure}
\begin{figure}
\caption{The Coulombian case $\gamma = -3$. Marginal distribution
functions at different times.}
\label{fig:ex3_1d}
\end{figure}
\begin{figure}
\caption{The Coulombian case $\gamma = -3$. Comparison of numerical
solutions and the reference solution $M_0 =15$. The red dashed
contours are the reference solution. The blue solid contours in
different columns are respectively the numerical solutions $M_0 = 5$
and $M_0=10$.}
\label{fig:ex3_2d}
\end{figure}
\begin{figure}
\caption{The Coulombian case $\gamma = -4.9$. Marginal distribution
functions at different times. The first row is the marginal
distribution $g(t, v_1)$, and the latter two rows are the marginal
distribution functions $h(t, v_1, v_2)$ with different $M_0$. The
red dashed contours are the reference solutions $M_0 = 15$. The blue
solid contours at different rows are respectively the numerical
solutions when $M_0 = 5$ and $M_0=10$.}
\label{fig:ex3_4p9}
\end{figure}
\section{Conclusion}
\label{sec:conclusion}
In this paper, we focus on applying the Hermite spectral method to
develop an efficient and accurate way of approximating and numerically
solving the Fokker-Planck-Landau equation. Basic properties of Hermite
polynomials are utilized to obtain a simplified expression of the
coefficients, which renders the numerical method feasible. Burnett
polynomials are introduced to deal with the super singular integral in
the computation, which extends the method to the practically relevant
range $\gamma>-5$.
A novel collision model is built by combining the quadratic
collision model with the linearized collision model proposed by
C. Villani \cite{Villani1998on}. The numerical experiments validate
the efficiency of this new model: the numerical solutions achieve
high accuracy at an affordable computational cost. This method should
be further validated in numerical tests for the full FPL equation with
spatial variables, which is left for future work.
\section{Appendix}
\label{sec:appendix}
\subsection{Proof of Theorem \ref{thm:coeA}}
\label{sec:Appendix_coeA}
In order to prove Theorem \ref{thm:coeA}, we first introduce the lemma
below:
\begin{lemma}
\label{thm:Hermite_cv}
Let $\bv = \bh + \bg/2$ and $\bw = \bh - \bg/2$. It holds that
\begin{displaymath}
\begin{split}
& H^{\alpha}(\bv) H^{\kappa}(\bw) = \sum_{\alpha'+\kappa' = \alpha+\kappa}
a_{\alpha'\kappa'}^{\alpha\kappa} H^{\alpha'}(\sqrt{2}\bh) H^{\kappa'} \left( \frac{\bg}{\sqrt{2}} \right),
\end{split}
\end{displaymath}
where the coefficients $a_{\alpha'\kappa'}^{\alpha\kappa}$ are
defined in \eqref{eq:coea}.
\end{lemma}
\begin{corollary}
\label{thm:Hermit_split}
Let $\bv = \bh + \bg/2$. We have
\begin{displaymath}
H^{\alpha}(\bv) = \sum_{\kappa + \lambda = \alpha}
\frac{2^{-|\alpha|/2} \alpha!}{\kappa!\lambda !}
H^{\kappa}\left(\sqrt{2} \bh\right)
H^{\lambda} \left( \frac{\bg}{\sqrt{2}} \right).
\end{displaymath}
\end{corollary}
The proof of Lemma \ref{thm:Hermite_cv} can be found in
\cite{QuadraticCol}. Next we will prove Theorem \ref{thm:coeA}.
{\renewcommand\proofname{Proof of Theorem \ref{thm:coeA}}
\begin{proof}
Using an integration by parts and the recursion formula of Hermite
polynomials
\begin{equation}
\label{eq:recursion_H}
\frac{\partial}{\partial v_s} \Big(\mM(\bv) H^{\alpha}(\bv)\Big) =
(-1)\mM(\bv)H^{\alpha+e_s}(\bv), \qquad s = 1, 2, 3,
\end{equation}
the coefficients $A_{\alpha}^{\lambda,\kappa}$ \eqref{eq:coeA_detail}
can be simplified as
\begin{equation}
\label{eq:coeA_detail1}
\begin{aligned}
A_{\alpha}^{\lambda,\kappa} = \sum_{s,t
=1}^3 & \frac{\Lambda}{(\alpha-e_s)!} \int_{\bbR^3\times\bbR^3}
\lvert \bv-\bv^*\rvert^\gamma G_{st}(\bv-\bv^*) \\
& H^{\alpha - e_s}(\bv^*) \mM(\bv)\mM(\bv^*)
\left(H^{\lambda}(\bv^{\ast})H^{\kappa+e_t}(\bv)-H^{\lambda}(\bv)H^{\kappa+e_t}(\bv^*)\right)\dd
\bv \dd \bv^{\ast},
\end{aligned}
\end{equation}
where $G_{st}(\bv)=-v_sv_t+\delta_{st}|\bv|^2$. Further
simplification of \eqref{eq:coeA_detail1} follows the method in
\cite{QuadraticCol}, where the velocity of the mass center is
defined as $\bh = (\bv+\bv^{\ast})/2$ and the relative velocity is
defined as $\bg = \bv - \bv^{\ast}$. Hence, it holds that
\begin{gather}
\label{eq:var_change}
\bv = \bh + \frac{1}{2} \bg, \quad
\bv^{\ast} = \bh - \frac{1}{2} \bg, \quad
|\bv|^2 + |\bv^{\ast}|^2 = \frac{1}{2} |\bg|^2 + 2|\bh|^2, \qquad
\mathrm{d}\bv \, \mathrm{d}\bv^{\ast} = \mathrm{d}\bg \, \mathrm{d}\bh.
\end{gather}
Combining Lemma \ref{thm:Hermite_cv}, \eqref{eq:coeA_detail1} and
\eqref{eq:var_change}, the integral in
$A_{\alpha}^{\lambda,\kappa}$ can be rewritten with respect to
$\bg$ and $\bh$
\begin{equation}
\label{eq:coeA_detail2}
\begin{aligned}
A_{\alpha}^{\lambda,\kappa} =2^{(\gamma/2+3 - |\alpha|)/2}\sum_{s,t=
1}^3\sum_{p+q=\alpha-e_s}
\sum_{r+\beta=\lambda+\kappa} \int_{\bbR^3\times \bbR^3} \lvert \bg
\rvert^\gamma \frac{\Lambda}{p!q!}
G_{st}(\bg) H^q(\bg) \mM(\bg) \mM(\bh) \\
\left(a_{(\beta+e_t)r}^{(\kappa+e_t)\lambda}H^r(\bg)H^{\beta+e_t}(\bh)H^p(\bh)
- a_{r(\beta+e_t)}^{\lambda(\kappa+e_t)}H^{\beta+e_t}(\bg)H^{r}(\bh)H^p(\bh)\right)
\dd \bg \dd \bh.
\end{aligned}
\end{equation}
Using the orthogonality of Hermite polynomials, we can finally
prove Theorem \ref{thm:coeA}.
\end{proof}
}
\subsection{Proof of Theorem \ref{thm:coeF}}
\label{sec:Appendix_coeF}
In order to prove Theorem \ref{thm:coeF}, we will introduce the lemma
below:
\begin{lemma}
\label{thm:coe_Y}
For three spherical harmonics $Y_l^m$, $Y_{l_1}^{m_1}$ and
$Y_{l_2}^{m_2}$, if $m \neq m_1 + m_2$, or $l \not\in [|l_1 - l_2|,
l_1 + l_2]$, then
\begin{equation}
\int_{\bbS^2} Y_{l_1}^{m_1}({\bn})Y_{l_2}^{m_2}({\bn})
\overline{Y_{l}^m({\bn})} \dd {\bn} = 0.
\end{equation}
Especially, we have
\begin{equation}
\label{eq:Y}
Y_l^m({\bn})Y_1^{\mu}(\bn) =
\sqrt{\frac{3}{4\pi}}\left(\eta_{l+1,m}^{\mu}Y_{l+1}^{m+\mu}({\bn})
+ (-1)^{\mu}\eta_{-l, m}^{\mu}Y_{l-1}^{m+\mu}({\bn})\right),
\qquad \mu = -1, 0, 1,
\end{equation}
where $\eta_{lm}^{\mu}$ is defined in \eqref{eq:coe_eta}.
\end{lemma}
The result of this lemma can be found in Section 12.9 of
\cite{Arfken}.
{\renewcommand\proofname{Proof of Theorem \ref{thm:coeF}}
\begin{proof}
Note that
\begin{equation}
\label{eq:n_Y}
\begin{aligned}
n_1 = \sqrt{\frac{2\pi}{3}}\left(Y_1^1 - Y_1^{-1}\right), \qquad
n_2 = -\mathrm{i} \sqrt{\frac{2\pi}{3}}\left(Y_1^1 + Y_1^{-1}\right), \qquad
n_3 = 2\sqrt{\frac{\pi}{3}}Y_1^0.
\end{aligned}
\end{equation}
Then, based on Lemma \ref{thm:coe_Y} and the property of spherical
harmonics $\overline{Y_l^m(\bn)} = (-1)^m Y_l^{-m}(\bn)$, we can derive the
results in Theorem \ref{thm:coeF} using the orthogonality of
spherical harmonics
\begin{equation}
\label{eq:orth_sp}
\int_{\bbS^2} Y_{l_1}^{m_1}(\bn) \overline{Y_{l_2}^{m_2}({\bn})}
\dd{\bn}=\delta_{l_1l_2}\delta_{m_1m_2}.
\end{equation}
\end{proof}
}
\subsection{Computation of Coefficients
$C_{\hat{\balpha}}^{\balpha}$}
\label{sec:Appendix_CoeC}
In this section, we briefly describe the algorithm to calculate
$C_{\hat{\balpha}}^{\balpha}$; the original algorithm can be found in
\cite{BurnettCol}.
Define
\begin{equation}
\label{eq:S}
S_{-1} = \frac{1}{2}(v_1 - \mathrm{i} v_2), \quad S_0 = v_3, \quad S_1 =
-\frac{1}{2}(v_1 + \mathrm{i} v_2),
\end{equation}
and the recursive formula of the basis functions \cite{Cai2018} is
\begin{equation}
\label{eq:recursive}
\begin{aligned}
& S_{\mu}B_{\hat{\balpha}}(\bv)= \frac{1}{2^{|\mu|/2}}
\left[\sqrt{2(\hat{\alpha}_1+\hat{\alpha}_3)+3}\eta_{\halpha_1+1,
\halpha_2}^{\mu}B_{\hat{\balpha} +(1, \mu,0)^T}(\bv) -
\sqrt{2\halpha_3}\eta_{\halpha_1+1, \halpha_2}^{\mu}B_{\hat{\balpha}+(1,\mu,-1)^T}(\bv) \right. \\
& \left. + (-1)^{\mu}\sqrt{2(\halpha_3+\halpha_1)+1}
\eta_{-\halpha_1,\halpha_2}^{\mu}B_{\hat{\balpha}+(-1,\mu,0)^T}(\bv)
-(-1)^{\mu}\sqrt{2(\halpha_3+1)}\eta_{-\halpha_1,\halpha_2}^{\mu}B_{\hat{\balpha}
+ (-1,\mu,1)^T}(\bv) \right],
\end{aligned}
\end{equation}
where $ \eta_{lm}^{\mu}$ is defined in \eqref{eq:coe_eta} and we set
$B_{\hat{\balpha}}(\bv) = 0$ if $|\halpha_2| > \halpha_1$ or either of
$\halpha_1, \halpha_3$ is negative. Based on the recursion formula of
Hermite polynomials
\begin{equation}
\label{eq:recursive_h}
v_s H^{\balpha}(\bv) = H^{\balpha+e_s}(\bv)
+ k_sH^{ \balpha - e_s}(\bv), \qquad s = 1,2,3,
\end{equation}
we can get the recursive formula to compute
$C_{\hat{\balpha}}^{\balpha}$, precisely
\begin{equation}
\label{eq:recursive_c}
\begin{aligned}
a_{\hat{\balpha} + e_2}^{(-1)}C_{\hat{\balpha} + e_1}^{\balpha} +
b_{\hat{\balpha}+e_2}^{(-1)}C_{\hat{\balpha} - e_1 +
e_3}^{\balpha} &= \frac{1}{2}k_1C_{\hat{\balpha}+e_2}^{\balpha -
e_1} -
\frac{\mathrm{i}}{2} k_2 C_{\hat{\balpha} + e_2}^{\balpha - e_2}, \\
a_{\hat{\balpha}}^{(0)}C_{\hat{\balpha}+e_1}^{\balpha} +
b_{\hat{\balpha}}^{(0)}C_{\hat{\balpha} - e_1 + e_3}^{\balpha} &=
k_3C_{\hat{\balpha}}^{\balpha - e_3}, \\
a_{\hat{\balpha} - e_2}^{(1)}C_{\hat{\balpha}+e_1}^{\balpha} +
b_{\hat{\balpha} - e_2}^{(1)}C_{\hat{\balpha} - e_1 + e_3}^{\balpha} &=
-\frac{1}{2}k_1C_{\hat{\balpha} - e_2}^{\balpha - e_1} - \frac{\mathrm{i}}{2}
k_2C_{\hat{\balpha} - e_2}^{\balpha- e_2},
\end{aligned}
\end{equation}
where $|\balpha| = |\hat{\balpha}|_B$ and
\begin{equation}
\label{eq:coe_a}
a_{\hat{\balpha}}^{(\mu)} = \frac{1}{2^{|\mu|/2}}\sqrt{(2(\halpha_1+\halpha_3)
+3)}\eta_{\halpha_1+1,\halpha_3}^{\mu},
\quad b_{\hat{\balpha}}^{(\mu)} = \frac{(-1)^{\mu+1}}{2^{|\mu|/2}}\sqrt{2(\halpha_3+1)
}\eta_{-\halpha_1,\halpha_3}^{\mu}, \quad \mu = -1, 0, 1.
\end{equation}
As stated in \cite{BurnettCol}, we solve for the coefficients
$C_{\hat{\balpha}}^{\balpha}$ in increasing order of $|\balpha|$, so that the
right-hand sides of \eqref{eq:recursive_c} are always known. The
initial condition and the boundary conditions are
$C_{\boldsymbol{0}}^{\boldsymbol{0}}=1$ and
$C_{\hat{\balpha}}^{\balpha} = 0$ if $|\halpha_2| > \halpha_1$ or
either of $\halpha_1, \halpha_3$ is negative. Moreover, the time
complexity for computing all the coefficients
$C_{\hat{\balpha}}^{\balpha}$ with
$|\hat{\balpha}|_B = |\balpha| \leqslant M$ is $O(M^5)$.
\end{document}
\begin{document}
\title{Morin singularities of coframes and frames}
\author{Camila M. \textsc{Ruiz}}
\address{Universidade de São Paulo\\
Instituto de Ciências Matemática e de Computação \\
Avenida Trabalhador São-carlense, 400 - Centro\\
CEP: 13566-590 - São Carlos - SP, Brasil}
\email{[email protected]}
\maketitle
\begin{abstract}
Inspired by the properties of an $n$-frame of gradients $(\nabla f_1, \ldots, \nabla f_n)$ of a Morin map $f:M\rightarrow\mathbb{R}^n$, with $\operatorname{dim} M\geq n$, we introduce the notion of Morin singularities in the context of singular $n$-coframes and singular $n$-frames. We also study the singularities of generic 1-forms associated to a Morin $n$-coframe, in order to generalize a result of T. Fukuda \cite[Theorem 1]{Fukuda}, which establishes a modulo 2 congruence between the Euler characteristic of a compact manifold $M$ and the Euler characteristics of the singular sets of a Morin map defined on $M$, to the case of Morin $n$-coframes and Morin $n$-frames.
\end{abstract}
\section{Introduction}\label{Introduction}
{Morin maps are maps that only admit Morin singularities. It is well known that these singularities are stable, and conversely, that stable map-germs which have corank 1 are Morin singularities. Therefore, Morin singularities are fundamental and frequently arise as singularities of maps from one manifold to another, as observed by K. Saji in \cite{Saji1}. Morin singularities have been studied by many authors in different contexts, such as \cite{Morin2,Ando,Fukuda,Saeki,SaekiSakuma}, and more recently \cite{KalmarTerpai,SzaboSzucsTerpai,Szucs,InaIshiKawaThang,Dutertrefukui,Saji1,Saji2,Saji3,Ruiz1}. In particular, papers of J.M. Èlia\v{s}berg \cite{Eliasberg}, J.R. Quine \cite{Quine}, T. Fukuda \cite{Fukuda}, O. Saeki \cite{Saeki} and N. Dutertre and T. Fukui \cite{Dutertrefukui} investigate relations between the topology of a manifold and the topology of the critical locus of maps with Morin singularities.}
Let $f:M^m\rightarrow\mathbb{R}^n$ be a smooth Morin map defined on an $m$-dimensional {Riemannian manifold} $M$, with $m\geq n$. The singular points of $f=(f_1, \ldots, f_n)$ are the points $x\in M$, such that the rank of the derivative $df(x)$ is equal to $n-1$. Then, taking the gradient of each coordinate function $f_1, \ldots, f_n$, we obtain a singular $n$-frame $(\nabla f_1(x), \ldots, \nabla f_n(x))$ defined on $M$ whose singular locus $\Sigma$ is given by $$\Sigma=\{x\in M \, | \, \operatorname{rank}(\nabla f_1(x), \ldots, \nabla f_n(x))=n-1\}.$$
It is well known that the singular sets of $f$, $A_k(f)$ and $\overline{A_k(f)}$ ($k=1, \ldots, n-1$), are submanifolds of $M$ of dimension $n-k$, such that $\overline{A_k(f)}=\cup_{i\geq k}A_i(f)$ and
\begin{equation*}\operatorname{rank} df_{|_{\overline{A_k(f)}}}(x)=\left\{\begin{array}{ll}
n-k, &\text{ if } x\in A_k(f);\\
n-k-1, &\text{ if } x\in \overline{A_{k+1}(f)};
\end{array}\right.\end{equation*} (see \cite{Fukuda}, \cite{Morin2}, \cite{Saeki} for Morin singularities). This means that the intersection of the vector space spanned by $\nabla f_1(x), \ldots, \nabla f_n(x)$ with the normal space to $\overline{A_k(f)}$ at $x$ is a subspace of dimension: $$\operatorname{dim}(\langle\nabla f_1(x), \ldots, \nabla f_n(x)\rangle\cap N_x\overline{A_k(f)})=\left\{\begin{array}{ll}
k-1, &\text{ if } x\in A_k(f);\\
k, &\text{ if } x\in \overline{A_{k+1}(f)}.
\end{array}\right.$$ In particular, if $x\in A_k(f)$ then $\langle\nabla f_1(x), \ldots, \nabla f_n(x)\rangle\pitchfork N_x\overline{A_k(f)}.$
{Furthermore, if $x\in \overline{A_{k+1}(f)}$} and $\{z_1(x), \ldots, z_{n-k-1}(x)\}$ is a basis of a vector space supplementary to $\langle\nabla f_1(x), \ldots, \nabla f_n(x)\rangle\cap {N_x\overline{A_{k}(f)}}$ in $\langle\nabla f_1(x), \ldots, \nabla f_n(x)\rangle$ then $$\operatorname{dim}(\langle z_1(x), \ldots, z_{n-k-1}(x)\rangle\cap N_x\overline{A_{k+1}(f)})=\left\{\begin{array}{ll}
0, &\text{ if } x\in A_{k+1}(f);\\
1, &\text{ if } x\in \overline{A_{k+2}(f)}.
\end{array}\right.$$
Based on properties of an $n$-frame of gradients $(\nabla f_1, \ldots, \nabla f_n)$ of a Morin map $f$, in this paper we introduce the notion of Morin singular points of type $A_k$ in the context of singular $n$-frames that are not necessarily gradients {(Definition \ref{def:nframe})} and $n$-coframes that are not necessarily differentials {(Definition \ref{def:ncoframe})}. To do this, in Section \ref{s1} we consider an $n$-coframe $\omega=(\omega_1, \ldots, \omega_n)$ with corank 1 {(Definition \ref{corank1})} defined on a smooth $m$-dimensional manifold $M$, with $m\geq n$, and we proceed by induction on $k$, for $k=1,\ldots,n$, in order to define Morin singular sets $\Sigma^k(\omega)$ and $A_k(\omega)$ {(Definitions \ref{defs1}, \ref{defiSk} and \ref{Morinsing})}. We will say that $\omega$ is a Morin $n$-coframe {(Definition \ref{def:ncoframe})} if it admits only Morin singular points, that is, if each singular point $x\in M$ of $\omega$ belongs to $A_k(\omega)$, for some $k=1, \ldots, n$ {(see Remark \ref{singak})}. {In particular, we show that the Morin singular sets $A_k(\omega)$ and $\Sigma^k(\omega)=\overline{A_k(\omega)}$ ($k=1, \ldots, n$) are smooth submanifolds of $M$ of dimension $n-k$ (Lemmas \ref{dimsigmaum} and \ref{skdimension}), such that $\overline{A_k(\omega)}=\cup_{i\geq k}A_i(\omega)$ (Remark \ref{akclosure}) and in Lemmas \ref{eqlocalSk} and \ref{eqloc3} we exhibit equations that define locally the singular sets $\Sigma^{k}(\omega)$.}
The definition of Morin singularities for $n$-coframes can be analogously adapted to $n$-frames as follows. When considering a smooth manifold $M$, differential 1-forms are naturally dual to vector fields; more specifically, if we fix a Riemannian metric on $M$ then there exists an isomorphism between the tangent and cotangent bundles of $M$, so that vector fields and 1-forms can be identified. To illustrate this notion, we give some examples {of Morin $n$-frames} at the end of Section \ref{s1}.
Let $L\in\mathbb{R} P^{n-1}$ be a straight line in $\mathbb{R}^n$ and let $\pi_L:\mathbb{R}^n\rightarrow L$ be the orthogonal projection to $L$. In \cite{Fukuda}, T. Fukuda applied Morse theory and well known properties of singular sets $A_k(f)$ of a Morin map $f:M\rightarrow\mathbb{R}^n$ to study the critical points of mappings $\pi_L\circ f:M\rightarrow L$ and their restrictions to the singular sets $\pi_L\circ f|_{A_k(f)}$ and $\pi_L\circ f|_{\overline{A_k(f)}}$. Similarly, in Sections \ref{s2} and \ref{s3} of this paper, we investigate the zeros of a generic 1-form $$\xi(x)=\displaystyle\sum_{i=1}^{n}{a_i\omega_i(x)}$$ associated to a Morin $n$-coframe $\omega=(\omega_1, \ldots, \omega_n)$ and we verify that $\xi$, $\xi_{|_{A_k(\omega)}}$ and $\xi_{|_{\overline{A_k(\omega)}}}$ have properties that are analogous to the properties of the generic orthogonal projections $\pi_L\circ f(x)$ associated to a Morin map $f=(f_1, \ldots, f_n)$ and of their restrictions. {More precisely, let $a=(a_1, \ldots, a_n)\in\mathbb{R}^n\setminus\{\vec{0}\}$ and let $\omega=(\omega_1, \ldots, \omega_n)$ be a Morin $n$-coframe defined on a manifold $M$, in Section \ref{s2} we prove that if $p\in M$ is a zero of $\xi(x)=\sum_{i=1}^{n}{a_i\omega_i(x)}$ then $p\in\Sigma^1(\omega)$ and $p$ is a zero of $\xi_{|_{\Sigma^1(\omega)}}$ (Lemma \ref{zeroszsobresigma1}). In Lemma \ref{lemazerosrestricoes}, we show that, for $k=0,\ldots,n-2$, if $p\in A_{k+1}(\omega)$ then $p$ is a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if $p$ is a zero of $\xi_{|_{\Sigma^{k}(\omega)}}$. And, in Lemma \ref{ptosansaozeros} we verify that if $p\in A_n(\omega)$ then $p$ is a zero of the restriction $\xi_{|_{\Sigma^{n-1}(\omega)}}$. Let $Z(\xi_{|_{\Sigma^k(\omega)}})$ be the zero set of the restriction of the 1-form $\xi$ to $\Sigma^k(\omega)$, we also prove in Lemmas \ref{lemainterzeroscomsigma2} and \ref{ptscrticrestasigmak} that for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $Z(\xi_{|_{\Sigma^k(\omega)}})\cap\Sigma^{k+2}(\omega)=\emptyset$, for $k=0, \ldots, n-2$.}
{In Section \ref{s3}, in Lemmas \ref{nondegeneratexi}, \ref{zerosnaodegdeak}, \ref{zerosnaodegdeaum} and \ref{nondegeneratexisk}, we prove that generically the 1-form $\xi(x)$ and its restrictions $\xi_{|_{\Sigma^k(\omega)}}$ and $\xi_{|_{A_k(\omega)}}$ admit only non-degenerate zeros. We also show that, for $k=0,\ldots, n-2$, if $p\in A_{k+1}(\omega)$ is a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ then, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{k}(\omega)}}$ (Lemmas \ref{nondegenerateequivalenceA1} and \ref{naodegkekmaisum}). Finally, in Lemma \ref{naodegan} we verify that, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, if $p\in A_n(\omega)$ then $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{n-1}(\omega)}}$.}
As a consequence of these results, we obtain a generalization of Fukuda's Theorem \cite[Theorem 1]{Fukuda} for the case of Morin $n$-coframes (Theorem \ref{fukudaparacampos}). { More precisely, we prove in Theorem \ref{fukudaparacampos} that if $\omega=(\omega_1, \ldots, \omega_n)$ is a Morin $n$-coframe defined on an $m$-dimensional compact manifold $M$ then $$\chi(M)\equiv\displaystyle\sum_{k=1}^{n}{\chi(\overline{A_k(\omega)})} \mod 2,$$ where $\chi(M)$ denotes the Euler characteristic of $M$.} We end the paper with this generalized theorem, whose proof uses the classical Poincaré-Hopf Theorem for 1-forms.
The author would like to express her sincere gratitude to Nicolas Dutertre and Nivaldo de Góes Grulha Júnior for fruitful discussions and valuable comments that resulted in this work.
The author was supported by CNPq, ``Conselho Nacional de Desenvolvimento Científico e Tecnológico'', Brazil (grants 143479/2011-3 and 209531/2014-2).
\section{The Morin $n$-coframes}\label{s1}
Let $M$ be a smooth manifold of dimension $m$ and $\omega=(\omega_1,\ldots,\omega_n)$ be a (singular) $n$-coframe, that is, a set of $n$ smooth 1-forms defined on $M$, with $m\geq n$: \begin{equation*}\begin{array}{llll}
\omega:& M&\rightarrow &{T^{\ast}M}^n\\
&x&\mapsto&(x,\omega_1(x),\cdots,\omega_n(x))
\end{array}
\end{equation*} where ${T^{\ast}M}^n=\{(x,\varphi_1,\ldots,\varphi_n) \ | \ x\in M; \ \varphi_i\in T^{\ast}_xM, i=1,\ldots,n\}$ is the ``$n$-cotangent bundle'' of $M$. Note that $T^{\ast}M^n$ is a smooth manifold of dimension $m(n+1)$, because it is locally diffeomorphic to $U\times M_{m,n}(\mathbb{R})$,
where $U\subset\mathbb{R}^m$ is an open set and $M_{m,n}(\mathbb{R})$ denotes the space of matrices of dimension $m\times n$ with real coefficients.
\begin{lema}\label{lemaTMnum} Let $T^{\ast}M^{n,n-1}\subset T^{\ast}M^n$ be the subset defined by $$T^{\ast}M^{n,n-1}=\left\{(x,\varphi_1,\ldots,\varphi_n)\in T^{\ast}M^n \ | \ \operatorname{rank}(\varphi_1,\ldots,\varphi_n)=n-1 \right\}.$$ Then $T^{\ast}M^{n,n-1}$ is a submanifold of $T^{\ast}M^n$ of dimension $n(m+1)-1$.
\end{lema}
\begin{proof} Let $M_{m,n}^{n-1}(\mathbb{R})$ be the submanifold of $M_{m,n}(\mathbb{R})$ of codimension $m-n+1$ given by the matrices of rank $n-1$ in $M_{m,n}(\mathbb{R})$, then $T^{\ast}M^{n,n-1}$ is locally diffeomorphic to $U\times M_{m,n}^{n-1}(\mathbb{R})$, for some open subset $U\subset\mathbb{R}^m$. Hence, $T^{\ast}M^{n,n-1}$ is a submanifold of $T^{\ast}M^n$ and $\operatorname{dim}(T^{\ast}M^{n,n-1})=n(m+1)-1$.
\end{proof}
\begin{defi}\label{corank1}
We say that $\omega=(\omega_1,\ldots,\omega_n)$ has $\operatorname{corank} 1$ if the following properties hold:
\begin{enumerate}[(a)]
\item $\omega\pitchfork T^{\ast}M^{n,n-1}$ in $T^{\ast}M^n$;
\item $\omega^{-1}(T^{\ast}M^{n,\leq n-2})=\emptyset$;
\end{enumerate}
where $T^{\ast}M^{n,\leq n-2}=\left\{(x,\varphi_1,\ldots,\varphi_n)\in T^{\ast}M^n \ | \ \operatorname{rank}(\varphi_1,\ldots,\varphi_n)\leq n-2 \right\}$.
\end{defi}
Note that by Definition \ref{corank1}, if an $n$-coframe $\omega=(\omega_1,\ldots,\omega_n)$ has $\operatorname{corank} 1$ then, for each $x\in M$, $\operatorname{rank}(\omega_1(x),\ldots,\omega_n(x))$ is either equal to $n$ or equal to $n-1$.
\begin{defi}\label{defs1}
Let $\omega=(\omega_1,\ldots,\omega_n)$ be an $n$-coframe with $\operatorname{corank} 1$. The singular set of $\omega$, $\Sigma^1(\omega)$, is the set of points $x\in M$ at which the rank is not maximal, that is $$\Sigma^1(\omega)=\{x\in M \ | \ \operatorname{rank}(\omega_1(x),\ldots,\omega_n(x))=n-1\}.$$
\end{defi}
\begin{lema}\label{dimsigmaum}
If $\omega$ is an $n$-coframe with $\operatorname{corank} 1$ then $\Sigma^1(\omega)$ is either the empty set or a submanifold of $M$ of dimension $n-1$.
\end{lema}
\begin{proof} Note that $\Sigma^1(\omega)=\omega^{-1}(T^{\ast}M^{n,n-1})$ and that $\omega\pitchfork T^{\ast}M^{n,n-1}$. Thus, if $\Sigma^1(\omega)\neq\emptyset$ then $\Sigma^1(\omega)$ is a submanifold of $M$ of codimension $m-n+1$, that is, $\operatorname{dim}(\Sigma^1(\omega))=n-1$.
\end{proof}
Let $\omega=(\omega_1,\ldots,\omega_n):M\rightarrow T^{\ast}M^n$ be an $n$-coframe with $\operatorname{corank} 1$ defined on an $m$-dimensional smooth manifold $M$. Next, we will define the subsets $A_k(\omega)$ and $\Sigma^{k+1}(\omega)$ of $M$, for $k=1,\ldots,n$. To do this we will proceed by induction on $k$ starting from the definition of the singular set $\Sigma^1(\omega)$.\\
\begin{nota} Let us denote by $\Sigma^0(\omega)$ the manifold $M$ and by $N_x^{\ast}\Sigma^0(\omega)=\{0\}$ the set that contains only the null 1-form of $T_x^{\ast}M$. { Moreover, if $S\subset M$ is a smooth submanifold of $M$, let us denote by $N^{\ast}_xS$ the set $N^{\ast}_xS=\{\psi\in T_{x}^{\ast}M \, | \, \psi(T_xS)=0\}.$}
\end{nota}
We know that $\Sigma^1(\omega)=\{x\in\Sigma^0(\omega) \, | \, \operatorname{rank}(\omega_1(x),\ldots,\omega_n(x))=n-1\}$ and that $\operatorname{dim}(\Sigma^1(\omega))=n-1$. In particular, $$p\in \Sigma^1(\omega)\Rightarrow\operatorname{dim}(\langle\omega_1(p),\ldots,\omega_n(p)\rangle\cap N_p^{\ast}\Sigma^0(\omega))=0,$$ where $\langle\omega_1(p),\ldots,\omega_n(p)\rangle$ is the vector subspace of $T_p^{\ast}M$ spanned by the 1-forms $\omega_1(p),\ldots,\omega_n(p)$.
Let us suppose that $\Sigma^i(\omega)$ is defined for $i=1,\ldots, k-1$ so that $\Sigma^i(\omega)$ is a smooth submanifold of $M$ of dimension $n-i$, $\Sigma^{i}(\omega)\subset\Sigma^{i-1}(\omega)$ and, { for $i=2,\ldots, k-1$}, $$p\in \Sigma^i(\omega)\Leftrightarrow\operatorname{dim}(\langle\omega_1(p),\ldots,\omega_n(p)\rangle\cap N_p^{\ast}\Sigma^{i-1}(\omega))=i-1,$$ where $\Sigma^i(\omega)$ is locally given by $$\mathcal{U}\cap\Sigma^i(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots= F_{m-n+i}(x)=0\}$$ {and $$\mathcal{U}\cap\Sigma^{i-1}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots= F_{m-n+i-1}(x)=0\},$$} for some open neighborhood $\mathcal{U}\subset M$ and smooth functions $F_i:\mathcal{U}\rightarrow\mathbb{R}$ whose derivatives $dF_i(x)\in T_{x}^{\ast}M$ are linearly independent for each $x\in\Sigma^i(\omega)\cap\mathcal{U}$. Also, $N_{x}^{\ast}\Sigma^{i-1}(\omega)$ is the vector subspace of $T_{x}^{\ast}M$ spanned by these derivatives, that is, $N_{x}^{\ast}\Sigma^{i-1}(\omega)=\langle dF_1(x), \ldots, dF_{m-n+i-1}(x)\rangle$.
We set $r=n-k+1$ and $(x,\varphi)=(x,\varphi_1,\ldots,\varphi_{r})$. In order to define $\Sigma^k(\omega)$ we first consider: $$T_{\Sigma^{k-1}}^{\ast}M^{r}=\{(x,\varphi) \ | \ x\in\Sigma^{k-1}(\omega); \varphi_1,\ldots,\varphi_{r}\in T_x^{\ast}M\}$$ and
\begin{equation*}
\begin{array}{rl}
N_{\Sigma^{k-1}}^{\ast}M^{r}=\{(x,\varphi)\in T_{\Sigma^{k-1}}^{\ast}M^{r} \ | & \operatorname{rank}(\varphi_1,\ldots,\varphi_{r})=r,\\
& \operatorname{dim}(\langle\varphi_1,\ldots,\varphi_{r}\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))=1\}.
\end{array}
\end{equation*}
\begin{lema} $T_{\Sigma^{k-1}}^{\ast}M^{r}$ is a smooth manifold of dimension $mr+r$.
\end{lema}
\begin{proof}
By the induction hypothesis, $\Sigma^{k-1}(\omega)$ is a smooth submanifold of $M$ of dimension $r$. Then, there exists an open subset $V\subset\mathbb{R}^{r}$ so that $T_{\Sigma^{k-1}}^{\ast}M^{r}$ is locally diffeomorphic to $V\times M_{m,r}(\mathbb{R})$. Thus, $T_{\Sigma^{k-1}}^{\ast}M^{r}$ is a smooth manifold and $\operatorname{dim}(T_{\Sigma^{k-1}}^{\ast}M^{r})=mr+r.$ \end{proof}
\begin{lema} \label{dimNSM} $N_{\Sigma^{k-1}}^{\ast}M^{r}$ is a hypersurface of $T_{\Sigma^{k-1}}^{\ast}M^{r}$, that is, a submanifold of dimension $mr+r-1$.
\end{lema}
\begin{proof} By the induction hypothesis, {for each $p\in\Sigma^{k-1}(\omega)$, there exist an open neighborhood $\mathcal{U}\subset M$ of $p$ and functions $F_1,\ldots,F_{m-r}:\mathcal{U}\rightarrow\mathbb{R}$ such that $$\mathcal{U}\cap\Sigma^{k-1}(\omega)=\{x\in\mathcal{U} \ | \ F_1(x)=\ldots=F_{m-r}(x)=0\}$$ with $\operatorname{rank}(dF_1(x),\ldots,dF_{m-r}(x))=m-r$, for each $x\in\Sigma^{k-1}(\omega)\cap\mathcal{U}.$}
If $(p,\tilde{\varphi})\in N_{\Sigma^{k-1}}^{\ast}M^{r}$ then $\operatorname{rank}(\tilde{\varphi}_1,\ldots,\tilde{\varphi}_{r})=r$ and $$\operatorname{rank} (\tilde{\varphi}_1,\ldots,\tilde{\varphi}_{r},dF_1(p), \ldots, dF_{m-r}(p))=m-1$$ since $N_{p}^{\ast}\Sigma^{k-1}(\omega)=\langle dF_1(p), \ldots, dF_{m-r}(p)\rangle$. Thus, $$\det(dF_1(p), \ldots, dF_{m-r}(p), \tilde{\varphi}_1,\ldots,\tilde{\varphi}_{r})=0$$ and fixing the notation $\tilde{\varphi}_i=(\tilde{\varphi}_i^1,\ldots,\tilde{\varphi}_i^m)$ for $i=1,\ldots,r,$ we can suppose without loss of generality that
\begin{equation*}
\left|\begin{array}{cccccc}
\displaystyle\frac{\partial F_1}{\partial x_1}(p)& \cdots &\displaystyle\frac{\partial F_{m-r}}{\partial x_1}(p) & \tilde{\varphi}_1^1&\cdots &\tilde{\varphi}_{r-1}^1\\
\vdots&\ddots&\vdots & \vdots &\ddots &\vdots\\
\displaystyle\frac{\partial F_1}{\partial x_{m-1}}(p)& \cdots &\displaystyle\frac{\partial F_{m-r}}{\partial x_{m-1}}(p) &\tilde{\varphi}_1^{m-1}&\cdots &\tilde{\varphi}_{r-1}^{m-1}
\end{array}\right|\neq0
\end{equation*} and consequently, that
\begin{equation}\label{matrizFU}
\left|\begin{array}{cccccc}
\displaystyle\frac{\partial F_1}{\partial x_1}(x)& \cdots &\displaystyle\frac{\partial F_{m-r}}{\partial x_1}(x) & \varphi_1^1&\cdots &\varphi_{r-1}^1\\
\vdots&\ddots&\vdots & \vdots &\ddots &\vdots\\
\displaystyle\frac{\partial F_1}{\partial x_{m-1}}(x)& \cdots &\displaystyle\frac{\partial F_{m-r}}{\partial x_{m-1}}(x) &\varphi_1^{m-1}&\cdots &\varphi_{r-1}^{m-1}
\end{array}\right|\neq0
\end{equation} for all $(x,\varphi)\in(\Sigma^{k-1}(\omega)\cap\mathcal{U})\times \mathcal{V}$, where $\mathcal{V}\subset\mathbb{R}^{mr}$ is an open subset. Thus, $N_{\Sigma^{k-1}}^{\ast}M^{r}$ can be locally defined by
\begin{equation}\label{eqlocalNSM}
N_{\Sigma^{k-1}}^{\ast}M^{r}=\{(x,\varphi)\in\mathcal{U}\times \mathcal{V} \ | \ F_1=\ldots=F_{m-r}=\Delta=0\},
\end{equation} where $\Delta(x,\varphi)=\det(dF_1(x),\ldots,dF_{m-r}(x),\varphi_1,\ldots,\varphi_{r})$.
Let $B(x,\varphi)$ be the square matrix of order $m$ whose columns are given by the coefficients of the 1-forms $dF_1(x)$, $\ldots$, $dF_{m-r}(x)$, $\varphi_1$, $\ldots$, $\varphi_{r}$:
\begin{equation*}B(x,\varphi)=\left(\begin{array}{cccccc}
dF_1(x) & \cdots & dF_{m-r}(x) & \varphi_1 & \cdots & \varphi_{r}
\end{array}\right).
\end{equation*} We have $$\Delta(x,\varphi)=\displaystyle\sum_{i=1}^{m}{\varphi_{r}^{i}\operatorname{cof}(\varphi_{r}^i,B)},$$ where $\operatorname{cof}(\varphi_{r}^i,B)$ denotes the cofactor of $\varphi_{r}^i$ in the matrix $B(x,\varphi)$, so that
\begin{equation*}
\displaystyle\frac{\partial\Delta}{\partial \varphi_{r}^{m}}(x,\varphi)=\displaystyle\sum_{i=1}^{m}{\operatorname{cof}(\varphi_{r}^i,B)\displaystyle\frac{\partial \varphi_{r}^{i}}{\partial \varphi_{r}^{m}} + \varphi_{r}^{i}\displaystyle\frac{\partial \operatorname{cof}(\varphi_{r}^{i},B)}{\partial \varphi_{r}^{m}}}
\end{equation*} and since $\operatorname{cof}(\varphi_{r}^{i},B)$ does not depend on the variable $\varphi_{r}^{m}$, $$\frac{\partial \operatorname{cof}(\varphi_{r}^{i},B)}{\partial \varphi_{r}^{m}}=0, \text{ for } i=1,\ldots,m.$$ Then, $$\displaystyle\frac{\partial\Delta}{\partial \varphi_{r}^{m}}(x,\varphi)=\operatorname{cof}(\varphi_{r}^{m},B)\overset{(\ref{matrizFU})}{\neq}0,$$ and the derivative of $\Delta(x,\varphi)$ with respect to $\varphi$ does not vanish, that is, $d_\varphi\Delta(x,\varphi)\neq0$ and the matrix
\begin{equation*}\left[{\begin{array}{c}
d F_1(x)\\
\vdots\\
d F_{m-r}(x)\\
d \Delta(x,\varphi)\\
\end{array}}\right]=\left[\setlength{\arraycolsep}{0.1cm}{
\begin{array}{ccc}
d_x F_1(x) & \vdots & \\
\vdots & \vdots & O_{(m-r)\times(r)} \\
d_x F_{m-r}(x) & \vdots & \\
\cdots \ \cdots \ \cdots \ \cdots \ \cdots & \vdots & \cdots \ \cdots \ \cdots \ \cdots \ \cdots \ \cdots \\
d_x\Delta(x,\varphi) & \vdots & d_\varphi\Delta(x,\varphi) \\
\end{array}}\right]
\end{equation*} has rank $m-r+1$, where $O_{(m-r)\times(r)}$ denotes a null matrix. Hence, $$\operatorname{rank}(d F_1(x),\ldots,d F_{m-r}(x),d\Delta(x,\varphi))=m-r+1,$$ for each $(x,\varphi)\in N_{\Sigma^{k-1}}^{\ast}M^{r}\cap(\mathcal{U}\times \mathcal{V})$ and, therefore, $N_{\Sigma^{k-1}}^{\ast}M^{r}$ is a smooth submanifold of $T_{\Sigma^{k-1}}^{\ast}M^{r}$ of dimension $m+mr-(m-r+1)=mr+r-1.$
\end{proof}
{By the induction hypothesis, we have that for each $p\in\Sigma^{k-1}(\omega)$, $$\omegaperatorname{dim}(\ellambdangle\omega_1(p),\elldots,\omega_{n}(p)\rangle\cap N_p^{\alphast}\Sigma^{k-2}(\omega))=k-2$$ and there exist an open neighborhood $\textsl{m}athcal{U}\subset M$ of $p$ and functions $F_1,\elldots,F_{m-r}:\textsl{m}athcal{U}\rightarrow\textsl{m}athbb{R}$ such that $\textsl{m}athcal{U}\cap\Sigma^{k-1}(\omega)=\{x\in\textsl{m}athcal{U} \ | \ F_1(x)=\elldots=F_{m-r}(x)=0\}$ with $\omegaperatorname{rank}(dF_1(x),\elldots,dF_{m-r}(x))=m-r$, for each $x\in\Sigma^{k-1}(\omega)\cap\textsl{m}athcal{U}.$ Then, we can choose $\{\Omega_1(x),\elldots,\Omega_{r}(x)\}$ a smooth $r$-coframe defined on $\textsl{m}athcal{U}$ which restriction to $\textsl{m}athcal{U}\cap \Sigma^{k-1}(\omega)$ is a smooth basis of a vector subspace supplementary to \betaegin{equation}\ellambdabel{base} \ellambdangle\omega_1(x),\elldots,\omega_{n}(x)\rangle\cap N_x^{\alphast}\Sigma^{k-2}(\omega)\end{equation} in $\ellambdangle\omega_1(x),\elldots,\omega_{n}(x)\rangle$.} Let $\Omega^{k-1}: \Sigma^{k-1}(\omega)\cap\textsl{m}athcal{U}\rightarrow T_{\Sigma^{k-1}}^{\alphast}M^{r}$ be the map given by $\Omega^{k-1}(x)=(x,\Omega_1(x),\elldots,\Omega_{r}(x))$, we define:
\begin{defi}\label{def2point3} We say that the $n$-coframe $\omega=(\omega_1,\ldots,\omega_{n})$ satisfies the ``intersection properties $I_k$'' if for each $p\in\Sigma^{k-1}(\omega)$ there exist an open neighborhood $\mathcal{U}\subset M$ of $p$ and a map $\Omega^{k-1}: \Sigma^{k-1}(\omega)\cap\mathcal{U}\rightarrow T_{\Sigma^{k-1}}^{\ast}M^{r}$ as defined above, such that on $\mathcal{U}$ the following properties hold:
\begin{enumerate}[(a)]
\item $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$;
\item $(\Omega^{k-1})^{-1}(N_{\Sigma^{k-1}}^{\ast}M^{r,\geq2})=\emptyset$;
\end{enumerate} where \small{$N_{\Sigma^{k-1}}^{\ast}M^{r,\geq2}=\{(x,\varphi)\in T_{\Sigma^{k-1}}^{\ast}M^{r} \ | \ \operatorname{rank}(\varphi_1,\ldots,\varphi_{r})=r,
\operatorname{dim}(\langle\varphi_1,\ldots,\varphi_{r}\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))\geq2\}$}.
\end{defi}
\normalsize
Note that, if the $n$-coframe $\omega$ satisfies the properties $I_{k}$ $(a)$ and $(b)$, then for each $x\in\Sigma^{k-1}(\omega)\cap\mathcal{U}$, $\operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))$ is either equal to $0$ or equal to $1$.
\begin{defi}\label{defiSk} Let $\omega$ be an $n$-coframe with $\operatorname{corank} 1$ that satisfies the intersection properties $I_{k}$ $(a)$ and $(b)$. We say that a point $x\in\Sigma^{k-1}(\omega)$ belongs to $A_{k-1}(\omega)$ if $$\operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))=0;$$ and we say that $x$ belongs to $\Sigma^{k}(\omega)$ if $x\in\Sigma^{k-1}(\omega)\setminus A_{k-1}(\omega)$, that is, if $$\operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))=1.$$ Therefore, \begin{equation*}\begin{array}{cl}
A_{k-1}(\omega)&=\{x\in\Sigma^{k-1}(\omega) \, | \, \operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))=0\};\\
\Sigma^{k}(\omega)&=\{x\in\Sigma^{k-1}(\omega) \, | \, \operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega))=1\}.
\end{array}\end{equation*}
\end{defi}
\begin{defi}\label{Morinsing} Let $\omega$ be an $n$-coframe with $\operatorname{corank} 1$ that satisfies the intersection properties $I_{k}$ $(a)$ and $(b)$. We say that a point $x\in M$ is a Morin singular point of type $A_{k}$ of the $n$-coframe $\omega$ if $x\in A_{k}(\omega)$.
\end{defi}
\begin{lema}\label{skdimension} By Definition \ref{def2point3}, $\Sigma^{k}(\omega)$ is either the empty set or a smooth submanifold of $M$ of dimension $n-k$.
\end{lema}
\begin{proof} Note that, locally, $\Sigma^{k}(\omega)=(\Omega^{k-1})^{-1}(N^{\ast}_{\Sigma^{k-1}}M^{r})$ and $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$; thus, if $\Sigma^k(\omega)\neq\emptyset$ then $\Sigma^k(\omega)$ is a smooth submanifold of $\Sigma^{k-1}(\omega)$ of codimension 1, that is, $\operatorname{dim}(\Sigma^k(\omega))=n-k$.
\end{proof}
\begin{lema}\label{dimensaointersecao} If $p\in\Sigma^{k}(\omega)$, then $\operatorname{dim}(\langle\omega_1(p),\ldots,\omega_{n}(p)\rangle\cap N_p^{\ast}\Sigma^{k-1}(\omega))=k-1.$
\end{lema}
\begin{proof} For clearer notation, let us write $\langle\bar{\omega}(x)\rangle=\langle\omega_1(x),\ldots,\omega_n(x)\rangle$ and $\langle\bar{\Omega}^{k-1}(x)\rangle=\langle\Omega_1(x),\ldots,\Omega_{r}(x)\rangle$.
Let $p\in\Sigma^{k}(\omega)$. Since $\Sigma^{k}(\omega)\subset\Sigma^{k-1}(\omega)\subset\Sigma^{k-2}(\omega)$, there exist an open neighborhood $\mathcal{U}\subset M$ of $p$ and functions $F_{1}, \ldots, F_{m-n+k}:\mathcal{U}\rightarrow\mathbb{R}$ such that the submanifolds $\Sigma^{i}(\omega)$, $i=k-2,k-1,k,$ can be locally defined by $$\mathcal{U}\cap\Sigma^{i}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)= \ldots= F_{m-n+i}(x)=0\},$$ where the derivatives $\{dF_1(x), \ldots, dF_{m-n+i}(x)\}$ are linearly independent 1-forms for each $x\in \Sigma^{i}(\omega)\cap\mathcal{U}$ and $N_{x}^{\ast}\Sigma^{i}(\omega)=\langle dF_1(x), \ldots, dF_{m-n+i}(x)\rangle$.
By the way the $r$-coframe $\{\Omega_1(x),\ldots,\Omega_{r}(x)\}$ has been chosen, for each $x\in\Sigma^{k-1}(\omega)\cap\mathcal{U}$ we have $$\langle\bar{\omega}(x)\rangle=(\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-2}(\omega))\oplus\langle\bar{\Omega}^{k-1}(x)\rangle,$$ and since $N_x^{\ast}\Sigma^{k-2}(\omega)\subset N_x^{\ast}\Sigma^{k-1}(\omega)$, the intersection $\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)$ is equal to $$(\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-2}(\omega))\oplus(\langle\bar{\Omega}^{k-1}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)).$$ Since $p\in\Sigma^{k}(\omega)\subset\Sigma^{k-1}(\omega)$, we know that $\operatorname{dim}(\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k-2}(\omega))=k-2$ and, by the definition of $\Sigma^{k}(\omega)$, $\operatorname{dim}(\langle\bar{\Omega}^{k-1}(p)\rangle\cap N_p^{\ast}\Sigma^{k-1}(\omega))=1$. Therefore, $\operatorname{dim}(\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k-1}(\omega))=(k-2)+1=k-1$. \end{proof}
Next, we will show that Definitions \ref{def2point3} and \ref{defiSk} do not depend on the choice of the basis $\{\Omega_1(x), \ldots, \Omega_{r}(x)\}$. To do this, we must first find equations that define the manifold $\Sigma^{k}(\omega)$ locally.
\begin{lema}\label{eqlocalSk} Let $p\in\Sigma^{k-1}(\omega)$. There are an open neighborhood $\mathcal{U}\subset M$ of $p$ and functions $F_i:\mathcal{U}\rightarrow\mathbb{R}$, $i=1, \ldots, m-r$, such that $$\mathcal{U}\cap\Sigma^{k-1}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-r}(x)=0\},$$ and a smooth $r$-coframe $\{\Omega_1(x), \ldots, \Omega_{r}(x)\}$ defined on $\mathcal{U}$ which is a basis of a vector subspace supplementary to $\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-2}(\omega)$ in $\langle\bar{\omega}(x)\rangle$ for each $x\in\mathcal{U}\cap\Sigma^{k-1}(\omega)$. Let $$\Delta_{k}(x)=\det(dF_1,\ldots,dF_{m-r},\Omega_1, \ldots, \Omega_{r})(x).$$ Then $\omega$ satisfies the intersection properties $I_k$ on $\mathcal{U}$ if and only if the following properties hold:
\begin{enumerate}[(i)]
\item $\operatorname{dim}\langle\Omega_1(x), \ldots, \Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)=0$ or $1$ for $x\in\mathcal{U}\cap\Sigma^{k-1}(\omega)$;\\
\item if $\operatorname{dim}\langle\Omega_1(x), \ldots, \Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)=1$ (or, equivalently, $\Delta_k(x)=0$), then $\operatorname{rank}(dF_1(x),\ldots, dF_{m-r}(x), d\Delta_{k}(x))=m-r+1$.
\end{enumerate}
In this case, $\mathcal{U}\cap\Sigma^{k}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-r}(x)=\Delta_k(x)=0\}.$
\end{lema}
\begin{proof}
First, let us show that for each $\bar{x}\in\mathcal{U}\cap\Sigma^k(\omega)$, $$\operatorname{rank}\left(dF_1(\bar{x}),\ldots,dF_{m-r}(\bar{x}),d\Delta_k(\bar{x})\right)$$ is equal to $m-r+1$ if and only if $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at $\bar{x}$.
By Lemma \ref{dimNSM}, $N_{\Sigma^{k-1}}^{\ast}M^{r}$ can be locally defined by $$N_{\Sigma^{k-1}}^{\ast}M^{r}=\{(x,\varphi)\in\mathcal{U}\times \mathcal{V} \ | \ F_1=\ldots=F_{m-r}=\Delta=0\},$$ where $\Delta(x,\varphi)=\det(dF_1(x),\ldots,dF_{m-r}(x),\varphi_1,\ldots,\varphi_{r})$ and $\mathcal{V}\subset\mathbb{R}^{mr}$. Let $$G(\Omega^{k-1})=\{(x,\Omega_1(x),\ldots,\Omega_{r}(x))\ | \ x\in\mathcal{U}\cap\Sigma^{k-1}(\omega)\}$$ be the restriction of the graph of $(\Omega_1(x),\ldots,\Omega_{r}(x))$ to $\mathcal{U}\cap\Sigma^{k-1}(\omega)$. Then $G(\Omega^{k-1})$ can be locally defined by
\begin{equation*}\label{eqlocalGz}\begin{array}{c}
G(\Omega^{k-1})=\{(x,\varphi)\in T^{\ast}M^{r} \ | \ F_1(x)=\ldots=F_{m-r}(x)=0;\\
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \Omega_i^j(x)-\varphi_i^j=0, \ i=1,\ldots,r \text{ and } j=1,\ldots,m\},
\end{array}
\end{equation*} where $T^{\ast}M^{r}$ denotes the $r$-cotangent bundle of $M$, $\Omega_i(x)=(\Omega_i^1(x),\ldots,\Omega_i^m(x))$ and $\varphi_i=(\varphi_i^1,\ldots,\varphi_i^m)$ for $i=1,\ldots, r$. In particular, the local equations of $G(\Omega^{k-1})$ are clearly independent and $\operatorname{dim} G(\Omega^{k-1})=r$.
Let $(x,\varphi)$ be local coordinates in $T^{\ast}M^{r}$, with $x=(x_1,\ldots,x_m)$ and $$\varphi=(\varphi_1^1,\ldots,\varphi_1^m,\varphi_2^1,\ldots,\varphi_2^m,\ldots,\varphi_{r}^1,\ldots,\varphi_{r}^m),$$ and let us consider the derivatives of the local equations of $N_{\Sigma^{k-1}}^{\ast}M^{r}$ and $G(\Omega^{k-1})$ with respect to $(x,\varphi)$.
We will denote the derivative with respect to $x$ by $d_x$ and the derivative with respect to $\varphi$ by $d_{\varphi}$; then we have
\begin{equation}\label{gradeqgraf}d\left( \Omega_i^j(x)-\varphi_i^j\right)=\left( d_x\Omega_i^j(x) \ , -d_{\varphi}\varphi_i^j \right),\end{equation} for $i=1,\ldots,r$ and $j=1,\ldots,m$, where $d_{\varphi} {\varphi}_i^j=(0,\ldots,0,1,0,\ldots,0)$ is the vector whose $(m(i-1)+j)$-th entry is equal to $1$ and the others are zero. By the generalized Laplace expansion, the determinant $\Delta(x,{\varphi})=\det(d F_1(x),\ldots,d F_{m-r}(x),{\varphi}_1,\ldots,{\varphi}_{r})$ can be written as $$\Delta(x,{\varphi})=\sum_I{F_I(x)N_I({\varphi})}$$ for $I=\{i_1,\ldots,i_{r}\}\subset\{1,\ldots,m\}$, where
\begin{equation}\label{NIU}N_I({\varphi})=\left|\begin{array}{ccc}
{\varphi}_1^{i_1} & \ldots & {\varphi}_{r}^{i_1}\\
\vdots & \ddots & \vdots\\
{\varphi}_1^{i_{r}} & \ldots & {\varphi}_{r}^{i_{r}}\\
\end{array}\right|
\end{equation} is the minor obtained from the matrix
\begin{equation*}\left[\begin{array}{ccc}
{\varphi}_1^{1} & \ldots & {\varphi}_{r}^{1}\\
\vdots & \ddots & \vdots\\
{\varphi}_1^{m} & \ldots & {\varphi}_{r}^{m}\\
\end{array}\right]
\end{equation*} by taking the rows $i_1,\ldots,i_{r}$, and
\begin{equation}\label{FI}F_I(x)=\pm\left|\begin{array}{ccc}
\displaystyle\frac{\partial F_1}{\partial x_{k_1}}(x) & \ldots & \displaystyle\frac{\partial F_{m-r}}{\partial x_{k_1}}(x)\\
\vdots & \ddots & \vdots\\
\displaystyle\frac{\partial F_{1}}{\partial x_{k_{m-r}}}(x) & \ldots & \displaystyle\frac{\partial F_{m-r}}{\partial x_{k_{m-r}}}(x)\\
\end{array}\right|
\end{equation} is, up to sign, the minor obtained from the matrix $(d F_1(x) \ \ldots \ d F_{m-r}(x))$ by removing the rows $i_1,\ldots,i_{r}$, that is, $\{k_1,\ldots,k_{m-r}\}=\{1,\ldots,m\}\setminus I$. Therefore,
$$d\Delta(x,\varphi)=\left( \ \displaystyle\sum_I{N_I(\varphi)d_xF_I(x)} \ , \ \displaystyle\sum_I{F_I(x)d_{\varphi}N_I(\varphi)} \ \right).$$
Note that $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at the point $x\in\mathcal{U}\cap\Sigma^{k-1}(\omega)$ if and only if $G(\Omega^{k-1})\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at $(x,\Omega^{k-1}(x))$. Let $\pi_1$ be the projection of the cotangent space of $T^{\ast}M^{r}$ onto the cotangent space of $T_{\Sigma^{k-1}}^{\ast}M^{r}$:
\begin{equation*}\begin{array}{cccc}
\pi_1: & T_{(x,\varphi)}^{\ast}(T^{\ast}M^{r}) & \longrightarrow & T_{(x,\varphi)}^{\ast}(T_{\Sigma^{k-1}}^{\ast}M^{r})\\
& (\psi(x),\varphi_1,\ldots,\varphi_{r}) & \longmapsto & (\pi(\psi(x)),\varphi_1,\ldots,\varphi_{r})
\end{array}
\end{equation*} where $\pi$ denotes the restriction to $T_x\Sigma^{k-1}(\omega)$, that is, $\pi(\psi(x))=\psi(x)_{|_{T_x\Sigma^{k-1}(\omega)}}$. By Equation (\ref{gradeqgraf}), $$\pi_1\left(d( \Omega_i^j(x)-\varphi_i^j)\right)=\left( \pi(d_x\Omega_i^j(x)) \ , -d_{\varphi}\varphi_i^j \right),$$ for $i=1,\ldots,r$ and $j=1,\ldots,m$. We also have that $$\pi_1\left(d\Delta(x,\varphi)\right)=\left( \ \pi\left(\displaystyle\sum_I{N_I(\varphi)d_xF_I(x)}\right) \ , \ \displaystyle\sum_I{F_I(x)d_{\varphi}N_I(\varphi)} \ \right).$$ Then, $G(\Omega^{k-1})\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at $(x,\Omega^{k-1}(x))$ if and only if the matrix
\begin{equation}\label{LijLdelta}\left[\begin{array}{ccc}
\pi(d_x\Omega_1^1(x)) & \vdots & \\
\vdots & \vdots & \\
\pi(d_x\Omega_1^m(x)) & \vdots & -Id_{mr} \\
\vdots & \vdots & \\
\pi(d_x\Omega_{r}^m(x)) & \vdots & \\
\cdots \ \cdots \ \cdots \ \cdots \ \cdots \ \cdots \ \cdots & \vdots & \cdots \ \cdots \ \cdots \ \cdots \ \cdots \ \cdots \\
\pi\left(\displaystyle\sum_I{N_I(\varphi)d_xF_I(x)}\right) & \vdots & \displaystyle\sum_I{F_I(x)d_{\varphi}N_I(\varphi)}
\end{array}\right]
\end{equation} has maximal rank at $x$. By the expression of $N_I(\varphi)$ in (\ref{NIU}), we have
\begin{equation}\label{dN}
d_{\varphi} N_I(\varphi)=\sum_{i,j}{\operatorname{cof}(\varphi_i^j)d_{\varphi} \varphi_i^j},
\end{equation} for $i=1,\ldots, r$ and $j\in I$, where $\operatorname{cof}(\varphi_i^j)$ denotes the cofactor of $\varphi_i^j$ in the matrix
\begin{equation*}\left[\begin{array}{ccc}
{\varphi}_1^{i_1} & \ldots & {\varphi}_{r}^{i_1}\\
\vdots & \ddots & \vdots\\
{\varphi}_1^{i_{r}} & \ldots & {\varphi}_{r}^{i_{r}}\\
\end{array}\right].
\end{equation*} Let $d=C_{m,r}=\displaystyle\frac{m!}{r!(m-r)!}$; we will denote by $I_1, \ldots, I_d$ the subsets of $\{1, \ldots, m\}$ containing exactly $r$ elements. By Equation (\ref{dN}), $$\displaystyle\sum_I{F_I(x)d_{\varphi}N_I({\varphi})}=\displaystyle\sum_{\ell=1}^{d}F_{I_{\ell}}(x)\left(\displaystyle\sum_{i=1}^{r}\displaystyle\sum_{j\in {I_{\ell}}}\operatorname{cof}({\varphi}_i^j)d_{\varphi} {\varphi}_i^j\right)$$ and
$$\begin{array}{l}
\displaystyle\sum_{\ell=1}^{d}F_{I_{\ell}}(x)\left(\displaystyle\sum_{i=1}^{r}\displaystyle\sum_{j\in {I_{\ell}}}\operatorname{cof}({\varphi}_i^j)d_{\varphi} {\varphi}_i^j\right)\\
=\displaystyle\sum_{i=1}^{r}\left[F_{I_1}(x)\left(\displaystyle\sum_{j\in I_1}\operatorname{cof}({\varphi}_i^j)d_{\varphi} {\varphi}_i^j\right)+ \ldots +F_{I_d}(x)\left(\displaystyle\sum_{j\in I_d}\operatorname{cof}({\varphi}_i^j)d_{\varphi} {\varphi}_i^j\right)\right]\\
=\displaystyle\sum_{i=1}^{r}\left[\left(\displaystyle\sum_{I: \, 1\in I}F_I(x)\right)\operatorname{cof}({\varphi}_i^1)d_{\varphi}{{\varphi}_i^1}+\ldots+\left(\displaystyle\sum_{I: \, m\in{I}}F_I(x)\right)\operatorname{cof}({\varphi}_i^m)d_{\varphi} {\varphi}_i^m \right]\\
=\displaystyle\sum_{i=1}^{r}\left[\displaystyle\sum_{j=1}^{m}\left(\displaystyle\sum_{I: \, j\in I}F_I(x)\right)\operatorname{cof}({\varphi}_i^j)d_{\varphi} {\varphi}_i^j\right].
\end{array}$$
Thus, for $i=1, \ldots, r$ and $j=1, \ldots, m$, we can write
\begin{equation}\label{Ldelta2}
\displaystyle\sum_I{F_I(x)d_{\varphi}N_I({\varphi})}= \displaystyle\sum_{i,j}{\beta_{i}^j(x,{\varphi})d_{\varphi} {\varphi}_i^j},
\end{equation} where $$\beta_{i}^j(x,{\varphi})=\left(\displaystyle\sum_{I: \, j\in I}F_I(x)\right)\operatorname{cof}({\varphi}_i^j).$$
We will denote the rows of the Matrix (\ref{LijLdelta}) by $R_i^j=\left( \pi(d_x\Omega_i^j(x)) \ , -d_{\varphi}{\varphi}_i^j \right)$, for $i=1,\ldots,r$ and $j=1,\ldots,m$, and we denote the last row of the Matrix (\ref{LijLdelta}) by $R_{\Delta}$.
Replacing the row $R_{\Delta}$ by $$R_{\Delta}+\sum_{i,j}\beta_i^j(x,{\varphi})R_i^j,$$ for $i=1,\ldots,r$ and $j=1, \ldots, m$, we obtain a new matrix
\begin{equation}\label{novaLijLdelta}\left[\begin{array}{ccc}
\pi(d_x\Omega_1^1(x)) & \vdots & \\
\vdots & \vdots & -Id_{mr} \\
\pi(d_x\Omega_{r}^m(x)) & \vdots & \\
\cdots \ \cdots \ \cdots \ \cdots \ \cdots \ \cdots & \vdots & \cdots \ \cdots \ \cdots \ \cdots \ \cdots \\
R_{\Delta}' & \vdots & R_{\Delta}''
\end{array}\right]
\end{equation} which has rank equal to the rank of the Matrix (\ref{LijLdelta}), where
\begin{equation*}
R_{\Delta}''=\displaystyle\sum_I{F_I(x)d_{\varphi}N_I({\varphi})}+\sum_{i,j}\beta_i^j(x,{\varphi})(-d_{\varphi} {\varphi}_i^j)\overset{(\ref{Ldelta2})}{=}\vec{0}
\end{equation*} and
\begin{equation*}\begin{array}{ll}
R_{\Delta}'&=\pi\left(\displaystyle\sum_I{N_I({\varphi})d_xF_I(x)}\right)+ \displaystyle\sum_{i,j}\beta_i^j(x,{\varphi})\pi\left(d_x \Omega_i^j(x)\right)\\
&=\pi\left(\displaystyle\sum_I{N_I({\varphi})d_xF_I(x)}+ \sum_{i,j}\beta_i^j(x,{\varphi})d_x \Omega_i^j(x)\right).
\end{array}
\end{equation*} Note that for each $\bar{x}\in\mathcal{U}\cap\Sigma^{k}(\omega)$, we have $\Omega_i^j(\bar{x})={\varphi}_i^j$. In this case, Equation (\ref{Ldelta2}) implies that $$\displaystyle\sum_{i,j}\beta_i^j(\bar{x},{\varphi})d_x \Omega_i^j(\bar{x})=\sum_{i,j}\beta_i^j(\bar{x},\Omega^{k-1}(\bar{x}))d_x \Omega_i^j(\bar{x})=\displaystyle\sum_I{F_I(\bar{x})d_xN_I(\Omega^{k-1}(\bar{x}))}.$$ Thus, at $\bar{x}$, $$R_{\Delta}'=\pi\left(\displaystyle\sum_I{N_I(\Omega^{k-1}(\bar{x}))d_xF_I(\bar{x})}+ \displaystyle\sum_I{F_I(\bar{x})d_xN_I(\Omega^{k-1}(\bar{x}))}\right)=\pi(d\Delta_k(\bar{x}))$$ and the Matrix (\ref{novaLijLdelta}) is equal to
\begin{equation*}\left[\begin{array}{ccc}
\pi(d_x\Omega_1^1(\bar{x})) & \vdots & \\
\vdots & \vdots & -Id_{mr} \\
\pi(d_x\Omega_{r}^m(\bar{x})) & \vdots & \\
\cdots \ \cdots \ \cdots \ \cdots \ \cdots \ \cdots & \vdots & \cdots \ \cdots \ \cdots \ \cdots \ \cdots \\
\pi(d\Delta_k(\bar{x})) & \vdots & \vec{0}
\end{array}\right].
\end{equation*} Thus, for each $\bar{x}\in\mathcal{U}\cap\Sigma^{k}(\omega)$, $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at $\bar{x}$ if and only if $\pi(d\Delta_k(\bar{x}))\neq0$, that is, the restriction of $d\Delta_k(\bar{x})$ to $T_{\bar{x}}\Sigma^{k-1}(\omega)$ is not zero, which means that $d\Delta_k(\bar{x})\notin\langle d F_1(\bar{x}),\ldots,d F_{m-r}(\bar{x})\rangle$, or equivalently $\operatorname{rank}\left(dF_1(\bar{x}),\ldots,dF_{m-r}(\bar{x}),d\Delta_k(\bar{x})\right)=m-r+1$.
Now suppose that $\omega$ satisfies the intersection properties $I_k$ on $\mathcal{U}$. By property $(b)$ of Definition \ref{def2point3}, we have that $\operatorname{dim}\langle\Omega_1(x), \ldots, \Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)$ is either equal to $0$ or equal to $1$ for $x\in\mathcal{U}\cap\Sigma^{k-1}(\omega)$. If $\operatorname{dim}\langle\Omega_1(x), \ldots, \Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)=1$, then $\Delta_k(x)=0$ and $x\in\mathcal{U}\cap\Sigma^{k}(\omega)$. In this case, the transversality given by property $(a)$ of Definition \ref{def2point3} implies that $\operatorname{rank}\left(dF_1(x),\ldots,dF_{m-r}(x),d\Delta_k(x)\right)=m-r+1$.
Conversely, assume that properties $(i)$ and $(ii)$ hold for each $x\in\mathcal{U}\cap\Sigma^{k-1}(\omega)$. By property $(i)$, the intersection property $(b)$ of Definition \ref{def2point3} holds on $\mathcal{U}$. If $\operatorname{dim}\langle\Omega_1(x), \ldots, \Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)=0$, then $\Omega^{k-1}(x)$ does not intersect $N_{\Sigma^{k-1}}^{\ast}M^r$, thus $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at $x$. If $\operatorname{dim}\langle\Omega_1(x), \ldots, \Omega_{r}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)=1$, then $x\in\mathcal{U}\cap\Sigma^{k}(\omega)$ by Definition \ref{defiSk} and $\operatorname{rank}\left(dF_1(x),\ldots,dF_{m-r}(x),d\Delta_k(x)\right)=m-r+1$ by property $(ii)$. Thus $\Omega^{k-1}\pitchfork N_{\Sigma^{k-1}}^{\ast}M^{r}$ in $T_{\Sigma^{k-1}}^{\ast}M^{r}$ at $x$ and $\omega$ satisfies the intersection properties $I_k$ on $\mathcal{U}$.
Finally, if $\omega$ satisfies the intersection properties $I_k$ on $\mathcal{U}$, it follows by Definition \ref{defiSk} that $\mathcal{U}\cap\Sigma^{k}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-r}(x)=\Delta_k(x)=0\}.$
\end{proof}
The following technical lemma will be used in the proof of Lemma \ref{indepdabase}.
\begin{lema}\label{lematecnicodois} Let $f_i:\mathcal{V}\subset\mathbb{R}^{\ell}\rightarrow\mathbb{R}$, $i=1, \ldots,s$, be smooth functions defined on an open subset $\mathcal{V}$ of $\mathbb{R}^{\ell}$. Let $M\subset\mathbb{R}^{\ell}$ be a manifold given locally by $M=\{x\in\mathcal{V} \, | \, f_1(x)=\ldots=f_s(x)=0\}$, with $\operatorname{rank}(df_1(x), \ldots,df_s(x))=s$, for all $x\in M\cap\mathcal{V}$. If $g,h:\mathcal{V}\subset\mathbb{R}^{\ell}\rightarrow\mathbb{R}$ are smooth functions such that $g(x)=\lambda(x)h(x)$, for all $x\in M\cap\mathcal{V}$ and some smooth function $\lambda:\mathcal{V}\rightarrow\mathbb{R}$, then:
\begin{enumerate}[(i)]
\item If $\lambda(x)\neq0$ and $x\in M$, then $g(x)=0\Leftrightarrow h(x)=0.$
\item If $\lambda(x)\neq0$, $x\in M$ and $h(x)=0$, then $$\langle df_1(x), \ldots, df_s(x),dg(x)\rangle=\langle df_1(x), \ldots, df_s(x), dh(x)\rangle.$$
\end{enumerate}
\end{lema}
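One way to verify these statements: item $(i)$ is immediate from the identity $g(x)=\lambda(x)h(x)$ on $M\cap\mathcal{V}$. For item $(ii)$, since $g-\lambda h$ vanishes identically on $M\cap\mathcal{V}$, its differential at a point $x\in M\cap\mathcal{V}$ annihilates $T_xM$, so
\begin{equation*}
dg(x)-\lambda(x)\,dh(x)-h(x)\,d\lambda(x)=d(g-\lambda h)(x)\in N_x^{\ast}M=\langle df_1(x),\ldots,df_s(x)\rangle .
\end{equation*}
When $h(x)=0$ this gives $dg(x)-\lambda(x)\,dh(x)\in\langle df_1(x),\ldots,df_s(x)\rangle$, and $\lambda(x)\neq0$ then yields the stated equality of spans.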
\begin{lema}\label{indepdabase} The definitions of $\Sigma^{k+1}(\omega)$ and $A_k(\omega)$ do not depend on the choice of the basis $\{\Omega_1, \ldots, \Omega_{n-k}\}$, for every $k\geq 1$.
\end{lema}
\begin{proof} As for the definitions of $\Sigma^{k+1}(\omega)$ and $A_k(\omega)$, for $k\geq 1$, we will proceed by induction on $k$. First, note that the definition of $\Sigma^1(\omega)$ does not depend on the choice of any basis. Then, assume as induction hypothesis that the definition of $\Sigma^i(\omega)$ does not depend on the choice of the basis for every $i\leq k$.
We know that, for each $p\in\Sigma^k(\omega)$, there is an open neighborhood $\mathcal{U}\subset M$ of $p$ so that
\begin{equation*}\begin{array}{cll}
\mathcal{U}\cap\Sigma^{k}(\omega)&=&\{x\in\mathcal{U}: F_1(x)=\ldots=F_{m-n+1}(x)=\Delta_2(x)=\ldots=\Delta_{k}(x)=0\},\\
\mathcal{U}\cap\Sigma^{k+1}(\omega)&=&\{x\in\mathcal{U}: F_1(x)=\ldots=F_{m-n+1}(x)=\Delta_2(x)=\ldots=\Delta_{k+1}(x)=0\},\\
\end{array}
\end{equation*} with $\operatorname{rank}(dF_1(x),\ldots,dF_{m-n+1}(x),d\Delta_2(x),\ldots,d\Delta_{k}(x))=m-n+k$, for $x\in\mathcal{U}\cap\Sigma^{k}(\omega)$, and $\operatorname{rank}(dF_1(x),\ldots,dF_{m-n+1}(x),d\Delta_2(x),\ldots,d\Delta_{k+1}(x))=m-n+k+1$, for $x\in\mathcal{U}\cap\Sigma^{k+1}(\omega)$.
Let us recall that $$\Delta_{k+1}(x)=\det(dF_1,\ldots,dF_{m-n+1},d\Delta_2,\ldots,d\Delta_{k}, \Omega_1, \ldots, \Omega_{n-k})(x),$$ where $\{\Omega_1(x), \ldots, \Omega_{n-k}(x)\}$ is a smooth $(n-k)$-coframe defined on $\mathcal{U}$ which is a basis of a vector subspace supplementary to $\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)$ in $\langle\bar{\omega}(x)\rangle$ for each $x\in\mathcal{U}\cap\Sigma^k(\omega)$.
Let us consider a smooth $(n-k)$-coframe $\{\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\}$ defined on $\mathcal{U}$ such that, for each $x\in\mathcal{U}\cap\Sigma^k(\omega)$, $\{\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\}$ is another basis of a vector subspace supplementary to $\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)$ in $\langle\bar{\omega}(x)\rangle$. Then, $$\langle\bar{\omega}(x)\rangle=\left(\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)\right)\oplus\langle\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\rangle$$ and $\operatorname{dim}(\langle\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\rangle\cap N_x^{\ast}\Sigma^{k}(\omega))$ is either equal to $0$ or equal to $1$, for $x\in\mathcal{U}\cap\Sigma^k(\omega)$. Moreover,
\begin{equation*}\left\{\begin{array}{l}
\tilde{\Omega}_1(x)=\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell1}(x)\Omega_{\ell}(x)} + \varphi_1(x)\\
\tilde{\Omega}_2(x)=\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell2}(x)\Omega_{\ell}(x)} + \varphi_2(x)\\
\vdots\\
\tilde{\Omega}_{n-k}(x)=\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell(n-k)}(x)\Omega_{\ell}(x)} + \varphi_{n-k}(x)\\
\end{array}\right.
\end{equation*} where $a_{ij}(x)\in\mathbb{R}$ and $\varphi_j(x)\in\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)$, for $j=1, \ldots, n-k$. We will show that for each $x\in\mathcal{U}\cap\Sigma^k(\omega)$, $$\det(A(x))=\left|\begin{array}{cccc}
a_{11}(x) & a_{12}(x) & \cdots & a_{1(n-k)}(x)\\
\vdots & \vdots & \ddots & \vdots\\
a_{(n-k)1}(x) & a_{(n-k)2}(x) & \cdots & a_{(n-k)(n-k)}(x)\\
\end{array}\right|\neq0.$$
Suppose that the statement is false, that is, $\det(A(x))=0$.
This means that the columns of the matrix $A(x)$ are linearly dependent.
So we can suppose, without loss of generality, that the first column of $A(x)$ can be written as a linear combination of the other columns: $$(a_{11}(x), \ldots, a_{(n-k)1}(x))=\displaystyle\sum_{s=2}^{n-k}{\lambda_s(a_{1s}(x), \ldots, a_{(n-k)s}(x))},$$ where $\lambda_s\in\mathbb{R}$, for $s=2, \ldots, n-k$. Thus, omitting $x$ in the notation, we have
$$\begin{array}{ll}
\tilde{\Omega}_1=\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell1}\Omega_{\ell}} + \varphi_1 &\Rightarrow\tilde{\Omega}_1=\displaystyle\sum_{\ell=1}^{n-k}{\left(\displaystyle\sum_{s=2}^{n-k}{\lambda_sa_{\ell s}}\right)\Omega_{\ell}} + \varphi_1\\
&\Rightarrow\tilde{\Omega}_1=\displaystyle\sum_{s=2}^{n-k}{\lambda_s\left(\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell s}\Omega_{\ell}}\right)} + \varphi_1\\
\end{array}$$ and then $$\begin{array}{lcl}
\tilde{\Omega}_1-\displaystyle\sum_{s=2}^{n-k}{\lambda_s\tilde{\Omega}_s}&=&\left[\displaystyle\sum_{s=2}^{n-k}{\lambda_s\left(\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell s}\Omega_{\ell}}\right)} + \varphi_1\right] -\displaystyle\sum_{s=2}^{n-k}{\lambda_s\left(\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell s}\Omega_{\ell}} + \varphi_s\right)}\\
&=&\varphi_1-\displaystyle\sum_{s=2}^{n-k}{\lambda_s\varphi_s}.
\end{array}$$ This means that $$\tilde{\Omega}_1-\displaystyle\sum_{s=2}^{n-k}{\lambda_s\tilde{\Omega}_s}\in\left(\langle\bar{\omega}\rangle\cap N_x^{\ast}\Sigma^{k-1}(\omega)\right)\cap\langle\tilde{\Omega}_1, \ldots, \tilde{\Omega}_{n-k}\rangle=\{0\},$$ that is, $\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)$ are linearly dependent. However, this contradicts the initial assumption that $\{\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\}$ is a basis of a vector subspace for each $x$ in $\mathcal{U}\cap\Sigma^k(\omega)$. Therefore, $\det(A(x))\neq0$.
Let ${}^t\!A(x)$ be the transpose of the matrix $A(x)$. For each $x\in\mathcal{U}\cap\Sigma^{k}(\omega)$, we have $\det({}^t\!A(x))=\det(A(x))\neq0$ and, again omitting $x$ in the notation,
$$\begin{array}{lll}
\tilde{\Delta}_{k+1}&=&\det(dF_1,\ldots,dF_{m-n+1},d\Delta_2,\ldots,d\Delta_{k},\tilde{\Omega}_1, \ldots, \tilde{\Omega}_{n-k})\\
&=&\det(dF_1,\ldots,dF_{m-n+1},d\Delta_2,\ldots,d\Delta_{k},\displaystyle\sum_{\ell=1}^{n-k}{a_{\ell1}\Omega_{\ell}}, \ldots, \displaystyle\sum_{\ell=1}^{n-k}{a_{\ell(n-k)}\Omega_{\ell}})\\
&=&\det({}^t\!A)\det(dF_1,\ldots,dF_{m-n+1},d\Delta_2,\ldots,d\Delta_{k},\Omega_1, \ldots, \Omega_{n-k})\\
&=&\det({}^t\!A)\Delta_{k+1}.
\end{array}$$ So, by statement $(i)$ of Lemma \ref{lematecnicodois}, $\tilde{\Delta}_{k+1}(x)=0\Leftrightarrow \Delta_{k+1}(x)=0$ for $x\in\mathcal{U}\cap\Sigma^{k}(\omega)$. Since $\Delta_{k+1}(x)=0$ if and only if $\operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{n-k}(x)\rangle\cap N_x^{\ast}\Sigma^{k}(\omega))=1$, and $\tilde{\Delta}_{k+1}(x)=0$ if and only if $\operatorname{dim}(\langle\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\rangle\cap N_x^{\ast}\Sigma^{k}(\omega))=1$, by Definition \ref{defiSk} we have that
\begin{equation*}\begin{array}{ccl}
x\in\mathcal{U}\cap\Sigma^{k+1}(\omega) & \Leftrightarrow & \operatorname{dim}(\langle\Omega_1(x),\ldots,\Omega_{n-k}(x)\rangle\cap N_x^{\ast}\Sigma^{k}(\omega))=1\\
& \Leftrightarrow &\Delta_{k+1}(x)=0\\
& \Leftrightarrow &\tilde{\Delta}_{k+1}(x)=0\\
& \Leftrightarrow & \operatorname{dim}(\langle\tilde{\Omega}_1(x), \ldots, \tilde{\Omega}_{n-k}(x)\rangle\cap N_x^{\ast}\Sigma^{k}(\omega))=1.
\end{array}
\end{equation*}
In particular, if $x\in\mathcal{U}\cap\Sigma^{k+1}(\omega)$, we have $\Delta_{k+1}(x)=0$ and $\tilde{\Delta}_{k+1}(x)=0$, so that, by statement $(ii)$ of Lemma \ref{lematecnicodois},
$$\begin{array}{l}
\langle dF_1(x),\ldots,dF_{m-n+1}(x),d\Delta_2(x),\ldots,d\Delta_{k}(x), d\Delta_{k+1}(x)\rangle\\
=\langle dF_1(x),\ldots,dF_{m-n+1}(x),d\Delta_2(x),\ldots,d\Delta_{k}(x), d\tilde{\Delta}_{k+1}(x)\rangle,\end{array}$$ which implies that $\operatorname{rank}(dF_1(x),\ldots,dF_{m-n+1}(x),d\Delta_2(x),\ldots,d\Delta_{k}(x), d\tilde{\Delta}_{k+1}(x))$ is equal to $m-n+k+1$.
Therefore, the intersection properties $I_{k+1}$ and the definition of $\Sigma^{k+1}(\omega)$ do not depend on the choice of the basis $\{\Omega_1(x),\ldots,\Omega_{n-k}(x)\}$. Since $A_k(\omega)=\Sigma^k(\omega)\setminus\Sigma^{k+1}(\omega),$ we conclude that $A_k(\omega)$ also does not depend on the choice of the basis. \end{proof}
\begin{obs}\label{akclosure} It is not difficult to see that $\Sigma^k(\omega)$ is a closed submanifold of $M$, for $k\geq1$. Moreover, we can write $$\Sigma^k(\omega)=A_k(\omega)\cup\Sigma^{k+1}(\omega)=\displaystyle\bigcup_{i\geq k}A_i(\omega),$$
so that $A_k(\omega)=\Sigma^k(\omega)\setminus\Sigma^{k+1}(\omega).$ That is, the singular sets $A_k(\omega)$ are $(n-k)$-dimensional submanifolds of $M$ such that $\overline{A_k(\omega)}=\Sigma^k(\omega)$.
\end{obs}
Finally, based on the previous considerations, we define:
\begin{defi}\label{def:ncoframe} An $n$-coframe $\omega$ is a Morin $n$-coframe if $\omega$ has $\operatorname{corank} 1$ and it satisfies the intersection properties $I_{k}$ $(a)$ and $(b)$ for $k=2,\ldots,n$.
\end{defi}
\begin{obs}\label{singak} By Definition \ref{def:ncoframe}, if $\omega$ is a Morin $n$-coframe then $\omega$ admits only singular points of type $A_{k}$ for $k=1,\ldots,n$.
\end{obs}
As we mentioned in Section \ref{Introduction}, once a Riemannian metric on $M$ is fixed, we can consider vector fields instead of 1-forms and define the notion of Morin $n$-frames analogously to the definition of Morin $n$-coframes:
\begin{defi}\label{def:nframe} An $n$-frame $V=(V_1,\ldots, V_n): M\rightarrow TM^n$ is a Morin $n$-frame if $V$ has $\operatorname{corank} 1$ and it satisfies the intersection properties $I_{k}$ $(a)$ and $(b)$ for $k=2,\ldots,n$.
\end{defi}
Next, we present some examples of Morin $n$-frames.
\begin{ex}\label{ex1} Let $f:M^m\rightarrow\mathbb{R}^n$ be a smooth Morin map defined on an $m$-dimensional Riemannian manifold $M$, with $m\geq n$. The $n$-frame $V(x)=(\nabla f_1(x), \ldots, \nabla f_n(x))$ given by the gradients of the coordinate functions of $f$ is, clearly, a Morin $n$-frame whose singular points are the same as the singular points of $f$. That is, $A_k(V)=A_k(f)$, $\forall k=1, \ldots, n$.
\end{ex}
\begin{ex}\label{ex2} Let $a\in\mathbb{R}$ be a regular value of a $C^2$ mapping $f:\mathbb{R}^3\rightarrow\mathbb{R}$. Suppose that $M=f^{-1}(a)$ and consider the $2$-frame $V=(V_1,V_2)$ on $M$ given by
\begin{equation*}\begin{array}{ccc}
V_1(x)&=&(-f_{x_2}(x),f_{x_1}(x),0);\\
V_2(x)&=&(-f_{x_3}(x),0,f_{x_1}(x)).
\end{array}
\end{equation*} Since $a$ is a regular value of $f$, we have that $\nabla f(x)=(f_{x_1}(x), f_{x_2}(x), f_{x_3}(x))\neq\vec{0}$, $\forall x\in M$. Thus, $\operatorname{rank}(V_1(x), V_2(x))$ is either equal to $2$ or equal to $1$. The singular points of $V$ are the points $x\in M$ where $\operatorname{rank}(V_1(x), V_2(x))=1$, that is, $$\Sigma^1(V)=\{x\in M \, | \, f_{x_1}(x)=0\},$$ and $V=(V_1, V_2)$ has $\operatorname{corank} 1$ if and only if $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))=2$ for each $x\in\Sigma^1(V)$. In this case, $\Sigma^1(V)$ is a submanifold of $M$ of dimension 1. Let $x\in\Sigma^1(V)$ be a singular point of $V$; then the space $\langle V_1(x),V_2(x)\rangle$ is spanned by the vector $e_1=(1,0,0)$ and $x\in A_2(V)$ if and only if $$\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x), e_1)<3,$$ that is, if and only if
$\Delta_2:=f_{x_2}f_{x_1x_3}-f_{x_3}f_{x_1x_2}$ vanishes at $x$. Moreover, $V$ satisfies the intersection properties $I_2$ if and only if $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x), \nabla\Delta_2(x))=3$ for $x\in A_2(V)$. In this case, $A_2(V)$ is a submanifold of $M$ of dimension 0. Therefore, $V=(V_1, V_2)$ is a Morin $2$-frame if and only if $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))=2$ on the singular set $\Sigma^1(V)=\{x\in M \, | \, f_{x_1}(x)=0\}$ and $\det(\nabla f(x), \nabla f_{x_1}(x), \nabla\Delta_2(x))\neq0$ on $A_2(V)=\{x\in M \, | \, f_{x_1}(x)=0, \Delta_2(x)=0\}.$
\end{ex}
\begin{ex}\label{ex3} Let us apply Example \ref{ex2} to the $2$-frame $V=(V_1, V_2)$ defined on the torus $\emph{T}:=f^{-1}(R^2)$, where $R^2$ is a regular value of $$f(x_1,x_2,x_3)=(\sqrt{x_2^2+x_3^2}-a)^2+(x_1+x_2)^2,$$ with $a>R$. Then, one can verify that $\Sigma^1(V)=\{x\in \emph{T} \, | \, x_1+x_2=0\}$, that is,
$$\Sigma^1(V)=\{(x_1,x_2,x_3)\in\mathbb{R}^3 \, | \, (\sqrt{x_2^2+x_3^2}-a)^2=R^2, \ x_1+x_2=0\},$$ and $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))$ is equal to
$$\operatorname{rank}\left[\begin{array}{ccc}
0& \displaystyle\frac{2x_2(\sqrt{x_2^2+x_3^2}-a)}{\sqrt{x_2^2+x_3^2}} & \displaystyle\frac{2x_3(\sqrt{x_2^2+x_3^2}-a)}{\sqrt{x_2^2+x_3^2}}\\
1&1&0
\end{array}\right],
$$ which is $2$ for all $x\in \emph{T}\cap\Sigma^1(V)$. Moreover, $$\Delta_2(x)=\displaystyle\frac{-4x_3(\sqrt{x_2^2+x_3^2}-a)}{\sqrt{x_2^2+x_3^2}},$$ so that $A_2(V)=\{x\in \emph{T} \, | \, x_1+x_2=0; \ x_3=0\}$, which is the set given by the points $(-a-R,a+R,0)$, $(a+R,-a-R,0)$, $(-a+R,a-R,0)$ and $(a-R,-a+R,0)$. It is not difficult to see that $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x), \nabla\Delta_2(x))=3, \ \forall x\in\emph{T}\cap A_2(V)$. Therefore, the frame $V=(V_1,V_2)$ given by
\begin{equation*}\begin{array}{lll}
V_1(x)&=&\left(\frac{-2x_2(\sqrt{x_2^2+x_3^2}-a)}{\sqrt{x_2^2+x_3^2}}-2(x_1+x_2),\ 2(x_1+x_2),\ 0\right);\\
V_2(x)&=&\left(\frac{-2x_3(\sqrt{x_2^2+x_3^2}-a)}{\sqrt{x_2^2+x_3^2}},\ 0,\ 2(x_1+x_2)\right)
\end{array}
\end{equation*} is a Morin $2$-frame defined on the torus $\emph{T}$ which admits singular points of type $A_1$ and $A_2$.
\end{ex}
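The computations in Example \ref{ex3} can be checked symbolically. The following is a minimal \texttt{sympy} sketch (the variable names are ours and purely illustrative); it verifies that $f_{x_1}=2(x_1+x_2)$ and that $\Delta_2$ has the closed form displayed above.
\begin{verbatim}
# Minimal sympy check of Example ex3 (illustrative variable names).
import sympy as sp

x1, x2, x3 = sp.symbols('x1 x2 x3', real=True)
a, R = sp.symbols('a R', positive=True)

rho = sp.sqrt(x2**2 + x3**2)
f = (rho - a)**2 + (x1 + x2)**2

fx1, fx2, fx3 = (sp.diff(f, v) for v in (x1, x2, x3))
# Delta_2 = f_{x2} f_{x1 x3} - f_{x3} f_{x1 x2}, as in Example ex2
Delta2 = fx2*sp.diff(fx1, x3) - fx3*sp.diff(fx1, x2)

print(sp.simplify(fx1 - 2*(x1 + x2)))              # 0
print(sp.simplify(Delta2 + 4*x3*(rho - a)/rho))    # 0
\end{verbatim}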
\begin{ex}\label{ex4} Let $a\in\mathbb{R}$ be a regular value of a $C^2$ mapping $f:\mathbb{R}^3\rightarrow\mathbb{R}$. Suppose that $M=f^{-1}(a)$ and let $\overline{W_1}$ and $\overline{W_2}$ be the orthogonal projections of $e_2=(0,1,0)$ and $e_3=(0,0,1)$ onto $T_xM$, given by
\begin{equation*}\begin{array}{ccc}
\overline{W_1}&=& e_2-\left\langle e_2,\frac{\nabla f}{|\nabla f|}\right\rangle\frac{\nabla f}{|\nabla f|};\\
\overline{W_2}&=& e_3-\left\langle e_3,\frac{\nabla f}{|\nabla f|}\right\rangle\frac{\nabla f}{|\nabla f|}.\\
\end{array}
\end{equation*} Let $W=(W_1,W_2)$ be the $2$-frame defined by $W_1=\left\|\nabla f\right\|^2\overline{W_1}$ and $W_2=\left\|\nabla f\right\|^2\overline{W_2}$, that is,
\begin{equation*}\begin{array}{ccc}
W_1&=&(-f_{x_1}f_{x_2},f_{x_1}^2+f_{x_3}^2,-f_{x_2}f_{x_3});\\
W_2&=&(-f_{x_1}f_{x_3},-f_{x_2}f_{x_3},f_{x_1}^2+f_{x_2}^2).
\end{array}
\end{equation*} Note that in this case $W_1$ and $W_2$ are gradient vector fields, that is, $W$ is a gradient $2$-frame. It is not difficult to see that $\operatorname{rank}(W_1(x), W_2(x))$ is either equal to $2$ or equal to $1$ and that the singular set of $W$ is $\Sigma^1(W)=\{x\in M \, | \, f_{x_1}(x)=0\}$. Let $x\in\Sigma^1(W)$ be a singular point of $W$; then the space $\langle W_1(x),W_2(x)\rangle$ is spanned by the vector $(0,f_{x_3}, -f_{x_2})$, so that $A_2(W)=\{x\in M \, | \, f_{x_1}(x)=0, f_{x_1x_1}(x)=0\}$. Therefore, $W=(W_1, W_2)$ is a Morin $2$-frame if and only if $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))=2$ on the singular set $\Sigma^1(W)$ and $\det(\nabla f(x), \nabla f_{x_1}(x), \nabla f_{x_1x_1}(x))\neq0$ on $A_2(W)$.
\end{ex}
\begin{ex} \label{ex5} Let us apply Example \ref{ex4} to the $2$-frame $W=(W_1, W_2)$ defined on the torus $\emph{T}:=f^{-1}(R^2)$ of Example \ref{ex3}. In this situation, one can verify that $\Sigma^1(W)$ is the same singular set as $\Sigma^1(V)$ in Example \ref{ex3}. Moreover, $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))=2, \ \forall x\in\Sigma^1(W)$. However, since $f_{x_1x_1}(x)=2, \ \forall x\in\Sigma^1(W)$, we have that $W$ does not admit singular points of type $A_2$. That is, $W$ is a Morin $2$-frame on $\emph{T}$ which admits only Morin singularities of type $A_1$.
\end{ex}
\begin{ex} \label{ex6} Let us consider the $2$-frames $V=(V_1,V_2)$ and $W=(W_1, W_2)$ from Examples \ref{ex2} and \ref{ex4} defined on the unit sphere $M:=f^{-1}(1)$, where $f(x_1,x_2,x_3)=x_1^2+x_2^2+x_3^2$. We know that the singular sets of $V$ and $W$ are the same, that is, $\Sigma^1(V)=\Sigma^1(W)=\{x\in M \, | \, x_1=0\}$, and $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))=2$ at every singular point $x$. However, $\Delta_2(x)=0, \ \forall x\in\Sigma^1(V)$, so that $\nabla\Delta_2\equiv\vec{0}$. On the other hand, $f_{x_1x_1}(x)\neq0, \ \forall x\in\Sigma^1(W)$, so that $A_2(W)=\emptyset$. Therefore, $V$ is not a Morin $2$-frame and $W$ is a Morin $2$-frame that admits only Morin singularities of type $A_1$.
\end{ex}
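This contrast can also be checked with a short \texttt{sympy} sketch (again with illustrative names): on the sphere, $\Delta_2$ vanishes identically, while $f_{x_1x_1}=2$, so that $A_2(W)=\emptyset$.
\begin{verbatim}
# Sphere of Example ex6: Delta_2 vanishes identically, f_{x1 x1} = 2.
import sympy as sp

x1, x2, x3 = sp.symbols('x1 x2 x3', real=True)
f = x1**2 + x2**2 + x3**2
fx1, fx2, fx3 = (sp.diff(f, v) for v in (x1, x2, x3))

print(fx2*sp.diff(fx1, x3) - fx3*sp.diff(fx1, x2))   # 0 identically
print(sp.diff(fx1, x1))                              # 2
\end{verbatim}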
\begin{ex}\label{ex7} In Example \ref{ex6}, if we consider $f(x_1,x_2,x_3)=x_1^2-x_1x_2+x_3^2$, then one can verify that $V$ and $W$ are both Morin $2$-frames that admit only Morin singularities of type $A_1$. Let us now consider the case where the $2$-frame $V$ of Example \ref{ex2} is defined on $M:=f^{-1}(-1)$, with $f(x_1,x_2,x_3)=x_1^2-x_1x_2+x_3^2$. It is easy to see that $-1$ is a regular value of $f$ and $\Sigma^1(V)=\{x\in M \, | \, 2x_1-x_2=0\}$, that is,
$$\Sigma^1(V)=\{(x_1,x_2,x_3)\in\mathbb{R}^3 \, | \, x_1^2-x_1x_2+x_3^2+1=0; \ 2x_1-x_2=0\},$$ and $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x))$ is equal to
$$\operatorname{rank}\left[\begin{array}{ccc}
(2x_1-x_2)& -x_1 & 2x_3\\
2&-1&0
\end{array}\right],
$$ which is $2$ for all $x\in M\cap\Sigma^1(V)$. Moreover, $\Delta_2(x)=2x_3$ and $$A_2(V)=\{(x_1,x_2,x_3)\in\mathbb{R}^3 \, | \, x_1^2-x_1x_2+x_3^2+1=0; \ 2x_1-x_2=0; \ x_3=0\},$$ which is the set given by the points $(1,2,0)$ and $(-1,-2,0)$. We also have that $\det(\nabla f(x), \nabla f_{x_1}(x), \nabla\Delta_2(x))$ is equal to
$$\det\left[\begin{array}{ccc}
(2x_1-x_2)& -x_1 & 2x_3\\
2&-1&0\\
0&0&2
\end{array}\right]=2x_2,
$$ which is equal to $\pm4$ for each $x\in A_2(V)$. That is, $\operatorname{rank}(\nabla f(x), \nabla f_{x_1}(x), \nabla\Delta_2(x))=3$, $\forall x\in M\cap A_2(V)$. Therefore, the $2$-frame $V=(V_1,V_2)$ given by
\begin{equation*}\begin{array}{lll}
V_1(x)&=&\left(x_1, \ 2x_1-x_2,\ 0\right);\\
V_2(x)&=&\left(-2x_3, \ 0, \ 2x_1-x_2\right)
\end{array}
\end{equation*} is a Morin $2$-frame defined on $M$ which admits singular points of type $A_1$ and $A_2$.
\end{ex}
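The last example is simple enough to be verified end to end; the following \texttt{sympy} sketch (with illustrative names) recovers $\Delta_2=2x_3$, the two points of $A_2(V)$ and the value $\pm4$ of the determinant above.
\begin{verbatim}
# Symbolic check of Example ex7 (illustrative variable names).
import sympy as sp

x1, x2, x3 = sp.symbols('x1 x2 x3', real=True)
f = x1**2 - x1*x2 + x3**2
fx1, fx2, fx3 = (sp.diff(f, v) for v in (x1, x2, x3))

Delta2 = sp.expand(fx2*sp.diff(fx1, x3) - fx3*sp.diff(fx1, x2))
print(Delta2)                                   # 2*x3

# A_2(V): points of f^{-1}(-1) with f_{x1} = 0 and Delta_2 = 0
A2 = sp.solve([sp.Eq(f, -1), sp.Eq(fx1, 0), sp.Eq(Delta2, 0)],
              [x1, x2, x3], dict=True)
print(A2)                                       # the two points (1, 2, 0), (-1, -2, 0)

D = sp.Matrix([[fx1, fx2, fx3],
               [sp.diff(fx1, x1), sp.diff(fx1, x2), sp.diff(fx1, x3)],
               [sp.diff(Delta2, x1), sp.diff(Delta2, x2), sp.diff(Delta2, x3)]])
print([D.det().subs(s) for s in A2])            # values +-4
\end{verbatim}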
\section{Zeros of a generic 1-form $\xi(x)$ associated to a Morin $n$-coframe}\label{s2}
Let $a=(a_1,\ldots,a_n)\in\mathbb{R}^n\setminus\{\vec{0}\}$ and let $\omega=(\omega_1,\ldots,\omega_n)$ be a Morin $n$-coframe defined on an $m$-dimensional manifold $M$. In this section, we consider the 1-form $\xi(x)=\displaystyle\sum_{i=1}^{n}{a_i\omega_i(x)}$ defined on $M$ and we prove some properties of the zeros of $\xi$ and of its restrictions to the singular sets of $\omega$.
\begin{lema}\label{zeroszsobresigma1} If $p$ is a zero of the 1-form $\xi$,
then $p\in\Sigma^1(\omega)$ and $p$ is a zero of $\xi_{|_{\Sigma^1(\omega)}}$.
\end{lema}
\begin{proof} Suppose that $\xi(p)=0$. Then $\operatorname{rank}(\omega_1(p),\ldots,\omega_n(p))\leq n-1$, since $a\neq\vec{0}$. However, the $n$-coframe $\omega$ has $\operatorname{corank} 1$, thus $\operatorname{rank}(\omega_1(p),\ldots,\omega_n(p))=n-1$. That is, $p\in\Sigma^1(\omega)$. Moreover, $\xi(p)=0$ implies that $T_pM\subset\ker(\xi(p))$ and, since $T_p\Sigma^1(\omega)\subset T_pM$, we conclude that $p$ is a zero of $\xi_{|_{\Sigma^1(\omega)}}$.
\end{proof}
\begin{lema}\label{lemazerosrestricoes} If $p\in A_{k+1}(\omega)$ then, for each $k=0,\ldots,n-2$, $p$ is a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if $p$ is a zero of $\xi_{|_{\Sigma^{k}(\omega)}}$.
\end{lema}
\begin{proof} Suppose that $p\in A_{k+1}(\omega)$ and that, locally, we have
\begin{equation*}\begin{array}{cl}
\mathcal{U}\cap\Sigma^{k}(\omega)&=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-n+1}(x)=\Delta_2(x)=\ldots=\Delta_k(x)=0\};\\
\mathcal{U}\cap\Sigma^{k+1}(\omega)&=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-n+1}(x)=\Delta_2(x)=\ldots=\Delta_{k+1}(x)=0\};
\end{array}\end{equation*} for an open neighborhood $\mathcal{U}\subset M$ with $p\in\mathcal{U}$. If $p$ is a zero of the restriction $\xi_{|_{\Sigma^{k}(\omega)}}$, then $\xi(p)\in N_p^{\ast}\Sigma^{k}(\omega)=\langle dF_1(p),\ldots,dF_{m-n+1}(p),d\Delta_2(p),\ldots,d\Delta_k(p)\rangle.$ In particular,
$\xi(p)\in N_p^{\ast}\Sigma^{k+1}(\omega)$, therefore $p$ is a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$.
On the other hand, if $p$ is a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$, then $\xi(p)\in N_p^{\ast}\Sigma^{k+1}(\omega)\cap\langle\bar{\omega}(p)\rangle$.
Since $p\in A_{k+1}(\omega)$, we have that $p\in\Sigma^{k+1}(\omega)\setminus\Sigma^{k+2}(\omega)$, thus
\begin{equation*}\left\{\begin{array}{l}
\operatorname{dim}(\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k}(\omega))=k;\\
\operatorname{dim}(\langle\bar{\Omega}^{k+1}(p)\rangle\cap N_p^{\ast}\Sigma^{k+1}(\omega))=0;
\end{array}\right.
\end{equation*} where $\bar{\Omega}^{k+1}(p)$ represents a smooth basis of a vector subspace supplementary to $\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k}(\omega)$ in $\langle\bar{\omega}(p)\rangle$. Since $\operatorname{dim}(N_p^{\ast}\Sigma^{k}(\omega))=m-n+k$, $\operatorname{dim}(N_p^{\ast}\Sigma^{k+1}(\omega))=m-n+k+1$ and $N_p^{\ast}\Sigma^{k}(\omega)\subset N_p^{\ast}\Sigma^{k+1}(\omega)$, we have $$\operatorname{dim}(\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k+1}(\omega))=\operatorname{dim}(\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k}(\omega))=k.$$ Thus, $\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k}(\omega)=\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{k+1}(\omega).$ Therefore, $\xi(p)\in N_p^{\ast}\Sigma^k(\omega)$, that is, $p$ is a zero of $\xi_{|_{\Sigma^{k}(\omega)}}$. \end{proof}
\begin{lema}\label{ptosansaozeros} If $p\in A_n(\omega)$, then $p$ is a zero of the restriction $\xi_{|_{\Sigma^{n-1}(\omega)}}$.
\end{lema}
\begin{proof} Analogously to Lemma \ref{lemazerosrestricoes}, we consider local equations of $\Sigma^{n}(\omega)$:
\begin{equation*}
\mathcal{U}\cap\Sigma^{n}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-n+1}(x)=\Delta_2(x)=\ldots=\Delta_n(x)=0\},
\end{equation*} with $N_x^{\ast}\Sigma^{n}(\omega)=\langle dF_1(x),\ldots,dF_{m-n+1}(x),d\Delta_2(x),\ldots,d\Delta_n(x)\rangle.$ Since $A_n(\omega)=\Sigma^{n}(\omega)$, if $p\in A_n(\omega)$ then $$\operatorname{dim}(\langle\bar{\omega}(p)\rangle\cap N_p^{\ast}\Sigma^{n-1}(\omega))=n-1.$$
Thus, $\langle\bar{\omega}(p)\rangle\subset N_p^{\ast}\Sigma^{n-1}(\omega)$ and, consequently, $\xi(p)\in N_p^{\ast}\Sigma^{n-1}(\omega)$. Therefore,
$p$ is a zero of $\xi_{|_{\Sigma^{n-1}(\omega)}}$. \end{proof}
\begin{remark}\label{obsmatrixM} If $p\in\Sigma^1(\omega)$ then $\operatorname{rank}(\omega_1(p), \ldots, \omega_n(p))=n-1$ and, writing $\omega_i=(\omega_i^1, \ldots, \omega_i^m)$, we can suppose without loss of generality that \begin{equation}\label{matrizli}\textbf{M}(x)=\left|\begin{array}{cccc}
\omega_1^1(x)&\omega_2^1(x)&\cdots &\omega_{n-1}^1(x)\\
\vdots & \vdots &\ddots &\vdots\\
\omega_1^{n-1}(x)&\omega_2^{n-1}(x)&\cdots &\omega_{n-1}^{n-1}(x)
\end{array}\right|\neq0,
\end{equation} for all $x$ in an open neighborhood $\mathcal{U}\subset M$ with $p\in\mathcal{U}$. In particular, if $p\in\mathcal{U}$ is a zero of $\xi$, then $a_n\neq0$; otherwise, we would have $a_1=\ldots=a_{n-1}=a_n=0$. We will use this fact in the next results.
\end{remark}
\begin{lema}\label{lemaauxiliar} Let $p\in\Sigma^1(\omega)$ be such that $\textbf{M}(p)\neq0$. Then $\xi(p)=0$ if and only if $\displaystyle\sum_{i=1}^{n}{a_i\omega_i^j(p)}=0$ for every $j=1,\ldots,n-1$.
\end{lema}
\begin{proof}
It follows easily from the definitions of $\Sigma^1(\omega)$ and $\xi$: if $\xi(p)=0$, then in particular its first $n-1$ components vanish. Conversely, since $p\in\Sigma^1(\omega)$, the covector $\xi(p)=\sum_{i=1}^{n}a_i\omega_i(p)$ lies in the $(n-1)$-dimensional space $\langle\omega_1(p),\ldots,\omega_n(p)\rangle$, and the projection onto the first $n-1$ coordinates is injective on this space, because its image already contains the $n-1$ linearly independent vectors $(\omega_i^1(p),\ldots,\omega_i^{n-1}(p))$, $i=1,\ldots,n-1$ (here $\textbf{M}(p)\neq0$ is used). Hence, if $\sum_{i=1}^{n}a_i\omega_i^j(p)=0$ for every $j=1,\ldots,n-1$, then $\xi(p)=0$.
\end{proof}
\begin{lema}\label{lemainterzeroscomsigma2} Let $Z(\xi)$ be the zero set of the 1-form $\xi$. Then, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $Z(\xi)\cap\Sigma^2(\omega)=\emptyset$.
\end{lema}
\begin{proof} Let $\mathcal{U}\subset M$ be an open neighborhood on which $\textbf{M}(x)\neq0$ and
$$\mathcal{U}\cap\Sigma^2(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-n+1}(x)=\Delta_2(x)=0\},$$ with $\operatorname{rank}(dF_1(x),\ldots, dF_{m-n+1}(x),d\Delta_2(x))=m-n+2,$ for each $x\in\Sigma^2(\omega)\cap\mathcal{U}$. Let us consider the mapping $F:\mathcal{U}\times\mathbb{R}^n\setminus\{\vec{0}\}\rightarrow\mathbb{R}^{m+1}$ defined by $$F(x,a)=(F_1(x),\ldots,F_{m-n+1}(x),\Delta_2(x),\displaystyle\sum_{i=1}^{n}{a_i\omega_i^1(x)},\ldots,\displaystyle\sum_{i=1}^{n}{a_i\omega_i^{n-1}(x)}).$$ By Lemma \ref{lemaauxiliar}, if $x\in\Sigma^1(\omega)$ then $$\displaystyle\sum_{i=1}^{n}{a_i\omega_i(x)}=0\Leftrightarrow\displaystyle\sum_{i=1}^{n}{a_i\omega_i^j(x)}=0, \ \forall j=1,\ldots,n-1.$$ Thus, if $(x,a)\in F^{-1}(\vec{0})$, we have that $x\in Z(\xi)\cap\Sigma^2(\omega)$. Furthermore, the Jacobian matrix of $F$ at a point $(x,a)\in F^{-1}(\vec{0})$,
\begin{equation*}\left[\begin{array}{cccccc}
dF_1(x) & \vdots & & & & \\
\vdots & \vdots & \multicolumn{4}{c}{O_{(m-n+2)\times n}} \\
dF_{m-n+1}(x) & \vdots & & & & \\
d\Delta_2(x) & \vdots & & & & \\
\cdots \ \cdots \ \cdots \ \cdots & \vdots & \cdots \ \, \cdots & \cdots & \cdots \ \, \cdots & \cdots \ \, \cdots \\
 & \vdots & \omega_1^1(x) & \cdots & \omega_{n-1}^1(x) & \omega_n^1(x) \\
(*) & \vdots & \omega_1^2(x) & \cdots & \omega_{n-1}^2(x) & \omega_n^2(x) \\
 & \vdots & \vdots & \ddots & \vdots & \vdots \\
 & \vdots & \omega_1^{n-1}(x) & \cdots & \omega_{n-1}^{n-1}(x) & \omega_n^{n-1}(x) \\
\end{array}\right],
\end{equation*} has rank $m+1$. That is, $\vec{0}$ is a regular value of $F$ and $F^{-1}(\vec{0})$ is a submanifold of dimension $n-1$. Let $\pi:F^{-1}(\vec{0})\rightarrow\mathbb{R}^n\setminus\{\vec{0}\}$ be the projection onto $\mathbb{R}^n\setminus\{\vec{0}\}$ given by $\pi(x,a)=a$. By Sard's Theorem, $a$ is a regular value of $\pi$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$ and, since $\operatorname{dim}F^{-1}(\vec{0})=n-1<n$, the fibre of $\pi$ over a regular value must be empty. Therefore, $\pi^{-1}(a)\cap F^{-1}(\vec{0})=\emptyset$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$. However, $
\pi^{-1}(a)\cap F^{-1}(\vec{0})=\{(x,a)\in\mathcal{U}\times\{a\} \, | \, x\in Z(\xi)\cap\Sigma^2(\omega)\}.$ Thus, $Z(\xi)\cap\Sigma^2(\omega)=\emptyset$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$. \end{proof}
\begin{lema}\label{ptscrticrestasigmak} Let $Z(\xi_{|_{\Sigma^k(\omega)}})$ be the zero set of the restriction of the 1-form $\xi$ to $\Sigma^k(\omega)$, with $k\geq1$. Then, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $Z(\xi_{|_{\Sigma^k(\omega)}})\cap\Sigma^{k+2}(\omega)=\emptyset$.
\end{lema}
\begin{proof} For each $k=1, \ldots, n-2$, let $\mathcal{U}\subset M$ be an open neighborhood on which
$$\mathcal{U}\cap\Sigma^{k}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-n+k}(x)=0\},$$ with $\operatorname{rank}(dF_1(x),\ldots, dF_{m-n+k}(x))=m-n+k$, for all $x\in\mathcal{U}\cap\Sigma^{k}(\omega)$, and $$\mathcal{U}\cap\Sigma^{k+2}(\omega)=\{x\in\mathcal{U} \, | \, F_1(x)=\ldots=F_{m-n+k+2}(x)=0\},$$ with $\operatorname{rank}(dF_1(x),\ldots, dF_{m-n+k+2}(x))=m-n+k+2$, for all $x\in\mathcal{U}\cap\Sigma^{k+2}(\omega)$.
By Szafraniec's characterization (see \cite[p. 196]{sza2}) adapted to 1-forms, $x$ is a zero of the restriction $\xi_{|_{\Sigma^k(\omega)}}$ if and only if there exists $(\lambda_1,\ldots,\lambda_{m-n+k})\in\mathbb{R}^{m-n+k}$ such that $$\xi(x)=\displaystyle\sum_{j=1}^{m-n+k}{\lambda_jdF_j(x)}.$$
Writing $\xi(x)=(\xi_1(x),\ldots,\xi_m(x))$, where $\xi_s(x)=\displaystyle\sum_{i=1}^{n}{a_i\omega_i^s(x)}$, $s= 1,\ldots, m$, we define
$$N_s(x,a,\lambda):=\xi_s(x)-\displaystyle\sum_{j=1}^{m-n+k}{\lambda_j\displaystyle\frac{\partial F_j}{\partial x_s}(x)},$$ so that $\xi_{|_{\Sigma^k(\omega)}}(x)=0$ if and only if $N_s(x,a,\lambda)=0$ for all $s=1,\ldots,m$.
Let $F:\mathcal{U}\times\mathbb{R}^n\setminus\{\vec{0}\}\times\mathbb{R}^{m-n+k}\rightarrow\mathbb{R}^{2m-n+k+2}$ be the mapping defined by $$F(x,a,\lambda)=(F_1,\ldots, F_{m-n+k+2}, N_1,\ldots,N_m).$$ If $(x,a,\lambda)\in F^{-1}(\vec{0})$, then $x\in Z(\xi_{|_{\Sigma^k(\omega)}})\cap\Sigma^{k+2}(\omega)$ and the Jacobian matrix of $F$ at $(x,a,\lambda)$,
\begin{equation*}\left[\begin{array}{ccccc}
dF_1(x) & \vdots & & & \\
\vdots & \vdots & \multicolumn{3}{c}{O_{(m-n+k+2)\times(m+k)}} \\
dF_{m-n+k+2}(x) & \vdots & & & \\
\cdots \ \, \cdots \ \, \cdots \ \, \cdots \ \, \cdots & \vdots &\cdots \ \, \cdots \ \, \cdots & \cdots & \cdots \ \, \cdots \ \, \cdots\\
d_xN_1(x,a,\lambda) & \vdots & & \vdots & \\
\vdots & \vdots & B_{m\times n} & \vdots & C_{m\times(m-n+k)}\\
d_xN_m(x,a,\lambda) & \vdots & & \vdots & \\
\end{array}\right],
\end{equation*} has rank $2m-n+k+1$, where $O_{(m-n+k+2)\times(m+k)}$ is a null matrix, $B_{m\times n}$ is the matrix whose column vectors are given by the coefficients of the 1-forms $\omega_1(x), \ldots, \omega_n(x)$ of the $n$-coframe $\omega$:
\begin{equation*}B_{m\times n}=\left[\begin{array}{ccc}
\omega_1^1(x) & \cdots & \omega_n^1(x) \\
\vdots & \ddots & \vdots \\
\omega_1^{m}(x) & \cdots & \omega_n^{m}(x) \\
\end{array}\right]
\end{equation*} and $C_{m\times(m-n+k)}$ is the matrix whose column vectors are, up to sign, the coefficients of the derivatives $dF_1,\ldots,dF_{m-n+k}$ with respect to $x$:
\begin{equation*}C_{m\times(m-n+k)}=\left[\begin{array}{ccc}
-\displaystyle\frac{\partial F_1}{\partial x_1}(x) & \cdots & -\displaystyle\frac{\partial F_{m-n+k}}{\partial x_1}(x) \\
\vdots & \ddots & \vdots \\
-\displaystyle\frac{\partial F_{1}}{\partial x_m}(x) & \cdots & -\displaystyle\frac{\partial F_{m-n+k}}{\partial x_m}(x) \\
\end{array}\right].
\end{equation*}
Note that, if $(x,a,\lambda)\in F^{-1}(\vec{0})$ then, in particular, $x\in\Sigma^{k+1}(\omega)$ and, by Lemma \ref{dimensaointersecao}, $\operatorname{dim}(\langle\bar{\omega}(x)\rangle\cap N_x^{\ast}\Sigma^k(\omega))=k$. Thus, $\operatorname{dim}(\langle\bar{\omega}(x)\rangle + N_x^{\ast}\Sigma^k(\omega))=m-1$.
Therefore,
\begin{equation*}\operatorname{rank}\left[\begin{array}{ccc}
B_{m\times n} & \vdots & C_{m\times(m-n+k)} \\
\end{array}\right]=m-1
\end{equation*} and the Jacobian matrix of $F$ at $(x,a,\lambda)$ has rank $2m-n+k+1$. That is, $F^{-1}(\vec{0})$ has dimension less than or equal to $n-1$. Let $\pi:F^{-1}(\vec{0})\rightarrow\mathbb{R}^n\setminus\{\vec{0}\}$ be the projection onto $\mathbb{R}^n\setminus\{\vec{0}\}$, that is, $\pi(x,a,\lambda)=a.$ By Sard's Theorem, $a$ is a regular value of $\pi$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$ and, since $F^{-1}(\vec{0})$ has dimension at most $n-1<n$, the fibre of $\pi$ over a regular value must be empty. Therefore, $\pi^{-1}(a)\cap F^{-1}(\vec{0})=\emptyset$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$.
However, \begin{equation*}
\pi^{-1}(a)\cap F^{-1}(\vec{0})=\{(x,a,\lambda)\in\mathcal{U}\times\{a\}\times\mathbb{R}^{m-n+k}\, | \, x\in Z(\xi_{|_{\Sigma^k(\omega)}})\cap\Sigma^{k+2}(\omega)\}.
\end{equation*} Thus, $Z(\xi_{|_{\Sigma^k(\omega)}})\cap\Sigma^{k+2}(\omega)=\emptyset$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$.
\end{proof}
\section{Non-degenerate zeros of a generic 1-form $\xi(x)$ associated to a Morin $n$-coframe}\label{s3}
In this section we verify that, generically, the 1-form $\xi(x)$ and its restrictions $\xi_{|_{\Sigma^k(\omega)}}$, $\xi_{|_{A_k(\omega)}}$ admit only non-degenerate zeros. Furthermore, we will see how these non-degenerate zeros
can be related.
We start with some technical lemmas.
\begin{lema}\label{lematecnico} Let $A$ be a square matrix of order $m$ given by $$A=\left[\begin{array}{ccc}
a_{11} & \cdots & a_{1m}\\
a_{21} & \cdots & a_{2m}\\
\vdots & \ddots & \vdots\\
a_{m1} & \cdots & a_{mm}
\end{array}\right]. $$ If there exists $(\lambda_1, \ldots, \lambda_m)\in\mathbb{R}^m\setminus\{\vec{0}\}$ such that $\displaystyle\sum_{j=1}^{m}{\lambda_ja_{ij}}=0$ for $i=1, \ldots, m,$ then $$\lambda_j\operatorname{cof}(a_{ik})-\lambda_k\operatorname{cof}(a_{ij})=0, \ \forall \, i,j,k=1,\ldots, m.$$
\end{lema}
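For instance, when $m=2$ the hypothesis reads $\lambda_1 a_{i1}+\lambda_2 a_{i2}=0$ for $i=1,2$, and, taking $i=1$, $j=1$, $k=2$,
\begin{equation*}
\lambda_1\operatorname{cof}(a_{12})-\lambda_2\operatorname{cof}(a_{11})
=\lambda_1(-a_{21})-\lambda_2\,a_{22}
=-(\lambda_1 a_{21}+\lambda_2 a_{22})=0,
\end{equation*}
by the relation for the second row; the remaining cases are analogous.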
\begin{lema}\label{lemadoscofatores} Let us consider the matrix
\begin{equation*}M_i(x)=\left[\renewcommand{\arraystretch}{2}{\begin{array}{cccc}
\omega_1^1(x) & \cdots & \omega_{n-1}^1(x) & \omega_n^1(x) \\
\vdots & \ddots & \vdots & \vdots \\
\omega_1^{n-1}(x) & \cdots & \omega_{n-1}^{n-1}(x) & \omega_n^{n-1}(x) \\
\omega_1^{i}(x) & \cdots & \omega_{n-1}^{i}(x) & \omega_n^{i}(x) \\
\end{array}}\right].
\end{equation*} If $x$ is a zero of $\xi$, then for $\ell\in\{1, \ldots,n-1\}$, $j\in\{1, \ldots, n-1,i\}$ and $i\in\{n, \ldots, m\}$ we have $$a_n\operatorname{cof}(\omega_{\ell}^j, M_i)=a_{\ell}\operatorname{cof}(\omega_n^j, M_i).$$
\end{lema}
\begin{proof} This result is a consequence of Lemma \ref{lematecnico} applied to the matrix $A=M_i(x)$, where $a_{\ell j}=\omega_j^{\ell}(x)$, for $j=1,\ldots,n$ and $\ell=1,\ldots, n-1, i$. It is enough to take $(\lambda_1, \ldots, \lambda_n)=(a_1, \ldots, a_n)$.
\end{proof}
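For instance, when $n=2$ and $i\in\{2,\ldots,m\}$ the matrix $M_i(x)$ is $2\times2$ with cofactors $\operatorname{cof}(\omega_1^1,M_i)=\omega_2^i(x)$, $\operatorname{cof}(\omega_2^1,M_i)=-\omega_1^i(x)$, $\operatorname{cof}(\omega_1^i,M_i)=-\omega_2^1(x)$ and $\operatorname{cof}(\omega_2^i,M_i)=\omega_1^1(x)$, so the identities of Lemma \ref{lemadoscofatores} with $\ell=1$ read $$a_1\omega_1^i(x)+a_2\omega_2^i(x)=\xi_i(x)=0 \ \ (j=1) \qquad\text{and}\qquad a_1\omega_1^1(x)+a_2\omega_2^1(x)=\xi_1(x)=0 \ \ (j=i),$$ which indeed hold at every zero $x$ of $\xi$. We record this simple case only as an illustration.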
\begin{lema}\label{lemadorank} Let $\mathcal{U}\subset\mathbb{R}^m$ be an open set and let $H\colon\mathcal{U}\times\mathbb{R}^n\setminus\{\vec{0}\}\rightarrow\mathbb{R}^m$ be a smooth mapping given by $H(x,a)=(h_1(x,a),\ldots, h_m(x,a))$. If $$\operatorname{rank}(dh_1(x,a),\ldots, dh_m(x,a))=m, \ \forall (x,a)\in H^{-1}(\vec{0}),$$ then $\operatorname{rank}(d_x h_1(x,a),\ldots, d_x h_m(x,a))=m$ for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$.
\end{lema}
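Lemma \ref{lemadorank} can be obtained from the standard parametric transversality argument, which we only sketch here: the hypothesis implies that $H^{-1}(\vec{0})$ is a smooth submanifold of dimension $n$ and, by Sard's Theorem, almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$ is a regular value of the projection $(x,a)\mapsto a$ restricted to $H^{-1}(\vec{0})$. For such an $a$ and $(x,a)\in H^{-1}(\vec{0})$, given $w\in\mathbb{R}^m$ we may choose $(u',v')$ with $dH(x,a)(u',v')=w$ and then $(u,v')\in\operatorname{ker} dH(x,a)$, so that $d_xH(x,a)(u'-u)=w$; hence $\operatorname{rank}(d_x h_1(x,a),\ldots, d_x h_m(x,a))=m$.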
In the previous section we proved that every zero of $\xi$ belongs to $\Sigma^1(\omega)$. Next, we will show that, generically, such zeros belong to $A_1(\omega)$ and are non-degenerate. To do this, we must find explicit equations that locally define the manifolds $T^{\ast}M^{n,n-1}$ and $\Sigma^1(\omega)$.
\begin{lema}\label{eqloc2} Let $(p,\tilde{\varphi})\in T^{\ast}M^{n,n-1}$. Then it is possible to exhibit, explicitly, functions $m_i(x,\varphi)\colon\tilde{\mathcal{U}}\rightarrow\mathbb{R}$, $i=n, \ldots, m$, defined on an open neighborhood $\tilde{\mathcal{U}}\subset T^{\ast}M^n$ with $(p,\tilde{\varphi})\in\tilde{\mathcal{U}}$, such that, locally, $$T^{\ast}M^{n,n-1}=\left\{(x,\varphi)\in \tilde{\mathcal{U}} \ | \ m_n=\ldots=m_m=0\right\}$$ with $\operatorname{rank}\left(dm_n,\ldots,dm_m\right)=m-n+1$ for all $(x,\varphi)\in T^{\ast}M^{n,n-1}\cap\tilde{\mathcal{U}}$.
\end{lema}
\begin{proof} Let $(p,\tilde{\varphi})\in T^{\ast}M^{n,n-1}$. We may suppose, without loss of generality, that
\begin{equation*}m(\varphi)=\left|\begin{array}{cccc}
\varphi_1^1&\varphi_2^1&\cdots &\varphi_{n-1}^1\\
\vdots & \vdots &\ddots &\vdots\\
\varphi_1^{n-1}&\varphi_2^{n-1}&\cdots &\varphi_{n-1}^{n-1}
\end{array}\right|\neq0
\end{equation*} for $(x,\varphi)$ in an open neighborhood $\tilde{\mathcal{U}}$ in $T^{\ast}M^n$ with $(p,\tilde{\varphi})\in\tilde{\mathcal{U}}$. In this situation, $T^{\ast}M^{n,n-1}$ can be locally defined as $$T^{\ast}M^{n,n-1}=\left\{(x,\varphi)\in \tilde{\mathcal{U}} \ | \ m_n=\ldots=m_m=0\right\},$$ where $m_i:=m_i(\varphi)$ is the determinant \begin{equation*}m_i(\varphi)=\left|\begin{array}{ccccc}
\varphi_1^1&\varphi_2^1&\cdots &\varphi_{n-1}^1&\varphi_{n}^1\\
\vdots & \vdots &\ddots &\vdots &\vdots\\
\varphi_1^{n-1}&\varphi_2^{n-1}&\cdots &\varphi_{n-1}^{n-1}&\varphi_{n}^{n-1}\\
\varphi_1^{i}&\varphi_2^{i}&\cdots &\varphi_{n-1}^{i}&\varphi_{n}^{i}
\end{array}\right|, \ i=n,\ldots,m.
\end{equation*}
Let us verify that $\operatorname{rank}\left(dm_n,\ldots,dm_m\right)=m-n+1$ on $(T^{\ast}M^{n,n-1})\cap\tilde{\mathcal{U}}$.
For clearer notation, consider $I=\{1,\ldots,n\}$ and $I_i=\{1,\ldots,n-1,i\}$ for each $i\in\{n,\ldots, m\}$. Then
\begin{equation}\label{gradmii}dm_i(\varphi)=\displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^\ell,m_i)\,d \varphi_j^\ell},\end{equation} where $\operatorname{cof}(\varphi_j^\ell,m_i)$ is the cofactor of $\varphi_j^\ell$ in the matrix $$\left[\begin{array}{ccccc}
\varphi_1^1&\varphi_2^1&\cdots &\varphi_{n-1}^1&\varphi_{n}^1\\
\vdots & \vdots &\ddots &\vdots &\vdots\\
\varphi_1^{n-1}&\varphi_2^{n-1}&\cdots &\varphi_{n-1}^{n-1}&\varphi_{n}^{n-1}\\
\varphi_1^{i}&\varphi_2^{i}&\cdots &\varphi_{n-1}^{i}&\varphi_{n}^{i}
\end{array}\right]$$ and $$d \varphi_j^\ell=\left(\frac{\partial \varphi_j^\ell}{\partial \varphi_1^1},\ldots,\frac{\partial \varphi_j^\ell}{\partial \varphi_1^m},\frac{\partial \varphi_j^\ell}{\partial \varphi_2^1},\ldots,\frac{\partial \varphi_j^\ell}{\partial \varphi_2^m},\ldots,\frac{\partial \varphi_j^\ell}{\partial \varphi_n^1},\ldots,\frac{\partial \varphi_j^\ell}{\partial \varphi_n^m}\right)$$ is the vector whose coordinate at the position $(j-1)m+\ell$ is equal to $1$ and all the others are zero. In particular, since $i\in\{n, \ldots, m\}$, $$d \varphi_n^i=(0,\ldots,0,\underbrace{0,\ldots,\overset{i}{1},\ldots,0}_{m-n+1})\in\underbrace{(\mathbb{R}^m)^{\ast}\times\ldots\times(\mathbb{R}^m)^{\ast}}_{n \text{ times}}$$ and the last $m-n+1$ coordinates of $d \varphi_j^\ell$ are zero whenever $j\neq n$ or $\ell\neq i$. Moreover, $\operatorname{cof}(\varphi_n^i,m_i)=m(\varphi)\neq0$ for $i=n,\ldots,m$. Thus, $$\frac{\partial(m_n,\ldots,m_m)}{\partial(\varphi_n^n,\ldots,\varphi_n^m)}=\left|\begin{array}{ccc}
\operatorname{cof}(\varphi_n^n,m_n) & \cdots & 0\\
\vdots & \ddots & \vdots \\
0 & \cdots & \operatorname{cof}(\varphi_n^m,m_m)\\
\end{array}\right|.$$ That is, for all $(x,\varphi)\in(T^{\ast}M^{n,n-1})\cap\tilde{\mathcal{U}}$, we have
\begin{equation}\label{menornaonulo}\frac{\partial(m_n,\ldots,m_m)}{\partial(\varphi_n^n,\ldots,\varphi_n^m)}=m(\varphi)^{(m-n+1)}\left|
{\renewcommand{\arraystretch}{1}
\begin{array}{ccc}
1& \cdots & 0\\
\vdots & \ddots & \vdots \\
0 & \cdots & 1\\
\end{array}}\right|\neq0.\end{equation} Therefore, $\operatorname{rank}\left(dm_n,\ldots,dm_m\right)=m-n+1$
for all $(x,\varphi)\in(T^{\ast}M^{n,n-1})\cap\tilde{\mathcal{U}}$.
\end{proof}
\begin{lema}\label{eqloc3} Let $p\in\Sigma^1(\omega)$ be a singular point of $\omega$. Then it is possible to exhibit, explicitly, functions ${\normalfont\textbf{M}_i(x)\colon\mathcal{U}\rightarrow\mathbb{R}}$, $i=n, \ldots, m$, defined on an open neighborhood $\mathcal{U}\subset M$ with $p\in\mathcal{U}$, such that, locally, $$\mathcal{U}\cap\Sigma^1(\omega)=\left\{x\in \mathcal{U} \ | \ {\normalfont \textbf{M}_n(x)}=\ldots={\normalfont \textbf{M}_m(x)=0}\right\}$$ with $\operatorname{rank}\left({\normalfont d\textbf{M}_n(x),\ldots,d\textbf{M}_m(x)}\right)=m-n+1$ for all $x\in \Sigma^1(\omega)\cap\mathcal{U}$.
\end{lema}
\begin{proof}
Let $\omega=(\omega_1,\ldots,\omega_n)$ be a Morin $n$-coframe and let $p\in\Sigma^1(\omega)$. By Remark \ref{obsmatrixM}, we can consider an open neighborhood $\mathcal{U}\subset M$ with $p\in\mathcal{U}$ on which $\textbf{M}(x)\neq0$.
Thus, in this neighborhood the set $\Sigma^1(\omega)$ can be defined as $$\mathcal{U}\cap\Sigma^1(\omega)=\{x\in\mathcal{U} \ | \ \textbf{M}_n=\ldots=\textbf{M}_m=0\},$$ where $\textbf{M}_i:=\textbf{M}_i(x)$ is the determinant \begin{equation}\label{expMi}\textbf{M}_i(x)=\left|
{\renewcommand{\arraystretch}{2}
\begin{array}{ccccc}
\omega_1^1(x)&\omega_2^1(x)&\cdots &\omega_{n-1}^1(x)&\omega_{n}^1(x)\\
\vdots & \vdots &\ddots &\vdots &\vdots\\
\omega_1^{n-1}(x)&\omega_2^{n-1}(x)&\cdots &\omega_{n-1}^{n-1}(x)&\omega_{n}^{n-1}(x)\\
\omega_1^{i}(x)&\omega_2^{i}(x)&\cdots &\omega_{n-1}^{i}(x)&\omega_{n}^{i}(x)
\end{array}}\right|
\end{equation} for $i=n,\ldots,m$. Let us verify that $\operatorname{rank}\left(d\textbf{M}_n(x),\ldots,d\textbf{M}_m(x)\right)=m-n+1$ for all $x\in\Sigma^1(\omega)\cap\mathcal{U}$.
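Note that, in the notation of Lemma \ref{eqloc2}, $\textbf{M}_i(x)=m_i(\omega(x))$ for $i=n,\ldots,m$; this observation is behind the computations below.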
Let $G(\omega)=\{(x,\omega_1(x),\ldots,\omega_n(x))\ | \ x\in M\}$ be the graph of the $n$-coframe $\omega$. By Lemma \ref{eqloc2}, we can consider an open neighborhood $\tilde{\mathcal{U}}$ in $T^{\ast}M^n$ with $\left(p,\omega(p)\right)\in \tilde{\mathcal{U}}$ and $\pi_x(\tilde{\mathcal{U}})=\mathcal{U}$, where $\pi_x\colon\mathbb{R}^m\times(\mathbb{R}^m)^n\rightarrow\mathbb{R}^m$ is the projection onto the first $m$ coordinates, so that the manifolds $T^{\ast}M^{n,n-1}$ and $G(\omega)$ can be locally defined as $$T^{\ast}M^{n,n-1}=\{(x,\varphi)\in \tilde{\mathcal{U}} \ | \ m_n=\ldots=m_m=0\},$$ with $\operatorname{rank}\left(d m_n,\ldots,d m_m\right)=m-n+1$ on $ T^{\ast}M^{n,n-1}\cap\tilde{\mathcal{U}}$, and \begin{equation*}\setlength{\arraycolsep}{0.06cm}{\begin{array}{cl}
G(\omega)&=\{(x,\varphi)\in \tilde{\mathcal{U}} \ | \ \varphi_j^{\ell}=\omega_j^{\ell}(x); \ j=1,\ldots,n; \ \ell=1,\ldots,m \}\\
&=\{(x,\varphi)\in \tilde{\mathcal{U}}\ | \ g_{\ell j}(x,\varphi)=0; \ j=1,\ldots,n; \ \ell=1,\ldots,m\},
\end{array}}
\end{equation*} with $\operatorname{rank} \left(d g_{11},\ldots,d g_{m1},\ldots,d g_{1n},\ldots,d g_{mn}\right)=nm$ on $G(\omega)\cap \tilde{\mathcal{U}}$, where the functions $g_{\ell j}\colon T^{\ast}M^n\rightarrow \mathbb{R}$ are given by $g_{\ell j}(x,\varphi)=\varphi_j^{\ell}-\omega_j^{\ell}(x)$.
Let $x\in\Sigma^1(\omega)\cap\mathcal{U}$. Then $G(\omega)\pitchfork T^{\ast}M^{n,n-1}$ at $(x,\omega(x))\in \tilde{\mathcal{U}}$ and, at this point, $\operatorname{rank} \left(d m_n,\ldots,d m_m,d g_{11},\ldots,d g_{mn}\right)=m-n+1+nm$. That is, the matrix \begin{equation}\label{matrizJAc}\left[\begin{array}{c}
dm_n\\
\vdots\\
dm_m\\
d g_{11}\\
\vdots\\
d g_{mn}
\end{array}\right]=\left[\setlength{\arraycolsep}{0.1cm}{
\begin{array}{ccccccc}
& & & \vdots & & d_{\varphi}m_n & \\
& O_{(m-n+1)\times m}& & \vdots & & \vdots & \\
& & & \vdots & & d_{\varphi}m_m & \\
\cdots&\cdots \ \cdots \ \cdots \ \cdots & \cdots & \vdots & \cdots & \cdots \ \cdots &\cdots \\
& -d_x \omega_{1}^{1} & & \vdots & & & \\
& \vdots & & \vdots & & Id_{(nm)} & \\
& -d_x \omega_{n}^{m} & & \vdots & & & \\
\end{array}}\right]
\end{equation} has maximal rank at $(x,\omega(x))$, where $O_{(m-n+1)\times m}$ is the null matrix of size $(m-n+1)\times m$, $Id_{(nm)}$ represents the identity matrix of order $nm$, and $d_x$ and $d_{\varphi}$ denote the derivatives with respect to $x=(x_1,\ldots,x_m)$ and $\varphi=(\varphi_1^1,\ldots,\varphi_1^m,\ldots,\varphi_n^1,\ldots,\varphi_n^m)$, respectively.
We have that Equation (\ref{gradmii}) of Lemma \ref{eqloc2} is the derivative of $m_i$ with respect to $\varphi$. Thus, we can write \begin{equation}\label{gradmi}
dm_i(\varphi)=\displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^\ell,m_i)f_{\ell j}}
\end{equation} for $I=\{1,\ldots,n\}$, $I_i=\{1,\ldots,n-1,i\}$ and $i=n,\ldots, m$, where $f_{\ell j}$ denotes the vector $f_{\ell j}=(0,\ldots,0,0\ldots,0,1,0,\ldots,0)\in \underbrace{(\mathbb{R}^m)^{\ast}\times\ldots\times(\mathbb{R}^m)^{\ast}}_{n+1 \text{ times }}$ whose coordinates are all zero, except at the position $jm+\ell$, for each $j\in I$ and $\ell\in I_i$.
We also have \begin{equation*}
d\textbf{M}_i(x)=\displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\omega_j^\ell(x),\textbf{M}_i(x))\,d_x \omega_j^{\ell}(x)}
\end{equation*} and, since $(x,\omega(x))\in G(\omega)\pitchfork T^{\ast}M^{n,n-1}$, we have $\omega_j^{\ell}(x)=\varphi_j^{\ell}$, so that \begin{equation}\label{gradMi}
d\textbf{M}_i(x)=\displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^{\ell},m_i)\,d_x \omega_j^{\ell}(x)}.
\end{equation}
Let us suppose that $\operatorname{rank}\left(d\textbf{M}_n(x),\ldots,d\textbf{M}_m(x)\right)<m-n+1$. Then there exists $(\alpha_n,\ldots,\alpha_m)\neq(0,\ldots,0)$ such that $$\displaystyle\sum_{i=n}^{m}{\alpha_id\textbf{M}_i(x)}=0.$$ Thus,
\begin{equation}\label{eqnula}0=\displaystyle\sum_{i=n}^{m}{\alpha_id\textbf{M}_i(x)}\overset{\left(\ref{gradMi}\right)}{=}
\displaystyle\sum_{i=n}^{m}{\alpha_i\left[ \ \displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^{\ell},m_i)\,d_x \omega_j^{\ell}(x)} \ \right]}.
\end{equation}
Let $d \tilde{\omega}_j^{\ell}(x)=(d_x \omega_j^{\ell}(x),0,\ldots,0)\in\underbrace{(\mathbb{R}^m)^{\ast}\times\ldots\times(\mathbb{R}^m)^{\ast}}_{n+1 \text{ times }}$. We have \begin{equation}\label{equaalpha}{\renewcommand{\arraystretch}{2}
\begin{array}{rl}
\displaystyle\sum_{i=n}^{m}\alpha_i\left[ \displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^{\ell},m_i)\,d g_{\ell j}}\right]\overset{\left(\ref{matrizJAc}\right)}{=}&\displaystyle\sum_{i=n}^{m}\alpha_i\left[\displaystyle\sum_{j\in I, \ell\in I_i}\operatorname{cof}(\varphi_j^{\ell},m_i)\left(f_{\ell j}-d\tilde{\omega}_j^{\ell}\right) \right]\\
\overset{\left(\ref{eqnula}\right)}{=}&\displaystyle\sum_{i=n}^{m}{\alpha_i\left[\displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^{\ell},m_i)f_{\ell j}} \right]}\\
\overset{\left(\ref{gradmi}\right)}{=}&\displaystyle\sum_{i=n}^{m}{\alpha_idm_i}.
\end{array}}
\end{equation}
On the other hand,
\begin{equation}\label{equabeta}
\displaystyle\sum_{i=n}^{m}\alpha_i\left[ \displaystyle\sum_{j\in I, \ell\in I_i}{\operatorname{cof}(\varphi_j^{\ell},m_i)\,d g_{\ell j}}\right]=\displaystyle\sum_{j\in I, \ell\in \{1,\ldots,m\}}\beta_{\ell j}\,d g_{\ell j},
\end{equation} where
\begin{equation*}\beta_{\ell j}=\left\{
\begin{array}{ll}
\displaystyle\sum_{i=n}^{m}{\alpha_i\operatorname{cof}(\varphi_j^{\ell},m_i)}, & j\in I, \ \ell=1,\ldots,n-1;\\
\alpha_{\ell}\operatorname{cof}(\varphi_j^{\ell},m_{\ell}), & j\in I, \ \ell=n,\ldots,m.
\end{array}\right.
\end{equation*} Since $(\alpha_n,\ldots,\alpha_m)\neq(0,\ldots,0)$, by Equations (\ref{equaalpha}) and (\ref{equabeta}) we obtain
\begin{equation*}
\displaystyle\sum_{j, \ell}\beta_{\ell j}\,d g_{\ell j}-\displaystyle\sum_{i=n}^{m}{\alpha_idm_i}=0,
\end{equation*} which is a non-trivial linear combination of the row vectors of the matrix (\ref{matrizJAc}). This is a contradiction, since $\operatorname{rank} \left(d m_n,\ldots,d m_m,d g_{11},\ldots,d g_{mn}\right)$ is maximal. Therefore, $\operatorname{rank}\left(d\textbf{M}_n(x),\ldots,d\textbf{M}_m(x)\right)=m-n+1.$
\end{proof}
\begin{lema}\label{nondegeneratexi} For almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, the 1-form $\xi(x)=\displaystyle\sum_{i=1}^{n}{a_i\omega_i(x)}$ admits only non-degenerate zeros. Moreover, such zeros belong to $A_1(\omega)$.
\end{lema}
\begin{proof} Suppose that $p\in M$ is a zero of $\xi$. Then, by Lemmas \ref{zeroszsobresigma1} and \ref{lemainterzeroscomsigma2}, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$ we have that $p\in\Sigma^1(\omega)\setminus\Sigma^2(\omega)$, that is, $p\in A_1(\omega)$. Assume that $\textbf{M}(x)\neq0$ in an open neighborhood $\mathcal{U}\subset M$ of $p$ (see Remark \ref{obsmatrixM}), so that $\mathcal{U}\cap\Sigma^1(\omega)=\{x\in\mathcal{U}: \textbf{M}_n(x)=\ldots=\textbf{M}_m(x)=0\}$.
Let us write $$\xi_s(x)=\displaystyle\sum_{i=1}^{n}{a_i\omega_i^s(x)}, \ s= 1,\ldots, m,$$ and let us consider the mapping $F\colon\mathcal{U}\times\mathbb{R}^n\setminus\{\vec{0}\}\rightarrow\mathbb{R}^m$ defined by $$F(x,a)=(\textbf{M}_n(x),\ldots, \textbf{M}_m(x), \xi_1(x), \ldots, \xi_{n-1}(x)).$$ Its Jacobian matrix at a point $(x,a)$ is given by
\begin{equation*}\operatorname{Jac} F(x,a)=\left[\setlength{\arraycolsep}{0.1cm}{\begin{array}{cccccc}
d_x\textbf{M}_n(x) & \vdots & &&& \\
\vdots & \vdots & &\multicolumn{2}{c}{O_{(m-n+1)\times n}}& \\
d_x\textbf{M}_m(x) & \vdots & &&& \\
\cdots \ \ \cdots \ \ \cdots& \vdots & \cdots \ \ \cdots & \cdots & \cdots \ \ \cdots \ \ \cdots& \cdots \ \ \cdots \\
d_x\xi_1(x) & \vdots & \omega_1^1(x) & \cdots & \omega_{n-1}^1(x) & \omega_n^1(x) \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
d_x\xi_{n-1}(x) & \vdots & \omega_1^{n-1}(x) & \cdots & \omega_{n-1}^{n-1}(x) & \omega_n^{n-1}(x) \\
\end{array}}\right].
\end{equation*} Note that, by Lemma \ref{lemaauxiliar}, $F^{-1}(\vec{0})$ corresponds to the zeros of $\xi$ on $\Sigma^1(\omega)\cap\mathcal{U}$. Since $\textbf{M}(x)\neq0$ and $\operatorname{rank}(d\textbf{M}_n(x), \ldots,d\textbf{M}_m(x))=m-n+1$ for all $x\in\Sigma^1(\omega)\cap\mathcal{U}$, we conclude that $\operatorname{rank}(\operatorname{Jac} F(x,a))=m$ for all $(x,a)\in F^{-1}(\vec{0})$. Thus, $\operatorname{dim} F^{-1}(\vec{0})=n$.
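Let us make the rank count explicit: the last $n$ columns of $\operatorname{Jac} F(x,a)$ vanish along the first $m-n+1$ rows, while the lower right block $\left(\omega_i^s(x)\right)_{1\leq s\leq n-1,\,1\leq i\leq n}$ contains the non-zero minor $\textbf{M}(x)$, so its $n-1$ rows are linearly independent. Hence, in any vanishing linear combination of the rows of $\operatorname{Jac} F(x,a)$, the coefficients of the last $n-1$ rows must vanish and, since $\operatorname{rank}(d\textbf{M}_n(x),\ldots,d\textbf{M}_m(x))=m-n+1$, so must the coefficients of the first $m-n+1$ rows; therefore $$\operatorname{rank}(\operatorname{Jac} F(x,a))=(m-n+1)+(n-1)=m \qquad\text{and}\qquad \operatorname{dim} F^{-1}(\vec{0})=(m+n)-m=n,$$ as claimed.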
Let $\pi\colon F^{-1}(\vec{0})\rightarrow\mathbb{R}^n\setminus\{\vec{0}\}$ be the projection $\pi(x,a)=a$. By Sard's Theorem, almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$ is a regular value of $\pi$ and $\operatorname{dim}(\pi^{-1}(a)\cap F^{-1}(\vec{0}))=0$. That is, for almost every $a$, the zeros of $\xi$ are isolated in $\Sigma^1(\omega)$. Let us prove that, moreover, these zeros are non-degenerate.
Since $\operatorname{rank}(\operatorname{Jac} F(x,a))=m$ for all $(x,a)\in F^{-1}(\vec{0})$, by Lemma \ref{lemadorank} we have that $\operatorname{rank}(d_x\textbf{M}_n(p), \ldots, d_x\textbf{M}_m(p), d_x\xi_1(p), \ldots, d_x\xi_{n-1}(p))=m$, which happens if and only if $\operatorname{rank}(B)=m$, where $B$ is the matrix
\begin{equation*}B=\left[\begin{array}{c}
d_x\xi_1(p) \\
\vdots \\
d_x\xi_{n-1}(p) \\
a_nd_x\textbf{M}_n(p) \\
\vdots \\
a_nd_x\textbf{M}_m(p) \\
\end{array}\right]
\end{equation*} whose row vectors we will denote by $R_i$, $i=1, \ldots,m$ (by Remark \ref{obsmatrixM}, $a_n\neq0$).
Let us denote $I=\{1,\ldots,n\}$ and $I_i=\{1,\ldots,n-1,i\}$ for each $i\in\{n,\ldots, m\}$. By Equation (\ref{expMi}), we can write $$d \textbf{M}_i(x)=\displaystyle\sum_{\ell\in I, j\in I_i}{\operatorname{cof}(\omega_{\ell}^j(x),M_i)\,d \omega_{\ell}^j(x)}$$ and, by Lemma \ref{lemadoscofatores}, $$d \textbf{M}_i(p)=\displaystyle\sum_{\ell\in I, j\in I_i}{\displaystyle\frac{a_{\ell}}{a_n}\operatorname{cof}(\omega_{n}^j(p),M_i)\,d \omega_{\ell}^j(p)}.$$ Thus,
\begin{equation*}\renewcommand{\arraystretch}{2}{\begin{array}{ccl}
a_nd \textbf{M}_i(p)&=&\displaystyle\sum_{\ell\in I, j\in I_i}{a_{\ell}\operatorname{cof}(\omega_{n}^j(p),M_i)\,d \omega_{\ell}^j(p)}\\
&=&\displaystyle\sum_{j\in I_i}{\operatorname{cof}(\omega_{n}^j(p),M_i)\left[\displaystyle\sum_{\ell\in I}a_{\ell}\,d \omega_{\ell}^j(p)\right]}\\
&=&\displaystyle\sum_{j\in I_i}{\operatorname{cof}(\omega_{n}^j(p),M_i)\left[d_x\xi_j(p)\right]}\\
&=&\operatorname{cof}(\omega_{n}^i(p),M_i)\left[d_x\xi_i(p)\right]+\displaystyle\sum_{j\in I_i\setminus\{i\}}{\operatorname{cof}(\omega_{n}^j(p),M_i)\left[d_x\xi_j(p)\right]}.
\end{array}}
\end{equation*} Note that $\operatorname{cof}(\omega_{n}^i(p),M_i)=\textbf{M}(p)\neq0$ for all $i=n, \ldots,m$. Then, for each $i=n, \ldots,m$, we replace the $i$-th row $R_i$ of the matrix $B$ by $$\displaystyle\frac{1}{\operatorname{cof}(\omega_n^i(p),M_i)}\left(R_i-\sum_{j=1}^{n-1}{\operatorname{cof}(\omega_n^j(p),M_i)R_j}\right)$$ so that we obtain the matrix of maximal rank:
\begin{equation*}\left[\begin{array}{c}
d_x\xi_1(p) \\
\vdots \\
d_x\xi_{n-1}(p) \\
d_x\xi_n(p) \\
\vdots \\
d_x\xi_m(p) \\
\end{array}\right].
\end{equation*} Therefore, the zeros of $\xi(x)$ are non-degenerate. \end{proof}
\begin{lema}\label{zerosnaodegdeak} For almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, the 1-form $\xi_{|_{A_k(\omega)}}$ admits only non-degenerate zeros, $k\geq2$.
\end{lema}
\begin{proof} Suppose that $\xi_{|_{A_k(\omega)}}(p)=0$. By Lemmas \ref{eqlocalSk} and \ref{eqloc3}, we can consider an open neighborhood $\mathcal{U}\subset M$ of $p$ where $\textbf{M}(x)\neq0$ and on which the respective singular sets ($k=2, \ldots, n$) can be locally defined as
\begin{equation*}\setlength{\arraycolsep}{0.06cm}{\begin{array}{lll}
\mathcal{U}\cap\Sigma^1(\omega)&=&\{x\in\mathcal{U}: \textbf{M}_n(x)=\ldots=\textbf{M}_m(x)=0\},\\
\mathcal{U}\cap\Sigma^k(\omega)&=&\{x\in\mathcal{U}: \textbf{M}_n(x)=\ldots=\textbf{M}_m(x)=\Delta_2(x)=\ldots=\Delta_k(x)=0\},
\end{array}}
\end{equation*} with
\begin{equation*}\setlength{\arraycolsep}{0.06cm}{\begin{array}{l}
\operatorname{rank}(d\textbf{M}_n, \ldots,d\textbf{M}_m)=m-n+1, \ \forall x\in\Sigma^1(\omega)\cap\mathcal{U},\\
\operatorname{rank}(d\textbf{M}_n, \ldots,d\textbf{M}_m,d\Delta_2,\ldots,d\Delta_k)=m-n+k, \ \forall x\in\Sigma^k(\omega)\cap\mathcal{U}.
\end{array}}
\end{equation*}
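Note that the number of local equations is consistent with these rank statements: $\Sigma^1(\omega)$ is cut out by the $m-n+1$ functions $\textbf{M}_n,\ldots,\textbf{M}_m$, while $\Sigma^k(\omega)$ is cut out by $(m-n+1)+(k-1)=m-n+k$ functions.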
Analogously to the proof of Lemma \ref{ptscrticrestasigmak}, by Szafraniec's characterization (see \cite[p. 196]{sza2}), $x$ is a zero of the restriction $\xi_{|_{\Sigma^k(\omega)}}$ if and only if there exists $(\lambda_n,\ldots,\lambda_m,\beta_2,\ldots,\beta_k)\in\mathbb{R}^{m-n+k}$ such that $$\xi(x)=\displaystyle\sum_{j=n}^{m}{\lambda_jd\textbf{M}_j(x)}+\displaystyle\sum_{\ell=2}^{k}{\beta_{\ell}d\Delta_{\ell}(x)}.$$ Let us consider the functions $$N_s(x,a,\lambda,\beta):=\xi_s(x)-\displaystyle\sum_{j=n}^{m}{\lambda_j\displaystyle\frac{\partial\textbf{M}_j}{\partial x_s}(x)}-\displaystyle\sum_{\ell=2}^{k}{\beta_{\ell}\displaystyle\frac{\partial\Delta_{\ell}}{\partial x_s}(x)}, \ s=1,\ldots, m,$$ so that $\xi_{|_{\Sigma^k(\omega)}}(x)=0$ if and only if $N_s(x,a,\lambda,\beta)=0$ for all $s=1,\ldots,m$.
Let $G\colon(\mathcal{U}\setminus\{\Delta_{k+1}=0\})\times\mathbb{R}^n\setminus\{\vec{0}\}\times\mathbb{R}^{m-n+k}\rightarrow\mathbb{R}^{2m-n+k}$ be the mapping $$G(x,a,\lambda,\beta)=(\textbf{M}_n,\ldots,\textbf{M}_m,\Delta_2,\ldots,\Delta_{k},N_1,\ldots,N_m).$$ Its Jacobian matrix at a point $(x,a,\lambda,\beta)\in G^{-1}(\vec{0})$ is given by
\begin{equation*}\operatorname{Jac} G(x,a,\lambda,\beta)=\left[\setlength{\arraycolsep}{0.12cm}{\begin{array}{ccccc}
d_x\textbf{M}_n(x) & \vdots & \\
\vdots & \vdots & && \\
d_x\textbf{M}_m(x) & \vdots &\multicolumn{3}{c}{\multirow{2}{*}{$O_{(m-n+k)\times(m+k)}$}} \\
d_x\Delta_2(x) & \vdots & && \\
\vdots & \vdots & && \\
d_x\Delta_{k}(x) & \vdots & && \\
\cdots \ \cdots \ \cdots \ \cdots \ \cdots & \vdots & \cdots \ \cdots & \cdots &\cdots \ \cdots \ \cdots \ \cdots \\
d_xN_1(x,a,\lambda,\beta) & \vdots & & \vdots & \\
\vdots & \vdots & \ B_{m\times n} & \vdots & C_{m\times(m-n+k)} \\
d_xN_m(x,a,\lambda,\beta) & \vdots & & \vdots &
\end{array}}\right],
\end{equation*} where $O_{(m-n+k)\times(m+k)}$ is a null matrix, $B_{m\times n}$ is the matrix whose column vectors are given by the coefficients of the 1-forms $\omega_1(x), \ldots, \omega_n(x)$,
and $C_{m\times(m-n+k)}$ is the matrix whose column vectors are, up to sign, the coefficients of the derivatives $d\textbf{M}_n,\ldots,d\textbf{M}_m,d\Delta_2,\ldots,d\Delta_{k}$ with respect to $x$:
\begin{equation*}C_{m\times(m-n+k)}=\left[\setlength{\arraycolsep}{0.1cm}{\begin{array}{cccccc}
-\displaystyle\frac{\partial\textbf{M}_n}{\partial x_1}(x) & \cdots & -\displaystyle\frac{\partial\textbf{M}_m}{\partial x_1}(x) & -\displaystyle\frac{\partial\Delta_2}{\partial x_1}(x) & \cdots & -\displaystyle\frac{\partial\Delta_k}{\partial x_1}(x)\\
\vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\
-\displaystyle\frac{\partial\textbf{M}_n}{\partial x_m}(x) & \cdots & -\displaystyle\frac{\partial\textbf{M}_m}{\partial x_m}(x) & -\displaystyle\frac{\partial\Delta_2}{\partial x_m}(x) & \cdots & -\displaystyle\frac{\partial\Delta_k}{\partial x_m}(x)\\
\end{array}}\right].
\end{equation*} Thus, if $(x,a,\lambda,\beta)\in G^{-1}(\vec{0})$, then $x\in\Sigma^{k}(\omega)\cap\mathcal{U}$, $\Delta_{k+1}(x)\neq0$ and $\xi_{|_{\Sigma^k(\omega)}}(x)=0$. Since $A_k(\omega)=\Sigma^k(\omega)\setminus\Sigma^{k+1}(\omega)$, we have $x\in A_k(\omega)\cap Z(\xi_{|_{\Sigma^k(\omega)}})$ for all $(x,a,\lambda,\beta)\in G^{-1}(\vec{0}).$
On the other hand, if $x\in A_k(\omega)$ then $\operatorname{dim}(\langle\bar{\omega}(x)\rangle \cap N_x^{\ast}\Sigma^{k-1}(\omega))=k-1$ and $\operatorname{dim}(\langle\bar{\omega}(x)\rangle \cap N_x^{\ast}\Sigma^{k}(\omega))=k-1$, so that $\operatorname{dim}(\langle\bar{\omega}(x)\rangle + N_x^{\ast}\Sigma^{k}(\omega))=m.$ Thus,
\begin{equation*}\operatorname{rank}\left[\begin{array}{ccccccc}
d_xN_1(x,a,\lambda,\beta) & \vdots & & & & \vdots & \\
\vdots & \vdots & &B_{m\times n} & & \vdots & C_{m\times(m-n+k)} \\
d_xN_m(x,a,\lambda,\beta) & \vdots & & & & \vdots & \\
\end{array}\right]=m
\end{equation*} and the Jacobian matrix of $G$ has maximal rank at every $(x,a,\lambda,\beta)\in G^{-1}(\vec{0})$. Therefore, $\operatorname{dim} G^{-1}(\vec{0})=(2m+k)-(2m-n+k)=n$. Let $\pi\colon G^{-1}(\vec{0})\rightarrow\mathbb{R}^n\setminus\{\vec{0}\}$ be the projection $\pi(x,a,\lambda,\beta)=a$. Then almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$ is a regular value of $\pi$. So, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $\operatorname{dim}(\pi^{-1}(a)\cap G^{-1}(\vec{0}))=0$ and $\pi^{-1}(a)\pitchfork G^{-1}(\vec{0})$. Therefore, the zeros of $\xi_{|_{A_k(\omega)}}$ are non-degenerate.
\end{proof}
\begin{lema}\label{zerosnaodegdeaum} For almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, the 1-form $\xi_{|_{A_1(\omega)}}$ admits only non-degenerate zeros.
\end{lema}
\begin{proof} The proof is analogous to that of Lemma \ref{zerosnaodegdeak}.
\end{proof}
By Lemma \ref{lemazerosrestricoes}, if $p\in A_{k+1}(\omega)$, then $p$ is a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if $p$ is a zero of $\xi_{|_{\Sigma^k(\omega)}}$. The next results state that this relation also holds for non-degenerate zeros.
\begin{lema}\label{nondegenerateequivalenceA1} Let $p\in A_1(\omega)$ be a zero of $\xi_{|_{\Sigma^{1}(\omega)}}$. Then $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{1}(\omega)}}$ if and only if $p$ is a non-degenerate zero of $\xi$.
\end{lema}
\begin{proof} Let $p\in A_1(\omega)$ be a zero of the restriction $\xi_{|_{\Sigma^{1}(\omega)}}$ and let $\mathcal{U}\subset M$ be an open neighborhood of $p$ on which $\textbf{M}(x)\neq0$
and $\mathcal{U}\cap\Sigma^1(\omega)=\{x\in\mathcal{U}: \textbf{M}_n(x)=\ldots=\textbf{M}_m(x)=0\}$.
By Szafraniec's characterization (\cite[p. 196]{sza2}), there exists a unique $(\lambda_n, \ldots,\lambda_m)\in\mathbb{R}^{m-n+1}$ such that $$\xi(p)+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i(p)}=0.$$ Furthermore, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{1}(\omega)}}$ if and only if the matrix
\begin{equation}\label{matrizinicial}\left[\renewcommand{\arraystretch}{1.7}{\setlength{\arraycolsep}{0.1cm}{\begin{array}{ccccc}
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_1}(p) & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_1}(p) \\
\operatorname{Jac}\left(\xi+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i}\right)(p) & \vdots & \vdots & \ddots & \vdots \\
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m}(p) & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m}(p) \\
\cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots & \vdots &\cdots \ \ \cdots&\cdots \ \ \cdots&\cdots \ \ \cdots\\
d_x\textbf{M}_n(p) & \vdots & & & \\
\vdots & \vdots & & O_{(m-n+1)} & \\
d_x\textbf{M}_m(p) & \vdots & & & \\
\end{array}}}\right]
\end{equation} is non-singular. Since $\xi(p)=0$, we have $p\in\Sigma^1(\omega)\cap\mathcal{U}$ and $\lambda_nd\textbf{M}_n(p)+\ldots+\lambda_md\textbf{M}_m(p)=\vec{0}$; thus $\lambda_n=\ldots=\lambda_m=0$ (the differentials $d\textbf{M}_n(p),\ldots,d\textbf{M}_m(p)$ being linearly independent by Lemma \ref{eqloc3}) and, writing $\xi=(\xi_1, \ldots, \xi_m)$, we have $$\operatorname{Jac}\left(\xi+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i}\right)(p)=\left[\begin{array}{c}d_x \xi_1(p)\\
\vdots\\
d_x \xi_m(p)
\end{array}\right].$$ This means that Matrix (\ref{matrizinicial}) is non-singular if and only if the matrix
\begin{equation}\label{matrizinicialxan}\left[\setlength{\arraycolsep}{0.1cm}{\renewcommand{\arraystretch}{1.6}{\begin{array}{ccccc}
d_x\xi_1(p) & \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_1}(p) & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_1}(p) \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
d_x\xi_m(p) & \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m}(p) & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m}(p) \\
\cdots \ \ \cdots \ \ \cdots \ \ \cdots & \vdots &\cdots \ \ \cdots&\cdots \ \ \cdots&\cdots \ \ \cdots\\
a_nd_x\textbf{M}_n(p) & \vdots & & & \\
\vdots & \vdots & & O_{(m-n+1)} & \\
a_nd_x\textbf{M}_m(p) & \vdots & & & \\
\end{array}}}\right]
\end{equation} is non-singular (by Remark \ref{obsmatrixM}, $a_n\neq0$). By Equation (\ref{expMi}), $$d_x \textbf{M}_i(x)=\displaystyle\sum_{\ell\in I, j\in I_i}{\operatorname{cof}(\omega_{\ell}^j(x),M_i)\,d \omega_{\ell}^j(x)},$$ and applying
Lemma \ref{lemadoscofatores} we obtain
\begin{equation*}\renewcommand{\arraystretch}{2}{\begin{array}{cl}
a_nd_x \textbf{M}_i(p)&=\displaystyle\sum_{\ell\in I, j\in I_i}{a_{\ell}\operatorname{cof}(\omega_{n}^j(p),M_i)\,d \omega_{\ell}^j(p)}\\
&=\displaystyle\sum_{j\in I_i}{\operatorname{cof}(\omega_{n}^j(p),M_i)\left[\displaystyle\sum_{\ell\in I}a_{\ell}\,d \omega_{\ell}^j(p)\right]}\\
&=\displaystyle\sum_{j\in I_i}{\operatorname{cof}(\omega_{n}^j(p),M_i)\left[d_x\xi_j(p)\right]}.
\end{array}}
\end{equation*}
Let us denote the first $m$ row vectors of Matrix (\ref{matrizinicialxan}) by $L_j$, $j=1, \ldots, m$, and the last $m-n+1$ row vectors of Matrix (\ref{matrizinicialxan}) by $R_i$, $i=n, \ldots, m$: $$\renewcommand{\arraystretch}{3}{\begin{array}{ccl}
L_j&=&\left(d_x \xi_j(p), \displaystyle\frac{\partial\textbf{M}_n}{\partial x_j}(p), \ldots, \displaystyle\frac{\partial\textbf{M}_m}{\partial x_j}(p)\right);\\
R_i&=&\left(a_n\displaystyle\frac{\partial\textbf{M}_i}{\partial x_1}(p), \ldots, a_n\displaystyle\frac{\partial\textbf{M}_i}{\partial x_m}(p), \vec{0}\right).
\end{array}}$$ We replace each row vector $R_i$, $i=n, \ldots, m$, by $R_i-\sum_{j\in I_i}{\operatorname{cof}(\omega_n^j, M_i)L_j}$, so that we obtain $$R_i=\left(\underbrace{0,\ldots\ 0}_{m \text{ times }},-\sum_{j\in I_i}{\operatorname{cof}(\omega_n^j,M_i)\displaystyle\frac{\partial\textbf{M}_n}{\partial x_j}}, \ldots, -\sum_{j\in I_i}{\operatorname{cof}(\omega_n^j,M_i)\displaystyle\frac{\partial\textbf{M}_m}{\partial x_j}}\right)$$ and Matrix (\ref{matrizinicialxan}) becomes
\begin{equation}\label{matrizcomMlinha}\left[\setlength{\arraycolsep}{0.1cm}{\begin{array}{ccccc}
d_x\xi_1(p) & \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_1}(p) & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_1}(p) \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
d_x\xi_m(p) & \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m}(p) & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m}(p) \\
\cdots \ \ \cdots \ \ \cdots & \vdots &\cdots \ \ \cdots&\cdots \ \ \cdots&\cdots \ \ \cdots\\
& \vdots & & & \\
O_{(m-n+1)\times m} & \vdots & & \textbf{M}'_{(m-n+1)} & \\
& \vdots & & & \\
\end{array}}\right],
\end{equation} where $$\textbf{M}'_{(m-n+1)}=-\left(\begin{array}{c}m_{ij}\end{array}\right)_{n\leq i,j\leq m}$$ is the matrix given by \begin{equation}\label{eqdosmij} m_{ij}=\sum_{k\in I_i}{\operatorname{cof}(\omega_n^k,M_i)\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}}, \ \ i,j=n, \ldots, m.\end{equation}
Next, we will verify that the matrix $\textbf{M}'$ is non-singular. Since $p\in A_1(\omega)$, we have $\operatorname{dim}(\langle\bar{\omega}(p)\rangle \cap N_p^{\ast}\Sigma^1(\omega))=0$ and $\operatorname{dim}(\langle\bar{\omega}(p)\rangle \oplus N_p^{\ast}\Sigma^1(\omega))=m.$ Since $\textbf{M}(p)\neq0$, $\{\omega_1(p), \ldots, \omega_{n-1}(p)\}$ is a basis of the space $\langle\bar{\omega}(p)\rangle$. Thus the matrix
\begin{equation}\label{matrizdeVseMis}\left[\renewcommand{\arraystretch}{2}{\begin{array}{cccccc}
\omega_1^1(p) & \cdots & \omega_1^{n-1}(p)& \omega_1^{n}(p)& \cdots & \omega_1^{m}(p)\\
\vdots & \ddots & \vdots & \vdots & \ddots & \vdots\\
\omega_{n-1}^1(p) & \cdots & \omega_{n-1}^{n-1}(p)& \omega_{n-1}^{n}(p)& \cdots & \omega_{n-1}^{m}(p)\\
\displaystyle\frac{\partial\textbf{M}_n}{\partial x_1}(p) & \cdots & \displaystyle\frac{\partial\textbf{M}_n}{\partial x_{n-1}}(p) & \displaystyle\frac{\partial\textbf{M}_n}{\partial x_n}(p) & \cdots & \displaystyle\frac{\partial\textbf{M}_n}{\partial x_m}(p)\\
\vdots & \ddots & \vdots & \vdots & \ddots & \vdots\\
\displaystyle\frac{\partial\textbf{M}_m}{\partial x_1}(p) & \cdots & \displaystyle\frac{\partial\textbf{M}_m}{\partial x_{n-1}}(p) & \displaystyle\frac{\partial\textbf{M}_m}{\partial x_n}(p) & \cdots & \displaystyle\frac{\partial\textbf{M}_m}{\partial x_m}(p)
\end{array}}\right]
\end{equation} has maximal rank. Let us denote the row vectors of Matrix (\ref{matrizdeVseMis}) by $L_j'$, $j=1, \ldots, m$. For $j=1, \ldots, n-1$, we replace $L_j'$ by \begin{equation}\label{novaslinhas} \sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^{j},M)L_k'}=\left(\sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^{j},M)\omega_k^1}, \ldots, \sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^{j},M)\omega_k^m}\right),\end{equation} where
\begin{equation*} \sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^j,M)\omega_k^{\ell}}=\left\{\begin{array}{ll}
\textbf{M}, & \ell=j;\\
0, & \ell=1, \ldots,n-1 \text{ and } \ell\neq j;\\
-\operatorname{cof}(\omega_n^{j},\textbf{M}_{\ell}), & \ell=n, \ldots, m.
\end{array}\right.
\end{equation*} Indeed,
\begin{itemize}
\item For $\ell=1, \ldots,n-1$ with $\ell=j$, we have $$\sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^{j},M)\omega_k^j}=\left| \setlength{\arraycolsep}{0.12cm}{\begin{array}{ccccc}
\omega_1^1 & \cdots & \omega_k^1 & \cdots & \omega_{n-1}^1\\
\vdots & \ddots & \vdots & \ddots & \vdots\\
\omega_1^{j} & \cdots & \omega_k^{j} & \cdots & \omega_{n-1}^{j}\\
\vdots & \ddots & \vdots & \ddots & \vdots\\
\omega_1^{n-1} & \cdots & \omega_k^{n-1} & \cdots & \omega_{n-1}^{n-1}\\
\end{array}}\right|=\textbf{M};$$
\item For $\ell=1, \ldots,n-1$ and $\ell\neq j$, we have $$\sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^{j},M)\omega_k^{\ell}}=\left| \setlength{\arraycolsep}{0.12cm}{\begin{array}{ccccc}
\omega_1^1 & \cdots & \omega_k^1 & \cdots & \omega_{n-1}^1\\
\vdots & \ddots & \vdots & \ddots & \vdots\\
\omega_1^{j-1} & \cdots & \omega_k^{j-1} & \cdots & \omega_{n-1}^{j-1}\\
\omega_1^{\ell} & \cdots & \omega_k^{\ell} & \cdots & \omega_{n-1}^{\ell}\\
\omega_1^{j+1} & \cdots & \omega_k^{j+1} & \cdots & \omega_{n-1}^{j+1}\\
\vdots & \ddots & \vdots & \ddots & \vdots\\
\omega_1^{n-1} & \cdots & \omega_k^{n-1} & \cdots & \omega_{n-1}^{n-1}\\
\end{array}}\right|=0,$$ because this is the determinant of a matrix with two equal rows.
\item For $\ell=n, \ldots, m$, we have $$\operatorname{cof}(\omega_n^{j},\textbf{M}_{\ell})=(-1)^{n+j}\left|\renewcommand{\arraystretch}{1.3}{\setlength{\arraycolsep}{3mm}{\begin{array}{ccc}
\omega_1^1 & \cdots & \omega_{n-1}^1\\
\vdots & \ddots & \vdots \\
\omega_1^{j-1} & \cdots & \omega_{n-1}^{j-1}\\
\omega_1^{j+1} & \cdots & \omega_{n-1}^{j+1}\\
\vdots & \ddots & \vdots\\
\omega_1^{n-1} & \cdots & \omega_{n-1}^{n-1}\\
\omega_1^{\ell} & \cdots & \omega_{n-1}^{\ell}\\
\end{array}}}\right|$$
$$\renewcommand{\arraystretch}{2.6}{\begin{array}{l}
=(-1)^{n+j}\displaystyle\sum_{k=1}^{n-1}{\omega_{k}^{\ell}(-1)^{n-1+k}}\left|\renewcommand{\arraystretch}{1.5}{\setlength{\arraycolsep}{2mm}{
\begin{array}{cccccc}
\omega_1^1 & \cdots & \omega_{k-1}^{1} & \omega_{k+1}^{1} & \cdots & \omega_{n-1}^1\\
\vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\
\omega_1^{j-1} & \cdots & \omega_{k-1}^{j-1} & \omega_{k+1}^{j-1} & \cdots & \omega_{n-1}^{j-1}\\
\omega_1^{j+1} & \cdots & \omega_{k-1}^{j+1} & \omega_{k+1}^{j+1} & \cdots & \omega_{n-1}^{j+1}\\
\vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\
\omega_1^{n-1} & \cdots & \omega_{k-1}^{n-1} & \omega_{k+1}^{n-1} & \cdots & \omega_{n-1}^{n-1}\\\end{array}}}\right|\\
= \displaystyle\sum_{k=1}^{n-1}{(-1)^{j-1+k}\omega_{k}^{\ell}(-1)^{j+k}\operatorname{cof}(\omega_k^{j},M)} = -\displaystyle\sum_{k=1}^{n-1}{\operatorname{cof}(\omega_k^{j},M)\omega_{k}^{\ell}}.\end{array}}$$
\end{itemize}
Thus, replacing the rows $L_j'$, for $j=1, \ldots, n-1$, Matrix (\ref{matrizdeVseMis}) becomes
\begin{equation}\label{matrizobtida}\left[\begin{array}{ccccccc}
\textbf{M} & \cdots & 0 & \vdots & -\operatorname{cof}(\omega_n^1,\textbf{M}_n)& \cdots & -\operatorname{cof}(\omega_n^1,\textbf{M}_m)\\
\vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots\\
0 & \cdots & \textbf{M} & \vdots & -\operatorname{cof}(\omega_n^{n-1},\textbf{M}_n)& \cdots & -\operatorname{cof}(\omega_n^{n-1},\textbf{M}_m)\\
\cdots \ & \cdots & \cdots & \vdots & \cdots \ \ \ \cdots \ \ \ \cdots & \cdots & \cdots \ \ \ \cdots \ \ \ \cdots\\
\displaystyle\frac{\partial \textbf{M}_n}{\partial x_1} & \cdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_{n-1}} & \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_n}& \cdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m}\\
\vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots\\
\displaystyle\frac{\partial \textbf{M}_m}{\partial x_1} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_{n-1}} & \vdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_n}& \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m}\\
\end{array}\right],
\end{equation} which still has maximal rank. Let us denote the first $n-1$ row vectors of Matrix (\ref{matrizobtida}) by $L_j''$, for $j=1, \ldots, n-1$, and let us consider the following expression for $j=n,\ldots, m$:
\footnotesize \begin{equation*}\renewcommand{\arraystretch}{3}{\begin{array}{l} \textbf{M} L_j'-\displaystyle\sum_{k=1}^{n-1}{\displaystyle\frac{\partial\textbf{M}_j}{\partial x_k}L_k''}\\
=\textbf{M}\left(\displaystyle\frac{\partial \textbf{M}_j}{\partial x_1}, \ldots, \displaystyle\frac{\partial \textbf{M}_j}{\partial x_{n-1}}, \displaystyle\frac{\partial \textbf{M}_j}{\partial x_n}, \ldots, \displaystyle\frac{\partial \textbf{M}_j}{\partial x_m}\right) \\
+\left(-\textbf{M}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_1}, \ldots, -\textbf{M}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_{n-1}}, \displaystyle\sum_{k=1}^{n-1}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}\operatorname{cof}(\omega_n^k,M_n), \ldots, \displaystyle\sum_{k=1}^{n-1}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}\operatorname{cof}(\omega_n^k,M_m) \right)\\
=\left(0, \ldots, 0, \displaystyle\sum_{k=1}^{n-1}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}\operatorname{cof}(\omega_n^k,M_n)+\textbf{M}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_n}, \ldots, \displaystyle\sum_{k=1}^{n-1}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}\operatorname{cof}(\omega_n^k,M_m)+\textbf{M}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_m} \right).\\
\end{array}}
\end{equation*} \normalsize Note that $\textbf{M}=\operatorname{cof}(\omega_n^i,\textbf{M}_i)$, for $i=n, \ldots,m$, so that the expression $$\textbf{M} L_j'-\displaystyle\sum_{k=1}^{n-1}{\displaystyle\frac{\partial\textbf{M}_j}{\partial x_k}L_k''}$$ is equal to $$\left(0, \ldots, 0, \displaystyle\sum_{k\in I_n}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}\operatorname{cof}(\omega_n^k,M_n), \ldots, \displaystyle\sum_{k\in I_m}\displaystyle\frac{\partial \textbf{M}_j}{\partial x_k}\operatorname{cof}(\omega_n^k,M_m)\right).$$ Thus, by Equation (\ref{eqdosmij}), we obtain $$\textbf{M} L_j'-\displaystyle\sum_{k=1}^{n-1}{\displaystyle\frac{\partial\textbf{M}_j}{\partial x_k}L_k''}=(0, \ldots, 0, m_{nj}, \ldots, m_{mj}).$$
For $j=n,\ldots, m$, we replace the row $L_j'$ in Matrix (\ref{matrizobtida}) by $$\textbf{M} L_j'-\displaystyle\sum_{k=1}^{n-1}{\displaystyle\frac{\partial\textbf{M}_j}{\partial x_k}L_k''},$$ so that the matrix obtained,
\begin{equation}\label{matrizobtidadois}\left[\setlength{\arraycolsep}{0.1cm}{\begin{array}{ccccccc}
\textbf{M} & \cdots & 0 & \vdots & -\operatorname{cof}(\omega_n^1,\textbf{M}_n)& \cdots & -\operatorname{cof}(\omega_n^1,\textbf{M}_m)\\
\vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots\\
0 & \cdots & \textbf{M} & \vdots & -\operatorname{cof}(\omega_n^{n-1},\textbf{M}_n)& \cdots & -\operatorname{cof}(\omega_n^{n-1},\textbf{M}_m)\\
\cdots \ & \cdots & \cdots & \vdots & \cdots \ \ \ \ \cdots \ \ \ \ \cdots & \cdots & \cdots \ \ \ \ \cdots \ \ \ \ \cdots\\
& & & \vdots & & & \\
& O_{(n-1)} & & \vdots & & (-\textbf{M}')^t & \\
& & & \vdots & & &\\
\end{array}}\right],
\end{equation} is also non-singular. So, since $\textbf{M}\neq0$, we have that $\det\textbf{M}'\neq0.$
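Indeed, Matrix (\ref{matrizobtidadois}) is block upper triangular, so its determinant equals $$\det\left(\textbf{M}\, Id_{(n-1)}\right)\det\left((-\textbf{M}')^t\right)=\textbf{M}^{\,n-1}(-1)^{m-n+1}\det\textbf{M}',$$ and the non-vanishing of this product, together with $\textbf{M}\neq0$, forces $\det\textbf{M}'\neq0$.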
Thus, we can conclude that Matrix (\ref{matrizinicialxan}) is non-singular if and only if Matrix (\ref{matrizcomMlinha}) is non-singular, which occurs if and only if $$\det\left[\begin{array}{c}
d_x\xi_1(p) \\
\vdots \\
d_x\xi_m(p) \\
\end{array}\right]\neq0.$$ Therefore, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^1(\omega)}}$ if and only if $p$ is a non-degenerate zero of $\xi$. \end{proof}
\begin{lema}\label{naodegkekmaisum} Let $p\in A_{k+1}(\omega)$ be a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$. Then, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{k}(\omega)}}$.
\end{lema}
\begin{proof} Let $p\in A_{k+1}(\omega)$ be a zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ and let $\mathcal{U}\subset M$ be an open neighborhood of $p$ on which $\textbf{M}(x)\neq0$ and the singular sets $\Sigma^k(\omega)$ ($k=2,\ldots,n$) are defined by $\mathcal{U}\cap\Sigma^k(\omega)=\{x\in\mathcal{U}: \textbf{M}_n(x)=\ldots=\textbf{M}_m(x)=\Delta_2(x)=\ldots=\Delta_k(x)=0\}$.
By Szafraniec's characterization (\cite[p. 196]{sza2}), $p$ is a zero of the restriction $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if there exists a unique $(\lambda_n,\ldots,\lambda_m,\beta_2,\ldots,\beta_{k+1})\in\mathbb{R}^{m-n+k+1}$ such that
\begin{equation*} \xi(p)+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i(p)}+\displaystyle\sum_{j=2}^{k+1}{\beta_{j}d\Delta_{j}(p)}=0.
\end{equation*} Since $p$ is a zero of $\xi_{|_{\Sigma^{k}(\omega)}}$, we have $\beta_{k+1}=0$. Moreover, also by Szafraniec's characterization, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$ if and only if the determinant of the following matrix does not vanish at $p$:
\footnotesize
\begin{equation}\label{matrizinicialcasok}\left[\setlength{\arraycolsep}{0.05cm}{\begin{array}{ccccccccc}
\multirow{3}{*}{$\operatorname{Jac}_{x}\left(\xi+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i}+\displaystyle\sum_{j=2}^{k}{\beta_{j}d\Delta_{j}}\right)$} & \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_1} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_1} & \displaystyle\frac{\partial \Delta_2}{\partial x_1} & \cdots & \displaystyle\frac{\partial \Delta_k}{\partial x_1}& \displaystyle\frac{\partial \Delta_{k+1}}{\partial x_1} \\
& \vdots & \vdots & \ddots & \vdots & \vdots & \ddots & \vdots & \vdots\\
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m} & \displaystyle\frac{\partial \Delta_2}{\partial x_m} & \cdots & \displaystyle\frac{\partial \Delta_k}{\partial x_m}& \displaystyle\frac{\partial \Delta_{k+1}}{\partial x_m} \\
\cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots & \vdots & \cdots & \cdots & \cdots \ \ \cdots & \cdots & \cdots & \cdots & \cdots \ \ \cdots\\
d_x\textbf{M}_n & \vdots & & & &&&&\\
\vdots & \vdots & & && && \\
d_x\textbf{M}_m & \vdots & & & &&& \\
d_x\Delta_2 & \vdots & & \multicolumn{5}{c}{O_{(m-n+k+1)}} &\\
\vdots & \vdots & & &&&& \\
d_x\Delta_{k+1} & \vdots & & & &&&\\
\end{array}}\right].
\end{equation}
\normalsize
Analogously, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^k(\omega)}}$ if and only if the determinant of the following matrix does not vanish at $p$:
\footnotesize
\begin{equation}\label{matrizinicialsemdeltakmais1}\left[\setlength{\arraycolsep}{0.05cm}{\begin{array}{cccccccc}
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_1} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_1} & \displaystyle\frac{\partial \Delta_2}{\partial x_1} & \cdots & \displaystyle\frac{\partial \Delta_k}{\partial x_1} \\
\operatorname{Jac}_x\left(\xi+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i}+\displaystyle\sum_{j=2}^{k}{\beta_{j}d\Delta_{j}}\right) & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots & \vdots \\
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m} & \displaystyle\frac{\partial \Delta_2}{\partial x_m} & \cdots & \displaystyle\frac{\partial \Delta_k}{\partial x_m} \\
\cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots \ \ \cdots & \vdots & \cdots & \cdots & \cdots \ \ \cdots & \cdots & \cdots & \cdots \\
d_x\textbf{M}_n & \vdots & & & &&&\\
\vdots & \vdots & & && & \\
d_x\textbf{M}_m & \vdots & & & && \\
d_x\Delta_2 & \vdots & \multicolumn{5}{c}{O_{(m-n+k)}} & \\
\vdots & \vdots & & &&& \\
d_x\Delta_k & \vdots & & & &&\\
\end{array}}\right].
\end{equation} \normalsize
Thus, to prove the lemma it is enough to show that Matrix (\ref{matrizinicialcasok}) is non-singular at $p$ if and only if Matrix (\ref{matrizinicialsemdeltakmais1}) is non-singular at $p$.
Note that the Jacobian matrix with respect to $x$, \begin{equation}\label{Jacmatrix}\operatorname{Jac}_x\left(\xi+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i}+\displaystyle\sum_{j=2}^{k}{\beta_{j}d\Delta_{j}}\right),\end{equation} is a submatrix of both Matrices (\ref{matrizinicialcasok}) and (\ref{matrizinicialsemdeltakmais1}). Recall also that, for $x$ in an open neighborhood of $p$, $\Delta_{k+1}=\det(d\textbf{M}_n, \ldots, d\textbf{M}_m,d\Delta_2,\ldots, d\Delta_k, \Omega_1,\ldots,\Omega_{n-k})$, where $\{\Omega_1(x),\ldots,\Omega_{n-k}(x)\}$ is a basis of a vector subspace supplementary to $\langle\bar{\omega}(x)\rangle \cap N_x^{\ast}\Sigma^{k-1}(\omega)$ in $\langle\bar{\omega}(x)\rangle$. Thus, $$\langle\bar{\omega}(x)\rangle =\langle\Omega_1(x),\ldots,\Omega_{n-k}(x)\rangle \oplus(\langle\bar{\omega}(x)\rangle \cap N_x^{\ast}\Sigma^{k-1}(\omega)).$$
Since, for almost every $a$, $\xi_{|_{\Sigma^{k-1}(\omega)}}(p)\neq0$, we have $\xi(p)\in\langle\bar{\omega}(p)\rangle \setminus N_p^{\ast}\Sigma^{k-1}(\omega)$. That is, there exists $(\mu_1, \ldots, \mu_{n-k})\in\mathbb{R}^{n-k}\setminus\{\vec{0}\}$ such that $$\xi(p)=\displaystyle\sum_{i=1}^{n-k}{\mu_i\Omega_i(p)}+\varphi(p)$$ for some $\varphi(p)\in N_p^{\ast}\Sigma^{k-1}(\omega)$, where $\varphi(p)=\displaystyle\sum_{i=n}^{m}{\tilde{\lambda_i}d\textbf{M}_i(p)}+\displaystyle\sum_{j=2}^{k-1}{\tilde{\beta_j}d\Delta_j(p)}$. Thus,
\begin{equation}\label{eqmi}
\xi(p)=\displaystyle\sum_{i=1}^{n-k}{\mu_i\Omega_i(p)}+\displaystyle\sum_{i=n}^{m}{\tilde{\lambda_i}d\textbf{M}_i(p)}+\displaystyle\sum_{j=2}^{k-1}{\tilde{\beta_j}d\Delta_j(p)},
\end{equation} and the expression $$\xi(p)+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i(p)}+\displaystyle\sum_{j=2}^{k}{\beta_{j}d\Delta_{j}(p)}$$ can be written as \begin{equation}\label{expressaodozmais}\displaystyle\sum_{i=1}^{n-k}{\mu_i\Omega_i(p)}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})d\textbf{M}_i(p)}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})d\Delta_j(p)}+\beta_k d\Delta_k(p).\end{equation}
Let us consider the mapping $$H(x)=\displaystyle\sum_{i=1}^{n-k}{\mu_i\Omega_i(x)}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})d\textbf{M}_i(x)}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})d\Delta_j(x)}+\beta_k d\Delta_k(x),$$ defined on an open neighborhood $\mathcal{U}\subset M$ of $p$, which is equal to $$
\resizebox{!}{1.15cm}{$
\displaystyle\sum_{i=1}^{n-k}{\mu_i\left[
\begin{array}{c}
\Omega_{i}^1\\
\vdots\\
\Omega_{i}^m
\end{array}\right]}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})\left[\begin{array}{c}
\displaystyle\frac{\partial\textbf{M}_i}{\partial x_1}\\
\vdots\\
\displaystyle\frac{\partial\textbf{M}_i}{\partial x_m}
\end{array}\right]}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})\left[\begin{array}{c}
\displaystyle\frac{\partial\Delta_j}{\partial x_1}\\
\vdots\\
\displaystyle\frac{\partial\Delta_j}{\partial x_m}
\end{array}\right]}+\beta_k\left[\begin{array}{c}
\displaystyle\frac{\partial\Delta_k}{\partial x_1}\\
\vdots\\
\displaystyle\frac{\partial\Delta_k}{\partial x_m}
\end{array}\right]$}
$$ and can be written as $$\left[\begin{array}{c}\displaystyle\sum_{i=1}^{n-k}{\mu_i\Omega_{i}^1}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})\displaystyle\frac{\partial\textbf{M}_i}{\partial x_1}}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})\displaystyle\frac{\partial\Delta_j}{\partial x_1}}+\beta_k\displaystyle\frac{\partial\Delta_k}{\partial x_1}\\
\vdots\\
\displaystyle\sum_{i=1}^{n-k}{\mu_i\Omega_i^m}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})\displaystyle\frac{\partial\textbf{M}_i}{\partial x_m}}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})\displaystyle\frac{\partial\Delta_j}{\partial x_m}}+\beta_k\displaystyle\frac{\partial\Delta_k}{\partial x_m}
\end{array}\right].$$ Then, the Jacobian matrix of $H(x)$ is given by
\footnotesize
\begin{equation}\label{matjacdaexpressao}\left[\begin{array}{c}\displaystyle\sum_{i=1}^{n-k}{\mu_id_x\Omega_{i}^1}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})d_x\displaystyle\frac{\partial\textbf{M}_i}{\partial x_1}}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})d_x\displaystyle\frac{\partial\Delta_j}{\partial x_1}}+\beta_kd_x\displaystyle\frac{\partial\Delta_k}{\partial x_1}\\
\vdots\\
\displaystyle\sum_{i=1}^{n-k}{\mu_id_x\Omega_{i}^m}+\displaystyle\sum_{i=n}^{m}{(\lambda_i+\tilde{\lambda_i})d_x\displaystyle\frac{\partial\textbf{M}_i}{\partial x_m}}+\displaystyle\sum_{j=2}^{k-1}{(\beta_j+\tilde{\beta_j})d_x\displaystyle\frac{\partial\Delta_j}{\partial x_m}}+\beta_kd_x\displaystyle\frac{\partial\Delta_k}{\partial x_m}
\end{array}\right].\end{equation}
\normalsize
To apply Lemma \ref{lematecnico}, we fix the notation $A_i(x)=\left(a_{1i}(x), \ldots, a_{mi}(x)\right)$, where
\begin{equation*}\begin{array}{l}
A_i(x):=\left\{\begin{array}{ll}
\Omega_i(x), & i=1, \ldots, n-k;\\
d\textbf{M}_i(x), & i=n, \ldots, m;\\
\end{array}\right.\\
\\
A_{n-k+j-1}(x):=d\Delta_j(x), \ \ j=2, \ldots, k; \\
\\
\alpha_i:=\left\{\begin{array}{ll}
\mu_i, & i=1, \ldots, n-k; \ (\text{we can suppose } \alpha_1\neq0, \text{ since } \xi(p)\neq \varphi(p))\\
(\lambda_i+\tilde{\lambda_i}), & i=n, \ldots, m;\\
\end{array}\right.\\
\\
\alpha_{n-k+j-1}:=(\beta_j+\tilde{\beta_j}), \ \ j=2, \ldots, k; \ \ (\tilde{\beta_k}=0).\\
\end{array}
\end{equation*}
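Observe that these indices exhaust $\{1,\ldots,m\}$: there are $n-k$ indices of the first kind, the indices $n-k+j-1$ with $j=2,\ldots,k$ run over $n-k+1,\ldots,n-1$, and the remaining $m-n+1$ indices are $n,\ldots,m$; altogether $(n-k)+(k-1)+(m-n+1)=m$, so $A_1,\ldots,A_m$ and $\alpha_1,\ldots,\alpha_m$ are well defined.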
Since $\xi(p)+\displaystyle\sum_{i=n}^{m}{\lambda_id\textbf{M}_i(p)}+\displaystyle\sum_{j=2}^{k+1}{\beta_{j}d\Delta_{j}(p)}=0$, by Expression (\ref{expressaodozmais}) we have $$\displaystyle\sum_{i=1}^{m}{\alpha_iA_i}(p)=0.$$ That is, $$\displaystyle\sum_{i=1}^{m}{\alpha_ia_{ji}(p)}=0, \ \forall j=1, \ldots, m.$$ Then, applying Lemma \ref{lematecnico} we know that \begin{equation}\alpha_1\operatorname{cof}(a_{ik}(p))-\alpha_k\operatorname{cof}(a_{i1}(p))=0, \ \forall i,k=1, \ldots, m.\end{equation}
We also have
\begin{equation*}\begin{array}{ll}
\Delta_{k+1}&=\det\left(d\textbf{M}_n, \ldots,d\textbf{M}_m, d\Delta_2, \ldots, d\Delta_k, \Omega_1, \ldots, \Omega_{n-k}\right)\\
&=\det\left(A_n, \ldots, A_m, A_{n-k+1}, \ldots, A_{n-1}, A_1, \ldots, A_{n-k}\right)\\
&=(-1)^{\varepsilon}\det\left(A_1, \ldots, A_m\right),
\end{array}\end{equation*}
where $\varepsilon$ is either $0$ or $1$, according to the parity of the number of permutations of column vectors needed to pass from the matrix $$\left(A_n, \ldots, A_m, A_{n-k+1}, \ldots, A_{n-1}, A_1, \ldots, A_{n-k}\right)$$ to the matrix $\left(A_1, \ldots, A_m\right)$. Thus,
\begin{equation*}\begin{array}{lcl}
(-1)^{\varepsilon}d\Delta_{k+1}&=&\displaystyle\sum_{i,j=1}^{m}\operatorname{cof}(a_{ij})\,d a_{ij}\\
&=&\displaystyle\sum_{i=1}^{m}\left(\operatorname{cof}(a_{i1})\,d a_{i1} + \sum_{j=2}^{m}\operatorname{cof}(a_{ij})\,d a_{ij}\right)\\
&\overset{\alpha_1\neq0}{=}&\displaystyle\sum_{i=1}^{m}\left(\operatorname{cof}(a_{i1})\,d a_{i1} + \sum_{j=2}^{m}\frac{\alpha_j}{\alpha_1}\operatorname{cof}(a_{i1})\,d a_{ij}\right),
\end{array}\end{equation*}
which implies that, for each $x\in\mathcal{U}$,
\begin{equation}\label{graddeltakmaisum}\begin{array}{ll}
(-1)^{\varepsilon}\alpha_1d\Delta_{k+1}&=\displaystyle\sum_{i=1}^{m}\left(\alpha_1\operatorname{cof}(a_{i1})\,d a_{i1} + \sum_{j=2}^{m}\alpha_j\operatorname{cof}(a_{i1})\,d a_{ij}\right)\\
&=\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\left[\sum_{j=1}^{m}\alpha_j\,d a_{ij}\right]\\
&=\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\,\mathcal{L}_i,
\end{array}\end{equation}
where $\mathcal{L}_i$, $i=1, \ldots, m$, denote the rows of the Jacobian matrix (\ref{matjacdaexpressao}). If we denote by $\tilde{L}_i$, $i=1, \ldots,m$, the row vectors of the Jacobian matrix (\ref{Jacmatrix}) at $p$, then we can verify that \begin{equation}\label{equalJacobian}\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\,\mathcal{L}_i=\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\,\tilde{L}_i.\end{equation}
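For orientation, the cofactor expansion used above can be checked by hand in the smallest case $m=2$ (a worked illustration added here, not part of the original argument). Writing $A=(a_{ij})_{i,j=1,2}$, so that $\operatorname{cof}(a_{11})=a_{22}$, $\operatorname{cof}(a_{12})=-a_{21}$, $\operatorname{cof}(a_{21})=-a_{12}$ and $\operatorname{cof}(a_{22})=a_{11}$, one has $$d\det(A)=a_{22}\,da_{11}-a_{21}\,da_{12}-a_{12}\,da_{21}+a_{11}\,da_{22}=\displaystyle\sum_{i,j=1}^{2}\operatorname{cof}(a_{ij})\,da_{ij},$$ which is the identity applied to $\det\left(A_1, \ldots, A_m\right)$ above.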
Let us denote the first $m$ row vectors of Matrix (\ref{matrizinicialcasok}) by $L_i$, $i=1, \ldots,m$, and its last row vector by $L_{\Delta_{k+1}}$. Based on Expressions (\ref{graddeltakmaisum}) at $p$ and (\ref{equalJacobian}), we replace the row vector $L_{\Delta_{k+1}}$ by \begin{equation}\label{operacaocomlinhas}(-1)^{\varepsilon}\alpha_1L_{\Delta_{k+1}}-\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})L_i,\end{equation} in order to obtain a new last row vector given by $$L_{\Delta_{k+1}}:=(\underbrace{0, \ldots, 0}_{m}, \gamma_n, \ldots, \gamma_m, \tilde{\gamma_2}, \ldots, \tilde{\gamma_k}, \tilde{\gamma_{k+1}}),$$ where \begin{equation*}\begin{array}{lll}
\gamma_j&=-\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\displaystyle\frac{\partial\textbf{M}_j}{\partial x_i}, & \forall j=n, \ldots, m;\\
\\
\tilde{\gamma_j}&=-\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\displaystyle\frac{\partial\Delta_j}{\partial x_i}, & \forall j=2, \ldots, k+1.
\end{array}\end{equation*} Note that, for $j=n, \ldots, m,$ $$\begin{array}{l}
\gamma_j=-\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\displaystyle\frac{\partial\textbf{M}_j}{\partial x_i}=-\left|\begin{array}{cccc}
\displaystyle\frac{\partial\textbf{M}_j}{\partial x_1} & a_{12} & \cdots & a_{1m}\\
\vdots & \vdots & \ddots & \vdots\\
\displaystyle\frac{\partial\textbf{M}_j}{\partial x_m} & a_{m2} & \cdots & a_{mm}
\end{array}\right|\\
\\
\Rightarrow\gamma_j=-\det\left(A_j, A_2, \ldots, A_m\right)=0.
\end{array}$$ For $j=2, \ldots, k$, $$\begin{array}{l}
\tilde{\gamma_j}=-\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\displaystyle\frac{\partial\Delta_j}{\partial x_i}=-\left|\begin{array}{cccc}
\displaystyle\frac{\partial\Delta_j}{\partial x_1} & a_{12} & \cdots & a_{1m}\\
\vdots & \vdots & \ddots & \vdots\\
\displaystyle\frac{\partial\Delta_j}{\partial x_m} & a_{m2} & \cdots & a_{mm}
\end{array}\right|\\
\\
\Rightarrow\tilde{\gamma_j}=-\det\left(A_{n-k+j-1}, A_2, \ldots, A_m\right)=0.
\end{array}$$
Therefore, after replacing the row vector $L_{\Delta_{k+1}}$ in Matrix (\ref{matrizinicialcasok}), we obtain
\footnotesize
\begin{equation}\label{matrizgammatil}\left[{\setlength{\arraycolsep}{0.04cm}\begin{array}{cccccccccc}
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_1} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_1} & \displaystyle\frac{\partial \Delta_2}{\partial x_1} & \cdots & \displaystyle\frac{\partial \Delta_k}{\partial x_1}& \vdots &\displaystyle\frac{\partial \Delta_{k+1}}{\partial x_1} \\
\operatorname{Jac}\left(\xi+\displaystyle\sum_{i=n}^{m}\lambda_i\,d\textbf{M}_i+\displaystyle\sum_{j=2}^{k}\beta_{j}\,d\Delta_{j}\right) & \vdots & \vdots & \ddots & \vdots & \vdots & \ddots & \vdots & \vdots &\vdots\\
& \vdots & \displaystyle\frac{\partial \textbf{M}_n}{\partial x_m} & \cdots & \displaystyle\frac{\partial \textbf{M}_m}{\partial x_m} & \displaystyle\frac{\partial \Delta_2}{\partial x_m} & \cdots & \displaystyle\frac{\partial \Delta_k}{\partial x_m}& \vdots &\displaystyle\frac{\partial \Delta_{k+1}}{\partial x_m} \\
\cdots \ \ \cdots \ \ \cdots \ \ \cdots & \vdots & \cdots & \cdots & \cdots & \cdots & \cdots & \cdots & \vdots & \cdots\\
d_x\textbf{M}_n & \vdots & & & &&&&\vdots &0\\
\vdots & \vdots & & & & &&&\vdots &\vdots \\
d_x\textbf{M}_m & \vdots & & & & &&&\vdots &0\\
d_x\Delta_2 & \vdots &\multicolumn{6}{c}{O_{(m-n+k-1)}} &\vdots & 0\\
\vdots & \vdots & & &&&&&\vdots & \vdots\\
d_x\Delta_k & \vdots & & & &&&&\vdots &0\\
\cdots \ \ \cdots \ \ \cdots \ \ \cdots & \vdots & \cdots & \cdots & \cdots & \cdots & \cdots & \cdots & \vdots & \cdots\\
\vec{0} & \vdots & \multicolumn{6}{c}{\vec{0}} & \vdots &\tilde{\gamma_{k+1}}\\
\end{array}}\right].
\end{equation} \normalsize
Let us show that $\tilde{\gamma_{k+1}}(p)\neq0$. We have
$$\begin{array}{cl}
\tilde{\gamma_{k+1}}&=-\displaystyle\sum_{i=1}^{m}\operatorname{cof}(a_{i1})\displaystyle\frac{\partial\Delta_{k+1}}{\partial x_i}\\
&=-\det(d\Delta_{k+1}, A_2, \ldots, A_{m})\\
&=-\det(d\Delta_{k+1}, \Omega_2, \ldots, \Omega_{n-k},d\Delta_2, \ldots, d\Delta_k, d\textbf{M}_n, \ldots, d\textbf{M}_m).
\end{array}$$ Suppose that $\tilde{\gamma_{k+1}}=0$. Since each of the sets $\{\Omega_2(p), \ldots, \Omega_{n-k}(p)\}$ and $\{d\Delta_{k+1}(p),d\Delta_2(p), \ldots, d\Delta_k(p), d\textbf{M}_n(p), \ldots, d\textbf{M}_m(p)\}$ consists of linearly independent vectors, there exists $j\in\{2, \ldots, n-k\}$ such that $\Omega_j(p)\in N_p^{\ast}\Sigma^{k+1}(\omega).$ We may suppose, without loss of generality, that $j=n-k$, that is, $$\Omega_{n-k}(p)\in N_p^{\ast}\Sigma^{k+1}(\omega)=\langle d\textbf{M}_n, \ldots, d\textbf{M}_m,d\Delta_2, \ldots, d\Delta_k,d\Delta_{k+1}\rangle .$$ Since $\xi_{|_{\Sigma^{k+1}}}(p)=0$, we have $\xi(p)\in N_p^{\ast}\Sigma^{k+1}(\omega)$. Thus, by Equation (\ref{eqmi}), we obtain
$$\begin{array}{ll}
& \displaystyle\sum_{i=1}^{n-k}\mu_i\Omega_i+ \underbrace{\displaystyle\sum_{i=n}^{m}\tilde{\lambda_i}\,d\textbf{M}_i+ \displaystyle\sum_{j=2}^{k-1}\tilde{\beta_j}\,d\Delta_j}_{\in N_p^{\ast}\Sigma^{k+1}(\omega)}\in N_p^{\ast}\Sigma^{k+1}(\omega)\\
& \Rightarrow \displaystyle\sum_{i=1}^{n-k-1}\mu_i\Omega_i=\displaystyle\sum_{i=1}^{n-k}\mu_i\Omega_i-\mu_{n-k}\Omega_{n-k}\in N_p^{\ast}\Sigma^{k+1}(\omega).
\end{array}$$ Therefore, $\displaystyle\sum_{i=1}^{n-k-1}\mu_i\Omega_i$ and $\mu_{n-k}\Omega_{n-k}$ are linearly independent vectors in the vector subspace $\langle\Omega_1, \ldots, \Omega_{n-k}\rangle \cap N_p^{\ast}\Sigma^{k+1}(\omega)$. That is, $$\operatorname{dim}\left(\langle\Omega_1(p), \ldots, \Omega_{n-k}(p)\rangle \cap N_p^{\ast}\Sigma^{k+1}(\omega)\right)\geq2.$$ Since $\langle\bar{\omega}\rangle =\langle\Omega_1, \ldots, \Omega_{n-k}\rangle \oplus\left(\langle\bar{\omega}\rangle \cap N_p^{\ast}\Sigma^{k-1}(\omega)\right),$ we have
$$\begin{array}{lcl}
\langle\bar{\omega}\rangle \cap N_p^{\ast}\Sigma^{k+1}(\omega)
&=&\langle\Omega_1, \ldots, \Omega_{n-k}\rangle \cap N_p^{\ast}\Sigma^{k+1}(\omega)\\
&\oplus&\langle\bar{\omega}\rangle \cap N_p^{\ast}\Sigma^{k-1}(\omega).\end{array}$$ That is, $$\operatorname{dim}\left(\langle\bar{\omega}(p)\rangle \cap N_p^{\ast}\Sigma^{k+1}(\omega)\right)\geq 2+(k-1)=k+1,$$ which means that $p\in\Sigma^{k+2}(\omega)$. But this is a contradiction, since $p\in A_{k+1}(\omega)$ by hypothesis and $\Sigma^{k+2}(\omega)=\Sigma^{k+1}(\omega)\setminus A_{k+1}(\omega)$. Therefore $\tilde{\gamma_{k+1}}(p)\neq0$.\\
Thus, we conclude that Matrix (\ref{matrizinicialcasok}) is non-singular at $p$ if and only if Matrix (\ref{matrizgammatil}) is non-singular at $p$, which occurs if and only if Matrix (\ref{matrizinicialsemdeltakmais1}) is non-singular at the point $p$.
\end{proof}
\begin{lema}\label{naodegan} For almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, if $p\in A_n(\omega)$ then $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{n-1}(\omega)}}$.
\end{lema}
\begin{proof} We know that if $p\in A_n(\omega)$ then $\xi_{|_{\Sigma^{n-1}(\omega)}}(p)=0$. By Szafraniec's characterization \cite[p.~149--151]{sza1}, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{n-1}(\omega)}}$ if and only if the following conditions hold:
\begin{enumerate}[$(i)$]
\item $\Delta(p)=\det(d\textbf{M}_n, \ldots,d\textbf{M}_m,d\Delta_2,\ldots,d\Delta_{n-1},\xi)(p)=0$;\\
\item $\det(d\textbf{M}_n, \ldots,d\textbf{M}_m,d\Delta_2,\ldots,d\Delta_{n-1},d\Delta)(p)\neq0$.
\end{enumerate}
Condition $(i)$ is clearly satisfied, since $\xi_{|_{\Sigma^{n-1}(\omega)}}(p)=0$. Let us verify that condition $(ii)$ also holds.
For each $x\in\Sigma^{n-1}(\omega)$ in an open neighborhood $\mathcal{U}\subset M$ of $p$, let $\{\Omega'(x)\}$ be a smooth basis for a vector subspace supplementary to $\langle\bar{\omega}(x)\rangle \cap N_x^{\ast}\Sigma^{n-2}(\omega)$ in the vector space $\langle\bar{\omega}(x)\rangle $. Since $\xi(x)\in\langle\bar{\omega}(x)\rangle $, we have $$\xi(x)=\lambda(x)\Omega'(x)+\varphi(x),$$ where $\lambda(x)\in\mathbb{R}$ and $\varphi(x)\in \langle\bar{\omega}(x)\rangle \cap N_x^{\ast}\Sigma^{n-2}(\omega)$, $\forall x\in\mathcal{U}\cap\Sigma^{n-1}(\omega)$.
In particular, if $x\in A_n(\omega)$, we know that, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $\xi_{|_{\Sigma^{n-2}(\omega)}}(x)\neq0$ and, consequently, $\xi(x)\notin N_x^{\ast}\Sigma^{n-2}(\omega)$. Thus $\lambda(p)\neq0$. For all $x\in\mathcal{U}\cap\Sigma^{n-1}(\omega)$, we obtain $$\begin{array}{ccl}
\Delta(x)&=&\det(d\textbf{M}_n, \ldots,d\textbf{M}_m,d\Delta_2,\ldots,d\Delta_{n-1},\lambda \Omega'+\varphi)(x)\\
&=&\lambda(x)\det(d\textbf{M}_n, \ldots,d\textbf{M}_m,d\Delta_2,\ldots,d\Delta_{n-1},\Omega')(x)\\
&=&\lambda(x)\Delta_n(x),
\end{array}$$ with $\Delta_n(p)=0$ and $\lambda(p)\neq0$. Then, by Lemma \ref{lematecnicodois} we have $$\begin{array}{l}\langle d\textbf{M}_n(p), \ldots, d\textbf{M}_m(p), d\Delta_2(p), \ldots, d\Delta_{n-1}(p), d\Delta(p)\rangle \\
=\langle d\textbf{M}_n(p), \ldots, d\textbf{M}_m(p), d\Delta_2(p), \ldots, d\Delta_{n-1}(p), d(\lambda\Delta_n)(p)\rangle .
\end{array}$$ However, $d(\lambda\Delta_n)(x)=d\lambda(x)\Delta_n(x)+\lambda(x)d\Delta_n(x)$, $\Delta_n(p)=0$ and $\lambda(p)\neq0$. Thus, $$\begin{array}{l}\langle d\textbf{M}_n(p), \ldots, d\textbf{M}_m(p), d\Delta_2(p), \ldots, d\Delta_{n-1}(p), d\Delta(p)\rangle \\
=\langle d\textbf{M}_n(p), \ldots, d\textbf{M}_m(p), d\Delta_2(p), \ldots, d\Delta_{n-1}(p), d\Delta_n(p)\rangle .
\end{array}$$ Therefore, $\det(d\textbf{M}_n(p), \ldots, d\textbf{M}_m(p), d\Delta_2(p), \ldots, d\Delta_{n-1}(p), d\Delta(p))\neq0$.
\end{proof}
\begin{lema}\label{nondegeneratexisk} For almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, the 1-form $\xi_{|_{\Sigma^k(\omega)}}$ admits only non-degenerate zeros, $k\geq1$.
\end{lema}
\begin{proof} Suppose that $\xi_{|_{\Sigma^k(\omega)}}(p)=0$. Then, for almost every $a\in\mathbb{R}^n\setminus\{\vec{0}\}$, $p\in A_{k}(\omega)\cup A_{k+1}(\omega)$, since $Z(\xi_{|_{\Sigma^k(\omega)}})\cap\Sigma^{k+2}(\omega)=\emptyset$ by Lemma \ref{ptscrticrestasigmak} and $\Sigma^{k}(\omega)=A_k(\omega)\cup A_{k+1}(\omega)\cup\Sigma^{k+2}(\omega)$.
If $p\in A_k(\omega)$ then $\xi_{|_{A_k(\omega)}}(p)=0$. Since $\xi_{|_{A_k(\omega)}}$ admits only non-degenerate zeros and $A_k(\omega)\subset\Sigma^k(\omega)$ is an open subset, we conclude that $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^k(\omega)}}$.
If $p\in A_{k+1}(\omega)$ and $k< n-1$ then $\xi_{|_{\Sigma^{k+1}(\omega)}}(p)=0$. In particular, since $A_{k+1}(\omega)\subset\Sigma^{k+1}(\omega)$ is an open subset, $\xi_{|_{A_{k+1}(\omega)}}(p)=0$. By Lemmas \ref{zerosnaodegdeaum} and \ref{zerosnaodegdeak}, $\xi_{|_{A_{k+1}(\omega)}}$ admits only non-degenerate zeros, and since $A_{k+1}(\omega)$ is an open subset of $\Sigma^{k+1}(\omega)$, we conclude that $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{k+1}(\omega)}}$. Therefore, by Lemma \ref{naodegkekmaisum}, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^k(\omega)}}$. Finally, if $p\in A_{n}(\omega)$, by Lemma \ref{naodegan}, $p$ is a non-degenerate zero of $\xi_{|_{\Sigma^{n-1}(\omega)}}$.
\end{proof}
\begin{teo}\label{fukudaparacampos} Let $\omega=(\omega_1, \ldots, \omega_n)$ be a Morin $n$-coframe defined on an $m$-dimensional compact manifold $M$. Then, $$\chi(M)\equiv\displaystyle\sum_{k=1}^{n}\chi(\overline{A_k(\omega)}) \mod 2.$$
\end{teo}
\begin{proof} Let us denote by $Z(\varphi)$ the set of zeros of a 1-form $\varphi$ and by $\#Z(\varphi)$ the number of elements of this set, whenever $Z(\varphi)$ is finite. Let $$\xi(x)=\displaystyle\sum_{i=1}^{n}a_i\omega_i(x)$$ be a 1-form with $a=(a_1, \ldots, a_n)\in\mathbb{R}^n\setminus\{\vec{0}\}$ satisfying the generic conditions of the previous lemmas of Sections 3 and 4.
Since $M$ is compact and the submanifolds $\Sigma^k(\omega)$ are closed in $M$, by the Poincaré-Hopf Theorem for 1-forms we obtain \begin{itemize}
\item $\chi(M)\equiv\#Z(\xi) \mod 2$;
\item $\chi(\overline{A_k(\omega)})=\chi(\Sigma^k(\omega))\equiv\#Z(\xi_{|_{\Sigma^k(\omega)}}) \mod 2$, for $k=1, \ldots, n-1$;
\item $\chi(\overline{A_n(\omega)})=\chi(\Sigma^n(\omega))\equiv\#Z(\xi_{|_{\Sigma^n(\omega)}}) \mod 2$.
\end{itemize}
By Lemma \ref{zeroszsobresigma1}, if $p\in Z(\xi)$ then $p\in\Sigma^1(\omega)$ and $\xi_{|_{\Sigma^1(\omega)}}(p)=0$. Moreover, by Lemma \ref{lemainterzeroscomsigma2}, $Z(\xi)\cap\Sigma^2(\omega)=\emptyset$. Thus $p\in A_1(\omega)$. On the other hand, Lemma \ref{lemazerosrestricoes} shows that if $p\in Z(\xi_{|_{\Sigma^1(\omega)}})\cap A_1(\omega)$ then $p$ is also a zero of the 1-form $\xi$. Thus, $$\#Z(\xi)\equiv\#Z(\xi_{|_{\Sigma^1(\omega)}}\cap A_1(\omega)) \mod 2.$$
By Lemma \ref{ptscrticrestasigmak}, if $p\in Z(\xi_{|_{\Sigma^k(\omega)}})$ then $p\notin\Sigma^{k+2}(\omega)$. Thus, $p\in A_k(\omega)\cup A_{k+1}(\omega)$ and, for $k=1, \ldots, n-1$, we have $$\#Z(\xi_{|_{\Sigma^k(\omega)}})\equiv\#Z(\xi_{|_{\Sigma^k(\omega)}}\cap A_k(\omega))+ \#Z(\xi_{|_{\Sigma^k(\omega)}}\cap A_{k+1}(\omega))\mod 2.$$
By Lemma \ref{lemazerosrestricoes}, we also have $$\#Z(\xi_{|_{\Sigma^k(\omega)}}\cap A_{k+1}(\omega))=\#Z(\xi_{|_{\Sigma^{k+1}(\omega)}}\cap A_{k+1}(\omega))$$ and, by Lemma \ref{ptosansaozeros}, $$\#A_n(\omega)=\#Z(\xi_{|_{\Sigma^{n-1}(\omega)}}\cap A_{n}(\omega)).$$
Then, \begin{itemize}
\item $\chi(M)\equiv\#Z(\xi_{|_{\Sigma^1(\omega)}}\cap A_1(\omega)) \mod 2$;
\item For $k=1, \ldots, n-1$, $$\chi(\overline{A_k(\omega)})\equiv\#Z(\xi_{|_{\Sigma^k(\omega)}}\cap A_k(\omega))+ \#Z(\xi_{|_{\Sigma^{k+1}(\omega)}}\cap A_{k+1}(\omega))\mod 2;$$
\item $\chi(\overline{A_n(\omega)})=\#Z(\xi_{|_{\Sigma^{n-1}(\omega)}}\cap A_{n}(\omega))$.
\end{itemize}
Therefore, $$\begin{array}{lll}
\chi(M)+\displaystyle\sum_{k=1}^{n}\chi(\overline{A_k(\omega)})&\equiv&2\,\#Z(\xi_{|_{\Sigma^1(\omega)}}\cap A_1(\omega))\\
&+&2\,\#Z(\xi_{|_{\Sigma^2(\omega)}}\cap A_2(\omega))+\ldots\\
&+&2\,\#Z(\xi_{|_{\Sigma^{n-1}(\omega)}}\cap A_{n-1}(\omega))\\
&+&2\,\#Z(\xi_{|_{\Sigma^{n-1}(\omega)}}\cap A_{n}(\omega))\mod 2\\
&\equiv& 0\mod 2.
\end{array}$$\end{proof}
As for the definition of Morin $n$-coframes, the results presented in Sections \ref{s2} and \ref{s3} of this paper can also be naturally adapted to the context of $n$-frames. In particular, the main theorems used here, such as the Poincaré-Hopf Theorem and Szafraniec's characterization, have their respective versions for vector fields.
Finally, we end the paper with a very simple example. Let us verify that Theorem \ref{fukudaparacampos} indeed holds for the Morin $2$-frame $V=(V_1,V_2)$ presented in Example \ref{ex3}. To do this, it is enough to see that the torus $T$ is a compact manifold with $\chi(T)=0$. Moreover, $\overline{A_1(V)}=\Sigma^1(V)$ is given by two circles in $\mathbb{R}^3$ and $\overline{A_2(V)}$ consists of four points, so that $\chi(\overline{A_1(V)})=0$ and $\chi(\overline{A_2(V)})=4$. Therefore, $$\chi(T)\equiv \chi(\overline{A_1(V)})+\chi(\overline{A_2(V)}) \mod 2.$$
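For the record, the explicit arithmetic behind the last congruence (spelled out here only for convenience) is $$\chi(\overline{A_1(V)})+\chi(\overline{A_2(V)})=0+4=4\equiv 0=\chi(T) \mod 2.$$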
\begin{thebibliography}{10}
\bibitem{Ando}
Yoshifumi Ando.
\newblock On local structures of the singularities {$A_k\;D_k$} and {$E_k$} of
smooth maps.
\newblock {\em Trans. Amer. Math. Soc.}, 331(2):639--651, 1992.
\bibitem{Dutertrefukui}
Nicolas Dutertre and Toshizumi Fukui.
\newblock On the topology of stable maps.
\newblock {\em J. Math. Soc. Japan}, 66(1):161--203, 2014.
\bibitem{Eliasberg}
Ja~M \`Elia\v{s}berg.
\newblock On singularities of folding type.
\newblock {\em Mathematics of the USSR-Izvestiya}, 4(5):1119, 1970.
\bibitem{Fukuda}
Takuo Fukuda.
\newblock Topology of folds, cusps and {M}orin singularities.
\newblock In {\em A f\^ete of topology}, pages 331--353. Academic Press,
Boston, MA, 1988.
\bibitem{InaIshiKawaThang}
Kazumasa {Inaba}, Masaharu {Ishikawa}, Masayuki {Kawashima}, and Tat {Thang
Nguyen}.
\newblock {On linear deformations of Brieskorn singularities of two variables
into generic maps}.
\newblock {\em ArXiv e-prints}, November 2014.
\bibitem{KalmarTerpai}
Boldizs{\'a}r Kalm{\'a}r and Tam{\'a}s Terpai.
\newblock Characteristic classes and existence of singular maps.
\newblock {\em Trans. Amer. Math. Soc.}, 364(7):3751--3779, 2012.
\bibitem{Morin2}
Bernard Morin.
\newblock Formes canoniques des singularit\'{e}s d'une application
diff\'{e}rentiable.
\newblock {\em Comptes Rendus Hebdomadaires des s\'{e}ances de l'Acad\'{e}mie
des Sciences}, 260(25):6503--6506, 1965.
\bibitem{Quine}
J.~R. Quine.
\newblock A global theorem for singularities of maps between oriented
{$2$}-manifolds.
\newblock {\em Trans. Amer. Math. Soc.}, 236:307--314, 1978.
\bibitem{Ruiz1}
Camila~M. {Ruiz}.
\newblock {A new proof of a theorem of Dutertre and Fukui on Morin
singularities}.
\newblock {\em ArXiv e-prints}, February 2016.
\bibitem{Saeki}
Osamu Saeki.
\newblock Studying the topology of {M}orin singularities from a global
viewpoint.
\newblock {\em Math. Proc. Cambridge Philos. Soc.}, 117(2):223--235, 1995.
\bibitem{SaekiSakuma}
Osamu Saeki and Kazuhiro Sakuma.
\newblock Maps with only {M}orin singularities and the {H}opf invariant one
problem.
\newblock {\em Math. Proc. Cambridge Philos. Soc.}, 124(3):501--511, 1998.
\bibitem{Saji1}
Kentaro {Saji}.
\newblock {Criteria for Morin singularities into higher dimensions}.
\newblock {\em ArXiv e-prints}, December 2014.
\bibitem{Saji3}
Kentaro {Saji}.
\newblock {Criteria for Morin singularities for maps into lower dimensions, and
applications}.
\newblock {\em ArXiv e-prints}, October 2015.
\bibitem{Saji2}
Kentaro {Saji}.
\newblock {Isotopy of Morin singularities}.
\newblock {\em ArXiv e-prints}, October 2015.
\bibitem{SzaboSzucsTerpai}
Endre Szab{\'o}, Andr{\'a}s Sz{\H{u}}cs, and Tam{\'a}s Terpai.
\newblock On bordism and cobordism groups of {M}orin maps.
\newblock {\em J. Singul.}, 1:134--145, 2010.
\bibitem{sza2}
Zbigniew Szafraniec.
\newblock The {E}uler characteristic of algebraic complete intersections.
\newblock {\em J. Reine Angew. Math.}, 397:194--201, 1989.
\bibitem{sza1}
Zbigniew Szafraniec.
\newblock A formula for the {E}uler characteristic of a real algebraic
manifold.
\newblock {\em Manuscripta Math.}, 85(3-4):345--360, 1994.
\bibitem{Szucs}
Andr{\'a}s Sz{\"u}cs.
\newblock On the cobordism groups of cooriented, codimension one {M}orin maps.
\newblock {\em J. Singul.}, 4:196--205, 2012.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Specht modules with abelian vertices}
\author{Kay Jin Lim}
\address{Department of Mathematics, National University of Singapore, Block S17, 10 Lower Kent Ridge Road, Singapore 119076.}
\email{[email protected]}
\date{January 2011}
\thanks{Supported by MOE Academic Research Fund R-146-000-135-112.}
\subjclass[2000]{20C20, 20C30}
\begin{abstract} In this article, we consider indecomposable Specht modules with abelian vertices. We show that the corresponding partitions are necessarily $p^2$-cores, where $p$ is the characteristic of the underlying field. Furthermore, in the case where $p\geq 3$, or where $p=2$ and $\mu$ is $2$-regular, we show that the complexity of the Specht module $S^\mu$ is precisely the $p$-weight of the partition $\mu$. In the case $p=2$ with $\mu$ $2$-regular, we classify the Specht modules with abelian vertices. As applications of the above results, we extend a result of M. Wildon and compute the vertices of the Specht module $S^{(p^p)}$ for $p\geq 3$.
\end{abstract}
\maketitle
\section{Introduction} The representation theory of symmetric groups has been one of the major research areas since early in the last century. The theory is well developed, and yet it seems that very little is known in the modular case. One way of understanding the structure of representations of finite groups is through the notion of relative projectivity, in terms of which J. A. Green \cite{JG1} defined the vertices of modules of finite groups about fifty years ago. The vertices of modules are, in some way, related to the complexity of modules defined by J. Alperin and L. Evens \cite{JALE}, and to the rank variety defined by J. Carlson \cite{JC}.
Classically, Young modules, Specht modules and simple modules are the major objects extensively studied in the representation theory of symmetric groups. The computation of the vertices of Young modules has been done by J. Grabmeier \cite{JGrab}. The vertices of signed Young modules have been computed by S. Donkin \cite{SD}. However, the vertices of simple modules and Specht modules remain mostly unknown. The vertices of Specht modules of hook shape in the case $p=2$ were first considered by G. M. Murphy and M. H. Peel in \cite{GMMP}, with a mistake which was later corrected by M. Wildon \cite{MW}. Wildon has made further progress on the computation of the vertices of Specht modules: he computed the vertices of simple Specht modules of hook shape \cite{MW} and showed that, in general, a vertex of a Specht module contains some large $p$-subgroup \cite{MW2}. Recently, some computations of the vertices of simple modules have been made by S. Danz, B. K\"{u}lshammer, J. M\"{u}ller and R. Zimmermann \cite{SDBKRZ,JMRZ}.
It is well known that, in general, if the defect groups of an indecomposable module are abelian then its vertices are necessarily abelian too. In particular, for the representations of symmetric groups in the modular case, representations with abelian defect correspond to partitions of $p$-weight strictly less than the characteristic $p$ of the underlying field. However, there are examples of Specht modules of hook shape whose vertices are abelian even though their defect groups are not. For example, for $p=3$ the Specht module $S^{(7,1^3)}$ has vertices the Sylow $3$-subgroups of $\sym{6}\times \sym{3}$ (see \cite[Theorem 2]{MW}) but defect groups the Sylow $3$-subgroups of $\sym{9}$.
In this article, we shall mainly be concerned with Specht modules with abelian vertices. Motivated by the question of classifying these Specht modules, we give some necessary conditions for such Specht modules. Indeed, for $p\geq 3$, we show that no abelian subgroups other than the elementary abelian ones can be the vertices of Specht modules. For $p=2$, we cannot give as definite an answer as in the odd characteristic case unless the corresponding partitions are $2$-regular. Under the hypothesis that the vertices of the Specht modules are abelian, a class of partitions arises naturally, namely the $p^2$-core partitions. In fact, for $p=2$, we show that a $2$-regular partition $\mu$ is a $4$-core if and only if the Specht module $S^\mu$ has elementary abelian vertices. For all the Specht modules mentioned above, we conclude that their complexities are precisely the $p$-weights of the partitions labelling them.
We organize the article in the following way. In \S \ref{S: notation}, we lay down some basic facts about the representations of symmetric groups, the complexities and the rank varieties of modules. We state our main results in \S \ref{S: main results} and prove them in \S \ref{S: proofs}. In \S \ref{S: consequences}, we draw some consequences of the main results, in which we generalize a result of M. Wildon and show that the vertices of $S^{(p^p)}$ are the Sylow $p$-subgroups of $\sym{p^2}$ when $p\geq 3$. This is an example where the partition $(p^p)$ is a $p^2$-core but the Specht module $S^{(p^p)}$ does not have abelian vertices. In the last section, \S \ref{S: some further questions}, we pose some questions which arise naturally from our results.
\section{Preliminaries}\label{S: notation}
We introduce the notation and background which we require. General references for this section are \cite{DB,GJAK,GJ1}.
\subsection{The representations}\label{S: the representations} Let $G$ be a finite group and $F$ be a field of characteristic $p$. In this article, all $FG$-modules are finite dimensional vector spaces over $F$.
Let $M$ be an $FG$-module and $H$ be a subgroup of $G$. We regard the restriction $\operatorname{Res}^G_H M$ as an $FH$-module in the obvious way. If $S$ is an $FH$-module we write $\operatorname{Ind}^G_H S$ for the induced module. Let $N$ be another $FG$-module. We write $N\mid M$ if $N$ is isomorphic to a direct summand of $M$ as an $FG$-module.
Suppose that $p>0$. We say that the module $M$ is relatively $H$-projective if $M\mid \operatorname{Ind}^G_H \operatorname{Res}^G_H M$. A vertex $Q$ of an indecomposable $FG$-module $M$ is a minimal subgroup of $G$ subject to the condition that $M$ is relatively $Q$-projective. Given a vertex $Q$ of $M$, a source of $M$ is an indecomposable $FQ$-module $S$ such that $M\mid \operatorname{Ind}^G_Q S$.
Let $FG=I_1\oplus\cdots\oplus I_m$ be a decomposition of the $F(G\times G)$-module $FG$, with the action given by $(g,h)x=gxh^{-1}$, into indecomposables. Suppose that $1=\sum_{i=1}^m e_i$ with $e_i\in I_i$ for each $1\leq i\leq m$. Indeed, $I_i=e_i FG$. The elements $e_i$ are mutually orthogonal primitive central idempotents of the algebra $FG$ and they are called the blocks of $FG$. For an indecomposable $FG$-module $M$, there is a unique block $e_j$ such that $e_jM\neq 0$ and $e_iM=0$ for all $i\neq j$. In this case, $e_jM=M$ and we say that the module $M$ lies in the block $e_j$. Note that $FG\cong \operatorname{Ind}^{G\times G}_{\Delta(G)} F$ where $\Delta(G)$ is the diagonal embedding of $G$ into $G\times G$. Thus, for each $1\leq i\leq m$, a vertex of $I_i$ is of the form $\Delta(D_i)$ for some subgroup $D_i$ of $G$ and the subgroup $D_i$ is called a defect group of the block $e_i$.
\begin{thm}[\cite{JG1}] Let $F$ be a field of characteristic $p>0$, $G$ be a finite group and $M$ be an indecomposable $FG$-module. Then
\begin{enumerate}
\item [(i)] any vertex of $M$ is a $p$-subgroup of $G$,
\item [(ii)] the vertices of $M$ are conjugate to each other in $G$, and
\item [(iii)] if $M$ lies inside the block $e_j$ of $FG$ then $M$ is relatively $D_j$-projective where $D_j$ is a defect group of $e_j$. In particular, a vertex of $M$ is necessarily a subgroup of $D_j$ up to conjugation.
\end{enumerate}
\end{thm}
Suppose that $L$ is a field extension of $F$. We write $L\otimes_F M$ for the $LG$-module obtained by extending scalars. In the latter part of this article, we are required to deal with vertices of modules under field extensions, so we include a little lemma here.
\begin{lem}[{\cite[\S III Lemma 4.14]{WF}}]\label{L:vtx fld ext} Let $L$ be a field extension of $F$ and $H$ be a $p$-subgroup of a finite group $G$. Then an $FG$-module $M$ is relatively $H$-projective if and only if $L\otimes_F M$ is relatively $H$-projective.\\
In particular, if $M$ is indecomposable and $L\otimes_F M$ remains indecomposable as an $LG$-module then a $p$-subgroup $Q$ of $G$ is a vertex of $M$ if and only if $Q$ is a vertex of $L\otimes_F M$.
\end{lem}
\subsection{Rank varieties}\label{S:varieties} Let $p$ be a prime. The $p$-rank of a finite group $G$ is the largest integer $n$ subject to the condition that $G$ contains an elementary abelian $p$-subgroup of order $p^n$. The $p$-rank of the abelian $p$-group $\mathbb{Z}_{p^{n_1}}\times \cdots \times \mathbb{Z}_{p^{n_m}}$ with $n_1,\ldots,n_m>0$ is $m$.
Let $E$ be an elementary abelian $p$-group of $p$-rank $n$ with generators $g_1,\ldots,g_n$. For each non-zero point $\alpha=(\alpha_1,\ldots,\alpha_n)$ of the space $F^n$, we write $u_\alpha=1+\sum_{i=1}^n \alpha_i(g_i-1)\in FE$. Note that $\langle u_\alpha\rangle$ is a cyclic $p$-group. Suppose that $F$ is algebraically closed. The rank variety of an $FE$-module $M$ is the set $$V^\#_E(M)=\{\mathbf{0}\}\cup \{\mathbf{0}\neq \alpha\in F^n\,|\, \text{$M$ is not projective as an $F\langle u_\alpha\rangle$-module}\}.$$ The rank variety $V^\#_E(M)$ is a homogeneous and closed subvariety of the affine space $\mathbb{A}^n(F)$ \cite[Theorem 4.3]{JC}. We shall write $\dim V^\#_E(M)$ for the dimension of the algebraic variety $V^\#_E(M)$. The rank variety depends on the choice and order of the generators of $E$, but its dimension does not. In the case where $p\nmid \dim_F M$, for each $\mathbf{0}\neq \alpha\in F^n$, $M$ necessarily has a summand of dimension coprime to $p$ as an $F\langle u_\alpha\rangle$-module. In this case, we have $V^\#_E(M)=V^\#_E(F)$ and thus $\dim V^\#_E(M)=n$. Let $N$ be another $FE$-module. It is clear from the definition that $V^\#_E(M\oplus N)=V^\#_E(M)\cup V^\#_E(N)$.
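As a quick illustration of the definition (a sketch added here, not taken from the cited references): for the trivial $FE$-module $F$ and any $\mathbf{0}\neq\alpha\in F^n$, the restriction of $F$ to $\langle u_\alpha\rangle$ is the one-dimensional trivial module, which is not projective, since projective $F\langle u_\alpha\rangle$-modules are free and hence of dimension divisible by $p$. Consequently $$V^\#_E(F)=F^n=\mathbb{A}^n(F)\quad\text{and}\quad \dim V^\#_E(F)=n,$$ which is the extreme case described above when $p\nmid\dim_F M$.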
Let $G$ be a finite group and $M$ be an $FG$-module. Let $$\mathbf{P}:\cdots \to P_2\to P_1 \to P_0\to M$$ be a minimal projective resolution of $M$. Following \cite{JALE}, the complexity $c$ of the module $M$ is the smallest non-negative integer such that $$\lim_{n\to\infty} \frac{\dim_F P_n}{n^c}=0;$$ in other words, it measures the polynomial rate of growth of the dimensions of the terms of the sequence $\mathbf{P}$. We write $c_G(M)$ for the number $c$ to indicate the dependence of $c$ on $M$ as an $FG$-module.
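The two smallest values of the complexity may serve as a worked illustration (added here as a sketch; the facts themselves are standard): $c_G(M)=0$ precisely when $M$ is projective, in which case a minimal projective resolution has $P_n=0$ for all $n\geq 1$, while for the trivial module $F$ over a cyclic group of order $p$ in characteristic $p$ the minimal projective resolution is periodic, $$\cdots \to F\mathbb{Z}_p\to F\mathbb{Z}_p\to F\mathbb{Z}_p\to F,$$ so every term has dimension $p$ and $c_{\mathbb{Z}_p}(F)=1$.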
\begin{thm}\label{P:complexity of mod}\
\begin{enumerate}
\item [(i)] \textup{({\cite[\S 1 Theorem]{JALE}, \cite[Theorem 1.1]{GALS} and \cite[Theorem 7.6]{JC}})} Let $M$ be an $FG$-module, $\mathcal{E}$ be a set of representatives of elementary abelian $p$-subgroups of $G$ up to conjugation and $\mathcal{E}^{\text{max}}$ be a set of representatives of maximal elementary abelian $p$-subgroups of $G$ up to conjugation. Then $$c_G(M)=\max_{E\in\mathcal{E}}\{\dim V^\#_E(M)\}=\max_{E\in\mathcal{E}^{\text{max}}}\{\dim V^\#_E(M)\}.$$
\item [(ii)] \cite[Corollary 4,5]{JALE} The complexity of an indecomposable $FG$-module $M$ is bounded above by the $p$-rank of a defect group of the block in which $M$ lies.
\end{enumerate}
\end{thm}
Following Theorem \ref{P:complexity of mod} (i), we have the conclusion that $c_G(M)$ is the $p$-rank of $G$ provided $p\nmid \dim_F M$. We have a more refined statement of Theorem \ref{P:complexity of mod} (ii).
\begin{prop}[{\cite[Lemmas 5.2, 5.3]{JALE}}]\label{L:complx rel vtx} Let $M$ be an $FG$-module where $F$ is a field of characteristic $p>0$.
\begin{enumerate}
\item [(i)] Suppose that $M$ is relatively $H$-projective for some subgroup $H$ of $G$. Then $c_G(M)=c_H(M)$.
\item [(ii)] Suppose that $M$ is indecomposable. Let $Q$ be a vertex of $M$ with a source $S$. Then $c_G(M)=c_Q(S)$.
\end{enumerate}
\end{prop}
Readers who are familiar with the variety theory for modules will have noticed that the author has avoided bringing the cohomological variety into the discussion, thanks to the Quillen stratification theorem and \cite[Theorem 1.1]{GALS}.
\subsection{The representations of symmetric groups} We now briefly go through the representations of symmetric groups. Let $n$ be a non-negative integer. A partition $\mu$ of $n$ is a sequence of positive integers $(\mu_1,\mu_2,\ldots,\mu_s)$ such that $\mu_1\geq \mu_2\geq \cdots\geq \mu_s$ and $\sum_{i=1}^s\mu_i=n$. In this case, we write $|\mu|=n$. Note that we allow the empty partition $\varnothing$ to be the unique partition of $0$. The partition $\mu$ is called $p$-singular if there is some $i\geq 0$ such that $\mu_{i+1}=\mu_{i+2}=\cdots=\mu_{i+p}>0$; otherwise, it is called $p$-regular. Let $\Lambda(n)$ be the set consisting of all partitions of $n$. There is a one-to-one correspondence between $\Lambda(n)$ and the set of Young diagrams with $n$ nodes in an obvious way. The Young diagram of $\mu$ is written as $[\mu]$. Fix a positive integer $m$, not necessarily a prime. Each partition $\mu$ is associated to a partition $\widetilde{\mu}$ and a non-negative integer $w$ such that $|\mu|=|\widetilde{\mu}|+mw$. The partition $\widetilde{\mu}$ and the integer $w$ are called the $m$-core and the $m$-weight of $\mu$, respectively \cite[\S 2.7]{GJAK}. The $m$-core of $\mu$ is obtained by successively removing $w$ removable rim $m$-hooks. In the case where $w=0$, or equivalently $\mu=\widetilde{\mu}$, we say that the partition $\mu$ is an $m$-core.
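A small worked example may help fix the terminology (added here for illustration only): take $\mu=(3,1)$ and $m=2$. Removing the rim $2$-hook formed by the last two nodes of the first row gives $(1,1)$, and removing the remaining vertical rim $2$-hook gives $\varnothing$. Hence $$\widetilde{\mu}=\varnothing,\qquad w=2,\qquad |\mu|=|\widetilde{\mu}|+2w=0+2\cdot 2.$$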
We write $\sym{n}$ for the symmetric group acting on $n$ letters. To each partition $\mu$ of $n$, we have the $F\sym{n}$-module $S^\mu_F$, the Specht module labeled by the partition $\mu$. We usually write $S^\mu$ for $S^\mu_F$ if the underlying field is understood. The dimension of the Specht module $S^\mu_F$ is given by the hook formula $$\dim_F S^\mu_F=\frac{n!}{\prod_{(i,j)\in [\mu]} h_{i,j}}$$ where $h_{i,j}$ denotes the hook length of the node $(i,j)$ of $[\mu]$ and $n=|\mu|$. We note that the dimension of $S^\mu_F$ is independent of the field $F$.
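For instance (a worked instance of the hook formula, added for illustration), for $\mu=(2,1)$ the hook lengths are $h_{1,1}=3$ and $h_{1,2}=h_{2,1}=1$, so $$\dim_F S^{(2,1)}_F=\frac{3!}{3\cdot 1\cdot 1}=2,$$ independently of the field $F$.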
\begin{thm}\label{P:Specht mod} Let $p$ be the characteristic of a field $F$ and $n$ be a positive integer.
\begin{enumerate}
\item [(i)] \cite[2.7.40]{GJAK} Let $m$ be a positive integer. The number of hook lengths of a Young diagram $[\mu]$ which are divisible by $m$ is precisely the $m$-weight of $\mu$.
\item [(ii)] \cite[Theorem 4.12]{GJ1} For $p=0$, the set $\{S^\mu\,|\,\mu\in\Lambda(n)\}$ is a complete set of non-isomorphic simple $F\sym{n}$-modules.
\item [(iii)] \cite[Corollary 13.18]{GJ1} Let $\mu$ be a partition. For $p\geq 3$, or $p=2$ and $\mu$ is $2$-regular, the Specht module $S^\mu$ is indecomposable.
\item [(iv)] \cite[\S 6.1 and Theorem 6.2.45]{GJAK} The blocks of $F\sym{n}$ are parametrized by the $p$-cores of the partitions of $n$ such that the block $e_{\widetilde{\mu}}$ labeled by the $p$-core $\widetilde{\mu}$ contains the Specht module $S^\mu$. In particular, if $\mu,\lambda$ are partitions of $n$ then the Specht modules $S^\mu, S^\lambda$ lie inside the same block of $F\sym{n}$ if and only if $\widetilde{\mu}=\widetilde{\lambda}$. A defect group of the block $e_{\widetilde{\mu}}$ is conjugate to a Sylow $p$-subgroup of $\sym{wp}$ where $w$ is the $p$-weight of $\mu$.
\end{enumerate}
\end{thm}
We shall take a step further to discuss the vertices of a special class of modules of the symmetric groups, the Young modules \cite{GJ2}. Here, and hereafter, whenever we have subgroups $H\subseteq \sym{a}$, $K\subseteq \sym{b}$ where $a+b\leq n$ for some positive integers $a,b,n$, we write $H\times K\hookrightarrow \sym{a}\times \sym{b}\hookrightarrow \sym{n}$ for the obvious inclusions.
Let $\mu=(\mu_1,\ldots,\mu_r)$ be a partition of $n$. Let $\mathfrak{S}_{\mu}$ be the Young subgroup of $\sym{n}$, i.e. $$\mathfrak{S}_\mu=\sym{\mu_1}\times \sym{\mu_2}\times\cdots\times \sym{\mu_r}.$$ Let $M^\mu\cong \operatorname{Ind}^{\sym{n}}_{\mathfrak{S}_\mu} F$ be the associated permutation module. Suppose that $M^\mu=M_1\oplus\cdots\oplus M_k$ is a decomposition of $M^\mu$ into indecomposable summands. Then there is a unique direct summand $M_j$ of $M^\mu$ such that $S^\mu\subseteq M_j$. We write $Y^\mu$ for $M_j$ and call it a Young module. Clearly, the Young modules are well-defined up to isomorphism. Every indecomposable direct summand of $M^\mu$ is isomorphic to some Young module and $Y^\mu\cong Y^\lambda$ if and only if $\mu=\lambda$. The Young modules have trivial sources. It is well known that if $\mu$ is a $p$-regular partition and $S^\mu$ is a simple Specht module then $S^\mu\cong Y^\mu$; see for example \cite[\S 1]{JCKMT}.
Every partition $\mu$ can be written as its $p$-adic expansion as follows. Let $\mu_{(0)}$ be the partition obtained from $\mu$ by successively stripping off all horizontal $p$-hooks. Then we have the coordinate-wise summation $\mu=p\mu(1)+\mu_{(0)}$ for some partition $\mu(1)$. Inductively, we can write $\mu(k-1)=p\mu(k)+\mu_{(k-1)}$ for $k\geq 2$ and hence $\mu=p^k\mu(k)+\cdots+p\mu_{(1)}+\mu_{(0)}$. The process ends if for some $k$, $\mu(k)$ has no removable horizontal $p$-hooks. Let $\mu_{(k)}=\mu(k)$. Then $\mu=p^k\mu_{(k)}+\cdots+p\mu_{(1)}+\mu_{(0)}$ is the $p$-adic expansion of $\mu$. Let $\rho(\mu)$ be the partition $$(\underbrace{p^k,\ldots,p^k}_{\text{$|\mu_{(k)}|$ factors}}\,,\ldots,\underbrace{p,\ldots,p}_{\text{$|\mu_{(1)}|$ factors}}\,,\underbrace{p^0,\ldots,p^0}_{\text{$|\mu_{(0)}|$ factors}})$$ of $n$ and $\mathfrak{S}_{\rho(\mu)}$ be the corresponding Young subgroup of $\sym{n}$.
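To illustrate the construction (an example added here for the reader's convenience), let $p=2$ and $\mu=(3,2)$. Stripping the horizontal $2$-hook in the second row and then the one in the first row gives $\mu_{(0)}=(1)$, and coordinate-wise $\mu=2\mu(1)+\mu_{(0)}$ with $\mu(1)=(1,1)$. Since $(1,1)$ has no removable horizontal $2$-hooks, the process stops with $\mu_{(1)}=(1,1)$, the $2$-adic expansion is $\mu=2\,(1,1)+(1)$, and therefore $$\rho(\mu)=(2,2,1),\qquad \mathfrak{S}_{\rho(\mu)}=\sym{2}\times\sym{2}\times\sym{1}\subseteq\sym{5}.$$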
\begin{thm}[{\cite{JGrab}, \cite[\S 4.1]{KE}}]\label{T: vtx Young} Any vertex of the Young module $Y^\mu$ is conjugate to a Sylow $p$-subgroup of $\mathfrak{S}_{\rho(\mu)}$.
\end{thm}
The main purpose of this article is to study the vertices and complexity of Specht modules. Thus, by Theorem \ref{P:complexity of mod} (i), it is crucial to understand the maximal elementary abelian $p$-subgroups of a symmetric group up to conjugation. Let $m$ be a positive integer. Let $(\mathbb{Z}_p)^m$ act on itself by the left regular action. This induces an injective group homomorphism $(\mathbb{Z}_p)^m\hookrightarrow \sym{p^m}$. Since the action is faithful we write $V_m(p)$ for the image of this homomorphism.
\begin{thm}[{\cite[\S VI Theorem 1.3]{AARM}}]\label{T:max ele abl} Let $n$ be a positive integer. There is a one-to-one correspondence between the ways of writing $n=i_0+i_1p+\cdots+i_rp^r$ for some non-negative integer $r$ such that all $i_0,\ldots,i_r$ are non-negative integers and $0\leq i_0\leq p-1$, and the set of representatives of all maximal elementary abelian $p$-subgroups of $\sym{n}$ up to conjugation, given by $$\underbrace{V_1(p)\times\cdots\times V_1(p)}_{\text{$i_1$ times}}\times \cdots\times \underbrace{V_r(p)\times\cdots\times V_r(p)}_{\text{$i_r$ times}}\hookrightarrow$$ $$\underbrace{\sym{p}\times\cdots\times \sym{p}}_{\text{$i_1$ times}}\times \cdots\times \underbrace{\sym{p^r}\times\cdots\times \sym{p^r}}_{\text{$i_r$ times}}\hookrightarrow \sym{n}.$$
\end{thm}
Note that the $p$-rank of the maximal elementary abelian $p$-subgroup of $\sym{n}$ given in Theorem \ref{T:max ele abl} is $i_1+2i_2+\cdots+ri_r$. Following Theorem \ref{P:Specht mod} (iv) and Theorem \ref{P:complexity of mod} (ii), we draw the easy conclusion that the complexity of a Specht module $S^\mu$ is bounded above by the $p$-weight of $\mu$. The statement remains valid if we replace the Specht module by the simple module $D^\mu$ of $F\sym{n}$, whenever $\mu$ is $p$-regular, as there is a statement analogous to Theorem \ref{P:Specht mod} (iv) for simple modules, though this is not the main concern of this article.
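As a concrete instance of Theorem \ref{T:max ele abl} (added for illustration), take $p=2$ and $n=5$. The admissible ways of writing $5=i_0+2i_1+4i_2$ with $0\leq i_0\leq 1$ are $5=1+2\cdot 2$ and $5=1+4$, so, up to conjugation, the maximal elementary abelian $2$-subgroups of $\sym{5}$ are $$V_1(2)\times V_1(2)=\langle (1\,2),(3\,4)\rangle\subseteq \sym{2}\times\sym{2}\qquad\text{and}\qquad V_2(2)=\langle (1\,2)(3\,4),(1\,3)(2\,4)\rangle\subseteq\sym{4},$$ both of $2$-rank $i_1+2i_2=2$.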
\section{Main results}\label{S: main results}
We present our main results in this section.
\begin{thm}\label{T: main result 1} Let $F$ be a field of characteristic $p>0$ and $S^\mu$ be an indecomposable Specht module. Suppose that $S^\mu$ has an abelian vertex of $p$-rank $m$. Then $\mu$ is a $p^2$-core and the complexity of $S^\mu$ is $m$.
\end{thm}
\begin{thm}\label{T: main result 2} Let $F$ be a field of characteristic $p>0$ and $\mu$ be a partition. Suppose that $p\geq 3$, or $p=2$ and $\mu$ is $2$-regular. Suppose that the Specht module $S^\mu$ has an abelian vertex $Q$ of $p$-rank $m$. Let $c$ be the complexity of the Specht module $S^\mu$ and $w$ be the $p$-weight of the partition $\mu$. Then $c=m=w$ and $Q$ is conjugate to the elementary abelian $p$-subgroup $$\underbrace{V_1(p)\times \cdots\times V_1(p)}_{\text{$w$ factors}}\hookrightarrow \sym{p}\times \cdots \times \sym{p}\hookrightarrow \sym{n}.$$
\end{thm}
\begin{thm}\label{T: main result 3} Let $p=2$, $\mu$ be a $2$-regular partition and $Q$ be a vertex of the Specht module $S^\mu$. Let $w$ be a non-negative integer. Then the following statements are equivalent.
\begin{enumerate}
\item [(i)] The vertex $Q$ is abelian of $2$-rank $w$.
\item [(ii)] The vertex $Q$ is elementary abelian of $2$-rank $w$.
\item [(iii)] The partition $\mu$ is a $4$-core of $2$-weight $w$.
\end{enumerate} In any of these cases, the Specht module $S^\mu$ has trivial source, complexity $w$ and is a simple Young module.
\end{thm}
\begin{rem} The hypothesis of Theorem \ref{T: main result 3} cannot be loosened in the following sense. For $p=2$, it is known that the Specht modules $S^{(n-r,1^r)}$ are indecomposable when $2\mid n$ \cite[Theorem 4.1]{GM}. In this case, the vertices of $S^{(n-r,1^r)}$ are the Sylow $2$-subgroups of $\sym{n}$ \cite[Theorem 4.5]{GMMP}. Thus, for instance, the Specht module $S^{(4,1,1)}$ has a non-abelian vertex even though the partition $(4,1,1)$ is a $4$-core. So, in Theorem \ref{T: main result 3}, we cannot replace the $2$-regularity condition merely by the indecomposability condition.
\end{rem}
\begin{comment}
\section{Some examples}
\subsection{The Specht module $S^{(p^2,1^p)}$} For the technically of the variety theory, we assume that the field $F$ is algebraically closed. Thus all indecomposable modules are absolutely indecomposable. It is known that the vertices of $S^{(n-r,1^r)}$ are the Sylow $2$-subgroups of $\sym{n}$ if $2\mid n$, see \cite[Theorem 4.5]{GMMP}. In this case, the Specht modules are also indecomposable, see \cite[Theorem 4.1]{GM}. In this section, we show that the Specht module $S^{(p^2,1^p)}$ has vertices the Sylow $p$-subgroups of $\sym{p^2+p}$ and thus non-abelian. Note that, yet, $(p^2,1^p)$ is a $p^2$-core. These show that
\begin{enumerate}
\item [(i)] a $p^2$-core partition $\mu$ does not necessarily imply that the Specht module $S^\mu$ has abelian vertices, in general, and
\item [(ii)] the equivalence of the statements in Theorem \ref{T: main result 3} is not true if we loosen the $2$-regularity condition of the partitions to the indecomposability of the Specht modules.
\end{enumerate}
We shall use the following theorem by Benson which has been written in its contrapositive form.
\begin{thm}[{\cite[Theorem 3.14]{SDBKRZ}}] Let $M$ be an indecomposable $FG$-module and $Q$ be a vertex of $M$. Let $E=
\langle g_1,\ldots,g_n\rangle$ be an elementary abelian $p$-subgroup of $G$ of $p$-rank $p^n$. Suppose that there is a point $0\neq \alpha=(\alpha_1,\ldots,\alpha_n)\in F^n$ such that $\{\alpha_1,\ldots,\alpha_n\}$ is linearly independent over the ground field $\mathbb{F}_p$ of $F$ and $\operatorname{Res}^G_{\langle u_\alpha\rangle} M$ is not a free $F\langle u_\alpha\rangle$-module where $u_\alpha=1+\sum_{i=1}^n \alpha_i(g_i-1)$. Then there is a conjugate of $Q$ which contains $E$.
\end{thm}
The next corollary is immediate. We shall also provide a short proof for the corollary without assuming the theorem.
\begin{cor}\label{C: not gen free} Let $M$ be an indecomposable $FG$-module and $Q$ be a vertex of $M$. Suppose that there is an elementary abelian subgroup $E$ of $G$ of $p$-rank $n$ such that $\operatorname{Res}^G_E M$ has complexity $n$, or equivalently, $V^\#_E(M)=F^n$. Then there is a conjugate of $Q$ which contains $E$.
\end{cor}
\begin{proof} Let $S$, an $FQ$-module, be a source of $M$. By Mackey decomposition theorem, we have $$\operatorname{Res}^G_EM\mid \operatorname{Res}^G_E \operatorname{Ind}^G_Q S\cong \bigoplus_{g\in E\backslash G/Q}\operatorname{Ind}^E_{{}^gQ\cap E}\operatorname{Res}^{{}^g Q}_{{}^gQ\cap E}{}^g S.$$ If ${}^gQ\cap E\subsetneq E$ for all $g\in E\backslash G/Q$ then $$c_E(\operatorname{Ind}^E_{{}^gQ\cap E}\operatorname{Res}^{{}^g Q}_{{}^gQ\cap E}{}^g S)=c_E(\operatorname{Res}^{{}^g Q}_{{}^gQ\cap E}{}^g S)\leq \text{$p$-rank of ${}^g Q\cap E$}< n.$$ The equivalence between $V^\#_E(M)=F^n$ and $c_E(\operatorname{Res}^G_E M)=n$ is given in \cite[Proposition 2.2]{KJL3}.
\end{proof}
Let $E_{p+1}=V_1(p)\times \cdots \times V_1(p)=\langle (1,\ldots,p),\ldots,(p^2+1,\ldots,(p+1)p)\rangle$. We have shown in \cite[Theorem 4.5]{KJL3} that $\operatorname{Res}^{\sym{p^2+p}}_{u_\alpha} S^{(p^2,1^p)}$ is not free for any $0\neq \alpha\in F^{p+1}$. Thus $$E_{p+1}\subseteq Q$$ for some vertex $Q$ of $S^{(p^2,1^p)}$. The Sylow $p$-subgroups of $\sym{p^2+p}$ has order $p^{p+2}$. As such, $Q$ is either $E_{p+1}$ or a Sylow $p$-subgroup of $\sym{p^2+p}$. Let $$g_1=\prod_{i=2}^{p+1}((i-1)p+1,\ldots,ip),\,\, g_2=\prod_{i=p+1}^{2p}(i,p+i,2p+i,\ldots,(p-1)p+i)$$ be the elements of $\sym{p^2+p}$. We will show that $Q$ contains a conjugate of the elementary abelian $p$-subgroup $E=\langle g_1,g_2\rangle$ and hence $E_{p+1}\subsetneq Q$.
We shall use the standard basis of Specht modules consisting of standard polytabloids $e_t$, see \cite[\S 8]{GJ}. Note that the elements of $E$ do not move the number $1$ and thus we do not have to use Garnir relation, see \cite[\S 7]{GJ}, for straightening process. Readers who are familiar with the material we have just discussed can easily see that elements of $E$ permute the basis elements, up to a sign.
\begin{lem} The rank variety $V^\#_E(S^{(p^2,1^p)})$ contains the line passing through the point $(1,1)$ of $\mathbb{A}^2(F)$.
\end{lem}
\begin{proof} Let $M=S^{(p^2,1^p)}$, $\mu=(p^2,1^p)$ and $G=\sym{p^2+p}$. Since the rank variety is homogenous we just need to show that $\alpha=(1,1)\in V^\#_E(M)$. Note that $u_\alpha=g_1+g_2-1$. Let $\sigma=g_1^{p-1}g_2\in G$ and $$\Omega=\{p+1,\sigma(p+1),\sigma^2(p+1),\ldots,\sigma^{p-1}(p+1)\}.$$ Note that $|\Omega|=p$ and $p+1<\sigma(p+1)<\cdots<\sigma^{p-1}(p+1)$. Let $t$ be the standard $\mu$-tableau with the numbers $\{1\}\cup \Omega$ lying in its first column. Since $\Omega$ is the orbit of $p+1$ under the action of $\langle\sigma\rangle$ it is immediate that $\sigma e_{t}=\operatorname{sgn}(\pi)e_{t}$ for some $p$-cycle $\pi$; namely $g_1e_{t}=g_2e_{t}$. The elements of $E$ permute the standard polytabloids, up to a sign. So we conclude that the submodule $N$ with basis $\{e_{t},g_1e_{t},\ldots,g_1^{p-1}e_{t}\}$ is a direct summand of $\operatorname{Res}^{G}_E M$. Since $g_2(g_1^je_t)=g_1^{j+1}e_t=g_1(g_1^je_t)$ we have $u_\alpha-1=g_1+g_2-2\in \mathrm{Ann}(N)$. So $\operatorname{Res}^G_{\langle u_\alpha\rangle} N$ is a direct sum of trivial modules and $\operatorname{Res}^G_{\langle u_\alpha\rangle} M$ is not projective.
\end{proof}
\begin{conj} Any Sylow $p$-subgroup of $\sym{p^2+p}$ is a vertex of the Specht module $S^{(p^2,1^p)}$.
\end{conj}
\begin{proof} Let $G=\sym{p^2+p}$, $M=S^{(p^2+p)}$ and $S$ be a source of $M$ as $FQ$-module. By Mackey's decomposition theorem, we have $$\operatorname{Res}^G_E M\mid \operatorname{Res}^G_E \operatorname{Ind}^G_Q S\cong \bigoplus_{g\in E\backslash G/Q} \operatorname{Ind}^E_{{}^gQ\cap E} \operatorname{Res}^{{}^gQ}_{{}^gQ\cap E}{}^gS.$$ Suppose that ${}^g Q\cap E\subsetneq E$ for any $g\in \sym{p^2+p}$. Let $S_1,\ldots,S_m$ be indecomposable $FH_i$-modules with $H_i\subsetneq E$ such that $$\operatorname{Res}^G_E M\cong \bigoplus_{i=1}^m \operatorname{Ind}^E_{H_i} S_i.$$ Note that, for each $1\leq i\leq m$, we have $V^\#_{H_i}(S_i)$ is either $\{0\}$ or $\mathbb{A}^1(F)$. Note also that, by a consequence of Green's indecomposability theorem \cite[Corollary 52.6]{LD}, the $FE$-modules $\operatorname{Ind}^E_{H_i} S_i$ are indecomposable.
Suppose that $V^\#_{H_i}(S_i)=\mathbb{A}^1(F)$. Then $\{1,g_j,\ldots,g_j^{p-1}\}$ is a left coset representatives of $H_i$ in $E$ if and only if
\end{proof}
\end{comment}
\section{Proof of the main results}\label{S: proofs}
In this section we shall prove our main theorems. We first prove Theorem \ref{T: main result 1}. We then divide the latter part of this section into two subsections. The first part concerns the proof of Theorem \ref{T: main result 2} when $p\geq 3$. The second part concerns the proofs of Theorem \ref{T: main result 2} when $p=2$ and Theorem \ref{T: main result 3}.
We state two known results upon which our proofs rely heavily.
\begin{thm}[{\cite{AP}, \cite[Theorem 1]{GK}}]\label{P:injection} Let $n_1,\ldots,n_s$ be positive integers. If there is an injective group homomorphism $\mathbb{Z}_{p^{n_1}}\times \ldots\times \mathbb{Z}_{p^{n_s}}\hookrightarrow \sym{m}$ then $p^{n_1}+\ldots+p^{n_s}\leq m$.
\end{thm}
For any positive integer $n$ and prime number $p$, we write $n_p$ for the prime power $p^a$ such that $p^a\mid n$ and $\gcd(p,n/p^a)=1$.
\begin{thm}[{\cite[Theorem 1.1]{CB}}]\label{T:Bessenrodt} Let $M$ be an $FG$-module for some finite group $G$. Suppose that $M$ is relatively $H$-projective for some subgroup $H$ of $G$. Let $Q\cong \mathbb{Z}_{p^{n_1}}\times \ldots\times \mathbb{Z}_{p^{n_m}}$ be an abelian subgroup of $H$ of $p$-rank $m$ with $n_1\geq \cdots \geq n_m\geq 1$. Let $c=c_Q(M)$ be the complexity of the $FQ$-module $\operatorname{Res}^G_Q M$. Then $$\left . \frac{|G:H|_p|Q|_p}{p^{n_1}\cdots p^{n_c}}\,\,\right |\,\, (\dim_F M)_p.$$
\end{thm}
We can now prove Theorem \ref{T: main result 1}.
\begin{proof}[Proof of Theorem \ref{T: main result 1}] We assume the notation as in Theorem \ref{T: main result 1}. Let $n=|\mu|$ and $w$ be the $p$-weight of $\mu$. Let $D$ be a defect group of the block $e_{\widetilde{\mu}}$ and $Q$ be a vertex of $S^\mu$. Without loss of generality, we may assume that $$Q\subseteq D\subseteq \sym{pw}.$$
We apply Theorem \ref{T:Bessenrodt} and Proposition \ref{L:complx rel vtx} by taking $M=S^\mu$, $H=Q\cong \mathbb{Z}_{p^{n_1}}\times \ldots\times \mathbb{Z}_{p^{n_m}}$ with $n_1\geq \cdots\geq n_m\geq 1$. Let $c=c_Q(S^\mu)=c_{\sym{n}}(S^\mu)$. We have $$p^{a-n_1-\cdots-n_m}\cdot p^{n_{c+1}}\cdots p^{n_m}=p^{a-n_1-\cdots-n_c}\mid (\dim_F S^\mu)_p$$ where $p^a=(n!)_p$. Using Theorem \ref{P:Specht mod} (i) and the hook formula, we let $s$ be the non-negative integer such that $$p^s=\frac{p^a}{p^w\cdot (\dim_F S^\mu)_p}=\frac{\prod_{(i,j)\in [\mu]} (h_{i,j})_p}{p^w}.$$ Note that $s=0$ if and only if $\mu$ is a $p^2$-core, that is, if and only if no hook length of $[\mu]$ is divisible by $p^2$. With this notation, we deduce that
\begin{equation}\label{Eq:1}
s+w\leq n_1+\cdots+n_c.
\end{equation} On the other hand, we have the injection $Q\hookrightarrow\sym{pw}$. As such, by Theorem \ref{P:injection}, we get $p^{n_1}+\cdots+p^{n_m}\leq pw$; namely $p^{n_1-1}+\cdots+p^{n_m-1}\leq w$. Combining this inequality with (\ref{Eq:1}), we have \begin{equation}\label{Eq:2}
(p^{n_1-1}-n_1)+\cdots+(p^{n_c-1}-n_c)+p^{n_{c+1}-1}+\cdots+p^{n_m-1}+s\leq 0.
\end{equation} Since $p^{k-1}-k\geq 0$ for every positive integer $k$ we conclude that $m=c$, $s=0$, $p^{n_i-1}=n_i$ for all $1\leq i\leq c$ and $w=n_1+\cdots+n_c$. We have proved Theorem \ref{T: main result 1}.
\end{proof}
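It may be worth recording explicitly which cyclic factors survive the condition $p^{n_i-1}=n_i$ obtained above (an observation extracted from the computation, not an additional claim): since $p^{k-1}>k$ for $k\geq 2$ when $p\geq 3$, and for $k\geq 3$ when $p=2$, the only solutions are $$n_i=1\ (\text{any } p)\qquad\text{and}\qquad n_i=2\ (\text{only } p=2).$$ Thus an abelian vertex can only have direct factors $\mathbb{Z}_p$, together with $\mathbb{Z}_4$ when $p=2$; this is the dichotomy behind the two subsections that follow.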
\subsection{The case where $p\geq 3$}\label{sS: p geq 3 case}
Keeping the notation and the computations from the proof of Theorem \ref{T: main result 1}, we continue with the proof of Theorem \ref{T: main result 2} in the case $p\geq 3$.
\begin{proof}[Proof of Theorem \ref{T: main result 2} when $p\geq 3$] If $p$ is odd then, from $p^{n_i-1}=n_i$, necessarily $n_i=1$ for all $1\leq i\leq c$; namely $m=c=w$ and $Q$ is an elementary abelian $p$-group of $p$-rank $w$. Since $Q$ is also a subgroup of $\sym{pw}$, by Theorem \ref{T:max ele abl}, we let $$E=(V_1(p))^{i_1}\times \cdots\times (V_r(p))^{i_r}$$ be a maximal elementary abelian $p$-subgroup of $\sym{pw}$ containing $Q$. Comparing their $p$-ranks, we have $w\leq i_1+2i_2+\cdots+ri_r$. On the other hand, we also have $i_1p+i_2p^2+\cdots+i_rp^r=pw$. Thus we deduce that $$i_2(p-2)+\cdots+i_r(p^{r-1}-r)\leq 0$$ and hence $i_2=\cdots=i_r=0$ and $i_1=w$. Namely, $Q$ is necessarily conjugate to $(V_1(p))^w$. This completes the proof of Theorem \ref{T: main result 2} when $p$ is odd.
\end{proof}
\begin{rem} The author would like to point out that the proofs given above, for Theorem \ref{T: main result 1} and for Theorem \ref{T: main result 2} when $p\geq 3$, are very similar to the proof of \cite[Theorem 1]{MW}, in which Wildon dealt with the case $m=1$.
\end{rem}
\subsection{The case where $p=2$}\label{sS: p=2 case}
Note that the proof of Theorem \ref{T: main result 1} assumes nothing about the $2$-regularity of the partition $\mu$ in the case $p=2$; it works for all indecomposable Specht modules with abelian vertices. In view of Theorem \ref{T: main result 2}, we still have a result in the $p=2$ case without assuming the $2$-regularity condition.
\begin{prop}\label{P: factors of vertices} Let $p=2$ and $S^\mu$ be indecomposable. Suppose that a vertex $Q$ of $S^\mu$ is abelian of $p$-rank $m$. Let $c$ be the complexity of $S^\mu$ and $w$ be the $2$-weight of $\mu$. Then $m=c$, $c\leq w\leq 2c$ and the direct factors of $Q$ are either $\mathbb{Z}_2$ or $\mathbb{Z}_4$.
\end{prop}
\begin{proof} By (\ref{Eq:2}) with $p=2$, we have $m=c$, $2^{n_i-1}=n_i$ for all $1\leq i\leq c$ and $w=n_1+\cdots+n_c$. Thus each of the $n_i$ is either $1$ or $2$.
\end{proof}
In general, we do not know when a Specht module over an even characteristic field is decomposable. Since vertices are defined only for indecomposable modules, in view of Theorem \ref{P:Specht mod} (iii), the $2$-regularity assumption is technically required.
Note that the proof which we have just given in \S \ref{sS: p geq 3 case} for Theorem \ref{T: main result 2} fails when $p=2$ because of the possibility that $Q$ may have $\mathbb{Z}_4$ as a direct factor. Thus we need to take a closer look at these partitions $\mu$ which are $2$-regular and the vertices of $S^\mu$ are abelian.
\begin{lem}\label{L: 4-core} Let $\mu=(\mu_1,\ldots,\mu_r)$ be a partition. Then $\mu$ is both a $2$-regular and $4$-core partition if and only if exactly one of the following holds.
\begin{enumerate}
\item [(i)] $\mu_r=1$ and there exists a number $0\leq s\leq r-1$ such that $\mu_i-\mu_{i+1}=3$ for all $1\leq i\leq s$ and $\mu_i-\mu_{i+1}=1$ for all $s+1\leq i\leq r-1$.
\item [(ii)] $\mu_r\in \{2,3\}$ and $\mu_i-\mu_{i+1}=3$ for all $1\leq i\leq r-1$.
\end{enumerate}
\end{lem}
\begin{proof} Suppose that $\mu$ is $2$-regular and is a $4$-core. It is easy to see that $1\leq \mu_i-\mu_{i+1}\leq 3$ for all $1\leq i\leq r-1$ and $\mu_i-\mu_{i+1}\neq 2$ for all $1\leq i\leq r-2$. Suppose that $\mu_j-\mu_{j+1}=3$ and $\mu_{j-1}-\mu_j=1$ for some $2\leq j\leq r-1$. Then the node $(j-1,\mu_{j-1}-2)$ has hook length $4$. If $\mu_r$ is $2$ or $3$, then $\mu_{r-1}-\mu_r\neq 1$; otherwise the node $(r-1,1)$, respectively $(r-1,2)$, has hook length $4$. This proves one direction of the characterization. The converse is easy.
\end{proof}
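For example (an illustration of the lemma, added here): the partition $(4,1)$ is of type $(i)$ with $s=1$, since $\mu_1-\mu_2=3$ and $\mu_2=1$; its hook lengths are $5,3,2,1$ in the first row and $1$ in the second row, so it is indeed a $2$-regular $4$-core, of $2$-weight $1$. On the other hand, $(3,2)$ is $2$-regular but satisfies neither $(i)$ nor $(ii)$, and accordingly the node $(1,1)$ of $[(3,2)]$ has hook length $4$.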
Using the above characterization of $2$-regular and $4$-core partitions, and the characterization of simple Specht modules for $p=2$ \cite[Main Theorem]{GJAM}, we obtain the following corollary.
\begin{cor}[{of \cite[Main Theorem]{GJAM}}]\label{C: 4-core is simple} Let $p=2$ and $\mu$ be a $2$-regular partition. If $\mu$ is a $4$-core then $S^\mu$ is simple and $D^\mu\cong S^\mu\cong Y^\mu$.
\end{cor}
\begin{cor}\label{C: vtx 4-core} Let $p=2$ and $\mu$ be a $2$-regular partition of $n$. Suppose that $\mu$ is a $4$-core. Then any vertex $Q$ of $S^\mu$ is conjugate to $$\underbrace{V_1(2)\times \cdots\times V_1(2)}_{\text{$w$ factors}}\hookrightarrow (\sym{2})^w\hookrightarrow \sym{n}$$ where $w$ is the $2$-weight of $\mu$. Furthermore, the complexity of $S^\mu$ is $w$.
\end{cor}
\begin{proof} Let $\mu=2^k\mu_{(k)}+\cdots+2\mu_{(1)}+\mu_{(0)}$ be the $2$-adic expansion of $\mu$ with $|\mu_{(k)}|\neq 0$. We claim that $0\leq k\leq 1$ and that $\mu_{(0)}$ is the $2$-core of $\mu$. Note that there are only two types of removable rim $2$-hooks, the horizontal $2$-hook and the vertical $2$-hook. Since $\mu_{(0)}$ is obtained from $\mu$ by successively removing all horizontal $2$-hooks, it fails to be a $2$-core only if $\mu_{(0)}$ has some removable vertical $2$-hook. In that case, the original partition $\mu$ has two successive non-empty rows whose sizes differ by a multiple of $2$, which is not allowed by the characterization in Lemma \ref{L: 4-core}. Thus $\mu_{(0)}=\widetilde{\mu}$. It is clear that, in general, if $\mu$ is a $p^k$-core for some positive integer $k$ then $\mu_{(l)}=\varnothing$ for all $l\geq k$.
By Corollary \ref{C: 4-core is simple}, the Specht module $S^\mu$ is isomorphic to the Young module $Y^\mu$. By Theorem \ref{T: vtx Young}, we deduce that any vertex $Q$ of the Specht module $S^\mu\cong Y^\mu$ is conjugate to a Sylow $2$-subgroup of $\mathfrak{S}_{\rho(\mu)}=(\sym{2})^{|\mu_{(1)}|}$ where $$|\mu_{(1)}|=(n-|\mu_{(0)}|)/2=(n-|\widetilde{\mu}|)/2=w.$$ Since the Young module $Y^\mu$ has a trivial source, namely the $FQ$-module $F$, by Proposition \ref{L:complx rel vtx} (ii), we have $$c_{\sym{n}}(S^\mu)=c_Q(F)=w.$$ The proof of our corollary is now complete.
\end{proof}
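For orientation, let us spell the corollary out in a small case, say $\mu=(5,2)$ and $n=7$. By Lemma \ref{L: 4-core}, $\mu$ is a $2$-regular $4$-core (of type (ii)); its $2$-core is $(1)$, so its $2$-weight is $w=3$. Corollary \ref{C: 4-core is simple} gives $S^{(5,2)}\cong D^{(5,2)}\cong Y^{(5,2)}$, and the corollary just proven asserts that any vertex of $S^{(5,2)}$ is conjugate to $V_1(2)\times V_1(2)\times V_1(2)$ inside $(\sym{2})^3\leq \sym{7}$ and that the complexity of $S^{(5,2)}$ is $3$.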
The proofs for Theorem \ref{T: main result 2} when $p=2$ and Theorem \ref{T: main result 3} are now clear:
\begin{proof}[Proof of Theorem \ref{T: main result 2} when $p=2$] We assume all the notation in Theorem \ref{T: main result 2}. By Theorem \ref{T: main result 1}, the partition $\mu$ is necessarily a $4$-core. By Corollary \ref{C: vtx 4-core}, we get $m=w=c$ and $Q$ is conjugate to $(V_1(2))^w$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{T: main result 3}] Corollary \ref{C: vtx 4-core} shows that (iii) implies (ii). The implication from (ii) to (i) is trivial. Theorem \ref{T: main result 1} shows that (i) implies (iii). The final statement comes from Corollaries \ref{C: 4-core is simple} and \ref{C: vtx 4-core}.
\end{proof}
\section{Some applications of the main results}\label{S: consequences}
In \cite[Theorem 1]{MW}, M. Wildon showed that the vertices of an indecomposable Specht module $S^\mu$ are non-trivial cyclic if and only if the $p$-weight of $\mu$ is $1$. We have the following generalization of this result.
\begin{cor}\label{C: abl vtx in abl dft} Let $p$ be the characteristic of the field $F$ and suppose that $p\geq 3$. Let $1\leq m\leq p-1$ be an integer and $\mu$ be a partition such that the Specht module $S^\mu$ is indecomposable. Then a vertex $Q$ of $S^\mu$ is abelian of $p$-rank $m$ if and only if the $p$-weight of $\mu$ is $m$. In this case, $Q$ is necessarily elementary abelian.
\end{cor}
\begin{proof} Assume that $Q$ is abelian. Theorem \ref{T: main result 2} shows that $Q$ is elementary abelian and $m=w$ where $w$ is the $p$-weight of $\mu$. Conversely, suppose that $m=w$. In this case, we are in the abelian defect case. Thus $Q$ is necessarily abelian as a subgroup of some defect group of $S^\mu$. By Theorem \ref{T: main result 2}, $Q$ has $p$-rank $w=m$.
\end{proof}
\begin{rem} By virtue of \cite[Theorem 1]{MW}, Corollary \ref{C: abl vtx in abl dft} also holds for $p=2$.
\end{rem}
There is a special class of Specht modules $S^\mu$ whose partition $\mu$ is of the form $({\mu_1}^{a_1},{\mu_2}^{a_2},\ldots,{\mu_r}^{a_r})$ where both $\mu_i$ and $a_i$ are multiples of $p$ for every $1\leq i\leq r$. We call them the $(p\times p)$-partitions. As another application of our results, we study the vertices of the Specht modules $S^\mu$ where $\mu$ is a $(p\times p)$-partition for $p\geq 3$. We have seen in Lemma \ref{L:vtx fld ext} that the vertices of an indecomposable module remain unchanged, should the module remain indecomposable upon field extension. Since all Specht modules in the case of $p\geq 3$ are indecomposable, we may assume that $F$ is algebraically closed for the rest of this section.
Let $E_p=V_1(p)\times \cdots\times V_1(p)$ ($p$ factors) be the maximal elementary abelian $p$-subgroup of $\sym{p^2}$ as in Theorem \ref{T:max ele abl}. We have $c_{\sym{p^2}}(S^{(p^p)})=c_{E_p}(S^{(p^p)})=p-1$ \cite[Theorem 3.1(i)]{KJL}. Furthermore, for $p\geq 3$, with respect to the generators $$(1,\ldots,p),(p+1,\ldots,2p),\ldots,(p^2-p+1,\ldots,p^2)$$ of $E_p$, a description of the union of irreducible components $W_{p-1}$ of dimension $p-1$ of $V^\#_{E_p}(S^{(p^p)})$ and the vanishing ideal of $W_{p-1}$ is given in \cite[Theorem 3.1(ii)]{KJL}. The vanishing ideal $I$ of $W_{p-1}$ is generated by the element $$f(x_1,\ldots,x_p)=(x_1\cdots x_p)^{p-1}\widetilde{f}+\sum_{i=1}^p {x_1}^{n(p-1)}\cdots \widehat{{x_i}^{n(p-1)}}\cdots {x_p}^{n(p-1)}$$ for some homogeneous polynomial $\widetilde{f}$ and positive integer $n$, and where the term ${x_1}^{n(p-1)}\cdots \widehat{{x_i}^{n(p-1)}}\cdots {x_p}^{n(p-1)}$ is the product of all ${x_j}^{n(p-1)}$'s with $1\leq j\leq p$ except the term ${x_i}^{n(p-1)}$. In particular, we have the following lemma.
\begin{lem}\label{L: irreducible component of (p^p)} No irreducible component of $W_{p-1}\subseteq V^\#_{E_p}(S^{(p^p)})$ is a hyperplane of $\mathbb{A}^p(F)$.
\end{lem}
\begin{proof} Suppose, for a contradiction, that some irreducible component of $W_{p-1}$ is a hyperplane, i.e.\ $f(x_1,\ldots,x_p)\in I(W_{p-1})\subseteq \langle a_1x_1+\cdots+a_px_p\rangle$ for some non-zero vector $v=(a_1,\ldots,a_p)\in \mathbb{A}^p(F)$. Let $g\in F[x_1,\ldots,x_p]$ be the polynomial such that
\begin{equation}\label{Eq:3}
f(x_1,\ldots,x_p)=(a_1x_1+\cdots+a_px_p)g(x_1,\ldots,x_p).
\end{equation} Suppose that at least two of the coordinates of $v$ are non-zero; say $a_r$ and $a_s$. Let $1\leq i\leq p$ be such that $i$ is different from both $r$ and $s$. Then there exist elements $b_1,\ldots,b_p$ of $F$ with $b_i=0$, and $b_j\neq 0$ for all $1\leq j\leq p$ with $j\neq i$, such that $a_1b_1+\cdots+a_pb_p=0$; such a choice is possible since $F$ is infinite. Substituting the values $b_1,\ldots,b_p$ into Equation (\ref{Eq:3}), we obtain the contradiction $$0\neq {b_1}^{n(p-1)}\cdots \widehat{{b_i}^{n(p-1)}}\cdots {b_p}^{n(p-1)}=0\cdot g(b_1,\ldots,b_p)=0.$$ This shows that all coordinates of $v$ are zero except for a unique $a_j\neq 0$ with $1\leq j\leq p$. We deduce that $f(x_1,\ldots,x_p)=a_jx_jg(x_1,\ldots,x_p)$ and hence $x_j$ is a factor of $f(x_1,\ldots,x_p)$. However, it is clear from the description of $f$ that $x_j$ is not a factor. The proof is now complete.
\end{proof}
For $p\geq 3$, Hemmer showed that the complexity of the Specht module $S^\mu$ when $\mu$ is a $(p\times p)$-partition is strictly less than the $p$-weight of $\mu$ \cite[Corollary 1.4]{DH}. Thus the following corollary is immediate by applying Theorem \ref{T: main result 2}.
\begin{cor}\label{C: p by p vtx is non-abelian} Suppose that $p\geq 3$ and $\mu$ is a $(p\times p)$-partition. Then the vertices of $S^\mu$ are non-abelian.
\end{cor}
The next corollary describes the vertices of the Specht module $S^{(p^p)}$ for $p\geq 3$.
\begin{cor}\label{C: p by p vtx} For $p\geq 3$, any vertex of $S^{(p^p)}$ is a Sylow $p$-subgroup of $\sym{p^2}$.
\end{cor}
\begin{proof} Let $Q$ be a vertex of $M=S^{(p^p)}$ and $S$ be a source of $M$. By the Mackey Decomposition Theorem, we have $$\operatorname{Res}^{\sym{p^2}}_{E_p}M\mid \operatorname{Res}^{\sym{p^2}}_{E_p}\operatorname{Ind}^{\sym{p^2}}_{Q}S\cong \bigoplus_{g\in E_p\backslash \sym{p^2}/Q} \operatorname{Ind}^{E_p}_{{}^g Q\cap E_p} \operatorname{Res}^{{}^g Q}_{{}^gQ\cap E_p}{}^g S,$$ where $E_p\backslash \sym{p^2}/Q$ is a set of double coset representatives of $(E_p,Q)$ in $\sym{p^2}$. Suppose that ${}^gQ\cap E_p\neq E_p$ for all $g$; that is, ${}^gQ\cap E_p$ is a proper subgroup of $E_p$ and hence has order at most $p^{p-1}$. Let $$U_g=V^\#_{E_p}(\operatorname{Ind}^{E_p}_{{}^g Q\cap E_p} \operatorname{Res}^{{}^g Q}_{{}^gQ\cap E_p}{}^g S).$$ In fact, $\dim U_g=\dim V^\#_{{}^gQ\cap E_p} (\operatorname{Res}^{{}^g Q}_{{}^gQ\cap E_p}{}^g S)$. Since $U_g$ is the rank variety of a module induced from a proper subgroup of $E_p$, we have that the rank variety $$U:=V^\#_{E_p}(\operatorname{Res}^{\sym{p^2}}_{E_p}\operatorname{Ind}^{\sym{p^2}}_{Q}S)=\bigcup U_g\subseteq \mathbb{A}^p(F)$$ is a finite union of subvarieties of dimension at most $p-1$. If $U_g$ has dimension $p-1$ for some $g$, then necessarily $V^\#_{{}^gQ\cap E_p} (\operatorname{Res}^{{}^g Q}_{{}^gQ\cap E_p}{}^g S)\cong \mathbb{A}^{p-1}(F)$, and hence $U_g$ is a union of hyperplanes of $\mathbb{A}^p(F)$.
Let $N$ be an $FE_p$-module such that $N\oplus \operatorname{Res}^{\sym{p^2}}_{E_p}M=\operatorname{Res}^{\sym{p^2}}_{E_p}\operatorname{Ind}^{\sym{p^2}}_{Q}S$. We have $V^\#_{E_p}(M)\cup V^\#_{E_p}(N)=U$. Let $V$ be an irreducible component of $V^\#_{E_p}(M)$ of dimension $p-1$. By the unique decomposition property of varieties into their irreducible components, see \cite[Corollary 1.6]{RH}, we conclude that $V$ is a hyperplane of $\mathbb{A}^p(F)$. This contradicts Lemma \ref{L: irreducible component of (p^p)}, and hence we conclude that ${}^gQ\cap E_p=E_p$ for some $g\in \mathfrak{S}_{p^2}$; that is, $E_p\subseteq Q$ up to conjugation.
Since $S^{(p^p)}$ has complexity $p-1$, which is different from the $p$-weight of $(p^p)$, the vertices cannot be abelian by Theorem \ref{T: main result 2} or Corollary \ref{C: p by p vtx is non-abelian}. Thus $Q$ properly contains a conjugate of $E_p$ and so has order strictly larger than $p^p$, i.e.\ order $p^{p+1}$, and hence is a Sylow $p$-subgroup of $\sym{p^2}$.
\end{proof}
\begin{rem} The case of $p=2$ is dealt with in \cite[Lemma 6]{MW}: the Specht module $S^{(2,2)}$ has the Klein four group $V_2(2)$ as its vertex, which is different from the description in Corollary \ref{C: p by p vtx}.
\end{rem}
\section{Some further questions}\label{S: some further questions}
In this section, we include some further questions. The following question arises naturally from our Theorem \ref{T: main result 3}.
\begin{ques}\label{Q: Question 1}
Given that $\mu$ is $p$-regular and $p\geq 3$, are the vertices of the Specht module $S^\mu$ abelian if $\mu$ is a $p^2$-core?
\end{ques}
To answer Question \ref{Q: Question 1}, our method used in \S \ref{sS: p=2 case} does not help much. The main obstruction is that there are Specht modules $S^\mu$ which are not Young modules, with $\mu$ $p$-regular and a $p^2$-core (for example, the Specht module $S^{(6,3)}$ with $p=3$). Readers may have observed that, in order to achieve Theorem \ref{T: main result 3}, the main idea is to prove the simplicity of the corresponding Specht modules as in Corollary \ref{C: 4-core is simple}. Thus we can partly answer Question \ref{Q: Question 1} by imposing simplicity as an additional, admittedly crude, assumption.
\begin{prop}\label{P: classfication for simple, p odd} Let $p\geq 3$, $\mu$ be a $p$-regular partition and $Q$ be a vertex of the Specht module $S^\mu$. Suppose that $S^\mu$ is simple. Let $w$ be a non-negative integer. Then the following statements are equivalent.
\begin{enumerate}
\item [(i)] The vertex $Q$ is abelian of $p$-rank $w$.
\item [(ii)] The vertex $Q$ is elementary abelian of $p$-rank $w$.
\item [(iii)] The partition $\mu$ is a $p^2$-core of $p$-weight $w$.
\end{enumerate} In any of these cases, the Specht module $S^\mu$ has trivial source, complexity $w$ and is a simple Young module.
\end{prop}
\begin{proof} We only need to show that (iii) implies (i). As we have mentioned in the proof of Corollary \ref{C: vtx 4-core}, the $p$-adic expansion of $\mu$ is $\mu_{(0)}+p\mu_{(1)}$, where we allow $\mu_{(1)}=\varnothing$. By the simplicity condition, we deduce that $S^\mu\cong Y^\mu$ and hence $Q$ is conjugate to a Sylow $p$-subgroup of $\sym{\rho(\mu)}=(\sym{p})^{|\mu_{(1)}|}$ by Theorem \ref{T: vtx Young}. So $Q$ is abelian, necessarily of $p$-rank $w$, and the Specht module $S^\mu$ has complexity $w$ by Theorem \ref{T: main result 1}.
\end{proof}
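As a trivial sanity check, one may take $\mu=(p)$: this partition is $p$-regular, it is a $p^2$-core of $p$-weight $1$, and $S^{(p)}$ is the trivial module, which is simple, has trivial source and coincides with $Y^{(p)}$; its vertex is a Sylow $p$-subgroup of $\sym{p}$, i.e.\ the cyclic group of order $p$, which is elementary abelian of $p$-rank $1$, and the complexity of $S^{(p)}$ is $1$, in accordance with the proposition.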
Of course, since the computation of the vertices of all Specht modules appears to be difficult, we would ultimately be delighted to achieve the following goal.
\begin{ques}
Classify all indecomposable Specht modules with abelian vertices.
\end{ques}
As we have pointed out in the beginning of \S \ref{sS: p=2 case}, the technical assumption of $2$-regularity is required in Theorem \ref{T: main result 2}. The result would be nicer if it held for all indecomposable Specht modules with abelian vertices. The main obstruction is the possibility that there exists a Specht module $S^\mu$ with abelian vertices $Q$ which contain $\mathbb{Z}_4$ as a direct factor. We do not have an example of a Specht module that supports this possibility allowed by Proposition \ref{P: factors of vertices}.
\begin{ques} For $p=2$, is there an indecomposable Specht module $S^\mu$ whose vertices are abelian and contain $\mathbb{Z}_4$ as a direct factor?
\end{ques}
\subsection*{Acknowledgment} The results of this article have been partly obtained during my days in Aberdeen \cite[Remark 4.14]{KJL2} and partly in Singapore. I would like to take this chance to thank Dave Benson and Radha Kessar. I would like to thank Kai Meng Tan for his availability for discussion and careful reading of this article, and Mark Wildon for pointing out Theorem \ref{P:injection} and improving an earlier version of Theorem \ref{T: main result 1}.
I would like to thank the referees for their valuable suggestions.
\end{document}
\begin{document}
\title{Generalized solutions for a system of partial differential equations arising from urban crime modeling with a logistic source term}
\begin{abstract}
\noindent We consider the system
\[
\left\{
\begin{aligned}
u_t &= \Delta u - \chi \nabla \cdot ( \tfrac{u}{v} \nabla v) - uv + \rho u - \mu u^2, \\
v_t &= \Delta v - v + u v
\end{aligned}
\right.
\tag{$\star$}
\]
with $\rho \in \mathbb{R}, \mu > 0, \chi > 0$ in a bounded domain $\Omega \subseteq \mathbb{R}^2$ with smooth boundary. While very similar to chemotaxis models from biology, this system is in fact inspired by recent modeling approaches in criminology to analyze the formation of crime hot spots in cities. The key addition here in comparison to similar models is the logistic source term.
\\[0.5em]
The central complication this system then presents us with, apart from us allowing for arbitrary $\chi > 0$, is the nonlinear growth term $uv$ in the second equation as it makes obtaining a priori information for $v$ rather difficult. Fortunately, it is somewhat tempered by its negative counterpart and the logistic source term in the first equation. It is this interplay that still gives us enough access to a priori information to achieve the main result of this paper, namely the construction of certain generalized solutions to ($\star$).
\\[0.5em]
To illustrate how close the interaction of the $uv$ term in the second equation and the $-\mu u^2$ term in the first equation is to granting us classical global solvability, we further give a short argument showing that strengthening the $-\mu u^2$ term to $-\mu u^{2+\gamma}$ with $\gamma > 0$ in the first equation directly leads to global classical solutions.
\\[0.25em]
\textbf{Keywords:} urban crime, reaction diffusion equation, global existence, generalized solutions, logistic source term \\
\textbf{MSC (2010):} 35Q91 (primary); 35B40, 35K55, 91D10 (secondary)
\end{abstract}
\section{Introduction}
In this paper we discuss the system
\begin{equation}
\left\{
\begin{aligned}
u_t &= \Delta u - \chi \nabla \cdot ( \tfrac{u}{v} \nabla v) - uv + \rho u - \mu u^2, \\
v_t &= \Delta v - v + u v
\end{aligned}
\right. \label{problem}
\end{equation}
with $\rho \in \mathbb{R}$, $\mu > 0$, $\chi > 0$. While this system is in fact motivated by recent modeling approaches in criminology, we will first establish some of its broader context and therefore take a quick detour to biology and the mathematical modeling of chemotactic movement of certain microscopic organisms. Here, chemotaxis means the process whereby organisms move along a chemical gradient towards an attractant. Modeling this process using systems of partial differential equations was largely initiated by the seminal work of Keller and Segel in 1970 (cf.\ \cite{keller1970initiation}), in which they modeled a population of the slime mold \enquote*{Dictyostelium discoideum} to understand their aggregation behavior observed in experiments, using the following (here somewhat simplified) system:
\begin{align*}
\left\{
\begin{aligned}
u_t &= \Delta u - \nabla \cdot (u\nabla v) \\
v_t &= \Delta v - v + u
\end{aligned}
\right.
\end{align*}
In this system, the functions $u$ and $v$ model the cell and attractant concentrations, while the term $-\nabla \cdot (u\nabla v)$ captures the central mechanism of the model, namely the chemotaxis. The remaining terms model either the diffusion of cells and attractant or their production and decay behavior.
\\[0.5em]
The efficacy of this approach was later confirmed from a mathematical perspective, as the same aggregation behavior is also present in solutions to this system for a large set of initial data when considered in three dimensions. In mathematical terms, this is expressed by solutions blowing up in finite time while conserving their mass (cf.\ \cite{MR3115832}). Among other things, this success has led to various further chemotactic processes from biology being modeled and subsequently analyzed mathematically in recent years. For a broader overview of this, see \cite{MR3351175}.
\\[0.5em]
As biology is not the only field concerned with analyzing the movement of agents towards some kind of goal, other fields have taken notice of this new, successful modeling approach and translated it to their setting. One such field is criminology. Here, cells are replaced by criminals and attractant chemicals are replaced by a somewhat more abstract notion of attractiveness of locations for criminal activity. As such with the central goal of understanding crime hot spots, Short et al. introduced the following (here somewhat simplified) system in 2008 (cf.\ \cite{short2008statistical}), which is based on a \enquote*{routine activity} modeling approach (cf.\ \cite{cohen1979social} and \cite{felson1987routine}) and insights gained in \cite{johnson1997new}, \cite{short2009measuring} and \cite{wilson1982broken} about repeat victimization and crime and disorder generally leading to more of the same:
\begin{equation}
\left\{
\begin{aligned}
u_t &= \Delta u - \chi\nabla \cdot (\tfrac{u}{v}\nabla v) - uv + \Psi \\
v_t &= \Delta v - v + uv + \Phi
\end{aligned}
\right.
\label{classic_crime_model}
\end{equation}
with $\chi = 2$.
In this system $u$ and $v$ represent the criminal population and attractiveness factor for criminal activity respectively, while $-\chi\nabla \cdot (\tfrac{u}{v}\nabla v)$ is still the, here slightly modified, taxis term, which this time models the tendency of criminals to move towards high attractiveness areas. This modification to the taxis term has been introduced by Short et al. to account for the fact that there is an aspect of diminishing returns to consider regarding attractiveness, meaning that a high attractiveness of the current location of a criminal makes them much less likely to move from there as even areas of higher attractiveness do not seem much better in comparison. The paired $uv$ terms are meant to represent expected values of crime in an area at a certain time, modeling essentially that crime in an area leads to higher attractiveness and less repeat crime (cf.\ again \cite{johnson1997new}, \cite{short2009measuring} and \cite{wilson1982broken}). The functions $\Phi$ and $\Psi$ further represent some growth information about criminals and attractiveness independent of the model functions $u$ and $v$, e.g.\ the socio-economic state of certain areas of a city at certain points in time influencing criminalization and creation of attractive targets for criminal activity. For a broader survey of models derived from this, see \cite{d2015statistical}.
\\[0.5em]
In terms of the mathematical analysis of this model, there have been e.g.\ global classical existence results in one dimension in \cite{rodriguez2019global} and in arbitrary dimension, but with some restrictions on $\chi$, in \cite{MR3879245}. Furthermore, existence of solutions for the two-dimensional, radially symmetric case has been studied in \cite{MR4002172} and a similar result for classical solutions given small initial data can be found in \cite{PreprintSmallDataSolution}. See also \cite{PreprintNonLinearDiffusion}, in which existence of certain weak solutions for a variant of (\ref{problem}) with sufficiently strong nonlinear diffusion is discussed. As it is the central feature of interest from an application perspective, there have also been various discussions of hot spot formation in e.g.\ \cite{MR3163243}, \cite{MR2982715}, \cite{MR3491512}. For some theory about models from biology featuring a similar singular sensitivity function in various settings, see e.g.\ \cite{MR2778870}, \cite{MR3674184} and \cite{1937-1632_2020_2_119}, or \cite{MR3887138} for a case also featuring a logistic source term.
\\[0.5em]
Let us now return our focus to the model (\ref{problem}), which is the central object of study in this paper. While it is still very similar to the classic model (\ref{classic_crime_model}) introduced by Short et al., there exist some important differences, namely that we removed the static source terms $\Phi$ and $\Psi$ for convenience of notation, but introduced an additional logistic source term in the first equation. Source terms of this kind are a fairly standard addition to chemotaxis models in biology to represent that cells reproduce while still incorporating the idea that this reproduction even when considered in isolation cannot be unbounded as cells compete for some finite resources, e.g.\ space. This idea then fairly cleanly translates to criminals, where reproduction is replaced by criminalization of individuals in the area by the existing criminal population while criminals still compete with each other for e.g.\ good targets, which are a limited resource.
\paragraph{Main result.}
The main result of this paper is the construction of certain generalized solutions for the system (\ref{problem}) similar to those considered in \cite{MR3383312} or \cite{MR3859449}. Or put more precisely, we consider the following setting: We study the system (\ref{problem}) with parameters $\rho \in \mathbb{R}$, $\mu > 0$, $\chi > 0$ in a bounded domain $\Omega \subseteq \mathbb{R}^2$ with smooth boundary. We further add the boundary conditions
\begin{equation}
\nabla u \cdot \nu = \chi\tfrac{u}{v} \nabla v \cdot \nu, \;\;\;\; \nabla v \cdot \nu = 0 \;\;\;\; \text{ for all } x\in\partial\Omega, t > 0
\label{boundary_conditions}
\end{equation}
and initial conditions
\begin{equation}
u(x,0) = u_0(x), \;\;\;\; v(x,0) = v_0(x) \;\;\;\; \text{ for all } x \in \Omega
\label{intial_conditions}
\end{equation}
for initial data with the following properties:
\begin{equation}
\left\{\;
\begin{aligned}
&u_0 \in C^0(\overline{\Omega}) \;\;\;\;\;\;&& \text{with } u_0 \geq 0 \,\text{ in } \overline{\Omega}, \\
&v_0 \in W^{1,\infty}(\Omega) && \text{with } v_0 > 0 \;\text{ in } \overline{\Omega}
\end{aligned}
\right. .
\label{intial_data_regularity}
\end{equation}
For the sake of simplicity, we fix the domain $\Omega$ and parameters $\rho, \mu, \chi$ from here on out.
\\[0.5em]
Under these assumptions, we then derive the following existence result:
\begin{theorem}
\label{theorem:main}
The system (\ref{problem}) with boundary conditions (\ref{boundary_conditions}) and initial data (\ref{intial_conditions}) with properties (\ref{intial_data_regularity}) has a global generalized solution $(u,v)$ in the sense of \Cref{definition:weak_solution} below.
\end{theorem}
\paragraph{Complications.}
As is common to most (chemo)taxis type systems, the taxis term is always somewhat of a complication because it often stands in the way of easy access to a priori information for the first solution component. In our case, it could be argued that this is amplified by the fact that we allow it to be arbitrarily strong (meaning allowing for arbitrarily big values of $\chi > 0$, $\chi = 2$ being the critical case in the classic Short model (\ref{classic_crime_model})) and have it include a singular sensitivity function $\frac{u}{v}$. While the singular sensitivity might seem critical at first glance, it poses at least for existence theory only negligible problems. This is the case because at least for finite times there always exists a positive lower bound for $v$ by straightforward use of semigroup methods, which means that for all the relevant existence theory the sensitivity $\frac{u}{v}$ is no more problematic than a sensitivity of the form $u$. On the other hand, allowing for arbitrary $\chi > 0$ and therefore $\chi = 2$ seems to be much more of a hurdle to constructing solutions as it makes adapting the techniques seen in e.g.\ \cite{MR3879245} for small values of $\chi$ infeasible.
\\[0.5em]
Apart from the fairly standard complications introduced by the taxis term, the main complication in terms of us being able to derive sufficient a priori estimates to allow for the existence of global solutions is the nonlinear $uv$ term in the second equation. While it can be played against a similar term in the first equation to at least gain some initial $L^1$ type estimates for $u$ and $v$, it is still highly problematic when trying to derive higher $L^p$ bounds for the second solution component. This problem is only slightly tempered by the integrability properties for $\int_\Omega u^2$ granted to us by the logistic source term in the first equation of (\ref{problem}), which allow us to at least gain $L^p$ bounds for $v$ for any finite $p$, but are to our knowledge not quite enough to gain the critical $L^\infty$ bound for $v$ that we would need to obtain classical solutions.
\paragraph{Existence of classical solutions given a stronger logistic source.} To illustrate how critical this interaction of the logistic source term in the first equation and the growth term $uv$ in the second equation is in two dimensions, we will in this paper also consider an altered version of (\ref{problem}) with a slightly strengthened logistic source term, namely
\begin{equation}
\left\{
\begin{aligned}
u_t &= \Delta u - \chi \nabla \cdot ( \tfrac{u}{v} \nabla v) - uv + \rho u - \mu u^{2+\gamma}, \\
v_t &= \Delta v - v + uv
\end{aligned}
\right. \label{weaker_problem}
\end{equation}
with $\gamma > 0$ which, like the other parameters, is also fixed from here on out. While this system is in fact very similar to (\ref{problem}), we will later see in \Cref{section:weakend_case} that this small addition of a slightly stronger logistic source term directly leads to classical solvability, or more precisely, to the following proposition:
\begin{prop}
\label{prop:weaker_system_stronger_solution}
The system (\ref{weaker_problem}) with boundary conditions (\ref{boundary_conditions}) and initial data (\ref{intial_conditions}) with properties (\ref{intial_data_regularity}) has a unique, global classical solution $(u,v)$.
\end{prop}
\noindent This result is mostly made possible by the fact that the stronger logistic source term allows us to bridge a critical gap in a priori information for $v$ (more precisely, it lets us derive an $L^\infty$ bound for $v$ and some crucial bounds for the gradient of $v$ as seen in \Cref{lemma:weak_higher_v_bounds}). Considered in this way, our case therefore seems to be just on the boundary of classical solvability, but as far as we know only allows for e.g.\ generalized solutions in the sense of \Cref{definition:weak_solution}.
\paragraph{Approach.} While some ideas could maybe already be gleaned from the discussion of the critical terms in (\ref{problem}), let us now give a more detailed overview of our approach in this paper:
\\[0.5em]
As is common when constructing weak or generalized solutions, our approach is based on the analysis of regularized versions of the problem (\ref{problem}), indexed by $\varepsilon \in (0,1)$ (cf.\ (\ref{approx_problem})), that admit global classical solutions $(u_\varepsilon, v_\varepsilon)$ and approach the original problem as $\varepsilon \searrow 0$. It is then our aim to derive bounds for these approximate solutions independent of $\varepsilon$ and use well-known compact embedding properties of certain function spaces (e.g.\ due to the Aubin--Lions lemma) to gain solution candidates as limits of the approximate solutions $(u_\varepsilon, v_\varepsilon)_{\varepsilon \in (0,1)}$ along a suitable sequence $(\varepsilon_j)_{j\in\mathbb{N}} \subseteq (0,1)$ with $\varepsilon_j \searrow 0$ as $j \rightarrow \infty$. The last step is then to derive sufficient convergence properties for the sequence $(u_{\varepsilon_j}, v_{\varepsilon_j})_{j\in\mathbb{N}}$ to translate the necessary solutions properties from the approximate solutions to our solution candidates.
\\[0.5em]
The key point in this approach (as in many others) is the derivation of sufficient a priori information. While some baseline $L^1$ estimates can be gained by the fairly common approach to add the first two equations in (\ref{approx_problem}) to cancel out the $u_\varepsilon v_\varepsilon$ terms, it is higher $L^p$ bounds for $v$ and its gradients where the key insight in this paper comes in. To derive these, we first notice that the logistic source term in the first equation in (\ref{approx_problem}) gives us a very useful integrability property for $\int_\Omega u_\varepsilon^2$, which can then be used when testing the second equation in (\ref{approx_problem}) with $v_\varepsilon^{p-1}$ to rein in the problematic influence coming from the resulting $u_\varepsilon v_\varepsilon^p$ terms just about enough to gain $L^p$ bounds for $v_\varepsilon$ for all finite $p$ and an integrability property for $\int_\Omega|\nabla v_\varepsilon|^2$ (cf.\ \Cref{lemma:v_bounds}) due to us only considering a two-dimensional setting. It is both of these properties that lead us to the necessary compact embedding properties for the second solution component and allow us to derive a useful integrability property for $\int_\Omega\frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2}$ by testing the first equation with $\frac{1}{u_\varepsilon + 1}$. While the latter does not help us in deriving further a priori estimates for the first solution component itself, this integrability property at least ensures sufficient compact embedding properties for $\ln(u_\varepsilon + 1)$.
\\[0.5em]
Though the above a priori information already grants us most of the convergence properties we need to translate solution properties from the approximate solutions to the solution candidates as a consequence of the used compactness arguments, we devote \Cref{section:grad_v_convergence} to deriving some additional convergence properties for $\nabla v_\varepsilon$ by adapting methods found in e.g.\ \cite{MR3383312} and \cite{MR3859449}. These additional properties are mostly necessary to handle the taxis-induced terms.
\section{Generalized solution concept and approximate solutions}
Due to the complications laid out in the introduction, classical solutions to (\ref{problem}) seem to us to be out of reach for now and as such we will in this paper focus on a more generalized solution concept similar to the one introduced in e.g.\ \cite{MR3383312}. These solutions are defined as follows:
\begin{definition}
We call nonnegative functions $u,v$ with
\begin{equation}
\begin{aligned}
u &\in L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty))\cap L^\infty([0,\infty);L^1(\Omega)) , \\
\ln(u + 1) &\in L^{2}_\mathrm{loc}([0,\infty);W^{1,2}(\Omega)), \\
v &\in \medcap_{p\geq 1} L_\mathrm{loc}^\infty([0,\infty); L^p(\Omega)) \cap L_\mathrm{loc}^2([0,\infty); W^{1,2}(\Omega)) \text{ and } \\
v^{-1} &\in L^\infty_\mathrm{loc}(\overline{\Omega}\times[0,\infty))
\end{aligned}
\label{wsol:regularity}
\end{equation}
a generalized solution of (\ref{problem}) with (\ref{boundary_conditions}) and (\ref{intial_conditions}), if
\[
\int_\Omega u(\cdot, T) - \int_\Omega u_0 \leq -\int_0^T\int_\Omega uv + \rho\int_0^T \int_\Omega u - \mu \int_0^T \int_\Omega u^2
\numberthis
\label{wsol:mass_property}
\]
for a.e.\ $T > 0$ and
\begin{align*}
-\int_0^\infty \int_{\Omega} \ln(u+1)\varphi_t - \int_{\Omega} \ln(u_0 + 1)\varphi(\cdot, 0) \geq& \int_0^\infty \int_{\Omega} \ln(u+1)\Delta\varphi + \int_0^\infty \int_{\Omega} |\nabla \ln(u+1)|^2\varphi \\
& - \chi \int_0^\infty \int_{\Omega} \frac{u}{v(u+1)} (\nabla \ln(u+1) \cdot \nabla v)\varphi \\
& + \chi\int_0^\infty \int_{\Omega} \frac{u}{v(u+1)} \nabla v \cdot \nabla \varphi \\
& -\int_0^\infty \int_\Omega \frac{uv}{u+1}\varphi \\
& + \rho\int_0^\infty \int_\Omega \frac{u}{u+1}\varphi - \mu \int_0^\infty \int_\Omega \frac{u^2}{u+1} \varphi \numberthis \label{wsol:ln_u_inequality}
\end{align*}
holds for all nonnegative $\varphi \in C^\infty_0(\overline{\Omega}\times[0,\infty))$ with $\nabla \varphi \cdot \nu = 0$ on $\partial \Omega\times(0,\infty)$ and if
\[
\int_0^\infty\int_\Omega v \varphi_t + \int_\Omega v_0 \varphi(\cdot, 0) = \int_0^\infty \int_\Omega \nabla v \cdot \nabla \varphi + \int_0^\infty\int_\Omega v \varphi - \int_0^\infty\int_\Omega uv \varphi
\numberthis
\label{wsol:v_inequality}
\]
holds for all $\varphi \in\medcap_{p\geq 1} L^\infty((0,\infty); \L{p}) \cap L^2((0,\infty); W^{1,2}(\Omega))$ with $\varphi_t \in L^2(\Omega\times[0,\infty))$ and compact support in $\overline{\Omega}\times[0,\infty)$.
\label{definition:weak_solution}
\end{definition}
\noindent First note that, due to the regularity properties in (\ref{wsol:regularity}), all the integrals in the above definition are well-defined.
\\[0.5em]
\noindent Let us now briefly argue that this solution concept is sensible, meaning that classical solutions of (\ref{problem}) with (\ref{boundary_conditions}) and (\ref{intial_conditions}) are generalized solutions and sufficiently regular generalized solutions are in fact classical.
That classical solutions satisfy \Cref{definition:weak_solution} is fairly easy to see by testing the first equation in (\ref{problem}) with $1$ as well as $\frac{\varphi}{u + 1}$ and the second equation in (\ref{problem}) with $\varphi$ for appropriate functions $\varphi$, applying partial integration and rearranging somewhat. As such, we will not expand on this point, but rather focus on the opposite direction, which is far more tricky and non-obvious. We will therefore now give the full argument for this based on prior work in \cite[Lemma 2.1]{MR3383312} for a similar generalized solution concept.
\begin{lemma}
If $u,v \in C^{2,1}(\overline{\Omega}\times(0,\infty)) \cap C^0(\overline{\Omega}\times[0,\infty))$ is a generalized solution in the sense of \Cref{definition:weak_solution} with initial data according to (\ref{intial_data_regularity}), then it is already a classical solution of (\ref{problem}) with boundary conditions (\ref{boundary_conditions}) and initial conditions (\ref{intial_conditions}).
\end{lemma}
\begin{proof}
As (\ref{wsol:v_inequality}) is a fairly standard weak solution formulation for the second equation in (\ref{problem}) and therefore well-known arguments directly apply to show that $v$ is in fact a classical solution of said equation, we will focus our efforts here on the $u$ component and the two inequalities (\ref{wsol:mass_property}) and (\ref{wsol:ln_u_inequality}).
\\[0.5em]
As our first step, let us verify that $u$ satisfies its initial conditions. For this, we first fix a nonnegative $\psi \in C_0^\infty(\Omega)$ and a sequence of cut-off functions $(\zeta_i)_{i\in\mathbb{N}} \subseteq C_0^\infty([0,\infty))$ with
\[
\zeta_i(t) \in [0,1] \;\; \text{ for all } t\in[0,\infty), \;\;\;\; \zeta_i(0) = 1, \;\;\;\; \text{supp}(\zeta_i) \subseteq [0, \tfrac{1}{i}] \stext{ and } \zeta_i' \leq 0 \;\;\;\; \text{ for all }i\in\mathbb{N}
\]
in the same way as in \cite{MR3383312}.
We then define $\varphi_i(x,t) \coloneqq \psi(x)\zeta_i(t)$ for all $i\in\mathbb{N},x\in\Omega,t\in[0,\infty)$, which are of appropriate regularity to be test functions for (\ref{wsol:ln_u_inequality}). If we then plug these test functions into (\ref{wsol:ln_u_inequality}) and take the limit $i \rightarrow \infty$, we gain that
\[
\int_\Omega \ln(u(\cdot,0) + 1)\psi - \int_\Omega \ln(u_0 + 1)\psi \geq 0
\]
due to the dominated convergence theorem and the fact that the sequence $(\zeta'_i)_{i\in\mathbb{N}}$ approaches the Dirac measure $-\delta_0$ concentrated at $t=0$ in the distributional sense. As the nonnegative function $\psi \in C_0^\infty(\Omega)$ was arbitrary, this, the continuity of $u$ and the fact that $\ln(\cdot + 1)$ is monotonically increasing then directly imply
\[
u(x,0) \geq u_0(x) \;\;\;\; \text{ for all } x \in \Omega. \numberthis \label{eq:u_initial_ineq}
\]
Because of (\ref{wsol:mass_property}) and the continuity of $u$, we further gain that
\[
\int_\Omega u(\cdot,0) \leq \int_\Omega u_0,
\]
which then together with (\ref{eq:u_initial_ineq}) gives us
\begin{equation}
u(x,0) = u_0(x) \label{eq:initial_data_works}
\end{equation}
for all $x \in \Omega$.
\\[0.5em]
By reversing the partial integration steps that would lead from testing the first equation in (\ref{problem}) with $\frac{\varphi}{u+1}$ to (\ref{wsol:ln_u_inequality}) and applying a straightforward density argument, we immediately see that $u$ satisfies
\[
\frac{u_t}{u+1} \geq \frac{\Delta u}{u + 1} - \frac{\chi \nabla \cdot (\frac{u}{v} \nabla v)}{u+1} - \frac{uv}{u+1} + \rho \frac{u}{u+1} - \mu \frac{u^2}{u+1}
\]
or
\[
u_t \geq \Delta u - \chi \nabla \cdot \left(\tfrac{u}{v} \nabla v\right) - uv + \rho u - \mu u^2 \numberthis \label{eq:comp1_ineq}
\]
after multiplication with $u+1 > 0$ on $\Omega\times(0,\infty)$. A similar density argument with $\varphi$ supported near the boundary then further yields
\[
\nabla u \cdot \nu \geq \chi \tfrac{u}{v} \nabla v \cdot \nu \numberthis \label{eq:grad_ienq}
\]
on $\partial\Omega\times(0,\infty)$.
\\[0.5em]
Let us now assume that $u$ does not classically satisfy the first equation in (\ref{problem}) together with its boundary condition from (\ref{boundary_conditions}). Because of continuity, there then exist open sets $U_1\subseteq \Omega,V_1 \subseteq [0,\infty)$ such that
\[
u_t > \Delta u - \chi \nabla \cdot \left(\tfrac{u}{v} \nabla v\right) - uv + \rho u - \mu u^2 \;\;\;\; \text{on } U_1\times V_1
\]
or open sets $U_2 \subseteq \partial \Omega, V_2 \subseteq [0,\infty)$ such that
\[
\nabla u \cdot \nu > \chi \tfrac{u}{v} \nabla v \cdot \nu \;\;\;\;\text{on } U_2\times V_2
\]
or both.
In the latter case, this combined with (\ref{eq:initial_data_works}) then implies that
\begin{align*}
\int_\Omega u(\cdot, T) - \int_\Omega u_0 = \int_0^T\int_\Omega u_t &\geq \int_0^T\int_{\partial\Omega} (\nabla u \cdot \nu - \chi\tfrac{u}{v}\nabla v \cdot \nu) - \int_0^T \int_\Omega uv + \rho\int_0^T \int_\Omega u - \mu \int_0^T\int_\Omega u^2 \\
&> - \int_0^T \int_\Omega uv + \rho\int_0^T \int_\Omega u - \mu \int_0^T\int_\Omega u^2
\end{align*}
for all $T \in V_2$ after some partial integration steps, which contradicts (\ref{wsol:mass_property}). A similar contradiction can be derived for the remaining case and as such $u$ must solve the first equation in (\ref{problem}) classically. This completes the proof.
\end{proof}
\noindent After having now established that the existence of these generalized solutions is in fact desirable, let us proceed to laying the groundwork for their construction.
To do this, we first fix a family of cut-off functions $(\eta_\varepsilon)_{\varepsilon \in (0,1)}$ with
\[
\eta_\varepsilon \in C_0^\infty([0,\infty)) \stext{such that} 0 \leq \eta_\varepsilon \leq 1 \text{ in } [0,\infty) \stext{and} \eta_\varepsilon \nearrow 1 \text{ pointwise in } [0,\infty) \text{ as } \varepsilon \searrow 0.
\]
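One possible concrete choice, which we mention only for illustration since any family with the above properties works equally well, is $\eta_\varepsilon(s) \coloneqq \zeta(\varepsilon s)$ for $s \geq 0$, where $\zeta \in C_0^\infty([0,\infty))$ is a fixed non-increasing function with $0 \leq \zeta \leq 1$, $\zeta \equiv 1$ on $[0,1]$ and $\zeta \equiv 0$ on $[2,\infty)$.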
We then use these to define the following approximated and regularized version of (\ref{problem}) with (\ref{boundary_conditions}) and (\ref{intial_conditions}):
\begin{equation}
\left\{
\begin{aligned}
{u_\varepsilon}_t &= \Delta u_\varepsilon - \chi\nabla \cdot ( \eta_\varepsilon(u_\varepsilon) \tfrac{u_\varepsilon}{v_\varepsilon} \nabla v_\varepsilon) - u_\varepsilon v_\varepsilon + \rho u_\varepsilon - \mu u_\varepsilon^2, && x \in \Omega, t > 0 \\
{v_\varepsilon}_t &= \Delta v_\varepsilon - v_\varepsilon + u_\varepsilon v_\varepsilon, && x \in \Omega, t > 0 \\
\nabla u_\varepsilon \cdot \nu &= 0, \;\; \nabla v_\varepsilon \cdot \nu = 0, && x \in \partial\Omega, t > 0 \\
u_\varepsilon(x,0) &= u_0(x), \;\; v_\varepsilon(x,0) = v_0(x), && x \in \overline{\Omega} \\
\end{aligned}
\right. \label{approx_problem} .
\end{equation}
This system or more precisely its solutions will play a key role in the construction of generalized solutions in the sense of \Cref{definition:weak_solution}.
\\[0.5em]
As such, let us now briefly consider the changes made in (\ref{approx_problem}) as compared to (\ref{problem}), which, while small, do have substantial impact concerning the existence of global classical solutions to this system. This stems mostly from the fact that introducing a cut-off function into the taxis term allows us to gain a critical $L^\infty$ estimate for $u_\varepsilon$ by straightforward comparison with a constant function. This is then enough to derive sufficient bounds to show that finite-time blow-up in all the necessary norms is impossible for a local solution gained by adaptation of standard local existence theory. Let us now make this precise:
\begin{lemma}\label{lemma:approx_exist}
For each $\varepsilon \in (0,1)$ and initial data $(u_0, v_0)$ according to (\ref{intial_data_regularity}), there exist functions
\[
u_\varepsilon, v_\varepsilon \in C^0(\overline{\Omega}\times[0,\infty))\cap C^{2,1}(\overline{\Omega}\times(0,\infty))
\]
such that \[
u_\varepsilon(x,t) \geq 0, \;\; v_\varepsilon(x,t) \geq e^{-t} \inf_{y\in\Omega} v_0(y) > 0 \;\;\;\;\;\; \text{ for all } x \in \Omega, t \in [0, \infty) \numberthis \label{eq:approx_lower_bounds}
\] and $(u_\varepsilon, v_\varepsilon)$ is a classical solution of (\ref{approx_problem}).
\end{lemma}
\begin{proof}
A standard contraction mapping argument adapted from e.g.\ \cite{MR2146345} immediately gives us a local solution of (\ref{approx_problem}) on $[0,T_{\mathrm{max},\varepsilon})$ for a maximal $T_{\mathrm{max},\varepsilon} \in (0,\infty]$ and the following blow-up criterion:
\begin{align*}
&\text{If } T_{\mathrm{max},\varepsilon} < \infty, \\
&\text{then } \limsup_{t\nearrow T_{\mathrm{max},\varepsilon}} \left\{ \, \|u_\varepsilon(\cdot, t)\|_\L{\infty} + \|v_\varepsilon(\cdot, t)\|_{W^{1,\infty}(\Omega)} \, \right\} = \infty \text{ or } \liminf_{t\nearrow T_{\mathrm{max},\varepsilon}} \inf_{x\in\Omega} v_\varepsilon(x,t) = 0
\numberthis \label{eq:blow-up}
\end{align*}
Nonnegativity of $u_\varepsilon$ and $v_\varepsilon$ then immediately follows by the maximum principle. Further, by analyzing $v_\varepsilon$ using its mild solution representation (relative to the semigroup $e^{t(\Delta - 1)}$), we see that
\begin{equation}
v_\varepsilon(\cdot,t) = e^{t(\Delta - 1)}v_0 + \int_0^t e^{(t-s)(\Delta - 1)} u_\varepsilon(\cdot,s) v_\varepsilon(\cdot,s) \,\mathrm{d} s \geq e^{t(\Delta - 1)}v_0 \geq e^{-t}\inf_{x\in\Omega} v_0(x) \;\;\;\; \text{ for all } t \in [0,T_{\mathrm{max},\varepsilon}). \label{eq:lower_bound_v}
\end{equation}
This lower bound for $v_\varepsilon$ will not only be necessary for almost all of the following arguments, but also implies the second property in (\ref{eq:approx_lower_bounds}) after we have demonstrated that $T_{\mathrm{max},\varepsilon} = \infty$ later in this proof.
\\[0.5em]
To now show that finite-time blow-up is impossible, let us first assume the opposite, namely that $T_{\mathrm{max},\varepsilon} < \infty$. The property (\ref{eq:lower_bound_v}) then immediately gives us a positive lower bound for $v_\varepsilon$ on $\Omega\times[0,T_{\mathrm{max},\varepsilon})$, which already rules out one of the possible blow-up scenarios.
As further $\eta_\varepsilon(u)$ is zero and $u \mapsto \rho u - \mu u^2$ is negative for all sufficiently large values of $u$, a standard comparison argument applied to the first equation in (\ref{approx_problem}) with an appropriate constant function gives us $K_1 > 0$ with
\[
\|u_\varepsilon(\cdot, t)\|_\L{\infty} \leq K_1 \;\;\;\; \text{ for all } t \in [0,T_{\mathrm{max},\varepsilon})
\]
making another blow-up scenario impossible.
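To briefly sketch one admissible choice of such a constant: if $s_\varepsilon > 0$ denotes any number with $\eta_\varepsilon \equiv 0$ on $[s_\varepsilon,\infty)$, which exists as $\eta_\varepsilon$ has compact support, then $\overline{u} \coloneqq \max\{\|u_0\|_\L{\infty}, s_\varepsilon, \tfrac{|\rho|}{\mu}\}$ satisfies
\[
\overline{u}_t - \Delta \overline{u} + \chi\nabla \cdot \big( \eta_\varepsilon(\overline{u}) \tfrac{\overline{u}}{v_\varepsilon} \nabla v_\varepsilon \big) + \overline{u} v_\varepsilon - \rho \overline{u} + \mu \overline{u}^2 = \overline{u}(v_\varepsilon - \rho + \mu\overline{u}) \geq 0 \;\;\;\; \text{ on } \Omega\times(0,T_{\mathrm{max},\varepsilon})
\]
because $\eta_\varepsilon(\overline{u}) = 0$, $v_\varepsilon \geq 0$ and $\mu\overline{u} \geq |\rho|$, so that the comparison argument indeed allows for the choice $K_1 \coloneqq \overline{u}$.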
This then in turn allows us to use a similar comparison argument for the second equation in (\ref{approx_problem}) to gain $K_2 > 0$ such that
\[
\|v_\varepsilon(\cdot, t)\|_\L{\infty} \leq K_2 \;\;\;\; \text{ for all } t \in [0,T_{\mathrm{max},\varepsilon})
\]
by comparing with the solution to the initial value problem $y'(t) = (K_1 - 1) y(t)$, $t \in [0,T_{\mathrm{max},\varepsilon})$, $y(0) = \|v_0\|_\L{\infty}$ extended in such a way as to be constant on each $\Omega\times\{t\}$ for all $t \in [0,T_{\mathrm{max},\varepsilon})$. Lastly, another use of the mild solution representation of $v_\varepsilon$ in tandem with well-known smoothness estimates for the semigroup~$(e^{t\Delta})_{t\geq 0}$ gives us
\[
\|\nabla v_\varepsilon(\cdot, t)\|_\L{\infty} \leq K_3\|\nabla v_0\|_\L{\infty} + K_3 \int_0^t (t-s)^{-\frac{1}{2}}e^{(-1 -\lambda)(t-s)} \|u_\varepsilon(\cdot,s)\|_\L{\infty}\|v_\varepsilon(\cdot,s)\|_\L{\infty} \,\mathrm{d} s \leq K_4
\]
for all $t \in [0, T_{\mathrm{max},\varepsilon})$, some $\lambda \geq 0$ provided by said smoothness estimates and appropriate constants $K_3, K_4 > 0$. This rules out the final possible blow-up scenario in (\ref{eq:blow-up}), which implies that our assumption $T_{\mathrm{max},\varepsilon} < \infty$ must have been wrong. As such, we have proven that $T_{\mathrm{max},\varepsilon} = \infty$, which completes the proof.
\end{proof}
\noindent
For the rest of this paper, we now fix some initial data $(u_0, v_0)$ according to (\ref{intial_data_regularity}) and a corresponding family of approximate solutions $(u_\varepsilon, v_\varepsilon)_{\varepsilon \in (0,1)}$ as constructed in \Cref{lemma:approx_exist}.
\section{A priori estimates}
\label{section:apriori}
This section will be mostly concerned with deriving a priori bounds for the approximate solutions that we fixed in the previous section as preparation for later convergence arguments. We start this process by combining the first two equations in (\ref{approx_problem}) (because the $-u_\varepsilon v_\varepsilon$ in the first equation will cancel out its counterpart in the second equation) to gain some important baseline estimates:
\begin{lemma}
\label{lemma:l1_l2_props}
There exists $C_1 > 0$ such that
\begin{equation}
\|u_\varepsilon(\cdot, t)\|_\L{1} \leq C_1, \;\; \|v_\varepsilon(\cdot, t)\|_\L{1} \leq C_1 \;\;\;\; \text{ for all } t > 0 \label{eq:l1_bounded}
\end{equation}
and, for each $T > 0$, there exists $C_2(T) > 0$ such that
\begin{equation}
\int_0^T \int_\Omega u_\varepsilon^2 \leq C_2(T)
\label{eq:u_l2_weak_boundedness}
\end{equation}
for all $\varepsilon \in (0,1)$.
\end{lemma}
\begin{proof}
As our first step, we add the equations for $u_\varepsilon$ and $v_\varepsilon$ together and then integrate to gain that
\begin{align*}
\frac{\,\mathrm{d} }{\,\mathrm{d} t}\int_\Omega (u_\varepsilon + v_\varepsilon) &= -\int_\Omega v_\varepsilon + \rho\int_\Omega u_\varepsilon - \mu \int_\Omega u_\varepsilon^2 \\
&\leq -\int_\Omega v_\varepsilon + |\rho|\int_\Omega u_\varepsilon - \mu \int_\Omega u_\varepsilon^2 \;\;\;\; \text{ for all } t > 0 \text{ and } \varepsilon \in (0,1) \label{eq:l1_added} \numberthis
\end{align*}
after partial integration and use of the boundary conditions. By Young's inequality, we see that
\[
\int_\Omega u_\varepsilon \leq \frac{\mu}{|\rho| + 1}\int_\Omega u_\varepsilon^2 + \frac{|\rho| + 1}{4\mu}|\Omega|
\]
or further that
\[
-\mu\int_\Omega u_\varepsilon^2 \leq -(|\rho| + 1) \int_\Omega u_\varepsilon + \frac{(|\rho| + 1)^2}{4\mu}|\Omega|
\]
for all $t > 0$ and $\varepsilon \in (0,1)$. If we now apply this to (\ref{eq:l1_added}), we gain that
\[
\frac{\,\mathrm{d} }{\,\mathrm{d} t}\int_\Omega (u_\varepsilon + v_\varepsilon) \leq - \int_\Omega (u_\varepsilon + v_\varepsilon) + \frac{(|\rho| + 1)^2}{4\mu}|\Omega|
\]
for all $t > 0$ and $\varepsilon \in (0,1)$. This immediately implies (\ref{eq:l1_bounded}) by a straightforward comparison argument with the constant
\[
C_1 \coloneqq \max\left( \frac{(|\rho| + 1)^2}{4\mu}|\Omega|, \int_\Omega (u_0 + v_0) \right).
\]
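To spell the comparison out: writing $y_\varepsilon(t) \coloneqq \int_\Omega (u_\varepsilon + v_\varepsilon)(\cdot, t)$, the above differential inequality reads $y_\varepsilon' \leq -y_\varepsilon + \tfrac{(|\rho| + 1)^2}{4\mu}|\Omega|$ and therefore gives $y_\varepsilon(t) \leq \max\big( \tfrac{(|\rho| + 1)^2}{4\mu}|\Omega|,\, y_\varepsilon(0) \big) = C_1$ for all $t > 0$; as $u_\varepsilon$ and $v_\varepsilon$ are nonnegative, both norms in (\ref{eq:l1_bounded}) are bounded by $y_\varepsilon$.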
If we now slightly rearrange (\ref{eq:l1_added}) and integrate, we further see for each $T > 0$ that
\[
\mu \int_0^T \int_\Omega u_\varepsilon^2 \leq \int_\Omega (u_0 + v_0) + T|\rho| C_1
\]
for all $\varepsilon \in (0,1)$, which then directly implies our second result (\ref{eq:u_l2_weak_boundedness}) because $\mu > 0$.
\end{proof}
\noindent Having now established the baseline estimates above, we can proceed to deriving the linchpin for our main existence result of this paper, namely $L^p$ bounds for $v_\varepsilon$ and an integrability property for terms of the form $\int_\Omega|\nabla v_\varepsilon^{p/2}|^2$ for all finite $p$. The argument used for this mainly rests on the integrability property for $\int_\Omega u_\varepsilon^2$ afforded to us by the logistic source term in the first equation in (\ref{approx_problem}) and gained in the previous lemma. As such, the following lemma presents the key insight in this paper of how to use the logistic source term in the first equation to temper the influence of the $u_\varepsilon v_\varepsilon$ growth term in the second equation and gain just about enough a priori estimates for the construction of generalized solutions in a two-dimensional setting.
\begin{lemma}\label{lemma:v_bounds}
For each $T > 0$ and $p > 1$, there exists $C(T,p) > 0$ such that
\[
\|v_\varepsilon(\cdot, t)\|_\L{p} \leq C(T,p) \;\;\;\; \text{ for all } t \in (0,T]
\]
and
\[
\int_0^T \int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 \leq C(T,p)
\]
for all $\varepsilon \in (0,1)$.
\end{lemma}
\begin{proof}
Fix $p > 1$. Using the well-known Gagliardo--Nirenberg inequality, we can then fix $K_1 > 0$ such that
\[
\|\varphi\|^2_\L{4} \leq K_1 \|\nabla \varphi\|_\L{2}\|\varphi\|_\L{2} + K_1\|\varphi\|^2_\L{\frac{2}{p}}
\]
for all $\varphi \in C^1(\Omega)$. Now testing the second equation in (\ref{approx_problem}) with $v_\varepsilon^{p-1}$ and applying the above inequality results in
\begin{align*}
\frac{1}{p} \frac{\,\mathrm{d} }{\,\mathrm{d} t} \int_\Omega v_\varepsilon^p =& -\frac{4(p-1)}{p^2}\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 - \int_\Omega v_\varepsilon^p + \int_\Omega u_\varepsilon v_\varepsilon^p \\
\leq& -\frac{4(p-1)}{p^2}\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 + \|u_\varepsilon\|_\L{2}\|v_\varepsilon^\frac{p}{2}\|^2_\L{4} \\
\leq& -\frac{4(p-1)}{p^2}\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 + K_1\|u_\varepsilon\|_\L{2}\|\nabla v_\varepsilon^\frac{p}{2}\|_\L{2}\|v_\varepsilon^\frac{p}{2}\|_\L{2} + K_1\|u_\varepsilon\|_\L{2}\|v_\varepsilon^\frac{p}{2}\|^2_\L{\frac{2}{p}} \numberthis \label{eq:vp_diff_ineq_proto}
\end{align*}
for all $t > 0$ and $\varepsilon \in (0,1)$.
Because of \Cref{lemma:l1_l2_props}, there exists a constant $K_2 > 0$ such that
\[
\|v^\frac{p}{2}_\varepsilon\|^2_\L{\frac{2}{p}} = \left\{\int_\Omega v_\varepsilon \right\}^p \leq K_2^p \;\;\;\; \text{ for all } t > 0 \text{ and } \varepsilon \in (0,1)
\]
and therefore we can improve (\ref{eq:vp_diff_ineq_proto}) using Young's inequality as follows:
\begin{align}
\frac{1}{p} \frac{\,\mathrm{d} }{\,\mathrm{d} t} \int_\Omega v_\varepsilon^p \leq&-\frac{p-1}{p^2}\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 + K_3\left\{ \int_\Omega u_\varepsilon^2 \right\} \int_\Omega v_\varepsilon^p + K_4\left(1+\int_\Omega u_\varepsilon^2\right) \label{eq:vp_diff_ineq}
\end{align}
for all $t > 0$ and $\varepsilon \in (0,1)$ and with $K_3 \coloneqq \frac{K_1^2 p^2}{12(p-1)}$ and $K_4 \coloneqq \frac{1}{2} K_1 K_2^p$.
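For the reader's convenience, let us note one possible way to arrive at this, namely via the two elementary estimates
\[
K_1\|u_\varepsilon\|_\L{2}\|\nabla v_\varepsilon^\frac{p}{2}\|_\L{2}\|v_\varepsilon^\frac{p}{2}\|_\L{2} \leq \frac{3(p-1)}{p^2}\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 + \frac{K_1^2 p^2}{12(p-1)}\left\{\int_\Omega u_\varepsilon^2\right\}\int_\Omega v_\varepsilon^p
\]
and
\[
K_1\|u_\varepsilon\|_\L{2}\|v_\varepsilon^\frac{p}{2}\|^2_\L{\frac{2}{p}} \leq \frac{1}{2}K_1 K_2^p \left( 1 + \int_\Omega u_\varepsilon^2 \right)
\]
for all $t > 0$ and $\varepsilon \in (0,1)$, both consequences of Young's inequality; the precise form of the constants is of no further importance.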
\\[0.5em]
We now fix $T > 0$. Because we then know from \Cref{lemma:l1_l2_props} that there exists $K_5 > 0$ such that
\[
\int_0^T \int_\Omega u_\varepsilon^2 \leq K_5
\]
for all $\varepsilon \in (0,1)$, integration of (\ref{eq:vp_diff_ineq}) gives us
\[
\int_\Omega v_\varepsilon^p(\cdot, t) + \frac{p-1}{p}\int_0^t\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 \leq K_6 + p K_3 \int_0^t \left\{ \int_\Omega u_\varepsilon^2 \right\} \int_\Omega v_\varepsilon^p\label{eq:vp_int_ineq} \numberthis
\]
with $K_6 \coloneqq pK_4(T + K_5) + \int_\Omega v_0^p$ for all $t\in[0,T]$ and $\varepsilon \in (0,1)$. This then implies
\[
\int_\Omega v_\varepsilon^p(\cdot, t) \leq K_6 \exp\left( pK_3 \int_0^t \int_\Omega u_\varepsilon^2 \right) \leq K_6e^{pK_3 K_5} =: K_7
\]
for all $t \in (0,T]$ and $\varepsilon \in (0,1)$ by Gronwall's inequality,
which is our first desired result. By now combining this new $L^p$ bound for $v_\varepsilon$ with (\ref{eq:vp_int_ineq}), we then further see that
\[
\int_0^T\int_\Omega |\nabla v_\varepsilon^\frac{p}{2}|^2 \leq \frac{p}{p - 1} \left[ K_6 + pK_3 K_5 K_7 \right]
\]
for all $\varepsilon \in (0,1)$.
\end{proof}
\noindent As the $L^p$ bounds established for $v_\varepsilon$ in the above lemma seem to be insufficient to gain higher $L^p$ bounds for $u_\varepsilon$ or its derivatives due to the taxis term in the first equation of (\ref{approx_problem}), we will instead restrict ourselves to establishing bounds for $\ln(u_\varepsilon + 1)$ and its derivatives as is not uncommon for this type of problem. Given that the $L^1$ bound for $u_\varepsilon$ found in \Cref{lemma:l1_l2_props} already gives us all possible $L^p$ bounds with finite $p$ for $\ln(u_\varepsilon + 1)$, we will focus in the following lemma on establishing a useful, albeit fairly weak bound for the first derivatives of $\ln(u_\varepsilon + 1)$. This is mostly made possible by the integrability properties for $\int_\Omega|\nabla v_\varepsilon|^2$ and baseline estimates for $u_\varepsilon$ and $v_\varepsilon$ already derived in this section.
\begin{lemma}\label{lemma:grad_ln_u_bound}
For each $T > 0$, there exists $C(T) > 0$ such that
\[
\int_0^T\int_\Omega \frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2} \leq C(T)
\]
for all $\varepsilon \in (0,1)$.
\end{lemma}
\begin{proof}
We start by fixing $T > 0$. We then test the first equation in (\ref{approx_problem}) with $\frac{1}{u_\varepsilon+1}$ to see that
\begin{align*}
\frac{\,\mathrm{d}}{\,\mathrm{d} t}\int_\Omega \ln(u_\varepsilon + 1) &= \int_\Omega \frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2} - \chi\int_\Omega \frac{\eta_\varepsilon(u_\varepsilon) u_\varepsilon}{v_\varepsilon(u_\varepsilon + 1)^2} \nabla u_\varepsilon \cdot \nabla v_\varepsilon - \int_\Omega \frac{u_\varepsilon v_\varepsilon}{u_\varepsilon + 1} + \rho \int_\Omega \frac{u_\varepsilon}{u_\varepsilon + 1} - \mu \int_\Omega\frac{u^2_\varepsilon}{u_\varepsilon + 1} \\
&\geq \frac{1}{2}\int_\Omega \frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2} - K_1 \int_\Omega |\nabla v_\varepsilon|^2 - \int_\Omega v_\varepsilon - (|\rho| + \mu) \int_\Omega u_\varepsilon \numberthis \label{eq:ln_u_test_1}
\end{align*}
for all $t \in [0,T]$ and $\varepsilon \in (0,1)$ with $K_1 \coloneqq \tfrac{\chi^2}{2}(\inf_{x\in\Omega} v_0(x))^{-2} e^{2T}$, where we have used Young's inequality together with $0 \leq \eta_\varepsilon \leq 1$, the pointwise estimates $\tfrac{u_\varepsilon}{u_\varepsilon + 1} \leq \min(1, u_\varepsilon)$ and $\tfrac{u_\varepsilon^2}{u_\varepsilon + 1} \leq u_\varepsilon$ as well as the lower bound for $v_\varepsilon$ from (\ref{eq:approx_lower_bounds}). Because \Cref{lemma:l1_l2_props} and \Cref{lemma:v_bounds} then give us a constant $K_2 > 0$ such that
\[
\int_\Omega u_\varepsilon(\cdot, t) \leq K_2, \;\; \int_\Omega v_\varepsilon(\cdot, t) \leq K_2 \;\;\text{and}\;\; \int_0^T\int_\Omega |\nabla v_\varepsilon|^2 \leq K_2
\]
for all $t\in[0,T]$ and $\varepsilon \in (0,1)$,
time integration and some rearranging of inequality (\ref{eq:ln_u_test_1}) results in
\[
\int_0^T \int_\Omega \frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2} \leq 2\left[ \int_\Omega \ln(u_\varepsilon(\cdot, T) + 1) + K_1 K_2 + (1+|\rho|+\mu)K_2T\right] \leq 2 (K_1 + 1)K_2 + 2(1 + |\rho| + \mu) K_2 T
\]
for all $\varepsilon \in (0,1)$. This completes the proof.
\end{proof}
\section{Construction of limit functions as solution candidates}
This section will now be focused on using the a priori bounds above to construct a sequence $(\varepsilon_j)_{j\in\mathbb{N}}$, along which our approximate solutions converge towards some limit functions $u$, $v$. These will then later play the role of candidates to be a generalized solution in the sense of \Cref{definition:weak_solution}. As it is often the case, the construction of said sequence will be built on well-known compact embedding properties of various function spaces, chief among them those afforded to us by the Aubin--Lions lemma (cf.\ \cite{TemamNavierStokes}). Specifically to enable us to use said lemma, we will now derive the following integrability properties for the time derivatives of the families $(\ln(u_\varepsilon + 1))_{\varepsilon \in (0,1)}$ and $(v_\varepsilon)_{\varepsilon \in (0,1)}$:
\begin{lemma}\label{lemma:dt_bounds}
For each $T > 0$, there exists a constant $C(T) > 0$ such that
\[
\int_0^T \|\partial_t \ln(u_\varepsilon(\cdot, t) + 1)\|_{(W^{2,2}(\Omega))^\star} \,\mathrm{d} t \leq C(T) \numberthis \label{eq:dt_ln_u_ineq}
\]
and
\[
\int_0^T \|{v_\varepsilon}_t(\cdot, t)\|_{(W^{2,2}(\Omega))^\star} \,\mathrm{d} t \leq C(T)
\numberthis
\label{eq:dt_v_ineq}
\]
for all $\varepsilon \in (0,1)$.
\end{lemma}
\begin{proof}
To prove (\ref{eq:dt_ln_u_ineq}), we first fix $\varphi \in W^{2,2}(\Omega)$ and then test the first equation in (\ref{approx_problem}) with $\frac{\varphi}{u_\varepsilon + 1}$ to gain that
\begin{align*}
\int_\Omega \partial_t \ln(u_\varepsilon + 1) \varphi =& \int_\Omega \frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2}\varphi - \int_\Omega \frac{\nabla u_\varepsilon \cdot \nabla \varphi}{u_\varepsilon + 1} - \chi\int_\Omega \frac{\eta_\varepsilon(u_\varepsilon) u_\varepsilon }{v_\varepsilon(u_\varepsilon + 1)^2} (\nabla v_\varepsilon \cdot \nabla u_\varepsilon) \varphi
\\
&+\chi\int_\Omega \frac{\eta_\varepsilon(u_\varepsilon) u_\varepsilon }{v_\varepsilon(u_\varepsilon + 1)} (\nabla v_\varepsilon \cdot \nabla \varphi)- \int_\Omega \frac{u_\varepsilon v_\varepsilon}{u_\varepsilon + 1}\varphi + \rho\int_\Omega \frac{u_\varepsilon}{u_\varepsilon + 1}\varphi - \mu \int_\Omega \frac{u_\varepsilon^2}{u_\varepsilon + 1} \varphi \numberthis \label{eq:u_phi_test}
\end{align*}
for all $t \in [0,T]$ and $\varepsilon \in (0,1)$. Due to Young's inequality, the Cauchy--Schwarz inequality and the fact that \Cref{lemma:approx_exist} gives us that
\[
\inf_{x\in\Omega} v_\varepsilon(x, t) \geq e^{-t}\inf_{x\in\Omega} v_0(x) \geq e^{-T}\inf_{x\in\Omega} v_0(x) > 0
\] for all $t \in [0,T]$ and $\varepsilon \in (0,1)$, the above equality directly implies that there exists a constant $K_1(T) > 0$ such that
\begin{align*}
\left| \int_\Omega \partial_t \ln(u_\varepsilon(\cdot, t) + 1) \varphi \right| \leq K_1(T)\left( \int_\Omega\frac{|\nabla u_\varepsilon|^2}{(u_\varepsilon + 1)^2} + \int_\Omega |\nabla v_\varepsilon|^2 + \int_\Omega u_\varepsilon + \int_\Omega v_\varepsilon + 1 \right) \left\{ \|\varphi\|_\L{\infty} + \|\varphi\|_{W^{2,2}(\Omega)} \right\}
\end{align*}
for all $t \in [0,T]$ and $\varepsilon \in (0,1)$. Given now the boundedness and integrability properties in \Cref{lemma:l1_l2_props}, \Cref{lemma:v_bounds} and \Cref{lemma:grad_ln_u_bound} and the fact that $W^{2,2}(\Omega)$ embeds continuously into $L^\infty(\Omega)$, this directly implies the inequality (\ref{eq:dt_ln_u_ineq}).\\[0.5em]
To now prove (\ref{eq:dt_v_ineq}), we again fix $\varphi \in W^{2,2}(\Omega)$ and this time test the second equation in (\ref{approx_problem}) with $\varphi$ to gain that
\[
\int_\Omega {v_\varepsilon}_t \varphi = - \int_\Omega \nabla v_\varepsilon \cdot \nabla \varphi - \int_\Omega v_\varepsilon \varphi + \int_\Omega u_\varepsilon v_\varepsilon \varphi \numberthis \label{eq:v_phi_test}
\]
for all $t \in [0,T]$ and $\varepsilon \in (0,1)$. By similar reasoning as above, we can now find a constant $K_2(T) > 0$ such that
\[
\left| \int_\Omega {v_\varepsilon}_t \varphi \right| \leq K_2(T) \left(
\int_\Omega |\nabla v_\varepsilon|^2 + \int_\Omega u_\varepsilon^2 +
\int_\Omega v^2_\varepsilon + 1
\right)\left\{ \|\varphi\|_\L{\infty} + \|\varphi\|_{W^{2,2}(\Omega)} \right\}
\]
for all $t \in [0,T]$ and $\varepsilon \in (0,1)$ based on (\ref{eq:v_phi_test}). Again due to \Cref{lemma:l1_l2_props} and \Cref{lemma:v_bounds}, this implies (\ref{eq:dt_v_ineq}) and therefore completes the proof.
\end{proof}
\noindent With all of the preparations now firmly in place, we can use the Aubin--Lions lemma and Vitali's theorem to construct our solution candidates as the limits of our approximate solutions along a suitable sequence of $\varepsilon \in (0,1)$. Apart from the extended convergence result presented in the sequel, we will also already derive most of the convergence properties needed to translate the necessary properties for a generalized solution from the approximate solutions to our solution candidates.
\begin{lemma}
\label{lemma:subsequence_extraction}
There exist a sequence $(\varepsilon_j)_{j\in\mathbb{N}} \subseteq (0,1)$ with $\varepsilon_j \searrow 0$ as $j\rightarrow\infty$ and a tuple $(u,v)$ of limit functions defined on $\Omega\times[0,\infty)$ such that
\begin{equation}
\left\{
\begin{aligned}
&u_\varepsilon \rightarrow u && \text{in } L^p_\mathrm{loc}(\overline{\Omega}\times[0,\infty)) \text{ for } p\in[1,2) \text{ and a.e.\ in } \Omega \times [0,\infty), \\
&u_\varepsilon(\cdot, t) \rightarrow u(\cdot, t) && \text{in } L^p(\Omega) \text{ for } p \in [1,2) \text{ and a.e.\ } t > 0, \\
&u_\varepsilon \rightharpoonup u \;\;\;\;\;\;\;\;\;\;\;\;\;\; && \text{in } L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty)), \\
&\ln(u_\varepsilon + 1) \rightharpoonup \ln(u + 1) \;\;\;\;\;\;\;\;\;\;\;\;\;\; && \text{in } L^2_\mathrm{loc}([0,\infty);W^{1,2}(\Omega)), \\
&v_\varepsilon \rightarrow v && \text{in } L^p_\mathrm{loc}(\overline{\Omega}\times[0,\infty)) \text{ for all } p \geq 1 \text{ and a.e.\ in } \Omega \times [0,\infty), \\
&v_\varepsilon(\cdot, t) \rightarrow v(\cdot, t) && \text{in } L^p(\Omega) \text{ for } p \geq 1 \text{ and for a.e.\ } t > 0 \text{ and } \\
&v_\varepsilon \rightharpoonup v && \text{in } L^2_\mathrm{loc}([0,\infty);W^{1,2}(\Omega))
\end{aligned}
\right.
\label{eq:basic_convergence_props}
\end{equation}
as $\varepsilon = \varepsilon_j \searrow 0$. Further, $u$ is nonnegative, $v$ has the property $v(x, t) \geq e^{-t}\inf_{y\in\Omega} v_0(y)$ for almost all $(x,t)\in\Omega\times[0,\infty)$ and both satisfy the regularity properties in (\ref{wsol:regularity}).
\end{lemma}
\begin{proof}
As we will successively extract subsequences multiple times in this lemma, we always denote the latest considered sequence as $(\varepsilon_j)_{j\in\mathbb{N}}$ for ease of notation and without loss of generality.
\\[0.5em]
Due to \Cref{lemma:l1_l2_props}, \Cref{lemma:v_bounds}, \Cref{lemma:grad_ln_u_bound}, and \Cref{lemma:dt_bounds} combined with the Aubin--Lions lemma (cf.\ \cite{TemamNavierStokes}), we immediately gain that the families $(\ln(u_\varepsilon + 1))_{\varepsilon \in (0,1)}$ and $(v_\varepsilon)_{\varepsilon \in (0,1)}$ are compact in $L^2_\mathrm{loc}([0,\infty);L^2(\Omega))$ and therefore in $L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty))$ with regard to the strong topology and in $L^2_\mathrm{loc}([0,\infty);W^{1,2}(\Omega))$ with regard to the weak topology. Due to the strong compactness above, successive extraction of subsequences then gives us a sequence $(\varepsilon_j)_{j\in\mathbb{N}}$ converging to zero and limit functions $u, v$ with
\[
\ln(u_\varepsilon + 1) \rightarrow \ln(u + 1) \stext{and} v_\varepsilon \rightarrow v \stext{in} L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty)) \stext{as} \varepsilon = \varepsilon_j \searrow 0.
\]
Again by successive subsequence extraction, we gain that for the new subsequence $(\varepsilon_j)_{j\in\mathbb{N}}$ the convergences above are additionally true in an almost everywhere pointwise sense. This directly implies that $u_\varepsilon \rightarrow u$ almost everywhere pointwise as $\mathbb{R} \ni x \mapsto e^{x} - 1$ is continuous. Note here that it is these pointwise convergences that make sure that all the limit functions found in this lemma are identical. Using that the bounds in \Cref{lemma:l1_l2_props} imply that $(u_\varepsilon)_{\varepsilon \in (0,1)}$ is compact in $L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty))$ with regards to the weak topology and further using the other weak compactness properties mentioned above, we also directly gain the weak convergence properties posited in (\ref{eq:basic_convergence_props}) by more subsequence extraction arguments.
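For orientation, the compactness argument can be run, for instance, over the triple
\[
W^{1,2}(\Omega) \;\hookrightarrow\hookrightarrow\; L^{2}(\Omega) \;\hookrightarrow\; (W^{2,2}(\Omega))^\star,
\]
whose first embedding is compact by the Rellich--Kondrachov theorem and whose second embedding is continuous via the $L^2$ inner product: The bounds in \Cref{lemma:l1_l2_props}, \Cref{lemma:v_bounds} and \Cref{lemma:grad_ln_u_bound} control $(\ln(u_\varepsilon + 1))_{\varepsilon\in(0,1)}$ and $(v_\varepsilon)_{\varepsilon\in(0,1)}$ in $L^2((0,T);W^{1,2}(\Omega))$ for each $T > 0$, while \Cref{lemma:dt_bounds} controls their time derivatives in $L^1((0,T);(W^{2,2}(\Omega))^\star)$, which is exactly the situation covered by Aubin--Lions type compactness results.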
\\[0.5em]
To prove the remaining convergence properties, we will now heavily lean on the Vitali convergence theorem and the de La Vallée Poussin criterion for uniform integrability (cf.\ \cite[pp.\ 23-24]{PropabilitiesAndPotential}). To this end, let us first note that the almost everywhere convergence of $u_{\varepsilon_j}$ to $u$ and $v_{\varepsilon_j}$ to $v$ implies that, for almost every $t > 0$, $u_{\varepsilon_j}(\cdot, t) \rightarrow u(\cdot, t)$ and $v_{\varepsilon_j}(\cdot, t) \rightarrow v(\cdot, t)$ pointwise almost everywhere. We further know from previous observations in \Cref{lemma:l1_l2_props} and \Cref{lemma:v_bounds} that, for each $T > 0$ and $p\in[1,\infty)$, there exist constants $K_1(T), K_2(T, p) > 0$ with
\[
\int_0^T \int_\Omega u_\varepsilon^2 \leq K_1(T), \;\;\;\; \int_\Omega v_\varepsilon(\cdot, t)^p \leq K_2(T, p) \stext{ and therefore } \int_0^T\int_\Omega v_\varepsilon^p \leq T K_2(T, p)
\]
for all $t\in[0,T]$ and $\varepsilon \in (0,1)$, which by Vitali's theorem result in
\begin{align*}
&u_{\varepsilon_j} \rightarrow u && \text{ in } L^p_\mathrm{loc}(\overline{\Omega}\times[0,\infty)) \text{ for } p \in [1,2), \\
&v_{\varepsilon_j}(\cdot, t) \rightarrow v(\cdot, t) &&\text{ in } L^p(\Omega) \text{ for all } p \geq 1 \text{ and a.e.\ } t > 0 \text{ as well as } \\
&v_{\varepsilon_j} \rightarrow v && \text{ in } L^p_\mathrm{loc}(\overline{\Omega}\times[0,\infty)) \text{ for all } p \geq 1
\end{align*}
as $j \rightarrow \infty$.
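Here the de La Vallée Poussin criterion applies because, for fixed $T > 0$ and $p \in [1,2)$,
\[
\sup_{\varepsilon \in (0,1)} \int_0^T\int_\Omega \big(u_\varepsilon^p\big)^{\frac{2}{p}} = \sup_{\varepsilon \in (0,1)} \int_0^T\int_\Omega u_\varepsilon^2 \leq K_1(T)
\]
with superlinear exponent $\tfrac{2}{p} > 1$, so that the family $(u_\varepsilon^p)_{\varepsilon\in(0,1)}$ is uniformly integrable on $\Omega\times(0,T)$; the families $(v_\varepsilon^p)_{\varepsilon\in(0,1)}$, $p \geq 1$, can be treated in the same way using the bounds on $\int_0^T\int_\Omega v_\varepsilon^{2p}$.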
\\[0.5em]
Because of the $L^p_\mathrm{loc}(\overline{\Omega}\times[0,\infty))$ convergence of the sequence $(u_{\varepsilon_j})_{j\in\mathbb{N}}$ for $p \in [1,2)$, one last set of successive subsequence extractions gives us our final desired convergence property in (\ref{eq:basic_convergence_props}), namely that the sequences $(u_{\varepsilon_j}(\cdot, t))_{j\in\mathbb{N}}$ converge to $u(\cdot, t)$ in $L^p(\Omega)$ for a.e.\ $t > 0$ and $p \in [1,2)$.
\\[0.5em]
The almost everywhere nonnegativity of $u$ and the lower bound for $v$, which ensures the regularity property $v^{-1} \in L_\mathrm{loc}^\infty(\overline{\Omega}\times[0,\infty))$ from (\ref{wsol:regularity}), are inherited from the approximate solutions due to the almost everywhere pointwise convergence proven above. Most of the other regularity properties in (\ref{wsol:regularity}) are then already directly ensured by the convergence properties considered in (\ref{eq:basic_convergence_props}). The remaining $L_\mathrm{loc}^\infty([0,\infty);L^p(\Omega))$-type regularity properties follow from the convergence of the norms $\|u_{\varepsilon_j}(\cdot, t)\|_\L{1}$ and $\|v_{\varepsilon_j}(\cdot, t)\|_\L{p}$ towards $\|u(\cdot, t)\|_\L{1}$ and $\|v(\cdot, t)\|_\L{p}$ for almost every $t > 0$ and $p\in[1,\infty)$, which is ensured by (\ref{eq:basic_convergence_props}), combined with the boundedness properties for the approximate solutions already established in \Cref{lemma:l1_l2_props} and \Cref{lemma:v_bounds}.
\end{proof}
\section{An additional convergence property for $(\nabla v_{\varepsilon_j})_{j\in\mathbb{N}}$}
\label{section:grad_v_convergence}
While we already established a lot of convergence properties in the lemma above, we will still need one more critical strong convergence property for the sequence $(\nabla v_{\varepsilon_j})_{j\in\mathbb{N}}$ to handle the taxis-induced terms in (\ref{wsol:ln_u_inequality}). To derive said property, we follow an approach that can be found, for instance, in \cite[Lemma 4.4]{MR3859449} or \cite[Lemma 8.2]{MR3383312}, as both of these papers deal with very similar solution concepts and therefore also have very similar needs in terms of convergence properties.
\\[0.5em]
The first step towards the convergence property proven in \Cref{lemma:grad_v_convergence} later in this section is to argue that $v$ in fact already satisfies (\ref{wsol:v_inequality}). We do this by using the convergence properties in \Cref{lemma:subsequence_extraction} to show that (\ref{wsol:v_inequality}) directly translates from the approximate solutions to $v$ as follows:
\begin{lemma}\label{lemma:v_is_weak}
Let $v$ be as in \Cref{lemma:subsequence_extraction}. Then $v$ satisfies (\ref{wsol:v_inequality}) for the same functions $\varphi$ as in \Cref{definition:weak_solution}.
\end{lemma}
\begin{proof}
We first fix a test function $\varphi \in \medcap_{p\geq 1} L^\infty_\mathrm{loc}((0,\infty); \L{p}) \cap L^2((0,\infty);W^{1,2}(\Omega))$ with $\varphi_t \in L^2(\Omega\times[0,\infty))$. It is then easily checked by partial integration that each $v_\varepsilon$ satisfies (\ref{wsol:v_inequality}) with said $\varphi$ and as such we need now only further check that the equality survives the limit process $\varepsilon=\varepsilon_j \searrow 0$. For most of the terms in (\ref{wsol:v_inequality}), this is immediately obvious from the convergence properties seen in \Cref{lemma:subsequence_extraction} and therefore we will only give the argument for the $\int_\Omega u_\varepsilon v_\varepsilon \varphi$ term as the $u_\varepsilon v_\varepsilon$ growth term is generally the primary source of complications in the second equation of (\ref{approx_problem}).
\\[0.5em]
For this, let now $T > 0$ be such that $\text{supp}(\varphi) \subseteq \overline{\Omega}\times[0,T]$ and then observe that
\begin{align*}
&\left| \int_0^\infty\int_\Omega u_\varepsilon v_\varepsilon \varphi - \int_0^\infty\int_\Omega u v \varphi \right| \\
\leq& \int_0^T\int_\Omega |v_\varepsilon||u_\varepsilon - u||\varphi| + \int_0^T\int_\Omega|v_\varepsilon - v||u||\varphi| \\
\leq& \|v_\varepsilon\|_{L^5(\Omega\times(0,T))} \|u_\varepsilon - u\|_{L^\frac{5}{3}(\Omega\times(0,T))} \|\varphi\|_{L^5(\Omega\times(0,T))} + \|v_\varepsilon - v\|_{L^5(\Omega\times(0,T))} \|u\|_{L^\frac{5}{3}(\Omega\times(0,T))} \|\varphi\|_{L^5(\Omega\times(0,T))}
\end{align*}
for all $\varepsilon \in (0,1)$.
Due to the fact that $L^\infty((0, T);\L{5}) \hookrightarrow L^5(\Omega\times(0,T))$ and the convergence properties laid out in \Cref{lemma:subsequence_extraction}, this then implies
\[
\int_0^\infty\int_\Omega u_\varepsilon v_\varepsilon \varphi \rightarrow \int_0^\infty\int_\Omega u v \varphi
\]
as $\varepsilon = \varepsilon_j \searrow 0$. This completes the proof.
\end{proof}
\noindent As the convergence properties in \Cref{lemma:subsequence_extraction} for the sequence $(v_{\varepsilon_j})_{j\in \mathbb{N}}$ already provide us with the estimate \[
\int_0^T\int_\Omega |\nabla v|^2 \leq \liminf_{\varepsilon=\varepsilon_j \searrow 0} \int_0^T\int_\Omega |\nabla v_\varepsilon|^2,
\]
we now derive an important inequality from (\ref{wsol:v_inequality}) that will help us gain the corresponding estimate from below. To do this, the natural approach would be setting $\varphi = v$ in (\ref{wsol:v_inequality}), which is not possible due to insufficient time regularity of $v$. Therefore, we have to approximate $v$ with time averaged versions of itself and then use those as test functions $\varphi$. While using this approximation does not allow us to recover (\ref{wsol:v_inequality}) with $\varphi = v$ exactly, it still provides us with an inequality version that is sufficient for our purposes.
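At least formally, inserting $\varphi = v$ (suitably localized to $[0,T]$) into (\ref{wsol:v_inequality}) would amount to testing the second equation with $v$ itself and should therefore produce the energy identity
\[
\frac{1}{2}\int_\Omega v^2(\cdot, T) - \frac{1}{2}\int_\Omega v_0^2 = -\int_0^T\int_\Omega |\nabla v|^2 - \int_0^T\int_\Omega v^2 + \int_0^T\int_\Omega u v^2;
\]
the inequality (\ref{eq:v_ineqality}) derived below is precisely the half of this identity that survives the approximation by time averages.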
\\[0.5em]
As this approach is very similar to the one used in \cite[Lemma 4.4]{MR3859449} or \cite[Lemma 8.2]{MR3383312} for a corresponding inequality, we will only give the following argument in brief:
\begin{lemma} \label{lemma:lower_v_bound}
Let $v$ be as in \Cref{lemma:subsequence_extraction}.
There exists a null set $N \subseteq (0,\infty)$ such that
\begin{equation}
\frac{1}{2}\int_\Omega v^2(\cdot, T) - \frac{1}{2}\int_\Omega v_0^2 + \int_0^T \int_\Omega |\nabla v|^2 \geq
\int_0^T \int_\Omega uv^2 - \int_0^T \int_\Omega v^2 \label{eq:v_ineqality}
\end{equation}
for all $T \in (0,\infty)\setminus N$.
\end{lemma}
\begin{proof}
As in the references, we start by first fixing a null set $N \subseteq (0, \infty)$ such that each $T\in(0,\infty)\setminus N$ is a Lebesgue point of the map
\[
[0,\infty) \rightarrow [0,\infty), \;\;\;\; t \mapsto \int_\Omega v^2(x,t)\,\mathrm{d} x
\] and we then fix one such $T$. While we ourselves will not reiterate this argument from the references, this property of $T$ is mainly used there to ensure that
\[
\frac{1}{\delta}\int_{T}^{T+\delta} \int_\Omega v^2(x,t) \,\mathrm{d} x \,\mathrm{d} t \rightarrow \int_\Omega v^2(x,T) \,\mathrm{d} x
\] as $\delta \rightarrow 0$.
\\[0.5em]
Because we know little about its time derivative, $v$ lacks the regularity needed to be used as a test function in (\ref{wsol:v_inequality}), which is essentially what we want to do. We therefore construct a time-averaged version of $v$ with regularized initial data to take its place as follows:
\\[0.5em]
By density, we first fix $(v_{0k})_{k\in\mathbb{N}} \subseteq C^1(\overline{\Omega})$ such that $v_{0k} \rightarrow v_0$ in $L^2(\Omega)$ as $k \rightarrow \infty$. For $\delta \in (0,1)$, let then $\zeta_\delta$ be a cut-off function on $[0,\infty)$, constructed in the same way as in the references, such that $\zeta_\delta \equiv 1$ on $[0,T]$ and $\zeta_\delta \equiv 0$ on $[T+\delta, \infty)$. Further let
\[
\tilde{v}_k(x,t) \coloneqq \begin{cases}
v(x,t), \;\;\;\;\;\;& (x,t) \in \Omega \times (0,\infty), \\
v_{0k}(x), &(x,t) \in \Omega \times (-1, 0]
\end{cases}
\]
and then let $\varphi(x,t) \coloneqq \varphi_{h,\delta, k}(x,t) \coloneqq \zeta_\delta(t) (A_h\tilde{v}_k)(x,t)$ for all $(x,t) \in \Omega\times(0,\infty)$ with
\[
(A_h\tilde{v}_k)(x,t) \coloneqq \frac{1}{h}\int_{t-h}^t \tilde{v}_k(x,s) \,\mathrm{d} s
\]
for all $(x,t) \in \Omega \times (0,\infty)$ and $\delta,h \in (0,1)$, $k\in \mathbb{N}$. As in the references, it is then easy to show that the regularity properties of $v$ are enough to ensure that $\varphi$ is a valid test function for (\ref{wsol:v_inequality}). We may therefore use it in said equality, with the aim of obtaining (\ref{eq:v_ineqality}) after a number of limit processes in the parameters $\delta$, $h$ and $k$. Most of the resulting integrals remain the same as in the references and converge or can be estimated in a similar fashion due to the regularity of $v$ and the fact that it implies that
\[
(A_h \tilde{v}_k) \rightharpoonup \tilde{v}_k \stext{ in all }L^p(\Omega\times(0,T)) \text{ for } p \in (1,\infty) \text{ as } h \searrow 0
\]
and
\[
\nabla (A_h \tilde{v}_k) = (A_h \nabla\tilde{v}_k) \rightharpoonup \nabla \tilde{v}_k \stext{ in }L^2(\Omega\times(0,T)) \text{ as } h \searrow 0
\]
because of \cite[Lemma A.2]{MR3383312} for all $k\in\mathbb{N}$. It is the estimates for these integrals, which we will not discuss here in more detail, that lead to us only deriving (\ref{eq:v_ineqality}) as an inequality as opposed to the equality one would expect for $\varphi = v$. For the full details concerning this, see e.g.\ \cite[Lemma 4.4]{MR3859449} or \cite[Lemma 8.2]{MR3383312}.
\\[0.5em]
We will therefore only take a closer look at the two integrals that are new to our setting: As the already established regularity properties of $u$ and $v$ combined with the Hölder inequality ensure that $uv \in L_\mathrm{loc}^{p}(\overline{\Omega}\times[0,\infty))$ for all $p \in [1,2)$, we immediately see that
\[
\int_0^\infty\int_\Omega \zeta_\delta(t) u(x,t)v(x,t) (A_h \tilde{v}_k)(x,t) \,\mathrm{d} x \,\mathrm{d} t \rightarrow \int_0^\infty \int_\Omega \zeta_\delta(t) u(x,t)v^2(x,t) \,\mathrm{d} x \,\mathrm{d} t \rightarrow \int_0^T \int_\Omega u(x,t)v^2(x,t) \,\mathrm{d} x \,\mathrm{d} t
\]
and similarly that
\[
\int_0^\infty\int_\Omega \zeta_\delta(t) v(x,t) (A_h \tilde{v}_k)(x,t) \,\mathrm{d} x \,\mathrm{d} t \rightarrow \int_0^\infty \int_\Omega \zeta_\delta(t) v^2(x,t) \,\mathrm{d} x \,\mathrm{d} t \rightarrow \int_0^T \int_\Omega v^2(x,t) \,\mathrm{d} x \,\mathrm{d} t
\]
as first $h\searrow 0$ and then $\delta \searrow 0$ for all $k\in\mathbb{N}$. Note hereby that the $\delta \searrow 0$ limit process works due to the dominated convergence theorem.
\end{proof}
\noindent Given this inequality, we can now prove the following important convergence property:
\begin{lemma} \label{lemma:grad_v_convergence}
Let the function $v$ and sequence $(\varepsilon_j)_{j\in\mathbb{N}}$ be as in \Cref{lemma:subsequence_extraction}. Then
\[
\nabla v_\varepsilon \rightarrow \nabla v \stext{ as } \varepsilon = \varepsilon_j \searrow 0
\]
in $L_\mathrm{loc}^2(\overline{\Omega}\times[0,\infty))$.
\end{lemma}
\begin{proof}
Fix $T \in (0,\infty)\setminus N$ with $N$ as in \Cref{lemma:lower_v_bound}. As the already established convergence properties in \Cref{lemma:subsequence_extraction} for $v$ give us that
\[
\int_0^T\int_\Omega |\nabla v|^2 \leq \liminf_{\varepsilon=\varepsilon_j \searrow 0} \int_0^T\int_\Omega |\nabla v_\varepsilon|^2,
\]
it is sufficient to prove a similar estimate from below.
\\[0.5em]
As a preparation for this, let us now first observe that
\begin{align*}
&\left| \; \int_0^T\int_\Omega u v^2 - \int_0^T\int_\Omega u_\varepsilon v_\varepsilon^2 \; \right| \\
&\leq \int_0^T\int_\Omega |u - u_\varepsilon| v^2 + \int_0^T\int_\Omega u_\varepsilon |v - v_\varepsilon| v + \int_0^T\int_\Omega u_\varepsilon v_\varepsilon |v - v_\varepsilon| \\
&\leq \|u - u_\varepsilon\|_{L^\frac{4}{3}(\Omega\times(0,T))}\|v\|^2_{L^8(\Omega\times(0,T))} + \|u_\varepsilon\|_{L^\frac{5}{3}(\Omega\times(0,T))}\|v - v_\varepsilon\|_{L^{5}(\Omega\times(0,T))}\|v\|_{L^{5}(\Omega\times(0,T))} \\
&\;\;\;\;+ \|u_\varepsilon\|_{L^\frac{5}{3}(\Omega\times(0,T))}\|v_\varepsilon\|_{L^{5}(\Omega\times(0,T))}\|v - v_\varepsilon\|_{L^{5}(\Omega\times(0,T))}
\end{align*}
for all $\varepsilon \in (0,1)$. Due to the boundedness and convergence properties in \Cref{lemma:l1_l2_props}, \Cref{lemma:v_bounds} and \Cref{lemma:subsequence_extraction}, this then implies that
\[
\int_0^T\int_\Omega u_\varepsilon v_\varepsilon^2 \rightarrow \int_0^T\int_\Omega u v^2
\]
as $\varepsilon = \varepsilon_j \searrow 0$.
\\[0.5em]
Using this convergence property as well as the properties laid out in \Cref{lemma:subsequence_extraction} in combination with \Cref{lemma:lower_v_bound}, we directly see that
\begin{align*}
\int_0^T\int_\Omega |\nabla v|^2
&\geq -\frac{1}{2}\int_\Omega v^2(\cdot, T) + \frac{1}{2}\int_\Omega v_0^2 +
\int_0^T \int_\Omega uv^2 - \int_0^T\int_\Omega v^2 \\
&= \lim_{\varepsilon = \varepsilon_j \searrow 0} \left\{ -\frac{1}{2}\int_\Omega v_\varepsilon^2(\cdot, T) + \frac{1}{2}\int_\Omega v_0^2 +
\int_0^T \int_\Omega u_\varepsilon v_\varepsilon^2 - \int_0^T\int_\Omega v_\varepsilon^2
\right\} = \lim_{\varepsilon = \varepsilon_j \searrow 0} \int_0^T\int_\Omega |\nabla v_\varepsilon|^2.
\end{align*}
Combined with the lower bound above, this shows that $\int_0^T\int_\Omega |\nabla v_\varepsilon|^2 \rightarrow \int_0^T\int_\Omega |\nabla v|^2$ as $\varepsilon = \varepsilon_j \searrow 0$. As $\nabla v_\varepsilon \rightharpoonup \nabla v$ in $L^2(\Omega\times(0,T))$ by \Cref{lemma:subsequence_extraction}, this norm convergence upgrades the weak convergence to strong convergence in $L^2(\Omega\times(0,T))$. Since $(0,\infty)\setminus N$ contains arbitrarily large times, this completes the proof.
\end{proof}
\section{Proof of \Cref{theorem:main}}
\noindent Having now assembled all the necessary convergence properties for the sequences $(u_{\varepsilon_j})_{j\in\mathbb{N}}$, $(v_{\varepsilon_j})_{j\in\mathbb{N}}$ and even already some necessary properties for the limit functions and solution candidates $u$ and $v$, we can now begin the proof of our central result.
\begin{proof}[Proof of \Cref{theorem:main}]
Let the functions $u,v$ and sequence $(\varepsilon_j)_{j\in\mathbb{N}}$ be as in \Cref{lemma:subsequence_extraction}.
\\[0.5em]
As the properties (\ref{wsol:regularity}) and (\ref{wsol:v_inequality}) for $u$ and $v$ have already been established in \Cref{lemma:subsequence_extraction} and \Cref{lemma:v_is_weak} respectively, we only need to still prove the inequalities (\ref{wsol:mass_property}) and (\ref{wsol:ln_u_inequality}) for $u$ here.
\\[0.5em]
We start with (\ref{wsol:mass_property}). By integrating the first equation of (\ref{approx_problem}) over $\Omega\times(0,T)$, we directly see that
\begin{equation}
\int_\Omega u_\varepsilon(\cdot, T) - \int_\Omega u_0 = -\int_0^T \int_\Omega u_\varepsilon v_\varepsilon + \rho\int_0^T \int_\Omega u_\varepsilon - \mu \int_0^T \int_\Omega u_\varepsilon^2 \;\;\;\; \text{ for all } \varepsilon \in (0,1) \text{ and } T > 0. \label{eq:approx_mass_property}
\end{equation}
Apart from $\int_0^T \int_\Omega u_\varepsilon^2$, all of the terms above converge to their counterparts without $\varepsilon$ due to \Cref{lemma:subsequence_extraction}, while we only get that
\[
\int_0^T \int_\Omega u^2 \leq \liminf_{\varepsilon = \varepsilon_j \searrow 0} \int_0^T \int_\Omega u_\varepsilon^2
\]
for the remaining term due to weak convergence. Taking the limit superior on both sides of (\ref{eq:approx_mass_property}) then immediately yields (\ref{wsol:mass_property}).
\\[0.5em]
We now fix a nonnegative $\varphi \in C_0^\infty(\overline{\Omega}\times[0,\infty))$ with $\nabla \varphi \cdot \nu = 0$ on $\partial \Omega\times(0,\infty)$. Similarly to the above, testing the first equation in (\ref{approx_problem}) with $\frac{\varphi}{u_\varepsilon + 1}$ and integrating by parts yields, for the approximate solutions $u_\varepsilon$, the counterpart of (\ref{wsol:ln_u_inequality}) with equality and with slightly different taxis terms due to the cut-off function $\eta_\varepsilon$.
Then apart from
\[
\int_0^\infty \int_\Omega |\nabla \ln(u_\varepsilon+1)|^2\varphi,
\]
all of the remaining integral terms are convergent to their counterparts without $\varepsilon$ and without the cut-off function $\eta_\varepsilon$ as we will now briefly illustrate:
\\[0.5em]
Due to the $L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty))$ convergence of the sequence $(\ln(u_{\varepsilon_j} + 1))_{j\in\mathbb{N}} $, we immediately gain that
\[
\int_0^\infty\int_\Omega \ln(u_\varepsilon + 1) \varphi_t \rightarrow \int_0^\infty\int_\Omega \ln(u + 1) \varphi_t \stext{and}
\int_0^\infty\int_\Omega \ln(u_\varepsilon + 1) \Delta\varphi \rightarrow \int_0^\infty\int_\Omega \ln(u + 1) \Delta \varphi
\]
as $\varepsilon = \varepsilon_j \searrow 0$. To handle the taxis-induced terms, let us first observe that
\begin{align*}
&\left\| \; \frac{\eta_\varepsilon(u_\varepsilon)u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)} \nabla v_\varepsilon - \frac{u}{v(u+1)} \nabla v \; \right\|_{L^{2}(\Omega\times(0,T))} \\
\leq& \left\| \frac{\eta_\varepsilon(u_\varepsilon) u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)} \right\|_{L^{\infty}(\Omega\times(0,T))} \left\| \nabla v_\varepsilon - \nabla v \right\|_{L^{2}(\Omega\times(0,T))} + \int_0^T\int_\Omega |\nabla v|^2 \left( \frac{\eta_\varepsilon(u_\varepsilon)u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)} - \frac{u}{v(u+1)}\right)^2
\end{align*}
for each $T > 0$ by introducing a zero. We then further note that, for each $T > 0$, \Cref{lemma:approx_exist} and \Cref{lemma:subsequence_extraction} give us that $\frac{\eta_\varepsilon(u_\varepsilon)u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)}$ and $\frac{u}{v(u+1)}$ are uniformly bounded on $\Omega\times(0,T)$ independent of $\varepsilon$ and that $\nabla v\in L^2(\Omega\times(0,T))$, which implies that the first integral term converges to zero as $\varepsilon = \varepsilon_j \searrow 0$ due to \Cref{lemma:grad_v_convergence} and the second integral term converges to zero as $\varepsilon = \varepsilon_j \searrow 0$ due to the pointwise convergence proven in \Cref{lemma:subsequence_extraction} combined with the dominated convergence theorem. Thus,
\[
\frac{\eta_\varepsilon(u_\varepsilon)u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)}\nabla v_\varepsilon \rightarrow \frac{u}{v(u+1)}\nabla v \stext{in}L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty)) \text{ as } \varepsilon = \varepsilon_j \searrow 0.
\]
If we then combine this with the weak convergence properties of the sequence $(\nabla\ln(u_{\varepsilon_j} + 1))_{j\in\mathbb{N}}$ in $L^2_\mathrm{loc}(\overline{\Omega}\times[0,\infty))$ from \Cref{lemma:subsequence_extraction}, we directly gain that
\[
\int_0^\infty \int_\Omega \frac{\eta_\varepsilon(u_\varepsilon)u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)} (\nabla \ln(u_\varepsilon + 1) \cdot \nabla v_\varepsilon) \varphi \rightarrow \int_0^\infty \int_\Omega \frac{u}{v(u+1)} (\nabla \ln(u + 1) \cdot \nabla v) \varphi
\]
and
\[
\int_0^\infty \int_\Omega \frac{\eta_\varepsilon(u_\varepsilon)u_\varepsilon}{v_\varepsilon(u_\varepsilon+1)} \nabla v_\varepsilon \cdot \nabla \varphi \rightarrow \int_0^\infty \int_\Omega \frac{u}{v(u+1)} \nabla v \cdot \nabla \varphi
\]
as $\varepsilon = \varepsilon_j \searrow 0$. As for the convergence of the remaining three relevant integral terms
\[
\int_0^\infty\int_\Omega \frac{u_\varepsilon v_\varepsilon}{u_\varepsilon+1} \varphi, \;\;\;\; \int_0^\infty\int_\Omega \frac{u_\varepsilon}{u_\varepsilon+1} \varphi \stext{and} \int_0^\infty\int_\Omega \frac{u_\varepsilon^2}{u_\varepsilon+1} \varphi
\]
towards their counterparts without $\varepsilon$, the above argument can essentially be reused: each of their integrands is again the product of a pointwise convergent and uniformly bounded sequence of functions, which in this case is always $\frac{u_\varepsilon\varphi}{u_\varepsilon + 1}$, and a sequence of functions converging in an appropriate $L^p_\mathrm{loc}(\overline{\Omega}\times[0,\infty))$.
\\[0.5em]
As in reference \cite{MR3383312}, for the remaining term we at least have the property
\[
\int_0^\infty \int_\Omega |\nabla \ln(u+1)|^2\varphi \leq \liminf_{\varepsilon=\varepsilon_j \searrow 0} \int_0^\infty \int_\Omega |\nabla \ln(u_\varepsilon+1)|^2\varphi
\]
due to the weak convergence proven in \Cref{lemma:subsequence_extraction}. Taking the limit inferior of both sides of the approximate variant of (\ref{wsol:ln_u_inequality}) then gives us (\ref{wsol:ln_u_inequality}) itself. As such, $(u, v)$ is in fact a generalized solution in the sense of \Cref{definition:weak_solution} and the proof is complete.
\end{proof}
\section{Existence of classical solutions to the altered system (\ref{weaker_problem})}
\label{section:weakend_case}
As already mentioned in the introduction, we will devote this section to proving \Cref{prop:weaker_system_stronger_solution}, which is concerned with the global classical solvability of the altered system (\ref{weaker_problem}) featuring a stronger logistic source term. We do this to illustrate how close the interplay between the logistic term in the first equation and the growth term in the second equation comes to immediately giving us global classical solvability for (\ref{problem}), while, in our opinion, falling just short of being sufficient.
\\[0.5em]
As our first step for this, we can use similar standard arguments as used in \Cref{lemma:approx_exist} to derive the following local existence result and blow-up criterion for (\ref{weaker_problem}):
\begin{lemma} \label{lemma:weaker_case_exist}
There exist a maximal existence time ${T_{\mathrm{max}}} \in (0,\infty]$ and functions $u,v \in C^{2,1}(\overline{\Omega}\times(0,{T_{\mathrm{max}}}))\cap C^0(\overline{\Omega}\times[0,{T_{\mathrm{max}}}))$ with $u$ nonnegative and $v$ positive such that $(u,v)$ is the unique classical solution of the system (\ref{weaker_problem}) with (\ref{boundary_conditions})--(\ref{intial_data_regularity}) on $\overline{\Omega}\times[0,{T_{\mathrm{max}}})$. Further, the solution $(u,v)$ adheres to the following blow-up criterion:
\begin{equation}
\label{eq:weak_blowup}
\text{If } {T_{\mathrm{max}}} < \infty,
\text{ then } \limsup_{t\nearrow {T_{\mathrm{max}}}} \left\{ \|u(\cdot, t)\|_\L{\infty} + \|v(\cdot, t)\|_{W^{1,q}(\Omega)} \right\} = \infty \text{ or } \liminf_{t\nearrow {T_{\mathrm{max}}}} \inf_{x\in\Omega} v(x,t) = 0.
\end{equation}
Here, $q$ is some real number in $(2,2+\gamma)$.
\end{lemma}
\noindent We now fix appropriate initial data $(u_0, v_0)$, a unique solution $(u,v)$ on $\overline{\Omega}\times[0,{T_{\mathrm{max}}})$ corresponding to said initial data and ${T_{\mathrm{max}}} \in (0,\infty]$, $q\in(2,2+\gamma)$ according to \Cref{lemma:weaker_case_exist}.
\\[0.5em]
One additional result directly reusable from the existence theory in \Cref{lemma:approx_exist} is that
\begin{equation}
v(\cdot, t) \geq e^{-t}\inf_{x\in\Omega}v_0(x) \;\;\;\; \text{ for all } t\in[0,{T_{\mathrm{max}}}) \label{eq:weak_lower_v_bound}
\end{equation}
due to semigroup methods, which immediately prevents one of the possible blow-up scenarios.
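In brief: Since $uv \geq 0$, the variation-of-constants representation of $v$ gives, at least formally,
\[
v(\cdot, t) = e^{t(\Delta - 1)} v_0 + \int_0^t e^{(t-s)(\Delta - 1)} u(\cdot, s) v(\cdot, s) \,\mathrm{d} s \geq e^{-t} e^{t\Delta} v_0 \geq e^{-t} \inf_{x\in\Omega} v_0(x)
\]
for all $t \in [0,{T_{\mathrm{max}}})$, where we used that, under the present boundary conditions, $e^{t\Delta}$ is order preserving and satisfies $e^{t\Delta} v_0 \geq \inf_{x\in\Omega} v_0(x)$.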
\\[0.5em]
As many of the techniques used to derive a priori estimates for $(u,v)$ translate directly from \Cref{section:apriori} because the changes above essentially only work in our favor, we will now only briefly revisit some of the foundational results from said section and adapt them to $(u,v)$.
\begin{lemma} \label{lemma:weak_baseline_estimates}
There exists a constant $C_1 > 0$ such that
\[
\|u(\cdot, t)\|_\L{1} \leq C_1,\;\; \|v(\cdot, t)\|_\L{1} \leq C_1 \;\;\;\; \text{ for all } t\in[0,{T_{\mathrm{max}}})
\]
and, if ${T_{\mathrm{max}}} < \infty$, there exists a constant $C_2 > 0$ such that
\[
\int_0^{T_{\mathrm{max}}} \int_\Omega u^{2} \leq C_2, \;\;\;\; \int_0^{T_{\mathrm{max}}} \int_\Omega u^{2+\gamma} \leq C_2.
\]
\end{lemma}
\begin{proof}
Similar to the proof of \Cref{lemma:l1_l2_props}, we can gain these bounds by adding the first and second equation and integrating to see that
\[
\frac{\,\mathrm{d} }{\,\mathrm{d} t} \int_\Omega (u + v) \leq -\int_\Omega (u + v) + K_1 \;\;\;\; \text{ for all } t \in [0,{T_{\mathrm{max}}})
\]
with $K_1 \coloneqq [ \frac{1+|\rho|}{2+\gamma} ]^{2+\gamma}\frac{1+\gamma}{\mu^{1+\gamma}}|\Omega|$ due to Young's inequality. If, furthermore, ${T_{\mathrm{max}}} < \infty$ and we integrate the first equation in (\ref{weaker_problem}) in space and time, we gain that
\[
\int_0^{T_{\mathrm{max}}} \int_\Omega u^{2+\gamma} \leq \frac{1}{\mu}\int_\Omega u_0 + \frac{|\rho|}{\mu}\int_0^{T_{\mathrm{max}}} \int_\Omega u.
\]
As in \Cref{lemma:l1_l2_props}, these inequalities directly imply most of our results while the last remaining bound follows due to the Hölder inequality and the fact that $2 < 2 + \gamma$.
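Explicitly, this last step can be written as
\[
\int_0^{T_{\mathrm{max}}} \int_\Omega u^{2} \leq \left( \int_0^{T_{\mathrm{max}}} \int_\Omega u^{2+\gamma} \right)^{\frac{2}{2+\gamma}} \big( {T_{\mathrm{max}}} |\Omega| \big)^{\frac{\gamma}{2+\gamma}},
\]
which is finite because ${T_{\mathrm{max}}} < \infty$.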
\end{proof}
\noindent Because the second equation is the same in the approximated system (\ref{approx_problem}) and in the altered system (\ref{weaker_problem}), \Cref{lemma:v_bounds} translates almost verbatim.
\begin{lemma}\label{lemma:weak_vp_bounds}
If ${T_{\mathrm{max}}} < \infty$, there exists a constant $C(p) > 0$ such that
\[
\|v(\cdot, t)\|_\L{p} \leq C(p)
\]
for all $t \in [0,{T_{\mathrm{max}}})$.
\end{lemma}
\noindent While it might only seem like a slight improvement, the leeway afforded to us by $\gamma > 0$ nonetheless allows us to achieve a critical $L^\infty$ bound for $v$ and an additional bound for the gradient of $v$, both of which eluded us in the case discussed in the previous sections. These two results will ultimately prove to be the key to the existence of global classical solutions in this case.
\begin{lemma} \label{lemma:weak_higher_v_bounds}
If ${T_{\mathrm{max}}} < \infty$, there exists a constant $C > 0$ such that
\[
\|v(\cdot, t)\|_\L{\infty} \leq C
\]
and
\[
\|\nabla v(\cdot, t)\|_\L{q} \leq C
\]
for all $t \in [0,{T_{\mathrm{max}}})$.
\end{lemma}
\begin{proof}
By using the mild solution representation of $v$ (relative to the semigroup $e^{t(\Delta - 1)}$) and well-known smoothness estimates for said semigroup, we see that
\begin{align*}
\|v(\cdot, t)\|_\L{\infty} &\leq \|v_0\|_\L{\infty} + K_1\int_0^t (t-s)^{-\frac{1}{2}}e^{-(t-s)} \|u v\|_\L{2} \,\mathrm{d} s \\
&\leq \|v_0\|_\L{\infty} + K_1|\Omega|^{\frac{q-2}{2q}}\int_0^t (t-s)^{-\frac{1}{2}}e^{-(t-s)} \|u v\|_\L{q} \,\mathrm{d} s
\end{align*}
and
\[
\|\nabla v(\cdot, t)\|_\L{q} \leq K_1\|\nabla v_0\|_\L{q} + K_1\int_0^t (t-s)^{-\frac{1}{2}}e^{-(t-s)} \|u v\|_\L{q} \,\mathrm{d} s
\]
for all $t \in [0,{T_{\mathrm{max}}})$ and some constant $K_1 > 0$. Using the Hölder and Young inequalities, we can now further estimate the critical integral term in both of the above inequalities as
\begin{align*}
\int_0^t (t-s)^{-\frac{1}{2}}e^{-(t-s)} \|u v\|_\L{q} \,\mathrm{d} s \leq& \int_0^t (t-s)^{-\frac{1}{2}}e^{-(t-s)} \|u\|_\L{2+\gamma}\|v\|_\L{p} \,\mathrm{d} s \\
\leq& \frac{1}{2+\gamma}\int_0^t \int_\Omega u^{2 + \gamma} \,\mathrm{d} s + \frac{1}{r} \int_0^t (t-s)^{-\frac{r}{2}}e^{-r(t-s)} \|v\|^r_\L{p} \,\mathrm{d} s \label{eq:critical_integral_term} \numberthis
\end{align*}
with $p \coloneqq \frac{q(2 + \gamma)}{2 + \gamma - q} \in (q,\infty)$ because $q\in(2,2+\gamma)$ and $r \coloneqq \frac{2 + \gamma}{1 + \gamma} \in (1,2)$ for all $t \in [0,{T_{\mathrm{max}}})$. Due to \Cref{lemma:weak_baseline_estimates}, \Cref{lemma:weak_vp_bounds} and the fact that $\frac{r}{2} < 1$, the remaining integrals in (\ref{eq:critical_integral_term}) are uniformly bounded for all $t \in [0,{T_{\mathrm{max}}})$, which directly implies our desired results.
\end{proof}
\noindent By similar semigroup methods, we can now gain a corresponding result for the first solution component $u$:
\begin{lemma}\label{lemma:weak_linfty_u}
If ${T_{\mathrm{max}}} < \infty$, there exists $C > 0$ such that
\[
\|u(\cdot, t)\|_\L{\infty} \leq C
\]
for all $t\in[0,{T_{\mathrm{max}}})$.
\end{lemma}
\begin{proof}
Due to the fact that there exists $K_1 > 0$ such that $\rho y - \mu y^{2 + \gamma} \leq K_1$ for all $y \geq 0$, we can estimate the mild solution representation of $u$ (relative to the semigroup $e^{t\Delta}$) as follows:
\[
u(\cdot, t) \leq \|u_0\|_\L{\infty} + \chi \int_0^t e^{(t-s)\Delta} \nabla \cdot (\tfrac{u}{v} \nabla v) \,\mathrm{d} s + \int_0^t K_1 \,\mathrm{d} s \leq K_2 + \chi\int_0^t e^{(t-s)\Delta} \nabla \cdot (\tfrac{u}{v} \nabla v) \,\mathrm{d} s
\]
with $K_2 \coloneqq \|u_0\|_\L{\infty} + {T_{\mathrm{max}}} K_1$ for all $t\in[0,{T_{\mathrm{max}}})$. Now fix $p\in (2,q)$. By well-known semigroup smoothness estimates and the Hölder inequality, we can then improve the above to
\begin{align*}
\|u(\cdot, t)\|_\L{\infty} &\leq K_2 + \chi K_3 \int_0^t (t-s)^{-\frac{1}{2} - \frac{1}{p}} \|\tfrac{u}{v} \nabla v\|_\L{p} \,\mathrm{d} s \\
&\leq K_2 + K_4\int_0^t (t-s)^{-\frac{1}{2} - \frac{1}{p}} \|u\|_\L{r} \|\nabla v\|_\L{q} \,\mathrm{d} s \\
&\leq K_2 + K_4\int_0^t (t-s)^{-\frac{1}{2} - \frac{1}{p}} \|u\|^\alpha_\L{1} \|u\|^{1-\alpha}_\L{\infty} \|\nabla v\|_\L{q} \,\mathrm{d} s
\end{align*}
with some constant $K_3 > 0$, $K_4 \coloneqq \chi K_3 e^{{T_{\mathrm{max}}}}(\inf_{x\in\Omega} v_0(x))^{-1},\; r \coloneqq \frac{pq}{q-p} \in (p,\infty)$ and $\alpha \coloneqq \frac{1}{r}$ for all $t \in [0,{T_{\mathrm{max}}})$. If we now define $M_T \coloneqq \|u\|_{L^\infty(\Omega\times[0,T])} < \infty$ for every $T \in [0,{T_{\mathrm{max}}})$, the above inequality allows us to derive that
\[
M_T \leq K_2 + K_5 M_T^{1-\alpha}
\]
for some $K_5 > 0$, which is independent of $T$, due to \Cref{lemma:weak_baseline_estimates}, \Cref{lemma:weak_higher_v_bounds} and the fact that $\frac{1}{2} + \frac{1}{p} < 1$. As $\alpha \in (0,1)$, Young's inequality provides a $T$-independent constant $K_6 > 0$ with $K_5 M_T^{1-\alpha} \leq \frac{1}{2}M_T + K_6$, so that $M_T \leq 2(K_2 + K_6)$ for all $T\in [0,{T_{\mathrm{max}}})$. This completes the proof.
\end{proof}
\noindent
\Cref{lemma:weak_higher_v_bounds} and \Cref{lemma:weak_linfty_u} have shown that the remaining blow-up scenarios in (\ref{eq:weak_blowup}) are impossible as well, and therefore we can now prove \Cref{prop:weaker_system_stronger_solution} as follows:
\begin{proof}[Proof of \Cref{prop:weaker_system_stronger_solution}]
Assume ${T_{\mathrm{max}}} < \infty$. Then by (\ref{eq:weak_lower_v_bound}), we directly gain that
\begin{equation}
\inf_{x\in\Omega}v(x,t) \geq e^{-{T_{\mathrm{max}}}}\inf_{x\in\Omega} v_0(x) > 0 \;\;\;\; \text{ for all } t\in [0,{T_{\mathrm{max}}}). \label{eq:no_blowup1}
\end{equation}
Further due to \Cref{lemma:weak_higher_v_bounds} and \Cref{lemma:weak_linfty_u}, we can gain $K_1 > 0$ such that
\begin{equation}
\|v(\cdot,t)\|_{W^{1,q}(\Omega)} \leq K_1, \;\;\;\; \|u(\cdot, t)\|_\L{\infty} \leq K_1 \;\;\;\; \text{ for all } t\in [0,{T_{\mathrm{max}}}). \label{eq:no_blowup2}
\end{equation}
Together (\ref{eq:no_blowup1}) and (\ref{eq:no_blowup2}) contradict the blow-up criterion (\ref{eq:weak_blowup}) and therefore we must have ${T_{\mathrm{max}}} = \infty$, which completes the proof.
\end{proof}
\section*{Acknowledgment} The author acknowledges support of the \emph{Deutsche Forschungsgemeinschaft} in the context of the project \emph{Emergence of structures and advantages in cross-diffusion systems}, project number 411007140.
\end{document}
\begin{document}
\title{A classical analogue of negative information}
\begin{abstract}
Recently, it was discovered that the {\it quantum partial information}
needed to merge one party's state with another party's state
is given by the conditional entropy, which can be negative
[Horodecki, Oppenheim, and Winter, Nature {\bf 436}, 673 (2005)].
Here we find a classical analogue of this, based on a long known relationship
between entanglement and shared private correlations: namely,
we consider a private distribution held between two parties, and
correlated to a reference system, and ask how much secret communication
is needed for one party to send her distribution to the other. We give
optimal protocols for this task, and find that private information can be
negative -- the sender's distribution can be transferred and the potential
to send future distributions in secret is gained through the
distillation of a secret key. An analogue of {\it quantum state exchange}
is also discussed and one finds cases where exchanging a distribution
costs less than for one party to send it. The results give new classical
protocols, and also clarify the various relationships between
entanglement and privacy.
\end{abstract}
\author{Jonathan Oppenheim}
\affiliation{Department of Applied Mathematics and Theoretical Physics, University of Cambridge U.K.}
\author{Robert W. Spekkens}
\affiliation{Perimeter Institute for Theoretical Physics,
31 Caroline St. N, Waterloo, Ontario N2L 2Y5, Canada}
\author{Andreas Winter}
\affiliation{Department of Mathematics, University of Bristol, Bristol BS8 1TW, U.K.}
\date{28 November 2005}
\maketitle
{\bf Introduction.} While evaluating the quality of information is
difficult, we can quantify it. This was first done by Shannon
\cite{Shannon1948} who showed that the amount of information of a
random variable $X$ is given by the Shannon entropy $H(X)=-\sum
P_X(x) \log_2 P_X(x)$ where $P_X(x)$ is the probability that the
source produces $X=x$ from distribution $P_X$. If $n$ is the
length of the message (of independent samples of $X$) we want to
communicate to a friend, then $\sim nH(X)$ is the number of bits
required to send them. If our friend already has some prior
information about the message we are going to send him (in the
form of the random variable $Y$), then the number of bits we need
to send him is less, and is given by $n$ times the conditional
entropy $H(X|Y)=H(XY)-H(Y)$, according to the Slepian-Wolf theorem
\cite{slepian-wolf}.
In the case of quantum information, it was shown by Schumacher
\cite{Schumacher1995} that for a source producing a string of $n$ unknown
quantum states with density matrix $\rho_A$, $\sim nS(A)$ quantum
bits (qubits) are necessary and sufficient to send the states
where $S(A)=-\operatorname{Tr}\rho_A\log\rho_A$ is the von Neumann entropy (we
drop the explicit dependence on $\rho$ in $S(A)$). One can now
ask how many qubits are needed to send the states if the receiver
has some prior information. More precisely, if two parties, Alice
and Bob, possess shares $A$ and $B$ of a bipartite system $AB$
described by the quantum state $\rho_{AB}$, how many qubits does
Alice need to send Bob so that he can locally prepare a bipartite
system $A'B$ described by the same quantum state (classical
communication is free in this model). We say that Bob has some
prior information in the form of the state $\rho_B=\operatorname{Tr}_A\rho_{AB}$, and
Alice wants to {\it merge} her state with his by sending him some
{\it partial quantum information}.
Recently, it was found that a rate of $S(A|B)=S(AB)-S(B)$ qubits
are necessary and sufficient \cite{how-merge,how-merge2} for this
task. More mathematically: just as in Schumacher's quantum source
coding \cite{Schumacher1995}, we consider a source emitting a sequence of
$n$ unknown states, but the statistics of the source, i.e. the
average density matrix of the states, is known. The ensemble of
states which realize the density matrix is however unspecified. We
then demand that the protocol allows Alice to transfer her share
of the state to Bob with high probability for all possible states
from the ensemble. A more compact way to say this is to imagine
that the state which Alice and Bob share is part of some pure
state shared with a reference system $R$ and given by
$\ket\psi_{ABR}$ such that $\rho_{AB}$ is obtained by tracing over
the reference system. A successful protocol will result in
$\rho_{AB}^{\otimes n}$ being with Bob, and
$\ket\psi_{ABR}^{\otimes n}$ should be virtually unchanged, while
entanglement is consumed by the protocol at rate $S(A|B)$.
The quantity, $S(A|B)$ is the quantum conditional entropy, and it
can be negative \cite{Wehrl78,HH94-redun,cerfadami}. This
seemingly odd fact now has a natural interpretation \cite{how-merge} -- the
conditional entropy quantifies how many qubits need to be sent
from Alice to Bob, and if it is negative, they gain the
potential to send qubits in the future at no cost. That is, Alice
can not only send her state to Bob, but the parties are
additionally left with maximally entangled states which can
later be used in a teleportation protocol to transmit quantum
states without the use of a quantum channel. This is the operational
meaning of the fact that partial information can be negative in
the quantum world.
{\bf A classical model.} In order to further understand the notion
of negative information, we are interested in finding some
classical analogue of it. Indeed we will find a paradigm in which
not only is there a notion of negative information, but also the
rate formulas and proof techniques are remarkably similar. We
shall take as our starting point the similarity between
entanglement and private correlations, a fact that was used in
constructing the first entanglement distillation protocols, was used
to conjecture new types of classical distributions \cite{gisin-wolf-99}, but
which was first made fully explicit by Collins and Popescu
\cite{collins-popescu}. In this paradigm, maximally entangled
states are replaced by perfect secret correlations (a ``key'')
$\overline\Psi$, with probability distribution
${\overline\Psi}_{XY}(0,0)={\overline\Psi}_{XY}(1,1)=\frac{1}{2}$.
By \emph{secret}, we mean that a third party, an eavesdropper Eve,
is uncorrelated with Alice and Bob's secret bit. We then replace
the notion of classical communication by public communication
(i.e., the eavesdropper gets a copy of the public messages that
Alice and Bob send to each other). Quantum communication (the
sending of coherent quantum states) is replaced by secret
communication, i.e. communication through a secure channel such
that the eavesdropper learns nothing about what is sent. We thus
have sets of states (i.e. classical distributions between various
parties and an eavesdropper), and a class of operations -- local
operations and public communication (LOPC). Under LOPC one cannot
increase secrecy, just as under local operations and classical
communication (LOCC) one cannot increase entanglement.
The analogy has the essential feature, as in entanglement theory,
that there is a resource (secret key, pure entanglement)
which allows for the transfer of information (private distributions, quantum states),
and this information can be manipulated (by means of classical or public information),
and transformed into the resource.
This allows for the possibility of negative information.
We will further be able to make new statements about
the analogy. For example, we will find indications for
an analogue of pure states, mixed states,
and various types of GHZ states \cite{ghz}.
Looking at the quantum model, we should consider an arbitrary
distributed source between Alice and Bob, described by a pair of
random variables with probability distribution $P_{XY}$;
furthermore we need a ``purification'', that is an extension of
this distribution to a distribution $P_{XYZ}$ with $Z$ being held
by a party $R$, which we call the reference (who has the marginal
distribution $P_Z$). According to this and
\cite{collins-popescu}, the natural approach will be as follows. A
pure quantum state held between two parties has a Schmidt
decomposition
$\ket\psi_{TR}=\sum_i\sqrt{p(i)}\ket{e_i}\otimes\ket{f_i}$, with
orthonormal bases $\{ \ket{e_i} \}$ and $\{ \ket{f_i} \}$. An
analogue of this is a private \emph{bi-disjoint distribution}, i.e. a
distribution $P_{TZ}$ (where $T \equiv XY$),
\begin{equation}
\label{eq:bi-disjoint}
P_{TZ}(tz)=\sum_i p(i)\,P_{T|I=i}(t)P_{Z|I=i}(z),
\end{equation}
with conditional distributions $P_{Z|I}$ and $P_{T|I}$, such
that $P_{T|I=i}(t)P_{T|I=j}(t)=0$ and $P_{Z|I=i}(z)P_{Z|I=j}(z)=0$
for $i\neq j$. Just as the quantum system $TR$ is in a product
state between $T$ and $R$ once $i$ is known, so the bi-disjoint
distribution is in product form
$P_{TZ|I=i}(tz)=P_{T|I=i}(t)P_{Z|I=i}(z)$ once $i$ is known. And
just as a pure quantum state is decoupled from any environment, so
our distribution should be decoupled from the eavesdropper. Note
that it appears necessary here to introduce a fourth party $E$,
something we could avoid in the quantum setting by demanding that
the overall pure state is preserved -- for distributions the
meaning of this is staying decoupled from the eavesdropper, which
we have to distinguish from the reference \cite{foot-1}.
Introducing the eavesdropper into the notation, we have
$P_{XYZE}=P_{XYZ} \otimes P_E$. Such distributions we call
\emph{private}, meaning that $E$ is decoupled. In that regard, we
shall speak of \emph{secret} distributions (between Alice and Bob)
where they are decoupled from $R$ and $E$ -- following terminology
introduced in~\cite{csiszar:narayan}. We will provide further
justification for the appropriateness of this analogue of pure
states after we have fully analysed merging and negative
information \cite{foot-2}. Note, however, that it has the following desired property:
in the quantum case, considering a purification of the $AB$ system allows
us to enforce the requirement that the protocol succeed for particular pure
state decompositions of $\rho_{AB}$. Likewise the distribution $P_{XYZ}$
allows us to enforce the requirement that the protocol succeed for a decomposition
of the distribution $P_{XY}$, with the record being held by $R$.
We now introduce the analogue of quantum state merging -- {\it
distribution merging} -- which naturally means that at the end Bob
and the reference should possess a sample $\widehat{X}\widehat{Y}Z$ from
the distribution $P_{XYZ}$, with $Z$ held by the reference and
$\widehat{X}\widehat{Y}$ by Bob. The protocol may use public communication
freely; we will consider only the rate of secret key used or
created. We also go to many copies of the random variables -- thus
we denote by $X^n$ many independent copies of random variable
$X$, while $\widehat{X}^n$ denotes the output sample of length $n$.
Formally:
\begin{definition}
Given $n$ instances of a private bi-disjoint distribution
$P_{XYZ}$ between $AB$ and $R$,
a {\it distribution merging} protocol between a sender who holds $X$
and receiver who holds $Y$, is one which creates, by possibly
using $k$ secret key bits and free public communication, a
distribution $P_{\widehat{U}^l(\widehat{X}^n\widehat{Y}^n\widehat{V}^l)Z^n\widehat{E}^n}'$
such that $P'$ approximates
$P_{XYZE}^{\otimes n} \otimes {\overline\Psi}_{UV}^{\otimes l}$
for large $n$ (in total variational, or $\ell^1$, distance).
Here $l$ is the number of secret bits shared at the end between
Alice and Bob; Alice has $\widehat{U}^l$ and Bob $\widehat{V}^l\widehat{X}^n\widehat{Y}^n$.
\par\noindent
The rate of consumption of secret key for the protocol, called its
\emph{secret key rate}, is defined to be $\frac{1}{n}(k-l)$.
\end{definition}
We can now state our main result:
\begin{theorem}
\label{thm:main}
A secret key rate of
\begin{equation}
\label{eq:rate}
I(X:Z)-I(X:Y) = H(X|Y)-H(X|Z)
\end{equation}
bits is necessary and sufficient to achieve distribution merging.
Here, $I(X:Y):= H(X)+H(Y)-H(XY)$
is the mutual information.
When this quantity is nonnegative, it is the minimum rate of secret
key consumed by an optimal merging protocol.
When it is negative, not only is distribution merging
achieved, but $I(X:Y)-I(X:Z)$
bits of secret key remain at the end of the protocol.
\end{theorem}
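Note that the two expressions in (\ref{eq:rate}) coincide simply because $I(X:Y) = H(X) - H(X|Y)$ and $I(X:Z) = H(X) - H(X|Z)$, so that
\[
I(X:Z) - I(X:Y) = \big[H(X) - H(X|Z)\big] - \big[H(X) - H(X|Y)\big] = H(X|Y) - H(X|Z).
\]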
Before proving this theorem, and
introducing the protocol in full generality, it may be useful to
discuss three very simple examples:
\begin{enumerate}
\item Alice's bit is independent of Bob's bit, but correlated with the reference:
$P_{XYZ}(0,0,0) \!=\! P_{XYZ}(1,0,1) \!=\! \frac{1}{2}$.
In this case, Alice must send her bit to Bob through a secret channel, consuming
one bit of secret key.
\item Alice and Bob have a perfect bit of shared secret correlation:
Bob can locally create a random
pair of correlated bits, and Alice and Bob keep the bit
of secret correlation as secret key (which they may use in the
future for private communication). There is one bit of negative information.
\item The distribution
$P_{XYZ}(0,1,1)=P_{XYZ}(0,0,0)=P_{XYZ}(1,0,1)=P_{XYZ}(1,1,0)=\frac{1}{4}$:
If $Z=0$ Alice and Bob are perfectly correlated,
and if $Z=1$ they are anti-correlated.
In such a case, Alice can tell Bob her bit publicly, and because
an eavesdropper doesn't know
Bob's bit, she would not be able to know the value of $Z$.
Bob will however know $Z$ and
can locally create a random pair of anti-correlated bits
or correlated bits depending on the
value of $Z$. Thus, the distribution merging is achieved with one bit of
public communication and no private communication.
This reminds one of the state merging
problem for the quantum state
$\rho_{AB}=\frac{1}{2}(\proj{00}+\proj{11})$ whose
purification on $R$ is the GHZ state where the merging
is achieved with one bit
of classical communication and no quantum communication.
Another potential classical analog of the GHZ
is the distribution $P_{XYZ}(1,1,1)=P_{XYZ}(0,0,0)=1/2$ \cite{collins-popescu},
which has perfect correlations for all sites,
as does the GHZ state; it also has a merging cost of zero (although with zero
public communication, unlike in the quantum case).
A distribution which has both the above features of the GHZ is
the distribution with an equal mixture of
$\{111,122,212,221,333,344,434,444\}$ inspired by \cite{spekkens-toy}.
It has perfect correlations ($1$ or $2$ on
one site is correlated with $1$ or $2$ on the others, and likewise
for $3$ and $4$), as well as the ability
of one of the parties to create secret key by informing the
other parties of her variable. Like the first GHZ like candidate,
it also has no secret communication cost for distribution
merging, and public communication cost of one bit,
reminiscent of the quantum GHZ state.
\end{enumerate}
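As a quick consistency check of these examples against the rate formula (\ref{eq:rate}): in the first example $H(X|Y)=1$ and $H(X|Z)=0$, giving a cost of one secret bit; in the second (where the reference is uncorrelated with the shared secret bit) $H(X|Y)=0$ and $H(X|Z)=1$, giving a rate of $-1$, i.e.\ one bit of distilled key; and in the third, $X$ is uniform and independent of $Y$ as well as of $Z$ separately, so that $H(X|Y)=H(X|Z)=1$ and the secret key rate is zero, in accordance with the protocols described above.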
{\bf\emph{Proof of Theorem~\ref{thm:main}.}}
We now describe the general protocol
for distribution merging. We will give two proofs of
achievability: the first is very simple and uses recycling of
the initial secret key resources. Namely, let Alice make
her transmission of Slepian-Wolf coding \cite{slepian-wolf}
secret, using a rate of $H(X|Y)$ secret bits. This gives Bob
knowledge of $XY$, which by the bi-disjointness of $P_{XYZ}$ informs
him of $Z$ [rather, the label $I$ in (\ref{eq:bi-disjoint})].
Hence he can produce a fresh sample $\widehat{X}\widehat{Y}$
of the conditional distribution $P_{XY|Z}$ -- this solves the
merging part. Now only observe that Alice and Bob are still left
with the shared $X$; from it they can extract $H(X|Z)$ secret bits
via privacy amplification \cite{privacy-BBCM}, i.e.~random hashing. By
repeatedly running this protocol, we can recover the startup
cost of providing $H(X|Y)$ secret bits, which is only later
recycled -- at least if the rate (\ref{eq:rate}) is positive. In
the appendix we show a direct proof in one step, which produces
secret key if (\ref{eq:rate}) is negative without the need to
provide some to start the process.
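In terms of key bookkeeping, the protocol thus consumes $H(X|Y)$ secret bits per copy for the secret Slepian-Wolf transmission and regains $H(X|Z)$ secret bits per copy by privacy amplification, for a net rate of
\[
H(X|Y) - H(X|Z) = I(X:Z) - I(X:Y),
\]
in agreement with (\ref{eq:rate}).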
Now we turn to the converse, namely that this protocol is optimal.
Just as in state merging, the proof comes from looking at
monotones. Assuming first that secret key is consumed in the
protocol, then the initial amount of secrecy that Bob has with
Alice and the reference $R$ is $H(K)+I(Y:XZ)$ where $K$ is a
random variable describing the key. By monotonicity of secrecy
under local operations and public communication this must be
greater than the final amount of secrecy he has with them; but
since he then has $\widehat{X}\widehat{Y}$, this is $I(\widehat{X}\widehat{Y}:Z) =
I(XY:Z)$. Hence $H(K) \geq I(XY:Z)-I(XZ:Y) = I(X:Z)-I(X:Y)$ as
required. If key is acquired in the protocol, then the value
$H(K)$ should be put as part of the final
amount of secrecy, and we have again $H(K)\leq I(X:Y)-I(X:Z)$.
$\Box$
The cost of distribution merging might appear quite different from
the cost of quantum state merging. Actually this is not the case.
Since $\ket\psi_{ABR}$ is pure, we may rewrite
\begin{equation}
S(A|B)=\frac{1}{2}[ I(A:R)-I(A:B) ],
\end{equation} in terms of the quantum mutual information
$I(A:B):=S(A)+S(B)-S(AB)$. This looks like the cost of
distribution merging, only with a mysterious factor of $1/2$. The
factor is the same one that accounts for the fact that while one
bit of secret key has $I(A:B)=1$ and can be used in a one-time pad
protocol for one bit of secret communication, a singlet has
$I(A:B)=2$ but can teleport only one qubit.
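Indeed, for one bit of secret key ${\overline\Psi}_{XY}$ one has $H(X)=H(Y)=H(XY)=1$ and hence a mutual information of $1+1-1=1$, whereas for a maximally entangled pair of qubits $S(A)=S(B)=1$ and $S(AB)=0$, so that $I(A:B)=1+1-0=2$.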
For an alternative explanation, see also \cite{compl}.
{\bf Pure and mixed state analogues.} Note that a crucial part of
the merging protocol is that once Bob knows Alice's variable, he
effectively knows $Z$ and can thus recreate the distribution (more
precisely, he knows the product distribution he shares with $R$).
Recreating the distribution would not be as easy if the total
distribution $P_{XYZ}$ were not bi-disjoint, which further serves to
motivate our definition of bi-disjoint distributions as the
analogues of pure quantum states (although only for this
particular merging task). Nevertheless, one might wonder if we
have not overly restricted our model. Let us go back to a general
distribution $P_{XYZ}$ of Alice, Bob and the reference, and
observe that it can always be written \begin{equation}
\label{eq:P-purification}
P_{XYZ} = (\operatorname{id}_{XY}\otimes\Lambda) \widetilde{P}_{XY\widetilde{Z}},
\end{equation}
with $\operatorname{id}_{XY}$ the identity,
$\widetilde{P}_{XY\widetilde{Z}}$ a bi-disjoint distribution,
and a noisy channel (a stochastic map) $\Lambda:\widetilde{Z}\rightarrow Z$.
Up to relabelling of $\widetilde{Z}$ there is in fact a unique
\emph{minimal} distribution, denoted $\overline{P}_{XY\overline{Z}}$, in the
sense that every other $\widetilde{P}$ can be degraded to
$\overline{P}$ by locally applying a (deterministic) channel
$\widetilde{\Lambda}:\widetilde{Z}\rightarrow \overline{Z}$. One way of
doing this is by having $\overline{Z}$ be a record of which probability
distribution needs to be created, conditional on each $XY$. A channel
can then act on the record $\overline{Z}$ to create the needed probability
distribution $P_{Z|XY}$. That is, we define (cf.~\cite{wolf-wulli})
\[
\overline{Z} = \Phi(XY) := P_{Z|XY},
\]
as an element of the probability simplex -- this means that pairs
$XY$ are labelled by the same $\overline{Z}$ (which is a deterministic
function $\Phi$ of $XY$) if and only if the
conditional distributions $P_{Z|XY}$ are the same. The channel
$\Lambda$ has the transition probabilities
$\Lambda(z|\overline{z}) = \overline{z}(z) = P_{Z=z|XY}$.
Note that $\overline{P}$ is indeed bi-disjoint.
Let us call this $\overline{P}_{XY\overline{Z}}$ the \emph{purified version}
of $P_{XYZ}$. Note the beautiful analogy to the quantum case, where
every mixed state $\rho_{ABR}$ on $ABR$ can be written
\[
\rho_{ABR} = (\operatorname{id}_{AB}\otimes\Lambda)\psi_{AB\overline{R}},
\]
with a quantum channel $\Lambda:\overline{R}\rightarrow R$ and an
essentially unique pure state $\psi_{AB\overline{R}}$ (up to local
unitaries).
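As a toy illustration of the construction (our example, added for concreteness): let $X,Y,Z$ be bits with $P_{Z|XY=00}=P_{Z|XY=11}=\delta_0$ and $P_{Z|XY=01}=P_{Z|XY=10}$ uniform. Then $\Phi$ takes only two values, so $\overline{Z}\in\{\overline{z}_1,\overline{z}_2\}$ with $\overline{z}_1=\delta_0$ attached to the pairs $\{00,11\}$ and $\overline{z}_2=(\tfrac12,\tfrac12)$ attached to $\{01,10\}$; the channel acts as $\Lambda(z|\overline{z}_1)=\delta_0(z)$ and $\Lambda(z|\overline{z}_2)=\tfrac12$, and the resulting $\overline{P}_{XY\overline{Z}}$ is bi-disjoint since the two groups of $XY$ pairs are disjoint.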
\begin{theorem}
\label{thm:mixed-main}
For general $P_{XYZ}$, the optimal rate of distribution merging
is that of the purified version $\overline{P}_{XY\overline{Z}}$, i.e.
\begin{equation}
\label{eq:purified-rate}
I(X:\overline{Z})-I(X:Y) = H(X|Y)-H(X|\overline{Z}).
\end{equation}
\end{theorem}
Clearly, it is achievable: we have a protocol at this rate
for $\overline{P}_{XY\overline{Z}}$, which must work for $P_{XYZ}$
as well, since the latter is obtained by locally degrading
$\overline{Z}\rightarrow Z$ which commutes with the merging
protocol acting only on Alice and Bob and makes the secrecy
condition for the final key only easier to satisfy.
To show that the rate (\ref{eq:purified-rate}) is optimal, we shall
argue that successful merging with reference $Z$ implies
that the protocol is actually successful for reference $\overline{Z}$,
at which point we can use the previous converse for ``pure''
(bi-disjoint) distributions.
Observe that Bob at the end of the protocol has to produce
samples $\widehat{X}^n\widehat{Y}^n$ such that
$P_{\widehat{X}^n\widehat{Y}^nZ^n} \approx P_{X^nY^nZ^n}$.
Assume now that it were true that with high probability (over the
joint distribution of $X^nY^nZ^n\widehat{X}^n\widehat{Y}^n$),
\begin{equation}
\label{eq:Phi-equal}
\widetilde{Z}^n := \Phi^n(\widehat{X}^n\widehat{Y}^n)
\stackrel{\text{!}}{=} \Phi^n(X^nY^n) = \overline{Z}^n.
\end{equation}
This in fact implies that merging is achieved for the
distribution $\overline{P}_{XY\overline{Z}}$:
\begin{equation*}\begin{split}
&\bigl\| \overline{P}_{X^nY^n\overline{Z}^n}
- \overline{P}_{\widehat{X}^n\widehat{Y}^n\overline{Z}^n} \bigr\|_1 \\
&\phantom{==:}
\leq \bigl\| \overline{P}_{X^nY^n\overline{Z}^n}
- \overline{P}_{\widehat{X}^n\widehat{Y}^n\widetilde{Z}^n} \bigr\|_1 \\
&\phantom{==:=============}
+ \bigl\| \overline{P}_{\widehat{X}^n\widehat{Y}^n\widetilde{Z}^n}
- \overline{P}_{\widehat{X}^n\widehat{Y}^n\overline{Z}^n} \bigr\|_1 \\
&\phantom{==:}
\leq \| P_{X^nY^n} - P_{\widehat{X}^n\widehat{Y}^n} \|_1
+ 2\,\Pr\{ \widetilde{Z}^n \neq \overline{Z}^n\},
\end{split}\end{equation*}
and both final terms are small.
Furthermore, the secret key (possibly) distilled
at the end of the protocol has to be uncorrelated to
$\widehat{X}^n\widehat{Y}^n$,
and since this data includes knowledge of $\overline{Z}^n$, the key
will not only be secret from a reference $Z^n$ but even against $\overline{Z}^n$.
Now, unfortunately we cannot argue (\ref{eq:Phi-equal}) for a
given protocol (and, insofar as the situation is understood, it may
not even be true in general \cite{aram:personal});
however, we can modify the protocol
slightly -- in particular losing only a sublinear number of key
bits -- such that (\ref{eq:Phi-equal}) becomes true. We invoke a
result on so-called ``blind mixed-state compression''
\cite{koashi-mixed,koashi:undisturbed}
(see also \cite{DurVC-compr}): notice that Bob
has to output (for most $Z$) a sample of the conditional
distribution $P_{XY|Z}$, but that Alice and Bob together have
access only to one sample of that distribution, without knowing
$Z$. The central technical result in \cite{koashi-mixed} is that
every such process must preserve a lot of correlation between the
given and the produced sample, in the sense that $\Pr\bigl\{
\Phi(\widehat{X}_I\widehat{Y}_I) \neq \Phi(X_IY_I) \bigr\}$, with random
index $I$, is small. In other words, with high probability, the
string $\Phi^n(\widehat{X}^n\widehat{Y}^n)$ is within a small Hamming ball
around $\overline{Z}^n = \Phi^n(X^nY^n)$. Since Bob knows $Y^n$
already, Alice will need to send only negligible further
information about $X^n$ to Bob (invoking Slepian-Wolf another
time) so that he can determine the correct $\overline{Z}^n$ with
high probability. On the other hand, privacy amplification incurs
only a negligible loss in rate to make the final secret key
independent of this further communication (namely just its
length), and hence of $\overline{Z}^n$. Hence, we have a protocol
that effectively puts Bob in possession of $\overline{Z}$, of
which the final secret key is independent; hence he could just
output a sample from $\overline{P}_{XY|\overline{Z}}$, which would
yield a valid and asymptotically correct protocol.
The expression in Eq.~(\ref{eq:purified-rate}), when negative and
optimised over pre-processing, was previously shown to be the rate
for secret key generation~\cite{wyner75,CsiszarKorner,AhlCsi93}. Here, as
in the quantum case, we find that distribution merging provides an
interpretation of this quantity without looking at optimisations,
and for both the positive and negative case.
Note that for given
$P_{XYZ}$, if $Q_{XYZ'}=(\operatorname{id}_{XY}\otimes\Lambda)P_{XYZ}$ with
$\Lambda$ sufficiently close to the identity, the two
distributions have the same purification, leading to the
conclusion that our result on distribution merging is robust under
small perturbations of the reference. Note however that a general
perturbation of $P_{XYZ}$ by an arbitrary small change in the
probability density leads to a drastic discontinuity: namely, a
generic perturbation $Q_{X'Y'Z'}$ will have trivial purification
$\overline{Z}' = X'Y'$ because all conditional distributions
$Q_{Z'|X'Y'}$ will be different. Thus, for $Q$ the merging cost
will be $H(X'|Y')$ -- essentially Slepian-Wolf coding with Bob
outputting the very $X'Y'$ of the source, so Alice and Bob's
common knowledge of $X'$ cannot be turned into secret key.
However, this is consistent with the extreme case of $P_{XYZ} =
P_{XY}\otimes P_Z$, which has merging cost $-I(X:Y)$ since Bob can
locally produce a fresh sample from $P_{XY}$, and he can extract
$I(X:Y)$ secret bits from the correlation $XY$ with Alice.
{\bf Distribution exchange.} We now turn to finding an analogue of
quantum state exchange \cite{OW-uncommon}. In the quantum task,
not only does Alice send her state to Bob, but Bob should
additionally send his state to Alice, which is to say that the
final state is just the initial state with Alice and Bob's shares
permuted. Amazingly, this can require less resources than if only
Alice is required to send to Bob. In general, the number of
qubits that need to be exchanged can be said to quantify the {\it
uncommon quantum information} between Alice and Bob, because this is the
part that has to be sent to their partner. We can consider the
analogy of this, where Alice and Bob must exchange distributions.
This minimal rate of secret key clearly must be non-negative,
since Alice and Bob could otherwise continue swapping their
distribution and create unlimited secret key from some given
correlation and LOPC. Note that the rate zero is indeed possible.
The distribution $P_{XYZ}(0,0,0)= P_{XYZ}(1,1,1)= P_{XYZ}(0,1,2)=
P_{XYZ}(1,0,2)=\frac{1}{4}$, for instance, has the property that
exchanging the distribution has zero exchange cost (because it is
symmetric), while the cost of Alice merging her distribution to
Bob's is $I(X:Z)=\frac{1}{2}$.
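To verify these numbers (a routine calculation included for convenience): under this distribution $X$ and $Y$ are independent uniform bits, so $I(X:Y)=0$, while $Z$ takes the values $0,1,2$ with probabilities $\tfrac14,\tfrac14,\tfrac12$ and is uniform over two values given either value of $X$, so
\begin{equation*}
I(X:Z)=H(Z)-H(Z|X)=\tfrac32-1=\tfrac12 .
\end{equation*}
Hence Alice-to-Bob merging costs $I(X:Z)-I(X:Y)=\tfrac12$ secret bits, whereas swapping the whole distribution is free by symmetry.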
In \cite{OW-uncommon}, a lower bound for quantum state exchange
given in terms of one-way entanglement distillation between $R$
and each of the parties was proven. A similar lower bound
$K^\rightarrow(Z\rangle X)+K^\rightarrow(Z\rangle Y)$, where
$K^\rightarrow(Z\rangle T)$ is the distillable key (using only one-way
communication from $R$) can be proven in the context of
distribution exchange. For upper bounds, one can consider explicit
protocols: for example,
Slepian-Wolf coding in both directions is possible, costing
$H(X|Y)+H(Y|X)$.
A more sophisticated protocol that is sometimes better
uses results from \cite{wyner-common}:
the rate $I(X:Z)-I(X:Y)+I(XY:W)$ can be achieved (or the same
quantity with $X$ and $Y$ interchanged, whichever is smaller);
this quantity is minimized over distributions $W$ such that
$X\text{---}W\text{---}Y$ is a Markov chain. The protocol is for
Alice to merge her $X$ to Bob, which consumes $I(X:Z)-I(X:Y)$
secret bits; then Bob locally creates not $\widehat{X}\widehat{Y}|Z$ as
with merging, but rather $W|Z$ and then $W$ is essentially
communicated back to Alice -- but by \cite{wyner-common} only a
rate $I(XY:W)$ needs to be sent. Then, based on $W$, each one
creates a sample $\widehat{X}$ and $\widehat{Y}$, respectively.
An interesting aspect of quantum state exchange is that the rate
given by the sum of both parties' minimal rate of state merging
$S(A|B)+S(B|A)$ is usually not attainable (although as noted above,
one can sometimes beat it). This is because if Alice first merges
her state with Bob, Bob will not be able to merge his state with
Alice, but must send at the full rate $S(B)$. This is because
after Alice merges, she is left with nothing, being unable to
clone a copy of her state. This motivates us to consider the
analogue of cloning, especially since na\"{\i}vely, classical
variables can be copied. However, we need a different kind of copying
to enable Alice and Bob to merge their distributions simultaneously:
it would be for Alice to create a fresh, independent sample from
the conditional distribution $P_{X|YZ}$ of her $X$, given $Y$ and $Z$
(which are unknown to her). If she could do that, she would
be able to merge her first sample to Bob at secret key cost
$I(X:Z)-I(X:Y)$, and then he could merge his $Y$ to her second sample
(which we designed to have the same joint distribution with $YZ$),
at cost $I(Y:Z)-I(X:Y)$. Since we know that the sum
\begin{equation*}\begin{split}
I(X:Z) &+ I(Y:Z) - 2\,I(X:Y) \\
&\phantom{=}= H(X|Y) \!+\! H(Y|X) \!-\! H(X|Z) \!-\! H(Y|Z)
\end{split}\end{equation*}
is not in general an achievable rate, this hypothetical cloning
cannot be always possible.
Such cloning is indeed always impossible, unless the various conditional
distributions $P_{X|YZ}$ are either identical or
have disjoint support~\cite{class-no-clo}.
Note that in this case, $P_{XYZ}$ is bi-disjoint for the cut $X$-$YZ$.
A different viewpoint is that the cloning would increase the
(secret) correlation between Alice and Bob, which of course
cannot be unless they can privately communicate; this
seems to be another way of thinking about a classical analogue
of the no-cloning principle~\cite{sandu-clone}.
{\bf Conclusion.}
In this paper, we have described a classical analogue of negative
quantum information, and we find that the similarities between
quantum information theory and privacy theory extend very far in
this analogy (at least in the present context), including no-cloning, pure and mixed states, and
GHZ-type correlations. Quantum state merging (with reference
systems such that the overall state is pure or mixed) and state
exchange lead to similar protocols in the case of private
distributions which have many properties in common with their
quantum counterparts.
This is part
of a body of work exploring the similarities between entanglement
and classical correlations, which, it is hoped, will stimulate
progress in both fields, for instance, on the question of the
possible existence of bound information~\cite{gisin-wolf-99}.
{\bf Acknowledgments.}
This work is supported by
EU grants RESQ (IST-2001-37559),
and PROSECCO (IST-2001-39227). JO additionally
acknowledges the support of a grant from the Royal Society and the Newton Trust.
AW additionally acknowledges support from the U.K. Engineering and Physical Sciences
Research Council's ``QIP IRC'' and through a University of
Bristol Research Fellowship.
{\bf APPENDIX -- Direct proof of Theorem \ref{thm:main}.}
For the second, direct, proof of achievability,
we will need the sampling lemma, which is proved
in \cite{wyner-common} (see also \cite{winter-common}
and \cite{Ashlwede-Winter2002}):
\begin{lemma}
Consider a distribution $P_{UV}$ of random variables $U$ and $V$
(with marginals $P_V$ and $P_U$), and
$n$ independent samples $U^nV^n=U_1V_1,\ldots,U_nV_n$ from this distribution.
Then for every $\gamma>0$ and sufficiently large $n$, there are
$N \leq 2^{n(I(U:V)+\gamma)}$ sequences $u^{(i)}$ from $U^n$
such that, with
\begin{equation}
Q := \frac{1}{N} \sum_{i=1}^N P_{V^n|U^n=u^{(i)}},
\end{equation}
\begin{equation}
D\left( Q \| P_{V}^{\otimes n} \right) \leq 2^{-\gamma n}.
\end{equation}
Here, $D$ denotes the relative entropy.
Furthermore, such a family of sequences is found with high probability by
selecting them independently at random with probability distribution
$P_{U}^{\otimes n}$.
\end{lemma}
In such a situation we say that the distribution
of $V^n$, $P_{V^n}$, is {\it covered}
by the $N$ sequences, meaning that
the distribution $P_{V}^{\otimes n}$ is approximated
to high accuracy using only slightly more
than $2^{nI(U:V)}$ sequences from $U^n$.
We achieve distribution merging using a protocol extremely reminiscent of state merging.
In state merging, one adds a maximally entangled state of $nS(A|B)$ ebits,
and then performs a random measurement on $\rho_A$ and the pure entanglement, the result of
which is communicated to Bob. Here, Alice and Bob add a secret key of $nH(K)$ bits, and
the analogy of a random measurement will be a random hash (described below),
the result of which is communicated
to Bob. In state merging, a faithful protocol has the property that $\rho_R$ is unchanged
and Bob can decode his state to $\rho_A$ after learning Alice's measurement. Here, a successful
protocol is likewise one which allows Bob to learn $X$, while the distribution of $R$ is
unchanged if one conditions on the result of Alice's measurement.
Let us first take the case when $I(X:Z)-I(X:Y)$ is negative.
Alice and Bob previously decide on a random binning, or {\it code},
which groups Alice's $2^{nH(X)}$ sequences into $2^{nH(X|Y)}$
sets of size just under $2^{nI(X:Y)}$.
Each of these sets is numbered by ${\cal C}_o$ and is called the
{\it outer code}. Within each set,
we further divide the sequences into $2^{n[I(X:Y)-I(X:Z)]}$ sets
containing just over $2^{nI(X:Z)}$ sequences.
These smaller sets are labeled by ${\cal C}_i$, the {\it inner code}.
Alice then publicly broadcasts the number ${{\cal C}_o}$ of the outer code that her
sequence is in (this takes $nH(X|Y)$
bits of public communication to Bob). Now, based on
learning ${\cal C}_o$, Bob will know $X^n$ by the
Slepian-Wolf theorem \cite{slepian-wolf}. We say that
he can decode Alice's sequence. Because the
distribution $P_{XYZ}$ is bi-disjoint, and Bob knows $X^n$
and $Y^n$, he must know $Z^n$. He
can now create the distribution $P_{\widehat{X}\widehat{Y}|Z=z}=P_{XY|Z=z}$.
He has thus succeeded in obtaining
$\widehat{X}\widehat{Y}$ such that the overall distribution is close to $P_{XYZ}$.
Furthermore, the distribution
is private -- each set (or code) in ${\cal C}_o$ has more than $2^{nI(X:Z)}$
elements (i.e. codewords)
[recall that there are $2^{nI(X:Y)}$ outer codewords, and $I(X:Y)\geq I(X:Z)$].
The sampling lemma then tells us that $R$'s distribution is unchanged
i.e.~$P_{Z^n|{{\cal C}_o}=c}\approx P_{Z}^{\otimes n}$,
which means that an eavesdropper who learns which code ${\cal C}_o$
Alice's sequence is in, doesn't learn anything about the sequence that $R$ has.
Next, we see that Alice and Bob gain
$n[ I(X:Y)-I(X:Z) ]$ bits of secret key. Since Alice and Bob both know $X^n$, they both
know which inner code ${\cal C}_i$ it lies in, and this they use as the key. There
are $2^{n[ I(X:Y)-I(X:Z) ]}$ of them, and each contains
just over $2^{nI(X:Z)}$ codewords in it. Thus, from the
covering lemma, $R$'s distribution is independent of this key value, so $R$ (and consequently
any eavesdropper) has arbitrarily small probability of learning it.
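As a consistency check on the binning rates (added for the reader): the outer index takes $2^{nH(X|Y)}$ values, each outer set splits into $2^{n[I(X:Y)-I(X:Z)]}$ inner sets of roughly $2^{nI(X:Z)}$ codewords each, and indeed
\begin{equation*}
H(X|Y)+\bigl[I(X:Y)-I(X:Z)\bigr]+I(X:Z)=H(X),
\end{equation*}
so the three-level binning exactly exhausts Alice's $2^{nH(X)}$ typical sequences.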
Now, in the case where $I(X:Z)-I(X:Y)$ is positive, Alice and
Bob simply consume $I(X:Z)-I(X:Y)$
bits of secret key. Each bit of key, shared by Alice and Bob but
unknown to $Z$, decreases $I(X:Z)-I(X:Y)$ by $1$, so
this amount of key drives the quantity $I(X:Z)-I(X:Y)$
down to zero, after which the preceding proof
applies. We thus see that $I(X:Z)-I(X:Y)$ bits of key are
required to perform distribution merging,
and if it is negative, one can achieve distribution
merging, while obtaining this amount of key.
$\Box$
\begin{thebibliography}{31}

\bibitem{Shannon1948} C.~Shannon, Bell Syst. Tech. J. \textbf{27}, 379 (1948).

\bibitem{slepian-wolf} D.~Slepian and J.~Wolf, IEEE Trans. Inf. Theory \textbf{19}, 461 (1971).

\bibitem{Schumacher1995} B.~Schumacher, Phys. Rev. A \textbf{51}, 2738 (1995).

\bibitem{how-merge} M.~Horodecki, J.~Oppenheim, and A.~Winter, Nature \textbf{436}, 673 (2005), quant-ph/0505062.

\bibitem{how-merge2} M.~Horodecki, J.~Oppenheim, and A.~Winter, quant-ph/0512247, to appear in Comm. Math. Phys.

\bibitem{Wehrl78} A.~Wehrl, Rev. Mod. Phys. \textbf{50}, 221 (1978).

\bibitem{HH94-redun} R.~Horodecki and P.~Horodecki, Phys. Lett. A \textbf{194}, 147 (1994).

\bibitem{cerfadami} N.~Cerf and C.~Adami, Phys. Rev. Lett. \textbf{79}, 5194 (1997), quant-ph/9512022.

\bibitem{gisin-wolf-99} N.~Gisin and S.~Wolf, Phys. Rev. Lett. \textbf{83}, 4200 (1999).

\bibitem{collins-popescu} D.~Collins and S.~Popescu, Phys. Rev. A \textbf{65}, 032321 (2002), quant-ph/0107082.

\bibitem{ghz} D.~Greenberger, M.~Horne, and A.~Zeilinger, in \emph{Bell's Theorem, Quantum Mechanics, and Conceptions of the Universe} (Kluwer, Dordrecht, 1989), p.~69.

\bibitem{foot-1} Classical distributions are the only ones for which the same amount of secret key is needed to create them as can be distilled from them \cite{wyner-common,winter-common}, a property analogous to the fact that for pure states the rate of maximally entangled states needed to create them is equal to the rate which can be distilled from them, namely $H(p)$.

\bibitem{csiszar:narayan} I.~Csisz\'{a}r and P.~Narayan, IEEE Trans. Inf. Theory \textbf{50}, 3047 (2004).

\bibitem{foot-2} We could further restrict our tripartite pure analogue to be one which is bi-orthogonal between any splitting of the parties into two groups, but such a restriction is not needed.

\bibitem{spekkens-toy} R.~W. Spekkens, quant-ph/0401052.

\bibitem{privacy-BBCM} C.~Bennett, G.~Brassard, C.~Cr\'epeau, and U.~Maurer, IEEE Trans. Inf. Theory \textbf{51}, 1915 (1995).

\bibitem{compl} J.~Oppenheim, K.~Horodecki, M.~Horodecki, P.~Horodecki, and R.~Horodecki, Phys. Rev. A \textbf{68}, 022307 (2003), quant-ph/0207025.

\bibitem{wolf-wulli} S.~Wolf and J.~Wullschleger, in \emph{Proc. IEEE Information Theory Workshop 2004, San Antonio} (IEEE, 2004).

\bibitem{aram:personal} A.~W. Harrow, personal communication (2005).

\bibitem{koashi-mixed} M.~Koashi and N.~Imoto, Phys. Rev. Lett. \textbf{87}, 017902 (2001).

\bibitem{koashi:undisturbed} M.~Koashi and N.~Imoto, Phys. Rev. A \textbf{66}, 022318 (2002).

\bibitem{DurVC-compr} W.~D\"ur, G.~Vidal, and I.~Cirac, Phys. Rev. A \textbf{64}, 022308 (2001), quant-ph/0101111.

\bibitem{AhlCsi93} R.~Ahlswede and I.~Csisz\'{a}r, IEEE Trans. Inf. Theory \textbf{39}, 1121 (1993).

\bibitem{CsiszarKorner} I.~Csisz\'{a}r and J.~K\"{o}rner, IEEE Trans. Inf. Theory \textbf{24}, 339 (1978).

\bibitem{wyner75} A.~D. Wyner, Bell Syst. Tech. J. \textbf{54}, 1355 (1975).

\bibitem{OW-uncommon} J.~Oppenheim and A.~Winter, quant-ph/0511082.

\bibitem{wyner-common} A.~D. Wyner, IEEE Trans. Inf. Theory \textbf{21}, 163 (1975).

\bibitem{class-no-clo} A.~Daffertshofer, A.~R. Plastino, and A.~Plastino, Phys. Rev. Lett. \textbf{88}, 210601 (2002).

\bibitem{sandu-clone} S.~Popescu, private communication.

\bibitem{winter-common} A.~Winter, in \emph{Proc. ISIT 2005, Adelaide, 5--9 Sept.} (2005), p.~2270.

\bibitem{Ashlwede-Winter2002} R.~Ahlswede and A.~Winter, IEEE Trans. Inf. Theory \textbf{48}, 569 (2002); addendum: \textbf{49}, 346 (2003).

\end{thebibliography}
\end{document}
|
\begin{document}
\title{Distributionally Robust Observable Strategic Queues}
\author{Yijie Wang, Madhushini Narayana Prasad, Grani A. Hanasusanto, and John J. Hasenbein\\ Graduate Program in Operations Research and Industrial Engineering\\ The University of Texas at Austin, USA}
\date{\today}
\maketitle
\begin{abstract}
\noindent This paper presents an extension of Naor's analysis on the join-or-balk problem in observable M/M/1 queues. While all other Markovian assumptions still hold, we explore this problem assuming uncertain arrival rates under the distributionally robust settings. We first study the problem with the classical moment ambiguity set, where the support, mean, and mean-absolute deviation of the underlying distribution are known. Next, we extend the model to the data-driven setting, where decision makers only have access to a finite set of samples. We develop three optimal joining threshold strategies from the perspective of an individual customer, a social optimizer, and a revenue maximizer, such that their respective worst-case expected benefit rates are maximized. Finally, we compare our findings with Naor's original results and the traditional sample average approximation scheme.
\mbox{}
\end{abstract}
\section{Introduction} \label{sec:intro}
Imposing tolls to regulate queueing systems was first studied by Naor \cite{naor1969regulation}.
He considers a single-server first-come-first-served (FCFS) queue with stationary Poisson arrivals at a known rate $\lambda$. Service times are independent and identically exponentially distributed with rate $\mu$. Customers are assumed to be risk-neutral and homogeneous from an economic perspective. Each customer receives a reward of $\$R$ upon service completion and incurs a cost of $\$C$ per unit of time spent in the system (including in service). In the observable model, every arriving customer inspects the queue length and decides whether to join (reneging is not allowed) or balk (i.e., not join the queue). This strategic decision making is the key factor differentiating this model from the classic $M/M/1$ queueing model.
Naor derives an optimal threshold strategy $n$: the customer joins the queue if and only if the system length is less than ${n}$. He computes this threshold value under three different control strategies: 1) \emph{individual optimization} ($ n_e$) where the customers act in isolation aiming to maximize their own expected net benefit rate, 2) \emph{social optimization} ($ n_s$) where the objective is to maximize the long-run rate at which customers accrue net benefit and, 3) \emph{revenue maximization} ($n_r$) where the agency imposes a toll on the customers joining the queue with the goal of maximizing its own revenue. The most important result by Naor is the relation $ n_r \leq n_s \leq n_e$, which implies that the customers tend to join the system at a higher rate, when left to themselves, than is socially optimal. This is because customers do not consider the negative externalities they impose on customers who arrive later. The result also implies that the revenue maximizing firms allow fewer customers to join their system than the socially optimal case.
Many authors have expanded on the seminal work by Naor \cite{naor1969regulation}---a detailed review of these game-theoretic models is presented in a recent book by Hassin and Haviv \cite{hassin2003queue}. Some of the other recent works \cite{burnetas2007equilibrium, economou2008equilibrium, guo2011strategic} involve deriving threshold strategies in a classic Naor's setting with server shutdowns. While Economou and Kanta \cite{economou2008equilibrium} study the system with server breakdowns and repairs, Burnetas and Economou \cite{burnetas2007equilibrium} analyze the system where the server shuts off when idle and incurs a set-up time to resume. A slight variant of this model is given by Guo and Hassin \cite{guo2011strategic} where the server resumes only when the queue length exceeds a given critical length. Also, Guo and Zipkin~\cite{guo2007analysis} explore the effects of three different levels of delay information and identify the specific cases which do and do not require such information to improve the performance. Haviv and Oz~\cite{haviv2016regulating} review the properties of several existing regulation schemes and devise a new mechanism where customers are given priority based on the queue length.
Af\`eche and Ata~\cite{afeche2013bayesian} study the observable $M/M/1$ queue with heterogeneous customers, some patient and some impatient, in given proportions.
All the aforementioned works explore Naor's model by assuming deterministic arrival and service rates. Some recent studies have relaxed this restrictive assumption by taking the arrival or the service rate as a random variable. Debo and Veeraraghavan \cite{debo2014equilibrium} consider a system where the arriving customers cannot completely observe the service rate and value. They assume that the server belongs to one of two known types, and that the service rate and prior probability for each type are known. Liu and Hasenbein~\cite{CCLiu} study a stochastic extension of Naor's model by relaxing the assumption of a deterministic arrival rate. They assume the arrival rate is drawn from a probability distribution that is known to the decision maker.
Chen and Hasenbein~\cite{chen2020knowledge} further extend the stochastic model to the unobservable setting. They show that the social optimizer induces a lower expected arrival rate than the revenue maximizer in this setting. Hassin et al.~\cite{hassin2021strategic} also investigate the unobservable stochastic model from the perspective of strategic customers and demonstrate that the model exhibits a RASTA (rate-biased arrivals see time averages) property.
However, these works still assume the distribution of the arrival or service rate is known precisely to decision makers, which may not be realistic in practice. In this paper, we extend Naor's classical model for observable systems by relaxing this assumption: we assume the arrival rate is uncertain and governed by an unknown underlying distribution, while the service rate is deterministic.
We consider an alternate modeling paradigm called the \emph{distributionally robust optimization} (DRO)~\cite{scarf1957min, shapiro2002minimax, vzavckova1966minimax}. Unlike the traditional stochastic optimization model, DRO acknowledges the lack of full distributional information on the random arrival rate. Instead, the decision maker is assumed to have access
to partial information such as the moments and structural properties of the distribution, or some limited historical observations. In this setting, the objective is to derive optimal threshold strategies that maximize the worst-case expected benefit rate, where the worst case is taken over an \emph{ambiguity set} of all distributions consistent with the available information about the true distribution. Such max-min problems have been studied since the seminal work by Scarf~\cite{scarf1957min} but only have received more attention with the advent of modern robust optimization techniques \cite{ben2009robust, bertsimas2004price}.
Since then, a substantial body of literature is devoted to studying well-known optimization problems under uncertainty in a distributionally robust setting; see \cite{ardestani2016linearized, delage2010distributionally,hanasusanto2015distributionally, li2014distributionally, shafieezadeh2015distributionally, wiesemann2014distributionally}. However, to the best of our knowledge, the distributionally robust framework has not been considered in the context of classical Naor's observable strategic queue model. The paper fills this gap in the literature.
We first study the distributionally robust queue model with a mean-absolute deviation (MAD) ambiguity set, where partial information about the distribution mean and MAD are known.
The use of the MAD ambiguity set is motivated by a recent work by Eekelen et al. \cite{van2022mad} who analyze the worst-case performance of the GI/G/1 queue under mean-dispersion constraints for the interarrival and service time distributions. The authors demonstrate that measuring the dispersion by MAD, instead of variance, significantly simplifies the analysis and enables a closed-form solution for the extremal distribution whenever the loss function is convex. Inspired by this idea, we prove the concavity of the revenue rate function in the revenue maximization problem, which leads to an analytical solution for the worst-case expectation problem. Unfortunately, the social benefit rate function in the social optimization problem is neither concave nor convex. For this complicated function, we establish that, under some mild prerequisites, the function is unimodal and the MAD ambiguity set still admits a closed-form representation for the extremal distribution. When the prerequisites do not hold, we derive tractable reformulations for the social optimization problem.
Next, we extend our model to the data-driven setting, where queue system managers only have access to a finite number of independent and identically distributed training samples collected from historical observations. We then construct a data-driven MAD ambiguity set which mitigates estimation errors from the empirical moment estimators. The distributionally robust model with a data-driven ambiguity set admits a semidefinite programming reformulation for the social optimization problem and a linear programming reformulation for the revenue maximization problem.
To properly determine the robustness parameters, we establish a new distribution-free confidence interval for the empirical MAD. Although such confidence intervals exist for the empirical mean and variance \cite{delage2010distributionally}, to the best of our knowledge, none is available for the empirical MAD: Herrey~\cite{herrey1965confidence} derives a confidence interval for the empirical MAD under normally distributed data, while other works mostly focus on the median absolute deviation; see \cite{abu2018confidence,arachchige2019confidence,bonett2003confidence}. Using this result, we further derive finite-sample guarantees for the data-driven MAD model, whose optimal value provides high-confidence lower bounds on the expected social benefit or revenue rate. We also benchmark our data-driven MAD ambiguity set against the popular Wasserstein ambiguity set~\cite{esfahani2017data,esfahani2018data,gao2016distributionally,pflug2007ambiguity}, which is widely used in the data-driven setting as it offers attractive finite-sample guarantees. Our results demonstrate that the data-driven MAD model shares a similar guarantee with the Wasserstein model while yielding a much more efficient reformulation.
Our main contributions of this paper can be summarized as follows.
\begin{enumerate}
\item We propose a new model to tackle the uncertain arrival rate in Naor's strategic queue problem using the emerging DRO framework. The model does not impose any specific distributional assumption; instead, it optimizes in view of the worst-case distribution within a prescribed ambiguity set. Benefitting from this robustification framework, the model alleviates the overfitting issue and yields attractive out-of-sample performance.
\item We prove the revenue rate function is concave, while the social benefit rate function is either concave or unimodal under some mild prerequisites. We then show that these properties enable a closed-form solution for the worst-case expectation problem with a MAD ambiguity set. For the general cases, we derive a semidefinite programming (SDP) reformulation for the social optimization problem and a linear programming reformulation for the revenue optimization problem.
\item We extend the distributionally robust model to the data-driven setting, where queue system managers only have access to a finite set of historical observations. To mitigate the adverse effect of the estimation errors from the empirical MAD, we robustify the MAD ambiguity set by adding an extra layer of robustness to the empirical mean and MAD estimators. The data-driven MAD model admits a SDP reformulation for the social optimization problem and a linear programming reformulation for the revenue maximization problem. We then establish a distribution-free confidence interval for the empirical MAD and derive finite-sample guarantees for the distributionally robust model with a data-driven MAD ambiguity set. Compared with the Wasserstein ambiguity set, the data-driven MAD ambiguity set admits a more efficient reformulation of fixed complexity, where the number of constraints does not scale with the sample size.
\end{enumerate}
The remainder of the paper is structured as follows. In Section \ref{sec:droqueue}, we propose the distributionally robust queue model and analyze the relationship between different thresholds under the distributionally robust setting. Section \ref{sec:DROM} presents tractable reformulations for the worst-case expectation problem with a classical MAD ambiguity set. Section \ref{sec:data-driven-mad} explores the distributionally robust model with a data-driven MAD ambiguity set and derives theoretical finite-sample guarantees.
Finally, the out-of-sample performances of our distributionally robust models are assessed empirically in Section~\ref{sec:numerical}.
\paragraph{Notation:} The set of all probability measures supported on $\Xi$ is written as $\mathcal P_0(\Xi)\coloneqq\{\mu \in \mathcal M_+: \int_{\Xi} \mu(d\xi)=1 \}$, where $\mathcal M_+$ denotes the set of nonnegative Borel measures. All random variables are designated by tilde signs (e.g., $\tilde\rho$), while their realizations are denoted without tildes (e.g., $\rho$). We denote by $\mathbb E_\mathbb P[c(\tilde\rho)]$ the expectation of a cost function with respect to random variable $\tilde\rho$ under distribution $\mathbb P$. We define $\lfloor n \rfloor$ to be the largest integer less than or equal to $n$ and $\|\bm x\|_p$ to be the $p$-norm of a vector $\bm x$. For any set $\Xi$, we let $\text{int}(\Xi)$ denote its interior.
The cone of $k \times k$ positive semidefinite matrices is denoted by $\mathbb S_+^k$.
\section{Distributionally Robust Strategic Queues Model}\label{sec:droqueue}
The extension of Naor's seminal queue model to the stochastic optimization setting with an uncertain arrival rate was first proposed by Liu and Hasenbein~\cite{CCLiu}, who consider an $M/M/1$ queue system with a random arrival rate $\tilde \lambda\sim \mathbb P^\star$ and a deterministic service rate $\mu$. The queue system operates under a first-come-first-served discipline, and the true distribution of the uncertain arrival rate $\tilde \lambda$ is known by the system manager. Since the service rate $\mu$ is deterministic, without loss of generality, we consider the traffic intensity~$\tilde\rho :=\frac{\tilde\lambda}{\mu} $ as the uncertain parameter throughout the remainder of the paper. The stochastic model aims to find an optimal threshold that maximizes the expected benefit rate, i.e.,
\[\max_{n \in \mathbb Z_+} \mathbb E_{\mathbb P^\star}[c_n(\tilde \rho)].
\]
Here $c_n(\tilde \rho)$ is a general return function, which can be replaced with the social benefit rate function or revenue rate function, depending on the system manager's objective.
In practice, the true distribution $\mathbb P^\star$ is never available to the system manager and typically has to be estimated using the empirical distribution generated from the historical observations. While the empirical-based methods may work well on the observed data set, they often fail to achieve an acceptable out-of-sample performance because they do not consider any possible disturbances from the limited historical observations.
In this paper, we endeavor to address this fundamental shortcoming using ideas of DRO. The DRO approach does not impose any single distribution on the uncertain arrival rate. Instead, it constructs an ambiguity set $\mathcal P$ containing all plausible probability distributions that are consistent with the partial information as well as historical observations. In this setting, the objective is to derive an optimal threshold strategy $\hat n$ that maximizes the worst-case expected benefit rate, where the worst case is taken over all distributions from within this ambiguity set, i.e.,
\begin{equation}\label{eq:dromodel}
\max_{n \in \mathbb Z_+} \inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P}[c_n(\tilde\rho)].
\end{equation}
Because the model optimizes the expected benefit rate in view of the worst-case distribution, it mitigates overfitting to the observed samples and helps improve the performance in out-of-sample circumstances.
In this paper, we study the distributionally robust model from the perspective of an individual customer, a social optimizer, and a revenue maximizer. We first derive the results that hold for any generic ambiguity set $\mathcal P$.
\subsection{Individual Optimization}
We determine a pure threshold strategy in which each arriving customer decides to join or not join the queue based on the observed queue length, independently of the strategies adopted by other customers. A newly arrived customer makes a decision (to join or not join) based on the net gain $R-(i+1)C/\mu$, where $i$ is the number of people currently in the system, and joins the queue if this net gain is nonnegative. Note that the net gain is deterministic because it is independent of the random arrival rate. Thus, the optimal joining threshold for any arriving customer is given by
\begin{equation}
\label{eq:io}
\hat{n}_e = \left\lfloor{\frac{R{\mu}}{C}} \right\rfloor.
\end{equation}
This result coincides with Naor's original result (i.e., $\hat n_e=n_e$) because the net gain of a newly arrived customer only depends on the current queue length and the service rate, which are all deterministic. On the other hand, as an individual optimizer, the customer can ignore the rates of future arrivals, because they will not affect the time to service. We also remark here that the individual threshold $n_e$ can be regarded as the maximal length of the strategic queue, beyond which no newly arrived customer will ever enter the queue as the net gain becomes negative.
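For instance (an illustrative choice of parameters, not taken from the paper), if $R=10$, $C=2$, and $\mu=1$, then $\hat{n}_e=\lfloor R\mu/C\rfloor=5$: an arriving customer who sees five or more customers in the system balks, since the net gain $R-6C/\mu=-2$ would be negative.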
\subsection{Social Optimization}
We next analyze the distributionally robust threshold for a social optimizer. The social benefit rate for a realization of the traffic intensity $\rho$ and a fixed threshold $n$ is given by
\begin{equation}
\label{eq:social_rate}
f_n(\rho)\coloneqq\left\{\begin{array}{lll}
&R \mu \frac{\rho (1-\rho^n)}{1-\rho^{n+1}} -C\left( \frac{ \rho}{1-\rho} - \frac{ (n+1)\rho^{n+1}}{1-\rho^{n+1}}\right) \quad &\textup{if}\ \rho \neq 1\\
& R\mu \frac{n}{n+1}-C\frac{n}{2} &\textup{if}\ \rho=1. \\
\end{array}\right.
\end{equation}
One can verify that $\lim_{\rho \rightarrow 1} R \mu \frac{\rho (1-\rho^n)}{1-\rho^{n+1}} -C\left( \frac{ \rho}{1-\rho} - \frac{ (n+1)\rho^{n+1}}{1-\rho^{n+1}}\right)=R\mu \frac{n}{n+1}-C\frac{n}{2}$, which indicates that the function $f_n(\rho)$ is continuous in $\rho$. Here, the first term $\mu \tfrac{\rho (1-\rho^n)}{1-\rho^{n+1}}=\lambda\tfrac{1-\rho^n}{1-\rho^{n+1}}$ is the effective joining rate, that is, the arrival rate multiplied by the probability $\tfrac{1-\rho^n}{1-\rho^{n+1}}$ that an arriving customer joins, while the second term $\frac{\rho}{1-\rho} - \frac{ (n+1)\rho^{n+1}}{1-\rho^{n+1}}$ represents the expected number of customers in the queue system \cite[Equation (2.3)]{hassin2003queue}.
The distributionally robust model determines an optimal threshold $\hat{n}_s$ that maximizes
the worst-case expected social benefit rate $ Z_s(n)$, i.e., $\hat n_s \in \argmax_{n \in \mathbb Z_+} Z_s(n)$, where
\begin{equation}
\label{eq:mean_so1}
Z_s(n) :=\inf_{\mathbb P \in \mathcal P } \mathbb E_{\mathbb P} \left[f_n(\tilde\rho) \right].
\end{equation}
We first investigate the relationship between the optimal thresholds $\hat n_e$ and $\hat n_s$.
\begin{prop}\label{prop:so<ne}
There exists an optimal threshold of the social optimizer less than or equal to the optimal threshold of an individual customer, i.e., $\exists \hat{n}_s \ \textnormal{s.t.} \ \hat{n}_s \leq \hat{n}_e.$
\end{prop}
\begin{proof}[Proof of Proposition \ref{prop:so<ne}]
It is established in \cite[Equation 30]{naor1969regulation} that for any deterministic arrival rate $\lambda$ and service rate $\mu$, the optimal threshold from the perspective of a public goods regulator will be less than or equal to the optimal threshold of an individual customer.
Suppose that every optimal threshold that maximizes the worst-case expected social benefit rate is strictly greater than the optimal threshold of an individual customer, i.e., $\hat n_s > \hat n_e$ for all $\hat n_s \in \argmax_{n \in \mathbb Z_+} \inf_{\mathbb P \in \mathcal P } \mathbb E_{\mathbb P} \left[f_n(\tilde\rho)\right]$.
Then, based on our previous statement, for any fixed $\rho$ and any optimal $\hat n_s$, we have $n_s(\rho) \leq n_e=\hat n_e < \hat n_s$, where $n_s(\rho)$ is the corresponding optimal social threshold under the deterministic setting. Since $f_n(\rho)$ is discretely unimodal in $n$ for any fixed $\rho$ \cite[Page 20]{naor1969regulation}, we consequently obtain the following relationship between the benefit rates:
\begin{equation*}
f_{n_s(\rho)}(\rho) \geq f_{\hat n_e}(\rho) \geq f_{\hat n_s}(\rho) \quad \forall \rho \in \mathbb R_+.
\end{equation*}
Using this relationship, one can further establish that, for any ambiguity set $\mathcal P$,
\begin{equation}\nonumber
\inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} \left[ f_{\hat n_e}(\tilde\rho) \right] \geq \inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} \left[ f_{\hat n_s}(\tilde\rho) \right].
\end{equation}
Conversely, by the definition of $\hat n_s$, we also have $\inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} \left[ f_{\hat n_e}(\tilde\rho) \right] \leq \inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} \left[ f_{\hat n_s}(\tilde\rho) \right]$. This implies that $\inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} \left[ f_{\hat n_e}(\tilde\rho) \right] = \inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} \left[ f_{\hat n_s}(\tilde\rho) \right]$. Therefore, $\hat n_e$ is also an optimal threshold of the social optimization problem, which contradicts our previous assumption. This completes the proof.
\end{proof}
\subsection{Revenue Optimization}
We now consider a profit-maximizing firm that aims to maximize its expected revenue rate by imposing a toll $t$ on every joining customer. In this setting, customers base their joining decision on this imposed toll $t$ and evaluate the service completion only by $R-t$. Recall that customers join the queue if and only if the expected net gain is nonnegative. Therefore, determining an optimal toll $t$ is equivalent to choosing a queue length threshold $n$ that maximizes the expected revenue rate, where $n=\left\lfloor{\frac{(R-t){\mu}}{C}} \right\rfloor.$
The revenue rate for a realization of the traffic intensity and a fixed threshold $n$ is given by
\begin{equation}
r_n(\rho)\coloneqq\left\{\begin{array}{lll}
\left(R\mu - Cn\right) \frac{\rho(1-\rho^n)}{1-\rho^{n+1}} &\textup{if}\ \rho \neq 1\\
\left(R\mu - Cn\right) \frac{n}{n+1} &\textup{if}\ \rho=1. \\
\end{array}\right.
\end{equation}
One can show that $\lim_{\rho \rightarrow 1} \frac{\rho(1-\rho^n)}{1-\rho^{n+1}} = \frac{n}{n+1}$, which indicates that $r_n(\rho)$ is continuous in $\rho$. The revenue rate function $r_n(\rho)$ can be rewritten as $\tfrac{R\mu-Cn}{\mu} \cdot \tfrac{\lambda(1-\rho^n)}{1-\rho^{n+1}} $, where $\tfrac{R\mu-Cn}{\mu}$ is the entrance fee for a given maximal queue length $n$, and $\frac{\lambda(1-\rho^n)}{1-\rho^{n+1}}$ is the expected number of customers joining the queue per unit time.
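To make the two objective functions concrete, the following minimal Python sketch (our illustration only: the parameter values, the sample set of traffic intensities, and the plain sample-average approximation of the expectation are hypothetical choices, not part of the model above) evaluates $f_n(\rho)$ and $r_n(\rho)$ and searches the candidate thresholds $1\leq n\leq \hat n_e$.
\begin{verbatim}
import math

def social_rate(n, rho, R, C, mu):
    # f_n(rho): social benefit rate for threshold n and traffic intensity rho
    if abs(rho - 1.0) < 1e-12:
        return R * mu * n / (n + 1) - C * n / 2
    join = rho * (1 - rho**n) / (1 - rho**(n + 1))      # (lambda/mu) * P(join)
    in_system = rho / (1 - rho) - (n + 1) * rho**(n + 1) / (1 - rho**(n + 1))
    return R * mu * join - C * in_system

def revenue_rate(n, rho, R, C, mu):
    # r_n(rho): revenue rate for threshold n and traffic intensity rho
    if abs(rho - 1.0) < 1e-12:
        return (R * mu - C * n) * n / (n + 1)
    return (R * mu - C * n) * rho * (1 - rho**n) / (1 - rho**(n + 1))

def best_threshold(rate, rhos, R, C, mu):
    # brute-force search over 1 <= n <= n_e, averaging over sampled rho values
    n_e = math.floor(R * mu / C)
    avg = lambda n: sum(rate(n, r, R, C, mu) for r in rhos) / len(rhos)
    return max(range(1, n_e + 1), key=avg)

rhos = [0.6, 0.8, 1.0, 1.2]   # hypothetical samples of the traffic intensity
print(best_threshold(social_rate, rhos, R=10.0, C=2.0, mu=1.0))
print(best_threshold(revenue_rate, rhos, R=10.0, C=2.0, mu=1.0))
\end{verbatim}
Replacing the sample average in the sketch by the worst case over an ambiguity set $\mathcal P$ yields the distributionally robust thresholds studied below.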
The distributionally robust model determines an optimal threshold $\hat{n}_r$ that
maximizes the worst-case expected revenue rate $Z_r(n)$, i.e., $\hat n_r \in \argmax_{n \in \mathbb Z_+} Z_r(n)$, where
\begin{equation}
\label{eq:mean_rm}
Z_r(n) \coloneqq \inf_{\mathbb P \in \mathcal P } \mathbb E_{\mathbb P} \left[r_n(\tilde\rho) \right].
\end{equation}
Similarly, we first investigate the relationship between the optimal thresholds $\hat n_e$ and $\hat n_r$.
\begin{prop}\label{prop:rm<ne}
There exists an optimal threshold of the revenue maximizer less than or equal to the optimal threshold of an individual customer, i.e., $\exists \hat{n}_r \ \textnormal{s.t.} \ \hat{n}_r \leq \hat{n}_e.$
\end{prop}
\begin{proof}
The proof parallels that of Proposition \ref{prop:so<ne}; we omit it for brevity.
\end{proof}
Up to now, we have presented the generic distributionally robust observable queue models for an individual customer, a social optimizer, and a revenue maximizer. However, we have not specified the ambiguity set for the social and revenue optimization problems. In the following sections, we will investigate different types of ambiguity sets and derive their tractable reformulations.
\section{Distributionally Robust Strategic Queues with a MAD Ambiguity Set} \label{sec:DROM}
In this section, we study the DRO model with a mean-absolute deviation (MAD) ambiguity set. Suppose the support~$[a,b]$, mean $m$ and MAD $d$ of the random parameter $\tilde\rho$ are known to decision makers. Then we can construct an ambiguity set containing all possible distributions that are consistent with the partial information, defined as
\begin{equation}\label{eq:ambiguity_set}
\mathcal P \coloneqq \{ \mathbb P \in \mathcal P_0([a,b]):\; \mathbb E_{\mathbb P} [\tilde\rho] = m, \; \mathbb E_{\mathbb P} \left[|\tilde\rho- m| \right] = d\}.
\end{equation}
We develop efficient solution schemes to find the optimal threshold strategies for a social optimizer and a revenue maximizer, given by $\hat{n}_s$ and $\hat{n}_r$, respectively, such that the worst-case expected benefit rates are maximized. In order to derive tractable reformulations for the distributionally robust models, we assume $m \in (a,b) $ and $d \in (0, \overline d)$, where $\overline d \coloneqq \frac{2(m-a)(b-m)}{b-a}$ is the largest possible mean-absolute deviation by any distribution with the given support and mean.
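To see that $\overline d$ is attained (a standard observation we record for intuition), consider the two-point distribution that places mass $\frac{m-a}{b-a}$ at $b$ and mass $\frac{b-m}{b-a}$ at $a$; its mean is $m$ and its mean-absolute deviation is
\begin{equation*}
\frac{m-a}{b-a}(b-m)+\frac{b-m}{b-a}(m-a)=\frac{2(m-a)(b-m)}{b-a}=\overline d,
\end{equation*}
so the upper bound $\overline d$ in the assumption $d\in(0,\overline d)$ is tight.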
\subsection{Social Optimization}
To determine an optimal joining threshold for a social optimizer, we compute the worst-case expected social benefit rate $Z_s(n)$ for every $n\in \mathbb Z_+$ satisfying $1 \leq n \leq n_e$, and choose an $\hat n_s$ such that $\hat n_s \in \argmax_{n \in \mathbb Z_+} Z_s(n)$. To this end, we show how to compute the worst-case expected social benefit rate for a fixed $n$. Suppose the distribution mean and MAD of $\tilde\rho$ are precisely known, then the worst-case expected social benefit rate is given by the optimal value of the moment problem
\begin{equation}
\label{eq:so_mean_abs_primal}
\begin{array}{ccll}
Z_s(n)=
&\displaystyle \inf_{\nu \in \mathcal M_+}&\displaystyle \int_{\Xi} f_n(\rho) \, \nu (\mathrm{d}\rho) \\
&\textnormal{s.t.}& \displaystyle \int_{\Xi} |\rho-m| \, \nu (\mathrm{d}\rho)= d\\
&& \displaystyle \int_{\Xi} \rho \, \nu(\mathrm{d}\rho)= m\\
&& \displaystyle \int_{\Xi} \nu(\mathrm{d}\rho)= 1,
\end{array}
\end{equation}
where $\Xi:= [a,b]$ is the support of $\tilde\rho$ and $\mathcal M_+$ denotes the set of all nonnegative measures.
The first and second constraints of \eqref{eq:so_mean_abs_primal} require the nonnegative measure's MAD and mean to equal $d$ and $m$, respectively, while the third constraint restricts the nonnegative measure to be a probability measure. The objective of the problem is to find a feasible probability measure that minimizes the expected social benefit rate. However, this semi-infinite linear optimization problem is hard to solve, because it searches for the best decision over an infinite-dimensional space of probability measures.
To derive a tractable reformulation, we focus on the dual problem. We first define $F(\rho)\coloneqq \alpha|\rho-m| +\beta \rho + \gamma$, and derive the dual problem as
\begin{equation}
\label{eq:so_mean_abs_dual}
\begin{array}{ccll}
&\displaystyle\sup_{\alpha,\beta,\gamma \in \mathbb{R}}&\displaystyle \alpha d + \beta m + \gamma\\
&\textnormal{s.t.}& F(\rho) \leq f_n(\rho) \quad \quad \forall \rho \in [a,b].
\end{array}
\end{equation}
Notice that $F(\rho)$ is a two-piece piecewise affine function majorized by $f_n(\rho)$. If $f_n(\rho)$ were piecewise affine or concave, the semi-infinite constraint would reduce to finitely many linear constraints, since it would suffice to check the constraint at the points $\rho=a,m$ and $b$. However, the social benefit rate function is neither concave nor piecewise affine, which makes the problem difficult. To solve this optimization problem, we first investigate the properties of the social benefit rate function $f_n(\rho)$. Some of the proofs of this section are relegated to Appendix \ref{sec:proof_sec3}.
\begin{lem}
\label{lem:so_propertie}
The social benefit rate function $f_n(\rho)$ has the following properties if $\frac{R\mu}{C} \geq n+1$:
\begin{enumerate}
\item $f_n(\rho)$ is strictly concave for $\rho \in [0,1]$.
\item $f_n(\rho)$ is either concave increasing or unimodal for $\rho \in [0, \infty)$.
\item The sign of the second derivative $f_n''(\rho)$ changes at most once over $[0, \infty)$.
\end{enumerate}
\end{lem}
From Lemma \ref{lem:so_propertie} we know that the social benefit rate function has some nice properties. Specifically, the function is
either concave increasing or unimodal on the nonnegative axis, and when it is unimodal, the function changes from a concave function to a convex function at some point. The next lemma further asserts that the complementary slackness property holds for the primal and dual problems, which will later help us determine the worst-case distribution.
\begin{lem}
\label{lem:so_3points}
The optimal values of the primal-dual pair \eqref{eq:so_mean_abs_primal} and~\eqref{eq:so_mean_abs_dual} coincide, and their optimal solutions $\nu^\star$ and $(\alpha^\star,\beta^\star,\gamma^\star)$, respectively, satisfy the complementary slackness condition
\begin{equation}\nonumber
\left(f_n(\rho)-\alpha^\star|\rho-m| -\beta^\star \rho - \gamma^\star\right)\nu^\star(\rm d \rho)=0\qquad \forall \rho\in[a,b].
\end{equation}
\end{lem}
Combining Lemmas \ref{lem:so_propertie} and \ref{lem:so_3points}, we are ready to show that problem \eqref{eq:so_mean_abs_primal} can be solved analytically under certain conditions. Specifically, we divide this problem into three cases and derive an explicit expression of the worst-case distribution for each case.
\begin{prop}
\label{prop:so_mad}
Assume $m\in [0,1]$ and $\frac{R\mu}{C} \geq n+1$. Let $(\rho_t, f_n(\rho_t))$ be the tangent point on $f_n$ for the line that passes through $(m,f_n(m))$. For any $n \geq 1$, we have one of the following three cases:
\begin{enumerate}
\item If $f_n(b) + f'_n(b) (m-b) \geq f_n(m)$, then the extremal distribution that solves \eqref{eq:mean_so1} is a three-point distribution supported on $\rho_1 = a$, $\rho_2 = m$, $\rho_3=b$, with corresponding probabilities
$$ p_1 = \frac{d}{2(m-a)}, \ p_2 = 1 - \frac{d}{2(m-a)} - \frac{d}{2(b-m)},\ p_3= \frac{d}{2(b-m)}.$$
\item If $f_n(b) + f'_n(b) (m-b) < f_n(m)$ and $d< d_0\coloneqq\frac{2(m-a)(\rho_{t}-m)}{\rho_t-a}$, then the extremal distribution is a three-point distribution supported on $\rho_1 = a$, $\rho_2 = m$, $\rho_3=\rho_t$, with probabilities
$$ p_1 = \frac{d}{2(m-a)}, \ p_2 = 1 - \frac{d}{2(m-a)} - \frac{d}{2(\rho_t-m)},\ p_3= \frac{d}{2(\rho_t-m)}.$$
\item If $f_n(b) + f'_n(b) (m-b) < f_n(m)$ and $d\geq d_0\coloneqq\frac{2(m-a)(\rho_{t}-m)}{\rho_t-a}$, then the extremal distribution is a two-point distribution supported on $\rho_1=a$, $\rho_2 = \frac{ad+2m(a-m)}{d+2(a-m)}$, with probabilities $$ p_1 = \frac{d}{2(m-a)}, \ p_2 = 1-\frac{d}{2(m-a)}.$$
\end{enumerate}
\end{prop}
\begin{figure}
\centering
% Graphics omitted: the three panels illustrate the optimal two-piece piecewise affine minorant of $f_n(\rho)$ in the three cases of Proposition \ref{prop:so_mad}.
\caption{The optimal two-piece piecewise affine function described in Proposition \ref{prop:so_mad}; the three panels correspond to the three cases.\label{fig:mad_impro_1}\label{fig:mad_impro_2}\label{fig:mad_impro_3}}
\label{fig:MAD_figure}
\end{figure}
Figure \ref{fig:MAD_figure} depicts the optimal two-piece piecewise affine function described in Proposition \ref{prop:so_mad}. We remark that the tangent point $(\rho_t,f_n(\rho_t))$ in Figure \ref{fig:mad_impro_2} can be determined efficiently by bisection. Specifically, we initialize the search interval as $[l,u]=[m,b]$. In each iteration, we evaluate the derivative at the midpoint $\rho=\frac{u+l}{2}$ and compute the difference between $f_n(m)$ and $f_n'(\frac{u+l}{2})(m-\frac{u+l}{2})+f_n(\frac{u+l}{2})$. If the difference is small enough in absolute value, we terminate the algorithm; otherwise, we set $u=\frac{u+l}{2}$ if the difference is positive and $l=\frac{u+l}{2}$ if the difference is negative, and repeat with the updated interval $[l,u]$.
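For concreteness, a minimal Python sketch of this bisection is given below; it uses the equivalent polynomial form of $f_n$ derived in Appendix \ref{sec:proof_sec3}, approximates $f_n'$ by a central difference (an implementation choice, not part of the paper), and takes $R=10$, $C=1$, $\mu=1$, the values used in Section \ref{sec:numerical}, as example parameters.
\begin{verbatim}
import numpy as np

def f_n(rho, n, R=10.0, C=1.0, mu=1.0):
    # social benefit rate, written in the polynomial form of the appendix
    S = (rho ** np.arange(n + 1)).sum()
    queue = sum(rho ** (j + 1) * (rho ** np.arange(n - j)).sum()
                for j in range(n)) / S
    return R * mu * (1.0 - 1.0 / S) - C * queue

def fprime(rho, n, h=1e-6):
    # central-difference approximation of f_n'(rho)
    return (f_n(rho + h, n) - f_n(rho - h, n)) / (2.0 * h)

def tangent_point(n, m, b, tol=1e-10):
    # bisection for rho_t in (m, b): the tangent line at rho_t passes
    # through (m, f_n(m)); relevant in case 2 of Proposition prop:so_mad
    l, u = m, b
    while u - l > tol:
        mid = 0.5 * (l + u)
        diff = f_n(m, n) - (f_n(mid, n) + fprime(mid, n) * (m - mid))
        if diff > 0:
            u = mid      # tangent at mid already passes below (m, f_n(m))
        else:
            l = mid      # tangent at mid still lies above (m, f_n(m))
    return 0.5 * (l + u)
\end{verbatim}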
Proposition \ref{prop:so_mad} explicitly expresses the extremal distribution for parameters satisfying $m\leq 1$ and $\frac{R\mu}{C} \geq n+1$. Using this result, we can compute the worst-case expected social benefit rate $Z_s(n)$ efficiently.
\begin{thm}\label{thm:so_mad}
Assume $m\in [0,1]$ and $\frac{R\mu}{C} \geq n+1$. Let $(\rho_t, f_n(\rho_t))$ be the tangent point on $f_n(\rho)$ for the line that passes through $(m,f_n(m))$. For any $n \geq 1$, we have the following three cases:
\begin{enumerate}
\item If $f_n(b) + f'_n(b) (m-b) \geq f_n(m)$, then
\[Z_s(n)=\frac{d}{2(m-a)} f_n(a) + \left(1 - \frac{d}{2(m-a)} - \frac{d}{2(b-m)} \right) f_n(m)+ \frac{d}{2(b-m)} f_n(b).
\]
\item If $f_n(b) + f'_n(b) (m-b) < f_n(m)$ and $d< d_0\coloneqq\frac{2(m-a)(\rho_{t}-m)}{\rho_t-a}$, then
\[Z_s(n)=\frac{d}{2(m-a)} f_n(a) + \left(1 - \frac{d}{2(m-a)} - \frac{d}{2(\rho_t-m)} \right) f_n(m)+ \frac{d}{2(\rho_t-m)} f_n(\rho_t).
\]
\item If $f_n(b) + f'_n(b) (m-b) < f_n(m)$ and $d\geq d_0\coloneqq\frac{2(m-a)(\rho_{t}-m)}{\rho_t-a}$, then
\[ Z_s(n)=\frac{d}{2(m-a)} f_n(a) + \left(1-\frac{d}{2(m-a)} \right) f_n\left(\frac{ad+2m(a-m)}{d+2(a-m)}\right).
\]
\end{enumerate}
\end{thm}
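A direct numerical rendering of the three cases is sketched below; the function takes the rate $f_n$, its derivative, and the tangent point $\rho_t$ as generic inputs (for instance, the \texttt{f\_n}, \texttt{fprime}, and \texttt{tangent\_point} helpers from the bisection sketch above), and it is only meaningful under the hypotheses of Theorem \ref{thm:so_mad}, i.e., $m\leq 1$ and $\frac{R\mu}{C}\geq n+1$.
\begin{verbatim}
def Z_s(f, fp, a, b, m, d, rho_t):
    # worst-case expected social benefit rate via the three cases of
    # Theorem thm:so_mad; f, fp = rate function and its derivative
    p_a = d / (2.0 * (m - a))                 # mass at the left endpoint a
    if f(b) + fp(b) * (m - b) >= f(m):        # case 1: support {a, m, b}
        p_b = d / (2.0 * (b - m))
        return p_a * f(a) + (1.0 - p_a - p_b) * f(m) + p_b * f(b)
    d0 = 2.0 * (m - a) * (rho_t - m) / (rho_t - a)
    if d < d0:                                # case 2: support {a, m, rho_t}
        p_t = d / (2.0 * (rho_t - m))
        return p_a * f(a) + (1.0 - p_a - p_t) * f(m) + p_t * f(rho_t)
    rho_2 = (a * d + 2.0 * m * (a - m)) / (d + 2.0 * (a - m))
    return p_a * f(a) + (1.0 - p_a) * f(rho_2)   # case 3: support {a, rho_2}
\end{verbatim}
For example, with the helpers above one would call \texttt{Z\_s(lambda r: f\_n(r, n), lambda r: fprime(r, n), a, b, m, d, tangent\_point(n, m, b))}.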
Theorem \ref{thm:so_mad} enables us to solve the worst-case expectation problem analytically under certain conditions. For the more general case, however, we are unable to obtain a closed-form solution. In the following theorem, we show that the worst-case expectation problem admits a semidefinite programming reformulation that can be solved in polynomial time using standard off-the-shelf solvers, such as SDPT3~\cite{toh1999sdpt3} and MOSEK~\cite{mosek}.
\begin{thm}
\label{thm:MAD_so}
For any $n \geq 1$, the worst-case expected social benefit rate $Z_s(n)$ coincides with the optimal value of the following semidefinite program.
\begin{equation}
\label{eq:so_sdp}
\begin{array}{llll}
&\sup \;\; &\alpha d + \beta m + \gamma &\\
&\textnormal{s.t.} \;\; &\alpha,\beta,\gamma \in \mathbb R, \; y, z \in \mathbb R^{n+4}, \; X, X' \in \mathbb S^{n+4}_+\\
&& y_1 = R\mu - C - y_0 + y_{n+3}, \; y_2 = -R\mu - y_{n+3},& \\
&& y_3, \cdots, y_n = 0, \; y_{n+1} = - R\mu + C(n+1) - y_0,& \\
&& y_{n+2} = R\mu - C n + y_0 - y_{n+3}, \;& \\
&& y_{0} = \alpha m +\gamma , y_{n+3}= -\alpha + \beta \;&\\
&&\displaystyle \sum_{i + j = 2l - 1} x_{ij} = 0&\forall l \in [n+4]\\
&&\displaystyle \sum_{i + j = 2l} x_{ij} = \sum_{q=0}^{l} \sum_{r=q}^{n+3+q-l} y_r {r \choose q} {{n+3-r} \choose {l-q}} a^{r-q} m^q & \forall l \in [n+4] \cup \{0\}\\
&& z_1 = R\mu - C - z_0 + z_{n+3}, \; z_2 = -R\mu - z_{n+3}, &\\
&& z_3, \cdots, z_n = 0, \; z_{n+1} = - R\mu + C(n+1) - z_0, &\\
&& z_{n+2} = R\mu - C n + z_0 - z_{n+3} \; &\\
&& z_{0} = -\alpha m +\gamma , z_{n+3}= \alpha + \beta \; \\
&& \displaystyle \sum_{i + j = 2l - 1} x'_{ij} = 0 & \forall l \in [n+4]\\
&& \displaystyle \sum_{i + j = 2l} x'_{ij} = \sum_{q=0}^{l} \sum_{r=q}^{n+3+q-l} z_r {r \choose q} {{n+3-r} \choose {l-q}} m^{r-q} b^q & \forall l \in [n+4] \cup \{0\}\\
\end{array}
\end{equation}
\end{thm}
The proof of this theorem relies on the following lemma which expresses a univariate polynomial inequality in terms of semidefinite constraints.
\begin{lem}[Proposition 3.1(f) in \cite{bertsimas2005optimal}]
\label{poly_lem_so1}
The polynomial $g(\rho) = \sum_{r = 0}^{k} y_r \rho^r$ satisfies $g(\rho) \geq 0$ for all $ \rho \in [a,b]$ if and only if there exists a positive semidefinite matrix $X = [x_{ij}]_{i, j = 0, \cdots, k}\in \mathbb S_+^{k+1}$, such that
\begin{align*}
0 = &\sum_{i,j:i+j = 2l -1} x_{ij} && \forall l = 1, \cdots, k \\
\nonumber \sum_{q=0}^{l} \sum_{r=q}^{k+q-l} y_r {r \choose q} {{k-r} \choose {l-q}} a^{r-q} b^q= & \sum_{i,j:i+j = 2l} x_{ij} && \forall l = 0, \cdots, k.
\end{align*}
\end{lem}
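To illustrate how the lemma is used, the sketch below builds the corresponding positive semidefinite matrix variable and linear constraints with CVXPY; the choice of CVXPY as a modeling layer is ours and not prescribed by the paper, and the coefficients \texttt{y} may be plain numbers or decision-variable expressions, exactly as in the proof that follows.
\begin{verbatim}
import math
import cvxpy as cp

def nonneg_on_interval(y, a, b):
    # SDP certificate that g(rho) = sum_r y[r] * rho**r >= 0 on [a, b],
    # following Lemma poly_lem_so1; returns the PSD variable X and the
    # linear constraints linking X to the coefficients y
    k = len(y) - 1
    X = cp.Variable((k + 1, k + 1), PSD=True)
    cons = []
    for l in range(k + 1):
        if l >= 1:   # odd anti-diagonal i + j = 2l - 1 sums to zero
            cons.append(sum(X[i, 2 * l - 1 - i]
                            for i in range(max(0, 2 * l - 1 - k),
                                           min(k, 2 * l - 1) + 1)) == 0)
        even = sum(X[i, 2 * l - i]
                   for i in range(max(0, 2 * l - k), min(k, 2 * l) + 1))
        rhs = sum(y[r] * math.comb(r, q) * math.comb(k - r, l - q)
                  * a ** (r - q) * b ** q
                  for q in range(l + 1) for r in range(q, k + q - l + 1))
        cons.append(even == rhs)
    return X, cons

# example: certify that g(rho) = 1 - rho + rho^2 is nonnegative on [0, 2]
X, cons = nonneg_on_interval([1.0, -1.0, 1.0], a=0.0, b=2.0)
cp.Problem(cp.Minimize(0), cons).solve()
\end{verbatim}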
\begin{proof}[Proof of Theorem~\ref{thm:MAD_so}]
Recall that the dual of $\inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} [f_n(\tilde\rho)]$ for $\tilde\rho$ supported on the interval $[a,b]$ is given by (cf. problem \eqref{eq:so_mean_abs_dual}):
\begin{equation}
\nonumber
\begin{array}{ccll}
&\displaystyle\sup_{\alpha,\beta,\gamma \in \mathbb{R}}&\displaystyle \alpha d + \beta m + \gamma\\
&\textnormal{s.t.}& \displaystyle \alpha|\rho-m| +\beta \rho + \gamma \leq f_n(\rho) \quad \quad \forall \rho \in [a,b].
\end{array}
\end{equation}
We can deal with the semi-infinite constraint separately for the cases $\rho \leq m$ and $\rho \geq m$:
\begin{equation}
\nonumber
\begin{array}{ccll}
&\displaystyle\sup_{\alpha,\beta,\gamma \in \mathbb{R}}&\displaystyle \alpha d + \beta m + \gamma\\
&\textnormal{s.t.}& \displaystyle \alpha(m-\rho) +\beta \rho + \gamma \leq f_n(\rho) \quad \quad \forall \rho \in [a,m]\\
&& \displaystyle \alpha(\rho-m) +\beta \rho + \gamma \leq f_n(\rho) \quad \quad \forall \rho \in [m,b].
\end{array}
\end{equation}
Substituting the definition of $f_n(\rho)$ in \eqref{eq:social_rate} and applying algebraic reductions yield the following polynomial inequalities:
\begin{align}
\footnotesize
\label{eq:so_MAD_poly}
\nonumber &-(\alpha m + \gamma) \rho^0 + (R\mu - C - \beta + \gamma+\alpha m + \alpha) \rho + (-R\mu -\alpha+ \beta) \rho^2 + (-R\mu + Cn + C + \alpha m + \gamma) \rho^{n+1} \\ \nonumber & \quad\quad+ (R\mu - Cn -\alpha m - \alpha+ \beta - \gamma) \rho^{n+2} +(\alpha-\beta) \rho^{n+3} \geq 0 \qquad \,\forall \rho \in [a,m], \quad\textup { and }\\
\nonumber &(\alpha m - \gamma) \rho^0 + (R\mu - C -\alpha m - \alpha- \beta + \gamma) \rho + (-R\mu +\alpha+ \beta) \rho^2 + (-R\mu + Cn + C -\alpha m+ \gamma) \rho^{n+1} \\ & \quad\quad + (R\mu - Cn + \alpha m + \alpha+\beta - \gamma) \rho^{n+2} -(\alpha+\beta) \rho^{n+3} \geq 0 \qquad \forall \rho \in [m,b].
\end{align}
The inequalities are of the form $g_1(\rho) = \sum_{r = 0}^{n+3} y_r \rho^r \geq 0$ for $\rho \in [a,m]$ and $g_2(\rho) = \sum_{r = 0}^{n+3} z_r \rho^r \geq 0$ for $\rho \in [m,b]$, where $y= (y_0,\ldots,y_{n+3})$ and $z=(z_0,\ldots,z_{n+3})$ collect the coefficients of the respective polynomials. We now invoke Lemma \ref{poly_lem_so1} with $k = n+3$ to express the inequalities in \eqref{eq:so_MAD_poly} as semidefinite constraints. The resulting semidefinite program is equivalent to the original problem, which completes the proof.
\end{proof}
\begin{rem}
In this subsection, we present two results: Theorem \ref{thm:so_mad} provides a closed-form solution under certain prerequisites, while Theorem \ref{thm:MAD_so} gives an SDP reformulation for the general case.
It is worth noting that Theorem \ref{thm:so_mad} requires the parameters to satisfy $n \leq \frac{R\mu}{C} - 1$. By Proposition \ref{prop:so<ne}, there exists an optimal threshold $\hat{n}_s$ less than or equal to $\hat{n}_e$, i.e.,~$\exists \hat{n}_s \leq \hat {n}_e= \left\lfloor{\frac{R{\mu}}{C}} \right\rfloor$. Thus, for a strategic queue with maximum length $n \leq \left\lfloor{\frac{R{\mu}}{C}} \right\rfloor$ and mean arrival rate $m \leq 1$, Theorem~\ref{thm:so_mad} can be applied to compute the worst-case expected social benefit rate for the first $\left\lfloor{\frac{R{\mu}}{C}} \right\rfloor-1$ values of $n$. This greatly reduces the time needed to solve \eqref{eq:mean_so1}, since we only need to solve an SDP once, for the remaining case $n=\left\lfloor{\frac{R{\mu}}{C}} \right\rfloor$. On the other hand, for a strategic queue with mean arrival rate $m >1$, we cannot invoke Theorem \ref{thm:so_mad} and need to solve an SDP for each $n \in \mathbb Z_+$ satisfying $1 \leq n \leq \hat n_e$.
\end{rem}
\subsection{Revenue Optimization}
To determine an optimal joining threshold for a revenue maximizer, we compute the worst-case expected revenue rate $Z_r(n)$ for every $n\in\mathbb Z_+$ with $1 \leq n \leq n_e$, and choose an $\hat n_r$ such that $\hat n_r \in \argmax_{n \in \mathbb Z_+} \{Z_r(n)\}$. To this end, we show how to compute the worst-case expected revenue rate for each $n$. Suppose the mean and MAD of the uncertain parameter $\tilde\rho$ are known. Then the worst-case expected revenue rate is given by the following optimization problem:
\begin{equation}
\label{eq:rm_mean_abs_primal}
\begin{array}{ccll}
Z_r(n)=&\displaystyle \inf_{\nu \in \mathcal M_+}&\displaystyle \int_{\Xi} r_n(\rho) \nu (\rm d\rho) \\
&\textnormal{s.t.}& \displaystyle \int_{\Xi} |\rho-m| \, \nu (\rm{d}\rho)=\mathit d\\
&& \displaystyle \int_{\Xi} \rho \, \nu(\rm d\rho)=\mathit{m}\\
&& \displaystyle \int_{\Xi} \nu(\rm d\rho)= 1.
\end{array}
\end{equation}
To derive a tractable reformulation, we first investigate the structure of the revenue rate function $r_n(\rho)$.
\begin{lem}\label{lem:rm_concave}
The revenue rate function $r_n(\rho)$ is concave for $\rho \in \mathbb R_+$.
\end{lem}
Equipped with Lemma \ref{lem:rm_concave}, we now show that the worst-case expectation problem \eqref{eq:rm_mean_abs_primal} admits a closed form solution.
\begin{thm}
\label{thm:rm_mean_abs}
For any $n \geq 1$, the worst-case expected revenue rate is given by
\[ Z_r(n)=\frac{d}{2(m-a)} r_n(a) + \left(1 - \frac{d}{2(m-a)} - \frac{d}{2(b-m)} \right) r_n(m)+ \frac{d}{2(b-m)} r_n(b).
\]
\end{thm}
To prove this theorem, we invoke a classical result that characterizes the worst-case distribution from the MAD ambiguity set for a concave loss function.
\begin{lem} [Theorem 3 in \cite{Bental1972}]
\label{lem:bental1972}
Suppose $f(\rho)$ is a concave function and the ambiguity set is defined as $\mathcal P = \{ \mathbb P \in \mathcal P_0([a,b]):\; \mathbb E_{\mathbb P} [\tilde\rho] = m, \; \mathbb E_{\mathbb P} \left[|\tilde\rho- m| \right] = d\}$. The extremal distribution that solves $\inf_{\mathbb P \in \mathcal P} \mathbb E_{\mathbb P} [f(\tilde\rho)]$
is a three-point distribution supported on $\rho_1=a$, $\rho_2 = m$, $\rho_3 = b$ with probabilities
\begin{equation}
\label{eq:three_points_dist}
p_1 = \frac{d}{2(m-a)},\ p_2 = 1 - \frac{d}{2(m-a)} - \frac{d}{2(b-m)},\ p_3= \frac{d}{2(b-m)}.
\end{equation}
\end{lem}
\begin{proof}[Proof of Theorem \ref{thm:rm_mean_abs}]
From Lemma \ref{lem:rm_concave}, the revenue rate function $r_n(\rho)$ is concave. Therefore, applying Lemma \ref{lem:bental1972} yields the result.
\end{proof}
\section{Extension to the Data-Driven Setting}\label{sec:data-driven-mad}
In this section, we design a distributionally robust model using a purely data-driven ambiguity set constructed from historical samples. As we observed in the previous section, distributionally robust models with a moment ambiguity set require decision makers to have access to precise values of the mean, variance, or MAD of the true unknown distribution, which may not be realistic in practice. Decision makers usually construct such moment ambiguity sets by plugging in point estimates computed from historical samples. However, one can rarely be entirely confident in these empirical estimators: when the sample size is small, they may be far from the true values, and some estimators, such as the empirical MAD, are even biased. To mitigate the adverse effects of these estimation errors, we develop a distributionally robust model with a data-driven MAD ambiguity set.
Unlike the setting in the previous section, here we assume queue system managers only have access to $N$ independent and identically distributed samples of the traffic intensity given by $\{\hat{\rho}_i\}_{i \in [N]}$, where $\hat \rho_i = \hat \lambda_i/\mu$.
Suppose the true mean and MAD of the underlying distribution are unknown but belong, with high probability, to two confidence intervals $\mathcal T=[m_l,m_u]$ and $\mathcal D=[d_l,d_u]$ constructed from the samples. Then the proposed data-driven distributionally robust model is formulated as
\begin{equation}\label{eq:ddmad}
\max_{n \in \mathbb Z_+} \inf_{m \in \mathcal T, d \in \mathcal D} \inf_{ \mathbb P \in \mathcal P} \mathbb E_{\mathbb P} [c_n(\tilde\rho)],
\end{equation}
where $\mathcal P$ is the primitive MAD ambiguity set defined in \eqref{eq:ambiguity_set}. By optimizing in view of the worst-case mean and MAD, the model provides another layer of robustification against errors from the empirical estimators.
Observe that the inner two-layer infimum problem can be rewritten as
\begin{equation}\label{eq:ddmad_inf}
\overline{Z}(n)\coloneqq \inf_{ \mathbb P \in \mathcal P'_N} \mathbb E_{\mathbb P} [c_n(\tilde\rho)],
\end{equation}
where the modified data-driven ambiguity set is defined as
\begin{equation}\label{eq:data-driven-ambiguityset}
\mathcal P'_N= \{ \mathbb P \in \mathcal P_0([a,b]):\; m_l \leq \mathbb E_{\mathbb P} [\tilde\rho] \leq m_u, \; d_l \leq \mathbb E_{\mathbb P} \left[|\tilde\rho- m| \right] \leq d_u\}.
\end{equation}
Therefore, the results of Propositions \ref{prop:so<ne} and \ref{prop:rm<ne} still hold, and we can obtain the optimal value of \eqref{eq:ddmad} by solving $\overline{Z}(n)$ for each $n\in \mathbb Z_+$ satisfying $1 \leq n \leq n_e$ and selecting the threshold with the largest objective value.
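Schematically, the overall data-driven procedure is the enumeration sketched below, where \texttt{worst\_case\_rate} is a placeholder for whichever reformulation of $\overline{Z}(n)$ is used (the semidefinite program of Theorem \ref{thm:data-driven-social} for a social optimizer or the linear program of Theorem \ref{thm:data-driven-revenue} for a revenue maximizer, both derived below).
\begin{verbatim}
def optimal_threshold(worst_case_rate, n_e):
    # enumerate n = 1, ..., n_e and keep the threshold whose worst-case
    # expected rate is largest; worst_case_rate(n) is a placeholder for
    # the reformulations derived in this section
    best_n, best_val = None, float("-inf")
    for n in range(1, n_e + 1):
        val = worst_case_rate(n)
        if val > best_val:
            best_n, best_val = n, val
    return best_n, best_val
\end{verbatim}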
We now derive the reformulations for the worst-case expected social benefit and revenue rates. To this end, we define the worst-case expected social benefit rate with the data-driven MAD ambiguity set by
\[ \overline{Z}_s(n)\coloneqq \inf_{ \mathbb P \in \mathcal P'_N} \mathbb E_{\mathbb P} [f_n(\tilde\rho)],
\] and the worst-case expected revenue rate with the data-driven MAD ambiguity set by
\[ \overline{Z}_r(n)\coloneqq \inf_{ \mathbb P \in \mathcal P'_N} \mathbb E_{\mathbb P} [r_n(\tilde\rho)].
\]
The next theorem presents the reformulation of the worst-case expected social benefit rate. We relegate the proofs of this section to Appendix \ref{sec:proof_sec4}.
\begin{thm}\label{thm:data-driven-social}
For any $n \geq 1$, the worst-case expected social benefit rate $\overline{Z}_s(n)$ coincides with the optimal value of the following semidefinite problem:
\begin{equation*}
\begin{array}{llll}
&\sup \;\; &\gamma+\theta_1 d_l - \theta_2 d_u + \theta_3 m_l - \theta_4 m_u\\
&\textnormal{s.t.} \;\; &\gamma \in \mathbb R, \theta_1,\theta_2,\theta_3,\theta_4 \in \mathbb R_+, y, z \in \mathbb R^{n+4}, X, X' \in \mathbb S^{n+4}_+\\
&& y_1 = R\mu - C - y_0 + y_{n+3}, \; y_2 = -R\mu - y_{n+3},& \\
&& y_3, \cdots, y_n = 0, \; y_{n+1} = - R\mu + C(n+1) - y_0,& \\
&& y_{n+2} = R\mu - C n + y_0 - y_{n+3}, \;& \\
&& y_{0} = (\theta_1-\theta_2) \hat m +\gamma , y_{n+3}= -\theta_1+\theta_2 + \theta_3-\theta_4 \;&\\
&&\displaystyle \sum_{i + j = 2l - 1} x_{ij} = 0&\forall l \in [n+4]\\
&&\displaystyle \sum_{i + j = 2l} x_{ij} = \sum_{q=0}^{l} \sum_{r=q}^{n+3+q-l} y_r {r \choose q} {{n+3-r} \choose {l-q}} a^{r-q} \hat m^q & \forall l \in [n+4] \cup \{0\}\\
&& z_1 = R\mu - C - z_0 + z_{n+3}, \; z_2 = -R\mu - z_{n+3}, &\\
&& z_3, \cdots, z_n = 0, \; z_{n+1} = - R\mu + C(n+1) - z_0, &\\
&& z_{n+2} = R\mu - C n + z_0 - z_{n+3} \; &\\
&& z_{0} = -(\theta_1-\theta_2) \hat m +\gamma , z_{n+3}= \theta_1-\theta_2 + \theta_3-\theta_4 \; \\
&& \displaystyle \sum_{i + j = 2l - 1} x'_{ij} = 0 & \forall l \in [n+4]\\
&& \displaystyle \sum_{i + j = 2l} x'_{ij} = \sum_{q=0}^{l} \sum_{r=q}^{n+3+q-l} z_r {r \choose q} {{n+3-r} \choose {l-q}} \hat m^{r-q} b^q & \forall l \in [n+4] \cup \{0\}\\
\end{array}
\end{equation*}
\end{thm}
Note that when $d_l=d_u$ and $m_l=m_u$, setting $\alpha=\theta_1-\theta_2$ and $\beta=\theta_3 -\theta_4$ recovers the dual problem \eqref{eq:so_mean_abs_dual} associated with the primitive MAD ambiguity set, which corresponds to the case where we have absolute trust in the mean and MAD estimators.
The next theorem presents the reformulation of the worst-case expected revenue rate.
\begin{thm}\label{thm:data-driven-revenue}
For any $n \geq 1$, the worst-case expected revenue rate $\overline{Z}_r(n)$ is equal to the optimal value of the following linear program:
\begin{equation*}
\begin{array}{ccll}
&\displaystyle\sup_{\theta \in \mathbb{R}^4_+,\gamma \in \mathbb{R}}&\displaystyle \gamma+\theta_1 d_l - \theta_2 d_u + \theta_3 m_l - \theta_4 m_u\\
&\textnormal{s.t.}& \displaystyle (\theta_1 - \theta_2)|a-\hat m| +(\theta_3-\theta_4) a + \gamma \leq r_n(a)\\
&& \displaystyle (\theta_3-\theta_4) \hat m + \gamma \leq r_n(\hat m)\\
&& \displaystyle (\theta_1 - \theta_2)|b-\hat m| +(\theta_3-\theta_4) b + \gamma \leq r_n(b).
\end{array}
\end{equation*}
\end{thm}
Theorems \ref{thm:data-driven-social} and \ref{thm:data-driven-revenue} provide tractable reformulations for the social and revenue optimization problems. An additional advantage of the data-driven model is that it offers attractive finite-sample guarantees.
Compared with the original MAD ambiguity set, which imposes a unique mean and MAD, the data-driven MAD ambiguity set allows these parameters to vary within confidence intervals. In this way, we can ensure that the set contains the true underlying distribution with high probability, which immediately yields out-of-sample performance guarantees for the resulting solution.
\begin{thm}\label{thm:finite-sample}
Let $\{\hat{\rho}_i\}_{i \in [N]}$ be a set of $N$ samples generated independently at random from $\mathbb P^\star$ and let~$v^\star$ denote the optimal value of \eqref{eq:ddmad}. By setting
\begin{align}\label{eq:confidence_interval}
\nonumber&\mathcal T=\left[\hat m - (b-a) \sqrt{\frac{\log 4/\delta}{2N}}, \hat m + (b-a) \sqrt{\frac{\log 4/\delta}{2N}}\;\right]\\
&\mathcal D=\left[\hat d - (b-a) \sqrt{\frac{9\log 4/\delta}{2N}}, \hat d + (b-a) \sqrt{\frac{9\log 4/\delta}{2N}}\;\right],
\end{align}
we have
\[\textup{Prob} \left(v^\star \leq \mathbb E_{\mathbb P^\star} [c_{\hat n}(\rho)] \right) \geq 1-\delta ,
\]
where $\hat n$ is the optimal threshold obtained from \eqref{eq:ddmad}.
\end{thm}
\begin{proof}
The error of the empirical MAD estimate is given by
\begin{align*}
&\nonumber\left| \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -\hat m |\right] \right| \\
=& \max \left\{ \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -\hat m |\right], -\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| + \mathbb E \left[|\tilde \rho -\hat m |\right] \right\}.
\end{align*}
We upper bound both terms inside the max operator. The first term is bounded by
\begin{align*}
\nonumber\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -\hat m |\right] \leq \;& \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m|- \mathbb E \left[|\left|\tilde\rho - m| - |\hat m -m| \right| \right]\\
\leq \; &\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m|- \mathbb E \left[|\tilde\rho - m| - |\hat m -m| \right] \\
\leq \; & \left|\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho - m |\right] \right|+ |\hat m -m|,
\end{align*}
where the first inequality follows from the reverse triangle inequality. Meanwhile, the second term is bounded by
\begin{align*}
-\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| + \mathbb E \left[|\tilde \rho -\hat m |\right] \leq \;& -\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| + \mathbb E \left[|\tilde \rho -m|+|\hat m - m|\right]\\
\leq \;& \left|\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho - m |\right] \right|+ |\hat m -m|.
\end{align*}
Since both terms admit the same upper bound, we have
\begin{equation*}
\left| \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -\hat m |\right] \right| \leq \;\left|\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho - m |\right] \right|+ |\hat m -m|.
\end{equation*}
As $\hat m$ is an unbiased estimator of $m$, we can invoke Hoeffding's inequality to derive a confidence interval for the second term. However, the empirical MAD is biased, i.e., $\mathbb E [\frac{1}{N} \sum_{i=1}^N |\hat\rho_i-\hat m|] \neq \mathbb E \left[|\tilde\rho -m |\right]$, so Hoeffding's inequality is not directly applicable to the first term. To derive a confidence interval for this term, we rewrite it as
\begin{align*}\label{eq:abs_max}
&\nonumber\left| \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -m |\right] \right| \\
=& \max \left\{ \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -m |\right], -\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| + \mathbb E \left[|\tilde \rho -m |\right] \right\}.
\end{align*}
We further upper bound the two terms inside the max operator. For the first term, we have
\begin{align*}
\nonumber\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -m |\right] \leq \;& \nonumber\frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-m| + |m-\hat m| - \mathbb E \left[|\tilde \rho -m |\right]\\
\leq \; & \left| \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-m| - \mathbb E \left[|\tilde \rho -m |\right] \right| + |m-\hat m|.
\end{align*}
For the second term, applying the reverse triangle inequality yields
\begin{align*}
\nonumber \mathbb E \left[|\tilde \rho -m |\right] - \displaystyle \frac{1}{N}\sum_{i=1}^N |\hat \rho_i-\hat m|
\leq &\; \nonumber \mathbb E \left[|\tilde \rho -m |\right] - \frac{1}{N} \displaystyle \sum_{i=1}^N \left||\hat \rho_i-m| - |\hat m - m|\right|\\
\leq & \; \nonumber \mathbb E \left[|\tilde \rho -m |\right] - \displaystyle \frac{1}{N} \sum_{i=1}^N |\hat \rho_i-m| + |\hat m - m|\\
\leq & \;\left|\mathbb E \left[|\tilde \rho -m |\right] - \displaystyle \frac{1}{N} \sum_{i=1}^N |\hat \rho_i-m| \right| + |\hat m - m|.
\end{align*}
Thus, we have
\begin{equation*}
\left| \frac{1}{N} \displaystyle \sum_{i=1}^N |\hat \rho_i-\hat m| - \mathbb E \left[|\tilde \rho -m |\right] \right| \leq \left|\mathbb E \left[|\tilde \rho -m |\right] - \displaystyle \frac{1}{N} \sum_{i=1}^N |\hat \rho_i-m| \right| + 2|\hat m - m|.
\end{equation*}
Since $\frac{1}{N}\sum_{i=1}^N |\hat \rho_i-m|$ and $\hat m$ are unbiased estimators of $\mathbb E[|\tilde\rho - m|]$ and $m$, respectively, we can apply Hoeffding's inequality to each term and obtain
\begin{align}
&\nonumber\textup{Prob} \left(\left|\mathbb E \left[|\tilde \rho -m |\right] - \displaystyle \frac{1}{N} \sum_{i=1}^N |\hat \rho_i-m| \right| \geq r_1\right) \leq 2 \exp \left(-\frac{2Nr_1^2}{(b-a)^2}\right) \quad \textup{and}\\
&\nonumber\label{eq:mean_hoeffding}\textup{Prob} \left(|\hat m - m| \geq r_2\right) \leq 2 \exp \left(-\frac{2Nr_2^2}{(b-a)^2}\right).
\end{align}
By applying the union bound and setting $r_1=r_2=r/3$, we arrive at the desired confidence intervals that the true mean $m$ and MAD $d$ satisfy
\begin{equation*}
\begin{array}{lll}
\hat m - (b-a) \sqrt{\frac{\log 4/\delta}{2N}} \leq& m & \leq \hat m + (b-a) \sqrt{\frac{\log 4/\delta}{2N}}\\
\hat d - (b-a) \sqrt{\frac{9\log 4/\delta}{2N}} \leq& d & \leq \hat d + (b-a) \sqrt{\frac{9\log 4/\delta}{2N}}
\end{array}
\end{equation*}
with probability at least $1-\delta$.
Therefore, by setting the confidence intervals $\mathcal T$ and $\mathcal D$ as in \eqref{eq:confidence_interval}, we have
\[ \textup{Prob} \left(\mathcal P'_N \ni \mathbb P^\star \right) \geq 1-\delta,
\]where $\mathcal P'_N$ is the data-driven ambiguity set \eqref{eq:data-driven-ambiguityset} constructed from the $N$ random samples
drawn from the underlying distribution $\mathbb P^\star$.
As $v^\star$ is the optimal value of \eqref{eq:ddmad}, i.e., $v^\star = \inf_{ \mathbb P \in \mathcal P'_N} \mathbb E_{\mathbb P} [c_{\hat n}(\tilde\rho)]$, and the probability that $\mathcal P'_N$ contains the true distribution~$\mathbb P^\star$ is at least $1-\delta$, we have
\[\textup{Prob} \left(v^\star \leq \mathbb E_{\mathbb P^\star} [c_{\hat n}(\rho)] \right) \geq 1-\delta ,
\]
which completes the proof.
\end{proof}
The theorem establishes that, with judicious choices of the confidence interval lengths, the optimal value $v^\star$ of the data-driven DRO model provides a high-confidence lower bound on the expected benefit rate of the robust solution $\hat n$ under the true underlying distribution $\mathbb P^\star$.
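In code, constructing the intervals of \eqref{eq:confidence_interval} from data amounts to the following minimal sketch (NumPy assumed):
\begin{verbatim}
import numpy as np

def mad_confidence_sets(samples, a, b, delta):
    # confidence intervals T (mean) and D (MAD) of eq:confidence_interval
    samples = np.asarray(samples, dtype=float)
    N = samples.size
    m_hat = samples.mean()
    d_hat = np.abs(samples - m_hat).mean()   # empirical MAD (biased)
    eps_m = (b - a) * np.sqrt(np.log(4.0 / delta) / (2.0 * N))
    eps_d = (b - a) * np.sqrt(9.0 * np.log(4.0 / delta) / (2.0 * N))
    return (m_hat - eps_m, m_hat + eps_m), (d_hat - eps_d, d_hat + eps_d)
\end{verbatim}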
\begin{rem} An avid reader may be interested in employing the popular Wasserstein DRO model in the data-driven setting. Indeed, that model has been widely adopted because it generates asymptotically consistent solutions and offers similarly attractive finite-sample guarantees. Unfortunately,
the reformulation of this data-driven DRO model involves $\mathcal O(N)$ semidefinite constraints, which makes the problem computationally intensive. For readers who are interested in the Wasserstein ambiguity set, we provide a detailed discussion in Appendix \ref{sec:wasser}.
\end{rem}
\section{Numerical Experiment}\label{sec:numerical}
In this section, we present numerical experiments and examine the performance of different DRO policies.
All optimization problems are implemented in MATLAB and solved by SDPT3~\cite{toh1999sdpt3} via the YALMIP interface~\cite{lofberg2004yalmip}. The experiments are run on a 2.2GHz Intel Core i7 CPU laptop with 8GB RAM.
We assess the out-of-sample performance of the data-driven policies for a social optimizer and a revenue maximizer through a fair out-of-sample experiment.
We assume we have access to $N$ independent samples $\{\hat{\rho}_i\}_{i \in [N]}$ of the traffic intensity drawn from the true underlying distribution~$\mathbb P^\star$, and we construct three ambiguity sets: an empirical MAD ambiguity set, a data-driven MAD (DD-MAD) ambiguity set, and a Wasserstein ambiguity set.
The empirical MAD ambiguity set is defined in \eqref{eq:ambiguity_set}, where we directly substitute the empirical mean and MAD for $m$ and $d$, respectively. The DD-MAD ambiguity set is defined in \eqref{eq:data-driven-ambiguityset}, where, rather than plugging in the empirical estimators directly, we construct confidence intervals around the empirical mean and MAD. The Wasserstein ambiguity set~\cite{esfahani2017data,gao2016distributionally} is a popular data-driven ambiguity set. However, its complexity scales with the number of samples, making the problem computationally intensive for large sample sizes. We derive the reformulation of the Wasserstein model in Appendix \ref{sec:wasser}.
Once the ambiguity sets are constructed, we compute the distributionally robust thresholds that maximize the worst-case expected benefit rate under each of them. Finally, we compare the three solutions in a fair out-of-sample experiment relative to the sample average approximation (SAA) method, which na\"ively assumes that the empirical distribution generated from the $N$ samples is the true underlying distribution.
We conduct the out-of-sample trials for datasets containing $N = 2, 4, \dots, 10, 20, 40, \dots, 100$ independent samples.
We assume the arrival rate is generated by $\lambda=2 \tilde b$, where $\tilde b\sim Beta(0.1,0.5)$.
In each trial, we draw $N$ independent training samples $\{\hat{\rho}_i\}_{i \in [N]}$ from $\mathbb P^\star$. We then compute the optimal thresholds $ \hat n_{d}$, $ \hat n_{dd}$ and $\hat n_{w}$ for the MAD, DD-MAD, and Wasserstein DRO models, respectively. We also compute the SAA threshold $\hat n_{SAA}$ by solving the sample average approximation model. Based on the scaling rates derived in Theorem \ref{thm:finite-sample} and \cite[Theorem 3.4]{esfahani2017data}, the size of the confidence intervals in \eqref{eq:ddmad} is set to $C_1/\sqrt{N}$ and the Wasserstein radius is set to $C_2/\sqrt{N}$, where $C_1$ and $C_2$ are chosen from the set $\{5,1,0.5,0.1,0.05,0.01 \}$ using a \emph{$k$-fold cross-validation} procedure. Specifically, we partition the in-sample data $\{\hat{\rho}_i\}_{i \in [N]}$ into $k=\min\{N,5\}$ folds; for each fold $i$, the $i$-th fold is held out as a validation set, the remaining $k-1$ folds are merged into a subtraining set on which the threshold is computed for each candidate constant, and the resulting policy is evaluated on the validation fold. We then choose the constant that performs best on average across the folds.
The out-of-sample expected benefit/revenue rate $\mathbb E_{\mathbb P^\star}[c_{\hat n}(\rho)]$ of each strategy is then estimated to high accuracy using 10,000 test samples from $\mathbb P^\star$.
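For reference, the sampling scheme and the Monte Carlo evaluation can be sketched as follows; the callable \texttt{benefit\_rate} stands for the relevant rate function ($f_{\hat n}$ or $r_{\hat n}$) and is not spelled out in this snippet.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(seed=0)
mu = 1.0

def sample_rho(size):
    # traffic intensities rho = lambda / mu with lambda = 2 * Beta(0.1, 0.5)
    return 2.0 * rng.beta(0.1, 0.5, size=size) / mu

def out_of_sample_rate(benefit_rate, n_hat, num_test=10_000):
    # Monte Carlo estimate of the expected rate under the true distribution
    test = sample_rho(num_test)
    return float(np.mean([benefit_rate(n_hat, r) for r in test]))
\end{verbatim}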
\begin{figure}
\centering
% Graphics omitted: out-of-sample improvement curves for the social optimizer and the revenue maximizer.
\caption{Improvements of the DRO policies relative to the SAA policy for the social optimizer and the revenue maximizer, respectively.\label{fig:so_oss_impro}}
\label{fig:improvement_so}
\end{figure}
Figure \ref{fig:improvement_so} depicts the out-of-sample performance of a social optimizer and a revenue maximizer under different DRO policies with $R=10$, $C=1$ and $\mu=1$. The expected values and $95$th percentiles are computed from $20$ independent trials. The $y$-axis shows the improvement of the DRO policies relative to the SAA policy, while the $x$-axis denotes the sample size. In the social optimization problem, the curve of the Wasserstein model terminates at $N=6$ because the solver fails to converge once the sample size reaches $8$.
Meanwhile, we observe that in the revenue maximization problem the Wasserstein model dominates the SAA model uniformly across all sample sizes, while the MAD and DD-MAD models outperform the SAA model for moderate sample sizes. This is because the Wasserstein ambiguity set converges to the true distribution as the number of samples grows, whereas the moment ambiguity sets fail to do so.
We also find that the MAD model performs poorly when the sample size is small because the empirical MAD constitutes a biased estimator with significant estimation errors. On the other hand, the DD-MAD model---by optimizing in view of the most adverse mean and MAD---mitigates the detrimental effects of poor empirical estimations and generates high-quality policies. Finally, we observe that the advantages of the DRO policies relative to the SAA method are generally more substantial for the $95$th percentiles. This underlines a major advantage of incorporating the DRO scheme, as it reduces the likelihood of realizing extremely poor performance in the out-of-sample test.
\begin{table}[]
\centering
\begin{tabular}{|l|p{2.5cm}|r|r|r|r|r|r|}
\multicolumn{2}{c}{}& \multicolumn{6}{c}{Sample size $N$ } \\
\hline
&Model Name&\multicolumn{1}{r}{2}&\multicolumn{1}{r}{5}&\multicolumn{1}{r}{10}&\multicolumn{1}{r}{25}&\multicolumn{1}{r}{50}&100\\
\hline
&MAD& \multicolumn{1}{r}{24.72}&\multicolumn{1}{r}{21.38}&\multicolumn{1}{r}{31.42}&\multicolumn{1}{r}{26.65}&\multicolumn{1}{r}{24.30}&29.84\\
Social&DD-MAD&\multicolumn{1}{r}{33.58}&\multicolumn{1}{r}{27.49}&\multicolumn{1}{r}{22.75}&\multicolumn{1}{r}{32.94}&\multicolumn{1}{r}{27.61}&28.71\\
&Wasserstein&\multicolumn{1}{r}{38.19}&\multicolumn{1}{r}{88.36}&\multicolumn{1}{r}{-}&\multicolumn{1}{r}{-}&\multicolumn{1}{r}{-}&-\\
\hline
&MAD& \multicolumn{1}{r}{0.05}&\multicolumn{1}{r}{0.03}&\multicolumn{1}{r}{0.04}&\multicolumn{1}{r}{0.05}&\multicolumn{1}{r}{0.07}&0.06\\
Revenue&DD-MAD&\multicolumn{1}{r}{1.54}&\multicolumn{1}{r}{1.79}&\multicolumn{1}{r}{1.42}&\multicolumn{1}{r}{1.81}&\multicolumn{1}{r}{1.65}&1.59\\
&Wasserstein&\multicolumn{1}{r}{1.69}&\multicolumn{1}{r}{1.92}&\multicolumn{1}{r}{2.41}&\multicolumn{1}{r}{2.63}&\multicolumn{1}{r}{2.95}&4.68\\
\hline
\end{tabular}
\caption{Running time (in seconds) of different methods. The symbol `-' indicates that the model fails to converge within the maximal number of iterations or the time limit.}
\label{tab:time}
\end{table}
Table \ref{tab:time} reports the computation time of different models with the sample size varying from $2$ to $100$. We set the length of the confidence intervals and the radius of the Wasserstein ball to $0.1$. In this experiment, the running time limit of SDPT3 is set to $600$ seconds and the number of iterations is set to $5000$. All computational times are averaged over $10$ trials.
The results in Table \ref{tab:time} indicate that the computational times of the MAD and DD-MAD models are size-invariant in the social optimization problem because the number of constraints is independent of the number of samples. The Wasserstein model is applicable to small-size problems. However, it encounters computational difficulties for moderate-size problem instances: when the sample size reaches $10$, the model diverges or fails to converge within the time/iteration limit. The MAD model is extremely efficient for the revenue maximization problem as it admits a closed-form solution. The DD-MAD model is still size-invariant, and its linear programming reformulation yields a much shorter computational time than the SDP reformulation in the social optimization problem. In addition, the Wasserstein model can be solved efficiently in the revenue maximization problem even for large sample sizes, benefiting from the linear programming reformulation.
In summary, the experimental results highlight the substantial advantage of employing the proposed DD-MAD distributionally robust model, particularly when only a limited number of observations is available to infer the underlying data-generating distribution. It yields attractive out-of-sample performance while remaining very efficient to solve.
\section{Conclusion}
This paper developed an extension of Naor's strategic queue model with uncertain arrival rates using the DRO framework. We showed that under the DRO setting, the optimal threshold of an individual optimizer coincides with Naor's original result, and there exist optimal thresholds of the social and revenue optimizers not larger than the optimal individual threshold. We then proved that the revenue rate function is concave, while the social benefit rate function is concave or unimodal under some mild conditions. These nice properties lead to a closed-form solution for the revenue maximization problem and an analytical solution for the social optimization problem.
Next, we considered the data-driven optimization setting, where decision makers only have access to limited historical samples. We proposed a data-driven MAD model by introducing an extra layer of robustness to the primitive MAD ambiguity set. As the model mitigates the detrimental estimation errors from the empirical mean and MAD, it achieves attractive performance in out-of-sample tests. We derived an SDP reformulation for the social optimization problem and a linear programming reformulation for the revenue maximization problem. We further established finite-sample guarantees for the data-driven model, which provide valuable guidance for choosing the robustness parameters in practice. Our experimental results show that a system manager who disregards ambiguities in the distribution on the arrival rate, as well as errors from the empirical parameter estimations, may incur large out-of-sample costs.
Future work includes extending the DRO scheme to unobservable strategic queues, where newly arriving customers cannot observe the current length of the queue.
\paragraph{Acknowledgements}
This research was supported by the National Science Foundation grant no.~$1752125$.
\appendix \section{Proofs of Section \ref{sec:DROM}} \label{sec:proof_sec3}
\begin{lem}
The first and second derivatives of the social benefit rate function $f_n(\rho)$ are continuous.
\end{lem}
\begin{proof}
To show the continuity of the first and second derivatives of $f_n(\rho)$, we will show that
\begin{equation}\label{eq:f_n(rho)_cont}
f_n(\rho)=R\mu\left(1-\frac{1}{\sum_{k=0}^{n} \rho^k} \right)-C\left(\frac{\rho(\sum_{k=0}^{n-1}\rho^k)+\rho^2(\sum_{k=0}^{n-2}\rho^k)+\ldots+\rho^n}{(\sum_{k=0}^{n}\rho^k)} \right),
\end{equation}
which has continuous first and second derivatives.
First, we transform the term $\frac{\rho (1-\rho^n)}{1-\rho^{n+1}}$ for $\rho \neq 1$. Note that $\frac{\rho (1-\rho^n)}{1-\rho^{n+1}}=1-\frac{1-\rho}{1-\rho^{n+1}}$ and that the denominator factors as $(1-\rho)(1+\rho+\rho^2+\ldots+\rho^{n})$. We can consequently rewrite the first term as
\begin{equation*}
\frac{\rho (1-\rho^n)}{1-\rho^{n+1}}=1-\frac{1}{\sum_{k=0}^{n} \rho^k}.
\end{equation*}
Next, we transform the remaining part $\frac{ (n+1)\rho^{n+1}}{1-\rho^{n+1}}-\frac{ \rho}{1-\rho}$ for $\rho \neq 1$. Similarly, using the fact that $1-\rho^{n+1}=(1-\rho)(\sum_{k=0}^{n} \rho^k)$, we can rewrite this part as
\begin{align*}
\frac{ (n+1)\rho^{n+1}}{1-\rho^{n+1}}-\frac{ \rho}{1-\rho}&=\frac{(n+1)\rho^{n+1}}{(1-\rho)(\sum_{k=0}^{n}\rho^k)}-\frac{\rho(\sum_{k=0}^{n}\rho^k)}{(1-\rho)(\sum_{k=0}^{n}\rho^k)}\\
&=\frac{-\rho-\rho^2-\ldots-\rho^{n}+n\rho^{n+1}}{(1-\rho)(\sum_{k=0}^{n}\rho^k)}\\
&=\frac{\rho^{n+1}-\rho+\rho^{n+1}-\rho^2+\ldots+\rho^{n+1}-\rho^n}{(1-\rho)(\sum_{k=0}^{n}\rho^k)}\\
&=\frac{\rho(\rho-1)(1+\rho+\ldots+\rho^{n-1})+\rho^2(\rho-1)(1+\rho+\ldots+\rho^{n-2})+\ldots+\rho^n(\rho-1)}{(1-\rho)(\sum_{k=0}^{n}\rho^k)}\\
&=-\frac{\rho(\sum_{k=0}^{n-1}\rho^k)+\rho^2(\sum_{k=0}^{n-2}\rho^k)+\ldots+\rho^n}{(\sum_{k=0}^{n}\rho^k)}.
\end{align*}
When $\rho=1$, $R\mu \left(1-\frac{1}{\sum_{k=0}^{n} \rho^k} \right) - C\left(\frac{\rho(\sum_{k=0}^{n-1}\rho^k)+\rho^2(\sum_{k=0}^{n-2}\rho^k)+\ldots+\rho^n}{(\sum_{k=0}^{n}\rho^k)} \right)=R\mu\left(1-\frac{1}{1+n}\right) - C \frac{n}{2}$, which coincides with $f_n(1)$. Therefore, $f_n(\rho)$ is equal to \eqref{eq:f_n(rho)_cont}. One can verify that the first and second derivatives of \eqref{eq:f_n(rho)_cont} are continuous; hence, $f_n(\rho)$ also has these properties.
\end{proof}
\begin{lem}\label{lem:rm_concave2}
The function $h_n(\rho) = \frac{\rho(1-\rho^n)}{1-\rho^{n+1}}$ is strictly concave and monotone increasing on $[0,1) \cup (1, \infty)$.
\end{lem}
\begin{proof}
When $\rho \in [0,1) \cup (1, \infty)$, the first derivative of $h_n(\rho)$ is $$h_n'(\rho)= \frac{n \rho^{n+1} - (n+1) \rho^n + 1}{(1-\rho^{n+1})^2}.$$ Define the numerator as $\varphi_n(\rho) = n \rho^{n+1} - (n+1) \rho^n + 1$. The first derivative of $\varphi_n(\rho)$ is given by $\varphi_n'(\rho) = n(n+1)\rho^{n-1}(\rho-1).$ Note that when $0 < \rho < 1$, $\varphi_n'(\rho)$ is negative, and when $\rho > 1$, $\varphi_n'(\rho)$ is positive. Therefore, the function $\varphi_n(\rho)$ is decreasing on $(0,1)$ and increasing on $(1, \infty)$. Since $\varphi_n(1)= 1 + n - (n+1) = 0$, the numerator $\varphi_n(\rho)$ is positive on $[0,1) \cup (1, \infty)$. As the denominator $(1-\rho^{n+1})^2$ is also positive there, the first derivative $h'_n(\rho)$ is positive on $[0,1) \cup (1,\infty)$. Thus, we conclude that $h_n(\rho)$ is increasing on $[0,1) \cup (1, \infty)$.
Next, we show the second derivative of $h_n(\rho)$ is negative. We have
$$h_n''(\rho)= \frac{(n+1)\rho^{n-1} [n\rho^{n+2}-(n+2) \rho^{n+1} +(n+2)\rho -n]}{(1-\rho^{n+1})^3}.$$ Since the term $\frac{(n+1)\rho^{n-1}}{(1-\rho^{n+1})^3}$ is positive on $[0,1)$ and is negative on $(1,\infty)$, we simply need to determine the sign of $ [n\rho^{n+2}-(n+2) \rho^{n+1} +(n+2)\rho -n]$. For convenience, define
\[\psi_n(\rho)\coloneqq n\rho^{n+2}-(n+2) \rho^{n+1} +(n+2)\rho -n.\]
Note that $\psi_n (0) = -n <0$ and $\psi_n (1) = 0$, while $\lim_{\rho \rightarrow \infty} \psi_n (\rho) = +\infty$. Therefore, if $\psi_n(\rho)$ is increasing on $[0,1) \cup (1, \infty)$, the second derivative $h''_n(\rho)$ will be negative on $[0,1) \cup (1, \infty)$. To show this, we take the first derivative of $\psi_n(\rho)$ and obtain $$ \psi_n'(\rho) = n(n+2)\rho^{n+1} - (n+2)(n+1)\rho^n+(n+2).$$
Evaluating this function at specific points gives $\psi_n'(0)= n+2 >0$ and $\psi_n'(1) = 0$, while $\lim_{\rho \rightarrow \infty} \psi_n'(\rho) = +\infty$. Similarly, if $\psi_n'(\rho)$ is decreasing on $[0,1)$ and increasing on $(1,\infty)$, then $\psi'_n(\rho)$ will be positive on $[0,1) \cup (1, \infty)$. To verify this, we take the second derivative of $\psi_n(\rho)$, which gives $$\psi_n''(\rho) = (n+2)(n+1)n\rho^{n-1}(\rho - 1). $$ One can verify that
$\psi_n''(\rho)$ is non-positive on $[0,1)$ and nonnegative on $(1,\infty)$, so $\psi_n'(\rho)$ is indeed decreasing on $[0,1)$ and increasing on $(1,\infty)$. Thus, we have established that $h''_n(\rho)$ is negative on $[0,1) \cup (1, \infty)$ and hence that $h_n(\rho)$ is concave on $[0,1) \cup (1, \infty)$.
\end{proof}
\begin{lem}\label{lem:rpart_concave}
For any $v \in\mathbb{R}$, $v\geq 1$, the function $g_v(\rho) = \frac{v\rho}{1-\rho^{v}}-\frac{\rho}{1-\rho}$ is concave on $[0,1)$.
\end{lem}
\begin{proof}
For any $v \in\mathbb{R}$, $v\geq 1$, one can verify that $g_v(\rho)$ is continuous and second order differentiable on $[0,1)$. Thus, $g_v(\rho)$ is concave if and only if its second derivative $$ g_v''(\rho) = \frac{v^2 \rho^{v-1} (1 + v + \rho^v(v-1))}{(1 - \rho^v)^3} - \frac{2}{(1-\rho)^3}$$
is non-positive for every $\rho\in[0,1)$. Notice that when $\rho=0$, $g_v''(\rho)=-2$ is less than zero. We now prove that the second derivative is also non-positive on $(0,1)$. We first observe that $g_v''(\rho)=0$ at $v=1$ for all $\rho\in[0,1)$. Consider the partial derivative with respect to $v$:
$$\partial_v g_v''(\rho)={\frac {v \left( \left( \left( {v}^{2}-v \right) \ln \left( \rho \right) -3\,v+2 \right) {\rho}^{3\,v-1}+ \left( 4\,{v}^{2}\ln \left( \rho \right) -4 \right) {\rho}^{2\,v-1}+{\rho}^{v-1} \left( \left( {v}^{2}+v \right) \ln \left( \rho \right) +3\,v+2 \right) \right) }{ \left( 1- {\rho}^{v} \right) ^{4}}}.$$
If this function is non-positive for all $v\in\mathbb{R}$, $v\geq 1$, then we can establish that the second derivative $g_v''(\rho)$ is non-positive for all $\rho\in(0,1)$.
Consider a fixed $v\in\mathbb{R}$, $v\geq 1$.
Defining $\psi_v(\rho)$ as the product of $\partial_v g_v''(\rho)$ and $\frac{(1-\rho^v)^4}{v \, \rho^{v-1}}>0$ yields
\begin{equation}\nonumber
\psi_v(\rho)\coloneqq\left( v \left( v-1 \right) {\rho}^{2\,v}+4\,{v}^{2}{\rho}^{v}+{v}^{2}+v \right) \ln \left( \rho \right) + \left( -3\,v+2 \right) {\rho}^{2\,v}-4 \,{\rho}^{v}+3\,v+2.
\end{equation}
We show $\psi_v(\rho)$ is non-positive for $\rho \in (0,1)$. Observe that $\psi_v(\rho)$ tends to negative infinity as $\rho \rightarrow 0_+$ and equals zero at $\rho=1$. Thus, it is sufficient to show that $\psi_v(\rho)$ is increasing on $(0,1)$ for every fixed $v$. Taking the derivative with respect to $\rho$ and dividing it by $v\rho^{v-1} > 0$ yields
\begin{equation}\nonumber
\tau_v(\rho) \coloneqq \frac{\psi'_v(\rho)}{v\rho^{v-1}}=2\,v \left( \left( v-1 \right) {\rho}^{v}+2\,v \right) \ln \left( \rho \right) + \left( v+1 \right) {\rho}^{-v}+ \left( -5\,v+3 \right) {\rho}^{v} +4\,v-4 .
\end{equation}
Similarly, one can verify that this expression goes to positive infinity as $\rho \rightarrow 0_+$ and is equal to zero at $\rho=1$. Therefore, to show that ${\psi'_v(\rho)}$ is positive on $(0,1)$, it is sufficient to show that $\tau_v(\rho)$ is decreasing on $(0,1)$. Again, taking the derivative with respect to $\rho$ and dividing it by $v\rho^{v-1}>0$, we get
\begin{equation*}
\varphi_v(\rho)\coloneqq \frac{\tau'_v(\rho)}{v\rho^{v-1}}=2v\left( v-1 \right) \ln \left( \rho \right) - \left( v+1 \right) {\rho}^{-2\,v}+4v\,{\rho}^{-v} - 3\,v+1.
\end{equation*}
This expression again vanishes at $\rho=1$ and goes to negative infinity as $\rho \rightarrow 0_+$. Thus, it is sufficient to show it is increasing on $(0,1)$. Taking the derivative with respect to $\rho$ and multiplying with $\frac{\rho^{2v+1}}{2v}>0$ yield:
\begin{equation*}
\theta_v(\rho)\coloneqq\frac{\varphi_v'(\rho)\rho^{2v+1}}{2v}=\left( v-1 \right) {\rho}^{2\,v}-2\,v\,{\rho}^{v}+v+1.
\end{equation*}
At $\rho=0$, $\theta_v(\rho)$ equals $v+1>0$, and $\theta_v$ vanishes at $\rho=1$.
Taking the derivative with respect to $\rho$ and dividing by $2v\, \rho^{v-1}>0$, we obtain
$$\chi_v(\rho)\coloneqq \frac{\theta_v'(\rho)}{2v\, \rho^{v-1}}=(v-1)\rho^v - v.$$
One can verify that $\chi_v(\rho)$ is negative on $(0,1)$ for every $v\geq 1$, so $\theta_v$ is decreasing on $(0,1)$ and, since $\theta_v(1)=0$, nonnegative there. Consequently, $\varphi_v$ is increasing and hence non-positive on $(0,1)$, $\tau_v$ is decreasing and hence nonnegative, and $\psi_v$ is increasing and hence non-positive, which completes the proof.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:so_propertie} statement (1)]
Using the lemmas above, we are ready to show that when $\frac{R\mu}{C} \geq n+1$, the social benefit rate function $f_n(\rho)$ is strictly concave on $[0,1]$.
For $\rho \in [0,1)$, we can rewrite~$f_n(\rho)$ as $$f_n(\rho) = \left(R \mu - C(n+1)\right) \frac{\rho (1-\rho^n)}{1-\rho^{n+1}} + C \frac{(n+1)\rho}{1-\rho^{n+1}} - \frac{C \rho}{1-\rho}.$$
From Lemma \ref{lem:rm_concave2} and Lemma \ref{lem:rpart_concave}, we know that $\frac{\rho (1-\rho^n)}{1-\rho^{n+1}}$ is strictly concave and that $ \frac{(n+1)\rho}{1-\rho^{n+1}} - \frac{ \rho}{1-\rho}$ is concave. The assumption $\frac{R\mu}{C} \geq n+1$ guarantees that the coefficient $R\mu - C(n+1)$ is nonnegative, so $f_n(\rho)$ is the sum of a strictly concave function and a concave function, which is strictly concave for $\rho \in [0,1)$; by the continuity of $f_n$ and of its first two derivatives at $\rho=1$ (established above), the concavity extends to $[0,1]$.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:so_propertie} statement (2)]
When $n=1$, one can verify that $f_1(\rho)$ is a concave increasing function for $\rho \in \mathbb{R}_+$. We now proceed to show the function is unimodal for $n \geq 2$. A sufficient condition for $f_n(\rho)$ to be unimodal is $f_n'(0) > 0$, $\lim_{\rho \rightarrow \infty} f_n'(\rho)<0$, and $f_n'(\rho)=0$ has a unique solution. Taking the derivative of $f_n(\rho)$ yields:
\begin{equation*}
f_n'(\rho)=\left\{\begin{array}{ll}
R \mu \left(\frac{n\rho^{n+1} -(n+1) \rho^{n} +1 }{(\rho^{n+1}-1)^2} \right) + C \left(\frac{(n+1) \left(n\rho^{n+1} + 1 \right)}{(\rho^{n+1}-1)^2} - \frac{1}{(\rho-1)^2}\right) \quad &\rm{if} \ \rho \neq 1\\
\lim_{\rho \rightarrow 1}R \mu \left(\frac{n\rho^{n+1} -(n+1) \rho^{n} +1 }{(\rho^{n+1}-1)^2} \right) + C \left(\frac{(n+1) \left(n\rho^{n+1} + 1 \right)}{(\rho^{n+1}-1)^2} - \frac{1}{(\rho-1)^2}\right) & \rm{if} \ \rho=1. \\
\end{array}\right.
\end{equation*}
Showing directly that $f_n'(\rho)=0$ has exactly one positive root is non-trivial. However, it is equivalent to showing that $(1-\rho)^2 f_n'(\rho)=0$ has exactly three positive roots. One can verify that this new term can be written explicitly as $(1-\rho)^2 f_n'(\rho) = R \mu \left(\frac{n\rho^{n+1} -(n+1) \rho^{n} +1 }{(1+\rho+\ldots+\rho^n)^2} \right) + C \left(\frac{(n+1) \left(n\rho^{n+1} + 1 \right)}{(1+\rho+\ldots+\rho^n)^2} - 1\right)$ for all $\rho \in \mathbb{R}_+$. We then reformulate the root equation in polynomial form:
\begin{align}
\nonumber &R \mu \left(\frac{n\rho^{n+1} -(n+1) \rho^{n} +1 }{(1+\rho+\ldots+\rho^n)^2} \right) + C \left(\frac{(n+1) \left(n\rho^{n+1} + 1 \right)}{(1+\rho+\ldots+\rho^n)^2} - 1\right)=0\\
\nonumber &\Longleftrightarrow \\
\nonumber &R\mu (n\rho^{n+1} -(n+1) \rho^{n} +1) + C(n+1)(n\rho^{n+1}+1) = C(1+\rho + \ldots + \rho^n)^2 \\
\nonumber &\Longleftrightarrow\\
\nonumber& C(1+\rho + \ldots + \rho^n)^2 - n(R\mu + C(n+1))\rho^{n+1} +R\mu (n+1)\rho^n -R\mu-C(n+1)=0.
\end{align}
The left-hand side of the last equation is a univariate polynomial, and one can verify that its coefficient sequence has three sign changes. By Descartes' rule of signs, the number of positive roots is therefore at most three. Since $ f_n'(0) > 0$ and $\lim_{\rho \rightarrow \infty} f_n'(\rho) < 0$, $f_n'(\rho)$ must have at least one positive root. Because the factor $(1-\rho)^2$ contributes two roots, the polynomial has at least three positive roots. Therefore, the polynomial has exactly three positive roots and $f_n'(\rho)$ has exactly one, which shows that $f_n(\rho)$ is a unimodal function.
\end{proof}
\begin{proof} [Proof of Lemma \ref{lem:so_propertie} statement (3)]
The second derivative of $f_n(\rho)$ is
\begin{equation*}
f_n''(\rho)=\left\{\begin{array}{lll}
R\mu\frac{(n+1)\rho^{n-1}((n+1)(\rho-1)(\rho^{n+1}+1)-(\rho+1)(\rho^{n+1}-1))}{(1 - \rho^{n+1})^3}&\\
\qquad +C\left(\frac{(n+1)^2 \rho^{n} (2 + n + n\rho^{n+1})}{(1 - \rho^{n+1})^3} - \frac{2}{(1-\rho)^3}\right) &\textup{if} \ \rho \neq 1\\
\lim_{\rho \rightarrow 1}R\mu\frac{(n+1)\rho^{n-1}((n+1)(\rho-1)(\rho^{n+1}+1)-(\rho+1)(\rho^{n+1}-1))}{(1 - \rho^{n+1})^3} & \\
\qquad +C\left(\frac{(n+1)^2 \rho^{n} (2 + n + n\rho^{n+1})}{(1 - \rho^{n+1})^3} - \frac{2}{(1-\rho)^3}\right)&\textup{if} \ \rho = 1.
\end{array}\right.
\end{equation*}
Showing that $f_n''(\rho)=0$ has only one root is equivalent to showing that $(1-\rho)^3f_n''(\rho)=0$ has exactly four roots. One can check that $(1-\rho)^3f_n''(\rho)$ coincides with $R\mu\frac{(n+1)\rho^{n-1}((n+1)(\rho-1)(\rho^{n+1}+1)-(\rho+1)(\rho^{n+1}-1))}{(1+\rho + \ldots +\rho^n)^3} +C\left(\frac{(n+1)^2 \rho^{n} (2 + n + n\rho^{n+1})}{(1+\rho + \ldots +\rho^n)^3} - 2\right).$ Similar to the previous proof, we transform the root equation into polynomial form:
\begin{align}
\nonumber &(1-\rho)^3 f_n''(\rho)=0\\
\nonumber &\Longleftrightarrow \\
\nonumber &R\mu\frac{(n+1)\rho^{n-1}((n+1)(\rho-1)(\rho^{n+1}+1)-(\rho+1)(\rho^{n+1}-1))}{(1+\rho + \ldots +\rho^n)^3} +C\frac{(n+1)^2 \rho^{n} (2 + n + n\rho^{n+1})}{(1+\rho + \ldots +\rho^n)^3} \\
\nonumber &\qquad \qquad - 2C=0\\
\nonumber &\Longleftrightarrow \\
\nonumber &2C(1+\rho+\ldots+\rho^n)^3+(n+1)\rho^{n-1}\Big(R\mu n-\left(R\mu(n+2)+C(n+1)(n+2)\right)\rho \\
\nonumber & \qquad\qquad + R\mu (n+2)\rho^{n+1}- \left(R\mu n+C(n^2+n)\right)\rho^{n+2}\Big)=0.
\end{align}
One can verify that the coefficient sequence of this polynomial has four sign changes. By Descartes' rule of signs, the number of positive roots is therefore four, two, or zero. Since $\rho=1$ is already a root of multiplicity three (coming from the factor $(1-\rho)^3$), the polynomial has exactly four positive roots, so $f_n''(\rho)$ has exactly one positive root, which implies that the sign of $f_n''(\rho)$ changes at most once.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:so_3points}]
We first show that strong duality holds and both the primal and dual optimal solutions are attained, which is a sufficient condition for complementary slackness. To show this, we need to prove both the primal and dual problems have interior points.
Showing the existence of interior points of the primal problem is equivalent to finding a point $(1,m,d)$ that resides in the interior of the convex cone
\begin{equation}\nonumber
\mathcal V =\left\{(l,t,u) \in \mathbb{R}^3: \ \exists \nu \in \mathcal M_+ \ \textnormal{such that}
\hspace{-3mm}\begin{array}{ll}
&\int_{\Xi} \nu(\rm d\rho)= \mathit l \\
&\int_{\Xi} \rho \, \nu(\rm d\rho)=\mathit{t}\\
&\int_{\Xi} |\rho-t| \, \nu(\rm{d}\rho)=\mathit u
\end{array}
\right\},
\end{equation}
where $\Xi=[a,b]$. We denote by $\mathbb B_\kappa(c)$ the closed Euclidean ball of radius $\kappa \geq 0$ centered at $c$. Choose any point $(l_s,t_s,u_s) \in \mathbb B_\kappa(1) \times \mathbb B_\kappa(m) \times \mathbb B_\kappa(d)$ with sufficiently small $\kappa>0$, and consider the measure
\begin{equation}\nonumber
\nu_s=\frac{u_s}{2(t_s-a)} \cdot \delta_a+\left(l_s-\frac{u_s}{2(t_s-a)}-\frac{u_s}{2(b-t_s)}\right)\cdot \delta_{t_s} + \frac{u_s}{2(b-t_s)}\cdot \delta_b,
\end{equation}
where $s \cdot \delta_x$ denotes the measure that places mass $s$ at the point $x$. By construction, this measure satisfies $\int_{\Xi} \nu_s(\rm d\rho)= \mathit l_s$, $\int_{\Xi} \rho \, \nu_s(\rm d\rho)=\mathit{t_s}$ and $\int_{\Xi} |\rho-t_s| \, \nu_s(\rm{d}\rho)=\mathit u_s$ for a sufficiently small $\kappa$ (since $m \in (a,b)$ and $d \in (0,\overline d)$).
Therefore, strong duality holds and the optimal values of the primal and dual problems coincide. Moreover, as the primal problem admits interior points and the common optimal value is finite, the dual optimal solution is attained~\cite[Proposition 3.4]{shapiro2001duality}.
Noticing that the support $[a,b]$ is compact and that the social benefit rate function $f_n(\rho)$ and the moment functions $\rho$ and $|\rho-m|$ are continuous, we can invoke \cite[Corollary 3.1]{shapiro2001duality} to establish that the primal optimal solution is attained as well.
In summary, strong duality holds and both the primal and dual optimal solutions are attained, which implies that complementary slackness holds~\cite[Proposition 2.1]{shapiro2001duality}.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:rm_concave}]
We know that the revenue rate function is continuous for $\rho \in \mathbb R_+$. Therefore, employing Lemma \ref{lem:rm_concave2} completes the proof.
\end{proof}
\begin{proof}[Proof of Proposition \ref{prop:so_mad}]
The dual problem \eqref{eq:so_mean_abs_dual} can be equivalently written as
\begin{equation*}
\begin{array}{ccll}
&\displaystyle \sup_{\alpha,\beta,\gamma \in \mathbb{R}}& \displaystyle \mathbb E_{\mathbb P} \left[ \alpha|\rho-m| +\beta \rho + \gamma \right]\\
&\textnormal{s.t.}& \alpha|\rho-m| +\beta \rho + \gamma \leq f_n(\rho) \quad \quad \forall \rho \in [a,b],
\end{array}
\end{equation*}
where $\mathbb P\in\mathcal P$ is an arbitrary probability measure in the ambiguity set. Observe that the left-hand side of the constraint is a two-piece piecewise affine function with a breakpoint at the mean $m$. Therefore, we can interpret the dual problem as finding a feasible two-piece piecewise affine function with the largest expected value. We now use this interpretation to derive the desired results.
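Concretely, for $\rho\in[a,m]$ the left-hand side of the constraint equals $(\beta-\alpha)\rho+(\gamma+\alpha m)$, while for $\rho\in[m,b]$ it equals $(\beta+\alpha)\rho+(\gamma-\alpha m)$; the two affine pieces meet at the breakpoint $\rho=m$.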
First, we illustrate the case when $f_n(b) + f'_n(b) (m-b) \geq f_n(m)$. The constraint of the dual problem indicates that $f_n(\rho)$ majorizes $\alpha|\rho-m| +\beta \rho + \gamma$. One can verify that the two-piece piecewise affine function with the largest expected value is the one that touches $f_n(\rho)$ at three points: $\rho=a,m$ and $b$; see Figure \ref{fig:mad_impro_1} for an illustrative example. By complementary slackness in Lemma \ref{lem:so_3points}, the optimal distribution can only assign positive mass to these three points, which yields the following system of linear equations:
\begin{equation*}
\label{eq:so_mad_3point_prob1}
\begin{array}{cc}
& p_1(a-m) + p_2(m-m) + p_3(b-m) = 0\\
& p_1|a-m| + p_2|m-m| + p_3|b-m|= d\\
& p_1 + p_2 + p_3 = 1.
\end{array}
\end{equation*}
Solving this system of linear equations leads to the first result in Proposition \ref{prop:so_mad}.
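For concreteness, the elimination can be carried out symbolically; the following sketch (not part of the proof, and assuming $a<m<b$ so that the absolute values resolve as above) solves the system and recovers closed-form expressions for the probabilities.
\begin{verbatim}
import sympy as sp

# Illustration only: solve the three-point system above, with the mean
# constraint in its centred form.  Assumes a < m < b, so |a-m| = m-a and
# |b-m| = b-m.
a, b, m, d, p1, p2, p3 = sp.symbols('a b m d p1 p2 p3')
sol = sp.solve(
    [p1*(a - m) + p3*(b - m),       # mean of the candidate distribution is m
     p1*(m - a) + p3*(b - m) - d,   # mean-absolute deviation equals d
     p1 + p2 + p3 - 1],             # probabilities sum to one
    [p1, p2, p3], dict=True)[0]
print(sp.simplify(sol[p1]))   # expected: d/(2*(m - a))
print(sp.simplify(sol[p3]))   # expected: d/(2*(b - m))
print(sp.simplify(sol[p2]))   # expected: 1 - d/(2*(m - a)) - d/(2*(b - m))
\end{verbatim}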
Next, we turn to the two cases in which $f_n(b) + f'_n(b) (m-b) \leq f_n(m)$.
If $0<d< d_0\coloneqq\frac{2(m-a)(\rho_{t}-m)}{\rho_t-a}$, we claim that the extremal distribution that solves \eqref{eq:so_mean_abs_primal} is a three-point distribution. To see this, recall from Lemma \ref{lem:so_3points} that complementary slackness holds, which means that the extremal distribution is supported on points where the dual constraint is binding. Since the two-piece piecewise affine function can touch $f_n(\rho)$ at no more than three points under the constraint \[\alpha|\rho-m| +\beta \rho + \gamma \leq f_n(\rho) \quad \quad \forall \rho \in [a,b],
\]
the extremal distribution is either a one-point, two-point, or a three-point distribution. We readily exclude the possibility that the extremal distribution is a one-point distribution because the mean-absolute deviation of a one-point distribution is zero. Next,
we explain why the extremal distribution cannot be a two-point distribution. Suppose there exists a two-point distribution supported on $\{\rho_1,\rho_2\}$ that solves the worst-case expectation problem. Then, by complementary slackness, the dual constraint $
f_n(\rho)=\alpha|\rho-m| +\beta \rho + \gamma$ is binding at these two points. Without loss of generality, we assume $\rho_1 \in [a,m)$ and $\rho_2 \in (m,b]$. Since $f_n(\rho)$ is strictly concave for $\rho \in [a,m)$ and the dual constraint requires $ \alpha|\rho-m| +\beta \rho + \gamma \leq f_n(\rho)$, we thus have $\rho_1=a$. Since $\rho_t$ is defined as the $\rho$ coordinate of the point such that the line segment between $(m,f_n(m))$ and $(\rho_t,f_n(\rho_t))$ is tangent to $f_n(\rho)$, we must have $\rho_2 \geq \rho_t$; otherwise, the dual constraint would be violated. Since $\rho_2-\rho_1 \geq \rho_t - a$, the corresponding mean-absolute deviation is at least $d_0$, contradicting $d<d_0$. Therefore, the extremal distribution cannot be a two-point distribution, i.e., it is a three-point distribution.
Next, it can be shown that if $f_n(\rho)$ intersects $\alpha|\rho-m| +\beta \rho + \gamma$ at three points, then these three points must be $\rho=a,m$ and $\rho_t$. Therefore, we have the following system of linear equations:
\begin{equation*}
\label{eq:so_mad_3point_prob}
\begin{array}{cc}
& p_1(a-m) + p_2(m-m) + p_3(\rho_t-m) = 0\\
& p_1|a-m| + p_2|m-m| + p_3|\rho_t-m|= d\\
& p_1 + p_2 + p_3 = 1.
\end{array}
\end{equation*}
Solving this system of linear equations leads to the second result in Proposition \ref{prop:so_mad}.
We now establish that if $d_0\leq d$, the extremal distribution is a two-point distribution. Similarly, by the fact that the extremal distribution is a discrete distribution supported on at most three points, we just need to show that there does not exist a one-point or three-point extremal distribution that solves \eqref{eq:so_mean_abs_primal}. We can easily exclude the possibility of a one-point distribution, since its mean-absolute deviation is $0$. As we described previously, the extremal three-point distribution is supported on $\rho=a, m$ and $\rho_t$, and the largest mean-absolute deviation that can be achieved within this support is $d_0=\frac{2(m-a)(\rho_{t}-m)}{\rho_t-a}$. Since $d\geq d_0$, the extremal distribution can only be a two-point distribution. One of the support points is given by $\rho_1=a$, while the other one, $\rho_2$, is determined by the value of $d$, which yields the following linear equations:
\begin{equation}
\label{eq:so_mad_2point_prob}
\begin{array}{cc}
& p_1(a-m) + p_2(\rho_2-m) = 0\\
& p_1|a-m| + p_2|\rho_2-m|= d\\
& p_1 + p_2 = 1.
\end{array}
\end{equation}
Solving this system of equations, we obtain the optimal solution explicitly as:
$$ p_1 = \frac{d}{2(m-a)},\ \rho_1=a; \ p_2 = 1-\frac{d}{2(m-a)},\ \rho_2 = \frac{da+2m(a-m)}{d+2(a-m)}.$$
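As a quick sanity check, note that $p_1(m-a)=\frac{d}{2}$ and, using $d<\frac{2(m-a)(b-m)}{b-a}<2(m-a)$ (so that $\rho_2>m$),
$$p_2(\rho_2-m)=\Big(1-\frac{d}{2(m-a)}\Big)\cdot\frac{d(m-a)}{2(m-a)-d}=\frac{d}{2},$$
so the constraints $p_1(a-m)+p_2(\rho_2-m)=0$ and $p_1|a-m|+p_2|\rho_2-m|=d$ are indeed satisfied, while $p_1+p_2=1$ holds by construction.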
This completes the proof.
\end{proof}
\section{Proofs of Section \ref{sec:data-driven-mad}}\label{sec:proof_sec4}
\begin{proof}[Proof of Theorem~\ref{thm:data-driven-social}]
Problem \eqref{eq:ddmad_inf} can be equivalently written as:
\begin{equation*}
\begin{array}{ccll}
&\displaystyle \inf_{\nu \in \mathcal M_+}&\displaystyle \int_{\Xi} f_n(\rho) \, \nu({\rm d}\rho) \\
&\textnormal{s.t.}& \displaystyle \int_{\Xi} |\rho-\hat m| \, \nu({\rm d}\rho)= d\\
&& \displaystyle \int_{\Xi} \rho \, \nu({\rm d}\rho)= m\\
&& \displaystyle \int_{\Xi} \nu(\rm d\rho)= 1\\
&& m_l \leq m \leq m_u \\
&& d_l \leq d \leq d_u.
\end{array}
\end{equation*}
Dualizing this optimization problem yields
\begin{equation*}
\begin{array}{ccll}
&\displaystyle\sup_{\theta \in \mathbb{R}^4_+,\gamma \in \mathbb{R}}&\displaystyle \gamma+\theta_1 d_l - \theta_2 d_u + \theta_3 m_l - \theta_4 m_u\\
&\textnormal{s.t.}& \displaystyle (\theta_1 - \theta_2)|\rho-\hat m| +(\theta_3-\theta_4) \rho + \gamma \leq f_n(\rho) \quad \quad \forall \rho \in [a,b].
\end{array}
\end{equation*}
Applying algebraic reductions and invoking Lemma \ref{poly_lem_so1} lead to the desired reformulation. The derivation closely follows that of Theorem \ref{thm:so_mad}; we omit the details for brevity.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:data-driven-revenue}]
The dual problem is given by
\begin{equation*}
\begin{array}{ccll}
&\displaystyle\sup_{\theta \in \mathbb{R}^4_+,\gamma \in \mathbb{R}}&\displaystyle \gamma+\theta_1 d_l - \theta_2 d_u + \theta_3 m_l - \theta_4 m_u\\
&\textnormal{s.t.}& \displaystyle (\theta_1 - \theta_2)|\rho-\hat m| +(\theta_3-\theta_4) \rho + \gamma \leq r_n(\rho) \quad \quad \forall \rho \in [a,b].
\end{array}
\end{equation*}
Since the revenue rate function $r_n(\rho)$ is concave for $\rho \geq 0$ and the left-hand side of the constraint is piecewise affine with a single breakpoint at $\hat m$, the semi-infinite constraint is satisfied if and only if it is satisfied at the three points $\rho=a,\hat m, b$, which completes the proof.
\end{proof}
\section{Distributionally Robust Model with a Wasserstein Ambiguity Set}
\label{sec:wasser}
In this section, we study the DRO model with a Wasserstein ambiguity set \cite{gao2016distributionally,esfahani2017data}.
We develop solution schemes to find the optimal threshold strategies for a social optimizer and a revenue maximizer, respectively given by $\hat{n}_s$ and $\hat{n}_r$, such that the worst-case expected benefit rates are maximized.
Here, the worst-case is taken over the Wasserstein ambiguity set containing all probability distributions (discrete or continuous) sufficiently close to the discrete empirical distribution, where the closeness between two distributions is measured in terms of the Wasserstein metric \cite{esfahani2018data}.
\begin{defn}(Wasserstein Metric)
\label{def:wasser}
For any $r \geq 1$, let $\mathcal M^r(\Xi)$ be the set of all probability distributions $\mathbb P$ supported on $\Xi$ satisfying $\mathbb E_{\mathbb P}[\| \xi \|^r] = \int_\Xi \| \xi \|^r \mathbb P(\rm d \xi) < \infty$. The $r$-Wasserstein distance between two distributions $\mathbb P_1, \mathbb P_2 \in \mathcal M^r(\Xi)$ is defined as
\begin{equation*}
\mathcal W^r(\mathbb P_1, \mathbb P_2) = \inf_{\mathbb Q}\left\{\left(\int_{\Xi^2} \| \xi_1 - \xi_2 \|^r \mathbb Q(\rm d\xi_1, \rm d \xi_2)\right)^{\frac{1}{r}}\right\},
\end{equation*}
where $\mathbb Q$ is a joint distribution of $ \tilde\xi_1$ and $\tilde \xi_2$ with marginals $\mathbb P_1$ and $\mathbb P_2$, respectively.
\end{defn}
The Wasserstein distance $\mathcal W^r(\mathbb P_1, \mathbb P_2)$ can be viewed as the ($r$-th root of the) minimum cost for moving the distribution $\mathbb P_1$ to $\mathbb P_2$, where the cost of moving a unit mass from $ \xi_1$ to $ \xi_2$ amounts to $\| \xi_1 - \xi_2 \|^r$. The
joint distribution $\mathbb Q$ of $ \tilde\xi_1$ and $ \tilde\xi_2$ is therefore naturally interpreted as a mass transportation plan~\cite{esfahani2018data}. Similarly to the data-driven setting in Section \ref{sec:data-driven-mad}, we assume that we have observed a finite set of $N$ independent realizations
given by $\{\hat{\rho}_i\}_{i \in [N]}$, where $\hat \rho_i = \hat \lambda_i/\mu$. Using the observations, we define the empirical distribution $\hat {\mathbb P}_N \coloneqq \frac{1}{N} \sum_{i \in [N]} \delta_{\hat \rho_i}$
as the discrete uniform distribution on the samples.
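As an aside, the following sketch (with made-up sample values, not part of the model) illustrates how the type-$1$ Wasserstein distance between the empirical distribution and another discrete distribution on $\Xi$ can be evaluated numerically.
\begin{verbatim}
import numpy as np
from scipy.stats import wasserstein_distance

# Hypothetical traffic-intensity samples hat rho_i; the empirical
# distribution hat P_N places mass 1/N on each sample.
rho_hat = np.array([0.42, 0.55, 0.61, 0.48, 0.70])
weights = np.full(len(rho_hat), 1.0 / len(rho_hat))

# A nearby candidate distribution obtained by shifting every sample by 0.03.
shifted = rho_hat + 0.03

# Type-1 Wasserstein distance between the two discrete distributions
# (here equal to 0.03).
print(wasserstein_distance(rho_hat, shifted, weights, weights))
\end{verbatim}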
In this paper, we consider the Wasserstein ambiguity set defined as
\begin{equation}
\label{eq:wasser_ball}
\mathcal B_\epsilon(\mathbb {\hat P} _N) \coloneqq \left\{\mathbb P \in \mathcal P_0(\Xi):
\mathcal W^1(\mathbb P, \mathbb {\hat P}_N) \leq \epsilon
\right\},
\end{equation}
which is a neighborhood around the empirical distribution. The ambiguity set contains all distributions supported on $\Xi$ that are of type-$1$ Wasserstein distance less than or equal to $\epsilon$ from $\mathbb {\hat P}_N$. By adjusting the radius $\epsilon$ of the ball, one can control the degree of conservatism of the DRO model. If $\epsilon = 0$, the Wasserstein ball shrinks to a singleton set containing only the empirical distribution $\mathbb{\hat P}_N$.
One can further show that this data-driven DRO model converges to the corresponding true stochastic program as the sample size $N$ tends to infinity \cite{esfahani2017data}.
We derive the optimal threshold strategies $\hat n_s$ and $\hat n_r$ for a social optimizer and a revenue maximizer, respectively. As stated in Section \ref{sec:droqueue}, the optimal joining threshold $\tilde n_e$ for an individual customer is independent of the arrival rate, and we have $\tilde n_e = n_e$ from \eqref{eq:io}.
\subsection{Social Optimizer}
The objective of a social optimizer is to obtain an optimal joining threshold $\hat n_s$ that maximizes the worst-case expected benefit, i.e., $\hat n_s \in \argmax_{n \in \mathbb Z_+} \{Z_s(n)\}$, where
\begin{equation}\label{eq:worst-case-wasser}
Z_s(n) \coloneqq \inf_{\mathbb P \in \mathcal B_\epsilon(\mathbb {\hat P} _N) } \mathbb E_{\mathbb P} \left[f_n(\tilde\rho) \right].
\end{equation}
The worst-case expectation is computed over all distributions in the Wasserstein ambiguity set $\mathcal B_\epsilon(\mathbb {\hat P} _N)$ with the support set $\Xi=[a,b]$.
\begin{thm}
\label{wass_so}
For any $n \geq 1$ and $\mathcal P = \mathcal B_\epsilon(\hat {\mathbb P}_N)$,
the worst-case expectation $Z_s(n)$ coincides with the optimal objective value of the following semidefinite program:
\begin{align*}
\nonumber \sup \;\; &-\alpha \epsilon + \frac{1}{N}\sum_{i \in [N]} s_i \\
\nonumber \textnormal{s.t.} \;\; &\alpha \in \mathbb R_+, \ s \in \mathbb R^N, \ y^i, z^i \in \mathbb R^{n+3}, \ X^i, W^i \in \mathbb S^{n+3}_+ &&\forall i \in [N]\\
&y_0^i = -s_i+\alpha \hat \rho_i, \; y_1^i = s_i-\alpha-\alpha\hat\rho_i+R\mu-C, y_2^i=\alpha-R\mu\\
\nonumber& y^i_3 = \cdots = y_n^i = 0, \ y^i_{n+1} = s_i-\alpha\hat\rho_i-R\mu+C(n+1),\\
\nonumber& y^i_{n+2} = -s_i+\alpha+\alpha\hat\rho_i+R\mu+C-C(n+1) , \ y^i_{n+3} = -\alpha && \forall i \in [N] \\
\nonumber & z_0^i = -s_i- \alpha\hat \rho_i, \; z_1^i = s_i+\alpha+\alpha\hat\rho_i+R\mu-C, z_2^i=-\alpha-R\mu\\
\nonumber& z^i_3 = \cdots = z_n^i = 0, \ z^i_{n+1} = s_i+\alpha\hat\rho_i-R\mu+C(n+1),\\
\nonumber& z^i_{n+2} = -s_i-\alpha-\alpha\hat\rho_i+R\mu+C-C(n+1) , \ z^i_{n+3} = \alpha && \forall i \in [N] \\
\nonumber & \sum_{u + v = 2l - 1} x^i_{uv} = 0 && \forall l \in [n + 3] \ i \in [N]\\ \nonumber &
\sum_{q = 0}^{l}\sum_{r=q}^{n+3+q-l} y^i_r {r \choose q} {n+3-r \choose l - q} a^{r-q}\hat\rho_i^q = \sum_{u + v = 2l} x^i_{uv} && \forall l \in [n+3] \cup \{0\} \ i \in [N] \\ \nonumber & \sum_{u + v = 2l - 1} w^i_{uv} = 0 && \forall l \in [n+3] \ i \in [N]\\ \nonumber &
\sum_{q = 0}^{l}\sum_{r=q}^{n+3+q-l} z^i_r {r \choose q} {n+3-r \choose l - q} \hat\rho_i^{r-q}b^q = \sum_{u + v = 2l} w^i_{uv} && \forall l \in [n+3]\cup \{0\} \ i \in [N].
\end{align*}
\end{thm}
\begin{proof}
The distributionally robust model with the ambiguity set \eqref{eq:wasser_ball} can be equivalently written as
\begin{align*}
\inf \quad & \frac{1}{N} \sum_{i \in [N]} \int_\Xi f_n(\rho) \mathbb P_i(\rm d \rho) \\ \text{s.t.} \quad & \mathbb P_i \in \mathcal P_0(\Xi) \quad \forall i \in [N] \\
& \frac{1}{N} \sum_{i \in [N]} \int_{\Xi} \| \rho - {\hat \rho}_i \| \mathbb P_i(\rm d \rho) \leq \epsilon.
\end{align*}
Its strong dual problem is given by~\cite[Theorem 4.2]{esfahani2017data}
\begin{equation*}
\begin{array}{ccll}
&\displaystyle\sup _{\alpha \in \mathbb{R}_+, \bm s \in \mathbb{R}^N}&\displaystyle -\alpha \epsilon + \frac{1}{N} \sum_{i \in [N]} s_i\\
&\textnormal{s.t.}& \displaystyle s_i - \alpha \| \rho - \hat \rho_i\| \leq f_n(\rho) \qquad \forall i \in [N] \ \forall \rho \in [a,b].
\end{array}
\end{equation*}
We can deal with each constraint separately for the cases $\rho \leq \hat \rho_i$ and $\rho \geq \hat \rho_i$, and consequently we have
\begin{equation*}
\begin{array}{ccll}
&\displaystyle\sup _{\alpha \in \mathbb{R}_+, \bm s \in \mathbb{R}^N}&\displaystyle -\alpha \epsilon + \frac{1}{N} \sum_{i \in [N]} s_i\\
&\textnormal{s.t.}& \displaystyle s_i + \alpha ( \rho - \hat \rho_i) \leq f_n(\rho) \qquad \forall i \in [N] \ \forall \rho \in [a,\hat \rho_i]\\
&&\displaystyle s_i - \alpha ( \rho - \hat \rho_i) \leq f_n(\rho) \qquad \forall i \in [N] \ \forall \rho \in [\hat \rho_i,b].
\end{array}
\end{equation*}
Substituting the definition of $f_n(\rho)$ in \eqref{eq:social_rate} and applying algebraic reductions yield the following polynomial inequalities for each $i \in [N]$:
\begin{align}
&(-s_i + \alpha \hat \rho_i) \rho^0 + (s_i - \alpha - \alpha \hat \rho_i +R\mu -C) \rho + (\alpha-R\mu) \rho^2 + (s_i - \alpha \hat \rho_i - R\mu +C(n+1)) \rho^{n+1} \nonumber\\
&\hspace{5.5em} + (-s_i+\alpha +\alpha \hat \rho_i +R\mu +C-C(n+1)) \rho^{n+2} -\alpha \rho^{n+3} \geq 0 \quad \forall \rho \in [a,\hat \rho_i], \nonumber\\
&(-s_i - \alpha \hat \rho_i) \rho^0 + (s_i + \alpha + \alpha \hat \rho_i +R\mu -C) \rho + (-\alpha-R\mu) \rho^2 + (s_i + \alpha \hat \rho_i - R\mu +C(n+1)) \rho^{n+1} \nonumber\\
&\hspace{5.5em} + (-s_i-\alpha -\alpha \hat \rho_i +R\mu +C-C(n+1)) \rho^{n+2}+\alpha \rho^{n+3} \geq 0 \quad \forall \rho \in [\hat \rho_i,b]. \label{so_const}
\end{align}
The inequalities are of the form $g_1^i(\rho) = \sum_{r = 0}^{n+3} y^i_r \rho^r \geq 0$ for $\rho \in [a,\hat \rho_i]$ and $g_2^i(\rho) = \sum_{r = 0}^{n+3} z^i_r \rho^r \geq 0$ for $\rho \in [\hat \rho_i,b]$, where $ y^i$ and $ z^i$ represent the coefficients of the respective polynomial inequalities. We next invoke the result of Lemma \ref{poly_lem_so1} for every $i \in [N]$ to express the inequalities in \eqref{so_const} as semidefinite constraints. This leads to the desired semidefinite program, which completes the proof.
\end{proof}
To determine an optimal joining threshold, we compute the worst-case expected benefit rate ${Z}_s(n)$ for every $n \in \mathbb Z_+$ with $1\leq n \leq n_e$, using the result of Theorem \ref{wass_so}, and then select the best threshold $\hat n_s \in \argmax_{n\in \mathbb Z_+}\{ Z_s(n)\}$.
\subsection{Revenue Maximizer}
The objective of a revenue maximizer is to find an optimal threshold $\hat n_r$ that maximizes the worst-case expected revenue rate of a firm, i.e., $\hat n_r \in \argmax_{n \in \mathbb Z_+} \{Z_r(n)\}$, where the worst-case expectation is computed over all the distributions in the Wasserstein ambiguity set $\mathcal B_\epsilon(\mathbb {\hat P} _N)$ defined by \eqref{eq:wasser_ball} with support set $\Xi = [a,b]$. The worst-case expected profit rate $ Z_r(n)$ is given by
\begin{equation}
\label{eq:dr_rm}
Z_r(n) \coloneqq \inf_{\mathbb P \in \mathcal B_\epsilon(\mathbb {\hat P} _N)} \mathbb E_{\mathbb P} \left[r_n(\tilde\rho) \right].
\end{equation}
\begin{thm}
\label{wass_rm}
For any $n \geq 1$, the worst-case expectation $Z_r(n)$ coincides with the optimal objective value of the following linear program:
\begin{equation*}
\begin{array}{ccll}
&\displaystyle \sup _{\alpha \in \mathbb{R}_+, s \in \mathbb{R}^N}&\displaystyle -\alpha \epsilon + \frac{1}{N} \sum_{i \in [N]} s_i\\
&\textnormal{s.t.}& \displaystyle s_i + \alpha ( a - \hat \rho_i) \leq r_n(a) & \forall i \in [N]\\
&& \displaystyle s_i \leq r_n(\hat \rho_i) & \forall i \in [N]\\
&& \displaystyle s_i - \alpha ( b - \hat \rho_i) \leq r_n(b) & \forall i \in [N].
\end{array}
\end{equation*}
\end{thm}
\begin{proof}
The strong dual problem of $\inf_{\mathbb P \in \mathcal B_\epsilon(\mathbb {\hat P} _N)} \mathbb E_{\mathbb P} \left[ r_n(\tilde\rho) \right]$ is given by
\begin{equation*}
\begin{array}{ccll}
&\displaystyle\sup _{\alpha \in \mathbb{R}_+, s \in \mathbb{R}^N}&\displaystyle -\alpha \epsilon + \frac{1}{N} \sum_{i \in [N]} s_i\\
&\textnormal{s.t.}& \displaystyle s_i - \alpha \| \rho - \hat \rho_i\|\leq r_n(\rho) \qquad \forall i \in [N]\ \forall \rho \in [a,b].
\end{array}
\end{equation*}
Since the revenue rate function $r_n(\rho)$ is concave for $\rho \geq 0$, the semi-infinite constraints are satisfied if and only if each constraint is satisfied at three points $\rho=a,\hat \rho_i, b$, and consequently we have
\begin{equation*}
\begin{array}{ccll}
Z_r(n) \coloneqq&\displaystyle\sup _{\alpha \in \mathbb{R}_+, s \in \mathbb{R}^N}&\displaystyle -\alpha \epsilon + \frac{1}{N} \sum_{i \in [N]} s_i\\
&\textnormal{s.t.}& \displaystyle s_i + \alpha ( a - \hat \rho_i)\leq r_n(a) \qquad& \forall i \in [N]\\
&& \displaystyle s_i + \alpha ( \hat \rho_i - \hat \rho_i)\leq r_n(\hat \rho_i) \qquad& \forall i \in [N]\\
&& \displaystyle s_i - \alpha ( b - \hat \rho_i) \leq r_n(b) \qquad &\forall i \in [N].
\end{array}
\end{equation*}
Thus, the claim follows.
\end{proof}
We compute the worst-case expected profit rate ${Z}_r(n)$ for every $n \in \mathbb Z_+$, $1 \leq n \leq n_e$, using the result of Theorem \ref{wass_rm}, and obtain an optimal joining threshold $\hat n_r$ such that $\hat n_r \in \argmax_{n \in \mathbb Z_+}\{ Z_r(n)\}$.
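The linear program in Theorem \ref{wass_rm} is straightforward to set up numerically. The sketch below is an illustration only: the function \texttt{r} is a placeholder concave revenue rate standing in for $r_n(\rho)$, and the samples, support $[a,b]$ and radius $\epsilon$ are made-up values.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def r(rho):
    return rho / (1.0 + rho)   # placeholder: concave and increasing on rho >= 0

a, b, eps = 0.2, 0.9, 0.05
rho_hat = np.array([0.42, 0.55, 0.61, 0.48, 0.70])
N = len(rho_hat)

# Decision variables x = (alpha, s_1, ..., s_N); maximize -alpha*eps + mean(s),
# i.e. minimize eps*alpha - mean(s).
c = np.concatenate(([eps], -np.ones(N) / N))

A_ub, b_ub = [], []
for i, rh in enumerate(rho_hat):
    e_i = np.zeros(N); e_i[i] = 1.0
    A_ub.append(np.concatenate(([a - rh], e_i))); b_ub.append(r(a))    # s_i + alpha(a - rho_i) <= r(a)
    A_ub.append(np.concatenate(([0.0], e_i)));    b_ub.append(r(rh))   # s_i <= r(rho_i)
    A_ub.append(np.concatenate(([-(b - rh)], e_i))); b_ub.append(r(b)) # s_i - alpha(b - rho_i) <= r(b)

bounds = [(0, None)] + [(None, None)] * N   # alpha >= 0, s_i free
res = linprog(c, A_ub=np.array(A_ub), b_ub=b_ub, bounds=bounds)
print(-res.fun)                              # worst-case expected revenue rate Z_r(n)
\end{verbatim}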
\end{document}
\begin{document}
\title{$ q$-Independence of the Jimbo-Drinfeld Quantization
}
\author{Olof Giselsson}
\date{}
\maketitle
\begin{abstract}
Let $\mathrm G$ be a connected semi-simple compact Lie group and, for $0<q<1$, let $(\mathbb{C}[\mathrm{G]_q},\Delta_q)$ be the Jimbo-Drinfeld $q$-deformation of $\mathrm G$. We show that the $C^*$-completions of $\mathbb{C}[\mathrm{G]_q}$ are isomorphic as $C^{*}$-algebras for all values of $q$.
Moreover, these isomorphisms are equivariant with respect to the right-actions of the maximal torus.
\end{abstract}
\section{\bf\textsc{Introduction}}
The quantized universal enveloping algebra $\mathrm{U_{q}}(\mathfrak{g})$ of a semi-simple Lie algebra $\mathfrak g$ was introduced by Drinfeld and Jimbo in the mid-80's~\cite{dr,jimbo}.
In~\cite{dr2}, Drinfeld also introduced their dual objects, deformations $\mathbb{C}[\mathrm{G]_{q}}$ of the Hopf algebra of regular functions on a semi-simple Lie group $\mathrm G.$ Moreover, when $\mathrm G$ is compact, the algebra $\mathbb{C}[\mathrm{G]_{q}}$ can be given the structure of a Hopf $*$-algebra. In this case one can see that the enveloping $C^{*}$-algebra of $\mathbb{C}[\mathrm{G]_{q}}$ exists, giving a natural $q$-analogue $C(\mathrm{G)_{q}}$ of the algebra of continuous functions on $\mathrm{G}.$ The analytic approach to quantum groups was initially proposed by Woronowicz~\cite{wz1}. In the 90's Soibelman gave a complete classification of the irreducible $*$-representations of $\mathbb{C}[\mathrm{G]_{q}}$. These were shown to be in one-to-one correspondence with the symplectic leaves of $\mathrm{G}$ coming from the Poisson structure on $C(\mathrm G)$ determined by the quantization when $q\to 1$. However, it was not clear how the $C^{*}$-algebraic structure of $C(\mathrm{G)_{q}}$ depended on the parameter $q.$ In fact, several pieces of evidence pointed towards the structure actually being independent of it.
In the special case of $\mathrm{SU}_{2},$ it was observed (see~\cite{wz1}) that the $C^{*}$-algebras $C(\mathrm{SU_{2}})_{q},$ $q\in (0,1)$ are all isomorphic. In the mid 90's, G. Nagy showed in~\cite{gnagy} that the same holds for $C(\mathrm{SU_{3})_{q}}$. Moreover, it was also shown by Nagy (in~\cite{gnagy1}) that $C(\mathrm{SU_{n})_{q}}$ is KK-equivalent to $C(\mathrm{SU_{n})_{s}},$ for all $n\in \mathbb{N}$ and all $q,s\in (0,1).$ This was extended by Neshveyev-Tuset in~\cite{nt} to yield a KK-equivalence between $C(\mathrm{G)_{q}}$ and $C(\mathrm{G)_{s}}$ for any compact simply connected semi-simple Lie group $\mathrm G$. In this paper, we show that some of the ideas that underpin Nagy's proof of the $q$-independence of $C(\mathrm{SU_{3})_{q}}$ can be extended to give the following result: for a fixed symplectic leaf $U\subseteq \mathrm{G},$ with corresponding $*$-representation $\pi^{q}$ of $\mathbb{C}[\mathrm{G]_{q}},$ we have an isomorphism $\overline{\mathrm{Im}\,\pi^{q}}\cong \overline{\mathrm{Im}\,\pi^{s}}$ for all $q,s\in (0,1).$ Using this, we prove that $C(\mathrm{G)_{q}}\cong C(\mathrm{G)_{s}},$ thus showing that these mutually non-isomorphic compact quantum groups are all isomorphic as $C^{*}$-algebras. This confirms the conjecture made in~\cite{gnagy}.
\\
The paper is organized as follows. We finish this section by giving some geometric intuitions underlying the proof and describing the idea of the proof using this geometric picture. We then present and prove the lifting theorem used in the proof of the main result. Section $2$ goes through the formal definitions of $\mathbb{C}[\mathrm{G]_{q}}$ as well as its representation theory. In Section $3,$ we prove some more specific results regarding representations and how these depend on the $q$ parameter. In Section $4,$ we state and prove the main result.
\subsection{\bf\textsc{Outline of the Proof}}
To explain the main ideas of the proof, it is worth starting by considering the case of $\mathrm{G}=\mathrm{SU_{3}},$ previously covered by Nagy. There is an irreducible $*$-representation $\pi^{q}:\mathbb{C}[\mathrm{SU_{3}]_{q}}\rightarrow \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})^{\otimes 3})$ that, in Nagy's words, corresponds to the ``big symplectic leaf'' in $\mathrm{SU_{3}}.$ However, there is an inherent problem when trying to determine if $\overline{\mathrm{Im}\,\pi^{q}}\cong \overline{\mathrm{Im}\,\pi^{s}}$ for different $q,s\in (0,1).$ As $\pi^{q}$ can be seen to vary (in a certain sense) continuously in $q,$ one intuitive approach could be to let $q\to 0,$ and then find some natural set of generators such that an isomorphic set of generators can be found in $\overline{\mathrm{Im}\,\pi^{q}}$ for each $q\in (0,1).$ This method works, for example, in the case of $C(\mathrm{SU_{2})_{q}}.$ However, for $\mathbb{C}[\mathrm{SU_{3}]_{q}},$ there seems to be no simple way of taking the limit $q\to 0.$ As it sits, the image of $\pi^{q}$ is simply too ``twisted'' to allow passing to any limit.
\\
These problems are resolved in the following way: We observe that $\overline{\mathrm{Im}\,\pi^{q}},$ for all $q\in (0,1),$ contains the compact operators $\K\subseteq \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})^{\otimes 3}),$ and moreover, the intersection $\mathrm{Im}\,\pi^{q}\cap\K$ is non-trivial. The compact operators form a minimal ideal in $\overline{\mathrm{Im}\,\pi^{q}},$ in the sense that it is contained in any other non-zero ideal. We now consider the composition
$$
\mathbb{C}[\mathrm{SU_{3}]_{q}}\overset{\pi^{q}}\longrightarrow \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})^{\otimes 3})\overset{p}\longrightarrow \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})^{\otimes 3})/\K\cong \mathcal{Q}(\ell^{2}(\mathbb{Z}_{+})^{\otimes 3}),
$$
where $p$ is the quotient map $x\mapsto x+\K,$ and we then proceed by analyzing $p\circ \pi^{q}.$ It is clear that $\pi^{q}$ cannot be a direct summand in $p\circ \pi^{q},$ as the elements mapped by $\pi^{q}$ into $\mathrm{Im}\,\pi^{q}\cap\K$ are now mapped to zero. It turns out that there are two Hilbert spaces $\Hh_{1},$ $\Hh_{2},$ such that for every $q\in (0,1),$ we have $*$-representations
$$\begin{array}{ccc}\mathbb{C}[\mathrm{SU_{3}]_{q}}\overset{\Pi_{1}^{q}}\longrightarrow \mathcal{B}(\Hh_{1}), &\mathbb{C}[\mathrm{SU_{3}]_{q}}\overset{\Pi_{2}^{q}}\longrightarrow \mathcal{B}(\Hh_{2})\end{array}$$
and an isomorphism $\varphi_{q}:\overline{\mathrm{Im}\,\pi^{q}}/\K\to \overline{\mathrm{Im}\,(\Pi_{1}^{q}\oplus \Pi_{2}^{q})},$ such that for every $a\in \mathbb{C}[\mathrm{SU_{3}]_{q}},$ we have $\varphi_{q}(p\circ\pi^{q}(a))=(\Pi_{1}^{q}\oplus\Pi_{2}^{q})(a).$ Moreover, in this case, one can show that actually we have $\overline{\mathrm{Im}\,(\Pi_{1}^{q}\oplus \Pi_{2}^{q})}=\overline{\mathrm{Im}\,(\Pi_{1}^{s}\oplus \Pi_{2}^{s})}$ for all $q,s\in (0,1)$ as subspaces of $\mathcal{B}(\Hh_{1})\oplus \mathcal{B}(\Hh_{2}).$ Thus, by quotienting out the compact operators, we have successfully ``untwisted'' the $*$-representation $\pi^{q}.$ Letting $M=\overline{\mathrm{Im}\,(\Pi_{1}^{q}\oplus \Pi_{2}^{q})},$ one then shows that the injective homomorphisms $\varphi_{q}^{-1}:M\to \mathcal{Q}(\ell^{2}(\mathbb{Z}_{+})^{\otimes 3})$ vary norm-continuously in $q,$ meaning that, for a fixed $x\in M,$ the map $q\in (0,1)\mapsto \varphi_{q}^{-1}(x)$ is a continuous function of $q.$ We then get an isomorphism \begin{equation}\label{first}\begin{array}{ccc}p^{-1}(\varphi^{-1}_{q}(M))\cong p^{-1}(\varphi^{-1}_{s}(M)), & q,s\in (0,1)\end{array}\end{equation}
using the lifting result (Lemma~\ref{gnagy} below). As $\K\subseteq \overline{\mathrm{Im}\,\pi^{q}},$ it follows that $p^{-1}(\varphi^{-1}_{q}(M))=\overline{\mathrm{Im}\,\pi^{q}}$ and hence we have established an isomorphism for different $q.$ One needs a further argument to conclude that actually $C(\mathrm{SU_{3})_{q}}\cong C(\mathrm{SU_{3})_{s}},$ but by proving~\eqref{first}, the main effort is done. The proof below essentially systematizes this line of argument in a way that makes it also work for a general $\mathrm G$, by quotienting out ideals to ``untwist'' an irreducible $*$-representation $\pi^{q}$ of $C(\mathrm{G})_{q}$ further and further, until it is clear that the images of the resulting $*$-representations are independent of $q.$ Then one uses inductive arguments to check that also $\overline{\mathrm{Im}\,\pi^{q}}\cong \overline{\mathrm{Im}\,\pi^{s}}$ for $q,s\in (0,1).$
\\
One can give quite a clear geometric heuristic of this, using the one-to-one correspondence between irreducible $*$-representations of $\mathbb{C}[\mathrm{G]_{q}}$ and symplectic leaves in $\mathrm G$ coming from the corresponding Poisson structure on $C(\mathrm G)$ (see~\cite{ks}). Recall that $\mathrm G$ can be decomposed into a disjoint union of symplectic leaves and that each leaf is an even-dimensional sub-manifold of $\mathrm G$. Let $U$ be a $2m$-dimensional symplectic leaf of $\mathrm G,$ corresponding to a $*$-representation $\pi^{q}$ of $\mathbb{C}[\mathrm{G]_{q}},$ and let $C_{0}(U)$ be the ideal of $C(\overline{U})$ of all continuous functions vanishing on $\overline{U}\backslash U.$ Thus quotienting out this ideal gives a homomorphism $C(\overline{U})\to C(\overline{U}\backslash U).$ It turns out that $\overline{U}\backslash U$ can be written as a disjoint union $\cup_{j} U_{j}$ of symplectic leaves of dimension strictly less than $2m,$ and that the leaves in this union of dimension $<2m-2$ are contained in the closures of the leaves of dimension $2m-2.$ In general, we can write $\overline{U}$ as a disjoint union of symplectic leaves
$$\overline{U}=U\cup \left(\cup_{j} U_{j}^{(m-1)}\right)\cup \left(\cup_{j} U_{j}^{(m-2)}\right)\cup\cdots \cup \left(\cup_{j} U_{j}^{(0)}\right)$$ such that each $U_{j}^{(k)}$ is a symplectic leaf of dimension $2k$ and
\begin{equation}\label{symplec}
\begin{array}{ccc}
\cup_{j} \overline{U}_{j}^{(k)}=\left(\cup_{j} U_{j}^{(k)}\right)\cup\cdots \cup \left(\cup_{j} U_{j}^{(0)}\right).
\end{array}
\end{equation}
This shows that we can make a sequence of homomorphisms
\begin{equation}\label{keke}
C(\overline{U})\longrightarrow \prod_{j} C(\overline{U}_{j}^{(m-1)})\longrightarrow\dots \longrightarrow \prod_{j} C(\overline{U}_{j}^{(1)})\longrightarrow \prod_{j} C(\overline{U}_{j}^{(0)})
\end{equation}
such that on each step, the homomorphism $\prod_{j} C(\overline{U}_{j}^{(k)})\to \prod_{j} C(\overline{U}_{j}^{(k-1)})$ has kernel $\prod_{j} C_{0}(U_{j}^{(k)}).$
Let us explain how a $q$-analogue of~\eqref{keke} is used. \begin{remark}For several reasons, the notations used here will differ somewhat from the ones used later in the text.\end{remark} Let $U$ and $U^{(k)}_{j}$ be as above. We can think of $\overline{\mathrm{Im}\,\pi^{q}}$ and the ideal $\K\subseteq \overline{\mathrm{Im}\,\pi^{q}}$ as $q$-analogues of $C(\overline{U})$ and $C_{0}(U),$ denoted by $C(\overline{U})_{q}$ and $C_{0}(U)_{q}$ respectively. There is then a sequence of homomorphisms
\begin{equation}\label{keke1}
C(\overline{U})_{q}\overset{\partial_{m}^{q}}\longrightarrow \prod_{j} C(\overline{U}_{j}^{(m-1)})_{q}\overset{\partial_{m-1}^{q}}\longrightarrow\dots \overset{\partial_{2}^{q}}\longrightarrow \prod_{j} C(\overline{U}_{j}^{(1)})_{q}\overset{\partial_{1}^{q}}\longrightarrow \prod_{j} C(\overline{U}_{j}^{(0)})_{q}
\end{equation}
such that on each step, the homomorphism $\partial_{k}:\prod_{j} C(\overline{U}_{j}^{(k)})_{q}\to \prod_{j} C(\overline{U}_{j}^{(k-1)})_{q}$ has kernel equal to $\prod_{j} C_{0}(U_{j}^{(k)})_{q}.$ Let us denote by $C(\partial^{(k)}\overline{U})_{q},$ $k=0,\dots,m-1,$ the image of $C(\overline{U})_{q}$ in $\prod_{j} C(\overline{U}_{j}^{(k)})_{q}$ via the composition of homomorphisms in~\eqref{keke1}. The idea is to proceed by induction on the dimensions of the symplectic leaves. In the case of zero-dimensional leaves, the corresponding $*$-representations are one-dimensional (maps to $\mathbb{C}$) and hence trivially $q$-independent. For higher dimensional leaves, we can use the induction hypothesis to connect the lower dimensional leaves for different $q,s\in (0,1)$
\begin{equation}\label{keke2}
\begin{xy}\xymatrixrowsep{3pc}\xymatrix{
C(\overline{U})_{q}\ar@{-->}[d] \ar[r]^-{\partial_{m}^{q}} & \prod_{j} C(\overline{U}_{j}^{(m-1)})_{q} \ar[d]_{\Gamma^{s,q}_{m-1}} \ar[r]^-{\partial_{m-1}^{q}} & \cdots \ar[r]^-{\partial_{2}^{q}} &\prod_{j} C(\overline{U}_{j}^{(1)})_{q}\ar[r]^-{\partial_{1}^{q}}\ar[d]_{\Gamma^{s,q}_{1}} &\prod_{j} C(\overline{U}_{j}^{(0)})_{q}\ar[d]_{\Gamma^{s,q}_{0}} \\
C(\overline{U})_{s} \ar[r]^-{\partial_{m}^{s}} & \prod_{j} C(\overline{U}_{j}^{(m-1)})_{s} \ar[r]^-{\partial_{m-1}^{s}} & \cdots \ar[r]^-{\partial_{2}^{s}} &\prod_{j} C(\overline{U}_{j}^{(1)})_{s}\ar[r]^-{\partial_{1}^{s}} &\prod_{j} C(\overline{U}_{j}^{(0)})_{s}\\
}\end{xy}
\end{equation}
Moreover, this can be done in a way such that the diagram~\eqref{keke2} is commutative. The aim is then to construct a dotted arrow from $C(\overline{U})_{q}$ to $C(\overline{U})_{s}$ that makes the diagram commutative. The main obstacle to doing this is to check that $C(\partial^{(m-1)}\overline{U})_{q}$ is mapped by $\Gamma^{s,q}_{m-1}$ to $C(\partial^{(m-1)}\overline{U})_{s}.$ In order to prove this, one shows the following
\begin{enumerate}[(i)]
\item for $k=0,\dots, m-1,$ the intersection of $C(\partial^{(k)}\overline{U})_{q}$ with $\prod C_{0}(U_{j}^{(k)})_{q}$ is mapped by $\Gamma^{s,q}_{k}$ to the intersection of $C(\partial^{(k)}\overline{U})_{s}$ with $\prod C_{0}(U_{j}^{(k)})_{s},$
\item
the $C^{*}$-algebras $C(\partial^{(0)}\overline{U})_{q}$ are commutative and isomorphic for all $q\in (0,1),$ via $\Gamma^{s,q}_{0}$,
\item
for $k=1,\dots, m-1,$ there is an approximate unit $\{u_{q,i}^{(k)}\}_{i=1}^{\infty}$ for $\prod_{j} C_{0}(U_{j}^{(k)})_{q}$ such that $\{u_{q,i}^{(k)}\}_{i=1}^{\infty}\subseteq C(\partial^{(k)}\overline{U})_{q}$ and $\Gamma_{k}^{s,q}(u_{q,i}^{(k)})=u_{s,i}^{(k)}$ for all $i\in \mathbb{N}.$
\end{enumerate}
\begin{remark}In the actual proof below, we do not really use $(iii),$ since the way the arguments are constructed there makes explicitly stating this point unnecessary.\end{remark}
From the commutativity of the square
\begin{equation}\label{comsq}
\begin{xy}\xymatrix{
\prod_{j} C(\overline{U}_{j}^{(k)})_{q}\ar[r]^-{\partial_{k}^{q}}\ar[d]_{\Gamma^{s,q}_{k}} & \prod_{j} C(\overline{U}_{j}^{(k-1)})_{q}\ar[d]_{\Gamma^{s,q}_{k-1}}\\
\prod_{j} C(\overline{U}_{j}^{(k)})_{s} \ar[r]^-{\partial_{k}^{s}} &\prod_{j} C(\overline{U}_{j}^{(k-1)})_{s}
}\end{xy}
\end{equation}
it follows that if $\Gamma^{s,q}_{k-1}$ restricts to a $*$-isomorphism from $C(\partial^{(k-1)}\overline{U})_{q}$ to $ C(\partial^{(k-1)}\overline{U})_{s},$ then, since for any $x\in C(\partial^{(k)}\overline{U})_{q}$ we have $$\partial_{k}^{s}(\Gamma^{s,q}_{k}(x))=\Gamma^{s,q}_{k-1}(\partial_{k}^{q}(x))\in C(\partial^{(k-1)}\overline{U})_{s},$$ we can write $\Gamma^{s,q}_{k}(x)=y+c$ where $y\in C(\partial^{(k)}\overline{U})_{s}$ and $c\in \prod_{j} C_{0}(U_{j}^{(k)})_{s}.$ By $(iii),$ we have an approximate unit $\{u_{q,i}^{(k)}\}_{i=1}^{\infty}$ such that $x u_{q,i}^{(k)}$ is in the intersection of $C(\partial^{(k)}\overline{U})_{q}$ with $\prod_{j} C_{0}(U_{j}^{(k)})_{q}.$ It now follows from $(i)$ that for all $i\in \mathbb{N}$, we have $$\Gamma_{k}^{s,q}(xu_{q,i}^{(k)})\in C(\partial^{(k)}\overline{U})_{s},$$
$$y\,\Gamma_{k}^{s,q}(u_{q,i}^{(k)})\in C(\partial^{(k)}\overline{U})_{s}$$
and thus it follows that also $c u_{s,i}^{(k)}\in C(\partial^{(k)}\overline{U})_{s}.$ Letting $i\to \infty$ now gives $c\in C(\partial^{(k)}\overline{U})_{s}$ and thus $\Gamma_{k}^{s,q}(x)\in C(\partial^{(k)}\overline{U})_{s}.$ Using $(ii),$ it now follows by induction that $C(\partial^{(m-1)}\overline{U})_{q}$
is mapped isomorphically onto $C(\partial^{(m-1)}\overline{U})_{s}.$ As $\ker \partial^{q}_{m}=C_{0}(U)_{q},$ this is equivalent to $$\begin{array}{cccc}C(\overline{U})_{q}/C_{0}(U)_{q}\cong C(\overline{U})_{s}/C_{0}(U)_{s}, & q,s\in (0,1).\end{array}$$ After checking that these isomorphisms vary norm-continuously as functions of $q$ and $s$, the dotted arrow in~\eqref{keke2} can then be constructed using Lemma $2$~below.
\subsection{\bf\textsc{Lifting Results}}
\begin{lem}[{Lemma $2$ in~\cite{gnagy}}]\label{gnagy}
Let $\Hh$ be a separable Hilbert space, let $\K$ be the space of compact operators on $\Hh,$ let $\mathcal{Q}(\Hh)=\mathcal{B}(\Hh)/\K$ be the Calkin algebra and $p: \mathcal{B}(\Hh)\to \mathcal{Q}(\Hh)$ the quotient map. Suppose $A$ is a fixed separable $C^{*}$-algebra of type $\mathrm{I}$ and $\phi_{q}:A\to \mathcal{Q}(\Hh),$ $q\in [0,1],$ is a point-norm continuous family of injective $*$-homomorphisms. Denote $$\mathfrak{A}_{q}:=\phi_{q}(A),$$
$$
M_{q}:=p^{-1}(\mathfrak{A}_{q}).
$$
Then there exists a family of injective $*$-homomorphisms $\Phi_{q}:M_{0}\to \mathcal{B}(\Hh),$ $q\in [0,1]$ with the following properties
\begin{enumerate}[(a)]
\item $\Phi_{q}(M_{0})=M_{q}$ for $q\in [0,1]$ and $\Phi_{0}=\id_{M_{0}},$
\item the family $\Phi_{q}:M_{0}\to \mathcal{B}(H),$ $q\in[0,1]$ is point-norm continuous,
\item for every $q\in [0,1],$ the diagram
\begin{equation}\label{com1}
\begin{xy}\xymatrix{
M_{0}\ar[r]^*{\Phi_{q}}\ar[d]_*{p}& M_{q}\ar[d]^*{p}\\
\mathfrak{A}_{0} \ar[r]_*{\phi_{q}\circ \phi_{0}^{-1}} & \mathfrak{A}_{q}
}\end{xy}
\end{equation}
is commutative.
\end{enumerate}
\end{lem}
We remind the reader that a $C^{*}$-algebra of type $\mathrm I$ is one for which the image of every irreducible $*$-representation contains a non-zero compact operator. Here, we will use a modified version of Lemma~\ref{gnagy}.
\begin{lem}\label{1}
Let $\Hh$ be a separable Hilbert space, let $\K$ be the space of compact operators on $\Hh,$ let $\mathcal{Q}(\Hh)=\mathcal{B}(\Hh)/\K$ be the Calkin algebra and $p: \mathcal{B}(\Hh)\to \mathcal{Q}(\Hh)$ be the quotient map. For every $q\in (0,1),$ suppose $\Aa_{q}\subseteq \mathcal{Q}(\Hh)$ is a separable $C^{*}$-algebra of type $\mathrm{I}$ and we have a family of $*$-isomorphisms $\phi_{s,q}:\Aa_{q}\to \Aa_{s},$ $s,q\in (0,1)$ which are continuous in the point-norm topology (i.e. for every fixed $q\in (0,1)$ and $x\in \Aa_{q},$ the map $s\in (0,1)\mapsto \phi_{s,q}(x)\in \mathcal{Q}(\Hh)$ is norm-continuous). Assume moreover that $\phi_{q,q}=\id_{\Aa_{q}}$ and $\phi_{t,s}\circ\phi_{s,q}=\phi_{t,q}$ for all $t,s,q\in (0,1).$ Denote
$$
\Bb_{q}:=p^{-1}(\Aa_{q}).
$$
Then there exists a family of inner $*$-isomorphisms $\Phi_{s,q}:\mathcal{B}(\Hh)\to\mathcal{B}(\Hh),$ $s,q\in(0,1)$ (i.e.\ $\Phi_{s,q}(x)=U_{s,q}^{*}xU_{s,q}$ for some unitary $U_{s,q}\in \mathcal{B}(\Hh)$) with the following properties
\begin{enumerate}[(i)]
\item $\Phi_{s,q}(\Bb_{q})=\Bb_{s}$ for $s,q\in (0,1),$
\item $\Phi_{q,q}=\id$ and $\Phi_{t,s}\circ \Phi_{s,q}=\Phi_{t,q}$ for all $t,s,q\in (0,1),$
\item for fixed $q,$ the family $\Phi_{s,q}:\Bb_{q}\to \mathcal{B}(\Hh),$ $s\in(0,1),$ is point-norm continuous,
\item for all $s,q\in (0,1),$ the diagram
\begin{equation}\label{com}
\begin{xy}\xymatrix{
\Bb_{q}\ar[r]^*{\Phi_{s,q}}\ar[d]_*{p}& \Bb_{s}\ar[d]^*{p}\\
\Aa_{q} \ar[r]_*{\phi_{s,q}} & \Aa_{s}
}\end{xy}
\end{equation}
is commutative.
\end{enumerate}
\end{lem}
\begin{proof}
If $a<b,$ then clearly the conclusion of Lemma~\ref{gnagy} still holds if we change the interval to $[a,b].$ Let $a_{k}\in (0,1),$ $k\in \mathbb{Z}$ be a strictly increasing sequence such that $a_{k}\to 1$ and $a_{-k}\to 0$ as $k\to \infty.$ For $k\geq 0,$ we apply Lemma~\ref{gnagy} to the set of injective $*$-homomorphisms $$\begin{array}{ccc}\tilde\phi_{q,k}:=\phi_{q,a_{k}}:\Aa_{a_{k}}\to \mathcal{Q}(\Hh),& q\in [a_{k},a_{k+1}],\end{array}$$
and let $M_{q,k}=p^{-1}(\tilde\phi_{q,k}(\Aa_{a_{k}}))=p^{-1}(\Aa_{q}).$
For $k<0,$ we instead apply Lemma~\ref{gnagy} to
$$\begin{array}{ccc}\tilde\phi_{q,k}:=\phi_{q,a_{k+1}}:\Aa_{a_{k+1}}\to \mathcal{Q}(\Hh),& q\in [a_{k},a_{k+1}].\end{array}$$
Let $\tilde\Phi_{q,k},$ $k\in \mathbb{Z},$ be the $*$-isomorphisms obtained by applying Lemma~\ref{gnagy}. Note that $\tilde\Phi_{q,k}$ is an isomorphism from $\Bb_{a_{k}}$ to $ \Bb_{q}$ for $q\in [a_{k},a_{k+1}]$ and $k\geq 0$ and an isomorphism from $\Bb_{a_{k+1}}$ to $\Bb_{q}$ if $k<0.$ Let us define a $*$-isomorphism $\Phi_{q}:\Bb_{a_{0}}\to \Bb_{q}$ by the formula
\begin{equation}\label{phimap}
\Phi_{q}=
\begin{cases}
\tilde\Phi_{q,k}\circ\tilde \Phi_{a_{k},k-1}\circ \cdots \circ\tilde \Phi_{a_{1},0}, & \text{if $q\in [a_{k},a_{k+1}]$ and $k\geq 0$}\\
\tilde\Phi_{q,k}\circ \tilde\Phi_{a_{k+1},k+1}\circ \cdots \circ\tilde\Phi_{a_{-1},-1}, & \text{if $q\in [a_{k},a_{k+1}]$ and $k< 0$}
\end{cases}
\end{equation}
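For instance, if $q\in[a_{2},a_{3}],$ then~\eqref{phimap} reads $\Phi_{q}=\tilde\Phi_{q,2}\circ\tilde\Phi_{a_{2},1}\circ\tilde\Phi_{a_{1},0},$ a composition of $*$-isomorphisms $\Bb_{a_{0}}\to\Bb_{a_{1}}\to\Bb_{a_{2}}\to\Bb_{q}.$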
It follows from Lemma~\ref{gnagy} and the construction of $\Phi_{q}$ that for $x\in \Bb_{a_{0}},$ the map $q\in (0,1)\mapsto \Phi_{q}(x)\in \mathcal{B}(\Hh)$ is norm-continuous. That $p\circ \Phi_{q}=\phi_{q,a_{0}}\circ p$ holds follows from $\phi_{t,s}\circ\phi_{s,q}=\phi_{t,q}$ and iteration of the commutative diagram
$$
\begin{xy}\xymatrix{
\Bb_{a_{0}}\ar[r]^*{\tilde\Phi_{a_{1},0}}\ar[d]_*{p} &\Bb_{a_{1}}\ar[r]^*{\tilde\Phi_{q,1}}\ar[d]_*{p}& \Bb_{q}\ar[d]^*{p} \\
\Aa_{a_{0}}\ar[r]_*{\phi_{a_{1},a_{0}}}&\Aa_{a_{1}} \ar[r]_*{\phi_{q,a_{1}}} & \Aa_{q}
}\end{xy}
$$
We then let $\Phi_{s,q}=\Phi_{s}\circ \Phi_{q}^{-1}$ for $s,q\in (0,1).$ By~\eqref{com1}, we have a commutative diagram
$$
\begin{xy}\xymatrix{
\Bb_{q}\ar[r]^*{\Phi_{q}^{-1}}\ar[d]_*{p} &\Bb_{a_{0}}\ar[r]^*{\Phi_{s}}\ar[d]_*{p}& \Bb_{s}\ar[d]^*{p} \\
\Aa_{q}\ar[r]_*{\phi_{q,a_{0}}^{-1}}&\Aa_{a_{0}} \ar[r]_*{\phi_{s,a_{0}}} & \Aa_{s}
}\end{xy}
$$
From this, we get the commutativity of~\eqref{com}, as $$\phi_{s,a_{0}}\circ \phi_{q,a_{0}}^{-1}=\phi_{s,a_{0}}\circ\phi_{a_{0},q}=\phi_{s,q}.$$
Hence, $(i),$ $(ii),$ $(iii),$ and $(iv)$ hold for $\Phi_{s,q}$ as a $*$-isomorphism $\Bb_{q}\to \Bb_{s}.$
\\
We now extend $\Phi_{s,q}$ as an inner automorphism of all of $\mathcal{B}(\Hh)$. To do this, we first show that the restriction $\Phi_{s,q}|_{\K}$ is inner.
We have $\K\subseteq \Bb_{q},$ and we get from the diagram~\eqref{com} that $\Phi_{s,q}(\K)\subseteq\K,$ and as $\Phi_{s,q}^{-1}=\Phi_{q,s}$ it follows that actually \begin{equation}\label{k}\Phi_{s,q}(\K)=\K.\end{equation}
So $\Phi_{s,q}|_{\K}$ is an irreducible representation of $\K.$ It is known that any such representation is unitarily equivalent to the identity representation (see e.g.\ Corollary~$1.10$ in~\cite{dav}). Hence, there exists a unitary $U_{s,q}\in \mathcal{B}(\Hh)$ such that $\Phi_{s,q}(x)=U_{s,q}x U_{s,q}^{*}$ for all $x\in \K.$ For arbitrary $y\in \Bb_{q},$ we obtain $$\Phi_{s,q}(x)\Phi_{s,q}(y)=\Phi_{s,q}(xy)=U_{s,q}x yU_{s,q}^{*}=\Phi_{s,q}(x)U_{s,q}yU_{s,q}^{*}$$ for all $x\in \K.$ This gives $\Phi_{s,q}(y)=U_{s,q}y U_{s,q}^{*}.$
\end{proof}
\section{\bf\textsc{Preliminaries}}
In this section we recall some facts about the Hopf algebras $\mathrm{U_{q}}(\mathfrak{g})$ and $\mathbb{C}[\mathrm{G]_{q}}.$ The presentation is mainly taken from~\cite{nt}. A general reference for the technical claims made here is~\cite{ks}.
\subsection{\bf\textsc{The Quantum Group $\mathrm{U_{q}}(\mathfrak{g})$}}
Let $\mathrm G$ be a simply connected semisimple compact Lie group and let $\mathfrak{g}$ denote its complexified Lie algebra. We write $\mathrm U (\mathfrak{g})$ to denote the universal enveloping algebra of $\mathfrak{g}$ equipped with a $*$-involution induced by the real form derived from $\mathrm{G}.$ Moreover, we let $\mathfrak{h}\subseteq \mathfrak{g}$ be the Cartan sub-algebra coming from a maximal torus $\mathrm T\subseteq \mathrm{G}.$ Let $\mathfrak{t}\subseteq \mathfrak{h}$ be the real subspace of skew-symmetric elements. Write $\Phi$ for the set of roots of $\mathfrak{g}$, $\Phi_{+}$ for the set of positive roots and $\Omega=\{\alpha_{1},\dots,\alpha_{n}\}$ for the set of simple roots.
\\
We denote the Weyl group of $\mathfrak g$ by $W$ and identify its set of generators $s_{i},$ $i=1,\dots, n,$ (as a Coxeter group) with $\Omega$ by the identification $\alpha_{i}\mapsto s_{\alpha_{i}}=:s_{i}.$ Moreover, we identify $\Phi_{+}$ with the set $\{w s_{i} w^{-1}:s_{i}\in \Omega,w\in W\}\subseteq W.$ In both instances, we write the identification as $\alpha\mapsto h_{\alpha}\in \mathfrak{t}.$
\\
Let $q\in (0,1).$ Let $(a_{ij})_{ij}$ be the Cartan matrix of $\mathfrak{g}$ and $d_{i}=\frac{(\alpha_{i},\alpha_{i})}{2}$ for $i=1,\dots,n$. Let $q_{i}=q^{d_{i}}$ for $i=1,\dots, n.$ The quantized universal enveloping algebra $\mathrm{U_{q}}(\mathfrak{g})$ is the unital complex algebra generated by elements $E_{i},F_{i},K_{i},K_{i}^{-1},$ $i=1,\dots, n$ subject to the relations
$$
\begin{array}{cccc}
K_{i}K^{-1}_{i}=K^{-1}_{i}K_{i}=1, & K_{i}K_{j}=K_{j}K_{i}, & K_{i}E_{j}=q_{i}^{a_{ij}}E_{j}K_{i}, & K_{i}F_{j}=q_{i}^{-a_{ij}}F_{j}K_{i},
\end{array}
$$
$$
E_{i}F_{j}-F_{j}E_{i}=\delta_{ij}\frac{K_{i}-K_{i}^{-1}}{q_{i}-q_{i}^{-1}}
$$
$$
\sum_{k=0}^{1-a_{ij}}(-1)^{k}\left[ \begin{array}{cc}1-a_{ij}\\ k \end{array}\right]_{q_{i}}E_{i}^{k}E_{j}E_{i}^{1-a_{ij}-k}=0,
$$
$$
\sum_{k=0}^{1-a_{ij}}(-1)^{k}\left[ \begin{array}{cc}1-a_{ij}\\ k \end{array}\right]_{q_{i}}F_{i}^{k}F_{j}F_{i}^{1-a_{ij}-k}=0
$$
where $\left[ \begin{array}{cc}k\\ j \end{array}\right]_{q_{i}}=\overset{j-1}{\underset{m=0}\prod}\frac{q_{i}^{-(k-m)}-q_{i}^{k-m}}{q_{i}^{-(m+1)}-q_{i}^{m+1}}$ is a $q$-analogue of the binomial coefficients.
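For example, $\left[ \begin{array}{cc}2\\ 1 \end{array}\right]_{q_{i}}=\frac{q_{i}^{-2}-q_{i}^{2}}{q_{i}^{-1}-q_{i}}=q_{i}^{-1}+q_{i},$ which recovers the ordinary binomial coefficient $\binom{2}{1}=2$ in the limit $q_{i}\to 1$.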
\\
$\mathrm{U_{q}}(\mathfrak{g})$ becomes a Hopf $*$-algebra when equipped with a co-associative co-product $\hat\Delta_{q},$ a co-unit $\hat\epsilon_{q},$ an antipode $\hat S_{q}$ and a $*$-involution given on generators as
$$
\begin{array}{cccc}
\hat\Delta_{q}(K_{i})=K_{i}\otimes K_{i}, & \hat\Delta_{q}(E_{i})=E_{i}\otimes 1+K_{i}\otimes E_{i}, &\hat\Delta_{q}(F_{i})=F_{i}\otimes K^{-1}_{i}+1\otimes F_{i},
\end{array}
$$
$$
\begin{array}{cccc}
\hat\epsilon_{q}(E_{i})=\hat\epsilon_{q}(F_{i})=0, & \hat\epsilon_{q}(K_{i})=1,
\end{array}
$$
$$
\begin{array}{cccc}
\hat S_{q}(F_{i})=-F_{i}K_{i}, & \hat S_{q}(E_{i})=-K^{-1}_{i}E_{i}, & \hat S_{q}(K_{i})=K_{i}^{-1}
\end{array}
$$
$$
\begin{array}{cccc}
K_{i}^{*}=K_{i}, & E_{i}^{*}=F_{i}K_{i}, & F_{i}^{*}=K_{i}^{-1}E_{i}.
\end{array}
$$
We denote the antipode of $\mathrm{U_{q}}(\mathfrak{g})$ by $\hat S_{q}.$ For $q=1,$ we let $\mathrm{U_{1}}(\mathfrak{g})=\mathrm{U}(\mathfrak{g})$ be the ordinary universal enveloping algebra with the usual Hopf $*$-algebra structure and denote the co-product, co-unit, antipode simply by $\hat\Delta,$ $\hat \epsilon,$ $\hat S.$
\\
Let $P$ be the set of weights for $\mathfrak{g}.$ Let $P_{+}\subseteq P$ be the set of dominant integral weights, i.e. $\lambda\in P$ such that $\langle \lambda,\alpha_{i} \rangle\geq 0$ for $i=1,\dots,n.$ Moreover, let $P_{++}\subseteq P_{+}\subseteq P$ be the set of those dominant weights $\lambda$ such that $\langle \lambda, \alpha_{i} \rangle>0$ for $i=1,\dots ,n.$
\\
The theory of $\mathrm{U_{q}}(\mathfrak{g})$-modules is very similar to the case $q=1$ (see~\cite{ks}). It is well known that for every $q\in(0,1),$ the irreducible objects of the monoidal category $\mathcal{M}_{q}(\mathfrak{g})$ of
admissible finite dimensional $\mathrm{U_{q}}(\mathfrak{g})$-modules are
parameterized by $\lambda \in P_{+},$ with the same fusion rules as in $\mathcal{M}(\mathfrak{g}),$
the monoidal category of finite dimensional $\mathrm{U}(\mathfrak{g})$-modules.
Moreover, for any $\lambda \in P_{+},$ the vector spaces $V_{\lambda}^{q}$ and $V_{\lambda}$ have the same dimension. There is a similar decomposition into weight sub-spaces $V_{\lambda}^{q}(\gamma)\subseteq V^{q}_{\lambda},$ for $\gamma\in P;$ these are the sub-spaces such that \begin{equation}\label{ki}\begin{array}{cccc}K_{i}\cdot\eta=q_{i}^{\gamma(H_{i})}\eta, & \eta\in V_{\lambda}^{q}(\gamma), & i=1,\dots, n. \end{array}\end{equation}
For each $\gamma\in P,$ we also have a vector space isomorphism
$$
V_{\lambda}^{q}(\gamma)\cong V_{\lambda}(\gamma).
$$
In particular, the sub-space $V_{\lambda}^{q}(\lambda)$ is one-dimensional and is the highest weight-space of $V_{\lambda}^{q}$, in the sense that for $\xi\in V_{\lambda}^{q}(\lambda),$ we have
$$
\begin{array}{ccc}
E_{i}\cdot\xi=0,& \text{for $i=1,\dots, n.$}
\end{array}
$$
There is a non-degenerate inner product $\langle\cdot,\cdot \rangle$ on $V_{\lambda}^{q}$ such that
$$
\begin{array}{ccccc}
\langle a\cdot \eta,\xi\rangle=\langle \eta,a^{*}\cdot \xi\rangle, & \forall a\in \mathrm{U_{q}}(\mathfrak{g}),& \forall\eta,\xi\in V_{\lambda}^{q}.\\
\end{array}
$$
Clearly, with respect to this inner product, weight sub-spaces $V_{\lambda}^{q}(\gamma)$ corresponding to different $\gamma$ are orthogonal. For any $w\in W,$ the Weyl group of $\mathrm G,$ the weight sub-space $V_{\lambda}^{q}(w\cdot \lambda)$ is one dimensional. Thus we can choose a unit vector $\xi_{w\cdot\lambda}\in V_{\lambda}^{q}(w\cdot \lambda).$ Let us do this in such a way that $\xi_{w\cdot\lambda}=\xi_{v\cdot\lambda}$ if $w\cdot\lambda=v\cdot\lambda$ and thus no ambiguity arises from this notation.
\\
For $w\in W,$ let $\mathcal{l}(w)\in\mathbb{Z}_{+}$ denote the length of $w.$ By definition, this is the smallest integer $m$ such that $w$ can be written as a product of $m$ generators
\begin{equation}\label{reduced}
\begin{array}{ccccc}
w=s_{i_{1}}\cdots s_{i_{m}}.
\end{array}
\end{equation}
For the identity element $e\in W,$ we let $\mathcal{l}(e)=0.$ If $\mathcal{l}(w)=m$ in~\eqref{reduced} then the product $w=s_{i_{1}}\cdots s_{i_{m}}$ is said to be \textit{reduced}. Recall that the Bruhat order on $W$ is the partial ordering generated by declaring that
\begin{equation}\label{bruhato}
\begin{array}{ccc}
v<w ,& \text{if $\exists \alpha\in \Phi_{+}$ such that $v\alpha=w$ and $\mathcal{l}(v)=\mathcal{l}(w)-1.$}
\end{array}
\end{equation}
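As a simple illustration, for $\mathfrak{g}=\mathfrak{su}_{3}$ the Weyl group is generated by $s_{1},s_{2}$ with $\mathcal{l}(e)=0,$ $\mathcal{l}(s_{1})=\mathcal{l}(s_{2})=1,$ $\mathcal{l}(s_{1}s_{2})=\mathcal{l}(s_{2}s_{1})=2$ and $\mathcal{l}(s_{1}s_{2}s_{1})=3,$ and the Bruhat order is given by $e<s_{1},s_{2}<s_{1}s_{2},s_{2}s_{1}<s_{1}s_{2}s_{1}=s_{2}s_{1}s_{2},$ where $s_{1},s_{2}$ (respectively $s_{1}s_{2},s_{2}s_{1}$) are incomparable.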
Let $\mathrm{U}_{q}(\mathfrak{b})$ be the sub-algebra of $\mathrm{U_{q}}(\mathfrak{g})$ generated by $K_{i},K_{i}^{-1},E_{i}$ for $i=1,\dots, n.$ We can concretely connect the Bruhat order of $W$ with certain $\mathrm{U_{q}}(\mathfrak{g})$-modules in the following way:
\begin{lem}[{Proposition~3.3 in~\cite{nt}}]\label{lem1}
Let $v,w\in W.$ The following are equivalent:
\begin{enumerate}[(i)]
\item We have $v\leq w$ in the Bruhat order on $W.$
\item For any $\lambda\in P_{++}$ we have $V_{\lambda}^{q}(v\cdot \lambda)\subset (\mathrm{U_{q}}(\mathfrak{b}))V_{\lambda}^{q}(w\cdot \lambda)$.
\end{enumerate}
\end{lem}
Both the subspaces $V_{\lambda}^{q}(v\cdot \lambda)$ and $(\mathrm{U_{q}}(\mathfrak{b}))V_{\lambda}^{q}(w\cdot \lambda)$ are invariant under the actions of $K_{i},$ $i=1,\dots, n.$ Notice that these are commuting self-adjoint operators on $V^{q}_{\lambda}$ that separate the weight-spaces. As the space $V_{\lambda}^{q}(v\cdot \lambda)$ is one-dimensional, it follows from~\eqref{ki} that if $v \not \leq w,$ and hence $V_{\lambda}^{q}(v\cdot \lambda)\not\subset (\mathrm{U_{q}}(\mathfrak{b}))V_{\lambda}^{q}(w\cdot \lambda),$ then the two subspaces are orthogonal. This gives the following corollary of Lemma~\ref{lem1}.
\begin{cor}\label{vnotw}
If $\lambda\in P_{++}$ and $v\not \leq w,$ then
\begin{equation*}V_{\lambda}^{q}(v\cdot \lambda)\bot (\mathrm{U_{q}}(\mathfrak{b}))V_{\lambda}^{q}(w\cdot \lambda).\end{equation*}
\end{cor}
\subsection{\bf\textsc{The Quantum Group $\mathbb{C}[\mathrm{G]_{q}}$}}
We define $\mathbb{C}[\mathrm{G]_{q}}\subseteq (\mathrm{U_{q}}(\mathfrak{g}))^{*}$ as the subspace of the dual generated by linear functionals of the form
\begin{equation}\label{cgq}
C_{\eta,\xi}^{\lambda}(a):=\langle a\cdot \eta,\xi\rangle,
\end{equation}
$$
\begin{array}{ccccc}
\text{for} &a\in \mathrm{U_{q}}(\mathfrak{g}),& \eta,\xi\in V_{\lambda}^{q}, & \lambda\in P_{+}.
\end{array}
$$
We let $\Delta_{q},$ $\epsilon_{q}$ and $S_{q}$ respectively be the dual of the product, co-product and antipode of $\mathrm{U_{q}}(\mathfrak{g}).$ Moreover, we define a $*$-involution on $\mathbb{C}[\mathrm{G]_{q}}$ by the formula $$(C_{\eta,\xi}^{\lambda})^{*}(a)=\overline{C_{\eta,\xi}^{\lambda}((\hat S_{q}(a))^{*})}$$
We let $\mathbb{C}[\mathrm{G]}=\mathbb{C}[\mathrm{G]_{1}}$ as well as $\Delta=\Delta_{1}$, $\epsilon=\epsilon_{1}$ and $S=S_{1}$ denote the $*$-algebra of regular functions on $\mathrm{G}$ with the usual co-product, co-unit and antipode.
For an irreducible module $V^{q}_{\lambda}$ with an orthonormal basis $\xi_{k},$ $k=1,\dots, m=\dim V_{\lambda}^{q},$ it follows from the definition of $\mathbb{C}[\mathrm{G]_{q}}$ that for $i,k=1,\dots, m,$
\begin{gather}
\Delta_{q}(C_{\xi_{i},\xi_{k}}^{\lambda})=\sum_{j=1}^{m}C_{\xi_{i},\xi_{j}}^{\lambda}\otimes C_{\xi_{j},\xi_{k}}^{\lambda},\label{xi1}\\
\epsilon_{q}(C_{\xi_{i},\xi_{k}}^{\lambda})=C_{\xi_{i},\xi_{k}}^{\lambda}(I)=\langle\xi_{i},\xi_{k}\rangle=\delta_{ik}1.\label{xi2}\\
\begin{array}{ccc}
\sum_{i=1}^{m}(C_{\xi_{i},\xi_{k}}^{\lambda})^{*}C_{\xi_{i},\xi_{j}}^{\lambda}=\delta_{kj}I, & k,j=1,\dots, m.\label{xi3}
\end{array}
\end{gather}
To deduce~\eqref{xi3}, consider any $a\in \mathrm{U_{q}}(\mathfrak{g})$; then, noticing that $\overline{C_{\eta,\xi}^{\lambda}(a)}=C_{\xi,\eta}^{\lambda}(a^{*})$ and hence $(C_{\eta,\xi}^{\lambda})^{*}(a)=C_{\xi,\eta}^{\lambda}(\hat S_{q}(a)),$ it follows from the Hopf algebra axioms that
$$
\sum_{i=1}^{m}(C_{\xi_{i},\xi_{k}}^{\lambda})^{*}C_{\xi_{i},\xi_{j}}^{\lambda}(a)=\sum_{i=1}^{m}((C_{\xi_{i},\xi_{k}}^{\lambda})^{*}\otimes C_{\xi_{i},\xi_{j}}^{\lambda})(\hat\Delta_{q}(a))=
$$
$$
=\sum_{i=1}^{m}(C_{\xi_{k},\xi_{i}}^{\lambda}\otimes C_{\xi_{i},\xi_{j}}^{\lambda})((\hat S_{q}\otimes \iota)\hat\Delta_{q}(a))=
$$
$$
=C^{\lambda}_{\xi_{k},\xi_{j}}(\sum \hat S_{q}(a_{(1)})a_{(2)})=C^{\lambda}_{\xi_{k},\xi_{j}}(\hat \epsilon_{q}(a))=\delta_{kj}\hat\epsilon_{q}(a)I,
$$
where $\hat \Delta_{q}(a)=\sum a_{(1)}\otimes a_{(2)}$ in the Sweedler notation.
We define inductively
$$
\Delta_{q}^{(2)}=\Delta_{q}.
$$
\begin{equation}\label{deltapower}\Delta_{q}^{(n)}=(\underset{\text{$n-1$ terms}}{\underbrace{\Delta_{q}\otimes \iota\otimes\cdots\otimes \iota}})\circ \Delta^{(n-1)}_{q}:\mathbb{C}[\mathrm{G]_{q}}\longrightarrow \underset{\text{$n$ terms}}{\underbrace{\mathbb{C}[\mathrm{G]_{q}}\otimes \cdots \otimes \mathbb{C}[\mathrm{G]_{q}}}}\end{equation}
Notice that, by co-associativity $(\Delta_{q}\otimes \iota)\circ \Delta_{q}=(\iota\otimes \Delta_{q})\circ \Delta_{q}$, it does not matter to which tensor factor $\Delta_{q}$ is applied in~\eqref{deltapower}.
\\
Let us denote by $C(\mathrm{G)_{q}}$ the universal enveloping $C^{*}$-algebra of $\mathbb{C}[\mathrm{G]_{q}}.$ It is known from~\cite{ks} that the universal enveloping $C^{*}$-algebra exists and that the natural homomorphism $\mathbb{C}[\mathrm{G]_{q}}\hookrightarrow C(\mathrm{G)_{q}}$ is injective. Hence we can identify $\mathbb{C}[\mathrm{G]_{q}}$ with its inclusion $\mathbb{C}[\mathrm{G]_{q}}\subseteq C(\mathrm{G)_{q}}.$ Moreover, the co-product can be extended to a $*$-homomorphism $\Delta_{q}:C(\mathrm{G)_{q}}\to C(\mathrm{G)_{q}}\otimes C(\mathrm{G)_{q}}$ (the minimal tensor product), giving a structure of $C(\mathrm{G)_{q}}$ as a compact quantum group in the sense of Woronowicz~\cite{wz2}. We will use the same symbol for a $*$-representation of $\mathbb{C}[\mathrm{G]_{q}}$ as well as its extension to $C(\mathrm{G)_{q}}.$
\\
Recall the special case of $\mathrm{SU_{2}}.$ Let $V^{q}_{\lambda}$ be the unique $2$-dimensional $\mathrm{U_{q}}(\mathfrak{su}_{2})$-module, with basis $\xi_{\lambda}=:\xi_{1},\xi_{-\lambda}=:\xi_{2}$ and let
$$
\begin{array}{ccc}
t_{ij}=C^{\lambda}_{\xi_{i},\xi_{j}}, &\text{for $i,j=1,2.$}
\end{array}
$$
Then the elements $t_{ij}$ generate $\mathbb{C}[\mathrm{SU}_{2}]_{q}$ as an algebra, and they are subject to the relations
\begin{equation}\label{su2q}
\begin{array}{cc}
\begin{array}{cccc}
t_{11}t_{21}=qt_{21}t_{11}, & t_{11}t_{12}=qt_{12}t_{11}, & t_{12}t_{21}=t_{21}t_{12},
\end{array}\\
\begin{array}{cccc}
t_{22}t_{21}=q^{-1}t_{21}t_{22}, & t_{22}t_{12}=q^{-1}t_{12}t_{22},
\end{array}\\
\begin{array}{cccc}
t_{11}t_{22}-t_{22}t_{11}=(q-q^{-1})t_{12}t_{21}, & t_{11}t_{22}-q t_{12}t_{21}=1,
\end{array}\\
\begin{array}{cccc}
t^{*}_{11}=t_{22}, & t_{12}^{*}=-q t_{21}
\end{array}
\end{array}
\end{equation}
Moreover, the relations~\eqref{su2q} determine $\mathbb{C}[\mathrm{SU_{2}}]_{q},$ in the sense that this algebra is isomorphic to the universal $*$-algebra with generators $\hat t_{ij}$, $i,j=1,2,$ satisfying the relations~\eqref{su2q}.
\\
Given two $*$-representations $\pi_{1},\pi_{2},$ such that $\pi_{i}:C(\mathrm{G)_{q}}\to \mathcal{B}(\Hh_{i})$ for $i=1,2,$ we can define the tensor product using the co-multiplication as
\begin{equation}\label{hopftensor}
\pi_{1}\boxtimes\pi_{2}:=(\pi_{1}\otimes \pi_{2})\circ \Delta_{q}:C(\mathrm{G)_{q}}\longrightarrow \mathcal{B}(\Hh_{1})\otimes \mathcal{B}(\Hh_{2})\subseteq \mathcal{B}(\Hh_{1}\otimes \Hh_{2})
\end{equation}
where $\otimes$ denotes the minimal tensor product between $C^{*}$-algebras. We will also use $\otimes$ to denote the algebraic tensor product; it will always be clear from context which one we use (e.g. we will never take the algebraic tensor product between two $C^{*}$-algebras).
\subsection{\bf\textsc{Representation Theory of $\mathbb{C}[\mathrm{G]_{q}}$}}
Recall a $*$-representation $\Pi_{q}$ of $\mathbb C[\mathrm{SU_2]_q}$ defined in the following way:
let $C_q,S,d_q:\ell^2(\mathbb Z_+)\to\ell^2(\mathbb Z_+)$ be the operators defined on the natural orthonormal basis $\{e_{j}\}_{j\in \mathbb{Z}_{+}}$ as follows:
\begin{equation}\label{SC}
Se_n=e_{n+1},\quad C_q e_n=\sqrt{1-q^{2n}}e_n,\quad d_q e_n=q^ne_n.
\end{equation}
Then the map
\begin{equation}\label{SC1}
\Pi_{q}(t_{1,1})=S^*C_q,\ \Pi_{q}(t_{1,2})=q d_q,\ \Pi_{q}(t_{2,1})=-d_q,\ \Pi_{q}(t_{2,2})=C_q S
\end{equation}
extends to a $*$-representation of $\mathbb C[\mathrm{SU_2]_q}.$ Let $C^{*}(S)\subseteq \mathcal{B}(\ell^{2}(\mathbb{Z}_{+}))$ be the $C^{*}$-algebra generated by $S$ (so that $C^{*}(S)$ is equal to the Toeplitz algebra). From the expressions~\eqref{SC} for $C_{q}$ and $d_q$ it is easy to see that $C_{q},d_{q}\in C^{*}(S)$ and hence that $$\Pi_{q}(\mathbb C[\mathrm{SU_2]_q})\subseteq C^{*}(S).$$
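As a quick consistency check of~\eqref{SC1} against the relations~\eqref{su2q} (an elementary verification, recorded only for the reader's convenience), consider for instance the relation $t_{11}t_{22}-qt_{12}t_{21}=1$: for every $n\in\mathbb{Z}_{+},$
$$
\big(\Pi_{q}(t_{11})\Pi_{q}(t_{22})-q\Pi_{q}(t_{12})\Pi_{q}(t_{21})\big)e_{n}=S^{*}C_{q}^{2}Se_{n}+q^{2}d_{q}^{2}e_{n}=(1-q^{2n+2})e_{n}+q^{2n+2}e_{n}=e_{n}.
$$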
We recall the $*$-representation theory for $\mathbb{C}[\mathrm{G]_{q}},$ $q\in (0,1),$ due to Soibelman~\cite{ks}: For each simple root $\alpha_{i}\in \Omega$ we get an injective Hopf $*$-homomorphism $\mathrm{U_{q_{i}}}(\mathfrak{su}_{2})\to \mathrm{U_{q}}(\mathfrak{g})$ that dualizes to a surjective Hopf $*$-homomorphism
$$
\begin{array}{cccc}
\varsigma_{i}^{q}:\mathbb{C}[\mathrm{G]_{q}}\longrightarrow \mathbb{C}[\mathrm{SU_{2}]_{q_{i}}}, & i=1,\dots, n
\end{array}
$$
(this is true also for $q=1,$ we write $\varsigma_{i}=\varsigma_{i}^{1}$). For $i=1,\dots, n,$ we define $*$-representations $$\pi_{i}^{q}:=\Pi_{q_{i}}\circ \varsigma_{i}^{q}:\mathbb{C}[\mathrm{G]_{q}}\longrightarrow C^{*}(S)\subseteq \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})).$$
Let $w\in W$, with a reduced presentation $w=s_{j_{1}}\cdots s_{j_{m}}$ (and hence $m=\mathcal{l}(w)$), and define
\begin{equation}
\pi_{w}^{q}:=\pi_{j_{1}}^{q}\boxtimes \cdots \boxtimes \pi_{j_{m}}^{q}:\mathbb{C}[\mathrm{G]_{q}}\longrightarrow C^{*}(S)^{\otimes \mathcal{l}(w)}\subseteq\mathcal{B}(\ell^{2}(\mathbb{Z}_{+})^{\otimes \mathcal{l}(w)})
\end{equation}
When $e\in W$ is the identity element, we let $\pi_{e}=\epsilon_{q}$ (corresponding to the empty reduced presentation).
From~\cite{ks}, we know that $\pi_{w}^{q}$ does not depend on the reduced decomposition, in the sense that if we have two reduced presentations $w=s_{j_{1}}\cdots s_{j_{m}}=s_{j'_{1}}\cdots s_{j'_{m}},$ then the two corresponding $*$-representations
$$
\begin{array}{cccc}
\pi_{j_{1}}^{q}\boxtimes \cdots \boxtimes \pi_{j_{m}}^{q}, & \pi_{j'_{1}}^{q}\boxtimes \cdots \boxtimes \pi_{j'_{m}}^{q}
\end{array}
$$
are unitarily equivalent. For simplicity, let us write $$\Hh_{w}:=\ell^{2}(\mathbb{Z}_{+})^{\otimes \mathcal{l}(w)}.$$ Thus $\pi_{w}^{q}$ is a $*$-representation $\mathbb{C}[\mathrm{G]_{q}}\to \mathcal{B}(\Hh_{w}).$ Let $\mathrm T\subseteq \mathrm G$ be a maximal torus, corresponding to the real sub-algebra $\mathfrak{t}\subseteq \mathfrak{g}.$ Let $\omega_{i},$ $i=1,\dots,n,$ be the fundamental weights for $\mathfrak g.$ We have an isomorphism $\mathrm T\cong \mathbb{T}^{n},$ given by
\begin{equation}\label{TnT}
\begin{array}{cccc}
t=e^{x}\in \mathrm T\mapsto (e^{\omega_{1}(x)},\dots,e^{\omega_{n}(x)})\in \mathbb{T}^{n},
\end{array}
\end{equation}
where $x\in\mathfrak t.$ For every $t\in\mathrm T,$ we have mutually non-equivalent one-dimensional $*$-representations $\chi_{t}:\mathbb{C}[\mathrm{G]_{q}}\to \mathbb{C},$ such that, for $s,t\in\mathrm T,$ we have $\chi_{s}\boxtimes \chi_{t}=\chi_{st}$ and $\chi_{t}\boxtimes \chi_{t^{-1}}=\chi_{1}=\epsilon_{q}.$ By~\cite{nt}, we can for every $t\in \mathrm T$ associate a unitary operator $U_{t}\in \prod_{\lambda \in P_{+}}\mathcal{B}(V^{q}_{\lambda}),$ such that if $x\in \mathfrak t$ satisfies $t=e^{x},$ then for all $\lambda \in P_{+}$ and $\gamma\in P$
\begin{gather}
\begin{array}{cccc}
\langle U_{t}\eta, \xi \rangle=\chi_{t}(C_{\eta,\xi}^{\lambda}), & \eta,\xi\in V_{\lambda}^{q},
\end{array}\label{maxtor}\\
\begin{array}{ccc}
U_{t}\eta=e^{\gamma(x)}\eta, & \eta\in V_{\lambda}^{q}(\gamma).
\end{array}\label{maxtor2}
\end{gather}
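For example, for $\mathrm{G=SU_{2}}$ (so that $\mathrm T\cong\mathbb{T}$ and $\omega_{1}=\lambda$), and normalizing the weight vectors $\xi_{1}=\xi_{\lambda},$ $\xi_{2}=\xi_{-\lambda}$ of $V^{q}_{\lambda},$ the formulas~\eqref{maxtor} and~\eqref{maxtor2} give, for $t=e^{x},$
$$
\begin{array}{ccc}
\chi_{t}(t_{11})=e^{\lambda(x)}, & \chi_{t}(t_{22})=e^{-\lambda(x)}, & \chi_{t}(t_{12})=\chi_{t}(t_{21})=0,
\end{array}
$$
so that, under the identification~\eqref{TnT}, $\chi_{t}(t_{11})$ is just the coordinate of $t$ in $\mathbb{T}.$ We record this only as an illustration of~\eqref{maxtor} and~\eqref{maxtor2}.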
The following theorem characterizes all irreducible $*$-representations of $\mathbb{C}[\mathrm{G]_{q}}$ (up to unitary equivalence).
\begin{thm}[{Theorem~6.2.7 in~\cite{ks}}]
\begin{enumerate}[(i)]
\item For $w\in W$ and $t\in \mathrm T,$ the $*$-representations $\pi_{w}^{q}\boxtimes \chi_{t}$ are irreducible and mutually non-equivalent, and
\item every irreducible $*$-representation $\pi$ of $\mathbb{C}[\mathrm{G]_{q}}$ is unitarily equivalent to some $\pi_{w}\boxtimes \chi_{t},$ $w\in W,$ $t\in \mathrm T.$
\end{enumerate}
\end{thm}
\section{\bf\textsc{Properties of $C(\mathrm{G)_{q}}$ under $*$-Representations}}
\subsection{\bf\textsc{Paths in the Weyl Group and Subsets of $\mathrm T$}}
For elements $v,w\in W,$ we write $v\lhd w$ to mean that
\begin{enumerate}[(i)]
\item $v<w$ in the Bruhat order,
\item there is no $r\in W,$ such that $v<r<w.$
\end{enumerate}
By Theorem~2.2.6 in~\cite{abfb}, this means that there is an $\alpha\in \Phi_{+}$ such that $v\alpha=w$ and $l(v)=l(w)-1.$ In keeping with the established terminology, we also say that $w$ \textit{covers} $v$ if $v\lhd w.$
In general, we write $v\lhd^{(k)} w$ if we have elements $r_{1},\dots,r_{k-1}\in W$ such that
$$
v\lhd r_{1}\lhd\dots\lhd r_{k-1}\lhd w.
$$
It is a property of $W$ that every chain
$$
v\lhd r \lhd \dots\lhd w
$$
must have the same length and hence that the relation $\lhd^{(k)}$ is actually well-defined for $k>1$ (see Theorem~2.2.6 in~\cite{abfb}).
\\
Let $v,w\in W.$ If $v\lhd w,$ we also write $v\overset{\gamma}{\leadsto}w,$ for a $\gamma\in \Phi_{+},$ if $v\gamma =w.$ In general, if $v\leq w,$ then we say that we have a path from $v$ to $w$
\begin{equation}\label{paths}
v=v_{1}\overset{\gamma_{1}}{\leadsto}v_{2}\overset{\gamma_{2}}{\leadsto}\dots\overset{\gamma_{m-1}}{\leadsto}v_{m}\overset{\gamma_{m}}{\leadsto}v_{m+1}=w
\end{equation}
if $v_{j}\lhd v_{j+1}$ and $v_{j}\gamma_{j}=v_{j+1}$ for $j=1,\dots,m.$ Clearly, every path from $v$ to $w$ has the same length $m=l(w)-l(v).$
We write~\eqref{paths} as the composition of paths
\begin{equation}
\begin{array}{cccc}
v\overset{\gamma}{\leadsto} w, & \text{for $\gamma=\gamma_{1}\circ\dots\circ \gamma_{m}$}
\end{array}
\end{equation}
to indicate that we have a specific path between $v$ and $w.$
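As a small illustration (in type $A_{2},$ where $W\cong S_{3}$ with simple reflections $s_{1},s_{2}$; the example is not used later), the chain
$$
e\lhd s_{1}\lhd s_{1}s_{2}\lhd s_{1}s_{2}s_{1}
$$
is a path from $e$ to the longest element $s_{1}s_{2}s_{1}=s_{2}s_{1}s_{2},$ and, as noted above, every path between these two elements has the same length $3=l(s_{1}s_{2}s_{1})-l(e).$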
\\
For each path $v\overset{\gamma}{\leadsto}w$ we associate a closed connected subgroup $\mathrm T_{\gamma}\subseteq \mathrm T$ by taking the exponential of the real subspace of $\mathfrak{t}$ spanned by $h_{\gamma_{i}}$ for $i=1,\dots,m.$ For the path $v \overset{\gamma_{1}}{\leadsto}r \overset{\gamma_{2}}{\leadsto} w$ and $\gamma =\gamma_2\circ\gamma_1,$ we have \begin{equation}\label{gamma}\mathrm T_{\gamma_{2}}\mathrm T_{\gamma_{1}}:=\{ts:t\in \mathrm T_{\gamma_{2}},s\in \mathrm T_{\gamma_{1}}\}=\mathrm T_{\gamma}.\end{equation} Let $\mathrm T_{v}^{w}$ be the union of all the subgroups of $T$ generated by paths from $v$ to $w.$ From~\eqref{gamma}, it follows that we have the following multiplicative property:
If $v\leq r\leq w$ then $$\mathrm T_{v}^{r}\mathrm T_{r}^{w}\subseteq \mathrm T_{v}^{w}.$$
Clearly, it follows from~\eqref{paths} that for $v<w,$ we have
\begin{equation}\label{mult}
\mathrm T_{v}^{w}=\underset{{v<r\lhd w}}{\cup} \mathrm T_{v}^{r}\mathrm T_{r}^{w},
\end{equation}
where the union ranges over all $r\in W$ such that $v<r\lhd w.$ For later use, we also note the following special case of~\eqref{gamma}: if $v\overset{\gamma}{\leadsto}r\lhd w$ with $r \alpha= w$ then
\begin{equation}\label{gamma1}
\mathrm T_{\gamma}\mathrm T_{r}^{w}=\mathrm T_{\alpha\circ \gamma}.
\end{equation}
\subsection{\bf\textsc{Ideals and Quotients}}
We will use the following results from~\cite{nt} (though stated in a less general fashion in order to suit our purposes).
\begin{thm}[Theorem~$4.1$ $(ii)$ in~\cite{nt}]\label{ntthm}
Let $\sigma\in W$ and $\mathrm Y\subseteq \mathrm T.$ For any $r\in W$ and $t\in T,$ the kernel of the representation $\pi_{r}^{q}\boxtimes \chi_{t}$ contains the intersection of the kernels of the representations $\pi_{\sigma}^{q}\boxtimes \chi_{s},$ $s\in Y$ of $C(\mathrm{G)_{q}}$ if and only if $r\leq \sigma$ and $t\in \overline{\mathrm Y}\mathrm T_{r}^{\sigma}.$
\end{thm}
\begin{lem}[Lemma~$4.5$ in~\cite{nt}]\label{ntlem}
Let $t\in \mathrm T$ and let $w\in W$. If $x\in C(\mathrm{G)_{q}}$ is such that $(\pi_{v}^{q}\boxtimes \chi_{s})(x)=0$ for all $v\in W$ with $v<w$ and all $s\in t \mathrm T_{v}^{w},$ then
\begin{equation}\label{compact}
(\pi_{w}^{q}\boxtimes\chi_{t})(x)\in \K_{w}.
\end{equation}
\end{lem}
Recall the definition of $C_{\eta,\xi}^{\lambda}\in \mathbb{C}[\mathrm{G]_{q}},$ given by~\eqref{cgq}. To avoid multiple subscripts, let us write $w\cdot \lambda$ in place of $\xi_{w\cdot \lambda}$ in~\eqref{cgq}. Thus, for example, we write $C^{\lambda}_{w\cdot\lambda,\lambda}$ instead of $C^{\lambda}_{\xi_{w\cdot \lambda},\xi_{\lambda}}.$
\begin{lem}[Lemma~2.3 in~\cite{nt}]\label{lem2}
Let $w\in W$ and $\lambda\in P_{+}.$
\begin{enumerate}[(i)]
\item $\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})$ is a compact contractive diagonalizable operator with zero kernel, and the vector $e_{0}^{\otimes \mathcal{l}(w)}\in \Hh_{w}$ is its only eigenvector (up to scalar) with an eigenvalue of absolute value $1$.
\item If $\zeta\in V^{q}_{\lambda}$ is orthogonal to $(\mathrm{U_{q}}(\mathfrak{b}))V^{q}_{\lambda}(w\cdot \lambda),$ then $$\pi_{w}^{q}(C_{\zeta,\lambda}^{\lambda})=0.$$
\end{enumerate}
\end{lem}
When $q=1,$ we get a Hopf $*$-algebra homomorphism $\tau_{1}=\tau:\mathbb{C}[\mathrm{G]}\to \mathbb{C}[\mathrm{T]}$ by restriction to the subgroup $\mathrm{T\subseteq G}$ (here $\mathbb{C}[\mathrm{T]}$ is the Hopf $*$-algebra of trigonometric polynomials on $\mathrm T$). For general $q\in (0,1),$ we define a surjective Hopf $*$-algebra homomorphism $\tau_{q}:\mathbb{C}[\mathrm{G]_{q}}\to \mathbb{C}[\mathrm{T]},$ that extends to a homomorphism of compact quantum groups $\tau_{q}: C(\mathrm{G)_{q}}\to C(\mathrm T)$. We define $\tau_{q}$ in the following way:
The compact operators $\K\subseteq \mathcal{B}(\ell^{2}(\mathbb{Z}_{+}))$ form a $*$-ideal in $C^*(S),$ and it is well known that there is an isomorphism $p:C^*(S)/\K\to C(\mathbb{T})$ such that $S^{*}+\K\mapsto z$ (here $z\in C(\mathbb{T})$ is the coordinate function). Moreover, it is easy to see that we actually have a homomorphism of Hopf $*$-algebras $\beta_{q}:\mathbb C[\mathrm{SU_2]_q}\to \mathbb{C}[\mathbb T]\subseteq C(\mathbb{T})$ that factors as
\begin{equation}\label{su2}
\mathbb C[\mathrm{SU_2]_q}\overset{\Pi_{q}}{\longrightarrow} C^*(S) \overset{p}{\longrightarrow} C^*(S)/\K\cong C(\mathbb T)
\end{equation}
and such that $\beta_{q}(t_{11}^{q})=z.$ Consider now the $*$-homomorphism
\begin{equation}\label{tauq}
\tau_{q}:\mathbb C[\mathrm{G]_q}\overset{\pi_{1}^{q}\boxtimes\cdots \boxtimes \pi_{n}^{q}}\longrightarrow C^*(S)^{\otimes n}\overset{p\otimes\cdots\otimes p}\longrightarrow C(\mathbb T)^{\otimes n}\cong C(\mathrm T),
\end{equation}
with the isomorphism $C(\mathbb T)^{\otimes n}\cong C(\mathrm T)$ induced by the isomorphism $\mathbb{T}^{n}\cong \mathrm T$ given by~\eqref{TnT}. By using~\eqref{su2}, we can also factor $\tau_{q}$ as
\begin{equation}\label{tauq2}
\mathbb{C}[\mathrm{G]_{q}}\overset{(\varsigma_{1}^{q}\otimes \cdots\otimes \varsigma_{n}^{q})\circ \Delta_{q}^{(n)}}\longrightarrow \mathbb{C}[\mathrm{SU_{2}]_{q_{1}}}\otimes \cdots \otimes \mathbb{C}[\mathrm{SU_{2}]_{q_{n}}}\overset{\beta_{q_{1}}\otimes \cdots\otimes \beta_{q_{n}}}\longrightarrow \mathbb{C}[\mathbb T]^{\otimes n}\cong \mathbb{C}[\mathrm T].
\end{equation}
If we consider $\mathbb{C}[\mathrm{SU_{2}]_{q_{1}}}\otimes \cdots \otimes \mathbb{C}[\mathrm{SU_{2}]_{q_{n}}}$ as a tensor product of Hopf $*$-algebras, hence also a Hopf $*$-algebra, then it is easy to check that the $*$-homomorphisms in~\eqref{tauq2} are actually Hopf $*$-algebra morphisms. Thus, $\tau_{q}$ is a morphism of Hopf $*$-algebras.
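As a sanity check on~\eqref{su2} (an elementary observation using only~\eqref{SC} and~\eqref{SC1}, recorded here for later convenience): the operators $d_{q}$ and $C_{q}-I$ are compact, since their eigenvalues $q^{n}$ and $\sqrt{1-q^{2n}}-1$ tend to $0.$ Hence, modulo $\K,$ we have $\Pi_{q}(t_{11})\equiv S^{*},$ $\Pi_{q}(t_{22})\equiv S$ and $\Pi_{q}(t_{12})\equiv\Pi_{q}(t_{21})\equiv 0,$ so that
$$
\begin{array}{ccc}
\beta_{q}(t_{11})=z, & \beta_{q}(t_{22})=\bar z, & \beta_{q}(t_{12})=\beta_{q}(t_{21})=0.
\end{array}
$$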
\begin{lem}\label{maxtau}
\begin{enumerate}[(i)]
\item The $*$-homomorphism $\tau_{q}:C(\mathrm{G)_{q}}\to C(\mathrm T)$ is surjective,
\item every $\chi_{t},$ $t\in \mathrm T,$ factors as
$$
\chi_{t}:C(\mathrm{G)_{q}}\overset{\tau_{q}}\longrightarrow C(\mathrm T)\overset{ev_{t}}\longrightarrow \mathbb C,
$$
\item for $\eta\in V_{\lambda}(\gamma_{1}),$ $\xi \in V_{\lambda}(\gamma_{2}),$ we have $\tau_{q}(C_{\eta,\xi}^{\lambda})=0$ unless $\gamma_{1}=\gamma_{2}$ and $\langle \eta,\xi\rangle\neq 0,$
\item let $\omega_{i},$ $i=1,\dots, n,$ be the fundamental weights, then the set $\tau_{q}(C_{\omega_{i},\omega_{i}}^{\omega_{i}}),$ $i=1,\dots,n,$ generates $C(\mathrm T)$ as a $C^{*}$-algebra.
\end{enumerate}
\end{lem}
\begin{proof}
Clearly, $(i)$ follows from $(iv)$ or $(ii).$ We have that every one-dimensional $*$-representation of $\mathbb{C}[\mathrm{G]_{q}}$ is of the form $\chi_{t},$ for some $t\in \mathrm T,$ and different weight spaces are orthogonal. Hence we get $(iii)$ from~\eqref{maxtor} and~\eqref{maxtor2} by point evaluation. Note that the same argument shows that $(iii)$ holds for \textit{any} $*$-homomorphism from $\mathbb{C}[\mathrm{G]_{q}}$ to a commutative $C^{*}$-algebra.
By Lemma~$2.2.1$ in~\cite{ks}, the elements of the form $C^{\lambda}_{\eta,\lambda},$ $\lambda\in P_{+},$ $\eta\in V^{q}_{\lambda},$ generate $\mathbb{C}[\mathrm{G]_{q}}$ as a $*$-algebra. Thus it follows from $(iii)$ that the elements in $C(\mathrm T)$ of the form $\tau_{q}(C_{\lambda,\lambda}^{\lambda}),$ $\lambda\in P_{+},$ generate the image of $\tau_{q}.$ As every $\lambda\in P_{+}$ is a $\mathbb{Z}_{+}$-linear combination of the fundamental weights, it follows from~\eqref{maxtor2} that the elements $\tau_{q}(C_{\omega_{i},\omega_{i}}^{\omega_{i}}),$ $i=1,\dots,n,$ generate the image of $\tau_{q}.$ Thus $(ii)\Rightarrow (i)\Rightarrow (iv).$
To prove $(ii),$ we establish a one-to-one correspondence between one-dimensional $*$-representations and point evaluations of $\tau_{q}.$ By (\cite{ksch}, Theorem~$14,$ Section 6.1.5), the set of elements $F^{i}K^{j}E^{k},$ $j\in \mathbb Z,$ $i,k\in \mathbb{Z}_{+},$ is a basis for $\mathrm{U_{q}}(\mathfrak{su}_{2}).$ Using this, and keeping in mind that $\xi_{\omega_{i}}$ is a highest weight vector, we obtain
\begin{equation}\label{omegai}
\varsigma_{i}^{q}(C_{\omega_{j},\omega_{j}}^{\omega_{j}})=
\begin{cases}
t_{11}^{q_{i}}, & \text{if $i=j$}\\
I, & \text{for $i\neq j.$}
\end{cases}
\end{equation}
It then follows that $(\beta_{q_{i}}\circ \varsigma^{q}_{i})(C_{\omega_{j},\omega_{j}}^{\omega_{j}})=z$ for $i=j$ and $(\beta_{q_{i}}\circ \varsigma^{q}_{i})(C_{\omega_{j},\omega_{j}}^{\omega_{j}})=I$ if $i\neq j.$ If we extend $\xi_{1}=\xi_{\omega_{i}}$ by $\xi_{2},\dots,\xi_{m}$ to an orthonormal basis for $V_{\omega_{i}}^{q},$ then we get from~\eqref{xi1} and the comment following the proof of $(iii)$ that, letting $z_{i}$ be the $i$-th coordinate function on $\mathbb{T}^{n},$
\begin{equation}
\begin{array}{ccc}
\tau_{q}(C_{\omega_{i},\omega_{i}}^{\omega_{i}})=z_{i}, & i=1,\dots, n,
\end{array}
\end{equation}
and from this it follows that the range of $\tau_{q}$ is dense in $C(\mathrm{T}).$ By~\eqref{maxtor2}, if $t=e^{x},$ $x\in \mathfrak t,$ then we have $\chi_{t}(C^{\omega_{j}}_{\omega_{j},\omega_{j}})=e^{\omega_{j}(x)}$ for $j=1,\dots,n.$ It thus follows that if we use the identification $\mathrm T\cong \mathbb T^{n}$ given by~\eqref{TnT}, then $t=(e^{\omega_{1}(x)},\dots, e^{\omega_{n}(x)})$ and hence
$$
\begin{array}{ccc}
\chi_{t}(C^{\omega_{j}}_{\omega_{j},\omega_{j}})=e^{\omega_{j}(x)}=\mathrm{ev}_{t}(z_{j})=(\mathrm{ev}_{t}\circ \tau_{q})(C^{\omega_{j}}_{\omega_{j},\omega_{j}}), & j=1,\dots, n.
\end{array}
$$
\end{proof}
Let $\mathbb{C}[\mathrm{G]_{q}^{\textsc{inv}}}\subseteq \mathbb{C}[\mathrm{G]_{q}}$ denote the $*$-subalgebra of elements invariant under the left and right actions of the maximal torus $\mathrm T.$ By definition, this is the set of $x\in\mathbb{C}[\mathrm{G]_{q}}$ such that for every $t\in \mathrm T,$ we have $L_{t}(x)=x=R_{t}(x),$ where $L_{t}$ and $R_{t}$ are given by
\begin{equation}\label{laction}
\begin{array}{cc}
L_{t}:\mathbb{C}[\mathrm{G]_{q}}\overset{\Delta_{q}}{\longrightarrow} \mathbb{C}[\mathrm{G]_{q}}\otimes \mathbb{C}[\mathrm{G]_{q}}\overset{\tau_{q}\otimes \iota}{\longrightarrow} C(\mathrm T)\otimes \mathbb{C}[\mathrm{G]_{q}}\overset{ ev_{t}\otimes\iota}{\longrightarrow} \mathbb{C}[\mathrm{G]_{q}} & \text{(Left-action).}
\end{array}
\end{equation}
\begin{equation}\label{raction}
\begin{array}{ccc}
R_{t}:\mathbb{C}[\mathrm{G]_{q}}\overset{\Delta_{q}}{\longrightarrow} \mathbb{C}[\mathrm{G]_{q}}\otimes \mathbb{C}[\mathrm{G]_{q}}\overset{\iota\otimes \tau_{q}}{\longrightarrow} \mathbb{C}[\mathrm{G]_{q}}\otimes C(\mathrm T)\overset{\iota\otimes ev_{t}}{\longrightarrow} \mathbb{C}[\mathrm{G]_{q}} & \text{(Right-action)}
\end{array}
\end{equation}
Clearly $\mathbb{C}[\mathrm{G]_{q}^{\textsc{inv}}}$ is a $*$-subalgebra of $\mathbb{C}[\mathrm{G]_{q}}.$
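Unwinding~\eqref{laction} and~\eqref{raction} in Sweedler notation, and using Lemma~\ref{maxtau}~$(ii)$ to identify $\mathrm{ev}_{t}\circ\tau_{q}=\chi_{t},$ the two actions read
$$
\begin{array}{ccc}
L_{t}(x)=\sum \chi_{t}(x_{(1)})\,x_{(2)}, & R_{t}(x)=\sum \chi_{t}(x_{(2)})\,x_{(1)}, & x\in\mathbb{C}[\mathrm{G]_{q}},\ t\in \mathrm T;
\end{array}
$$
we record this reformulation only for convenience.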
\begin{lem}\label{lem3}
For every $w\in W,$ there exists $\Upsilon_{w}\in \mathbb{C}[\mathrm{G]_{q}^{\textsc{inv}}}$ such that
\begin{enumerate}[(i)]
\item $\pi_{w}^{q}(\Upsilon_{w})\in \mathcal{B}(\Hh_{w})$ is a compact contractive positive operator with dense range,
\item $e_{0}^{\otimes \mathcal{l}(w)}\in \Hh_{w}$ is the only eigenvector of $\pi_{w}^{q}(\Upsilon_{w})$ (up to a scalar multiple) with eigenvalue $1,$
\item $\pi_{v}^{q}(\Upsilon_{w})\neq 0$ if and only if $v\geq w .$
\end{enumerate}
\end{lem}
\begin{proof}
Take any $\lambda\in P_{++}$ and consider $C_{w\cdot \lambda,\lambda}^{\lambda}.$ By combining Corollary~\ref{vnotw} with Lemma~\ref{lem2} it follows that $\pi_{v}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})=0$ for any $v\not \geq w.$ If $v\geq w,$ then as $1\in \mathrm T_{w}^{v}$ (the identity of $\mathrm T$) it follows from Theorem~\ref{ntthm} that we have $\ker \pi_{v}^{q}\subseteq \ker \pi_{w}^{q}.$ As $\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})\neq 0,$ it thus follows that also $\pi_{v}^{q}(C_{w\cdot \lambda,\lambda}^{\lambda})\neq 0.$ If we extend $\xi_{\lambda},\xi_{w\cdot\lambda}$ to an orthonormal basis for $V^{q}_{\lambda},$ then we get from~\eqref{xi1} and Lemma~\ref{maxtau} $(iii)$ that
\begin{equation}\label{lraction}
\begin{array}{cccc}
L_{t}(C_{w\cdot\lambda,\lambda}^{\lambda})=\chi_{t}(C_{w\cdot\lambda,w\cdot\lambda}^{\lambda})\cdot C_{w\cdot\lambda,\lambda}^{\lambda}, & R_{t}(C_{w\cdot\lambda,\lambda}^{\lambda})=\chi_{t}(C_{\lambda,\lambda}^{\lambda})\cdot C_{w\cdot\lambda,\lambda}^{\lambda},& t\in \mathrm T.
\end{array}
\end{equation}
Let us now define $\Upsilon_{w}:=(C_{w\cdot\lambda,\lambda}^{\lambda})^{*}C_{w\cdot\lambda,\lambda}^{\lambda}.$ As $L_{t}$ and $R_{t}$ are $*$-automorphisms for all $t\in\mathrm T,$ it follows from~\eqref{lraction} that $\Upsilon_{w}\in \mathbb{C}[\mathrm{G]_{q}^{\textsc{inv}}}.$ Positivity follows from the definition of $\Upsilon_{w},$ and the other claims in $(i)$ and $(iii)$ follow by Lemma~\ref{lem2} $(i)$. To see $(ii),$ note that
\begin{equation}\label{calc1}
\langle \pi_{w}^{q}(\Upsilon_{w})e_{0}^{\otimes \mathcal{l}(w)},e_{0}^{\otimes \mathcal{l}(w)}\rangle=\|\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})e_{0}^{\otimes \mathcal{l}(w)} \|^{2}= \|e_{0}^{\otimes \mathcal{l}(w)} \|^{2}=1.
\end{equation}
As $\pi_{w}^{q}(\Upsilon_{w})$ is a positive contraction, it follows that $e_{0}^{\otimes \mathcal{l}(w)}$ must be an eigenvector with eigenvalue $1$. To see that $e_{0}^{\otimes \mathcal{l}(w)}$ is the only eigenvector (up to a scalar multiple) of $\pi_{w}^{q}(\Upsilon_{w})$ with eigenvalue $1,$ notice that~\eqref{calc1} gives that $e_{0}^{\otimes \mathcal{l}(w)}$ is also an eigenvector for $\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})^{*}$ and thus the subspace generated by $e_{0}^{\otimes \mathcal{l}(w)}$ is actually reducing for $\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda}).$ By $(i)$ of Lemma~\ref{lem2}, it follows that $\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})$ must have norm strictly less than $1$ when restricted to the orthogonal complement of $e_{0}^{\otimes \mathcal{l}(w)}$ (otherwise there would be another eigenvector orthogonal to $e_{0}^{\otimes \mathcal{l}(w)}$ with an eigenvalue of absolute value $1$). The same then holds for $\pi_{w}^{q}(\Upsilon_{w})=\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda})^{*}\pi_{w}^{q}(C_{w\cdot\lambda,\lambda}^{\lambda}).$
\end{proof}
\begin{defn}
Let us call a $*$-homomorphism of $\mathbb{C}[\mathrm{G]_{q}}$ a \textit{commutative} $*$-representation if the image sits inside a commutative $C^{*}$-algebra.
\end{defn}
Let $\chi:\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm X)$ be a commutative $*$-representation. As every one-dimensional $*$-representation of $\mathbb{C}[\mathrm{G]_{q}}$ factors throught the commutative $C^{*}$-algebra $C(\mathrm T),$ it follows that the commutative $*$-representation $\chi$ factors as
$$
\chi=\zeta\circ \tau_{q}
$$
$$
\mathbb{C}[\mathrm{G]_{q}}\overset{\tau_{q}}{\to} C(\mathrm T)\overset{\zeta}{\to} C(\mathrm X)
$$
for a unique $*$-homomorphism $\zeta.$
\begin{defn}
Let $$\begin{array}{ccc}\chi^{q}:\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm X), & q\in (0,1)\end{array}$$ be a family of $*$-homomorphisms, where $X$ is a fixed compact Hausdorff space. We say that the $*$-homomorphisms $\{\chi^{q}\}_{q\in (0,1)}$ are \textit{$q$-independent} if in the factorization $$\chi^{q}=\zeta^{q}\circ \tau_{q},$$
$$
\mathbb{C}[\mathrm{G]_{q}}\overset{\tau_{q}}{\to} C(\mathrm T) \overset{\zeta^{q}}{\to} C(\mathrm X),
$$
we have $\zeta^{q}=\zeta^{s}$ for all $q,s\in (0,1).$
\end{defn}
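For instance, the family $\{\tau_{q}\}_{q\in (0,1)}$ is $q$-independent (with $\zeta^{q}=\iota$), and, by Lemma~\ref{maxtau}~$(ii)$, so is the family $\{\chi_{t}\}_{q\in (0,1)}$ for any fixed $t\in \mathrm T$ (with $\zeta^{q}=\mathrm{ev}_{t}$); the commutative $*$-representations $\chi_{v}^{w}$ constructed in Proposition~\ref{prop2} below are also of this type.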
\begin{defn}
\begin{enumerate}[(a)]
\item For $q\in (0,1),$ let $\Bb_{w}^{q}\subseteq \mathcal{B}(\Hh_{w})$ be the closure of the image of $\pi_{w}^{q}:\mathbb{C}[\mathrm{G]_{q}}\rightarrow \mathcal{B}(\Hh_{w}).$
\item If $\chi:\mathbb{C}[\mathrm{G]_{q}}\rightarrow C(\mathrm X)$ is a commutative $*$-representation, then let $\Bb_{w,\chi}^{q}$ be the closure of the image of $\pi_{w}^{q}\boxtimes \chi:\mathbb{C}[\mathrm{G]_{q}}\rightarrow \Bb_{w}^{q}\otimes C(\mathrm X).$
\end{enumerate}
\end{defn}
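For a guiding example (not needed in what follows): for $\mathrm{G=SU_{2}}$ one may take $\varsigma_{1}^{q}=\iota,$ so that $\pi_{s_{1}}^{q}=\Pi_{q}$ for the nontrivial Weyl group element $s_{1}$; then $\Bb_{s_{1}}^{q}\subseteq C^{*}(S)$ by~\eqref{SC1}, $\Bb_{s_{1}}^{q}$ contains $\K$ (cf.\ Lemma~\ref{lem5} below with $\chi=\epsilon_{q}$), and modulo $\K$ it contains the generator $z$ of $C(\mathbb{T})\cong C^{*}(S)/\K$ (since $C_{q}-I$ is compact, so $\Pi_{q}(t_{11})\equiv S^{*}$ modulo $\K$); hence $\Bb_{s_{1}}^{q}=C^{*}(S)$ is the Toeplitz algebra.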
\begin{prop}\label{prop2}
Let $w\in W.$ Let $\K_{w}\subseteq \mathcal{B}(\Hh_{w})$ be the space of compact operators and let
$$
p_{w}: \mathcal{B}(\Hh_{w})\longrightarrow \mathcal {Q}(\Hh_{w})
$$
be the quotient map to the Calkin algebra. For every $v\lhd w,$ there exists a subset $\mathrm T_{v}^{w}\subseteq \mathrm T$ and a commutative $q$-independent $*$-representation $\chi_{v}^{w}:\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm T_{v}^{w}),$ such that the map
\begin{equation}\label{isomorph}
\begin{array}{ccc}\eta_{w}^{q}:\pi_{w}^{q}(x)+\K_{w}\mapsto \underset{v\lhd w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes \chi_{v}^{w})(x), & \text{for $x\in \mathbb{C}[\mathrm{G]_{q}},$}\end{array}
\end{equation}
(where the sum ranges over all $v\in W$ covered by $w$) determines an isomorphism
\begin{equation}\label{lower}
\eta_{w}^{q}:\Bb^{q}_{w}/\K_{w}\longrightarrow\overline{ \underset{v\lhd w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes \chi_{v}^{w})(\mathbb{C}[\mathrm{G]_{q}})}.
\end{equation}
\end{prop}
We will postpone the proof of Proposition~\ref{prop2} until after Lemma~\ref{lem5}.
\begin{lem}\label{lem4}
If $x\in \mathbb{C}[\mathrm{G]_{q}^{\textsc{inv}}},$ then for any $w\in W$ and any commutative $*$-representation $\chi,$ we have
$$
(\pi_{w}\boxtimes \chi)(x)=\pi_{w}(x)\otimes I.
$$
\end{lem}
\begin{proof}
By~\eqref{raction} and~\eqref{laction}, the left and right-actions by $t\in \mathrm T$ on $\mathbb{C}[\mathrm{G]_{q}}$ are respectively given as the compositions
$$
\mathbb{C}[\mathrm{G]_{q}}\overset{(\tau_{q}\otimes\iota)\circ \Delta_{q}}{\to} C(\mathrm T)\otimes\mathbb{C}[\mathrm{G]_{q}}\overset{\mathrm{ev}_{t}\otimes \iota}{\to} \mathbb{C}[\mathrm{G]_{q}},
$$
$$
\mathbb{C}[\mathrm{G]_{q}}\overset{(\iota\otimes\tau_{q})\circ \Delta_{q}}{\to} \mathbb{C}[\mathrm{G]_{q}}\otimes C(\mathrm T)\overset{ \iota\otimes \mathrm{ev}_{t}}{\to} \mathbb{C}[\mathrm{G]_{q}}.
$$
Let $x\in \mathbb{C}[\mathrm{G]_{q}^{\textsc{inv}}}.$ Clearly, it follows $L_{t}(x)=R_{t}(x)=x$ for all $t\in \mathrm T$ if and only if
$$
\begin{array}{ccc}
(\iota\otimes\tau_{q})\circ \Delta_{q}(x)=x\otimes I, &(\tau_{q}\otimes\iota)\circ \Delta_{q}(x)=I\otimes x.
\end{array}
$$
The statement now follows from the fact that any $*$-homomorphism $$\chi:C(\mathrm{G)_{q}}\rightarrow C(\mathrm X)$$ factors as $\chi=\zeta\circ\tau_{q}$ for a unique $*$-representation $\zeta:C(\mathrm T)\to C(\mathrm X).$
\end{proof}
\begin{lem}\label{lem5}
Let $\chi:C(\mathrm{G)_{q}}\rightarrow C(\mathrm X)$ be a $*$-homomorphism such that we have $\chi(C(\mathrm{G)_{q}})=C(\mathrm X).$ Then for any $w\in W,$
\begin{equation}\label{subset}
\K_{w}\otimes C(\mathrm X)\subset \Bb_{w,\chi}^{q}.
\end{equation}
\end{lem}
\begin{proof}
If we, for any $\lambda\in P_{++},$ extend $\xi_{\lambda},\xi_{w\cdot \lambda}$ to an orthonormal basis of $V_{\lambda}^{q},$ then we get from~\eqref{xi1} and Lemma~\ref{maxtau} $(iii)$ that $$(\pi_{w}^{q}\boxtimes \tau_{q})\left(C_{w\cdot\lambda,\lambda}^{\lambda}\right)=\pi_{w}^{q}\left(C_{w\cdot\lambda,\lambda}^{\lambda}\right)\otimes \tau_{q}\left(C_{\lambda,\lambda}^{\lambda}\right).$$ By Lemma~\ref{lem3} and Lemma~\ref{lem4}, if $p_{0}$ is the orthogonal projection onto $e_{0}^{\otimes \mathcal{l}(w)},$ then $p_{0}\otimes I\in \Bb_{w,\tau_{q}}^{q}.$ Thus $$(p_{0}\otimes I)\left((\pi_{w}^{q}\boxtimes \tau_{q})\left(C_{w\cdot\lambda,\lambda}^{\lambda}\right)\right)(p_{0}\otimes I)\in \Bb_{w,\tau_{q}}^{q}$$ and is by Lemma~\ref{lem2} a non-zero constant multiple of $p_{0}\otimes \tau_{q}\left(C_{\lambda,\lambda}^{\lambda}\right).$ Since the functions $$\begin{array}{cccc}\tau_{q}\left(C_{\lambda,\lambda}^{\lambda}\right)\in C(\mathrm T),& \lambda\in P_{++}\end{array}$$
are generating $C(\mathrm T)$ as a $C^{*}$-algebra, it follows that \begin{equation}\label{p0}p_{0}\otimes C(\mathrm T)\subseteq \Bb_{w,\tau_{q}}^{q}.\end{equation}
By Proposition~5.5 in~\cite{stokman}, an irreducible $*$-representation of $C(\mathrm G)_{q}$ remains irreducible when restricted to
$$
C(\mathrm{G/T})_{q}\overset{\mathrm{def}}{=}\{a\in C(\mathrm G)_{q}: (\iota\otimes \tau_{q})\circ \Delta_{q}(a)=a\otimes I \}.
$$
It follows that
$$\K_{w}\otimes I\subseteq (\pi_{w}^{q}\boxtimes \tau_{q})(C(\mathrm{G/T})_{q})\subseteq (\pi_{w}^{q}\boxtimes \tau_{q})(C(\mathrm{G})_{q}),$$
and together with~\eqref{p0}, this gives~\eqref{subset}.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{prop2}]
Recall the definition of $\mathrm T_{v}^{w}\subseteq \mathrm T.$ For $v\lhd w,$ we let $\chi_{v}^{w}$ be the composition $$\chi_{v}^{w}:C(\mathrm{G)_{q}}\overset{\tau_{q}}{\longrightarrow}C(\mathrm T)\longrightarrow C(\mathrm T_{v}^{w}),$$ where the second $*$-homomorphism is the restriction of $f\in C(\mathrm T)$ to $\mathrm T_{v}^{w}\subseteq \mathrm T.$ By definition, these $*$-representations are commutative and $q$-independent. To prove the existence of the $*$-homomorphism, we use Theorem~\ref{ntthm} with $\mathrm Y=\{1\}$ (the set containing only the identity of $\mathrm T$). Thus for every $v\lhd w$ and $t\in \mathrm T_{v}^{w}$ we have that $\ker \pi_{w}^{q}\subseteq\ker (\pi_{v}^{q}\boxtimes\chi_{t})$ and hence we can define a $*$-homomorphism
$$\varphi_{v}^{w}:\Bb_{w}^{q}\longrightarrow \Bb_{v,\chi_{v}^{w}}^{q}$$
$$\begin{array}{ccc}\pi_{w}^{q}(x)\mapsto (\pi_{v}^{q}\boxtimes \chi_{v}^{w})(x), & \text{for $x\in C(\mathrm{G)_{q}}.$}\end{array}$$
By Lemma~\ref{lem3}, we have $\pi_{w}^{q}(\Upsilon_{w})\neq 0$ and $\pi_{v}^{q}(\Upsilon_{w})=0$ and by Lemma~\ref{lem4} $$(\pi_{v}^{q}\boxtimes\chi_{v}^{w})(\Upsilon_{w})=\pi_{v}^{q}(\Upsilon_{w})\otimes I=0.$$ Hence the kernel of $\varphi_{v}^{w}$ is a non-trivial ideal of $\Bb_{w}^{q}$. By Lemma~\ref{lem5}, with $\chi=\epsilon_{q},$ we have $\K_{w}\subset \Bb_{w}^{q}.$ Thus any non-trivial ideal of $\Bb_{w}^{q}$ must contain $\K_{w}$ and therefore $\K_{w}\subseteq\ker \varphi_{v}^{w}.$ Now consider the $*$-homomorphism $$\underset{v\lhd w}{\text{\Large{$\oplus$}}}\varphi_{v}^{w}:\Bb_{w}^{q}\longrightarrow \prod_{v\lhd w}\Bb_{v,\chi_{v}^{w}}^{q}.$$
By the definition of the $\varphi_{v}^{w}$'s, it follows that
\begin{equation}\label{factors}
(\underset{v\lhd w}{\text{\Large{$\oplus$}}}\varphi_{v}^{w})\circ \pi_{w}^{q}=\underset{v\lhd w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes\chi_{v}^{w}).
\end{equation}
As $\K_{w}\subset \ker \underset{v\lhd w}{\text{\Large{$\oplus$}}}\varphi_{v}^{w},$ we can factor $\underset{v\lhd w}{\text{\Large{$\oplus$}}}\varphi_{v}^{w}=:\eta_{w}^{q}\circ p_{w}$ for a $*$-homomorphism
$$\eta_{w}^{q}:\Bb_{w}^{q}/\K_{w}\longrightarrow \underset{v\lhd w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes \chi_{v}^{w})(C(\mathrm{G)_{q}}) .$$ From~\eqref{factors}, it follows that~\eqref{isomorph} holds. Clearly, by~\eqref{factors}, $\eta_{w}^{q}$ is surjective. Hence we only need to show that the kernel of $\eta_{w}^{q}$ is trivial. By definition, this is the same as showing
\begin{equation}\label{compact1}
\ker \underset{v\lhd w}{\text{\Large{$\oplus$}}}\varphi_{v}^{w}=\K_{w}.
\end{equation}
In order to prove this, we are going to show that, for any $x\in C(\mathrm{G)_{q}},$ if $(\pi_{v}^{q}\boxtimes \chi_{t})(x)=0$ for all $v\lhd w$ and $t\in \mathrm T_{v}^{w},$ then also $(\pi_{\sigma}^{q}\boxtimes \chi_{s})(x)=0$ for all $\sigma<w$ and $s\in \mathrm T_{\sigma}^{w}.$ To see this, notice that by~\eqref{mult} there is a $v\in W$ with $\sigma\leq v\lhd w$ such that $s=s_{1}s_{2}$ for some $s_{1}\in\mathrm T_{v}^{w}$ and $s_{2}\in \mathrm T_{\sigma}^{v}.$ As $(\pi_{v}^{q}\boxtimes \chi_{s_{1}})(x)=0,$ we get from Theorem~\ref{ntthm} that also $(\pi_{\sigma}^{q}\boxtimes \chi_{s})(x)=0.$ The equality~\eqref{compact1} now follows from Lemma~\ref{ntlem}.
\end{proof}
\begin{prop}\label{prop3}
Let $\mathcal S\subseteq W$ be a subset where all the elements have the same length i.e. $\mathcal{l}(w)=\mathcal{l}(v)$ for all $w,v\in \mathcal S.$ Moreover, assume that for each $v\in \mathcal S,$ we have a commutative $*$-homomorphism
$\chi_{v}:C(\mathrm{G)_{q}}\rightarrow C(\mathrm X_{v})$ such that $\chi_{v}(C(\mathrm{G)_{q}})=C(\mathrm X_{v}).$
Then
\begin{equation}\label{elements}
\prod_{v\in \mathcal S}\K_{v}\otimes C(\mathrm X_{v})\subseteq \underset{v\in \mathcal S}{\text{\Large{$\oplus$}}}(\pi_{v}\boxtimes \chi_{v})(C(\mathrm{G)_{q}})\subseteq \prod_{v\in \mathcal S}\Bb_{v,\chi_{v}}^{q}.
\end{equation}
\end{prop}
\begin{proof}
As all the elements of $\mathcal S$ have the same length, they must be mutually non-comparable in the partial ordering of $W.$ It follows from Lemma~\ref{lem3} and Lemma~\ref{lem4} that for $v\in \mathcal S,$ we have $(\pi_{v}^{q}\boxtimes\chi_{v})(\Upsilon_{v})=\pi_{v}^{q}(\Upsilon_{v})\otimes I\neq 0$ and $(\pi_{w}^{q}\boxtimes \chi_{w})(\Upsilon_{v})=0$ for any other $w\in \mathcal S.$ As $\pi_{v}^{q}(\Upsilon_{v})$ is a compact operator with dense range, it follows from Lemma~\ref{lem5} that
$$\overline{(\pi_{v}^{q}\boxtimes\chi_{v})(\Upsilon_{v}(C(\mathrm{G)_{q}}))}=\K_{v}\otimes C(\mathrm X_{v})$$
$$\begin{array}{cccc}\overline{(\pi_{w}^{q}\boxtimes\chi_{w})(\Upsilon_{v}(C(\mathrm{G)_{q}}))}=\{0\},& \text{$w\in \mathcal S$ such that $w\neq v.$}\end{array}$$
This gives~\eqref{elements}.
\end{proof}
\subsection{\bf\textsc{Continuous Deformations}}
\begin{lem}\label{field}
There are invertible co-algebra maps
$$\begin{array}{ccccc}\theta^{q}:\mathbb{C}[\mathrm{G}]\longrightarrow \mathbb{C}[\mathrm{G]_{q}}, & q\in (0,1)\end{array}$$
such that for every $w\in W,$ every $q$-independent commutative $*$-representation $\chi^{q}:\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm X)$ and any fixed $f\in \mathbb{C}[G],$ the map
\begin{equation}\label{funcq}
q\in (0,1)\mapsto (\pi_{w}^{q}\boxtimes \chi^{q})(\theta^{q}(f))\in \Bb_{w,\chi^{q}}^{q}\subseteq \mathcal{B}(\Hh_{w})\otimes C(\mathrm X)
\end{equation}
is continuous.
\end{lem}
\begin{proof}
We will refer to the proof of Theorem~1.2 in~\cite{nt2}. We remark that our notation differs from theirs. Let $t_{ij}^{q},$ $i,j=1,2$ be the generators of $\mathbb{C}[\mathrm{SU_{2}]_{q}}$ for $q\in (0,1].$ It follows from the proof that there are invertible co-algebra maps
$$
\begin{array}{cccc}
\kappa_{q}:\mathbb{C}[\mathrm{SU_{2}]}\longrightarrow \mathbb{C}[\mathrm{SU_{2}]_{q}}, & q\in(0,1], & \kappa_{1}=\id,
\end{array}
$$
such that $\kappa_{q}(t_{ij}^{1})=t_{ij}^{q}$ and for every $f\in \mathbb{C}[\mathrm{SU_{2}]},$ the image $\kappa_{q}(f)$ is a non-commutative polynomial in $t_{ij}^{q},$ $i,j=1,2,$ with coefficients continuous in $q.$ Moreover, there exists an invertible co-algebra map $\vartheta_{q}:\mathbb{C}[\mathrm{G]}\to \mathbb{C}[\mathrm{G]_{q}}$, such that for every $i=1,\dots,n$, there exists a continuous family of invertible co-algebra morphisms $\gamma^{q}_{i}$ of $\mathbb{C}[\mathrm{G}]$ that makes the following diagram commute
\begin{equation}\label{diagram1}
\begin{xy}\xymatrixcolsep{3pc}\xymatrixrowsep{3pc}\xymatrix{
\mathbb{C}[\mathrm{SU_{2}]}\ar[d]_*{\kappa_{q_{i}}}& \mathbb{C}[\mathrm{G]}\ar[l]_*{\varsigma_{i}}\ar[r]^*{\gamma^{q}_{i}} & \mathbb{C}[\mathrm{G]}\ar[dl]^*{\vartheta_{q}}\\
\mathbb{C}[\mathrm{SU_{2}]_{q_{i}}} & \mathbb{C}[\mathrm{G]_{q}}\ar[l]^*{\varsigma^{q}_{i}}&.
}\end{xy}
\end{equation}
This gives that \begin{equation}\label{kapvar}\kappa_{q_{i}}^{-1}\circ\varsigma_{i}^{q}\circ\vartheta_{q}=\varsigma_{i}\circ (\gamma_{i}^{q})^{-1}\end{equation} varies continuously in $q\in (0,1].$
The operators $C_{q}$ and $d_{q}$ given by~\eqref{SC} vary continuously in $q\in (0,1).$ Hence, by~\eqref{SC1}, it follows that the functions $q\in(0,1)\mapsto\Pi_{q}\circ \kappa_{q}(t_{ij})=\Pi_{q}(t_{ij}^{q})\in \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})),$ for $i,j=1,2,$ are continuous. Thus, for any fixed $f\in \mathbb{C}[\mathrm{SU_{2}]},$ we have a continuous function
$$
q\in (0,1)\mapsto \Pi_{q}\circ \kappa_{q}(f)\in \mathcal{B}(\ell^{2}(\mathbb{Z}_{+})).
$$
Composing this with~\eqref{kapvar}, we get that for every $f\in \mathbb{C}[\mathrm{G}],$ the function
$$
q\in (0,1)\mapsto \Pi_{q}\circ \varsigma_{i}^{q} \circ \vartheta_{q}( f)\in \mathcal{B}(\ell^{2}(\mathbb{Z}_{+}))
$$
is continuous.
As the maps in the diagram~\eqref{diagram1} are all (at least) co-algebra maps, so that $\Delta_{q}\circ\vartheta_{q}=(\vartheta_{q}\otimes \vartheta_{q})\circ \Delta,$ it follows for $f\in \mathbb{C}[\mathrm{G]}$ and $w\in W$ with reduced presentation $w=s_{j_{1}}\cdots s_{j_{m}},$ that
$$
\pi_{w}^{q}(\vartheta_{q}(f))=(\pi_{j_{1}}^{q}\otimes \cdots\otimes \pi_{j_{m}}^{q})\circ \Delta^{(m)}_{q}(\vartheta_{q}(f))=
$$
$$
(\Pi_{q_{j_{1}}}\otimes \cdots\otimes \Pi_{q_{j_{m}}})\circ ((\varsigma_{j_{1}}^{q}\circ \vartheta_{q})\otimes\cdots \otimes(\varsigma_{j_{m}}^{q}\circ \vartheta_{q}))\circ \Delta^{(m)}(f)=
$$
$$
((\Pi_{q_{j_{1}}}\circ \kappa_{q_{j_{1}}}\circ \varsigma_{j_{1}})\otimes \cdots\otimes (\Pi_{q_{j_{m}}}\circ \kappa_{q_{j_{m}}}\circ \varsigma_{j_{m}}))\circ \Delta^{(m)}(f)
$$
and hence the function $q\in (0,1)\mapsto \pi_{w}^{q}(\vartheta_{q}(f))$ is continuous. Combining this with~\eqref{tauq}, it follows that $q\in (0,1)\mapsto\tau_{q}(\vartheta_{q}(f))\in C(\mathrm T)$ is also continuous. Thus
$$
q\in(0,1)\mapsto(\pi_{w}^{q}\boxtimes \tau_{q})(\vartheta_{q}(f))=((\pi_{w}^{q}\circ \vartheta_{q})\otimes (\tau_{q}\circ \vartheta_{q}))\circ \Delta(f)\in \mathcal{B}(\Hh_{w})\otimes C(\mathrm T)
$$
is continuous. That~\eqref{funcq} holds for all $q$-independent maps follows by the factorization $\chi^{q}=\zeta\circ \tau_{q}.$
\end{proof}
Assume we have two subsets $\mathrm T_{1},\mathrm T_{2}\subseteq \mathrm T$ and let $\mathrm T_{3}=\mathrm T_{1}\mathrm T_{2}$ (the point-wise product). Let us denote by $\chi_{i},$ $i=1,2,3,$ the $*$-homomorphism $\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm T_{i})$ given by restriction of $\tau_{q}$ to $\mathrm T_{i}.$ It follows that we have an identification
\begin{equation}\label{multset}
\chi_{1}\boxtimes \chi_{2}\sim\chi_{3}
\end{equation}
in the sense that $\chi_{3}$ is the unique $*$-homomorphism with the property that, using the isomorphism $C(\mathrm T_{1})\otimes C(\mathrm T_{2})\cong C(\mathrm T_{1}\times T_{2}),$ we have
\begin{equation}\label{tensors2}
\begin{array}{cccc}
(\chi_{1}\boxtimes \chi_{2})(a)(t_{1},t_{2})=\chi_{3}(a)(t_{1}t_{2}), & t_{1}\in \mathrm T_{1}, & t_{2}\in\mathrm T_{2}, & a\in \mathbb{C}[\mathrm{G]_{q}}.
\end{array}
\end{equation}
The multiplication map $m:\mathrm T_{1}\times \mathrm T_{2}\to \mathrm T_{3}$ gives an injective $*$-homomorphism
$$
C(\mathrm T_{3})\overset{m^{*}}{\longrightarrow}C(\mathrm T_{1})\otimes C(\mathrm T_{2}) \cong C(\mathrm T_{1}\times T_{2})
$$
and it follows from~\eqref{tensors2} that $\chi_{1}\boxtimes\chi_{2}$ factors as
$$
\mathbb{C}[\mathrm{G]_{q}}\overset{\chi_{3}}{\longrightarrow} C(\mathrm T_{3})\overset{m^{*}}{\longrightarrow} C(\mathrm T_{1})\otimes C(\mathrm T_{2}).
$$
Furthermore, if for two subsets $\mathrm T_{1},\mathrm T_{2}\subseteq \mathrm T$ we let $\mathrm T_{3}=\mathrm T_{1}\cup \mathrm T_{2}$ and denote by $\chi_{i},$ $i=1,2,3,$ the $*$-homomorphisms $\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm T_{i})$ given by restriction of $\tau_{q},$ then we have an identification
\begin{equation}\label{sumset}
\chi_{1}\oplus \chi_{2}\sim\chi_{3}
\end{equation}
via the injective $*$-homomorphism $C(\mathrm T_{1}\cup T_{2})\to C(\mathrm T_{1})\oplus C(\mathrm T_{2})$ determined by the two inclusions $\mathrm T_{i}\subseteq \mathrm T_{3}$ for $i=1,2.$ Thus $\chi_{3}$ satisfies
\begin{equation}\label{tensors}
\begin{array}{cccc}
\chi_{1}(a)=\chi_{3}(a)|_{\mathrm T_{1}}, &\chi_{2}(a)= \chi_{3}(a)|_{\mathrm T_{2}}, & a\in \mathbb{C}[\mathrm{G]_{q}}
\end{array}
\end{equation}
where $\chi_{3}(a)|_{\mathrm T_{i}},$ $i=1,2$ denotes the restriction of the function $\chi_{3}(a)\in C(\mathrm T_{3})$ to the subset $\mathrm T_{i}\subseteq \mathrm T_{3}.$
\\
For a path $v\overset{\gamma}{\leadsto}w,$ let us denote by $\chi_{\gamma}$ the commutative $q$-independent $*$-representation $\mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm T_{\gamma}).$ If we have paths $v\overset{\gamma_{1}}{\leadsto}r\overset{\gamma_{2}}{\leadsto}w,$ then it follows from~\eqref{gamma} and~\eqref{multset} that if we have the composition of paths $\gamma=\gamma_{1}\circ \gamma_{2},$
then
\begin{equation}\label{cancel}
\chi_{\gamma_{1}}\boxtimes\chi_{\gamma_{2}}\sim\chi_{\gamma}.
\end{equation}
\section{\bf\textsc{The Main Result}}
\begin{thm}\label{main}
\
\begin{enumerate}[(i)]
\item For all $q,s\in (0,1)$ and $w\in W,$ we have an inner $*$-automorphism $\Gamma_{w}^{s,q}:\mathcal{B}(\Hh_{w})\to \mathcal{B}(\Hh_{w})$ that restricts to a $*$-isomorphism $\Bb_{w}^{q}\to\Bb_{w}^{s},$ such that $\Gamma_{w}^{s,q}(\K_{w})=\K_{w}$ and we have $$\begin{array}{ccc}\Gamma_{w}^{s,t}\circ \Gamma_{w}^{t,q}=\Gamma_{w}^{s,q}, & \text{for all $s,t,q\in (0,1)$}\\ \Gamma_{w}^{q,q}=\id, & \text{for all $q\in(0,1)$}.\end{array}$$
Moreover, for all $q,s\in (0,1)$ and $w\in W,$ the following diagram commutes
\begin{equation}\label{arrows}
\begin{xy}\xymatrixcolsep{7pc}\xymatrixrowsep{3pc}\xymatrix{
B_{w}^{q}\ar[r]^*{\Gamma_{w}^{s,q}}\ar[d]_*{\eta_{w}^{q}\circ p_{w}}& B_{w}^{s}\ar[d]^*{\eta_{w}^{s}\circ p_{w}}\\
\prod_{v\lhd w}B_{v,\chi_{v}^{w}}^{q} \ar[r]_*{\prod_{v\lhd w}(\Gamma_{v}^{s,q}\otimes \iota)} & \prod_{v\lhd w}B_{v,\chi_{v}^{w}}^{s}
}\end{xy}
\end{equation}
where $\eta_{w}^{q}$ and $\chi_{v}^{w}$ are as in Proposition~\ref{prop2}. The $*$-isomorphisms $\Gamma_{w}^{s,q}$ are also continuous in the point-norm topology in the sense that, for fixed $q\in (0,1)$ and $y\in \Bb_{w}^{q},$ the function $s\in(0,1)\mapsto \Gamma^{s,q}_{w}(y)\in \mathcal{B}(\Hh_{w})$ is continuous.
\item If $\chi^{q}: \mathbb{C}[\mathrm{G]_{q}}\to C(\mathrm X),$ $q\in (0,1),$ are commutative $q$-independent $*$-homomorphisms, then the $*$-isomorphism $\Gamma_{w}^{s,q}\otimes \iota:\Bb_{w}^{q}\otimes C(\mathrm X)\to \Bb_{w}^{s}\otimes C(\mathrm X)$ restricts to a $*$-isomorphism $$\Gamma_{w}^{s,q}\otimes \iota:\Bb_{w,\chi^{q}}^{q}\longrightarrow\Bb_{w,\chi^{s}}^{s}.$$
\end{enumerate}
\end{thm}
\begin{proof}
We will prove $(i)$ and $(ii)$ simultaneously using induction on $k=\mathcal{l}(w),$ starting at $k=0.$
\\
If $\mathcal{l}(w)=0,$ then $w=e.$ As $\pi_{e}=\epsilon_{q},$ we have $\Bb_{e}=\mathbb{C}.$ By the definition of a $q$-independent commutative $*$-homomorphism, $\chi^{q}=\zeta\circ \tau_{q}$ with $\zeta$ independent of $q,$ and hence $$\chi^{q}(C(\mathrm{G)_{q}})=\zeta\left(\tau_{q}(C(\mathrm{G)_{q}})\right)=\zeta(C(\mathrm T)).$$ So, $(i)$ and $(ii)$ hold in the case $k=0$ with $\Gamma_{e}^{s,q}=\id_{\mathbb{C}}$.
\\
Assume now that $(i)$ and $(ii)$ hold for all $v\in W$ of length $\mathcal{l}(v)<k.$ By Proposition~\ref{prop2} we have a $*$-homomorphism
\begin{equation}\label{etas}
\partial_{1}^{q}:=\eta_{w}^{q}\circ p_{w}:\Bb_{w}^{q}\longrightarrow \Bb_{w}^{q}/\K_{w}\longrightarrow \prod_{v\lhd w}\Bb_{v,\chi_{v}^{w}}^{q}
\end{equation}
such that the image is isomorphic to $\Bb_{w}^{q}/\K_{w}=p_{w}(\Bb_{w}^{q})\subseteq \mathcal{Q}(\Hh_{w}).$ For $v\lhd w,$ we have
\begin{equation}\label{lhd}
((\eta^{q}_{v}\circ p_{v})\otimes \iota)\circ (\pi^{q}_{v}\boxtimes \chi_{v}^{w})=\underset{\sigma\lhd v}{\prod}(\pi^{q}_{\sigma}\boxtimes (\chi_{\sigma}^{v}\boxtimes \chi_{v}^{w}))\sim \underset{\sigma\lhd v}{\prod}(\pi_{\sigma}\boxtimes \chi_{\gamma})
\end{equation}
where $\sigma\overset{\gamma}{\leadsto} w$ is the path $\sigma\lhd v\lhd w.$ Taking the product over all $v\lhd w$ we get from~\eqref{lhd} a $*$-homomorphism
\begin{equation}\label{haidt}
\partial_{2}^{q}:=\underset{v\lhd w}{\prod}(\eta_{v}^{q}\circ p_{v})\otimes \iota:\underset{v\lhd w}{\prod} \Bb_{v,\chi_{v}^{w}}^{q}\longrightarrow \overset{(2)}{\underset{\sigma\leadsto w}{\prod}}\Bb^{q}_{\sigma,\chi_{\gamma}},
\end{equation}
where the product $\overset{(2)}{\underset{\sigma\leadsto w}{\prod}}$ is indexed over all $\sigma\lhd^{(2)} w$ and all possible paths $\sigma\overset{\gamma}{\leadsto} w.$ It follows from Lemma~\ref{lem5} that the kernel of $\partial_{2}^{q}$ is equal to $\underset{v\lhd w}{\prod}\K_{v}\otimes C(\mathrm T_{v}^{w}).$
If we iterate~\eqref{haidt}, then we get a sequence of $*$-homomorphisms
\begin{equation}\label{seq}
\Bb_{w}^{q}\overset{\partial_{1}^{q}}{\longrightarrow} \prod_{v\lhd w}\Bb_{v,\chi_{v}^{w}}^{q}\overset{\partial_{2}^{q}}{\longrightarrow} \overset{(2)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}}\overset{\partial_{3}^{q}}{\longrightarrow}\dots\overset{\partial_{k-1}^{q}}{\longrightarrow} \overset{(k-1)}{\underset{v\leadsto w}{\prod}}\Bb_{v,\chi_{\gamma}}^{q}\overset{\partial_{k}^{q}}{\longrightarrow} \underset{e\leadsto w}{\prod} C(\mathrm T_{\gamma}),
\end{equation}
where the product $ \overset{(i)}{\underset{v\leadsto w}{\prod}}$ ranges over all elements $v\in W$ such that $v\lhd^{(i)}w$ and over all possible paths $v\overset{\gamma}{\leadsto}w.$ In the last product, $e\in W$ is the identity element and we suppress the upper index $(k)$ as it is unnecessary in this case. In general, when we have a fixed element $v\in W$ such that $v\leq w,$ then $v\leadsto w$ denotes the set of all possible paths $v\overset{\gamma}{\leadsto} w.$ As an example, for $v\in W,$ we write $\underset{v\leadsto w}{\prod}\Bb_{v,\chi_{\gamma}}$ to mean that the product ranges over all possible paths $v\overset{\gamma}{\leadsto} w.$ Similarly, we write $\underset{v\lhd^{(i)} w}{\prod}$ to mean that the product is over all $v\in W$ such that $v\lhd^{(i)}w.$ Similar notations will also be used for direct sums, etc.
Clearly, by Lemma~\ref{lem5}, for every $i=1,\dots,k-1,$ we have
\begin{equation}\label{kalinka}\ker\partial_{i+1}^{q}= \overset{(i)}{\underset{v\leadsto w}{\prod}}\K_{v}\otimes C(\mathrm T_{\gamma}).\end{equation} Moreover, the commutative $C^{*}$-algebra $\underset{e\leadsto w}{\prod} C(\mathrm T_{\gamma})$ does not depend on $q.$ For $i=1,\dots,k,$ we let
\begin{equation*}
\partial_{i}^{q}\circ\dots\circ\partial_{1}^{q}=:\Psi_{i}^{q}:\Bb_{w}^{q}\longrightarrow \overset{(i)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}}
\end{equation*}
\begin{equation*}
\partial_{k}^{q}\circ\dots\circ\partial_{1}^{q}=:\Psi_{k}^{q}:\Bb_{w}^{q}\longrightarrow \underset{e\leadsto w}{\prod}C(\mathrm T_{\gamma})
\end{equation*}
be the composition of $*$-homomorphisms in~\eqref{seq}. By iteration of~\eqref{isomorph} and~\eqref{sumset}, we have for any $a\in\mathbb{C}[\mathrm{G]_{q}}$
\begin{equation}\label{comm}
\begin{array}{cc}
(\Psi_{i}^{q}\circ \pi_{w}^{q})(a)= \overset{(i)}{\underset{v\leadsto w}{\text{\Large{$\oplus$}}}}(\pi_{v}^{q}\boxtimes\chi_{\gamma})(a), & i=1,\dots, k.
\end{array}
\end{equation}
By induction, for all $v\in W$ such that $l(v)<k,$ we have a $*$-isomorphism
$$
\Gamma_{v}^{s,q}:\Bb_{v}^{q}\longrightarrow \Bb_{v}^{s}
$$
such that $\Gamma_{v}^{s,q}(\K_{v})=\K_{v}$ and the following diagram is commutative
$$
\begin{xy}\xymatrixcolsep{7pc}\xymatrixrowsep{3pc}\xymatrix{
B_{v}^{q}\ar[r]^*{\Gamma_{v}^{s,q}}\ar[d]_*{\eta_{v}^{q}\circ p_{v}}& B_{v}^{s}\ar[d]^*{\eta_{v}^{s}\circ p_{v}}\\
\underset{\sigma\lhd v}{\prod}B_{\sigma,\chi_{\sigma}^{v}}^{q} \ar[r]_*{\underset{\sigma\lhd v}{\prod}(\Gamma_{\sigma}^{s,q}\otimes \iota)} & \underset{\sigma\lhd v}{\prod} B_{\sigma,\chi_{\sigma}^{v}}^{s}
}\end{xy}
$$
It follows that for every $i=1,\dots,k-1,$ we have $*$-isomorphisms
\begin{equation}\label{prodiso}
\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota): \overset{(i)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}}\longrightarrow \overset{(i)}{\underset{v\leadsto w}{\prod}}\Bb^{s}_{v,\chi_{\gamma}}
\end{equation}
that maps $ \overset{(i)}{\underset{v\leadsto w}{\prod}}\K_{v}\otimes C(\mathrm T_{\gamma})$ into itself and such that the following diagrams are commutative
\begin{equation}\label{dia1}
\begin{xy}\xymatrixcolsep{5pc}\xymatrixrowsep{3pc}
\xymatrix{
\Bb_{w}^{q} \ar[dr]_*{\Psi_{i+1}^{q}} \ar[r]^*{\Psi_{i}^{q}} & \overset{(i)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}} \ar[d]_*{\partial_{i+1}^{q}} \ar[r]^*{ \overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)} & \overset{(i)}{\underset{v\leadsto w}{\prod}}\Bb^{s}_{v,\chi_{\gamma}} \ar[d]_*{\partial_{i+1}^{s}} &\Bb_{w}^{s} \ar[l]_*{\Psi_{i}^{s}} \ar[dl]^*{\Psi_{i+1}^{s}}\\
& \overset{(i+1)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}}\ar[r]_*{ \overset{(i+1)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)} & \overset{(i+1)}{\underset{v\leadsto w}{\prod}}\Bb^{s}_{v,\chi_{\gamma}}&
}\end{xy}
\end{equation}
\begin{equation}\label{dia2}
\begin{xy}\xymatrixcolsep{5pc}\xymatrixrowsep{3pc}\xymatrix{
\Bb_{w}^{q} \ar[r]^*{\Psi_{k-1}^{q}} \ar[dr]_*{\Psi_{k}^{q}}& \overset{(k-1)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}} \ar[d]_*{\partial_{k}^{q}} \ar[r]^*{ \overset{(k-1)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)} & \overset{(k-1)}{\underset{v\leadsto w}{\prod}}\Bb^{s}_{v,\chi_{\gamma}} \ar[d]_*{\partial_{k}^{s}} &\Bb_{w}^{s} \ar[l]_*{\Psi_{k-1}^{s}} \ar[dl]^*{\Psi_{k}^{s}}\\
& \underset{e\leadsto w}{\prod}C(\mathrm T_{\gamma}) \ar[r]_*{\id} & \underset{e\leadsto w}{\prod}C(\mathrm T_{\gamma}) &.
}\end{xy}
\end{equation}
The idea is now to show that $ \overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)$ restricts to a $*$-isomorphism between $\Psi_{i}^{q}(\Bb_{w}^{q})$ and $\Psi_{i}^{s}(\Bb_{w}^{s})$ for $i=1,\dots,k.$ We prove this by ``climbing the ladder''~\eqref{dia1}, using downward induction on $i,$ starting at $i=k$ (i.e.\ the case~\eqref{dia2}) and counting down to $i=1$.
The statement is clear at $k$, since by the $q$-independence of $\chi_{\gamma}$ and the fact that $\tau_{q}(C(\mathrm{G)_{q}})=C(\mathrm T)=\tau_{s}(C(\mathrm{G)_{s}}),$ we have $$\Psi_{k}^{q}(\Bb_{w}^{q})=(\underset{e\leadsto w}{\text{\Large{$\oplus$}}}\chi_{\gamma})(C(\mathrm{G)_{q}})=(\underset{e\leadsto w}{\text{\Large{$\oplus$}}}\chi_{\gamma})(C(\mathrm{G)_{s}})=\Psi_{k}^{s}(\Bb_{w}^{s}).$$
Assume now that the statement holds for $i+1.$ Consider $x\in \Psi_{i}^{q}(\Bb_{w}^{q}).$ Then $$\partial_{i+1}^{q}(x)\in \Psi_{i+1}^{q}(\Bb_{w}^{q}),$$ and hence by induction
$$
\left(\overset{(i+1)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)\right)(\partial_{i+1}^{q}(x))\in \Psi_{i+1}^{s}(\Bb_{w}^{s}).
$$
But by the commutativity of the diagrams~\eqref{dia1}--\eqref{dia2}, this element is also equal to
$$
\partial_{i+1}^{s}\left(\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota) (x)\right)\in \Psi_{i+1}^{s}(\Bb_{w}^{s})
$$
from which it follows, by~\eqref{kalinka}, that
\begin{equation}\label{katyusha}
\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota) (x)\in \Psi_{i}^{s}(\Bb_{w}^{s})+\overset{(i)}{\underset{v\leadsto w}{\prod}}\K_{v}\otimes C(\mathrm T_{\gamma})
\end{equation}
and thus
\begin{equation}\label{xelement}\begin{array}{cccc}\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota) (x)=y+c,&y\in \Psi_{i}^{s}(\Bb_{w}^{s}),& c\in \overset{(i)}{\underset{v\leadsto w}{\prod}}\K_{v}\otimes C(\mathrm T_{\gamma}).\end{array}\end{equation} We show that actually $c\in \Psi_{i}^{s}(\Bb_{w}^{s}).$ For a fixed $v\in W$ such that $v\lhd^{(i)}w,$ we can embed
\begin{equation}\label{embed}
\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{v}^{w})\subseteq \underset{v\leadsto w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})
\end{equation}
via the injective $*$-homomorphism $C(\mathrm T_{v}^{w})\to \underset{v\leadsto w}{\prod}C(\mathrm T_{\gamma})$ coming from the inclusions $\mathrm T_{\gamma}\subseteq \mathrm T_{v}^{w}$ and the fact that, by the definition of $\mathrm T_{v}^{w},$ we have $\mathrm T_{v}^{w}=\underset{v\leadsto w}{\cup} \mathrm T_{\gamma}.$ Thus the embedding~\eqref{embed} is given on simple tensors by $$\begin{array}{ccc}x\otimes f\mapsto\underset{v\leadsto w}{\prod}(x\otimes f|_{\mathrm T_{\gamma}}), & x\in \mathcal{B}(\Hh_{v}), & f\in C(\mathrm T_{v}^{w}),\end{array}$$
where $f|_{\mathrm T_{\gamma}}$ denotes the restriction of $f\in C(\mathrm T_{v}^{w})$ to the subset $\mathrm T_{\gamma}\subseteq \mathrm T_{v}^{w}.$ Moreover, we have the embedding
\begin{equation}\label{embedcompact}
\K_{v}\otimes C(\mathrm T_{v}^{w})\subseteq\underset{v\leadsto w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})
\end{equation}
coming from~\eqref{embed}. Using this embedding, we clearly have, for fixed $v\lhd^{(i)}w,$ that
\begin{equation}\label{embedrange}
\underset{v\leadsto w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes \chi_{\gamma}):\mathbb{C}[\mathrm{G]_{q}}\longrightarrow \mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{v}^{w})\subseteq \underset{v\leadsto w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})
\end{equation}
and that, as in~\eqref{sumset}, we can identify $\underset{v\leadsto w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes\chi_{\gamma})\sim\pi_{v}^{q}\boxtimes \chi_{v}^{w}.$ It then follows from Lemma~\mathrm{Re}f{lem5}, that under the embeddings~\eqref{embed} and~\eqref{embedcompact} we have
\begin{equation}\label{subsets}
\K_{v}\otimes C(\mathrm T_{v}^{w})\subseteq \overline{\underset{v\leadsto w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes \chi_{\gamma})(\mathbb{C}[\mathrm{G]_{q}})}\subseteq \mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{v}^{w})\subseteq \underset{v\leadsto w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma}).
\end{equation}
Moreover, note that the left-hand sides of~\eqref{embed} and~\eqref{embedcompact} are clearly invariant under the homomorphism $\underset{v\leadsto w}{\prod}\Gamma^{s,q}_{v}\otimes \iota.$
We can now use Proposition~\ref{prop3} and~\eqref{comm} to see that if we take the product of~\eqref{subsets}, ranging over all $v\lhd^{(i)}w,$ then
\begin{equation}
\underset{v\lhd^{(i)}w}{\prod}\K_{v}\otimes C(\mathrm T_{v}^{w})\subseteq \Psi_{i}^{q}(\Bb^{q}_{w})\subseteq \underset{v\lhd^{(i)}w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{v}^{w})\subseteq \overset{(i)}{\underset{v\leadsto w}{\prod}}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma}).
\end{equation}
As $\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)$ clearly maps the two sub-algebras on either side of $\Psi_{i}^{q}(\Bb_{w}^{q})$ onto themselves, it follows from~\eqref{xelement} that
$$
\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota) (x)-y=c\in \underset{v\lhd^{(i)}w}{\prod}\K_{v}\otimes C(\mathrm T_{v}^{w})\subseteq \Psi_{i}^{s}(\Bb^{s}_{w}).
$$
From this, it follows that
\begin{equation}\label{equity}\begin{array}{ccc}\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)( \Psi_{i}^{q}(\Bb_{w}^{q}))\subseteq \Psi_{i}^{s}(\Bb_{w}^{s}), & q,s\in (0,1).\end{array}\end{equation}
But as $$\begin{array}{cccc} \overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)\circ \overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{q,s}\otimes \iota)=\id, & q,s\in (0,1)\end{array}$$ we must have equality in~\eqref{equity}.
\\
Thus, we have an isomorphism $$\begin{array}{ccc}\Bb_{w}^{q}/\K_{w}\cong\Bb_{w}^{s}/\K_{w}, & q,s\in (0,1)\end{array}$$ via the $*$-isomorphism
\begin{equation}\label{tiso}\mathcal{L}^{s,q}:=(\eta_{w}^{s})^{-1}\circ \left(\underset{v\lhd w}{\prod}(\Gamma_{v}^{s,q}\otimes \iota)\right)\circ \eta_{w}^{q}.\end{equation}
However, to be able to use Lemma $2$~to conclude that the $C^{*}$-algebras $\Bb_{w}^{q},$ $q\in (0,1)$ are all isomorphic, we must also show that $\mathcal{L}^{s,q}$ are continuous in the point-norm topology, i.e. that for a fixed $q\in (0,1)$ and an element $y\in \Bb_{w}^{q}/\K_{w},$ we have a continuous function
\begin{equation}\label{continuous}
\begin{array}{ccc}
s\in(0,1)\to \mathcal{Q}(\Hh_{w}), & s\mapsto \mathcal{L}^{s,q}(y).
\end{array}
\end{equation}
By a classical approximation argument, it is enough to prove this for the dense $*$-subalgebra $(p_{w}\circ\pi_{w}^{q})(\mathbb{C}[\mathrm{G]_{q}}).$
By Lemma~\ref{field} we have invertible co-algebra morphisms $\theta^{q}:\mathbb{C}[\mathrm{G}]\to \mathbb{C}[\mathrm{G]_{q}}$ such that for fixed $f\in \mathbb{C}[\mathrm{G}],$ the function $q\in (0,1)\mapsto \pi_{w}^{q}(\theta^{q}(f))\in \mathcal{B}(\Hh_{w})$ is continuous. Thus the function $$q\in(0,1)\mapsto (p_{w}\circ \pi_{w}^{q})(\theta^{q}(f))\in \mathcal{B}(\Hh_{w})/\K_{w}=\mathcal{Q}(\Hh_{w})$$ is also continuous. Let us write $$F^{q}:=(p_{w}\circ\pi_{w}^{q})(\theta^{q}(f))\in \mathcal{Q}(\Hh_{w}).$$ By induction, the function
$$
s\in(0,1)\mapsto \left(\underset{v\lhd w}{\prod}(\Gamma_{v}^{s,q}\otimes \iota)\right)(\eta_{w}^{q}(F^{q}))
$$
is continuous and $\left(\underset{v\lhd w}{\prod}(\Gamma_{v}^{q,q}\otimes \iota)\right)(\eta_{w}^{q}(F^{q}))=\eta_{w}^{q}(F^{q})$. Notice that by the definition of $\eta^{q}_{w}$ and~\eqref{factors}, we have $$\eta_{w}^{q}(F^{q})=\underset{v\lhd w}{\text{\Large{$\oplus$}}}(\pi_{v}^{q}\boxtimes \chi_{v}^{w})(\theta^{q}(f))$$ and thus by Lemma~\ref{field}, the function
$$
q\in (0,1)\mapsto \eta_{w}^{q}(F^{q})\in \prod_{v\lhd w}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{v}^{w})
$$
is continuous. It follows that for all $\epsilon>0$ we have
$$
\|\left(\underset{v\lhd w}{\prod}(\Gamma_{v}^{s,q}\otimes \iota)\right)(\eta_{w}^{q}(F^{q}))- (\eta_{w}^{s}(F^{s})) \|\leq
$$
$$
\|\left(\underset{v\lhd w}{\prod}(\Gamma_{v}^{s,q}\otimes \iota)\right)(\eta_{w}^{q}(F^{q}))-\eta_{w}^{q}(F^{q}) \|+\| \eta_{w}^{q}(F^{q})- \eta_{w}^{s}(F^{s}) \|< \epsilon
$$
for $|s-q|<\delta_{1},$ if $\delta_{1}>0$ is made small enough. If we apply the $*$-isomorphism $(\eta_{w}^{s})^{-1},$ we get
\begin{equation}\label{smaller}
\begin{array}{ccc}
\|\mathcal{L}^{s,q}\left(F^{q}\right)-F^{s}\|<\epsilon, & \text{for $|s-q|<\delta_{1}$}
\end{array}
\end{equation}
and thus it follows that there is a $0<\delta \leq \delta_{1},$ such that
$$
\|\mathcal{L}^{s,q}\left(F^{q}\right)-F^{q}\|\leq
$$
$$
\|\mathcal{L}^{s,q}\left(F^{q}\right)-F^{s}\|+\|F^{s}-F^{q}\|<2\epsilon
$$
when $|s-q|<\delta.$ We can now apply Lemma~$2$ to get an inner $*$-automorphism $\Gamma_{w}^{s,q}:\mathcal{B}(\Hh_{w})\to\mathcal{B}(\Hh_{w})$ that restricts to a $*$-isomorphism $\Bb_{w}^{q}\to \Bb_{w}^{s}$ and is continuous in the point-norm topology. That the diagram~\eqref{arrows} commutes follows from the commutativity of~\eqref{com} and the way $\mathcal{L}^{s,q}$ was defined. Clearly, the compact operators are invariant under $\Gamma_{w}^{s,q}.$
\\
The case $(ii).$ We prove it first for $\tau_{q}:\mathbb{C}[\mathrm{G}]_{q}\to C(\mathrm T)$ (see~\eqref{tauq}). We combine the inclusion $\Bb_{w,\tau_{q}}^{q}\hookrightarrow \Bb_{w}^{q}\otimes C(\mathrm T)$ with the sequence~\eqref{seq} by tensoring all the components with $C(\mathrm T)$ in the following way
$$
\Bb^{q}_{w,\tau_{q}}\hookrightarrow \Bb^{q}_{w}\otimes C(\mathrm T) \overset{\partial_{1}\otimes\iota}{\longrightarrow} \left(\prod_{v\lhd w}\Bb^{q}_{v,\chi_{v}^{w}}\right)\otimes C(\mathrm T)\overset{\partial_{2}\otimes\iota}{\longrightarrow} \left(\overset{(2)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}}\right)\otimes C(\mathrm T)\overset{\partial_{3}\otimes\iota}{\longrightarrow}\dots
$$
\begin{equation}\label{tensorseq}
\dots\overset{\partial_{k-1}\otimes\iota}{\longrightarrow} \left(\overset{(k-1)}{\underset{v\leadsto w}{\prod}}\Bb^{q}_{v,\chi_{\gamma}}\right)\otimes C(\mathrm T)\overset{\partial_{k}\otimes\iota}{\longrightarrow} \left(\underset{e\leadsto w}{\prod}C(\mathrm T_{\gamma})\right)\otimes C(\mathrm T).
\end{equation}
If we define $\Psi_{i}^{q}$ as before, then similar to~\eqref{comm}, we have
\begin{equation}\label{comm2}
(\Psi_{i}^{q}\otimes \iota)\circ (\pi_{w}^{q}\boxtimes \tau_{q})= \left(\underset{v\leadsto w}{\overset{(i)}{\text{\Large{$\oplus$}}}}(\pi_{v}^{q}\boxtimes\chi_{\gamma})\right)\boxtimes \tau_{q}.
\end{equation}
We can proceed exactly as before, using the commutative diagrams~\eqref{dia1} and~\eqref{dia2} (now tensored by $C(\mathrm T)$), by induction on $i=1,\dots , k,$ starting at $k.$ Clearly the images in $\left(\underset{e\leadsto w}{\prod}C(\mathrm T_{\gamma})\right)\otimes C(\mathrm T)$ are the same, as the commutative $*$-representation is $q$-independent. Assuming that $$\left(\overset{(i+1)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)\right)\otimes \iota:\left(\Psi^{q}_{i+1}\otimes \iota\right)(\Bb_{w,\tau_{q}}^{q})\longrightarrow\left(\Psi^{s}_{i+1}\otimes \iota\right)(\Bb_{w,\tau_{s}}^{s})$$
is a $*$-isomorphism gives, for $x\in \left(\Psi^{q}_{i}\otimes \iota\right)(\Bb_{w,\tau_{q}}^{q}),$ that
\begin{equation}\label{korobeiniki}
\left(\left(\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)\right)\otimes \iota\right)(x)\in \left(\Psi^{s}_{i}\otimes \iota\right)(\Bb_{w,\tau_{s}}^{s})+\left(\overset{(i)}{\underset{v\leadsto w}{\prod}}\K_{v}\otimes C(\mathrm T_{\gamma})\right)\otimes C(\mathrm T).
\end{equation}
The rest of the argument follows in a similar fashion as for $\Bb_{w}^{q}:$ we find an embedding
\begin{equation}\label{embed2}
\underset{v\lhd^{(i)}w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T)\subseteq \left(\overset{(i)}{\underset{v\leadsto w}{\prod}}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})\right)\otimes C(\mathrm T)\end{equation}
such that,
\begin{enumerate}[(i)]
\item this subalgebra is invariant with respect to the map $\left(\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)\right)\otimes\iota,$
\item we have
\begin{equation}\label{intersec}
\underset{v\lhd^{(i)}w}{\prod}\K_{v}\otimes C(\mathrm T)=\underset{v\lhd^{(i)}w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T)\bigcap \left(\overset{(i)}{\underset{v\leadsto w}{\prod}}\K_{v}\otimes C(\mathrm T_{\gamma})\right)\otimes C(\mathrm T)
\end{equation}
\item and the following inclusions hold
\begin{equation}\label{subsets2}
\underset{v\lhd^{(i)}w}{\prod}\K_{v}\otimes C(\mathrm T)\subseteq\left(\Psi^{s}_{i}\otimes \iota\right)(\Bb_{w,\tau_{s}}^{s}) \subseteq \underset{v\lhd^{(i)}w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T).
\end{equation}
\end{enumerate}
Clearly, this implies $$\left(\left(\overset{(i)}{\underset{v\leadsto w}{\prod}}(\Gamma_{v}^{s,q}\otimes \iota)\right)\otimes \iota\right)(x)\in \left(\Psi^{s}_{i}\otimes \iota\right)(\Bb_{w,\tau_{s}}^{s}).$$
To do this, we use the natural isomorphism
$$
\left(\overset{(i)}{\underset{v\leadsto w}{\prod}}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})\right)\otimes C(\mathrm T)\cong \overset{(i)}{\underset{v\leadsto w}{\prod}}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})\otimes C(\mathrm T).
$$
Notice that for every $v\overset{\gamma}{\leadsto} w,$ we have an embedding $C(\mathrm T)\subseteq C(\mathrm T_{\gamma})\otimes C(\mathrm T)$ determined by $C(\mathrm T_{\gamma})\otimes C(\mathrm T)\cong C(\mathrm T_{\gamma}\times \mathrm T)$ and the multiplication map $\mathrm T_{\gamma}\times \mathrm T\to \mathrm T.$ As $\tau_{q}:\mathbb{C}[\mathrm{G}]_{q}\to \mathbb{C}[\mathrm T]$ is a morphism of Hopf $*$-algebras (hence compatible with the multiplication in $\mathrm T$), it follows that we have a $*$-homomorphism
$$
\pi_{v}^{q}\boxtimes \chi_{\gamma}\boxtimes \tau_{q}:C(\mathrm{G})_{q}\to \mathcal{B}(\Hh_{v})\otimes C(\mathrm T)\subseteq \mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})\otimes C(\mathrm T).
$$
We can then, for fixed $v\lhd^{(i)}w,$ embed diagonally
$$
\mathcal{B}(\Hh_{v})\otimes C(\mathrm T)\subseteq \underset{v\leadsto w}{\prod}\mathcal{B}(\Hh_{v})\otimes C(\mathrm T_{\gamma})\otimes C(\mathrm T).
$$
By taking the product over all $v\lhd^{(i)}w,$ we get an embedding~\eqref{embed2} such that~\eqref{subsets2} holds (the first inclusion follows from Proposition~\ref{prop3}). Clearly, we also have~\eqref{intersec}. Thus, it follows that $(\Gamma^{s,q}_{w}\otimes \iota)(\Bb_{w,\tau_{q}}^{q})=\Bb_{w,\tau_{s}}^{s},$ since this is the case $i=1.$
\\
This implies the general case: let $\chi^{q}:C(\mathrm{G})_{q}\to C(\mathrm X)$ be commutative $q$-independent $*$-homomorphisms and $\zeta:C(\mathrm T)\to C(\mathrm X)$ the $*$-homomorphism such that $\chi^{q}=\zeta\circ \tau_{q}.$ Then $\iota\otimes \zeta$ is a surjective $*$-homomorphism $\Bb^{q}_{w,\tau_{q}}\to \Bb^{q}_{w,\chi^{q}}.$
As $\Gamma_{w}^{s,q}\otimes \iota$ is a $*$-isomorphism $\Bb_{w,\tau_{q}}^{q}\to \Bb_{w,\tau_{s}}^{s},$ we have
$$
(\Gamma_{w}^{s,q}\otimes \iota)(\Bb_{w,\chi^{q}}^{q})=(\Gamma_{w}^{s,q}\otimes \iota)\circ (\iota\otimes \zeta) (\Bb_{w,\tau_{q}}^{q})=(\iota\otimes \zeta)\circ (\Gamma_{w}^{s,q}\otimes \iota) (\Bb_{w,\tau_{q}}^{q})=
$$
$$
=(\iota\otimes \zeta)(\Bb_{w,\tau_{s}}^{s})=\Bb_{w,\chi^{s}}^{s}.
$$
\end{proof}
\begin{cor}
The universal enveloping $C^{*}$-algebras of $\mathbb{C}[\mathrm{G}]_{q}$ are isomorphic for all $q\in (0,1).$ These isomorphisms are equivariant with respect to the right-action of $\mathrm T.$
\end{cor}
\begin{proof}
If $\omega\in W$ is the unique element of longest length in the Weyl group and $\tau_{q}:C(\mathrm{G})_{q}\to C(\mathrm T)$ the commutative $q$-independent $*$-homomorphism coming from the embedding of the maximal torus $\mathrm T\subseteq \mathrm G,$ then it follows from Theorem~\ref{ntthm} that any irreducible $*$-representation of $C(\mathrm{G})_{q}$ must factor through $\pi_{\omega}^{q}\boxtimes \tau_{q}.$ Thus $\Bb_{\omega,\tau_{q}}^{q}\cong C(\mathrm{G})_{q}.$ The $q$-independence follows from Theorem~\ref{main}(ii). For $x\in C(\mathrm G)_{q}$ and $t\in \mathrm T$ we have
$$
(\pi^{q}_{\omega}\boxtimes \tau_{q})(R_{t}(x))=
$$
$$
=(\iota\otimes\iota\otimes \mathrm{ev}_{t})\circ(\iota\otimes \Delta_{\mathrm T})((\pi_{\omega}^{q}\boxtimes\tau_{q})(x))
$$
and thus equivariance with respect to the right-action follows as the isomorphism $\Bb_{\omega,\tau_{q}}^{q}\to \Bb_{\omega,\tau_{s}}^{s}$ is of the form $\Gamma^{s,q}_{\omega}\otimes \iota.$
\end{proof}
\end{document}
\begin{document}
\maketitle
\begin{abstract}
We consider several differential operators on compact almost-complex, almost-Hermitian and almost-K\"ahler manifolds. We discuss Hodge Theory for these operators and a possible cohomological interpretation. We compare the associated spaces of harmonic forms and cohomologies with the classical de Rham, Dolbeault, Bott-Chern and Aeppli cohomologies.
\end{abstract}
\section{Introduction}
On a complex manifold $X$ the exterior derivative $d$ decomposes as the sum of two other cohomological differential operators, namely $d=\partial+\partialbar$ satisfying
$\partial^2=0$, $\partialbar^2=0$ and $\partial\partialbar+\partialbar\partial=0$.
Once a Hermitian metric on $X$ is fixed one can associate to $\partialbar$ a natural elliptic differential operator, the Dolbeault Laplacian; if $X$ is compact the kernel of this operator has a cohomological interpretation, i.e., it is isomorphic to the Dolbeault cohomology of $X$. If we do not assume the integrability of the almost-complex structure, i.e., $(X,\,J)$ is an almost-complex manifold, the $\partialbar$ operator is still well-defined but it no longer has a cohomological meaning. However, we can define some natural differential operators.\\
In this paper we are interested in studying the properties of such operators, their harmonic forms and possibly their cohomological meaning on compact manifolds endowed with a non-integrable almost-complex structure.
More precisely, in the non-integrable case $d$ decomposes as
$$
d:A^{p,q}(X)\to A^{p+2,q-1}(X)\oplus A^{p+1,q}(X)\oplus A^{p,q+1}(X)\oplus A^{p-1,q+2}(X)
$$
and we set
$$
d=\mu+\partial+\partialbar+\bar\mu\,.
$$
Then we define a $2$-parameter family of differential operators $\left\lbrace D_{a,b}\right\rbrace_{a,b\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace}$ whose squares are zero and which interpolate between $d$ and $d^c:=J^{-1}dJ$.
In general $d$ and $d^c$ do not anticommute and so in Proposition \ref{prop:anticommutation-relation} we give necessary and sufficient conditions on the parameters in order to have $D_{a,b}D_{c,e}+D_{c,e}D_{a,b}=0$; in such a case we define the Bott-Chern and Aeppli cohomology groups. Moreover, if we fix a $J$-Hermitian metric we develop a Hodge theory for these cohomologies together with the cohomology of $D_{a,b}$
(see Theorems \ref{thm:Dab-harmonic}, \ref{thm:Dab-harmonic-Dab-cohomology}, Proposition \ref{prop:Dab-star} and Theorems
\ref{thm:BCA-harmonic}, \ref{thm:BCA-harmonic-BCA-cohomology}). In particular we show that if $|a|=|b|$ then the cohomology of $D_{a,b}$ is isomorphic to the de Rham cohomology (cf. Proposition \ref{prop:isom-parametric-derham}). Moreover, in Example \ref{example:kodaira-thurston-Dab-cohomology} we compute explicitly the invariant $D_{a,b}$-cohomology on the Kodaira-Thurston manifold endowed with an almost-complex structure, showing that it is isomorphic to the de Rham cohomology independently of the parameters.
Nevertheless, the considered parametrized cohomology groups do not generalize (except in the almost-K\"ahler case) the classical Dolbeault, Bott-Chern and Aeppli cohomology groups of complex manifolds. In order to find a possible generalization of these cohomologies we consider the operators (cf. \cite{debartolomeis-tomassini})
$$
\partialta:=\partial+\bar\mu\qquad\bar\partialta:=\partialbar+\mu\,.
$$
These two operators anticommute but their squares are zero if and only if $J$ is integrable. In Section \ref{section:harmonic-forms-almost-hermitian} we define a generalization of the Dolbeault, Bott-Chern and Aeppli Laplacians and develop a Hodge theory for these operators studying their kernels.\\
In the almost-K\"ahler setting considered in Section \ref{section:harmonic-forms-almost-kahler} we derive some further relations among the kernels of these operators, involving also the Betti numbers and the dimension of $\bar\partialta$-harmonic forms (see Corollary \ref{cor:comparison-betti-numbers}). A Hard-Lefschetz type Theorem for Bott-Chern harmonic forms is also proved (cf. Theorem
\ref{thm:hard-lefschetz}).\\
Finally, in the last Section we compute explicit examples on the two $4$-dimensional non-toral nilmanifolds and the Iwasawa manifold showing that a
bi-graded decomposition for the $\bar\partialta$-harmonic forms cannot be expected and that the equalities in Theorem \ref{thm:equalities-harmonic-spaces} and the inequalities in Corollary \ref{cor:comparison-betti-numbers} are peculiar to the almost-K\"ahler case, giving therefore obstructions to the existence of a symplectic structure compatible with a fixed almost-complex structure on a compact manifold. In particular, we show in Example \ref{example-1} that even if in the bigraded case the spaces we consider coincide with the spaces considered in \cite{cirici-wilson-2}, this fails in total degree.
\noindent{\sl Acknowledgments.} The authors would like to thank Daniele Angella, Joana Cirici, Scott O. Wilson for interesting and useful discussions.
They also would like to thank the anonymous referee for reading carefully the paper and for providing useful comments that improved the presentation.
Most of the work has been written during the first-named author's post-doctoral fellowship at the Dipartimento di Matematica e Informatica ``Ulisse Dini'' of the Universit\`{a} di Firenze. She is grateful to the department for the hospitality.
\section{Preliminaries}\label{section:preliminaries}
Let $(X\,,J)$ be an almost-complex manifold; then the almost-complex structure $J$ induces a natural bi-grading on the space of forms
$A^\bullet(X)=\bigoplus_{p+q=\bullet}A^{p,q}(X)$. If $J$ is non-integrable the exterior derivative $d$ acts on forms as
$$
d:A^{p,q}(X)\to A^{p+2,q-1}(X)\oplus A^{p+1,q}(X)\oplus A^{p,q+1}(X)\oplus A^{p-1,q+2}(X)
$$
and so it splits into four components
$$
d=\mu+\partial+\partialbar+\bar\mu\,,
$$
where $\mu$ and $\bar\mu$ are differential operators that are linear over functions. In particular, they are related to the Nijenhuis tensor $N_J$ by
$$
\left(\mu\alpha+\bar\mu\alpha\right)(X,Y)=\frac{1}{4}\,\alpha\left( N_J(X,Y)\right)
$$
where $\alpha\in A^1(X)$.
Since $d^2=0$ one has
$$
\left\lbrace
\begin{array}{lcl}
\mu^2 & =& 0\\
\mu\partial+\partial\mu & = & 0\\
\partial^2+\mu\partialbar+\partialbar\mu & = & 0\\
\partial\partialbar+\partialbar\partial+\mu\bar\mu+\bar\mu\mu & = & 0\\
\partialbar^2+\bar\mu\partial+\partial\bar\mu & = & 0\\
\bar\mu\partialbar+\partialbar\bar\mu & = & 0\\
\bar\mu^2 & =& 0
\end{array}
\right.\,
$$
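For instance, the third of these relations is obtained by collecting, in $d^2\alpha=0$ for $\alpha\in A^{p,q}(X)$, the terms of bidegree $(p+2,q)$; no other composition of two components of $d$ lands in that bidegree:
$$
0=\left(d^2\alpha\right)^{p+2,q}=\partial^2\alpha+\mu\partialbar\alpha+\partialbar\mu\alpha\,.
$$
The remaining relations follow in the same way from the other bidegrees.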
Consider the following differential operators (cf. \cite{debartolomeis-tomassini})
$$
\partialta:=\partial+\bar\mu\,,\qquad \bar\partialta:=\partialbar+\mu
$$
with $\partialta:A^{\pm}(X)\to A^\pm(X)$ and $\bar\partialta:A^{\pm}(X)\to A^\mp(X)$, where $A^\pm(X)$ are defined according to the parity of $q$ in the $J$-induced bigrading on $A^\bullet(X)$.
\begin{lemma}
Let $(X,J)$ be an almost-complex manifold, the following relations hold
\begin{itemize}
\item[$\bullet$] $d=\partialta+\bar\partialta$,
\item[$\bullet$] $\partialta^2+\bar\partialta^2=0$,
\item[$\bullet$] $\partialta^2=\partial^2-\partialbar^2$,
\item[$\bullet$] $\partialta\bar\partialta+\bar\partialta\partialta=0$.
\end{itemize}
\end{lemma}
\begin{proof}
The first statement follows immediately from the definitions. The second and third points follow from direct computation
$$
\bar\partialta^2=(\partialbar+\mu)(\partialbar+\mu)=\partialbar^2+\partialbar\mu+\mu\partialbar+\mu^2=\partialbar^2-\partial^2
$$
and, similarly, $\partialta^2=\partial^2-\partialbar^2$.\\
Finally, for the last statement we have
$$
\partialta\bar\partialta+\bar\partialta\partialta=\partial\partialbar+\partial\mu+\bar\mu\partialbar+\bar\mu\mu+\partialbar\partial+\partialbar\bar\mu+\mu\partial+\mu\bar\mu=0.
$$
\end{proof}
If $D=d,\partial,\partialta,\bar\partialta,\mu,\bar\mu$ we set $D^c:=J^{-1}DJ$, then
$\partialta^c=-i\partialta$ and $\bar\partialta^c=i\bar\partialta$ and
$$
d^c=i(\bar\partialta-\partialta)=i(\partialbar+\mu-\partial-\bar\mu).
$$
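As a quick check of $\partialta^c=-i\,\partialta$ (assuming the standard convention, implicit here, that $J$ acts on a $(p,q)$-form by multiplication by $i^{p-q}$), for $\alpha\in A^{p,q}(X)$ one computes
$$
J^{-1}\partial J\alpha=i^{p-q}\,i^{-\left((p+1)-q\right)}\,\partial\alpha=-i\,\partial\alpha\,,\qquad
J^{-1}\bar\mu J\alpha=i^{p-q}\,i^{-\left((p-1)-(q+2)\right)}\,\bar\mu\alpha=-i\,\bar\mu\alpha\,,
$$
so that $\partialta^c=J^{-1}\partialta J=-i\,\partialta$; the computation for $\bar\partialta^c=i\,\bar\partialta$ is analogous.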
Notice that in general if $J$ is not integrable $d$ and $d^c$ do not anticommute, indeed we have
$$
dd^c+d^cd=2i(\bar\partialta^2-\partialta^2)=4i(\partialbar^2-\partial^2)\,.
$$
Therefore, an almost-complex structure $J$ is integrable if and only if $d^c=i(\partialbar-\partial)$ if and only if $d$ and $d^c$ anticommute.\\
Let $g$ be a $J$-Hermitian metric and denote with $*$ the associated anti-linear Hodge-*-operator. If $D=d,\partial,\partialbar,\mu,\bar\mu$ we set
$D^*:=-*D*$ and it turns out that $D^*$ is the adjoint of $D$ with respect to the $L^2$-pairing induced on forms (cf. \cite{debartolomeis-tomassini}, \cite{cirici-wilson-1}).\\
As usual one can consider the following differential operators
$$
\Delta_{\partialbar}:=\partialbar\partialbar^*+\partialbar^*\partialbar\,,
$$
$$
\Delta_{\partial}:=\partial\partial^*+\partial^*\partial\,,
$$
$$
\Delta_{\bar\mu}:=\bar\mu\bar\mu^*+\bar\mu^*\bar\mu\,,
$$
$$
\Delta_{\mu}:=\mu\mu^*+\mu^*\mu\,.
$$
On compact almost-Hermitian manifolds the operators $\Delta_{\partialbar}$ and
$\Delta_{\partial}$ are elliptic, so the associated spaces
$\mathcal{H}^{\bullet,\bullet}_{\partialbar}(X):=\text{Ker}\,\Delta_{\partialbar}$ and
$\mathcal{H}^{\bullet,\bullet}_{\partial}(X):=\text{Ker}\,\Delta_{\partial}$ of harmonic forms are finite-dimensional; on the contrary, the spaces
$\mathcal{H}^{\bullet,\bullet}_{\bar\mu}(X):=\text{Ker}\,\Delta_{\bar\mu}$ and
$\mathcal{H}^{\bullet,\bullet}_{\mu}(X):=\text{Ker}\,\Delta_{\mu}$ associated with $\Delta_{\bar\mu}$ and $\Delta_{\mu}$ are in general infinite-dimensional (recall that $\bar\mu$ and $\mu$ are linear over functions). In the following we will consider several spaces of harmonic forms and discuss their relations with these ones.
\section{Differential operators on almost-complex manifolds}
Let $(X,J)$ be an almost-complex manifold and consider a linear combination of the differential operators $\partial\,,\partialbar\,,\mu\,,\bar\mu$,
$$
D_{a,b,c,e}:=a\,\partialbar+b\,\partial+c\,\mu+e\,\bar\mu\,,
$$
with $a,b,c,e\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$.
Clearly $D_{a,b,c,e}$ satisfies the Leibniz rule;
we are interested in finding conditions on the parameters ensuring that $D_{a,b,c,e}^2=0$.
Notice that if $J$ is integrable, then
$$
D_{a,b,c,e}=a\,\partialbar+b\,\partial
$$
and $D_{a,b,c,e}^2=0$ for any choice of the parameters.
Therefore from now on $J$ will always be assumed to be non-integrable if not stated otherwise.
In fact we have
\begin{lemma}
Let $(X,J)$ be an almost-complex manifold. Then $D_{a,b,c,e}^2=0$
if and only if
$$
e=\frac{a^2}{b}\quad\text{and}\quad c=\frac{b^2}{a}\,.
$$
\end{lemma}
\begin{proof}
By a direct computation one has
$$
D_{a,b,c,e}^2=\partialbar^2(a^2-be)+\partial^2(b^2-ac)+
(\partial\partialbar+\partialbar\partial)(ab-ce)\,.
$$
\end{proof}
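As a sanity check, substituting the relations of the Lemma into the expression for $D_{a,b,c,e}^2$ one gets
$$
a^2-b\,\frac{a^2}{b}=0\,,\qquad b^2-a\,\frac{b^2}{a}=0\,,\qquad ab-\frac{b^2}{a}\cdot\frac{a^2}{b}=0\,.
$$
Note also that the three operators $\partialbar^2$, $\partial^2$ and $\partial\partialbar+\partialbar\partial$ shift the bidegree by $(0,2)$, $(2,0)$ and $(1,1)$ respectively, so the three summands above cannot cancel each other.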
We set
$$
D_{a,b}:=a\,\partialbar+b\,\partial+\frac{b^2}{a}\mu+\frac{a^2}{b}\bar\mu,
$$
with $a,b\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$.\\
Since $D_{a,b}^2=0$ we define the associated parametrized cohomology
$$
H_{D_{a,b}}^\bullet(X):=\frac{\text{Ker}\,D_{a,b}}{\text{Im}\,D_{a,b}}\,.
$$
Notice that if $a=b$, one has $D_{a,a}=a\,d$, i.e., a multiple of the exterior derivative.\\
In general, $D_{a,b}$ is not a real operator, indeed by a straightforward computation one gets
\begin{lemma}\label{lemma:real-operator}
Let $(X,J)$ be an almost-complex manifold. Then, $D_{a,b}=\overline{D_{a,b}}$
if and only if $a=\bar b$.
\end{lemma}
We set
$$
D_a:=a\,\partialbar+\bar a\,\partial+\frac{\bar a^2}{a}\mu+\frac{a^2}{\bar a}\bar\mu\,.
$$
Notice that the family of operators
$\left\lbrace D_a\right\rbrace_{a\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace}$ contains the operators
$$
D_1=D_{1,1}=d\quad\text{and}\quad D_i=D_{i,-i}=d^c\,.
$$
In particular,
$$
H_{D_{1,1}}^\bullet(X)=H^\bullet_{dR}(X)\simeq
H_{d^c}^\bullet(X)=H_{D_{i,-i}}^\bullet(X)\,.
$$
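The identification $D_{i,-i}=d^c$ can also be verified directly from the definition of $D_{a,b}$:
$$
D_{i,-i}=i\,\partialbar-i\,\partial+\frac{(-i)^2}{i}\,\mu+\frac{i^2}{-i}\,\bar\mu
=i\left(\partialbar+\mu-\partial-\bar\mu\right)=d^c\,,
$$
while $D_{1,1}=\partialbar+\partial+\mu+\bar\mu=d$.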
Moreover, recall that if $J$ is non-integrable, then $D_1D_i+D_iD_1\neq 0$; therefore we determine when two real differential operators $D_a$ and $D_b$ anticommute.
\begin{prop}
Let $(X,J)$ be an almost-complex manifold. Then, $D_{a}D_b+D_bD_a=0$
if and only if $\bar a\,b\in\mathbb{R}$.
\end{prop}
\begin{proof}
Set
$$
D_a:=a\,\partialbar+\bar a\,\partial+\frac{\bar a^2}{a}\mu+\frac{a^2}{\bar a}\bar\mu
\quad\text{and}\quad
D_b:=b\,\partialbar+\bar b\,\partial+\frac{\bar b^2}{b}\mu+\frac{b^2}{\bar b}\bar\mu\,.
$$
Then, $D_{a}D_b+D_bD_a=0$ if and only if
$$
\left\lbrace
\begin{array}{lcl}
a\bar b+b\bar a & =& \frac{\bar a^2 b^2}{a\bar b}+
\frac{a^2\bar b^2}{\bar a b}\\[8pt]
2\bar a\bar b & =& \frac{\bar b^2a}{b}+\frac{\bar a^2b}{a}
\end{array}
\right.
$$
if and only if $\bar b a=\bar a b$, i.e., $\bar a b\in\mathbb{R}$, concluding the proof.
\end{proof}
In fact, with the same argument, more generally one has
\begin{prop}\label{prop:anticommutation-relation}
Let $(X,J)$ be an almost-complex manifold. Then,
$D_{a,b}D_{c,e}+D_{c,e}D_{a,b}=0$
if and only if $ae=bc$.
\end{prop}
\begin{proof}
Set
$$
D_{a,b}:=a\,\partialbar+b\,\partial+\frac{b^2}{a}\mu+\frac{a^2}{b}\bar\mu
\quad\text{and}\quad
D_{c,e}:=c\,\partialbar+e\,\partial+\frac{e^2}{c}\mu+\frac{c^2}{e}\bar\mu.
$$
Then, $D_{a,b}D_{c,e}+D_{c,e}D_{a,b}=0$ if and only if
$$
\left\lbrace
\begin{array}{lcl}
ae+bc & =& \frac{ a^2 e^2}{bc}+
\frac{b^2c^2}{ae}\\[8pt]
2ac & =& \frac{bc^2}{e}+\frac{a^2e}{b}\\[8pt]
2be & =& \frac{ae^2}{c}+\frac{b^2c}{a}
\end{array}
\right.
$$
if and only if $ae=bc$ concluding the proof.
\end{proof}
\begin{rem}
Notice that when $J$ is integrable, it is straightforward to show that two arbitrary operators of the form
$$
D_{a,b}:=a\,\partialbar+b\,\partial
\quad\text{and}\quad
D_{c,e}:=c\,\partialbar+e\,\partial
$$
anticommute.
\end{rem}
\begin{rem}
If $b=1$, namely $D_b=d$, then
$$
D_ad+dD_a=0
$$
if and only if $a\in\mathbb{R}$. Namely, the only operators anticommuting with the exterior derivative in $\left\lbrace D_a\right\rbrace_{a\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace}$ are those with the parameter $a$ real.\\
If $b=i$, namely $D_b=d^c$, then
$$
D_ad^c+d^cD_a=0
$$
if and only if $i\bar a\in\mathbb{R}$. Namely, the only operators anticommuting with $d^c$ in $\left\lbrace D_a\right\rbrace_{a\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace}$ are those with the parameter $a$ purely imaginary.
\end{rem}
As a consequence of the previous considerations, if $ae=bc$ and $(a,b)\neq (c,e)$ then $(A^\bullet(X),D_{a,b},D_{c,e})$ is a double complex since
$$
\left\lbrace
\begin{array}{lcl}
D_{a,b}^2 & = & 0\\
D_{c,e}^2 & = & 0\\
D_{a,b}D_{c,e}+D_{c,e}D_{a,b} & = & 0
\end{array}
\right.,
$$
hence one can define the \emph{Bott-Chern} and
\emph{Aeppli cohomologies} respectively as
$$
H_{BC(D_{a,b},D_{c,e})}^\bullet(X):=
\frac{\text{Ker}\,D_{a,b}\cap\text{Ker}\,D_{c,e}}
{\text{Im}\,D_{a,b}D_{c,e}},\quad
H_{A(D_{a,b},D_{c,e})}^\bullet(X):=
\frac{\text{Ker}\,D_{a,b}D_{c,e}}
{\text{Im}\,D_{a,b}+\text{Im}\,D_{c,e}}.
$$
Let $(X,J)$ be an almost-complex manifold and let $g$ be a $J$-Hermitian metric on $X$. Then the adjoint of $D_{a,b}$ is
$$
D^*_{a,b}:=\bar a\,\partialbar^*+
\bar b\,\partial^*+\frac{\bar b^2}{\bar a}\mu^*+
\frac{\bar a^2}{\bar b}\bar\mu^*\,.
$$
We consider the second-order differential operator
$$
\Delta_{a,b}:=
D_{a,b}D^*_{a,b}+D^*_{a,b}D_{a,b}\,.
$$
\begin{lemma}
Let $(X,J,g)$ be an almost-Hermitian manifold. The second-order differential operators $\Delta_{a,b}$ are elliptic.
\end{lemma}
\begin{proof}
Fix $a$ and $b$.
We can compute the symbol of $\Delta_{a,b}$ as follows.
We work with a local unitary coframe
$\left\lbrace\theta^1,\dots,\theta^n\right\rbrace$ of $(1,0)$-forms such that the metric can be written as
$$
g=\theta^i\otimes\bar\theta^i+\bar\theta^i\otimes\theta^i\,.
$$
Using Einstein notations, a $(p,q)$-form $\alpha$ locally can be written as
$$
\alpha=\alpha_{i_1\cdots i_pj_1\cdots j_q}\theta^{i_1}\wedge\cdots\wedge
\theta^{i_p}\wedge\bar\theta^{j_1}\wedge\cdots\wedge\bar\theta^{j_q}\,.
$$
Then, up to zero-order terms, $\partialbar$ acts as
$$
(\partialbar\alpha)_{p,q+1}=\partialbar_{j_{q+1}}
\alpha_{i_1\cdots i_pj_1\cdots j_q}\bar\theta^{j_{q+1}}\wedge\theta^{i_1}\wedge\cdots\wedge
\theta^{i_p}\wedge\bar\theta^{j_1}\wedge\cdots\wedge\bar\theta^{j_q}
$$
and $\mu$ acts as
$$
\mu\alpha=
\alpha_{i_1\cdots i_pj_1\cdots j_q}\mu\left(\theta^{i_1}\wedge\cdots\wedge
\theta^{i_p}\wedge\bar\theta^{j_1}\wedge\cdots\wedge\bar\theta^{j_q}\right)
$$
and similarly for $\partial$ and $\bar\mu$.
In computing the symbol of $\Delta_{a,b}$ we are only interested in the highest-order differential acting on the coefficients
$\alpha_{i_1\cdots i_pj_1\cdots j_q}$.
Denoting with $\simeq$ the equivalence of the symbol of the operators we get
$$
\Delta_{a,b}\simeq
\vert a\vert^2\Delta_{\partialbar}+\vert b\vert^2\Delta_{\partial}+
a\bar b(\partialbar\partial^*+\partial^*\partialbar)+
b\bar a(\partial\partialbar^*+\partialbar^*\partial)\simeq
\vert a\vert^2\Delta_{\partialbar}+\vert b\vert^2\Delta_{\partial}
$$
hence $\Delta_{a,b}$ is elliptic.
\end{proof}
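With the standard normalization for the symbols of the Dolbeault Laplacians (a convention not made explicit here), the computation above gives, for a nonzero cotangent vector $\xi$,
$$
\sigma(\Delta_{a,b})(x,\xi)=\left(\vert a\vert^2+\vert b\vert^2\right)\sigma(\Delta_{\partialbar})(x,\xi)
=\frac{\vert a\vert^2+\vert b\vert^2}{2}\,\vert\xi\vert^{2}\,\mathrm{id}\,,
$$
which is invertible for $\xi\neq 0$, confirming ellipticity.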
We denote with $\mathcal{H}^k_{D_{a,b}}(X):=
\Ker\,(\Delta_{{a,b}{\vert A^k}})$ the space of $D_{a,b}$-harmonic $k$-forms.
By the theory of elliptic operators we get the following
\begin{theorem}\label{thm:Dab-harmonic}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then
the following Hodge decomposition holds, for every $k$,
$$
A^k(X)=\mathcal{H}^k_{D_{a,b}}(X)\oplus D_{a,b} A^{k-1}(X)\oplus
D_{a,b}^* A^{k+1}(X)\,.
$$
Moreover, the space $\mathcal{H}^\bullet_{D_{a,b}}(X)$ is finite-dimensional.
\end{theorem}
One has the following
\begin{theorem}\label{thm:Dab-harmonic-Dab-cohomology}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then there exists
an isomorphism, for every $k$,
$$
H^k_{D_{a,b}}(X)\simeq\mathcal{H}^k_{D_{a,b}}(X)\,.
$$
In particular, the space $H^\bullet_{D_{a,b}}(X)$ is finite-dimensional and we will denote with $h^\bullet_{D_{a,b}}(X)$ its dimension.
\end{theorem}
As a consequence we have the analogue of the Poincar\'e duality for the cohomology groups $H^\bullet_{D_{a,b}}(X)$.
\begin{prop}\label{prop:Dab-star}
Let $(X,J,g)$ be a compact almost-Hermitian manifold of dimension $2n$, then the Hodge-$*$-operator induces a duality isomorphism, for every $k$,
$$
*:H^k_{D_{a,b}}(X)\to H^{2n-k}_{D_{a,b}}(X)\,.
$$
In particular, for every $k$, one has the equalities
$h^k_{D_{a,b}}(X)=h^{2n-k}_{D_{a,b}}(X)$.
\end{prop}
Similarly, one could develop a Hodge Theory for the Bott-Chern and Aeppli cohomologies of $(A^\bullet(X),D_{a,b},D_{c,e})$ (with $ae=bc$ and $(a,b)\neq (c,e)$)
following for instance \cite{schweitzer}.\\
In particular, the Bott-Chern and Aeppli Laplacians can be defined as
$$
\Delta_{BC_{a,b,c,e}}=(D_{a,b}D_{c,e})(D_{a,b}D_{c,e})^*+
(D_{a,b}D_{c,e})^*(D_{a,b}D_{c,e})+
(D_{c,e}^*D_{a,b})(D_{c,e}^*D_{a,b})^*+
$$
$$
(D_{c,e}^*D_{a,b})^*(D_{c,e}^*D_{a,b})+
D_{c,e}^*D_{c,e}+D_{a,b}^*D_{a,b}\,,
$$
$$\Delta_{A_{a,b,c,e}} \;:=\;
D_{a,b}D_{a,b}^*+D_{c,e}D_{c,e}^*+
(D_{a,b}D_{c,e})^*(D_{a,b}D_{c,e})+
(D_{a,b}D_{c,e})(D_{a,b}D_{c,e})^*+
$$
$$
(D_{c,e}D_{a,b}^*)^*(D_{c,e}D_{a,b}^*)+
(D_{c,e}D_{a,b}^*)(D_{c,e}D_{a,b}^*)^*\,.
$$
These operators are elliptic and we denote with $\mathcal{H}^k_{BC(D_{a,b},D_{c,e})}(X):=\Ker(\Delta_{BC_{a,b,c,e}|A^k})$ the space of $BC$-harmonic $k$-forms and with $\mathcal{H}^k_{A(D_{a,b},D_{c,e})}(X):=\Ker(\Delta_{A_{a,b,c,e}\vert A^k})$ the space of $A$-harmonic $k$-forms.
By a direct calculation one can show the following
\begin{prop}
Let $(X,J,g)$ be a compact almost-Hermitian manifold. If
$ae=bc$ and $(a,b)\neq (c,e)$
then, a differential form $\alpha\in\mathcal{H}^k_{BC(D_{a,b},D_{c,e})}(X)$ if and only if
$$
D_{a,b}\,\alpha=0\,,\quad D_{c,e}\,\alpha=0\,,\quad
(D_{a,b}D_{c,e})^*\,\alpha=0\,.
$$
Similarly,
$\alpha\in\mathcal{H}^k_{A(D_{a,b},D_{c,e})}(X)$ if and only if
$$
(D_{a,b})^*\,\alpha=0\,,\quad (D_{c,e})^*\,\alpha=0\,,\quad
D_{a,b}D_{c,e}\,\alpha=0\,.
$$
\end{prop}
By the theory of elliptic operators we get the following
\begin{theorem}\label{thm:BCA-harmonic}
Let $(X,J,g)$ be a compact almost-Hermitian manifold. If $ae=bc$ and $(a,b)\neq (c,e)$ then
the following Hodge decompositions hold, for every $k$,
$$
A^k(X)=\mathcal{H}^k_{BC(D_{a,b},D_{c,e})}(X)\oplus D_{a,b}D_{c,e} A^{k-2}(X)\oplus
(D_{c,e}^* A^{k+1}(X)+D_{a,b}^* A^{k+1}(X))\,,
$$
$$
A^k(X)=\mathcal{H}^k_{A(D_{a,b},D_{c,e})}(X)\oplus
(D_{a,b} A^{k-1}(X)+D_{c,e} A^{k-1}(X))\oplus
((D_{a,b}D_{c,e})^* A^{k+2}(X))\,.
$$
Moreover, the spaces $\mathcal{H}^\bullet_{BC(D_{a,b},D_{c,e})}(X)$ and
$\mathcal{H}^\bullet_{A(D_{a,b},D_{c,e})}(X)$ are finite-dimensional.
\end{theorem}
One has the following
\begin{theorem}\label{thm:BCA-harmonic-BCA-cohomology}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then there exist
isomorphisms, for every $k$,
$$
H^k_{BC(D_{a,b},D_{c,e})}(X)\simeq\mathcal{H}^k_{BC(D_{a,b},D_{c,e})}(X)\,,
$$
and
$$
H^k_{A(D_{a,b},D_{c,e})}(X)\simeq\mathcal{H}^k_{A(D_{a,b},D_{c,e})}(X)\,.
$$
In particular, the spaces $H^\bullet_{BC(D_{a,b},D_{c,e})}(X)$ and
$H^\bullet_{A(D_{a,b},D_{c,e})}(X)$ are finite-dimensional.
\end{theorem}
However, under some additional hypotheses on the parameters $a,b$ we can write down an explicit isomorphism.
\begin{prop}\label{prop:isom-parametric-derham}
Let $(X,J,g)$ be a compact almost-Hermitian manifold of dimension $2n$. Let
$a,b\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$ such that
$\vert a\vert=\vert b\vert$, then there exists an isomorphism
$$
\text{Ker}\,\Delta_d\quad\simeq\quad \text{Ker}\,\Delta_{a,b}
$$
given by
$$
\alpha\mapsto\sum_{p+q=k}\left(\frac{a}{b}\right)^q\alpha^{p,q}
$$
where $\alpha^{p,q}$ denotes the $(p,q)$-component of a $k$-form $\alpha$.
\end{prop}
\begin{proof}
Let $\alpha=\sum_{p+q=k}\alpha^{p,q}$ be a $d$-closed $k$-form, namely
$\mu\alpha+\partial\alpha+\partialbar\alpha+\bar\mu\alpha=0$.
Then, by bi-degree reasons
$$
\left\lbrace
\begin{array}{lcl}
\mu\,\alpha^{p+q,0} & =& 0\\
\partial\,\alpha^{p+q,0} & =& -\mu\,\alpha^{p+q-1,1}\\
\partial\,\alpha^{p+q-1,1} & =& -\partialbar\,\alpha^{p+q,0} -\mu\,\alpha^{p+q-2,2}\\
\partial\,\alpha^{p+q-2,2} & =& -\partialbar\,\alpha^{p+q-1,1}
-\mu\,\alpha^{p+q-3,3}-\bar\mu\,\alpha^{p+q,0}\\
\vdots & \vdots & \vdots \\
\partial\,\alpha^{1,p+q-1} & =& -\partialbar\,\alpha^{2,p+q-2}
-\mu\,\alpha^{0,p+q}-\bar\mu\,\alpha^{3,p+q-3}\\
\partial\,\alpha^{0,p+q} & =& -\partialbar\,\alpha^{1,p+q-1}
-\bar\mu\,\alpha^{2,p+q-2}\\
\partialbar\,\alpha^{0,p+q} & =& -\bar\mu\,\alpha^{1,p+q-1}\\
\bar\mu\,\alpha^{0,p+q} & =& 0
\end{array}
\right.\,.
$$
Therefore,
$$
\left\lbrace
\begin{array}{lcl}
\mu\,\alpha^{p+q,0} & =& 0\\
b\partial\,\alpha^{p+q,0} & =& -\frac{b^2}{a}\mu\,(\frac{a}{b}\alpha^{p+q-1,1})\\[5pt]
b\partial\,(\frac{a}{b}\alpha^{p+q-1,1}) & =& -a\partialbar\,\alpha^{p+q,0} -
\frac{b^2}{a}\mu\,(\frac{a^2}{b^2}\alpha^{p+q-2,2})\\[5pt]
b\partial\,(\frac{a^2}{b^2}\alpha^{p+q-2,2}) & =&
-a\partialbar\,(\frac{a}{b}\alpha^{p+q-1,1})
-\frac{b^2}{a}\mu\,(\frac{a^3}{b^3}\alpha^{p+q-3,3})-
\frac{a^2}{b}\bar\mu\,\alpha^{p+q,0}\\
\vdots & \vdots & \vdots \\
b\partial\,(\frac{a^{p+q-1}}{b^{p+q-1}}\alpha^{1,p+q-1}) & =&
-a\partialbar\,(\frac{a^{p+q-2}}{b^{p+q-2}}\alpha^{2,p+q-2} )
-\frac{b^2}{a}\mu\,(\frac{a^{p+q}}{b^{p+q}}\alpha^{0,p+q})
-\frac{a^2}{b}\bar\mu\,(\frac{a^{p+q-3}}{b^{p+q-3}}
\alpha^{3,p+q-3})\\[5pt]
b\partial\,(\frac{a^{p+q}}{b^{p+q}}\alpha^{0,p+q}) & =&
-a\partialbar\,(\frac{a^{p+q-1}}{b^{p+q-1}}\alpha^{1,p+q-1})
-\frac{a^2}{b}\bar\mu\,(\frac{a^{p+q-2}}{b^{p+q-2}}
\alpha^{2,p+q-2})\\[5pt]
a\partialbar\,(\frac{a^{p+q}}{b^{p+q}}\alpha^{0,p+q}) & =&
-\frac{a^2}{b}\bar\mu\,(\frac{a^{p+q-1}}{b^{p+q-1}}
\alpha^{1,p+q-1})\\[5pt]
\bar\mu\,\alpha^{0,p+q} & =& 0
\end{array}
\right.\,
$$
Namely, if $d\alpha=0$ then
$$
D_{a,b}\left(\alpha^{p+q,0}+\frac{a}{b}\alpha^{p+q-1,1}+
\frac{a^2}{b^2}\alpha^{p+q-2,2}+\cdots+
\frac{a^{p+q}}{b^{p+q}}\alpha^{0,p+q}\right)=0\,.
$$
Similarly, if $d^*\alpha=0$ then
$$
D_{a,b}^*\left(\alpha^{p+q,0}+\frac{\bar b}{\bar a}\alpha^{p+q-1,1}+
\frac{\bar b^2}{\bar a^2}\alpha^{p+q-2,2}+\cdots+
\frac{\bar b^{p+q}}{\bar a^{p+q}}\alpha^{0,p+q}\right)=0\,.
$$
Therefore, if $\vert a\vert^2=\vert b\vert^2$, then $\frac{a}{b}=\frac{\bar b}{\bar a}$ and the two combinations above coincide; hence if $\Delta_d\,\alpha=0$ then
$$
\alpha^{p+q,0}+\frac{a}{b}\alpha^{p+q-1,1}+
\frac{a^2}{b^2}\alpha^{p+q-2,2}+\cdots+
\frac{a^{p+q}}{b^{p+q}}\alpha^{0,p+q}
$$
is $\Delta_{a,b}$-harmonic.
\end{proof}
\begin{cor}
Let $(X,J,g)$ be a compact almost-Hermitian manifold of dimension $2n$. Let
$a,b\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$ such that
$\vert a\vert=\vert b\vert$, then there exists an isomorphism
$$
H_{dR}^\bullet(X)\quad\simeq\quad H_{D_{a,b}}^\bullet(X).
$$
\end{cor}
Notice that in case of $D_{i,-i}=d^c$ the isomorphism becomes
$$
\alpha\mapsto\sum_{p+q=k}(-1)^q\alpha^{p,q}=i^{-k}J\alpha\,.
$$
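The last equality can be checked on each bidegree component, assuming the standard convention that $J$ acts on a $(p,q)$-form by $i^{p-q}$:
$$
i^{-k}J\alpha^{p,q}=i^{-(p+q)}\,i^{p-q}\,\alpha^{p,q}=i^{-2q}\,\alpha^{p,q}=(-1)^q\,\alpha^{p,q}\,.
$$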
\begin{rem}
Since $D_a=D_{a,\bar a}$ is a real operator and $\vert a\vert=\vert\bar a\vert$, by the previous corollary there is an isomorphism
$$
H_{dR}^\bullet(X)\quad\simeq\quad H_{D_{a}}^\bullet(X)
$$
for any $a\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$.
\end{rem}
\begin{ex}\label{example:kodaira-thurston-Dab-cohomology}
Let $\mathbb{H}(3;\mathbb{R})$ be the $3$-dimensional Heisenberg group and
$\mathbb{H}(3;\mathbb{Z})$ be the subgroup of matrices with entries in $\mathbb{Z}$.
The Kodaira-Thurston manifold is defined as the quotient
$$
X:=\left(\mathbb{H}(3;\mathbb{R})\times \mathbb{R}\right)/
\left(\mathbb{H}(3;\mathbb{Z})\times \mathbb{Z}\right)\,.
$$
The manifold $X$ is a $4$-dimensional nilmanifold which admits both complex and symplectic structures.
We consider the non-integrable almost-complex structure $J$ defined by the structure equations
$$
\left\lbrace
\begin{array}{lcl}
d\varphi^1 & =& 0\\
d\varphi^2 &=& \frac{1}{2i}\varphi^{12}+\frac{1}{2i}\left(\varphi^{1\bar 2}-\varphi^{2\bar 1}\right)+\frac{1}{2i}\varphi^{\bar 1\bar 2}
\end{array}
\right.\,
$$
where $\left\lbrace\varphi^1\,,\varphi^2\right\rbrace$ is a global co-frame of (1,0)-forms on $X$.\\
Hence, directly we get, for any $a,b\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$
$$
\left\lbrace
\begin{array}{lcl}
D_{a,b}\varphi^1 & =& 0\\
D_{a,b}\varphi^2 &=& \frac{1}{2i}a\left(\varphi^{1\bar2}-\varphi^{2\bar 1}\right)+\frac{1}{2i}b\varphi^{12}+
\frac{1}{2i}\frac{a^2}{b}\varphi^{\bar1\bar2}
\end{array}
\right.\,.
$$
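Here the expression for $D_{a,b}\varphi^2$ is obtained simply by reading off the bidegree components of the structure equation for $d\varphi^2$, namely
$$
\partial\varphi^2=\frac{1}{2i}\varphi^{12}\,,\qquad
\partialbar\varphi^2=\frac{1}{2i}\left(\varphi^{1\bar 2}-\varphi^{2\bar 1}\right)\,,\qquad
\mu\varphi^2=0\,,\qquad
\bar\mu\varphi^2=\frac{1}{2i}\varphi^{\bar 1\bar 2}\,,
$$
while all four components of $d\varphi^1$ vanish.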
We fix the $J$-Hermitian metric $\omega:=\frac{1}{2i}\sum_{j=1}^2\varphi^j\wedge\bar\varphi^j$, and by a direct computation one gets on invariant $2$-forms
$$
\text{Ker}\,D_{a,b,inv}=\mathbb{C}\left\langle
\varphi^{1\bar1},\varphi^{2\bar2},
\frac{b}{a}\varphi^{12}+\varphi^{1\bar2},
-\frac{b}{a}\varphi^{12}+\varphi^{2\bar1},
-\frac{b^2}{a^2}\varphi^{12}+\varphi^{\bar1\bar2}
\right\rangle
$$
and
$$
\text{Ker}\,D^*_{a,b,inv}=\mathbb{C}\left\langle
\varphi^{1\bar1},\varphi^{2\bar2},
-\frac{\bar a}{\bar b}\varphi^{12}+\varphi^{1\bar2},
\frac{\bar a}{\bar b}\varphi^{12}+\varphi^{2\bar1},
-\frac{\bar a^2}{\bar b^2}\varphi^{12}+\varphi^{\bar1\bar2}
\right\rangle\,.
$$
Therefore, one gets
$$
H^2_{D_{a,b},\text{inv}}\simeq\mathbb{C}\left\langle
\varphi^{1\bar1},\varphi^{2\bar2},
\varphi^{1\bar 2}+\varphi^{2\bar1},
\varphi^{\bar1\bar2}
-\frac{\vert a\vert^2-\vert b\vert^2}{a\bar b}\varphi^{1\bar2}
-\frac{b\bar a}{a\bar b}\varphi^{12}
\right\rangle\,,
$$
where we listed the harmonic representatives with respect to $\omega$.
In particular, for $a=b=1$ we get the harmonic representatives for the de Rham cohomology and for $a=-b=i$ we get the harmonic representatives for the
$d^c$-cohomology $H^2_{d^c}(X)$.
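As a consistency check (using the standard Betti numbers of the Kodaira-Thurston manifold, which are not recalled here), the four invariant classes listed above match $b_2(X)$:
$$
b_0(X)=1\,,\quad b_1(X)=3\,,\quad b_2(X)=4\,,\quad b_3(X)=3\,,\quad b_4(X)=1\,,
$$
in agreement, at least at the level of invariant forms, with the isomorphism $H^\bullet_{D_{a,b}}(X)\simeq H^\bullet_{dR}(X)$ of the previous Corollary.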
\end{ex}
\begin{rem}
Notice that if $J$ is integrable then $(A^\bullet(X),D_{a,b},D_{c,e})$ is a double complex for any choice of the parameters (provided $(a,b)\neq(c,e)$) and so one can define accordingly the associated Dolbeault, Bott-Chern and Aeppli cohomologies.
\end{rem}
\section{Differential operators on symplectic manifolds}\label{section:diff-op-sympl}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold that is an almost-Hermitian manifold with fundamental form $\omega$ $d$-closed.
Then, we can generalize the symplectic cohomologies introduced in \cite{tseng-yau-I}.\\
Let
$$
L:=\omega\wedge-: A^{\bullet}(X)\to A^{\bullet+2}(X)
$$
and
$$
\Lambda:=-\star L\star: A^{\bullet}(X)\to A^{\bullet-2}(X)\,,
$$
where $\star=J*=*J$ is the symplectic-Hodge-$\star$-operator.
Denote with
$$
d^\Lambda:=[d,\Lambda]\,;
$$
since $\omega$ is symplectic we have that
$$
d^\Lambda=(-1)^{k+1}\star d\star_{\vert A^k(X)}
$$
i.e., $d^\Lambda$ is the Brylinski-codifferential (\cite{brylinski}), namely the symplectic adjoint of $d$.
Then, it is well known that $(d^c)^*=-d^\Lambda$, indeed on $k$-forms
$$
(d^c)^*=-*d^c*=-*J^{-1}dJ*=-(-1)^{k+1}*Jd\star=(-1)^k\star d\star=
-d^\Lambda\,.
$$
By the almost-K\"ahler identities (cf. Lemma \ref{lemma:almost-kahler-identities})
\begin{itemize}
\item[$\bullet$] $[\partial,\Lambda]=i\,\partialbar^*$ and $[\bar\mu,\Lambda]=i\,\mu^*$
\item[$\bullet$] $[\partialbar,\Lambda]=-i\,\partial^*$ and $[\mu,\Lambda]=-i\,\bar\mu^*$.
\end{itemize}
one has the following
\begin{lemma}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold, then for $a,b\in\mathbb{C}\setminus\left\lbrace 0\right\rbrace$,
\begin{itemize}
\item[$\bullet$] $[D_{a,b},L]=0$,
\item[$\bullet$] $[D_{a,b},\Lambda]=-i\,D^*_{-\bar b,\bar a}$.
\end{itemize}
Moreover,
$[D_{a,b},\Lambda]=(-1)^{k+1}\star D_{a,b}\star$ on $k$-forms if and only if
$D_{a,b}$ is a real operator.
\end{lemma}
\begin{proof}
By direct computations using the almost-K\"ahler identities
$$
[D_{a,b},\Lambda]=a[\partialbar,\Lambda]+b[\partial,\Lambda]+
\frac{b^2}{a}[\mu,\Lambda]+\frac{a^2}{b}[\bar\mu,\Lambda]=
$$
$$
=-ia\partial^*+ib\partialbar^*-i\frac{b^2}{a}\bar\mu^*+i\frac{a^2}{b}\mu^*=
-i\,D^*_{-\bar b,\bar a}\,.
$$
Moreover, notice that
$$
\star D_{a,b}\star=\bar a \star\partialbar\star+\bar b\star\partial\star+
\frac{\bar b^2}{\bar a}\star\mu\star+ \frac{\bar a^2}{\bar b}\star\bar\mu\star
$$
hence, $[D_{a,b},\Lambda]=(-1)^{k+1}\star D_{a,b}\star$ if and only if
$a=\bar b$ if and only if $D_{a,b}$ is a real operator by Lemma
\ref{lemma:real-operator}.
\end{proof}
As a consequence, we denote
$$
D_a^\Lambda:=[D_{a},\Lambda]=(-1)^{k+1}\star D_{a}\star_{\vert_{A^k(X)}}\,.
$$
This operator generalizes the Brylinski co-differential, indeed
$$
D_1^\Lambda=d^\Lambda\,.
$$
In fact, using $D_a^\Lambda:=[D_{a},\Lambda]$ and $D_a^2=0$, we have that
$$
D_aD_a^\Lambda+D_a^\Lambda D_a=0\quad\text{and}\quad
(D_a^\Lambda)^2=0.
$$
In particular, for $a=1$ we recover the standard relations
$$
dd^\Lambda+d^\Lambda d=0\quad\text{and}\quad
(d^\Lambda)^2=0.
$$
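A short justification of the relations above (a sketch, using the graded Jacobi identity, $D_a^2=0$, and the fact that the symplectic star is an involution, $\star\star=\mathrm{id}$, cf. \cite{brylinski}; here $[\cdot,\cdot]$ denotes the graded commutator): on $k$-forms,
$$
D_aD_a^\Lambda+D_a^\Lambda D_a=[D_a,[D_a,\Lambda]]=\tfrac{1}{2}\left[[D_a,D_a],\Lambda\right]=[D_a^2,\Lambda]=0\,,
$$
$$
(D_a^\Lambda)^2=(-1)^{k+2}\star D_a\star\,(-1)^{k+1}\star D_a\star=-\star D_a^{2}\star=0\,.
$$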
Therefore, one can define
$$
H^\bullet_{D_a^\Lambda}:=
\frac{\text{Ker}\,D_a^\Lambda}{\text{Im}\,D_a^\Lambda},\quad
H_{BC(D_{a},D_a^\Lambda)}^\bullet(X):=
\frac{\text{Ker}\,D_{a}\cap\text{Ker}\,D_a^\Lambda}
{\text{Im}\,D_{a}D_a^\Lambda},\quad
H_{A(D_{a},D_a^\Lambda)}^\bullet(X):=
\frac{\text{Ker}\,D_{a}D_a^\Lambda}
{\text{Im}\,D_{a}+\text{Im}\,D_a^\Lambda}.
$$
The symplectic cohomologies defined in \cite{tseng-yau-I} correspond to the parameter $a=1$.
\section{Harmonic forms on almost-Hermitian manifolds}\label{section:harmonic-forms-almost-hermitian}
In the following we try to generalize the spaces of harmonic forms for the
Dolbeault, Bott-Chern and Aeppli cohomology groups of complex manifolds using the intrinsic decomposition of $d$ induced by the almost-complex structure. However, for a non-integrable almost-complex structure we do not have a cohomological counterpart (cf. also \cite{cirici-wilson-1}, \cite{cirici-wilson-2}).
Let $(X,\,J,\,g)$ be an almost-Hermitian manifold, that is, a smooth manifold $X$ endowed with an almost-complex structure $J$ and a
$J$-Hermitian metric $g$. As above denote with $*$ the associated Hodge-$*$-operator.
Consequently,
$$
\partialta^*=\partial^*+\bar\mu^*\,,\qquad \bar\partialta^*=\partialbar^*+\mu^*
$$
and
$$
(d^c)^*=i(\partialta^*-\bar\partialta^*)=i(\partial^*+\bar\mu^*-\partialbar^*-\mu^*)\,.
$$
We define the following differential operators
$$
\Delta_{\bar\partialta}:=\bar\partialta\bar\partialta^*+\bar\partialta^*\bar\partialta\,,
$$
$$
\Delta_{\partialta}:=\partialta\partialta^*+\partialta^*\partialta\,,
$$
$$
\Delta_{BC(\partialta,\bar\partialta)}:=
(\partialta\bar\partialta)(\partialta\bar\partialta)^*+
(\partialta\bar\partialta)^*(\partialta\bar\partialta)+
(\bar\partialta^*\partialta)(\bar\partialta^*\partialta)^*+
(\bar\partialta^*\partialta)^*(\bar\partialta^*\partialta)+
\bar\partialta^*\bar\partialta+\partialta^*\partialta\,,
$$
$$\Delta_{A(\partialta,\bar\partialta)} \;:=\; \partialta\partialta^*+\partialtabar\partialtabar^*+(\partialta\partialtabar)^*(\partialta\partialtabar)+
(\partialta\partialtabar)(\partialta\partialtabar)^*+
(\partialtabar\partialta^*)^*(\partialtabar\partialta^*)+
(\partialtabar\partialta^*)(\partialtabar\partialta^*)^*\,.
$$
\begin{rem}
Notice that if $J$ is an integrable almost-complex structure
then these differential operators coincide with the classical Laplacian operators on complex manifolds, namely
the Dolbeault Laplacians
$$
\Delta_{\partialbar}:=\partialbar\partialbar^*+\partialbar^*\partialbar\,,
$$
$$
\Delta_{\partial}:=\partial\partial^*+\partial^*\partial\,,
$$
and the Bott-Chern and Aeppli Laplacians
$$
\Delta_{BC}=(\partial\partialbar)(\partial\partialbar)^*+
(\partial\partialbar)^*(\partial\partialbar)+
(\partialbar^*\partial)(\partialbar^*\partial)^*+
(\partialbar^*\partial)^*(\partialbar^*\partial)+
\partialbar^*\partialbar+\partial^*\partial\,,
$$
$$\Delta_{A} \;:=\; \partial\partial^*+\partialbar\partialbar^*+(\partial\partialbar)^*(\partial\partialbar)+(\partial\partialbar)(\partial\partialbar)^*+(\partialbar\partial^*)^*(\partialbar\partial^*)+(\partialbar\partial^*)(\partialbar\partial^*)^*\,.
$$
\end{rem}
We have the following
\begin{prop}\label{prop:ellipticity-deltabar}
Let $(X,J,g)$ be an almost-Hermitian manifold, then the operators
$\Delta_{\bar\partialta}$ and $\Delta_{\partialta}$ are elliptic differential operators of the second order.
\end{prop}
\begin{proof}
The operator $\Delta_{\bar\partialta}$ is elliptic; indeed, it is a lower-order perturbation of its integrable counterpart.
More precisely, denoting with $\simeq$ the equivalence of the symbol of the operators we have
$$
\Delta_{\bar\partialta}=
\partialtabar^*\partialtabar+\partialtabar\partialtabar^*\simeq
\partialbar\partialbar^*+\partialbar^*\partialbar
=\Delta_{\partialbar}\,.
$$
Similar considerations can be done for $\Delta_{\partialta}$.
\end{proof}
We denote with $\mathcal{H}^k_{\bar\partialta}(X):=\Ker\Delta_{\bar\partialta_{\vert A^k(X)}}$ the space of $\bar\partialta$-harmonic $k$-forms and with
$\mathcal{H}^{p,q}_{\bar\partialta}(X):=\Ker\Delta_{\bar\partialta_{\vert A^{p,q}(X)}}$ the space of $\bar\partialta$-harmonic $(p,q)$-forms, and similarly for the operator $\partialta$.
We get the following
\begin{theorem}\label{thm:deltabar-harmonic}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then
the following Hodge decompositions hold
$$
A^k(X)=\mathcal{H}^k_{\bar\partialta}(X)\oplus\bar\partialta A^{k-1}(X)\oplus
\bar\partialta^* A^{k+1}(X)
$$
and
$$
A^k(X)=\mathcal{H}^k_{\partialta}(X)\oplus\partialta A^{k-1}(X)\oplus
\partialta^* A^{k+1}(X)
$$
Moreover, a $(p,q)$-form $\alpha\in\mathcal{H}^{p,q}_{\bar\partialta}(X)$ if and only if
$\alpha\in\mathcal{H}^{p,q}_{\partialbar}\cap\mathcal{H}^{p,q}_{\mu}$.
Similarly, a $(p,q)$-form $\alpha\in\mathcal{H}^{p,q}_{\partialta}(X)$ if and only if
$\alpha\in\mathcal{H}^{p,q}_{\partial}\cap\mathcal{H}^{p,q}_{\bar\mu}$.
\end{theorem}
\begin{proof}
The Hodge decompositions follow from the classical theory of elliptic operators.
Notice that a $k$-form $\beta$ is $\bar\partialta$-harmonic
if and only if
$$
\left\lbrace
\begin{array}{lcl}
\bar\partialta\beta & =& 0\\
\bar\partialta^*\beta &=& 0
\end{array}
\right.
\qquad\iff\qquad
\left\lbrace
\begin{array}{lcl}
\partialbar\beta+\mu\beta & =& 0\\
\partialbar^*\beta+\mu^*\beta &=& 0
\end{array}
\right.\,.
$$
Hence let $\alpha\in A^{p,q}(X)$, then $\alpha\in\Ker\Delta_{\bar\partialta}$
if and only if
$\partialbar\alpha=0$, $\partialbar^*\alpha=0$, $\mu\alpha=0$, $\mu^*\alpha=0$ concluding the proof.\\
\end{proof}
\begin{rem}
Since the operator $\Delta_{\bar\partialta}$ is elliptic the associated space of harmonic forms $\mathcal{H}^{\bullet}_{\bar\partialta}(X)$ is finite-dimensional on a compact almost-Hermitian manifold. In particular, we denote with $h^{\bullet}_{\bar\partialta}(X)$ its dimension. The same applies for the operator $\partialta$.
\end{rem}
\begin{prop}\label{prop:delta-delbar-delta-deltabar}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then
$$
\Delta_{\bar\partialta}=\Delta_{\partialbar}+\Delta_{\mu}+
[\partialbar,\mu^*]+[\mu,\partialbar^*]
$$
and
$$
\Delta_{\partialta}=\Delta_{\partial}+\Delta_{\bar\mu}+
[\partial,\bar\mu^*]+[\bar\mu,\partial^*]\,.
$$
In particular, $\mathcal{H}^{\bullet}_{\partialbar}(X)
\cap\mathcal{H}^{\bullet}_{\mu}(X)\subseteq\mathcal{H}^{\bullet}_{\bar\partialta}(X)$.
\end{prop}
\begin{proof}
We prove only the first equality since the second one can be easily obtained by conjugation.
We have
$$
\begin{aligned}
\Delta_{\bar\partialta}=&\, (\partialbar+\mu)(\partialbar^*+\mu^*)+(\partialbar^*+\mu^*)(\partialbar+\mu)\\
=&\, \partialbar\partialbar^*+\partialbar\mu^*+\mu\partialbar^*+\mu\mu^*+
\partialbar^*\partialbar+\partialbar^*\mu+\mu^*\partialbar+\mu^*\mu\\
=&\, \Delta_{\partialbar}+\Delta_{\mu}+
[\partialbar,\mu^*]+[\mu,\partialbar^*]\,.
\end{aligned}
$$
\end{proof}
\begin{rem}
Notice that in \cite{cirici-wilson-2} the authors consider on $2n$-dimensional compact almost-Hermitian manifolds the spaces
of harmonic forms
$\mathcal{H}^{\bullet,\bullet}_{\partialbar}\cap\mathcal{H}^{\bullet,\bullet}_{\mu}$.
By Theorem \ref{thm:deltabar-harmonic} we know that
on bi-graded forms we are just reinterpreting these spaces since
$\mathcal{H}^{\bullet,\bullet}_{\partialbar}(X)
\cap\mathcal{H}^{\bullet,\bullet}_{\mu}(X)=\mathcal{H}^{\bullet,\bullet}_{\bar\partialta}(X)$. Hence we refer to
\cite{cirici-wilson-2} for the properties and several results concerning these spaces.
But in general,
we just proved that on total degrees we have only the inclusion $\mathcal{H}^{\bullet}_{\partialbar}(X)
\cap\mathcal{H}^{\bullet}_{\mu}(X)\subseteq\mathcal{H}^{\bullet}_{\bar\partialta}(X)$. In particular in Example \ref{example-1} we show that this inclusion can be strict.
\end{rem}
\begin{rem}\label{rem:delbar-hodge-duality}
Let $(X,J,g)$ be a compact almost-Hermitian manifold of real dimension $2n$, then the Hodge-$*$-operator induces duality isomorphisms for every $k$
$$
*:\mathcal{H}^k_{\bar\partialta}(X)\to \mathcal{H}^{2n-k}_{\bar\partialta}(X)\,,\quad
*:\mathcal{H}^k_{\partialta}(X)\to \mathcal{H}^{2n-k}_{\partialta}(X)\,.
$$
In particular, for every $p,q$
$$
*:\mathcal{H}^{p,q}_{\bar\partialta}(X)\to \mathcal{H}^{n-p,n-q}_{\bar\partialta}(X)\,,\quad
*:\mathcal{H}^{p,q}_{\partialta}(X)\to \mathcal{H}^{n-p,n-q}_{\partialta}(X)\,.
$$
This follows easily from the relations $*\Delta_{\bar\partialta}=\Delta_{\bar\partialta}*$
and $*\Delta_{\partialta}=\Delta_{\partialta}*$.\\
In particular, we have the usual symmetries for the Hodge diamonds, namely for every $k$
$$
h^k_{\bar\partialta}(X)=h^{2n-k}_{\bar\partialta}(X)\,,\quad
h^k_{\partialta}(X)=h^{2n-k}_{\partialta}(X)
$$
and for every $p,q$
$$
h^{p,q}_{\bar\partialta}(X)=h^{n-p,n-q}_{\bar\partialta}(X)\,,\quad
h^{p,q}_{\partialta}(X)=h^{n-p,n-q}_{\partialta}(X)\,.
$$
\end{rem}
\begin{prop}
Let $(X,J,g)$ be an almost-Hermitian manifold, then the operators
$\Delta_{BC(\partialta,\bar\partialta)}$ and $\Delta_{A(\partialta,\bar\partialta)}$ are elliptic differential operators of the fourth order.
\end{prop}
\begin{proof}
The calculations for the symbol of $\Delta_{BC(\partialta,\bar\partialta)}$ are similar to the ones for $\Delta_{\bar\partialta}$ keeping only the highest order differential terms.
Denoting with $\simeq$ the equivalence of the symbol of the operators we have
$$
\Delta_{BC(\partialta,\bar\partialta)}\simeq
\partialta\bar\partialta\bar\partialta^*\partialta^*+
\bar\partialta^*\partialta^*\partialta\bar\partialta+
\bar\partialta^*\partialta\partialta^*\bar\partialta+
\partialta^*\bar\partialta\bar\partialta^*\partialta
\simeq\partialta\partialta^*\partialtabar\partialtabar^*+
\partialta^*\partialta\partialtabar^*\partialtabar+
\partialta\partialta^*\partialtabar^*\partialtabar+
\partialta^*\partialta\partialtabar\partialtabar^*
$$
$$
\simeq\left(\partialta^*\partialta+\partialta\partialta^*\right)
\left(\partialtabar^*\partialtabar+\partialtabar\partialtabar^*\right)=
\Delta_{\partialta}\Delta_{\partialtabar}\simeq\Delta_{\partialtabar}^2\,.
$$
Similar considerations can be done for $\Delta_{A(\partialta,\bar\partialta)}$.
\end{proof}
We denote with $\mathcal{H}^k_{BC(\partialta,\bar\partialta)}(X):=\Ker(\Delta_{BC(\partialta,\bar\partialta)\vert A^k})$ the space of $\Delta_{BC(\partialta,\bar\partialta)}$-harmonic
$k$-forms and with $\mathcal{H}^{p,q}_{BC(\partialta,\bar\partialta)}(X):=\Ker(\Delta_{BC(\partialta,\bar\partialta){\vert A^{p,q}}})$
the space of $\Delta_{BC(\partialta,\bar\partialta)}$-harmonic
$(p,q)$-forms. We have the following Lemma whose proof is a direct computation.
\begin{lemma}\label{lemma:equivalence-bc-harmonic}
Let $(X,J,g)$ be a compact almost-Hermitian manifold. Then,
a differential form $\alpha\in\mathcal{H}^k_{BC(\partialta,\bar\partialta)}(X)$ if and only if
$$
\partialta\alpha=0\,,\quad \bar\partialta\alpha=0\,,\quad
(\partialta\bar\partialta)^*\alpha=0\,.
$$
\end{lemma}
We get the following
\begin{prop}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then the following Hodge decomposition holds
$$
A^k(X)=\mathcal{H}^k_{BC(\partialta,\bar\partialta)}(X)
\stackrel{\perp}{\oplus}
\left(\partialta\bar\partialta A^{k-2}(X)\oplus
\left(
\bar\partialta^* A^{k+1}(X)+\partialta^* A^{k+1}(X)\right)\right)\,.
$$
Moreover, a $(p,q)$-form $\alpha\in\mathcal{H}^{p,q}_{BC(\partialta,\bar\partialta)}(X)$ if and only if
$$
\left\lbrace
\begin{array}{lcl}
\partial\alpha & =& 0\\
\partialbar\alpha &=& 0\\
\mu\alpha &=& 0\\
\bar\mu\alpha &=& 0\\
(\partial\partialbar+\bar\mu\mu)(*\alpha) &=& 0\\
\partial\mu(*\alpha) &=& 0\\
\bar\mu\partialbar(*\alpha) &=& 0
\end{array}
\right.\,.
$$
\end{prop}
\begin{proof}
The Hodge decomposition follows from the ellipticity of $\Delta_{BC(\partialta,\bar\partialta)}$.\\
Now let $\alpha\in A^k(X)$, then in view of Lemma \ref{lemma:equivalence-bc-harmonic}
$\alpha\in\Ker\Delta_{BC(\partialta,\bar\partialta)}$
if and only if
$$
\left\lbrace
\begin{array}{lcl}
\partialta\alpha & =& 0\\
\bar\partialta\alpha &=& 0\\
\partialta\bar\partialta*\alpha &=& 0
\end{array}
\right.\,.
$$
if and only if
$$
\left\lbrace
\begin{array}{lcl}
(\partial+\bar\mu)\alpha & =& 0\\
(\partialbar+\mu)\alpha &=& 0\\
(\partial\partialbar+\partial\mu+\bar\mu\partialbar+\bar\mu\mu)(*\alpha)&=& 0\\
\end{array}
\right.\,.
$$
In particular, if $\alpha$ is a $(p,q)$-form we obtain the thesis.\\
Finally, given $\alpha\in \mathcal{H}^k_{BC(\partialta,\bar\partialta)}(X)$, $\beta\in A^{k-2}(X)$, $\gamma\in A^{k+1}(X)$ and $\eta\in A^{k+1}(X)$
we have
$$
(\alpha,\partialta\bar\partialta\beta+\partialtabar^*\gamma+\partialta^*\eta)=
((\partialta\bar\partialta)^*\alpha,\beta)+(\partialtabar\alpha,\gamma)
+(\partialta\alpha,\eta)=0\,.
$$
\end{proof}
\begin{rem}
Notice that the spaces
$\partialta\bar\partialta A^{k-2}(X)$ and
$\bar\partialta^* A^{k+1}(X)+\partialta^* A^{k+1}(X)$ are orthogonal if and only if $\partialta^2=0$.
\end{rem}
Similarly, if we denote with $\mathcal{H}^k_{A(\partialta,\bar\partialta)}(X):=\Ker(\Delta_{A(\partialta,\bar\partialta){\vert A^k}})$ the space of
$\Delta_{A(\partialta,\bar\partialta)}$-harmonic
$k$-forms and with $\mathcal{H}^{p,q}_{A(\partialta,\bar\partialta)}(X):=\Ker(\Delta_{A(\partialta,\bar\partialta){\vert A^{p,q}}})$
the space of $\Delta_{A(\partialta,\bar\partialta)}$-harmonic
$(p,q)$-forms we get the following
\begin{prop}
Let $(X,J,g)$ be a compact almost-Hermitian manifold, then the following Hodge decomposition holds
$$
A^k(X)=\mathcal{H}^k_{A(\partialta,\bar\partialta)}(X)\stackrel{\perp}{\oplus}
\left(
\left(\partialta A^{k-1}(X)+\partialtabar A^{k-1}(X)\right)
\oplus \left(\partialta\partialtabar\right)^* A^{k+2}(X)\right)\,.
$$
Moreover, a $(p,q)$-form $\alpha\in\mathcal{H}^{p,q}_{A(\partialta,\bar\partialta)}(X)$ if and only if
$$
\left\lbrace
\begin{array}{lcl}
\partial^*\alpha & =& 0\\
\partialbar^*\alpha &=& 0\\
\mu^*\alpha &=& 0\\
\bar\mu^*\alpha &=& 0\\
(\partial\partialbar+\bar\mu\mu)\alpha &=& 0\\
\partial\mu\alpha &=& 0\\
\bar\mu\partialbar\alpha &=& 0
\end{array}
\right.\,.
$$
\end{prop}
\begin{rem}
Since the operators $\Delta_{BC(\partialta,\bar\partialta)}$ and $\Delta_{A(\partialta,\bar\partialta)}$ are elliptic, the associated spaces of harmonic forms $\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)$, $\mathcal{H}^{\bullet}_{A(\partialta,\bar\partialta)}(X)$ are finite-dimensional on a compact almost-Hermitian manifold. In particular, we denote with $h^{\bullet}_{BC(\partialta,\bar\partialta)}(X)$ and $h^{\bullet}_{A(\partialta,\bar\partialta)}(X)$ their dimensions.
\end{rem}
\begin{rem}
Let $(X,J,g)$ be an almost-Hermitian manifold. Then, by definition,
conjugation induces the following isomorphisms
$$
\overline{\mathcal{H}^{\bullet}_{\bar\partialta}(X)}=\mathcal{H}^\bullet_{\partialta}(X)\,,
\qquad
\overline{\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)}=\mathcal{H}^\bullet_{BC(\partialta,\bar\partialta)}(X)\,.
$$
In particular, for any $p,\,q$
$$
\overline{\mathcal{H}^{p,q}_{\bar\partialta}(X)}=\mathcal{H}^{q,p}_{\partialta}(X)\,,
\qquad
\overline{\mathcal{H}^{p,q}_{BC(\partialta,\bar\partialta)}(X)}=\mathcal{H}^{q,p}_{BC(\partialta,\bar\partialta)}(X)\,.
$$
Therefore, we have the following dimensional equalities
for every $k$
$$
h^k_{\bar\partialta}(X)=h^{k}_{\partialta}(X)
$$
and for every $p,q$
$$
h^{p,q}_{\bar\partialta}(X)=h^{q,p}_{\partialta}(X)\,,\quad
h^{p,q}_{BC(\partialta,\bar\partialta)}(X)=h^{q,p}_{BC(\partialta,\bar\partialta)}(X)\,.
$$
\end{rem}
\begin{rem}\label{rem:BC-hodge-duality}
Let $(X,J,g)$ be a compact almost-Hermitian manifold of real dimension $2n$, then the Hodge-$*$-operator induces duality isomorphisms for every $k$
$$
*:\mathcal{H}^k_{BC(\partialta,\bar\partialta)}(X)\to \mathcal{H}^{2n-k}_{A(\partialta,\bar\partialta)}(X)\,.
$$
In particular, for every $p,q$
$$
*:\mathcal{H}^{p,q}_{BC(\partialta,\bar\partialta)}(X)\to \mathcal{H}^{n-p,n-q}_{A(\partialta,\bar\partialta)}(X)\,.
$$
Therefore we have the usual symmetries for the Hodge diamonds, namely for every $k$
$$
h^k_{BC(\partialta,\bar\partialta)}(X)=h^{2n-k}_{A(\partialta,\bar\partialta)}(X)
$$
and for every $p,q$
$$
h^{p,q}_{BC(\partialta,\bar\partialta)}(X)=h^{n-p,n-q}_{A(\partialta,\bar\partialta)}(X)\,.
$$
\end{rem}
\section{Harmonic forms on almost-K\"ahler manifolds}\label{section:harmonic-forms-almost-kahler}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold.
With the usual notations, we have the following almost-K\"ahler identities
(cf. \cite{debartolomeis-tomassini}, \cite{cirici-wilson-2})
\begin{lemma}\label{lemma:almost-kahler-identities}
Let $(X,J,g,\omega)$ be an almost-K\"ahler manifold then
\begin{itemize}
\item[$\bullet$] $[\partialta,\Lambda]=i\,\bar\partialta^*$,
$[\partial,\Lambda]=i\,\partialbar^*$ and $[\bar\mu,\Lambda]=i\,\mu^*$
\item[$\bullet$] $[\bar\partialta,\Lambda]=-i\,\partialta^*$,
$[\partialbar,\Lambda]=-i\,\partial^*$ and $[\mu,\Lambda]=-i\,\bar\mu^*$.
\end{itemize}
\end{lemma}
\begin{proof}
For the sake of completeness we recall here the proof.
We have
$$
d^\Lambda=[d,\Lambda]=[\partialta+\partialtabar,\Lambda]=
[\partial+\bar\mu+\partialbar+\mu,\Lambda]
$$
and
$$
-(d^c)^*=i(\partialtabar^*-\partialta^*)=i(\partialbar^*+\mu^*-\partial^*-\bar\mu^*).
$$
Since $\omega$ is symplectic, $d^\Lambda=-(d^c)^*$ as recalled at the beginning of Section \ref{section:diff-op-sympl};
hence, since $[\partialta,\Lambda]$ and $\bar\partialta^*$ send $A^{\pm}(X)$ to $A^{\mp}(X)$ while $[\bar\partialta,\Lambda]$ and $\partialta^*$ preserve $A^{\pm}(X)$, comparing the corresponding components gives $[\partialta,\Lambda]=i\,\bar\partialta^*$ and $[\bar\partialta,\Lambda]=-i\,\partialta^*$; the remaining identities follow by splitting further into pure bidegrees.
\end{proof}
As a consequence one has the following (see \cite[Lemma 3.6]{debartolomeis-tomassini})
\begin{prop}\label{prop:deltabar-delta-laplacians}
Let $(X,J,g,\omega)$ be an almost-K\"ahler manifold, then
$\Delta_{\bar\partialta}$ and $\Delta_{\partialta}$ are related by
$$
\Delta_{\bar\partialta}=\Delta_{\partialta}
$$
and
$$
\Delta_d=\Delta_{\bar\partialta}+\Delta_{\partialta}+E_J
$$
where
$$
E_J=\partialta\bar\partialta^*+\bar\partialta^*\partialta+\bar\partialta\partialta^*+\partialta^*\bar\partialta\,.
$$
In particular, their spaces of harmonic forms coincide, i.e. $\mathcal{H}^{\bullet}_{\partialta}(X)=\mathcal{H}^{\bullet}_{\bar\partialta}(X)$\,.
\end{prop}
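The first identity can also be checked exactly as in the classical K\"ahler case; here is a sketch, using only Lemma \ref{lemma:almost-kahler-identities} (which gives $\bar\partialta^*=-i[\partialta,\Lambda]$ and $\partialta^*=i[\bar\partialta,\Lambda]$) and the relation $\partialta\bar\partialta+\bar\partialta\partialta=0$:
$$
\Delta_{\bar\partialta}=-i\left(\bar\partialta\partialta\Lambda-\bar\partialta\Lambda\partialta+\partialta\Lambda\bar\partialta-\Lambda\partialta\bar\partialta\right),\qquad
\Delta_{\partialta}=i\left(\partialta\bar\partialta\Lambda-\partialta\Lambda\bar\partialta+\bar\partialta\Lambda\partialta-\Lambda\bar\partialta\partialta\right),
$$
so that
$$
\Delta_{\bar\partialta}-\Delta_{\partialta}=-i\left(\left(\partialta\bar\partialta+\bar\partialta\partialta\right)\Lambda-\Lambda\left(\partialta\bar\partialta+\bar\partialta\partialta\right)\right)=0\,.
$$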
In fact, we can use this result to characterize K\"ahler manifolds among the almost-K\"ahler ones.
\begin{cor}\label{cor:kahler-equality-laplacians}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold, then
$$
\Delta_d=2\Delta_{\partialta}\quad\iff\quad (X,J,g,\omega) \text{ is K\"ahler.}
$$
\end{cor}
\begin{proof}
First of all, on any almost-K\"ahler manifold one has (cf. e.g., \cite{cirici-wilson-2})
$$
[\Delta_d,L]=[[d,d^*],L]=[d,[d^*,L]]=-[d,d^c].
$$
In view of Lemma \ref{lemma:almost-kahler-identities}, we have
$$[\partialta,\Lambda]=i\,\bar\partialta^*;
$$
therefore, taking the adjoint,
$$
[L,\partialta^*]=-i\partialtabar.
$$
Furthermore, since $\omega$ is $d$-closed, we have
$$[\partialta,L]=0.
$$
Hence, (cf. Lemma \ref{lemma:commutator-laplacian-L})
$$
[\Delta_\partialta,L]=[[\partialta,\partialta^*],L]=[\partialta,[\partialta^*,L]]=i[\partialta,\partialtabar]=0,
$$
that is
$$
[\Delta_\partialta,L]=0.
$$
By Proposition \ref{prop:deltabar-delta-laplacians}, on an almost-K\"ahler manifold we have that
$$
\Delta_d=\Delta_{\bar\partialta}+\Delta_{\partialta}+E_J=2\Delta_{\partialta}+E_J,
$$
and we want to show that $E_J=0$ if and only if $J$ is integrable.\newline
Clearly, if $J$ is integrable, then $(X,J,g,\omega)$ is K\"ahler and as a consequence of the K\"ahler identities,
$\Delta_d=2\Delta_{\partial}$. \newline
For the converse implication, assume that $\Delta_d=2\Delta_{\partialta}$. Then, by the above formula,
$$
dd^c+d^cd=-[\Delta_d,L]=-2[\Delta_\partialta,L]=0,
$$
and, as noticed in Section \ref{section:preliminaries}, $d$ and $d^c$ anticommute if and only if $J$ is integrable.
\end{proof}
An immediate consequence of Proposition \ref{prop:deltabar-delta-laplacians} is also the following
\begin{cor}\label{cor:comparison-betti-numbers}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold, then
$$
\mathcal{H}^\bullet_{\bar\partialta}(X)\subseteq \mathcal{H}^\bullet_{dR}(X)\,,
$$
namely every $\bar\partialta$-harmonic form is harmonic. In particular,
$$
h^\bullet_{\bar\partialta}(X)\leq b_\bullet(X)\,,
$$
where $b_\bullet(X)$ denotes the Betti numbers of $X$.
\end{cor}
We will see with an explicit example that the inequality $
h^\bullet_{\bar\partialta}(X)\leq b_\bullet(X)
$ does not hold for an arbitrary compact almost-Hermitian manifold.
\begin{lemma}\label{lemma:sympl-harmonic-deltabar-harmonic}
Let $(X,J,g,\omega)$ be an almost-K\"ahler manifold, then $d^\Lambda=i(\bar\partialta^*-\partialta^*)$. In particular, a $(p,q)$-form is symplectic harmonic, i.e., it belongs to $\Ker d\cap\Ker d^\Lambda$, if and only if it belongs to $\Ker\partialta\cap\Ker\bar\partialta\cap\Ker\partialta^*\cap\Ker\bar\partialta^*$.
\end{lemma}
\begin{proof}
Since $d^c=-i(-\partialbar+\partial+\bar\mu-\mu)=i(\bar\partialta-\partialta)$
we have that
$(d^c)^*=i(-\partialbar^*+\partial^*+\bar\mu^*-\mu^*)=
i(\partialta^*-\bar\partialta^*)$ and so the thesis follows from $d^\Lambda=-(d^c)^*$ as noted in Section \ref{section:diff-op-sympl}.
\end{proof}
In general, the existence of a symplectic harmonic representative in every de-Rham cohomology class is equivalent to the Hard-Lefschetz condition (cf. \cite{brylinski},
\cite{yan},
\cite{mathieu}, \cite{tseng-yau-I}). Therefore, Tseng and Yau in \cite{tseng-yau-I} introduced the space
\[
H^k_{d+d^\Lambda}\left(X\right)
:=\frac{\ker(d+d^\Lambda)\cap A^k(X)}{\Imm dd^\Lambda\cap A^k(X)},
\]
and they developed a Hodge theory for it.
It turns out that $H^k_{d+d^\Lambda}\left(X\right)\simeq
\mathcal{H}^k_{d+d^\Lambda}\left(X\right)$ where
$$
\mathcal{H}^k_{d+d^\Lambda}\left(X\right)=
\text{Ker}\,d\cap \text{Ker}\,d^\Lambda\cap
\text{Ker}\,(dd^\Lambda)^*\,.
$$
Let us denote by $\mathcal{H}^{p,q}_{d+d^\Lambda}\left(X\right)$
the space of $(d+d^\Lambda)$-harmonic $(p,q)$-forms.
\begin{rem}
Notice that on a compact almost-K\"ahler manifold $(X^{2n},J,g,\omega)$ we have the inclusion
$$
\mathcal{H}^\bullet_{\partialtabar}(X)\subseteq\mathcal{H}^\bullet_{d+d^\Lambda}(X),
$$
indeed if $\alpha\in\mathcal{H}^\bullet_{\partialtabar}(X)$ then, by Proposition \ref{prop:deltabar-delta-laplacians},
$\alpha\in\mathcal{H}^\bullet_{\partialta}(X)$, namely
$\partialta\alpha=0$, $\partialta^*\alpha=0$, $\partialtabar\alpha=0$ and $\partialtabar^*\alpha=0$.
Since $d=\partialta+\partialtabar$ and
$d^\Lambda=-(d^c)^*=-i(\partialta^*-\partialtabar^*)$, the inclusion follows.\\
Moreover, if $J$ is $\mathcal{C}^{\infty}$-pure and full \cite{LZ} (e.g., this is always the case if $n=2$, see \cite{DLZ}) by Corollary \ref{cor:comparison-betti-numbers} and \cite[Theorem 4.2]{tardini-tomassini-instability}
one has
$$
\mathcal{H}^2_{\bar\partialta}(X)\subseteq \mathcal{H}^2_{dR}(X)
\subseteq\mathcal{H}^2_{d+d^\Lambda}(X)\,
$$
and in particular, $h^2_{\bar\partialta}(X)\leq b_2(X)\leq
h^2_{d+d^\Lambda}(X).$
Recall that if $n=2$ by \cite[Theorem 4.5]{tardini-tomassini-proper-surjective} (cf. also \cite[Section 3.2]{tardini-proceeding}) $b_2(X)<h^2_{d+d^\Lambda}(X)$ unless $(X,\omega)$, as a symplectic manifold, satisfies the Hard Lefschetz condition.
\end{rem}
On bigraded forms we have a different situation from Corollary
\ref{cor:comparison-betti-numbers}.
\begin{theorem}\label{thm:equalities-harmonic-spaces}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold, then on $(p,q)$-forms
$$
\mathcal{H}^{p,q}_{d+d^\Lambda}(X)=
\mathcal{H}^{p,q}_{\partialta}(X)\cap\mathcal{H}^{p,q}_{\bar\partialta}(X)=
\mathcal{H}^{p,q}_{\partialbar}(X)\cap \mathcal{H}^{p,q}_{\partial}(X)\cap
\mathcal{H}^{p,q}_{\bar\mu}(X)\cap\mathcal{H}^{p,q}_{\mu}(X)=
$$
$$
=\mathcal{H}^{p,q}_{\partialbar}(X)\cap\mathcal{H}^{p,q}_{\mu}(X)=
\mathcal{H}^{p,q}_{d}(X)\,.
$$
\end{theorem}
\begin{proof}
Notice that the equality $\mathcal{H}^{p,q}_{\partialta}(X)\cap\mathcal{H}^{p,q}_{\bar\partialta}(X)=
\mathcal{H}^{p,q}_{\partialbar}(X)\cap \mathcal{H}^{p,q}_{\partial}(X)\cap
\mathcal{H}^{p,q}_{\bar\mu}(X)\cap\mathcal{H}^{p,q}_{\mu}(X)$
follows from Theorem \ref{thm:deltabar-harmonic}.\\
The equalities $\mathcal{H}^{p,q}_{\partialbar}(X)\cap \mathcal{H}^{p,q}_{\partial}(X)\cap
\mathcal{H}^{p,q}_{\bar\mu}(X)\cap\mathcal{H}^{p,q}_{\mu}(X)
=\mathcal{H}^{p,q}_{\partialbar}(X)\cap\mathcal{H}^{p,q}_{\mu}(X)=
\mathcal{H}^{p,q}_{d}(X)
$
follow from \cite[Proposition 3.3, Theorem 4.3]{cirici-wilson-2}.\\
Indeed,
$$
\mathcal{H}^{p,q}_{\partialbar}(X)\cap \mathcal{H}^{p,q}_{\partial}(X)\cap
\mathcal{H}^{p,q}_{\bar\mu}(X)\cap\mathcal{H}^{p,q}_{\mu}(X)=
\text{Ker}\,(\Delta_{\partialbar}+\Delta_{\partial}+\Delta_{\bar\mu}+
\Delta_{\mu})
$$
$$
=\text{Ker}\,(\Delta_{\partialbar}+\Delta_{\mu})\cap
\text{Ker}\,(\Delta_{\partial}+\Delta_{\bar\mu})=
\text{Ker}\,(\Delta_{\partialbar}+\Delta_{\mu})=
\text{Ker}\,(\Delta_{\partialbar})\cap \text{Ker}\,(\Delta_{\mu})\,.
$$
We just need to prove that $\mathcal{H}^{p,q}_{d+d^\Lambda}(X)=
\mathcal{H}^{p,q}_{\partialta}(X)\cap\mathcal{H}^{p,q}_{\bar\partialta}(X)$.
Let $\alpha\in\mathcal{H}^{p,q}_{d+d^\Lambda}(X)$, then $d\alpha=0$,
$d^\Lambda\alpha=0$ and $dd^\Lambda*\alpha=0$, or equivalently
$d\alpha=0$, $d^c*\alpha=0$ and $d*d^c\alpha=0$. Since on $(p,q)$-forms
$d\alpha=0$ implies $d^c\alpha=0$, the last condition is superfluous, and $d\alpha=0$, $d^c*\alpha=0$ is equivalent to $\partialta\alpha=0$, $\bar\partialta\alpha=0$, $\partialta*\alpha=0$, $\bar\partialta*\alpha=0$ (cf. Lemma
\ref{lemma:sympl-harmonic-deltabar-harmonic}).
\end{proof}
\begin{theorem}
Let $(X,J,g,\omega)$ be an almost-K\"ahler manifold, then
$\Delta_{BC(\partialta,\bar\partialta)}$, $\Delta_{\bar\partialta}$ are related by
$$
\Delta_{BC(\partialta,\bar\partialta)}=\Delta_{\bar\partialta}^2+\bar\partialta^*\bar\partialta+\partialta^*\partialta+F_J
$$
where
$$
F_J:=-\partialta\left(\partialta\bar\partialta^*+\bar\partialta^*\partialta\right)\bar\partialta^*+
\left(\partialta\bar\partialta^*+\bar\partialta^*\partialta\right)\partialta^*\bar\partialta+
\partialta^*\bar\partialta\left(\partialta\bar\partialta^*+\bar\partialta^*\partialta\right)-\partialta^*\left(\partialta\bar\partialta^*+\bar\partialta^*\partialta\right)\bar\partialta\,.
$$
\end{theorem}
\begin{proof}
First of all, since it will be useful in the following, we notice that by the almost-K\"ahler identities
$\partialta^*=i\,[\partialtabar,\Lambda]$ and $\partialtabar^*=-i\,[\partialta,\Lambda]$
we obtain
$$
\partialta^*\partialtabar=i(\partialtabar\Lambda\partialtabar-\Lambda\partialtabar^2)\,,\quad
\partialtabar\partialta^*=i(\partialtabar^2\Lambda-\partialtabar\Lambda\partialtabar)
$$
and similarly for their conjugates.\\
Recall that, when $J$ is non-integrable, $\partialta^2$ and $\bar\partialta^2$ do not vanish in general, so we cannot cancel them out in these expressions.
By Proposition \ref{prop:deltabar-delta-laplacians}
$$
\Delta_{\bar\partialta}^2=\Delta_{\partialta}\Delta_{\bar\partialta}=
\partialta\partialta^*\partialtabar\partialtabar^*+
\partialta\partialta^*\partialtabar^*\partialtabar+
\partialta^*\partialta\partialtabar\partialtabar^*+
\partialta^*\partialta\partialtabar^*\partialtabar.
$$
Now in the first and fourth terms we use the previous formulas, and in the second and third terms we use the fact that $\partialta$ and $\partialtabar$ anticommute. Hence, we get
$$
\Delta_{\bar\partialta}^2=
\partialta(i\partialtabar\Lambda\partialtabar-i\Lambda\partialtabar^2)\partialtabar^*
-\partialta\partialtabar^*\partialta^*\partialtabar
-\partialta^*\partialtabar\partialta\partialtabar^*
+\partialta^*(-i\partialta^2\Lambda+i\partialta\Lambda\partialta)\partialtabar\,.
$$
Using again that $\partialta^*=i\,[\partialtabar,\Lambda]$ one has
$$
i\partialta\partialtabar\Lambda\partialtabar\partialtabar^*=
\partialta\partialtabar(-\partialta^*\partialtabar^*+i\partialtabar\Lambda\partialtabar^*)=
i\partialta\partialtabar^2\Lambda\partialtabar^*+\partialta\partialtabar\partialtabar^*\partialta^*
$$
and so the first term in the previous expression of $\Delta_{\bar\partialta}^2$
becomes
$$
\partialta(i\partialtabar\Lambda\partialtabar-i\Lambda\partialtabar^2)\partialtabar^*=
i\partialta\partialtabar^2\Lambda\partialtabar^*+
\partialta\partialtabar\partialtabar^*\partialta^*
-i\partialta\Lambda\partialtabar^2\partialtabar^*=
\partialta\partialtabar\partialtabar^*\partialta^*+
\partialta(i\partialtabar^2\Lambda-i\Lambda\partialtabar^2)\partialtabar^*
$$
$$
=\partialta\partialtabar\partialtabar^*\partialta^*+
\partialta(\partialta^*\partialtabar+\partialtabar\partialta^*)\partialtabar^*
$$
and similarly the fourth term becomes
$$
\partialta^*(-i\partialta^2\Lambda+i\partialta\Lambda\partialta)\partialtabar=
-i\partialta^*\partialta^2\Lambda\partialtabar+
\partialtabar^*\partialta^*\partialta\partialtabar
+i\partialta^*\Lambda\partialta^2\partialtabar=
\partialtabar^*\partialta^*\partialta\partialtabar+
\partialta^*(-i\partialta^2\Lambda+i\Lambda\partialta^2)\partialtabar
$$
$$
=\partialtabar^*\partialta^*\partialta\partialtabar+
\partialta^*(\partialta\partialtabar^*+\partialtabar^*\partialta)\partialtabar\,.
$$
For the second term using again from the K\"ahler identities that
$\partialta\partialtabar^*=-i\partialta^2\Lambda+i\partialta\Lambda\partialta$ and
$\partialtabar^*=-i\,[\partialta,\Lambda]$ one has
$$
-\partialta\partialtabar^*\partialta^*\partialtabar=
i\partialta^2\Lambda\partialta^*\partialtabar-i\partialta\Lambda\partialta\partialta^*\partialtabar=
i\partialta^2\Lambda\partialta^*\partialtabar+\partialtabar^*\partialta\partialta^*\partialtabar
-i\Lambda\partialta^2\partialta^*\partialtabar\,=
\partialtabar^*\partialta\partialta^*\partialtabar+
(i\partialta^2\Lambda-i\Lambda\partialta^2)\partialta^*\partialtabar
$$
$$
=\partialtabar^*\partialta\partialta^*\partialtabar-
(\partialta\partialtabar^*+\partialtabar^*\partialta)\partialta^*\partialtabar
$$
and similarly for the third term
$$
-\partialta^*\partialtabar\partialta\partialtabar^*=
i\partialta^*\partialtabar\partialta^2\Lambda+
\partialta^*\partialtabar\partialtabar^*\partialta
-i\partialta^*\partialtabar\Lambda\partialta^2=
\partialta^*\partialtabar\partialtabar^*\partialta+
\partialta^*\partialtabar(i\partialta^2\Lambda-i\Lambda\partialta^2)
$$
$$
=\partialta^*\partialtabar\partialtabar^*\partialta-
\partialta^*\partialtabar(\partialta\partialtabar^*+\partialtabar^*\partialta)
\,.
$$
Putting all this together we obtain
$$
\Delta_{\bar\partialta}^2=
\Delta_{BC(\partialta,\partialtabar)}-\partialtabar^*\partialtabar-\partialta^*\partialta-F_J
$$
concluding the proof.
Here, in the expression of $F_J$, we have used the identity
$$
\partialta^*\bar\partialta+\bar\partialta\partialta^*=
\partialta\bar\partialta^*+\bar\partialta^*\partialta\,,
$$
which we prove separately in the following Proposition.
\end{proof}
Clearly, if $J$ is integrable we recover the classical relations between the Bott-Chern and Dolbeault Laplacians (cf. e.g., \cite{schweitzer}), namely on K\"ahler manifolds
$$
\Delta_{BC}=\Delta_{\partialbar}^2+\partialbar^*\partialbar+\partial^*\partial\,.
$$
In particular, $F_J=0$ since by the K\"ahler identities $\partial\partialbar^*+\partialbar^*\partial=0$.
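Indeed, in the integrable case $\partialbar^{*}=-i\,[\partial,\Lambda]$ and $\partial^{2}=0$, so that
$$
\partial\partialbar^{*}+\partialbar^{*}\partial=
-i\left(\partial^{2}\Lambda-\partial\Lambda\partial+\partial\Lambda\partial-\Lambda\partial^{2}\right)=
-i\left(\partial^{2}\Lambda-\Lambda\partial^{2}\right)=0\,.
$$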
\begin{prop}
Let $(X,J,g,\omega)$ be an almost-K\"ahler manifold, then
$$
\bar\partialta\partialta^*+\partialta^*\bar\partialta=
\partialta\bar\partialta^*+\bar\partialta^*\partialta\,.
$$
In particular,
\begin{itemize}
\item[$\bullet$]
$
\partialta\bar\partialta^*+\bar\partialta^*\partialta=
\partial\partialbar^*+\partialbar^*\partial+\partialbar\partial^*+\partial^*\partialbar\,,
$
\item[$\bullet$] $E_J=2(\partialta\bar\partialta^*+\bar\partialta^*\partialta)$.
\end{itemize}
\end{prop}
\begin{proof}
We have
$$
\bar\partialta\partialta^*+\partialta^*\bar\partialta=
(\partialbar+\mu)(\partial^*+\bar\mu^*)+(\partial^*+\bar\mu^*)(\partialbar+\mu)=
$$
$$
=\partialbar\partial^*+\partial^*\partialbar+\partialbar\bar\mu^*+\bar\mu^*\partialbar+\mu\partial^*+\partial^*\mu+\mu\bar\mu^*+\bar\mu^*\mu\,.
$$
Now, by \cite[Lemma 3.7]{debartolomeis-tomassini} we have
$$
\mu\bar\mu^*+\bar\mu^*\mu=0
$$
and
$$
\partialbar\partial^*+\partial^*\partialbar=\partial\mu^*+\mu^*\partial+\bar\mu\partialbar^*+\partialbar^*\bar\mu\,,
$$
hence
$$
\bar\partialta\partialta^*+\partialta^*\bar\partialta=
\partial\mu^*+\mu^*\partial+\bar\mu\partialbar^*+\partialbar^*\bar\mu+
\partialbar\bar\mu^*+\bar\mu^*\partialbar+\mu\partial^*+\partial^*\mu\,.
$$
Using conjugation we have
$$
\partialta\bar\partialta^*+\bar\partialta^*\partialta=
\partial\partialbar^*+\partialbar^*\partial+\partial\mu^*+\mu^*\partial+\bar\mu\partialbar^*+\partialbar^*\bar\mu=
$$
$$
=\mu\partial^*+\partial^*\mu+\partialbar\bar\mu^*+\bar\mu^*\partialbar+
\partial\mu^*+\mu^*\partial+\bar\mu\partialbar^*+\partialbar^*\bar\mu\,,
$$
therefore $\bar\partialta\partialta^*+\partialta^*\bar\partialta=
\partialta\bar\partialta^*+\bar\partialta^*\partialta\,.$
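Concerning the last two assertions, the first one follows by combining the expression of $\partialta\bar\partialta^*+\bar\partialta^*\partialta$ obtained above with \cite[Lemma 3.7]{debartolomeis-tomassini}, while the second one is immediate from the equality just proved, since
$$
E_J=\left(\partialta\bar\partialta^*+\bar\partialta^*\partialta\right)+
\left(\bar\partialta\partialta^*+\partialta^*\bar\partialta\right)=
2\left(\partialta\bar\partialta^*+\bar\partialta^*\partialta\right)\,.
$$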
\end{proof}
\begin{prop}\label{prop:bc-uguale-derham}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold, then
$$
\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)=\mathcal{H}^{\bullet}_{\bar\partialta}(X)\,.
$$
\end{prop}
\begin{proof}
Let $\alpha\in\mathcal{H}^k_{BC(\partialta,\bar\partialta)}(X)$; then by Lemma \ref{lemma:equivalence-bc-harmonic}, $\partialta\alpha=0$,
$\bar\partialta\alpha=0$ and $\partialta^*\bar\partialta^*\alpha=0$. We need to prove that $\bar\partialta^*\alpha=0$. Using the almost-K\"ahler identities we have
$$
0=\partialta^*\bar\partialta^*\alpha=-i\partialta^*[\partialta,\Lambda]\alpha
$$
which means that $\partialta^*\partialta\Lambda\alpha=0$. Therefore, pairing with
$\Lambda\alpha$,
$$
0=(\partialta^*\partialta\Lambda\alpha,\Lambda\alpha)=
\vert\partialta\Lambda\alpha\vert^2
$$
hence $\partialta\Lambda\alpha=0$.
This means that $\bar\partialta^*\alpha=-i[\partialta,\Lambda]\alpha=-i\,\partialta\Lambda\alpha=0$, giving the first inclusion
$\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)\subseteq\mathcal{H}^{\bullet}_{\bar\partialta}(X)$.\\
We now prove the other inclusion
$\mathcal{H}^{\bullet}_{\bar\partialta}(X)\subseteq\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)$. Let $\alpha\in\mathcal{H}^k_{\bar\partialta}(X)$, i.e.,
$\bar\partialta\alpha=0$ and $\bar\partialta^*\alpha=0$.
Moreover, since $\mathcal{H}^{\bullet}_{\bar\partialta}(X)=
\mathcal{H}^{\bullet}_{\partialta}(X)$ we also have that
$\partialta\alpha=0$ and $\partialta^*\alpha=0$. Hence, putting these relations together we have that $\bar\partialta\alpha=0$, $\partialta\alpha=0$ and $\partialta^*\bar\partialta^*\alpha=0$, i.e., by definition
$\alpha\in\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)$ giving the second inclusion.
\end{proof}
\begin{cor}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold, then
$$
\mathcal{H}^{\bullet,\bullet}_{BC(\partialta,\bar\partialta)}(X)=\mathcal{H}^{\bullet,\bullet}_{d}(X)\,.
$$
\end{cor}
\begin{proof}
The thesis follows from the previous Proposition saying that $\mathcal{H}^{\bullet,\bullet}_{BC(\partialta,\bar\partialta)}(X)=\mathcal{H}^{\bullet,\bullet}_{\bar\partialta}(X)$,
the fact that $\mathcal{H}^{\bullet,\bullet}_{\bar\partialta}(X)=\mathcal{H}^{\bullet,\bullet}_{\partialta}(X)$ and
Theorem \ref{thm:equalities-harmonic-spaces}.
\end{proof}
\begin{cor}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler manifold of dimension
$2n$, then
$$
\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)=\mathcal{H}^{\bullet}_{A(\partialta,\bar\partialta)}(X)\,.
$$
\end{cor}
\begin{proof}
First we show the inclusion $\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)\subseteq\mathcal{H}^{\bullet}_{A(\partialta,\bar\partialta)}(X)$.
Let $\alpha\in\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)$ then, by Lemma \ref{lemma:equivalence-bc-harmonic}, $\partialta\alpha=0$, $\partialtabar\alpha=0$ and $\partialta\partialtabar*\alpha=0$. Hence, $\partialta\partialtabar\alpha=0$ and
by Propositions \ref{prop:bc-uguale-derham} and \ref{prop:deltabar-delta-laplacians},
$\alpha\in\mathcal{H}^{\bullet}_{BC(\partialta,\bar\partialta)}(X)=\mathcal{H}^{\bullet}_{\bar\partialta}(X)=\mathcal{H}^{\bullet}_{\partialta}(X)$, so
$\partialta*\alpha=0$ and $\partialtabar*\alpha=0$ giving the inclusion.\\
The other inclusion then follows from the equality of dimensions $h^{\bullet}_{BC(\partialta,\bar\partialta)}(X)=h^{\bullet}_{A(\partialta,\bar\partialta)}(X)$. Indeed, by using
Remark \ref{rem:BC-hodge-duality}, Proposition \ref{prop:bc-uguale-derham} and Remark \ref{rem:delbar-hodge-duality} we have the following equalities on the dimensions, for any $k$,
$$
h^{k}_{A(\partialta,\bar\partialta)}(X)=h^{2n-k}_{BC(\partialta,\bar\partialta)}(X)=
h^{2n-k}_{\partialtabar}(X)=h^{k}_{\partialtabar}(X)=
h^{k}_{BC(\partialta,\bar\partialta)}(X)\,.
$$
\end{proof}
We now prove the following lemma, which was used in the proof of Corollary \ref{cor:kahler-equality-laplacians}.
\begin{lemma}\label{lemma:commutator-laplacian-L}
Let $(X,J,g,\omega)$ be an almost-K\"ahler manifold, then
\begin{itemize}
\item[$\bullet$] $[L,\Delta_{\bar\partialta}]=0$ and $[L,\Delta_{\partialta}]=0$,
\item[$\bullet$] $[\Lambda,\Delta_{\bar\partialta}]=0$ and $[\Lambda,\Delta_{\partialta}]=0$.
\end{itemize}
\end{lemma}
\begin{proof}
We only prove the first equality: the second follows by complex conjugation (or from Proposition \ref{prop:deltabar-delta-laplacians}, since $\Delta_{\partialta}=\Delta_{\bar\partialta}$), and the identities involving $\Lambda$ follow by taking adjoints, since $\Lambda=L^*$ and the Laplacians are self-adjoint. We compute
$$
[L,\Delta_{\bar\partialta}]=[L,[\bar\partialta,\bar\partialta^*]]=
-[\bar\partialta,[\bar\partialta^*,L]]=i[\bar\partialta,\partialta]=0\,.
$$
\end{proof}
As a consequence we have the following Hard-Lefschetz Theorem
on the spaces of $\bar\partialta-$ and $\Delta_{BC(\partialta,\bar\partialta)}-$harmonic forms.
\begin{theorem}\label{thm:hard-lefschetz}
Let $(X,J,g,\omega)$ be a compact almost-K\"ahler $2n$-dimensional manifold, then, for any $k$, the maps
$$
L^{k}:\mathcal{H}^{n-k}_{BC(\partialta,\bar\partialta)}(X)\to
\mathcal{H}^{n+k}_{BC(\partialta,\bar\partialta)}(X)
$$
are isomorphisms.
\end{theorem}
\begin{proof}
Since by the previous Lemma $[L,\Delta_{\bar\partialta}]=0$ and $[\Lambda,\Delta_{\bar\partialta}]=0$ and in general
$$
L^{k}:A^{n-k}(X)\to
A^{n+k}(X)
$$
are isomorphisms, then
the maps
$$
L^{k}:\mathcal{H}^{n-k}_{\bar\partialta}(X)\to
\mathcal{H}^{n+k}_{\bar\partialta}(X)
$$
are injective, and so isomorphisms by Remark \ref{rem:delbar-hodge-duality}.
The maps
$$
L^{k}:\mathcal{H}^{n-k}_{BC(\partialta,\bar\partialta)}(X)\to
\mathcal{H}^{n+k}_{BC(\partialta,\bar\partialta)}(X)
$$
are then also isomorphisms, by Proposition \ref{prop:bc-uguale-derham}.
\end{proof}
The analogous result for the bigraded case also holds; it is proven in \cite[Theorem 5.1]{cirici-wilson-2}.
\section{Examples}
An important source of non-K\"ahler examples is furnished by nilmanifolds, namely compact quotients of a nilpotent connected simply-connected Lie group by a lattice.
On almost-complex nilmanifolds, given a left-invariant Hermitian metric, one can look for left-invariant forms that are harmonic with respect to some operator, but in general these do not exhaust the whole space of harmonic forms.
In the following we compute some examples showing that, even on almost-K\"ahler manifolds, the spaces of $\bar\partialta$-harmonic forms do not decompose by bidegree in general, namely one may have
$$
\mathcal{H}^\bullet_{\bar\partialta}(X)\neq\bigoplus_{p+q=\bullet} \mathcal{H}^{p,q}_{\bar\partialta}(X)\,,
$$
in contrast with the equality (cf. \cite[Theorem 4.1]{cirici-wilson-2})
$$
\mathcal{H}^\bullet_{\partialbar}(X)\cap
\mathcal{H}^\bullet_{\mu}(X)=\bigoplus_{p+q=\bullet} \mathcal{H}^{p,q}_{\partialbar}(X)\cap
\mathcal{H}^{p,q}_{\mu}(X)\,.
$$
Moreover, we will see that when the almost-Hermitian structure is not almost-K\"ahler the equalities in Theorem \ref{thm:equalities-harmonic-spaces} may fail, and also the inequalities in Corollary \ref{cor:comparison-betti-numbers} may fail; in particular, we construct an example where
$$
h^2_{\partialtabar}(X)>b_2(X)\,.
$$
\begin{ex}\label{example-1}
Let $\mathbb{H}(3;\mathbb{R})$ be the $3$-dimensional Heisenberg group and
$\mathbb{H}(3;\mathbb{Z})$ be the subgroup of matrices with entries in $\mathbb{Z}$.
The Kodaira-Thurston manifold is defined as the quotient
$$
X:=\left(\mathbb{H}(3;\mathbb{R})\times \mathbb{R}\right)/
\left(\mathbb{H}(3;\mathbb{Z})\times \mathbb{Z}\right)\,.
$$
The manifold $X$ is a $4$-dimensional nilmanifold which admits both complex and symplectic structures.
We consider the non-integrable almost-complex structure $J$ defined by the structure equations
$$
\left\lbrace
\begin{array}{lcl}
d\varphi^1 & =& 0\\
d\varphi^2 &=& \frac{1}{2i}\varphi^{12}+\frac{1}{2i}\left(\varphi^{1\bar 2}-\varphi^{2\bar 1}\right)+\frac{1}{2i}\varphi^{\bar 1\bar 2}
\end{array}
\right.\,
$$
where $\left\lbrace\varphi^1\,,\varphi^2\right\rbrace$ is a global co-frame of (1,0)-forms on $X$.
The $(1,1)$-form $\omega:=\frac{1}{2i}\left(\varphi^{1\bar 1}+\varphi^{2\bar 2}\right)$ is a compatible symplectic structure, hence the pair $(J,\omega)$ induces an almost-K\"ahler structure on $X$.\\
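Indeed, the structure equations give $d\varphi^{1\bar 1}=0$ and
$$
d\varphi^{2\bar 2}=d\varphi^{2}\wedge\varphi^{\bar 2}-\varphi^{2}\wedge d\varphi^{\bar 2}=
\frac{1}{2i}\left(\varphi^{12\bar 2}-\varphi^{2\bar 1\bar 2}\right)-
\frac{1}{2i}\left(\varphi^{12\bar 2}-\varphi^{2\bar 1\bar 2}\right)=0\,,
$$
so that $d\omega=0$.\\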
Recall that $\mathcal{H}^{\bullet,\bullet}_{\bar\partialta}(X)=\mathcal{H}^{\bullet,\bullet}_{d}(X)$, while in total degree one only has, in general, the inclusion $\mathcal{H}^{\bullet}_{\bar\partialta}(X)\subseteq\mathcal{H}^{\bullet}_{d}(X)$.
One can easily compute the spaces of left-invariant harmonic forms and one gets
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^1_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^1,\,\bar\varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^{1\bar 1}\,,\varphi^{2\bar 2}\,,
\varphi^{12}-\varphi^{\bar 1\bar 2}\,,
\varphi^{1\bar 2}+\varphi^{2\bar 1}
\right\rangle
\end{array}
$$
and (cf. also \cite{cirici-wilson-2})
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^{1,0}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^{0,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \bar\varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^{2,0}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{0,2}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{1,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^{1\bar 1}\,,\varphi^{2\bar 2}\,,
\varphi^{1\bar 2}+\varphi^{2\bar 1}
\right\rangle
\end{array}
$$
The remaining spaces can be computed easily by duality.
A first observation is that
$$
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)\neq\bigoplus_{p+q=2} \mathcal{H}^{p,q}_{\bar\partialta,\text{inv}}(X)\,.
$$
In particular, by \cite[Theorem 4.1]{cirici-wilson-2} and Theorem
\ref{thm:equalities-harmonic-spaces}
$$
\mathcal{H}^2_{\partialbar,\text{inv}}(X)\cap
\mathcal{H}^2_{\mu,\text{inv}}(X)=
\bigoplus_{p+q=2}\mathcal{H}^{p,q}_{\partialbar,\text{inv}}(X)\cap
\mathcal{H}^{p,q}_{\mu,\text{inv}}(X)=
\bigoplus_{p+q=2}\mathcal{H}^{p,q}_{\bar\partialta,\text{inv}}(X)\subset
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)\,.
$$
Therefore, also in the almost-K\"ahler case we can have (cf. Proposition
\ref{prop:delta-delbar-delta-deltabar})
$$
\mathcal{H}^2_{\partialbar,\text{inv}}(X)\cap
\mathcal{H}^2_{\mu,\text{inv}}(X)\neq
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X).
$$
Moreover,
since $\text{dim}\,\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)=4=b_2(X)$,
from Corollary \ref{cor:comparison-betti-numbers} we have that
$$
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)=\mathcal{H}^2_{\bar\partialta}(X)\,.
$$
Since $\text{dim}\,\mathcal{H}^1_{\bar\partialta,\text{inv}}(X)=2$
then
$$
2\leq\text{dim}\,\mathcal{H}^1_{\bar\partialta}(X)\leq b_1(X)=3\,,
$$
hence $\text{dim}\,\mathcal{H}^1_{\bar\partialta}(X)=3$ if and only if
there exists a non-left-invariant $\bar\partialta$-harmonic $1$-form\,.
\end{ex}
\begin{ex}\label{example-2}
Let $X$ be the $4$-dimensional Filiform nilmanifold and consider the non-integrable almost-complex structure $J$ defined by the following structure equations
$$
\left\lbrace
\begin{array}{lcl}
d\varphi^1 & =& 0\\
d\varphi^2 &=& \frac{1}{2i}\varphi^{12}+\frac{1}{2i}\left(\varphi^{1\bar 2}-\varphi^{2\bar 1}\right)-i\varphi^{1\bar 1}+\frac{1}{2i}\varphi^{\bar 1\bar 2}
\end{array}
\right.
$$
where $\left\lbrace\varphi^1\,,\varphi^2\right\rbrace$ is a global co-frame of (1,0)-forms on $X$.
As observed in \cite{cirici-wilson-2}, $J$ does not admit any compatible symplectic structure.
We fix the diagonal metric $\omega:=\frac{1}{2i}\left(\varphi^{1\bar 1}+\varphi^{2\bar 2}\right)$.
One can easily compute the spaces of left-invariant harmonic forms and one gets
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^1_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^1,\,\bar\varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle
\varphi^{12}-\varphi^{\bar 1\bar 2}\,,
-\frac{1}{2}\varphi^{1\bar 1}+\varphi^{1\bar 2}-\frac{1}{2}\varphi^{2\bar 2}\,,
\frac{1}{2}\varphi^{1\bar 1}+\varphi^{2\bar 1}+\frac{1}{2}\varphi^{2\bar 2}
\right\rangle
\end{array}
$$
and
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^{1,0}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^{0,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \bar\varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^{2,0}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{0,2}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{1,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle -\frac{1}{2}\varphi^{1\bar 1}+\varphi^{1\bar 2}-\frac{1}{2}\varphi^{2\bar 2}\,,
\frac{1}{2}\varphi^{1\bar 1}+\varphi^{2\bar 1}+\frac{1}{2}\varphi^{2\bar 2}
\right\rangle
\end{array}
$$
The remaining spaces can be computed easily by duality.
Since $(J,\omega)$ is not an almost-K\"ahler structure, we cannot apply Corollary
\ref{cor:comparison-betti-numbers}; in fact, in this case we have the opposite inequality
$$
\text{dim}\,\mathcal{H}^2_{\bar\partialta}(X)\geq
3=\text{dim}\,\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)>2=b_2(X)\,.
$$
Moreover, one can easily compute the $\Delta_{BC(\partialta,\bar\partialta)}$-harmonic forms
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^1_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^1,\,\bar\varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^2_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
\left\langle
\varphi^{1\bar 1}\,,
\varphi^{12}-\varphi^{\bar 1\bar 2}\,,
\varphi^{1\bar 2}-\frac{1}{2}\varphi^{2\bar 2}\,,
\varphi^{2\bar 1}+\frac{1}{2}\varphi^{2\bar 2}
\right\rangle
\end{array}
$$
and
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^{1,0}_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^{0,1}_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
\left\langle \bar\varphi^1 \right\rangle, \\[5pt]
\mathcal{H}^{2,0}_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{0,2}_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{1,1}_{BC(\partialta,\bar\partialta),\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^{1\bar 1}\,,
\varphi^{1\bar 2}-\frac{1}{2}\varphi^{2\bar 2}\,,
\varphi^{2\bar 1}+\frac{1}{2}\varphi^{2\bar 2}
\right\rangle
\end{array}
$$
Notice that, unlike in the almost-K\"ahler case,
$$
\mathcal{H}^2_{BC(\partialta,\bar\partialta),\text{inv}}(X)\neq
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)
$$
and
$$
\mathcal{H}^{1,1}_{BC(\partialta,\bar\partialta),\text{inv}}(X)\neq
\mathcal{H}^{1,1}_{\bar\partialta,\text{inv}}(X)\,.
$$
\end{ex}
\begin{ex}
Let $X:=\mathbb{I}_3$ be the Iwasawa manifold, namely the quotient of the complex $3$-dimensional Heisenberg group $\mathbb{H}(3;\mathbb{C})$ by the subgroup of matrices with entries in $\mathbb{Z}[i]$. The manifold $X$ is a $6$-dimensional nilmanifold admitting both complex and symplectic structures.
Then there exists a global co-frame of $1$-forms $\left\lbrace e^i\right\rbrace_{i=1,\,\cdots\,,6}$ satisfying the following structure equations
$$
\left\lbrace
\begin{array}{lcl}
d\,e^1 & =& 0\\
d\,e^2 & =& 0\\
d\,e^3 & =& 0\\
d\,e^4 & =& 0\\
d\,e^5 & =& -e^{13}+e^{24}\\
d\,e^6 & =& -e^{14}-e^{23}\,.
\end{array}
\right.
$$
We define the following non-integrable almost-complex structure
$$
Je^1=-e^6,\quad Je^2=-e^5, \quad Je^3=-e^4
$$
and consider the compatible symplectic structure
$$
\omega:=e^{16}+e^{25}+e^{34}.
$$
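A direct check using the structure equations shows that $\omega$ is closed:
$$
d\omega=-e^{1}\wedge de^{6}-e^{2}\wedge de^{5}=
e^{1}\wedge\left(e^{14}+e^{23}\right)+e^{2}\wedge\left(e^{13}-e^{24}\right)=
e^{123}-e^{123}=0\,.
$$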
Therefore, $(X,J,\omega)$ is a compact $6$-dimensional almost-K\"ahler manifold.
We set
$$
\left\lbrace
\begin{array}{lcl}
\varphi^1 & =& e^1+ie^6\\
\varphi^2 & =& e^2+ie^5\\
\varphi^3 & =& e^3+ie^4
\end{array}
\right.
$$
then the structure equations become
$$
\left\lbrace
\begin{array}{lcl}
d\,\varphi^1 & =& \left(-\frac{1}{4}\varphi^{13}-\frac{i}{4}\varphi^{23}\right)+\left(\frac{1}{4}\varphi^{1\bar3}+\frac{1}{4}\varphi^{3\bar 1}-
\frac{i}{4}\varphi^{2\bar3}+\frac{i}{4}\varphi^{3\bar2}\right)+
\left(\frac{1}{4}\varphi^{\bar1\bar3}-\frac{i}{4}\varphi^{\bar2\bar3}\right)\\
d\,\varphi^2 & =& \left(-\frac{i}{4}\varphi^{13}+\frac{1}{4}\varphi^{23}\right)+\left(-\frac{i}{4}\varphi^{1\bar3}+\frac{i}{4}\varphi^{3\bar 1}-
\frac{1}{4}\varphi^{2\bar3}-\frac{1}{4}\varphi^{3\bar2}\right)+
\left(-\frac{i}{4}\varphi^{\bar1\bar3}-\frac{1}{4}\varphi^{\bar2\bar3}\right)\\
d\,\varphi^3 & =& 0
\end{array}
\right.
$$
One can compute the spaces of left-invariant harmonic forms and one gets
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^1_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^3,\,\bar\varphi^3 \right\rangle, \\[5pt]
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^{1\bar 1}+\varphi^{2\bar 2}\,,
\varphi^{3\bar3}\,, -\varphi^{12}+\varphi^{\bar1\bar2}
\right\rangle,\\[5pt]
\mathcal{H}^3_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^{13\bar 1}+\varphi^{23\bar 2}\,,
\varphi^{1\bar1\bar3}+\varphi^{2\bar2\bar3}
\right\rangle
\end{array}
$$
and
$$
\displaystyle\begin{array}{lcl}
\mathcal{H}^{1,0}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^3 \right\rangle, \\[5pt]
\mathcal{H}^{0,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \bar\varphi^3 \right\rangle, \\[5pt]
\mathcal{H}^{2,0}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{0,2}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
0\,,\\[5pt]
\mathcal{H}^{1,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle \varphi^{1\bar 1}+\varphi^{2\bar 2}\,,
\varphi^{3\bar3}\right\rangle\,,\\[5pt]
\mathcal{H}^{2,1}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle\varphi^{13\bar1}+\varphi^{23\bar 2}\right\rangle\,,\\[5pt]
\mathcal{H}^{1,2}_{\bar\partialta,\text{inv}}(X) & = & \displaystyle
\left\langle
\varphi^{1\bar1\bar3}+\varphi^{2\bar2\bar3}
\right\rangle\,.
\end{array}
$$
The remaining spaces can be computed easily by duality.
In particular
$$
\mathcal{H}^2_{\bar\partialta,\text{inv}}(X)\neq\bigoplus_{p+q=2} \mathcal{H}^{p,q}_{\bar\partialta,\text{inv}}(X)\,.
$$
\end{ex}
\end{document}
\begin{document}
\author{Paulo Brand\~ao}
\address{Impa, Estrada Dona Castorina, 110, Rio de Janeiro, Brazil.} \email{[email protected]}
\date{\today}
\thanks{Partially supported by FAPERJ, CNPq and CAPES}
\title{Topological Attractors of Contracting Lorenz Maps}
\maketitle
\begin{abstract}
We study the non-wandering set of contracting Lorenz maps. We show that if such a map $f$ does not have any attracting periodic orbit, then there is a unique topological attractor. Precisely, there is a compact set $\Lambda$ such that $\omega_f(x)=\Lambda$ for a residual set of points $x \in [0,1]$. Furthermore, we classify the possible kinds of attractors that may occur.
\end{abstract}
\tableofcontents
\section{Introduction}
In \cite{Lor}, Lorenz studied the solutions of the system of differential equations~(\ref{eqlorenz}) in $\mathbb{R}^3$, obtained by truncating the Navier-Stokes equations used for modeling atmospheric conditions:
\begin{eqnarray}
\label{eqlorenz}
\dot{x} & = & -10 x + 10 y \\
\dot{y} & = & 28 x -y -xz \nonumber\\
\dot{z} & = & - \frac{8}{3} z + x y \nonumber
\end{eqnarray}
\noindent
He observed what was thought to be an attractor with features that led to the present concept of a strange attractor. V.S. Afraimovich, V.V. Bykov, L.P. Shil'nikov, in \cite{ABS}, and Guckenheimer and Williams, in \cite{GW}, introduced the idea of Lorenz-like attractors: dynamically similar models that also displayed the characteristics of the Lorenz strange attractor.
These models consist of a hyperbolic singularity with a one-dimensional unstable manifold such that, in a linearizable neighborhood, its separatrices can be taken as one of the coordinate axes, say the $x$-axis, in such a way that both components of the $x$-axis minus the origin return to this neighborhood cutting transversally the plane $z=\mathrm{const}$, with eigenvalues $\lambda_2 < \lambda_3< 0 < \lambda_1$ (see Figure~\ref{LorenzFluxo}) and the expanding condition $\lambda_3 + \lambda_1 > 0$. One considers the Poincar\'e map of the square $Q=\left\{ \left| x\right| \leq \mathrm{const};\left| y\right| \leq \mathrm{const};z=\mathrm{const}\right\}$ into itself, with the returns as indicated in Figure~\ref{LorenzFluxo}, and one can exhibit in $Q$ a foliation by one-dimensional leaves, invariant under the Poincar\'e map and exponentially contracted by it. In \cite{GW}, Guckenheimer and Williams show that, given such a system, in a neighborhood $U$ the system is structurally stable of codimension $2$, and in any representative family there is only a single attractor attracting the constructed neighborhood.
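For the classical system~(\ref{eqlorenz}) these conditions can be checked directly: the linearization at the origin is block diagonal, with eigenvalue $-8/3$ in the $z$-direction and eigenvalues $\frac{-11\pm\sqrt{1201}}{2}$ in the $(x,y)$-plane, so that
$$
\lambda_1=\frac{-11+\sqrt{1201}}{2}\approx 11.83\,,\quad
\lambda_3=-\frac{8}{3}\approx -2.67\,,\quad
\lambda_2=\frac{-11-\sqrt{1201}}{2}\approx -22.83\,,
$$
and hence $\lambda_2<\lambda_3<0<\lambda_1$ and $\lambda_3+\lambda_1>0$.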
\begin{figure}
\caption{Lorenz-like Flow and Associated One-dimensional Dynamics}
\label{LorenzFluxo}
\end{figure}
In \cite{ACT}, Arneodo, Coullet and Tresser studied similar systems, just modifying the relation between the eigenvalues of the singularity, taking $ \lambda_3 + \lambda_1 < 0 $: the so-called {\em contracting} Lorenz attractors. In this case the induced one-dimensional map is as displayed in Figure~\ref{LorenzFluxo}.
Critical points and critical values play fundamental roles in the study of the dynamics of maps of the interval, and from this point of view Lorenz maps are of hybrid type. Indeed, these maps have a single critical point, as unimodal maps do, but two critical values, as bimodal ones have. Because of this, one could perhaps expect two different attractors to occur, but we prove in Theorem~\ref{atratortopologico} that there is only a single topological attractor. That is, the behavior of contracting Lorenz maps resembles that of unimodal maps rather than that of bimodal maps, which admit up to two attractors.
More specifically, we prove that, for contracting Lorenz maps, the possible long-term behaviors of orbits of generic points are either periodic orbits (of which there can be only one or two) or a single attractor of one of the following types: a cycle of intervals forming a single chaotic attractor, a Cherry attractor, a Solenoid, or a subset of a chaotic Cantor set coexisting with wandering intervals. This last possibility, however, is expected not to occur, as conjectured by Martens and de Melo.
\section{Statement of the Main Results}\label{MainResults}
\label{mainresults}
We say an open interval $I$ is {\em of trivial dynamics} (up to some iterate) if $\exists n \in \NN$ such that $f^n|_I \equiv$ id.
\begin{Definition}[Lorenz Maps]
We say that a $C^2$ map $f:[0,1]\setminus \{c\} \rightarrow[0,1]$, $0<c<1$, is a {\em Lorenz map} if $f(0)=0$, $f(1)=1$, $f'(x)>0$, $\forall\,x\in[0,1]\setminus \{c\}$. A Lorenz map is called {\em contracting} if $\lim_{x\to c}f'(x)=0$ and there is no interval of trivial dynamics.
\end{Definition}
Given $n\ge1$, define $f^{n}(c_{\pm})=\lim_{0<\epsilon \to 0}f^{n}(c \pm \epsilon)$. The critical values of $f$ are $f(c_{-})$ and $f(c_{+})$. If $x\in\{f(c_{-}),f(c_{+})\}$ set $f^{-1}(x)=\{c\}\cup\{y\in[0,1]\,;\,f(y)=x\}$. Given a set $X\subset[0,1]$, define $f^{-1}(X)=\bigcup_{x\in X}f^{-1}(x)$. Inductively, define $f^{-n}(x)=f^{-1}(f^{-(n-1)}(x))$, where $n\ge2$.
The {\em pre-orbit} of a point $x\in[0,1]$ is the set $\mbox{$\mathcal{O}$}_{f}^{-}(x):=\bigcup_{n\ge0}f^{-n}(x)$, where $f^{0}(x):=x$. Denote the positive orbit of a point $x\in[0,1]\setminus\mbox{$\mathcal{O}$}_{f}^{-}(c)$ by $\mbox{$\mathcal{O}$}_f^+(x)$, i.e., $\mbox{$\mathcal{O}$}_{f}^{+}(x)=\{f^j(x);j\ge0\}$. If $\exists p \ge 1$ such that $f^{p}(c_{-})=c$, we take $p \in \NN$ as being minimal with this property and define $\mbox{$\mathcal{O}$}_{f}^+(c_{-})=\{f^{j}(c_{-})\,;\, 1\le j \le p\}$. Otherwise, if $\nexists p \ge 1$ such that $f^{p}(c_{-})=c$, we define $\mbox{$\mathcal{O}$}_{f}^+(c_{-})=\{f^{j}(c_{-})\,;\,j\ge0\}$. Similarly we define $\mbox{$\mathcal{O}$}_{f}^+(c_{+})$. If $x\in\mbox{$\mathcal{O}$}_{f}^{-}(c)$, let $\mbox{$\mathcal{O}$}_{f}^{+}(x)=\{x,f(x),\cdots,f^{m_{x}-1}(x),c\} \cup \mbox{$\mathcal{O}$}_{f}^+(c_{-}) \cup \mbox{$\mathcal{O}$}_{f}^+(c_{+}) $, with $m_x$ minimum such that $f^{m_{x}}(x)=c$. Also, $\mbox{$\mathcal{O}$}_{f}^{+}(X)$ denotes the positive orbit of $X$ by $f$, that is,
$\mbox{$\mathcal{O}$}_{f}^{+}(X)=\bigcup_{x \in X} \mbox{$\mathcal{O}$}_f^+(x) $.
A point $x$ is said to be {\em non-wandering} if for any neighborhood $U \ni x$, $\exists n \ge 1$ such that $f^n(U)\cap U\ne \emptyset$. The set of all non-wandering points is the {\em non-wandering set} $\Omega(f)$. The set of accumulation points of the positive orbit of $x\in[0,c)\cup\{c_{-},c_{+}\}\cup(c,1]$ is denoted by $\omega_f(x)$, the $\omega$-limit set of $x$. The $\alpha$-limit set of $x$, $\alpha_{f}(x)$, is the set of points $y$ such that $y=\lim_{j\to\infty}x_{j}$ for some sequence $x_{j}\in f^{-n_{j}}(x)$ with $n_{j}\to+\infty$.
Following Milnor \cite{Milnor:1985ut}, a compact set $A$ is a {\em topological attractor} if
its basin $\beta(A) = \{x; \omega_{f}(x) \subset A\}$ is residual in an open set and if each closed forward invariant subset $A'$ which is strictly contained in $A$ has a topologically smaller basin of attraction, i.e., $\beta(A) \setminus \beta(A')$ is residual in an open set. (Similarly, $A$ is a {\em metrical attractor} if $\operatorname{Leb} \beta(A)> 0$ and $\operatorname{Leb} \big(\beta(A) \setminus \beta(A')\big)>0$, $\forall A'$ closed forward invariant $A'\subsetneq A$).
Given a periodic point $p$, say $f^n(p)=p$, we say that its periodic orbit $\mbox{$\mathcal{O}$}_f^+(p)$ is an {\em attracting periodic orbit} if $\exists \epsilon >0$ such that $(p,p+\epsilon)$ or $(p-\epsilon,p)\subset \beta(\mbox{$\mathcal{O}$}_f^+(p))$. A {\em periodic attractor} is a finite set $\Lambda$ such that $\operatorname{interior}(\{x\,;\,\omega_{f}(x)=\Lambda\})\ne\emptyset$, and it can be either an attracting periodic orbit, or a {\em super-attractor}: a finite set $\Lambda=\{p_{1},\cdots,p_{n},c\}$ such that $f(p_{i})=p_{i+1}$ for $1\le i<n$, $f(p_{n})=c$ and $\lim_{0<\varepsilon\downarrow0}f(c+\varepsilon)=p_{1}$ or $\lim_{0<\varepsilon\downarrow0}f(c-\varepsilon)=p_{1}$. A {\em weak repeller} is a periodic point $p$ of $f$ such that it is non-hyperbolic and it is not a periodic attractor.
We say that $I$ is {\em a wandering interval} of $f$ if $f^n|_I$ is a homeomorphism for all $n\ge1$, $f^i(I)\cap f^j(I)=\emptyset$ for $i\ne j >0$, and $I$ does not intersect the basin of an attracting periodic orbit.
We say that an attractor (topological or metrical) $\Lambda$ is a {\em chaotic attractor} if $\Lambda$ is transitive, periodic orbits are dense in it ($\overline {Per(f)\cap \Lambda}=\Lambda$), its topological entropy $h_{top}(f|_\Lambda)$ is positive and $\exists \lambda >0$ and a dense subset of points $x \in \Lambda$ such that their {\em Lyapounov exponents}, $\exp_f(x)$, are greater than $\lambda$, where $\exp_f(x):=\liminf \frac{1}{n}\log |Df^n(x)|$.
A {\em cycle of intervals} is a transitive finite union of non-trivial disjoint closed intervals.
A {\em gap map} is a continuous and injective map $g:S^1\setminus\{c\}\to S^1$, where $S^1=\RR/\ZZ$ is the circle and $c$ is any point of it. It is a known fact that such a map has a well defined rotation number $\rho(g)$. Furthermore, if $\rho(g)\notin\QQ$, then $g$ is semi-conjugated to an irrational rotation. In this case there exists a minimal set $\Lambda$ containing $c$ such that $\omega_g(x)=\Lambda$ for every $x\in S^1$ (if $x\in\bigcup_{j\ge0}g^{-j}(c)$ we consider $\omega_g(x_{\pm})$ instead of $\omega_g(x)$).
We say that a Lorenz map $f$ is a {\em Cherry map} if there is a neighborhood $J$ of the critical point such that the first return map to $J$ is conjugated to a gap map with an irrational rotation.
It follows from \cite{GT85} that a Lorenz map $f$ is a Cherry map if and only if $f$ does not admit super-attractors and there exists a neighborhood $J$ of the critical point $c$ such that $c\in\omega_f(x_{\pm})$, $\forall\,x\in J$. If $f$ is a Cherry map, $\Lambda:=\omega_f(c_{-})=\omega_f(c_+)$ is called a {\em Cherry attractor} and it is a minimal compact set containing the critical point $c$ in the interior of its basin of attraction.
A {\em renormalization interval} for $f$ is an open interval $J=(a,b)\ni c$ such that the first return map to $[a,b]$ is conjugated to a Lorenz map. Its boundary points are always periodic points and $f^{\operatorname{Period}(a)}([a,c))\subset[a,b]\supset f^{\operatorname{Period}(b)}((c,b])$. Further properties of intervals of this type will be studied in Section \ref{renormecherry}.
Given a renormalization interval $J=(a,b)$, define the {\em renormalization cycle} associated to $J$ (or generated by $J$) as $$U_J=\bigg(\bigcup_{i=0}^{\operatorname{Period}(a)}f^{i}((a,c))\bigg)\cup\bigg(\bigcup_{i=0}^{\operatorname{Period}(b)}f^{i}((c,b))\bigg).$$
Given $J\subset[0,1]$ an open set with $c\in J$, define $\Lambda_{J}:=\{x\in [0,1] \,;\,\mbox{$\mathcal{O}$}^{+}_{f}(x)\cap J=\emptyset\}$. We call a {\em gap of $\Lambda_{J}$} any connected component of $[0,1]\setminus \Lambda_J$. We also define the set $K_J$, the {\em nice trapping region associated to J}, as being the set formed by the union of gaps of $\Lambda_J$ such that each of these gaps contains one interval of the renormalization cycle.
We say that $f$ is {\em $\infty$-renormalizable} if $f$ has infinitely many different renormalization intervals. An attractor $\Lambda$ of a contracting Lorenz map $f$ is a {\em Solenoidal attractor} (or {\em Solenoid}) if $\Lambda\subset\bigcap_{n=0}^\infty K_{J_n}$, and $\{J_n\}_{n}$ is an infinite nested chain of renormalization intervals.
A contracting Lorenz map $f:[0,1]\setminus\{c\} \to [0,1]$ is called {\em non-flat} if there exist constants $\alpha$,$\beta>1$, $a,b\in[0,1]$ and $C^2$ orientation preserving diffeomorphisms $\phi_{0}:[0,c]\to[0,a^{1/\alpha}]$ and $\phi_{1}:[c,1]\to[0,b^{1/\beta}]$ such that $$f(x)=\begin{cases}a-(\phi_{0}(c-x))^\alpha&\text{ if }x<c\\
1-b+(\phi_{1}(x))^\beta&\text{ if }x>c\end{cases}.$$
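For instance, taking $\phi_{0}$ and $\phi_{1}$ to be the linear diffeomorphisms $\phi_{0}(t)=a^{1/\alpha}\,t/c$ and $\phi_{1}(x)=b^{1/\beta}\,(x-c)/(1-c)$, one obtains the model family
$$
f(x)=\begin{cases}a\left(1-\left(\frac{c-x}{c}\right)^{\alpha}\right)&\text{ if }x<c\\[2pt]
1-b+b\left(\frac{x-c}{1-c}\right)^{\beta}&\text{ if }x>c\end{cases},
$$
with $a,b\in(0,1]$ and $\alpha,\beta>1$, which satisfies $f(0)=0$, $f(1)=1$, $f'>0$ and $\lim_{x\to c}f'(x)=0$; hence it is a non-flat contracting Lorenz map whenever it has no interval of trivial dynamics.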
\begin{maintheorem}[The Solenoid attractor]\label{SOLENOIDETH}
Let $f$ be a $C^{2}$ non-flat contracting Lorenz map without periodic attractors. If $f$ is $\infty$-renormalizable, then there is a compact minimal set $\Lambda$, with $c\in\Lambda\subset\bigcap_{J\in\cR }K_J$ such that $\omega_f(x)=\Lambda$, $\forall\,x\in[0,1]$ with $c\in\omega_f(x)$, where $\cR $ is the set of renormalization intervals $J$ of $f$ and $K_J$ their corresponding nice trapping regions.
\end{maintheorem}
\begin{maintheorem}
\label{baciastopologicas}
If $f$ is a $C^{2}$ non-flat contracting Lorenz map without periodic attractors, then $f$ has a transitive topological attractor $\Lambda$. Furthermore, $\beta(\Lambda)$ is a residual set in the whole interval, and $\Lambda$ is one and only one of the following types:
\begin{enumerate}
\item {\em Cherry attractor} and in this case $\omega_{f}(x)=\Lambda$ in an open and dense set of points $x\in [0,1]$.
\item {\em Solenoidal attractor} and in this case $\omega_{f}(x)=\Lambda$ in a residual set of points $x\in [0,1]$.
\item {\em Chaotic attractor} that can be of two kinds:
\begin{enumerate}
\item {\em Cycle of intervals}, in this case $\omega_{f}(x)=\Lambda$ in a residual set of points $x\in [0,1]$.
\item {\em Cantor set} and in this case there are wandering intervals.
\end{enumerate}
\end{enumerate}
\end{maintheorem}
\begin{maintheorem}
\label{teoalfalim}
Let $f$ be a $C^{2}$ non-flat contracting Lorenz map without periodic attractors and $\Lambda$ its single topological attractor as obtained in Theorem \ref{baciastopologicas}. Then, $f$ has no wandering interval if and only if $\alpha_{f}(x)=[0,1], \forall x \in \Lambda$.
\end{maintheorem}
The next theorem goes deeper in the classification provided by Theorem~\ref{baciastopologicas}, as it distinguishes two possible situations for item (3)(b) of that theorem. Observe that item (3)(b) didn't state that the Cantor set $\Lambda$ is equal to $\omega_{f}(x)$ for a residual set of $x \in [0,1]$, but only that the basin $\beta(\Lambda)$ contains a residual set. That is, (3)(b) can split into two situations. In the first one, $\Lambda$ attracts a residual set whose $\omega$-limit coincides with $\Lambda$. In the case this doesn't happen, under some additional hypothesis we can have that $\Lambda$ properly contains another Cantor set $\Lambda'$ such that its basin $\beta(\Lambda')$ is residual in $[0,1]$ and $\forall x \in \beta(\Lambda')$ is such that $\omega(x)=\Lambda'$.
We say that a $C^3$ map $f$ has negative Schwarzian derivative if its Schwarzian derivative $Sf$ is negative at every point $x$ such that $Df(x)\ne0$, where
\begin{equation}\label{defschwarz}
Sf(x)=\frac{D^3 f(x)}{D f(x)}-\frac{3}{2}\bigg( \frac{D^2 f(x)}{D f(x)} \bigg)^2
\end{equation}
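For instance, for a power map $g(x)=x^{\gamma}$ with $\gamma>1$ one gets
$$
Sg(x)=\frac{(\gamma-1)(\gamma-2)}{x^{2}}-\frac{3}{2}\frac{(\gamma-1)^{2}}{x^{2}}=\frac{1-\gamma^{2}}{2x^{2}}<0
\quad\text{for every } x>0,
$$
which is, up to composition with diffeomorphisms, the local model near a non-flat critical point.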
\begin{maintheorem}
\label{atratortopologico}
Let $f$ be a $C^3$ non-flat contracting Lorenz map with negative Schwarzian derivative.
If $f$ has a periodic attractor $\Lambda$, then either $\beta(\Lambda)$ is an open and dense set or there is another periodic attractor $\Lambda'$ such that $\beta(\Lambda)\cup\beta(\Lambda')$ is open and dense.
If $f$ does not have any periodic attractor, then there is a single topological attractor $\Lambda$ with $\omega_{f}(x)=\Lambda$ for a residual set of points $x \in [0,1]$ and it is one of the following types:
\begin{enumerate}
\item {\em $\Lambda$ is a Cherry attractor};
\item {\em $\Lambda$ is a solenoidal attractor};
\item {\em $\Lambda$ is a chaotic cycle of intervals};
\item {\em $\Lambda=\overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$ and it is contained in a chaotic Cantor set whose gaps are wandering intervals}
\end{enumerate}
\end{maintheorem}
Theorem \ref{atratortopologico} allows us to compare the metrical and topological attractors. Indeed, we can conclude that (1) the topological attractor contains the metrical one, and (2) if the topological attractor is not a cycle of intervals, then the topological attractor and the metrical one coincide. The existence and classification of metrical attractors can be found in \cite{StP}.
Results on contracting Lorenz maps and flows date from the beginning of the 1980's. In this decade and the first half of the 1990's, we mention C. Tresser, A. Arneodo, L. Alsed\`a, A. Chenciner, P. Coullet, J-M. Gambaudo, M. Misiurewicz, A. Rovella, R.F. Williams (see \cite{ACT,CGT84,GT85,Gambaudo:1986p2422,Tresser:1993uf,Rov93}).
Later on, main contributions include M. Martens and W. de Melo \cite{MM}, G. Keller and M. St. Pierre \cite{Keller:2011p1585,StP}, D. Berry and B. Mestel \cite{BM}, and R. Labarca and C. G. Moreira \cite{Labarca:2010p1505,Labarca:2006p1486}.
\section{Preliminary Results}
A {\em homterval} is an open interval $I=(a,b)$ such that $f^n|_I$ is a homeomorphism for $n\ge1$ or, equivalently, $I\cap\mbox{$\mathcal{O}$}_f^-(c)=\emptyset$.
Let us denote by $\BB_0(f)$ the union of the basins of attraction of all periodic attractors of $f$.
\begin{Lemma}[Homterval Lemma, see \cite{MvS}]\label{homtervals}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map and $I=(a,b)$ be a homterval of $f$. If $I$ is not a wandering interval, then $I\subset\BB_0(f)\cup\mbox{$\mathcal{O}$}_f^{-}(\operatorname{Per}(f))$. Furthermore, if $f$ is $C^3$ with $Sf<0$, and $I$ is not a wandering interval, then the set $I\setminus\BB_0$ has at most one point.
\end{Lemma}
\begin{Lemma}\label{LemmaWI}If $f:[0,1]\setminus\{c\}\to[0,1]$ is a $C^{2}$ non-flat contracting Lorenz map, then every wandering interval accumulates on both sides of the critical point. In particular, a wandering interval cannot contain any interval of the form $(-r,c)$ or $(c,r)$.
\end{Lemma}
\begin{proof}
Suppose we have a wandering interval $J$ that does not accumulate on the right side of the critical point, say, its orbit never enters a neighborhood $(c,c+\varepsilon)$. Then we can modify $f$ on this neighborhood so that the modified map coincides with the original one outside it and is $C^2$ and non-flat (see Figure~\ref{errantepng}). In this way, the modified map is a $C^{2}$ map with non-flat critical points displaying a wandering interval, which is impossible (see Theorem A, Chapter IV of \cite{MvS}).
\begin{figure}\label{errantepng}
\end{figure}
\end{proof}
One can adapt the well-known Singer Theorem to our context, with $f:[0,1]\setminus\{c\}\to[0,1]$ being a $C^{3}$ non-flat contracting Lorenz map with negative Schwarzian derivative, and obtain that the immediate basin of any attracting periodic orbit of such a map contains in its boundary either the critical point or a boundary point of $[0,1]$. From this we obtain that $f$ can have at most two attractors of periodic type (one can also obtain that each neutral periodic point is an attracting periodic orbit and that there is no interval of periodic points). We can go even further and state:
\begin{Proposition}
\label{DOISMAX}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{3}$ non-flat contracting Lorenz map with negative Schwarzian derivative. Then $f$ can have at most two periodic attractors and, when it has a periodic attractor, the union of the basins of the periodic attractors is always an open and dense set.
\end{Proposition}
\begin{proof}
Let $p$ be such that $\mbox{$\mathcal{O}$}_{f}^{+}(p)$ is an attracting periodic orbit. Notice that $\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))$ is an open set. By Singer's theorem, $(c, \delta) \subset \beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))$ for some $\delta >0$ (or $(-\delta, c) \subset \beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))$, which is similar). If $\overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))} \ne [0,1]$, then there is a connected component $T$ of $[0,1] \setminus \overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))}$.
If there existed $j$ such that $f^j(T)\ni c$, then there would exist $y \in T$ such that $f^j(y)\in (c,\delta)$, hence $y\in \beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))$, leading to a contradiction, as $y \in T$ and $T \subset [0,1] \setminus \overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))}$.
In this way, for any given $j$, $f^j|_T$ is a homeomorphism, so $T$ is a homterval; then it is either a wandering interval or it intersects the basin of attraction of an attracting periodic orbit, which cannot be $\mbox{$\mathcal{O}$}_{f}^{+}(p)$, as $T \subset [0,1] \setminus \overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))}$.
The first case cannot occur: if $T$ were a wandering interval, its orbit would accumulate on $c$ from both sides (by Lemma \ref{LemmaWI}) and then there would be $j$ such that $f^j(T)\cap(c,\delta)\ne\emptyset$, again leading to a contradiction. In the second case, there exists $q$ such that $\mbox{$\mathcal{O}$}_{f}^{+}(q)$ is an attracting periodic orbit and $\mbox{$\mathcal{O}$}_{f}^{+}(q) \ne \mbox{$\mathcal{O}$}_{f}^{+}(p)$. Finally, if $\overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))} \cup \overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(q))}\ne [0,1]$, there would be a connected component of $[0,1] \setminus ( \overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(p))} \cup \overline{\beta(\mbox{$\mathcal{O}$}_{f}^{+}(q))} )$, and we could show in the same way that it is a homterval which cannot be wandering. It also cannot intersect the basin of a third periodic attractor, since such a basin would have to contain the critical point in its boundary, while both sides of the critical point are already attracted to the one or two aforementioned orbits.
\end{proof}
\begin{Lemma}\label{Remark98671oxe}Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ contracting Lorenz map. If $f$ does not have any periodic attractor, then there is a residual set $U$ such that $$c\in\omega_{f}(x)\, \forall x \in U.$$
Furthermore, given any neighborhood $V$ of the critical point, the set of points that visit $V$ is an open and dense set.
\end{Lemma}
\begin{proof} Let $J_n=\{x\in[0,1]\,;\,\mbox{$\mathcal{O}$}^+_f(x)\cap(c-1/n,c+1/n)\ne\emptyset\}$, $n\in \NN$; then $J_n$ is open and non-empty. If $J_n$ were not dense, then there would exist $(a,b)\subset [0,1]\setminus \overline J_n$. By the homterval lemma, as $f$ has no attracting periodic orbit, there would be $\ell \ge 0$ such that $f^\ell\big( (a,b)\big) \ni c$, or $(a,b)$ would be a wandering interval. The first case would imply that $(a,b)\cap \overline J_n\ne \emptyset$. The second one also cannot happen, as otherwise iterates of $(a,b)$ would approach $c$, by Lemma \ref{LemmaWI}, and this would lead to the same contradiction.
Then, $J=\cap_{n\ge 0}J_n$ is residual and we have that $c\in \omega(x)$, $\forall x \in J$.
\end{proof}
A metrical version of this lemma also can be obtained as a consequence of \cite{Man85} if we add the hypothesis that the map has no weak repellers.
\begin{theorem}[Koebe's Lemma \cite{MvS}]
For every $\varepsilon>0$, $\exists K>0$ such that the following holds: let $M$, $T$ be intervals in $[0,1]$ with $M\subset T$ and denote respectively by $L$ and $R$ the left and right components of $T\setminus M$ and let $f:[0,1]\to[0,1]$ be a map with negative Schwarzian derivative. If $f^{n}|_T$ is a diffeomorphism for a given $n\ge1$ and
$$ |f^{n}(L)|\ge\varepsilon|f^{n}(M)| \text{ and } |f^{n}(R)|\ge\varepsilon|f^{n}(M)|,$$ then $\frac{|Df^{n}(x)|}{|Df^{n}(y)|}\le K$ for $x,y\in M$.
\end{theorem}
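In particular, under the hypotheses of the lemma, the mean value theorem gives, for any fixed $x_{0}\in M$ and any subinterval $A\subset M$,
$$
\frac{1}{K}\,|Df^{n}(x_{0})|\,|A|\;\le\;|f^{n}(A)|\;\le\;K\,|Df^{n}(x_{0})|\,|A|\,,
$$
so that $|f^{n}(A)|/|f^{n}(B)|\le K^{2}\,|A|/|B|$ for all subintervals $A,B\subset M$; this is essentially the form in which the lemma will be used below.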
\begin{Lemma} \label{trescinco}Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{3}$ non-flat contracting Lorenz map with negative Schwarzian derivative. If $I$ is a wandering interval, then $\omega_f(y)= \overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$ for every $y \in I$.
\end{Lemma}
\begin{proof}
We have shown in Lemma \ref{LemmaWI} that
the orbit of any given wandering interval $I$ accumulates in the critical point by both sides, and then, by continuity we have
$\omega_f(I)\supset \overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$.
Now, suppose there is $p \in \omega_f(I)$ such
that $p \not\in \overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$.
We can also suppose without loss of generality
that $I$ is maximal, in the sense that there is no bigger wandering interval that contains $I$ properly.
Let $T$ be a connected component of $[0,1] \setminus \big(\overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }\big)$ containing $p$.
Given $\epsilon>0$, let $n_{\epsilon}$ be the
minimum $j$ such that $f^j (I) \subset B_{\epsilon}(p)$.
Let $T_{\epsilon}$ be the maximal interval
containing I such that $f^{n_{\epsilon}}(T_{\epsilon}) \subset T$
and that $f^{n_{\epsilon}}|_{T_{\epsilon}}$ is a diffeomorphism.
Notice that $f^{n_{\epsilon}}(T_{\epsilon})=T$: otherwise, there would exist $y \in T$ such that $y= f^{n_{\epsilon}}(a)$ with $a \in \partial T_{\epsilon}$, and, as $f^{n_{\epsilon}}|_{T_{\epsilon}}$ cannot be monotonically extended to a bigger interval, there would exist $0 \le j < {n_{\epsilon}}$ such that $f^j(a) =c$. This would lead to a contradiction, since then $y$, being a one-sided limit $f^{n_{\epsilon}}(a_{\pm})=f^{({n_{\epsilon}-j})}(c_{\pm})$, would belong to $\overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$, while $y\in T \subset [0,1] \setminus\overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$.
Let $J_{\epsilon}=(f^{n_{\epsilon}}|_{T_{\epsilon}}) ^{-1} (B_{\epsilon}(p))$
and $U = \cap_{\epsilon>0} J_{\epsilon}$.
As ${\epsilon} \to 0$ implies ${n_{\epsilon}}\to \infty$, and as each $f^{n_{\epsilon}}|_{T_{\epsilon}}$ is a diffeomorphism onto its image, it follows that $f^j$ is a diffeomorphism on $U$ for every $j$. In this way, $U$ is a homterval, and then either $U$ is a wandering interval or $U \subset \mbox{$\mathcal{O}$} ^-(Per(f)) \cup \BB_0(f)$.
As $U \supset I$, the second case cannot occur, since $I$ being wandering implies that there is no periodic attractor; and as $I$ was taken to be maximal, we necessarily have $U=I$.
We can take $\epsilon_0>0$ small enough that, for every $\epsilon<\epsilon_0$, the left and right connected components of $T\setminus
B_{\epsilon}(p)$ are as large as we want compared to $|B_{\epsilon}(p)|$, in such a way that Koebe's Lemma ensures the existence of $K >0$ with $\frac{|Df^{n_\epsilon}(x)|}{|Df^{n_\epsilon}(y)|}\le K$, $\forall x, y \in J_\epsilon$, $\forall \epsilon \in (0,\epsilon_0)$. Recall that $B_{\epsilon}(p)=f^{n_{\epsilon}}(J_{\epsilon})$ and $|f^{n_{\epsilon}}(J_{\epsilon})|\ge (1/K)\, m\, |J_\epsilon|$, where $m=|Df^{n_\epsilon}(x_0)|$ for some $x_0 \in J_\epsilon$, and also $|f^{n_{\epsilon}}(J_\epsilon\setminus I)|\le K m |J_\epsilon \setminus I|$. So, we have the following inequality
$$
\frac{|B_{\epsilon}(p) \setminus f^{n_{\epsilon}}(I)|}{|B_{\epsilon}(p)|}=
\frac{|f^{n_{\epsilon}}(J_\epsilon\setminus I)|}{|f^{n_{\epsilon}}(J_\epsilon)|}\le
K^2 \frac{ |J_\epsilon\setminus I|}{|J_\epsilon|}<1/2.
$$
The last inequality follows from the fact that the collection of the $J_\epsilon$ cannot have a subsequence whose limit is strictly bigger than $I$, for otherwise the intersection of its elements would generate a bigger wandering interval, contradicting the maximality of $I$. So, we can compute these estimates along a nested subsequence of the $J_\epsilon$ whose intersection is $I$, and hence we can take $\epsilon$ small enough such that
$$
\frac{|J_\epsilon\setminus I|}{|J_\epsilon|}<\frac{1}{2 K ^2}
$$
then
$$
\frac{|f^{n_{\epsilon}}(I)|}{|B_{\epsilon}(p)|}>1/2
$$
and then $p \in f^{n_{\epsilon}}(I)$, which is a contradiction, as $p$ was chosen as belonging to $\omega_f(I)$ where $I$ is a wandering interval.
\end{proof}
\begin{Lemma}[Denseness of wandering intervals, when they exist]\label{DenWanInt}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map without periodic attractors. If $f$ has a wandering interval $I$, then $\cw$ is an open and dense set, where $\cw$ is the union of all open wandering intervals of $f$.
\end{Lemma}
\begin{proof}
If $\cw$ is not dense, then $[0,1]\setminus\overline{\cw}$ contains some open interval $I$. Clearly, $I$ is not a wandering interval. As $f$ does not have periodic attractors, we can apply Lemma~\ref{homtervals} and conclude that there is $n\in\NN$ such that $f^{n}|_{I}$ is a homeomorphism and $f^{n}(I)\ni c$. As $\cw$ is invariant ($f^{-1}(\cw)=\cw$), $[0,1]\setminus\overline{\cw}$ is also invariant. Thus, $c\in\operatorname{interior}([0,1]\setminus\overline{\cw})$, that is, there is no wandering interval in a neighborhood of $c$. This is impossible by Lemma~\ref{LemmaWI}.
\end{proof}
\begin{Corollary}
\label{stpremovido}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{3}$ non-flat contracting Lorenz map with negative Schwarzian derivative displaying no periodic attractors.
If $f$ has a wandering interval $I$, then there is an open and dense set $U$ such that, for any given $x \in U$, $\omega_f(x) = \overline{\mbox{$\mathcal{O}$}_f^+(c_{+}) \cup\mbox{$\mathcal{O}$}_f^+(c_{-}) }$.
\end{Corollary}
\begin{proof}
Take $U$ to be the set $\cw$ of Lemma~\ref{DenWanInt}; then $U$ satisfies the required condition by Lemma \ref{trescinco}.
\end{proof}
\section{Periodic Points}\label{Section98767}
Given an interval $J=(a,b)$ and a map $f$ defined in $J$, denote the {\em first return map} to $J$ by $\cf_{J}:J^{*}\to J$. That is, $\cf_{J}(x)=f^{R(x)}(x)$, where $J^{*}=\{x\in J\,;\,\mbox{$\mathcal{O}$}^{+}_{f}(f(x))\cap J\ne\emptyset\}$ and $R(x)=\min\{j\ge1\,;\,f^{j}(x)\in J\}$, which is called the {\em first return time}. Let $\cp_{J}$ be the collection of connected components of $J^{*}$.
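For readers who wish to experiment numerically with first return maps, the following sketch (purely illustrative and not used anywhere in the arguments) computes first return times $R(x)$ and first returns for an ad hoc map with two increasing branches and a discontinuity at $c=1/2$; the particular branches, the interval $J$ and the iteration bound are arbitrary choices made only for this illustration.
\begin{verbatim}
# Illustrative sketch only: numerical first return to an interval J for an
# ad-hoc map with two increasing branches and a discontinuity at c = 1/2
# (the branches below are arbitrary choices made for illustration).

C = 0.5

def f(x):
    # increasing on [0, c) and on (c, 1]; f(c-) = 0.85 > c > 0.15 = f(c+)
    if x < C:
        return 0.85 - 0.85 * (1.0 - 2.0 * x) ** 2
    return 1.0 - 0.85 * (2.0 - 2.0 * x) ** 2

def first_return(x, J, max_iter=10000):
    """Return (R(x), f^{R(x)}(x)) with R(x) = min{j >= 1 : f^j(x) in J},
    or None if no return is detected within max_iter iterates."""
    a, b = J
    y = x
    for j in range(1, max_iter + 1):
        y = f(y)
        if a < y < b:
            return j, y
    return None

if __name__ == "__main__":
    J = (0.4, 0.6)                      # an interval around c
    for x in (0.41, 0.45, 0.55, 0.59):
        print(x, first_return(x, J))
\end{verbatim}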
An open interval $I=(a,b)$ containing the critical point $c$
is called a {\em nice interval} of $f$ if
$\mbox{$\mathcal{O}$}_{f}^{+}(\partial I)\cap I=\emptyset$ and $a,b\notin \beta(\mbox{$\mathcal{O}$}^+(p))\setminus\mbox{$\mathcal{O}$}^+(p)$ for any periodic attractor $\mbox{$\mathcal{O}$}^+(p)$. We will denote the {\em set of nice intervals of $f$} by
$\mathcal{N}=\mathcal{N}(f)$ and the set of nice intervals whose
borders belong to the set of periodic points of $f$ by
$\mathcal{N}_{per}=\mathcal{N}_{per}(f)$, that is,
$\mathcal{N}_{per}=\{I\in\mathcal{N}\,;\,\partial I\subset Per(f)\}$.
\begin{Lemma}\label{Lemma8388881a}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map and let $J=(a,b)$ be a nice interval, with first return map $\cf_{J}:J^{*}\to J$.
The following statements are true.
\begin{enumerate}
\item $\big((p,q)\in\cp_{J}\text{ and }p\ne c\big) \Rightarrow \cf_{J}((p,q))=(a,f^{R|_{(p,q)}}(q));$
\item $\big((p,q)\in\cp_{J}\text{ and }q\ne c\big) \Rightarrow \cf_{J}((p,q))=(f^{R|_{(p,q)}}(p),b);$
\item $\big(I\in\cp_{J}\text{ and }c\notin\partial I\big) \Rightarrow \cf_{J}(I)=J.$
\end{enumerate}
\end{Lemma}
\begin{proof}\,
Assume that $I=(p,q)\in\cp_{J}$ and $p\ne c$. Let $n=R|_{I}$.
If $p=a$, then
(i) If $f^{n}(p)<a$, then $f^{n}(p+\varepsilon)<a$ for $\varepsilon>0$ sufficiently small. This is absurd, as $n$ is a return time of $p+\varepsilon\in I$.
(ii) If $f^{n}(p)\ge b$, then, as $f$ preserves orientation, $f^{n}(p+\varepsilon)\ge b$, which again contradicts the fact that $n$ is a return time of $p+\varepsilon\in I$.
(iii) $f^{n}(p)\in(a,b)$ also leads to a contradiction, because $J$ is nice. So, $f^{n}(p)=a$ whenever $p=a$.
Consider now $a<p$ with $p\ne c$. Cases (i) and (ii) can be proved as before. For the remaining case, suppose $f^{n}(p)\in(a,b)$. As $n$ is the first return time of $I$ to $(a,b)$, we have $f^{j}(I)\cap (a,b)=\emptyset$ for every $0<j<n$, so there is $\varepsilon>0$ small enough that $(p,p+\varepsilon)$ does not return to $(a,b)$ before time $n$.
Thus, $f^{j}(p)\ne c$, $\forall\,0\le j<n$.
Thus, $f^{n}$ is continuous in $(p-\delta,p+\delta)$ for a sufficiently small $\delta>0$.
As a consequence, if $a<f^{n}(p)<b$, then, taking $\delta>0$ small, $n$ will be the first return time for $(p-\delta,q)$ to $(a,b)$, contradicting $I\in\cp_{J}$.
So, we necessarily have $f^{n}(p)=a$, proving (1).
Similarly, (2) follows from the same kind of reasoning, and (3) is a consequence of (1) and (2).
\end{proof}
\begin{Corollary}\label{Corollary8388881b}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map and let $J=(a,b)$ be a nice interval, with first return map $\cf_{J}:J^{*}\to J$.
Then the following statements are true:
\begin{enumerate}
\item $a\in\partial I\text{ for some }I\in\cp_{J}\Leftrightarrow\,\,a\in Per(f).$
\item $b\in\partial I\text{ for some }I\in\cp_{J}\Leftrightarrow\,\,b\in Per(f).$
\end{enumerate}
\end{Corollary}
\begin{proof}
If $I=(a,q)\in\cp_{J}$ (the case $I=(q,b)$ is analogous) and $n=R|_{I}$, it follows from Lemma~\ref{Lemma8388881a} that
$f^n(a)=\cf_{J}(a)=a$. That is, $a$ is a periodic point.
Now suppose that $a\in Per(f)$ or $a$ is a super-attractor (the proof for $b$ is analogous).
Thus, there is $n>0$ such that $\lim_{\delta \downarrow 0} f^{n}(a+\delta)=a$ and $f^{j}(a)\notin[a,b)\ni c$, $\forall0<j<n$.
As $f^n$ is well defined, continuous and monotone on $(a,a+\varepsilon)$ for some $\varepsilon>0$ and as $f$ preserves orientation, we get $f^{n}(x)\in(a,b)$ for every $x>a$ sufficiently close to $a$ and that $f^{j}(x)\notin(a,b)$, $\forall 0<j<n$. Thus, there is some $I=(a,q)\in\cp_{J}$.
\end{proof}
\begin{Lemma}\label{LemmaHGFGH54}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map. If $J=(a,b)$ is a nice interval, then there are sequences $a_{n},b_{n}\in \overline{J}\cap Per(f)$ such that
\begin{enumerate}
\item $\lim_{n}a_{n}=a$ and $\lim_{n}b_{n}=b$;
\item $\mbox{$\mathcal{O}$}^{+}_{f}(a_{n})\cap(a_{n},b)=\emptyset$ and $\mbox{$\mathcal{O}$}^{+}_{f}(b_{n})\cap(a,b_{n})=\emptyset$.
\end{enumerate}
\end{Lemma}
\begin{proof}
We will show the existence of a sequence $a_{n}\in \overline{J}\cap Per(f)$ with $\lim_{n}a_{n}=a$ such that $\mbox{$\mathcal{O}$}^{+}_{f}(a_{n})\cap(a_{n},b)=\emptyset$. Assume that $a\notin Per(f)$, otherwise take $a_{n}=a$. Let $I_{0}=(p_{0},q_{0})\in\cp_{J}$ be such that $I_{0}\subset(a,c)$. By Lemma~\ref{Lemma8388881a}, as $a$ is not periodic we get $p_{0}\ne a$. Thus, there is some $I_{1}\in\cp_{J}$ with $I_{1}\subset (a,p_{0})$. In particular, $c\notin\partial I_{1}$. Again by Lemma~\ref{Lemma8388881a} we get $\cf_{J}(I_{1})=f^{n_{1}}(I_{1})=J$. Thus, there is a fixed point $a_{1}\in\overline{I_{1}}$ of $f^{n_{1}}|_{\overline{I_{1}}}$. As $n_{1}=R_{J}(I_{1})$, it follows that $f^{j}(a_{1})\notin(a,b)$ for every $0<j<n_{1}$ and so $\{a_{1}\}=\mbox{$\mathcal{O}$}^{+}(a_{1})\cap(a,b)$. From this we get $\mbox{$\mathcal{O}$}^{+}(a_{1})\cap(a_{1},b)=\emptyset$. Again, writing $I_{1}=(p_{1},q_{1})$, it follows as before that $a\ne p_{1}$ and so there is some $I_{2}\in\cp_{J}$ such that $I_{2}\subset(a,p_{1})$. Proceeding as before, we get a periodic point $a_{2}\in\overline{I_{2}}$ satisfying the statement. Inductively, we get a sequence $a_{n}\searrow a$ of periodic points with $\mbox{$\mathcal{O}$}^{+}(a_{n})\cap(a_{n},b)=\emptyset$.
Similarly, one can get the sequence $b_{n}\nearrow b$.
\end{proof}
\begin{Lemma}
\label{noitenoite}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map.
If $Per(f)\cap (0,1)=\emptyset$, then either $f$ has an attracting periodic orbit (indeed, at least one of the fixed points is an attractor) or $\omega_{f}(x)\ni c$, $\forall\, x \in (0,1)$.
\end{Lemma}
\begin{proof}
Under these hypotheses, if $f$ has a periodic attractor, it has to be the fixed point $0$, the fixed point $1$, or both. If none of these occurs, $f$ does not have a periodic attractor. Suppose, in this case, that we can choose a point $x \in (0,1)$ such that $\omega_{f}(x)\not\ni c$. Let $(a,b)$ be the connected component of $[0,1]\setminus\overline{\mbox{$\mathcal{O}$}^+_f(x)}$ containing $c$. If $\exists n$ such that $f^n(a)\in(a,b)$, then, as $a\in\overline{\mbox{$\mathcal{O}$}^+_f(x)}$, $f^n(a)\in\overline{\mbox{$\mathcal{O}$}^+_f(x)}$, in contradiction with the fact that $(a,b) \subset [0,1]\setminus\overline{\mbox{$\mathcal{O}$}^+_f(x)}$. The same reasoning applies to the point $b$, and then $(a,b) \subsetneq (0,1)$ is a nice interval, so $Per(f)\cap (a,b)\ne\emptyset$ (Lemma~\ref{LemmaHGFGH54}), which is a contradiction.
\end{proof}
\begin{Lemma}\label{Lemma545g55}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a non-flat $C^{2}$ contracting Lorenz map.
If $f(x)>x$, $\forall x \in{(0,c)}$, $f(x)<x$, $\forall x \in{(c,1)}$ and $\lim_{x \uparrow c}f(x)>c > \lim_{x\downarrow c}f(x)$, then $$\mbox{$\mathcal{O}$}^{+}_{f}(x)\cap(0,c)\ne\emptyset\ne\mbox{$\mathcal{O}$}^{+}_{f}(x)\cap(c,1), \hspace{0.2cm} \forall\,x\in (0,1)\setminus\mbox{$\mathcal{O}$}^{-}_f(c).$$
\end{Lemma}
\begin{proof}
Suppose, for instance, that there is $y\in(0,c)\setminus\mbox{$\mathcal{O}$}^{-}_f(c)$ such that $f^n(y)\in(0,c)$, $\forall\,n\ge0$. That is, $0<f^{n}(y)<c$ for all $n\ge0$.
As $f|_{(0,c)}$ is an increasing map, we get $f(0)=0<y<f(y)<f^{2}(y)<\cdots<f^{n}(y)\cdots<c$. This implies that $\lim_{n\to\infty}f^{n}(y)$ is a fixed point for $f$, contradicting our hypothesis. Thus, there is some $n>0$ such that $f^{n}(y)\in(c,1)$.
\end{proof}
\begin{Lemma}\label{AcumulacaoDePer}
If $f:[0,1]\setminus\{c\}\to[0,1]$ is a non-flat $C^{2}$ contracting Lorenz map without periodic attractors, then either $\exists\,\delta>0$ such that $c\in\omega_{f}(x)$, $\forall\,x\in(c-\delta,c+\delta)$ or $$\overline{Per(f)\cap(0,c)}\ni c\in\overline{(c,1)\cap Per(f)}.$$
\end{Lemma}
\begin{proof}
Suppose that $f$ does not have periodic attractors and suppose also that $\nexists\delta>0$ such that $c\in\omega_{f}(x)$, $\forall\,x\in(c-\delta,c+\delta)$. In this case, by Lemma~\ref{noitenoite}, $Per(f)\cap(0,1)\ne\emptyset$. As $f$ does not have periodic attractors, $f(x)>x$, $\forall x \in{(0,c)}$, $f(x)<x$, $\forall x \in{(c,1)}$ and $\lim_{x \uparrow c}f(x)>c > \lim_{x\downarrow c}f(x)$, and then $\mbox{$\mathcal{O}$}_{f}^{+}(x)\cap(0,c)\ne\emptyset\ne(c,1)\cap\mbox{$\mathcal{O}$}_{f}^{+}(x)$, $\forall\,x\in(0,1)\setminus\mbox{$\mathcal{O}$}^{-}_f(c)$, by Lemma \ref{Lemma545g55}. Thus, $Per(f)\cap(0,c)\ne\emptyset\ne(c,1)\cap Per(f)$.
Let $a=\sup Per(f)\cap(0,c)$ and $b=\inf Per(f)\cap(c,1)$. We know that $0<a\le c\le b<1$. If $a=b$ the proof is done. So suppose that $a\ne b$. We may assume that $0<a<c\le b<1$ (the other case is analogous).
We claim that $\mbox{$\mathcal{O}$}_{f}^{+}(a_{-})\cap(a,b)=\emptyset=(a,b)\cap\mbox{$\mathcal{O}$}_{f}^{+}(b_{+})$. Indeed, if there is a minimum $\ell\ge1$ such that $f^{\ell}(a_{-})\in(a,b)$, then $\emptyset\ne f^{\ell}((a-\varepsilon,a)\cap Per(f))\subset(a,b)$, contradicting the definition of $a$ and $b$. With the same reasoning we can show that $\mbox{$\mathcal{O}$}_{f}^{+}(b_{+})\cap(a,b)=\emptyset$.
Notice that $\exists n>0$ such that $f^{n}((a,c))\cap(a,c)\ne\emptyset$. Indeed, $(a,c)$ can not be a wandering interval (Lemma~\ref{LemmaWI}) and as $f$ does not have periodic attractors, it follows from the homterval lemma (Lemma~\ref{homtervals}) that $f^{n}((a,c))\ni c$ for some $n\ge1$. Let $\ell$ be the smallest integer bigger than $0$ such that $c \in f^{\ell}((a,c))$. As $\mbox{$\mathcal{O}$}_{f}^{+}(a)\cap(a,c)=\emptyset$, we get $f^{\ell}((a,c))\supset(a,c)$. Thus, there is a periodic point $p\in[a,c)$ with period $\ell$. By the definition of $a$, it follows that $p=a$.
We claim that $f^{\ell}((a,c))\subset(a,b)$. If not, let $q_{0}\in f^{\ell}((a,c))\cap Per(f)\cap[b,1)$. Let $q=\min \mbox{$\mathcal{O}$}_{f}^{+}(q_{0})\cap(c,1)$ and $q'=(f^{\ell}|_{(a,c)})^{-1}(q)$. Clearly, $a<q'<c<q$ and $(q',q)$ is a nice interval. Thus, by Lemma~\ref{LemmaHGFGH54}, $Per(f)\cap(q',c)\ne\emptyset$ and this contradicts the definition of $a$.
Notice that $f^{\ell}((a,c))\ni c$, otherwise $f$ would have periodic attractors. As a consequence of this and of the claim above, $b>c$.
As $b>c$, $(a,b)$ is a nice interval. We already know that $f^{\ell}(a)=a$. Moreover, by the definition of $b$ and Lemma~\ref{LemmaHGFGH54}, $b$ must also be a periodic point. So, let $r=\operatorname{Period}(b)$. By the same reasoning as in the claim above, we get $f^{r}((c,b))\subset(a,b)$.
Thus, the first return map to $[a,b]$ is conjugate to a contracting Lorenz map $g:[0,1]\setminus\{c_{g}\}\to[0,1]$. As $\nexists\delta>0$ such that $c\in\omega_{f}(x)$, $\forall\,x\in(c-\delta,c+\delta)$, it follows that $\exists\,x\in[0,1]$ such that $c_{g}\notin\omega_{g}(x)$. So, it follows from Lemma~\ref{noitenoite} that $Per(g)\cap(0,1)\ne\emptyset$. As a consequence, $Per(f)\cap(a,b)\ne\emptyset$. This contradicts the definition of $a$ and $b$, proving the lemma.
\end{proof}
\begin{Lemma}\label{omegaemc}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a non-flat $C^{2}$ contracting Lorenz map without periodic attractors and such that $Per(f)\cap (0,1)=\emptyset$. If $x\in (0,1)$ is such that $c \in \omega_f(x)$, then $\overline{\mbox{$\mathcal{O}$}_f^+(x)\cap(0,c)}\ni c\in \overline{(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)}$.
\end{Lemma}
\begin{proof}
As $c \in \omega_f(x)$, $x$ is under the hypotheses of Lemma \ref{Lemma545g55}, so $\mbox{$\mathcal{O}$}_f^+(x)\cap(0,c)\ne\emptyset\ne(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)$. Also, $c \in \omega_f(x)$ implies that $c \in \overline{\mbox{$\mathcal{O}$}_f^+(x)\cap(0,c)}$ or $c\in \overline{(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)}$. Suppose that one of these does not occur; for instance, suppose $c\not\in \overline{(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)}$. Then, defining $v=\inf \overline{(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)}$, the interval $J=(c,v)$ is such that there is no $j$ with $c\in f^j(J)$: otherwise, either $J\subset f^j(J)$, which would imply the existence of a periodic repeller, in contradiction with the hypothesis, or $f^j(v) \in J$, and, as $v \in \omega_f(x)$ and $\omega_f(x)$ is a positively invariant set, this contradicts the definition of $J$.
So, as $\nexists j \in \NN$ such that $c \in f^j(J)$ and we are supposing there are no periodic attractors, Lemma \ref{homtervals} implies that $J$ is a wandering interval. But we know from Lemma \ref{LemmaWI} that $(c,v)$ cannot be a wandering interval, leading to a contradiction.
\end{proof}
\begin{Lemma}[Variational Principle]\label{LemmaVarPric}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a non-flat $C^{2}$ contracting Lorenz map without periodic attractors. Suppose that $\nexists\delta>0$ such that $c\in\omega_{f}(x)$, $\forall\,x\in(c-\delta,c+\delta)$.
Given $\varepsilon>0$, there exists a unique periodic orbit minimizing the period of all periodic orbits intersecting $(c-\varepsilon,c)$. Similarly, there exists a unique periodic orbit minimizing the period of all periodic orbits intersecting $(c,c+\varepsilon)$.
\end{Lemma}
\begin{proof}
As $Per(f)\cap(c-\varepsilon,c)\ne\emptyset$ (Lemma~\ref{AcumulacaoDePer}), let
$$
n=\min\{\operatorname{Period}(x)\,;\,x\in Per(f)\cap(c-\varepsilon,c)\}
$$
and suppose that there are $p_0,q_0\in Per_n(f)\cap(c-\varepsilon,c)$ such that $\mbox{$\mathcal{O}$}_f^+(p_0)\ne\mbox{$\mathcal{O}$}_f^+(q_0)$. Let $p=\max\big(\mbox{$\mathcal{O}$}_f^+(p_0)\cap(c-\varepsilon,c)\big)$ and $q=\max\big(\mbox{$\mathcal{O}$}_f^+(q_0)\cap(c-\varepsilon,c)\big)$. Thus, $\mbox{$\mathcal{O}$}_f^+(p)\cap(p,c)=\emptyset=\mbox{$\mathcal{O}$}_f^+(q)\cap(q,c)$. We may assume that $q<p$.
\begin{figure}
\label{Fig47488}
\end{figure}
Notice that $f^n$ cannot be monotone on $(q,p)$. Indeed, if $f^n$ were monotone on $(q,p)$, then $f^n([q,p])=[q,p]$. As $f^n$ cannot be the identity on $[q,p]$, the equality $f^n([q,p])=[q,p]$ would imply the existence of an attracting fixed point for $f^n$ on $[q,p]$. But this is impossible, as we are assuming that $f$ does not have a finite attractor.
As $f^n$ is not monotone on $(q,p)$, there is $0<j<n$ such that $f^j$ is monotone on $(q,p)$ and $c\in f^j((q,p))$. Thus, $f^j(q)<c<f^j(p)$. Moreover, $f^j(q)<q$ (because $\mbox{$\mathcal{O}$}_f^+(q)\cap(q,c)=\emptyset$ and $j<n$). Thus, $f^j((q,p))\supset(q,p)$ (see Figure~\ref{Fig47488}) and this implies the existence of a periodic point $a\in[q,p]\subset(c-\varepsilon,c)$ with period $j<n$, contradicting the minimality of $n$.
The proof for the case $(c,c+\varepsilon)$ is analogous.
\end{proof}
\section{Renormalization and Cherry maps}
\label{renormecherry}
\begin{Definition}[Left and right renormalizations]
Let $f$ be a contracting Lorenz map, $J=(a,b)\in\cn$ and let $F:J^{*}\to J$ be the map of first return to $J$. We say that $f$ is renormalizable by the left side with respect to $J$ (or, for short, $J$-left-renormalizable) if $(a,c)\subset J^{*}$ (this means that $F|_{(a,c)}=f^{n}|_{(a,c)}$ for some $n\ge1$).
Analogously, we define $f$ to be renormalizable by the right side with respect to $J$ (or, for short, $J$-right-renormalizable) if $(c,b)\subset J^{*}$.
\end{Definition}
If the first return map $F$ to an interval $J$ with $\overline{J}\ne[0,1]$ is conjugate to a Lorenz map, then $f$ is called {\em renormalizable} with respect to $J$. The renormalization of $f$ (with respect to $J$) is the map $g:[0,1]\setminus \{\frac{c-a}{b-a}\}\to[0,1]$ given by $$g(x)=A^{-1}\circ F \circ A(x),$$ where $A(x)=(b-a) x + a$.
Notice that $f$ is renormalizable with respect to $J$ if and only if $J\in\cn_{per}$ and $f$ is renormalizable by both sides (left and right) with respect to $J$. Moreover, using Corollary~\ref{Corollary8388881b}, it is easy to check the following result:
\begin{Lemma}\label{Lemma09090863}
Let $J=(a,b)\in\cn$. The following statements are equivalent:
\begin{enumerate}
\item $f$ is renormalizable with respect to $J$.
\item $(\,\overline{J}\,)\,^{*}=[a,c)\cup(c,b]$.
\item $c\in\partial I$, $\forall\,I\in \cp_{\overline J}$.
\item $a$ and $b$ are periodic points, $$f^{\operatorname{Period}(a)}([a,c))\subset[a,b]\supset f^{\operatorname{Period}(b)}((c,b]).$$
\end{enumerate}
\end{Lemma}
The interval involved in a (left/right) renormalization is called an interval of (left/right) renormalization. A map $f$ is {\em non-renormalizable} if it does not admit any interval of renormalization.
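The affine change of variables in the definition of the renormalization above can be made concrete as in the following sketch, which evaluates $g=A^{-1}\circ F\circ A$ numerically; it assumes that the return times $\ell$ on $(a,c)$ and $r$ on $(c,b)$ are already known (they are hypothetical inputs here), and it is only an illustration, not a definitive implementation.
\begin{verbatim}
# Illustrative sketch: evaluating the renormalization g = A^{-1} o F o A on
# a renormalization interval J = (a, b), assuming the return times l on
# (a, c) and r on (c, b) are known, so that F|_(a,c) = f^l and F|_(c,b) = f^r.

def renormalization(f, a, b, c, l, r):
    A = lambda x: (b - a) * x + a        # affine bijection [0, 1] -> [a, b]
    A_inv = lambda y: (y - a) / (b - a)
    c_g = A_inv(c)                       # critical point of g, (c - a)/(b - a)

    def iterate(y, n):
        for _ in range(n):
            y = f(y)
        return y

    def g(x):
        y = A(x)
        n = l if y < c else r
        return A_inv(iterate(y, n))

    return g, c_g
\end{verbatim}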
In what follows, given a renormalization interval $J$, we will refer to some concepts that were introduced previously, namely its renormalization cycle $U_J$, the nice trapping region $K_{J}$ associated to $J$, and the gaps of the sets $\Lambda_{J}$ (where $\Lambda_{J}$, also already defined, is the set of points whose orbits never reach an open set $J \ni c$). These definitions were given before the statement of Theorem \ref{SOLENOIDETH} in Section \ref{mainresults}.
\begin{Lemma}\label{Corollary545g55} Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ contracting Lorenz map without periodic attractors.
For any given renormalization interval $J=(a,b)$ of $f$, we have that $$\mbox{$\mathcal{O}$}^{+}_{f}(x)\cap(a,c)\ne\emptyset\ne\mbox{$\mathcal{O}$}^{+}_{f}(x)\cap(c,b)\hspace{0.5cm}\forall\,x\in J\setminus\mbox{$\mathcal{O}$}^{-}_f(c).$$
Therefore, the positive orbit $\mbox{$\mathcal{O}$}^{+}_{f}(x)$ of any $x\in J\mbox{$\Leftarrow$}tminus\mbox{$\mathcal{O}$}^{-}_f(c)$ intersects each connected component of the renormalization cycle $U_J$ (and also each connected component of the nice trapping region $K_{J}$).
\end{Lemma}
\begin{proof}
Let $\ell=\operatorname{Period}(a)$ and $r=\operatorname{Period}(b)$. As $f$ has no periodic attractors, it does not have any super-attractor either, and then $\lim_{x \uparrow c}f^{\ell}(x)>c > \lim_{x\downarrow c}f^{r}(x)$.
If there is $x\in(a,c)$ such that $f^{\ell}(x)\le x$, then $f^{\ell}|_{[a,x]}$ will have an attracting fixed point, as $f^{\ell}|_{[a,x]}$ is not the identity, but this contradicts the hypothesis. The same reasoning can be done for $f^{r}|_{(c,b)}$, and therefore applying Lemma \ref{Lemma545g55} to the renormalization of $f$ with respect to $J$, we conclude the proof.
\end{proof}
We say that two open intervals $I_{0}$ and $I_{1}$ are linked if $\partial I_{0}\cap I_{1}\ne\emptyset\ne I_{0}\cap\partial I_{1}$.
\begin{Lemma}\label{renormalinks} Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map without periodic attractors. Then, two renormalization intervals of $f$ can never be linked. Moreover, if $J_{0}$ and $J_{1}$ are two renormalization intervals and $J_{0}\ne J_{1}$, then either $\overline{J_{0}}\subset J_{1}$ or $\overline{J_{1}}\subset J_{0}$. In particular, $\partial J_{0}\cap\partial J_{1}=\emptyset$.
\end{Lemma}
\begin{proof}
Write $J_0=(a_0,b_0)$ and $J_1=(a_1,b_1)$. First note that $J_{0}$ and $J_{1}$ cannot be linked. Indeed, if they were linked, we would either have $a_{0}<a_{1}<c<b_{0}<b_{1}$ or $a_{1}<a_{0}<c<b_{1}<b_{0}$. We may suppose that $a_{0}<a_{1}<c<b_{0}<b_{1}$. In this case, $a_{1}\in J_{0}$ and, by Lemma~\ref{Corollary545g55},
$\emptyset\ne\mbox{$\mathcal{O}$}_{f}^{+}(a_{1})\cap(c,b_{0})\subset\mbox{$\mathcal{O}$}_{f}^{+}(a_{1})\cap(a_{1},b_{1})=\mbox{$\mathcal{O}$}_{f}^{+}(a_{1})\cap J_{1}$ contradicting the fact that $J_{1}$ is a nice interval.
As $J_{0}\cap J_{1}\ne\emptyset$ (because both contain the critical point) and as $J_{0}$ and $J_{1}$ are not linked, it follows that either $J_{0}\supset J_{1}$ or $J_{0}\subset J_{1}$. We may suppose that $J_{0}\supset J_{1}$. In this case, as $J_{0}\ne J_{1}$ we have three possibilities: either $a_{0}< a_{1}<c<b_{1}= b_{0}$ or $a_{0}= a_{1}<c<b_{1}< b_{0}$ or $J_{0}\supset\overline{J_{1}}$. If $a_{0}< a_{1}<c<b_{1}= b_{0}$, we can again use Lemma~\ref{Corollary545g55} to get $\mbox{$\mathcal{O}$}_{f}^{+}(a_{1})\cap J_{1}\ne\emptyset$. On the other hand, if $a_{0}= a_{1}<c<b_{1}< b_{0}$, the same Lemma~\ref{Corollary545g55} implies that $\mbox{$\mathcal{O}$}_{f}^{+}(b_{1})\cap J_{1}\ne\emptyset$. In both cases we get a contradiction to the fact that $J_{1}$ is a nice interval. Thus, the remaining possibility is the only valid one.
\end{proof}
A periodic attractor $\Lambda$ is called {\em essential} if its local basin contains $c^{-}$ or $c^{+}$. Precisely, if $\exists\,p\in\Lambda$ such that $(p,c)$ or $(c,p)$ is contained in $\beta(\Lambda)=\{x\,;\,\omega_{f}(x)\subset\Lambda\}$ (the basin of $\Lambda$). If a periodic attractor is not essential, it is called {\em inessential}. Notice that if $f$ is $C^{3}$ and has negative Schwarzian derivative, then, by Singer's Theorem, $f$ does not admit inessential periodic attractors.
\begin{Proposition}
\label{Lemma1110863}Suppose that $f:[0,1]\setminus\{c\}\to[0,1]$ is a $C^{2}$ non-flat contracting Lorenz map that does not admit inessential periodic attractors.
If $J_{n}$ is an infinite sequence of renormalization intervals with $J_{n}\supsetneqq J_{n+1}$, then $\bigcap_{n}J_{n}=\{c\}$.
\end{Proposition}
\begin{proof}
Let $J=\bigcap_{n}J_{n}$. Write $(a,b)=\operatorname{interior} J$. Suppose for example that $a\ne c$ (the case $b\ne c$ is analogous). Given $x\in(a,b)$, let $R(x)=\min\{j>0\,;\,f^{j}(x)\in(a,b)\}$. As the $J_n=(a_n,b_n)$ are renormalization intervals, $(a_n,c)$ only returns to $J_n$ at time $\operatorname{Period}(a_n)$ (and $(c,b_n)$ at the period of $b_n$), that is, the first return occurs at time $\operatorname{Period}(a_n)$. So $R(x)\ge\min\{\operatorname{Period}(a_n),\operatorname{Period}(b_{n})\}\to\infty$, and thus $R(x)=\infty$, $\forall\,x\in(a,b)$.
As $f^{j}((a,c))\cap(a,b)=\emptyset$, $\forall\,j>0$ (because $R\equiv\infty$), the map $f^{j}|_{(a,c)}$ is a homeomorphism for every $j$. By Lemma~\ref{LemmaWI}, $(a,c)$ is not a wandering interval. As $\mbox{$\mathcal{O}$}_f^-(Per(f))$ does not contain intervals, it follows from Lemma~\ref{homtervals} that there is a periodic attractor $\Lambda$ with $(a,c)\cap\beta(\Lambda)\ne\emptyset$. As $f$ does not have inessential periodic attractors, there is some $q\in\Lambda$ such that $(q,c)$ or $(c,q)\subset\beta(\Lambda)$.
As $q$ is periodic, $q\notin[a,b]$. Thus, $q<a_{n}<c$ for some $n$, or $c<b_{n}<q$. In any case we get a contradiction, for neither $a_n$ nor $b_n$ can be in the basin of a periodic attractor.
\end{proof}
\begin{Corollary}\label{Corolary989982}
Suppose that $f:[0,1]\setminus\{c\}\to[0,1]$ is a $C^{2}$ non-flat contracting Lorenz map that does not admit inessential periodic attractors. If there exists $p\in(0,1)$ such that $\alpha_{f}(p)\ni c\notin\overline{\mbox{$\mathcal{O}$}_{f}^{+}(p)}$, then $f$ is not an infinitely renormalizable map.
\end{Corollary}
\begin{proof}
Suppose that $T_{n}$ is a sequence of pairwise distinct renormalization intervals. By Proposition~\ref{Lemma1110863}, $\bigcap_{n}T_{n}=\{c\}$.
For each $n\in\NN$, let $0<r_n,\ell_n\in\NN$ be such that $f^{\ell_n}(T_n\cap(0,c))\subset T_n$ and $f^{r_n}(T_n\cap(c,1))\subset T_n$ and let $$U_{n}=T_n\cup\bigg(\bigcup_{j=1}^{\ell_n-1}f^j\big(T_n\cap(0,c)\big)\bigg)\cup\bigg(\bigcup_{j=1}^{r_n-1}f^j\big(T_n\cap(c,1)\big)\bigg).$$
If $p\in U_{n}$, $\forall\,n \in \NN$, then $c\in\omega_{f}(p)$, contradicting our hypothesis. Thus, one can find some $n\ge0$ such that $p\notin U_{n}$. But this is not possible, because $c\in\alpha_{f}(p)$ and so, $\mbox{$\mathcal{O}$}_{f}^{-}(p)\cap T_{n}\ne\emptyset$.
\end{proof}
We now have enough information on infinitely renormalizable maps to prove Theorem \ref{SOLENOIDETH}.
\begin{proof}[Proof of Theorem \ref{SOLENOIDETH}]
Write $\cR =\{J_n\}_{n\in\NN}$, with $J_1\supsetneqq J_2\supsetneqq J_3\supsetneqq\cdots$.
Notice that $J_n\supset\overline{J_{n+1}}$, $\forall\,n \in \NN$ and also
\begin{equation}
K_{J_n}=\operatorname{interior}(K_{J_n})\supset\overline{K_{J_{n+1}}},\;\;\forall\,n\in\NN.
\end{equation}
Thus, $$\Delta:=\bigcap_{n\in\NN}\overline{K_{J_n}}=\bigcap_{n\in\NN}K_{J_n}.$$
As each $K_n$ is a trapping region ($f(K_n)\subset K_n$), it is easy to see that $\omega_f(x)\subset\Delta$, whenever $c\in\omega_f(x)$. Indeed, if $c\in\omega_f(x)$, then $\mbox{$\mathcal{O}$}^+_f(x)\cap J_n\ne\emptyset$ for every $n\in\NN$, because $\{c\}=\bigcap_{n}J_n$ (see Proposition~\ref{Lemma1110863}). Thus, $\omega_f(x)\subset\overline{K_n}$, $\forall\,n \in \NN$.
Let $\ck_n$ be the collection of connected components of $K_{J_n}$ and $\ck_n(y)$ be the element of $\ck_n$ containing $y$ (see Figure~\ref{KnBasica.png}), for any given $y\in\Delta$.
Let $\Lambda$ be the (closed) set of points $y\in\Delta$ such that there is a sequence $\Delta\ni y_n\to y$ and $\NN\ni k_n\to\infty$ with $\lim_n\operatorname{diameter}(\ck_{k_n}(y_n))=0$.
Given any $x\in[0,1]$ with $c\in\omega_f(x)$, we have $\mbox{$\mathcal{O}$}^+_f(x)\cap J_n\ne\emptyset$ $\forall\,n\in\NN$ and, by Proposition~\ref{Lemma1110863} and Lemma~\ref{Corollary545g55}, $\mbox{$\mathcal{O}$}^+_f(x)$ intersects every element of $\ck_n$, $\forall\,n \in \NN$. As a consequence, any point $y \in \Lambda$ is accumulated by points of $\mbox{$\mathcal{O}$}^+_f(x)$ for any $x\in[0,1]$ with $c\in\omega_f(x)$. That is,
\begin{equation}\label{eq6783453}
\Delta\supset\omega_f(x)\supset\Lambda\text{\, for every $x$ such that }c\in\omega_f(x).
\end{equation}
\begin{figure}
\label{KnBasica.png}
\end{figure}
\begin{claim} Define $\Delta(y)$ as the connected component of $\Delta$ containing $y$.
If $y\in\Delta$ and $\operatorname{interior}(\Delta(y))\ne\emptyset$, then $\operatorname{interior}(\Delta(y))$ is a wandering interval.
\end{claim}
\begin{proof}[Proof of the claim]
Suppose that $\operatorname{interior}(\Delta(y))\ne\emptyset$ and that $\exists s$ such that $c \in f^s(\Delta(y))$. Then, $\forall n$, $f^s(\Delta(y))\cap J_n\ne\emptyset$. But if $f^s(\Delta(y))\cap J_n\ne\emptyset$, then $f^s(\ck_n(y))\cap J_n\ne\emptyset$ and so, $f^s(\Delta(y))\subset f^s(\ck_n(y))\subset J_n$, $\forall n$.
Thus, if $c\in f^s(\Delta(y))$, then we have $f^s(\Delta(y))\subset \bigcap_nJ_n=\{c\}$ (Proposition~\ref{Lemma1110863}), a contradiction. This implies that $c\notin f^s(\Delta(y))$, $\forall s\in\NN$. From Lemma~\ref{homtervals}, we get that $\operatorname{interior}(\Delta(y))$ is a wandering interval.
\end{proof}
Now consider $y\in\Delta\setminus\Lambda$. We will show that if $c \in \omega_f(x)$, then $y \not\in \omega_f(x)$.
Under the assumption $y\in\Delta\setminus\Lambda$, there is some $\varepsilon>0$ such that $B_{\varepsilon}(y)\cap\Delta=B_{\varepsilon}(y)\cap\Delta(y)$.
Notice that $\Delta(y)\ne\{y\}$, otherwise $\lim_n\operatorname{diameter}(\ck_n(y))=0$ and $y\in\Lambda$.
So, $\operatorname{interior}(\Delta(y))\ne\emptyset$ and so, by the claim above, $\operatorname{interior}(\Delta(y))$ is a wandering interval. This implies that $\omega_f(x) \cap \operatorname{interior}(\Delta(y))=\emptyset$, $\forall x$. So, if $y\in\operatorname{interior}(\Delta(y))$ we have that
$y \not\in \omega_f(x)$.
Consider now $y\notin\operatorname{interior}(\Delta(y))$. Reducing $\varepsilon$ if necessary, $B_{\varepsilon}(y)\cap\Delta\cap\Omega(f)=B_{\varepsilon}(y)\cap\Delta(y)\cap\Omega(f)\subset\{y\}$. Suppose that $y\in\omega_f(x)$ for some $x$ such that $c\in\omega_f(x)$.
In this case, as $\Delta\supset\omega_f(x)$, we conclude that $y$ is an isolated point of $\omega_f(x)$: indeed, as $\omega_f(x)\subset\Delta\cap\Omega(f)$, we have $y \in \omega_f(x)\cap B_{\varepsilon}(y)=\omega_f(x)\cap B_{\varepsilon}(y)\cap\Delta\cap\Omega(f)=\omega_f(x)\cap B_{\varepsilon}(y)\cap\Delta(y)\cap\Omega(f)\subset\{y\}$, then this set is $\{y\}$.
\begin{figure}
\label{Kn.pdf}
\end{figure}
Since $y\notin\operatorname{interior}(\Delta(y))$, we may suppose that $\Delta(y)=[y,b]$ (the case $\Delta(y)=[a,y]$ is analogous). Taking $\varepsilon>0$ small enough, we can assume that $y+\varepsilon<b$. Let $n\ge1$ be such that $y-\varepsilon<k_{n,0}<y$, where $(k_{n,0},k_{n,1}):=K_n(y)$. Let $m_j\in\NN$ be such that $k_{n,0}<f^{m_1}(x)<f^{m_2}(x)<\cdots<f^{m_j}(x)\nearrow\,y$ and $\mbox{$\mathcal{O}$}_f^+(x)\cap(k_{n,0},y)=\{f^{m_1}(x),f^{m_2}(x),f^{m_3}(x),\cdots\}$ (see Figure~\ref{Kn.pdf}).
Choose $j_0$ big enough so that $m_j>m_1$, $\forall\,j\ge j_0$. Given $j\ge j_0$, let $I_j=(t_j,f^{m_1}(x))$ be an interval contained in $(k_{n,0},f^{m_1}(x))$, maximal such that $f^{m_j-m_1}|_{I_j}$ is a homeomorphism. If $k_{n,0}<t_j$, there is some $1\le s<m_j-m_1$ such that $f^s((t_j,f^{m_1}(x)))=(c,f^{m_1+s}(x))$. As $f$ is infinitely renormalizable, Lemma \ref{Corollary545g55} says that the orbit of $x$ accumulates on $c$ from both sides, so $\#\mbox{$\mathcal{O}$}_f^+(x)\cap (c,f^{m_1+s}(x))=\infty$. As $K_{J_n}$ is positively invariant and
$f^{m_j-m_1}(I_j)\cap K_n(y)\ne\emptyset$, we get that $f^{m_j-m_1}(I_j)\subset K_n(y)$. So,
$f^{m_j-m_1}(I_j)\subset (k_{n,0},f^{m_j}(x))\subset (y-\varepsilon,f^{m_j}(x))$. Thus, $\#\mbox{$\mathcal{O}$}_f^+(x)\cap(y-\varepsilon,f^{m_j}(x))=\infty$, and this is absurd, as $y$ was taken to be the only non-wandering point in this neighborhood.
Thus, $t_j=k_{n,0}$ and so, $I_j=(k_{n,0},f^{m_1}(x))\,\,\forall\,j\ge j_0.$
As a consequence, $f^j|_{(k_{_{n,0}},f^{m_1}(x))}$ is a homeomorphism $\forall\,j\in\NN$, because $f^{m_j-m_1}|_{(k_{_{n,0}},f^{m_1}(x))}=f^{m_j-m_1}|_{I_j}$ is a homeomorphism, $\forall\,j\ge j_0$. But this contradicts the homterval lemma (Lemma~\ref{homtervals}), as $(k_{_{n,0}},f^{m_1}(x))$ cannot be a wandering interval ($k_{n,0}$ is pre-periodic, as $\partial K_{J_n}\subset \mbox{$\mathcal{O}$}^{-}_{f}(\partial J_n)$) and as $f$ does not have periodic attractors.
For short, if $c\in\omega_f(x)$, then $y\notin\omega_f(x)$ for all $y\in\Delta\mbox{$\Leftarrow$}tminus\Lambda$. So, by (\ref{eq6783453}), $\omega_f(x)=\Lambda$ when $c\in\omega_f(x)$. Finally, as $\Lambda\subset\bigcap_{J\in\cR }K_J$ and $c\in\omega_f(x)$ for every $x\in\bigcap_{J\in\cR }K_J$, then $\omega_f(x)=\Lambda$, $\forall\,x\in\Lambda$. That is, $\Lambda$ is minimal and so we conclude the proof.
\end{proof}
\begin{Remark}\label{Remark12327890}
Let $x,y\in[0,1]\setminus\{c\}$, $\delta>0$ and $j\in\NN$. If $f^{j}|_{(y-\delta,y+\delta)}$ is a homeomorphism, then $y\in\alpha_{f}(x)$ $\iff$ $f^{j}(y)\in\alpha_{f}(x)$.
\end{Remark}
\begin{Lemma}
\label{Lemma01928373}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map without periodic attractor. If $c\in\alpha_{f}(p)$ for some $p\ne c$, then $\overline{\mbox{$\mathcal{O}$}^{-}_{f}(p)\cap[0,c)}\ni c \in\overline{(c,1]\cap\mbox{$\mathcal{O}$}^{-}_{f}(p)}$.
\end{Lemma}
\begin{proof}
Suppose that $c\in\alpha_{f}(p)$, $p\in[0,c)\cup(c,1]$. We may suppose that $\mbox{$\mathcal{O}$}^{-}_{f}(p)\cap(c-\delta,c)\ne\emptyset$, $\forall\delta>0$, and $\mbox{$\mathcal{O}$}^{-}_{f}(p)\cap(c,c+\delta_{0})=\emptyset$ for some $\delta_{0}>0$, the symmetric case being analogous. As $\alpha_{f}(p)$ is compact, there is some $q>c$ such that $(c,q)$ is a connected component of $[0,1]\setminus\alpha_{f}(p)$.
\begin{claim}
$f^{j}\big((c,q)\big)\cap(c,q)=\emptyset$, $\forall\,j>0$.
\end{claim}
\begin{proof}[Proof of the Claim] Suppose there is a smallest $\ell>0$ such that $f^{\ell}\big((c,q)\big)\cap(c,q)\ne\emptyset$. In this case $f^{\ell}|_{(c,q)}$ is a homeomorphism.
If $f^{\ell}\big((c,q)\big)\subset(c,q)$, then $f$ admits a periodic attractor or a super-attractor, contradicting our hypothesis. Thus, there is some $x\in\{c,q\}\cap f^{\ell}\big((c,q)\big)$. As both $c$ and $q$ are accumulated by pre-images of $p$, it follows that $x$ is also accumulated by pre-images of $p$. So, $\alpha_{f}(p)\cap(c,q)\ne\emptyset$ (Remark~\ref{Remark12327890}), contradicting that $(c,q)$ is contained in the complement of $\alpha_{f}(p)$.
(end of the proof of the Claim)\end{proof}
It follows from the Claim that $f^{j}|_{(c,q)}$ is a homeomorphism for every $j>0$. Moreover, $(c,q)$ is a wandering interval. Indeed, if $f^{j}\big((c,q)\big)\cap f^{k}\big((c,q)\big)\ne\emptyset$, with $j<k$, then $f^{j}\big((c,q)\big)\not\supset f^{k}\big((c,q)\big)$, since $f^{j}\big((c,q)\big)\supset f^{k}\big((c,q)\big)$ implies the existence of a periodic attractor or a super-attractor, contradicting again our hypothesis. Thus, there is $x\in\{f^{j}(c),f^{j}(q)\}$ belonging to $f^{k}\big((c,q)\big)$. As $f^{j}(c)$ and $f^{j}(q)\in\alpha_{f}(p)$ we get $\big(f^{j}|_{(c,q)}\big)^{-1}(x)\in\alpha_{f}(p)\cap(c,q)$ (Remark~\ref{Remark12327890}), contradicting again that $(c,q)$ is contained in the complement of $\alpha_{f}(p)$.
As $(c,q)$ being a wandering interval contradicts Lemma~\ref{LemmaWI}, we conclude that $\mbox{$\mathcal{O}$}^{-}_{f}(p)\cap(c,c+\delta)\ne\emptyset$, $\forall\delta>0$.
\end{proof}
\begin{Lemma}\label{Lemma549164}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a contracting Lorenz map without periodic attractors. Let $p\in(0,1)$ be such that $c\notin\overline{\mbox{$\mathcal{O}$}_{f}^{+}(p)}$ and let $(p_{1},p_{2})$ be the connected component of $(0,1)\setminus\overline{\mbox{$\mathcal{O}$}_{f}^{+}(p)}$ containing the critical point $c$. Given $y\in\mbox{$\mathcal{O}$}_{f}^{-}(p)$ and $\varepsilon>0$, we have $$\bigcup_{j\ge0}f^{j}(y,y+\varepsilon)\supset (p_{1},c)\,\,\text{ and }\,\,\bigcup_{j\ge0}f^{j}(y-\varepsilon,y)\supset (c,p_{2}).$$\end{Lemma}
\begin{proof}
For any given $\delta>0$, Lemma~\ref{Remark98671oxe} says that, $\forall \varepsilon >0$, $\exists j_{1},j_{2}\ge0$ such that $f^{j_{1}}((p-\varepsilon,p))\cap(c-\delta,c+\delta)\ne\emptyset\ne f^{j_{2}}((p,p+\varepsilon))\cap (c-\delta,c+\delta)$. Take $j_{1},j_{2}$ minimal with this property and such that $f^{j_{1}}|_{(p-\varepsilon,p)}$ and $f^{j_{2}}|_{(p,p+\varepsilon)}$ are homeomorphisms. Notice that $f^{j_{2}}\big((p,p+\varepsilon)\big)\supset(p_{1},c-\delta)$ and $f^{j_{1}}((p-\varepsilon,p))\supset(c+\delta,p_{2})$, as $\mbox{$\mathcal{O}$}_{f}^{+}(p)\cap(p_{1},p_{2})=\emptyset$ and $f^{j_{1}}|_{(p-\varepsilon,p)}$ and $f^{j_{2}}|_{(p,p+\varepsilon)}$ preserve orientation.
As a consequence, $$\bigcup_{j\ge0}f^{j}\big((p,p+\varepsilon)\big)\supset \bigcup_{\delta>0}(p_{1},c-\delta)=(p_{1},c)$$ and $$\bigcup_{j\ge0}f^{j}\big((p-\varepsilon,p)\big)\supset\bigcup_{\delta>0}(c+\delta,p_{2})=(c,p_{2}).$$
Suppose that $y\in f^{-s}(p)$ for some $s\ge1$. There is $r>0$ such that $f^{s}|_{(y,y+r)}$ and $f^{s}|_{(y-r,y)}$
are homeomorphisms. As $f^{s}|_{(y,y+r)}$ is a homeomorphism, $f^{s}((y,y+r))=(p,p+\varepsilon)$ with $\varepsilon=f^{s}(y+r)-p$. Thus, $$\bigcup_{j\ge0}f^{j}\big((y,y+r)\big)\supset\bigcup_{j\ge0}f^{j}\big(f^{s}((y,y+r))\big)=\bigcup_{j\ge0}f^{j}\big((p,p+\varepsilon)\big)\supset(p_{1},c).$$ The other inclusion is obtained analogously.
\end{proof}
\begin{Lemma}\label{jotaxrenormaliza}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a contracting Lorenz map. Write $v_{1}=f(c_-)$ and $v_{0}=f(c_+)$.
Given any $x$, $v_{0} < x < v_{1}$, let $J_{x}=(x_{1},x_{2})$ be the connected component of $[0,1]\setminus\alpha_{f}(x)$ that contains the critical point $c$. If $J_{x}\ne\emptyset$, then $J_{x}$ is a renormalization interval and $\partial J_{x}\subset\alpha_{f}(x)$.
\end{Lemma}
\begin{proof}
Firstly observe that $\alpha_{f}(x)\supset\{0,1\}$ because, as $x \in (v_{0},v_{1})$, $0=\lim_{n\to\infty}(f|_{[0,c)})^{-n}(x)$ and $1=\lim_{n\to\infty}(f|_{(c,1]})^{-n}(x)$. Thus, $J_{x}$ is an open interval. Moreover, $\partial J_{x}\subset\alpha_{f}(x)$.
We claim that $J_{x}$ is a nice interval. Otherwise, let $n$ be the smallest integer $n>0$ such that $f^{n}(\partial J_{x})\cap J_{x}\ne\emptyset$. Let $i\in\{1,2\}$ be so that $f^{n}(x_{i})\in J_{x}$. As $f^{j}(x_{i})\notin J_{x}$, $\forall0\le j<n$, there is $\varepsilon>0$ such that $f^{n}|_{(x_{i}-\varepsilon,x_{i}+\varepsilon)}$ is a homeomorphism. From Remark~\ref{Remark12327890} it follows that $f^{n}(x_{i})\in\alpha_{f}(x)$, contradicting $\alpha_{f}(x)\cap J_{x}=\emptyset$. Thus, $J_{x}\in\cn$.
Now let us check that $J_{x}$ is a renormalization interval. Suppose it is not the case; then it follows from Lemma~\ref{Lemma09090863} that one can find a connected component $I=(t_{1},t_{2})$ of the domain of the first return map to $J_{x}$ such that $c\notin\partial I$. By Lemma~\ref{Lemma8388881a}, $f^{k}(I)=\cf_{J_{x}}(I)=J_{x}$, where $k=R_{J_{x}}(I)$. Notice that $t_{1}\in(x_{1},x_{2})$ or $t_{2}\in(x_{1},x_{2})$. Suppose that $t_{1}\in(x_{1},x_{2})$ (the case $t_{2}\in(x_{1},x_{2})$ is similar). As $c\notin\partial I$ (and $f^{j}(t_{1})\notin J_{x}$, $\forall0<j<k$), there is some small $\delta>0$ such that $f^{k}|_{(t_{1}-\delta,t_{1}+\delta)}$ is a homeomorphism. As $f^{k}(t_{1})=x_{1}\in\alpha_{f}(x)$, it follows from Remark~\ref{Remark12327890} that $t_{1}\in\alpha_{f}(x)$. But this is impossible as $\alpha_{f}(x)\cap J_{x}=\emptyset$.
\end{proof}
\begin{Corollary}\label{Cor111} Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map without periodic attractors. If $p\in Per(f)$, then either
$\overline{\mbox{$\mathcal{O}$}^{-}_{f}(p)\cap(0,c)}\ni c \in\overline{(c,1)\cap\mbox{$\mathcal{O}$}^{-}_{f}(p)}$ or the connected component $J_{p}$ of $[0,1]\setminus\alpha_{f}(p)$ containing the critical point is non-empty and is a renormalization interval.\end{Corollary}
\begin{Notation}[$\cl_{Per}$, $\cl_{Sol}$ and $\cl_{Che}$]
\label{persolche}
Let $\cl_{Per}$ denote the collection of contracting Lorenz maps having periodic attractors. The set of all $\infty$-renormalizable contracting Lorenz maps will be denoted by $\cl_{Sol}$. Let $\cl_{Che}$ be the set of all contracting Lorenz maps that are Cherry maps.
\end{Notation}
Recall that $f$ is a {\em Cherry map} if it does not have a periodic attractor or a super-attractor and there is $\delta>0$ such that $c\in\omega_{f}(x_{\pm})$ for every $x\in(c-\delta,c+\delta)$.
\begin{Lemma}
\label{vizinhanca}
If $f:[0,1]\setminus\{c\}\to[0,1]$ is a $C^{2}$ non-flat contracting Lorenz map and $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$, then $c\in\alpha_{f}(p)$ for some $p\in Per(f)$.
\end{Lemma}
\begin{proof}
If $f$ is not renormalizable, let $I=(a,b)=(0,1)$; otherwise, let $I=(a,b)$ be the smallest renormalization interval of $f$ (we are assuming that $f\notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$).
By Lemma~\ref{noitenoite} we can pick a periodic point $p \in (a,b)$. So, we have that $ p \in \alpha_{f}(p)$. As a consequence, it follows from Corollary~\ref{Cor111} that $\overline{\mbox{$\mathcal{O}$}_{f}^{-}(p)\cap(0,c)}\ni c \in\overline{(c,1)\cap \mbox{$\mathcal{O}$}_{f}^{-}(p)}$. Indeed, if the pre-orbit of $p$ did not accumulate on $c$ from both sides, then $J_{p}\ne\emptyset$ would be a renormalization interval. In this case, as $p\in \alpha_{f}(p)$, we would get $J_{p}\subsetneqq(a,b)$. This is absurd, as $(a,b)$ is the smallest renormalization interval.
\end{proof}
\begin{Proposition}[Long branches lemma]\label{Proposition008345678}Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map.
Suppose that $f$ does not admit a periodic attractor. If $\alpha_{f}(p)\ni c \notin\omega_{f}(p)$ for some $p\ne c$, then there exists $\varepsilon>0$ such that $\overline{ \mbox{$\mathcal{O}$}^{-}_{f}(x)\cap(0,c)} \ni c \in \overline{ \mbox{$\mathcal{O}$}^{-}_{f}(x)\cap(c,1)}$ for every $0<|x-c|<\varepsilon$. Furthermore, $f$ is not $\infty$-renormalizable, $f$ is not a Cherry map and $Per(f)\cap(c-\delta,c)\ne\emptyset\ne Per(f)\cap(c,c+\delta)$, $\forall\delta>0$.
\end{Proposition}
\begin{proof}
Suppose by contradiction that the main statement is not true. That is, $c\in\overline{W}$, where $$W=\{x\,;\,c\notin \overline{ \mbox{$\mathcal{O}$}^{-}_{f}(x)\cap(0,c)} \text{ or }c\notin \overline{ \mbox{$\mathcal{O}$}^{-}_{f}(x)\cap(c,1)}\}.$$
By Lemma~\ref{Lemma01928373}, if $\mbox{$\mathcal{O}$}^{-}_{f}(x)$ accumulates on one side of $c$, then $\mbox{$\mathcal{O}$}^{-}_{f}(x)$ accumulates on $c$ from both sides. Hence, $W=\{x\,;\, c \notin \alpha_f(x)\}$.
Let $(p_{1},p_{2})$ be the connected component of $[0,1]\setminus\overline{\mbox{$\mathcal{O}$}_{f}^{+}(p)}$ that contains $c$.
Choose a sequence $\mbox{$\mathcal{O}$}_{f}^{-}(p)\ni y_{n}\to c$. As $f$ does not have a periodic attractor, taking a subsequence if necessary, we get by Lemma~\ref{Lemma549164} that
\begin{equation}\label{EQ2345654a}
\bigcup_{j\ge0}f^{j}\big((y_{n},y_{n}+\varepsilon)\big)\supset (p_{1},c)\,\,\forall\varepsilon>0,\forall\,n>0
\end{equation} and that
\begin{equation}\label{EQ2345654b}
\bigcup_{j\ge0}f^{j}\big((y_{n}-\varepsilon,y_{n})\big)\supset (c,p_{2})\,\,\forall\varepsilon>0,\forall\,n>0.
\end{equation}
As $c$ is accumulated by $W$, say from the left (the other case is similar), choose some $q\in(p_{1},c)\cap W$. It follows from (\ref{EQ2345654a}) that $\bigcup_{j\ge0}f^{j}\big((y_{n},c)\big)\supset (p_{1},c)\ni q$, $\forall\,n>0$ (we are taking $\varepsilon=|y_{n}-c|$ in (\ref{EQ2345654a})). Thus, there are sequences $y_{n}<q_{n}<c$ and $i_{n}\to\infty$ such that $f^{i_{n}}(q_{n})=q$, $\forall\,n \in \NN$. This implies that $c\in\alpha_{f}(q)$. But this is absurd, because $q\in W$.
Therefore, we cannot have $c\in\overline{W}$, and this proves the main part of the Proposition. By Corollary~\ref{Corolary989982}, $f$ cannot be $\infty$-renormalizable. As $\omega_{f}(y)=\omega_{f}(p)\not\ni c$ for all $y\in\mbox{$\mathcal{O}$}^{-}_{f}(p)$, it follows that $f$ cannot be a Cherry map. Finally, let us show that $Per(f)\cap(c-\delta,c)\ne\emptyset\ne Per(f)\cap(c,c+\delta)$, $\forall\delta>0$. For this, let $n\ge \operatorname{Period}(p)$ and let $J_{n}$ be the connected component of $(0,1)\setminus\bigcup_{j=0}^{n-1}f^{-j}(p)$ containing the critical point $c$. It is easy to see that $J_{n}$ is a nice interval, $\forall\,n \in \NN$. Also, as $\alpha_{f}(p)\ni c$, $\forall \delta>0$, $\exists n$ such that $\partial J_n \subset B_\delta(c)$. As it follows from Lemma~\ref{LemmaHGFGH54} that $Per(f)\cap\overline{J_{n}}\cap(-\infty,c)\ne\emptyset\ne(c,+\infty)\cap\overline{J_{n}}\cap Per(f)$, $\forall\,n \in \NN$, we conclude the proof.
\end{proof}
Observe that it is also true that if $f$ is a Cherry map, then $Per(f) \cap (u,v)=\emptyset$, where $(u,v)$ is the last interval of renormalization.
\section{The structure of the Topological Attractors}
\label{TheSofTA}
We now study the topological attractors of contracting Lorenz maps. The main result is Theorem \ref{cicloint}, from which we obtain (in Section~\ref{ProofABCD}) the main theorems: Theorems \ref{baciastopologicas}, \ref{teoalfalim} and \ref{atratortopologico}.
In this section, $f$ will be a $C^{2}$ non-flat contracting Lorenz map $f:[0,1]\setminus\{c\}\to[0,1]$.
\begin{Lemma}
\label{216}
If $f$ does not have periodic attractors, then
$$
\alpha_f(x) \ni c \Rightarrow \alpha_f(x) \supset \Omega(f)
$$
\end{Lemma}
\begin{proof}
Let $x$ be such that $\alpha_f(x) \ni c$ and, given $y \in \Omega(f)$, consider any neighborhood $T$ of $y$. As $y$ is non-wandering, there are $z \in T$ (we may assume $z \not \in \mbox{$\mathcal{O}$}^-_f (c) \cup \mbox{$\mathcal{O}$}^-_f(Per(f))$) and $j \in \NN$ such that $f^j(z) \in T$. It follows from the homterval lemma that there exists a smallest $t \in \NN$ such that $f^t((z,f^j(z))) \ni c$.
As $x$ is such that $\alpha_f(x) \ni c$, we have that $\mbox{$\mathcal{O}$}^-_f (x) \cap f^t((z,f^j(z))) \ne \emptyset$ and, then, $\mbox{$\mathcal{O}$}^-_f (x) \cap T \supset \mbox{$\mathcal{O}$}^-_f (x) \cap (z,f^j(z)) \ne \emptyset$. As the chosen neighborhood $T$ can be taken as small as we want, we conclude that $y \in \alpha_f(x)$.
\end{proof}
For a Lorenz map $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$, as in Notation~\ref{persolche}, let us define $$\EE = \{x \in (0,1)\,;\, \alpha_{f}(x) \ni c\}.$$
By Lemma~\ref{vizinhanca} and Proposition~\ref{Proposition008345678}, $\EE$ contains a neighborhood of $c$. In the next lemma, consider $(a,b) \subset \EE$ to be the maximal interval containing $c$.
\begin{Lemma}
There exist $\ell, r>0$ such that $f^\ell((a,c))\subset(a,b)\supset f^r((c,b))$.
\end{Lemma}
\begin{proof}
As $f((a,c))$ has non-empty interior, it follows from Lemma~\ref{Remark98671oxe} that some iterates of its points intersect the neighborhood $(a,b)$ of the critical point. Take the minimal $k$ such that $f^k((a,c))\cap (a,b) \ne \emptyset$.
Suppose $f^k((a,c))\not\subset (a,b)$; say $b \in f^k((a,c))$. As $b\in\overline{[0,1]\setminus\EE}$ and $[0,1]\setminus\EE$ is invariant, we get that $f^k((a,c))\cap([0,1]\setminus\EE)\ne\emptyset$ and so $(a,c)\cap([0,1]\setminus\EE)\ne\emptyset$, which is absurd. The argument for $(c,b)$ is analogous.
\end{proof}
For a Lorenz map $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$ and $\ell$ and $r$ as given by the former lemma, we define
\begin{equation}\label{regiaoarmadilha}
\UU = (a,b) \cup \bigg(\bigcup_{j=1}^{\ell-1}f^j((a,c))\bigg) \cup \bigg(\bigcup_{j=1}^{r-1}f^j((c,b))\bigg)\ni c
\end{equation}
and we have that $\UU$ is a trapping region, that is, $f( \UU \setminus \{c\}) \subset \UU$.
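For concreteness, the following sketch assembles the pieces of the trapping region $\UU$ of (\ref{regiaoarmadilha}) from the interval $(a,b)$ and the integers $\ell,r$ of the previous lemma; it assumes that the intermediate images $f^j((a,c))$, $1\le j<\ell$, and $f^j((c,b))$, $1\le j<r$, do not contain the critical point, so that each image is again an interval determined by its endpoints. The numerical tolerance used to approximate the one-sided limits at $c$ is an arbitrary choice, and the whole block is only an illustration of the construction.
\begin{verbatim}
# Illustrative sketch: the pieces of the trapping region U from (a, b) and
# the integers l, r, assuming the intermediate images of (a, c) and (c, b)
# avoid the critical point (so each image is an interval determined by its
# endpoints).  eps approximates the one-sided limits at c.

def trapping_region_pieces(f, a, b, c, l, r, eps=1e-9):
    pieces = [(a, b)]
    lo, hi = a, c - eps                 # the interval (a, c)
    for _ in range(1, l):               # images f^1, ..., f^{l-1} of (a, c)
        lo, hi = f(lo), f(hi)
        pieces.append((lo, hi))
    lo, hi = c + eps, b                 # the interval (c, b)
    for _ in range(1, r):               # images f^1, ..., f^{r-1} of (c, b)
        lo, hi = f(lo), f(hi)
        pieces.append((lo, hi))
    return pieces
\end{verbatim}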
It is worth observing that, given a non-renormalizable Lorenz map $f$ having a trapping region $\cu$, any point in $(0,1)$ eventually reaches this region when iterated by $f$. Also, the non-wandering set within $(0,1)$ is necessarily contained in $\cu$.
\begin{Lemma} Let $f$ be a non-renormalizable Lorenz map defined on $[0,1]\setminus\{c\}$ and let $\UU \subset [0,1]\setminus\{c\}$ be such that $f(\UU)\subset \UU$. Then, $\forall x \in [0,1]\setminus\{c\}$, $\exists k >0$ such that $f^k(x) \in \UU$.
\end{Lemma}
\begin{Corollary}
\label{sobrealfa}
For $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$, we have that
$\alpha_{f}(x) \supset \Omega(f)$ $ \forall x \in \UU$.
\end{Corollary}
\begin{proof}
As Lemma \ref{216} states that $\alpha_f(x) \supset \Omega(f)$ for any $x$ such that $c \in \alpha_f(x)$, this holds for any $x \in \UU$, since $\UU$ is contained in $\EE$.
\end{proof}
\begin{Lemma}
\label{0967809hp}
For $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$, if $\alpha_f(x) \ni c$, then $\alpha_f(x)\cap\UU\subset\Omega(f)\cap\UU$.
\end{Lemma}
\begin{proof}
Consider $x$ such that $\alpha_f(x) \ni c$. Given $y \in \alpha_f(x)$, consider any neighborhood $V$ of $y$. We may assume $V \subset \UU$.
\begin{claim}[A]
$y \in \overline{(V\setminus\{y\})\cap\mbox{$\mathcal{O}$}^-_f(x)}$.
\end{claim}
\begin{proof}
If not, $\exists \epsilon >0$ such that $B_\epsilon(y)\cap\mbox{$\mathcal{O}$}^-_f(x)=\{y\}$. In this case, there exist $n_1<n_2<\cdots$, with $n_j \to \infty$, such that $f^{n_j}(y)=x$.
Then, $$x=f^{n_2}(y)=f^{n_2-n_1}(f^{n_1}(y))=f^{n_2-n_1}(x).$$
Observe that if $f^s(B_\epsilon(y))\not\ni c\, \forall s$, then writing $(\alpha, \beta)=f^{n_1}(B_\epsilon(y))$ we have
$$x \in (\alpha,\beta) \text{ and } f^{k(n_2-n_1)}((\alpha,\beta))\not\ni c\ \,\forall k.$$
Taking $(x,\gamma)=\bigcup_{k\ge1}f^{k(n_2-n_1)}((x,\beta))=\bigcup_{k\ge1}(x,f^{k(n_2-n_1)}(\beta))$, we have that $f^{n_2-n_1}|_{(x,\gamma)}$ is a homeomorphism and $f^{n_2-n_1}((x,\gamma))\subset(x,\gamma).$
But this would imply the existence of attracting periodic orbits, which are assumed not to exist. Then, we necessarily have that $\exists s$ such that $f^s(B_\epsilon(y))\ni c$.
As $c\in \alpha_{f}(x)$, we would have that $\#\mbox{$\mathcal{O}$}_f^-(x)\cap B_\epsilon(y)=\infty$. Again a contradiction, proving Claim (A).
\end{proof}
Because of the Claim we may assume that $y\in \overline{(y,1)\cap V\cap \mbox{$\mathcal{O}$}^-_f(x)}$ (the proof for the case $y\in$ $\overline{(0,y)\cap V\cap \mbox{$\mathcal{O}$}^-_f(x)}$ is analogous).
We may take $x_1<x_2\in(y,1)\cap V \cap\mbox{$\mathcal{O}$}_f^-(x)$ such that $f^{n_2}(x_2)=x=f^{n_1}(x_1)$ with $n_1<n_2$.
\begin{claim}[B]
$\exists s \in \NN$ such that $f^s([x_1,x_2))\ni c$
\end{claim}
\begin{proof}
If $c\notin f^s([x_1,x_2))$, $\forall\,s\ge0$, then $$f^{k(n_2-n_1)}([f^{n_2-n_1}(x),x))=f^{k(n_2-n_1)+n_2}([x_1,x_2))\not\ni c,\; \forall k \in \NN.$$
As $f$ preserves orientation, $f^{k(n_2-n_1)}|_{[f^{n_2-n_1}(x),x)}$ is a homeomorphism, $\forall k\ge 0$, so we have $f^{(k+1)(n_2-n_1)}(x)<f^{k(n_2-n_1)}(x) $, $ \forall k\ge 0$.
Then, $\bigcup_{k\ge0}f^{k(n_2-n_1)}([f^{n_2-n_1}(x),x))$ is an interval $(\gamma,x)$. Besides that, $f^{n_2-n_1}|_{(\gamma,x)}$ is a homeomorphism and $f^{n_2-n_1}((\gamma,x))\subset(\gamma,x)$.
But this is absurd, because it would imply the existence of attracting periodic orbits, which proves Claim (B).
\end{proof}
Let $s\in \NN$ be such that $f^s([x_1,x_2))\ni c$. As $x_1 \in \UU$, we have that $\mbox{$\mathcal{O}$}^-_f(x_1)$ accumulates on $c$ from both sides. Then, $\mbox{$\mathcal{O}$}_f^-(x_1)\cap f^s([x_1,x_2))\ne \emptyset$.
This implies that $\exists x_1' \in \mbox{$\mathcal{O}$}^-_f(x_1)\cap[x_1,x_2)\subset V$, say $x_1' \in f^{-t}(x_1)\cap V$. Then,
$$
f^t(V)\cap V \ne \emptyset
$$
As $V$ is a neighborhood of $y \in \UU$ that was arbitrarily taken, we may conclude that $y \in \Omega(f)$, proving Lemma \ref{0967809hp}.
\end{proof}
\begin{Corollary}
\label{poiupoiu} For $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$,
$\alpha_{f}(x)\cap \UU = \Omega(f)\cap \UU, \forall x \in \UU$.
\end{Corollary}
\begin{Corollary}
\label{compon} If $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$, then any connected component of $\UU \setminus \Omega(f)$ is a wandering interval.
\end{Corollary}
\begin{proof}
Let $J=(a,b)$ be a connected component of $\UU \setminus \Omega(f)$ and suppose it is not a wandering interval. Then Lemma~\ref{homtervals} says there is $n$ for which $f^n(J)\ni c$. Lemma~\ref{vizinhanca} and Proposition~\ref{Proposition008345678} assure us that there are several points with $c$ in their $\alpha$-limit sets inside the set $f^n(J)$. We know that $f^{-1}(\alpha_{f}(x))\subset\alpha_{f}(x)$, and then Corollary~\ref{poiupoiu} assures us that these points are in $\Omega(f)$; but they are inside $J$, which should not contain any point of $\Omega(f)$.
\end{proof}
\begin{Definition}[Strong Transitivity]\label{StTopTrans} Let $\XX$ be a compact metric space. Given a continuous map $g:A\subset\XX\to\XX$, we say it is strongly (topologically) transitive if for any open set $V\subset\XX$ with $V\cap A\ne\emptyset$, we have $\bigcup_{j \ge 0}g^j(V)=A$.
Let us make precise the notation used in this definition: given $V\subset\XX$, let $g^{-1}(V)=\{x\in A\,;\,g(x)\in V\}$. We define inductively $g^{-n}(V)$, for $n\ge2$, by $g^{-n}(V)=g^{-(n-1)}(g^{-1}(V))$. We define for $n\ge1$, $g^{n}(V)=\{g^{n}(v)\,;\,v\in V\cap g^{-n}(A)\}$.
\end{Definition}
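As a heuristic illustration of the notion just defined, the following sketch approximates $\bigcup_{0\le j\le N}g^{j}(V)$ for a finite horizon $N$ by iterating a grid of sample points of $V$ and recording which cells of a coarse partition of $[0,1]$ are visited. This is only a finite numerical picture: it cannot verify strong transitivity (which quantifies over all open sets and all iterates), and the map $g$, the horizon and the grid sizes are hypothetical inputs chosen for the illustration.
\begin{verbatim}
# Heuristic finite-horizon picture of  U_{0<=j<=N} g^j(V):  iterate sample
# points of an open interval V and record which cells of a coarse partition
# of [0, 1] are visited.  This does not verify strong transitivity; g, N and
# the grid sizes are arbitrary inputs for the illustration.

def visited_cells(g, V, N=50, n_samples=2000, n_cells=1000):
    lo, hi = V
    samples = [lo + (hi - lo) * k / (n_samples - 1) for k in range(n_samples)]
    cells = set()
    for x in samples:
        y = x
        for _ in range(N + 1):
            cells.add(min(int(y * n_cells), n_cells - 1))
            y = g(y)
    return len(cells), n_cells          # visited cells vs. total cells
\end{verbatim}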
\begin{Proposition} \label{transitivo} If $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$, then
$f|_{\Omega\cap\UU}$ is strongly transitive.
In particular, $$f|_{\Omega\cap\UU}\text{ is transitive.}$$
\end{Proposition}
\begin{proof}
We know that $f^{-1}(\alpha_{f}(x))\subset\alpha_{f}(x)$. We will show that $\bigcup_{j\ge0}f^j(V\cap\Omega(f)) = \Omega(f)\cap \UU$ for every open set $V \subset \UU$ with $V \cap \Omega(f) \ne \emptyset$. It follows from Corollary~\ref{poiupoiu} that
\begin{equation}
\label{inclusaoestrela}
f^{-1}(\Omega(f)\cap\UU)\cap\UU\subset\Omega(f)\cap\UU.
\end{equation}
Let $V\subset\UU$ be any open set with $V \cap \Omega(f) \ne \emptyset$. Given $x \in \Omega(f)\cap\UU$, we have that $\alpha_{f}(x)\cap V\ne \emptyset$ and, then, $\mbox{$\mathcal{O}$}^-_f(x)\cap V \ne \emptyset$. Pick $t\ge0$ and $x_t \in f^{-t}(x) \cap V$. Define $x_k=f^{t-k}(x_t)$ for $0 \le k \le t$.
$$
x_t \stackrel{f}{\to} x_{t-1} \stackrel{f}{\to} \dots \stackrel{f}{\to} x_0=f^t(x_t)
$$
As $\UU$ is a trapping region, we have that $x_k\in\UU$, $\forall\, 0 \le k \le t$.
We claim that $x_t \in \Omega(f)\cap\UU$. Indeed, we have that $x_0 \in \Omega(f)\cap\UU$. Suppose now that $x_{k-1} \in \Omega(f)\cap\UU$. We have that $x_k \in \UU$, so $x_k \in f^{-1}(x_{k-1})\cap \UU$, and by (\ref{inclusaoestrela}) we get that $x_k \in \Omega(f)\cap \UU$.
It follows by induction that $x_t \in \Omega(f)\cap\UU$.
\end{proof}
\begin{Theorem}
\label{cicloint}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map.
If $f$ does not have an attracting periodic orbit, is not a Cherry map, and is not $\infty$-renormalizable, then there is an open trapping region $U\ni c$, given by a finite union of open intervals, such that $\Lambda:=\overline{U\cap\Omega(f)}$ satisfies the following statements.
\begin{enumerate}
\item$\omega_{f}(x)=\Lambda$ for a residual set of points of $\Lambda$ (in particular, $\Lambda$ is transitive).
\item The basin of attraction of $\Lambda$, $\beta(\Lambda):=\{x;\omega_{f}(x)\subset \Lambda\}$, is an open and dense set.
\item $\exists \lambda >0$ such that $\lim_{n\to\infty} \frac{1}{n}\log |Df^n(x)|=\lambda$ for a dense set of points $x$ in $\Lambda$.
\item \label{opcoes} Either $\Lambda$ is a finite union of intervals or it is a Cantor set.
\item If $\Lambda$ is a finite union of intervals, then $\omega_{f}(x)=\Lambda$ for a residual set of $x$ in $[0,1]$.
\item \label{cantor} $\Lambda$ is a Cantor set if and only if there is a wandering interval.
\end{enumerate}
\end{Theorem}
\begin{proof} Set $\Lambda:=\overline{\Omega(f) \cap\UU}$ with $\UU$ as defined in (\ref{regiaoarmadilha}).
\begin{enumerate}
\item Lemma~\ref{dicotomia} of the Appendix ensures that this holds, as transitivity is provided by Proposition~\ref{transitivo}.
\item
By Lemma~\ref{Remark98671oxe}, the set $\cu = \{ x \in [0,1]\setminus\{c\}\,;\, \exists j \text{ such that } f^j(x)\in \UU\}$ is open and dense. We claim that any point $y$ of $\cu$ is also in $\beta(\Lambda)$. For some $k$, $f^k(y)=x \in \UU$, and we have two possible situations for a point $q\in\omega_{f}(x)=\omega_f(y)$. As $\UU$ is a trapping region, $q$ can be an interior point of $\UU$, and then it automatically belongs to $\Lambda=\overline{\Omega(f) \cap \UU}$. If $q$ is not an interior point, then $q\in\partial\UU$. In this case, as $q\in\omega_{f}(x)$, there are infinitely many iterates $f^{n_j}(x)$ accumulating on $q$. Then there can be no wandering interval with $q$ on its boundary (as images of $x$ keep coming close to $q$). By Corollary~\ref{compon}, as $q$ cannot be on the boundary of a wandering interval, it is not on the boundary of a connected component of $\UU\setminus\Omega(f)$; hence it is accumulated by points of $\Omega(f)\cap\UU$, that is, $q\in \Lambda=\overline{\Omega(f) \cap\UU}$.
\item Proposition~\ref{Proposition008345678} says that repelling periodic points $p \in Per(f)$ accumulate on $c$. As they are in $\Omega(f)$, the ones that are in $\UU$ are also in $\Lambda$, and it follows from Corollary~\ref{poiupoiu} that $\mbox{$\mathcal{O}$}^-_f(p)$ is dense in $\Lambda$.
Given any point $x\in\mbox{$\mathcal{O}$}^-_f(p)$, as it is eventually periodic, say $f^j(x)=p$ (and, as there are infinitely many such points, we can pick one such that $c$ is not in its pre-orbit, in order to proceed with the following computation), we have
\begin{align*}
\lim_{n\to\infty} \frac{1}{n}\log |D(f^{n-j}\circ f^j)(x)|
&=\lim_{n\to\infty} \frac{1}{n}\log\big(|Df^{n-j}(p)|\big)+\lim_{n\to\infty} \frac{1}{n}\log|Df^j(x)|\\
&=\lim_{n\to\infty} \frac{n-j}{n}\cdot\frac{1}{n-j}\log\big(|Df^{n-j}(p)|\big)
=\lim_{n\to\infty} \frac{1}{n-j}\log\big(|Df^{n-j}(p)|\big)=\exp_f(p)=:\lambda.
\end{align*}
\item \label{item4} As $\Lambda$ is transitive, there exists $x \in \Lambda$ with $\omega_{f}(x)=\Lambda$; then, by Lemma~\ref{aeroporto} of the Appendix, $\Lambda$ is a perfect set.
We have two possibilities: $\operatorname{interior}(\Lambda)= \emptyset$ or not.
As $\Lambda$ is a subset of $\RR$, if it has empty interior, it is totally disconnected. Consequently, it will be a Cantor set (as we already proved it is compact and perfect).
Suppose then that $\operatorname{interior}(\Lambda) \ne \emptyset$, and let $I\subset\Lambda$ be an open interval. It cannot be a wandering interval, as it is a subset of $\Lambda \subset \Omega(f)$. Then, by Lemma~\ref{homtervals}, there exists $j$ such that $f^{j}(I)\ni c$, and so $c \in \operatorname{interior}(\Lambda)$. This forbids the existence of wandering intervals. Indeed, if there were a wandering interval $J$, it would have to accumulate on the critical point (by Lemma~\ref{LemmaWI}), but this would imply that $f^{n}(J)\cap\Omega(f)\ne\emptyset$ for $n$ sufficiently big, which is absurd. So, as we cannot have wandering intervals, by Corollary~\ref{compon} the set $\UU\setminus\Omega(f)$ has to be empty. As $\UU$ is a finite union of open intervals, this proves the claim of the theorem.
\item Let $\Lambda'=\{x\in\UU;\omega_{f}(x)=\Lambda\}$.
Observe that $x \in \bigcup_{j\ge0}f^{-j}(\Lambda')$ implies that $\omega_{f}(x)=\Lambda$.
As $\Lambda'$ is residual in $\UU$, there exist $A_n$, $n\in\NN$, open and dense sets in $\UU$ such that $\Lambda'=\bigcap_{n\in \NN}A_n$.
On the other hand, for every $n\in\NN$ we have that $\bigcup_{j\ge0}f^{-j}(A_n)$ is an open dense set in $[0,1]$.
Then, $\bigcap_{n\in\NN}\big(\bigcup_{j\ge0}f^{-j}(A_n)\big)$ is residual in $[0,1]$.
So we have that $\bigcup_{j\ge0}f^{-j}\big(\Lambda'\big)=\bigcup_{j\ge0}f^{-j}\big(\bigcap_{n\in\NN}A_n\big)=\bigcap_{n\in\NN}\big(\bigcup_{j\ge0}f^{-j}(A_n)\big)$ is residual.
\item It follows straightforwardly from the previous construction. If $\Lambda$ is a Cantor set, then $\UU\setminus\Omega(f)$ has non-trivial connected components, and Lemma~\ref{homtervals} implies that these are wandering intervals. For the converse, as $\Lambda$ is compact and perfect, if we suppose $\operatorname{interior}(\Lambda)\ne\emptyset$, then, following the same reasoning as in (\ref{item4}), there would be an interval $I$ such that $f^j(I)\ni c$ for some $j$, contradicting the existence of a wandering interval.
\end{enumerate}
\end{proof}
\begin{Lemma}
\label{perdenso} Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a $C^{2}$ non-flat contracting Lorenz map.
Suppose that $f \notin \cl_{Per}\cup\cl_{Sol} \cup \cl_{Che}$. Then $\overline {Per(f)\cap \Lambda}=\Lambda$, with $\Lambda$ as obtained in Theorem \ref{cicloint}.
\end{Lemma}
\begin{proof}
Notice that ${Per(f)\cap \overline\UU}= Per(f)\cap \Lambda$; thus $\Lambda\setminus\overline {Per(f)}=\Lambda\setminus\overline {Per(f)\cap \Lambda}$. Suppose that $\Lambda\setminus\overline {Per(f)}\ne\emptyset$. Let $I$ be a connected component of $\UU\setminus\overline {Per(f)}$ such that $I\cap\Lambda\ne\emptyset$. As $\Lambda$ is perfect and compact, we have that $I\cap\Lambda$ is uncountable. Moreover, as $\{x\in\Lambda;\omega_f(x)=\Lambda\}$ is residual in $\Lambda$, we have that $\{x\in\Lambda;\omega_f(x)=\Lambda\}\cap I$ is uncountable.
Then, the set of points that return infinitely many times to $I$ (that is, $\bigcap_{j\ge0}f^{-j}(I))$ is uncountable.
Let $I^*=\{x\in I;\mbox{$\mathcal{O}$}^+_f(f(x))\cap I\ne\emptyset\}$ be the set of points that return to $I$ and $F:I^*\to I$ the first return map.
Observe that the set of points that return infinitely many times to $I$ is given by
$$
\{x; \#(\mbox{$\mathcal{O}$}^+_f(x)\cap I)=\infty\}=\bigcap_{j\ge0}F^{-j}(I)
$$
This way
\begin{equation}
\label{estdavi}
\bigcap_{j\ge0}F^{-j}(I) \text{ is uncountable.}
\end{equation}
\begin{claim}[a]
If $J$ is a connected component of $I^*$, then $F(J)=I$.
\end{claim}
{\em Proof of the Claim.}
Let $I=(i_0,i_1)$. If $F(J)\ne I$, then let $(t_0,t_1)=F(J)$; in this case $t_0\ne i_0$ or $t_1\ne i_1$. Suppose $t_0\ne i_0$ (the other case is analogous). Let $n=R(J)$ be the return time of $F$ on $J$, so that $F|_J=f^n|_J$.
As $t_0\ne i_0$, there is $0\le s< n$ such that $f^s(t_0)=c$. Then we have that
$$
\#\big(Per(f)\cap f^s(J)\big)=\#\big(Per(f)\cap (c,f^s(t_1))\big)=\infty,
$$
as the periodic points accumulate in both sides of the critical point (Proposition~\ref{Proposition008345678}).
Then $\#\big(Per(f)\cap I\big)\ge\#\big(Per(f)\cap f^n(J)\big)=\infty$, contradicting the fact that $I$ is a connected component of $\UU\setminus\overline {Per(f)}$.
$\square $(end of the proof of the Claim (a))
\begin{claim}[b]
$I^*$ has more than one connected component.
\end{claim}
{\em Proof of the Claim.}
Suppose not; then $I^*$ is an interval, which we write as $(u,v)$, and $F=f^n|_{(u,v)}$ for some $n\in\NN$. This implies that $\bigcap_{j\ge0}F^{-j}(I)=Fix(f^n|_{(u,v)})$.
But this is absurd, as by (\ref{estdavi}) this set would be uncountable, and so the set of periodic points of $f$ would also be uncountable.
$\square $(end of the proof of the Claim (b))
As $F$ has at least two branches covering the full interval $I$, it has infinitely many periodic points, and then $f$ also has infinitely many periodic points in $I$, which is absurd.
\end{proof}
\section{Proof of Theorems \ref{baciastopologicas}, \ref{teoalfalim} and \ref{atratortopologico}}\label{ProofABCD}
Now we will prove the main theorems: Theorems \ref{baciastopologicas}, \ref{teoalfalim} and \ref{atratortopologico}.
\begin{proof}[Proof of Theorem~\ref{baciastopologicas}]
We are supposing that $f$ has no attracting periodic orbit. Besides that, let us consider the following different situations:
\begin{enumerate}
\item Firstly, let us suppose that $\exists \varepsilon>0$ such that $B_\varepsilon(c)\cap Per(f)=\emptyset$. Then $[0,1]\setminus \overline{Per(f)}$ has a connected component $J=(a,b)$ such that $c\in J$.
If $\exists n$ such that $f^n(a)\in J$, then $\exists \varepsilon' >0$ such that $f^n(B_{\varepsilon'}(a))\subset J$.
As $Per(f) \cap B_{\varepsilon'}(a)\neq \emptyset$, then $Per(f)\cap J\neq\emptyset$, in contradiction with the definition of $J$.
Similarly we show that $f^j(b)\not\in J$, $\forall j \in \NN$, and so $J$ is a nice interval.
Lemma \ref{LemmaHGFGH54} states that $a\in Per(f)$ or $a$ is accumulated by periodic points $p_j \in J$, and the same holds for $b$. Since $J$ contains no periodic points, the second alternative cannot occur, and hence $\{a,b\}\subset Per(f)$.
We can also state that $J$ is a renormalization interval: if $f^{period(a)}((a,c))\not\subset(a,b)$, then, by Lemma \ref{Lemma8388881a}, $\exists d \in (a,b)$ such that $f^{period(a)}((a,d))=(a,b)$, that is, $f^{period(a)}(d)=b$, and then $(d,b)$ is nice; but again by Lemma \ref{LemmaHGFGH54}, $d \in Per(f)$ or $\exists p_j \in Per(f)$, $p_j \nearrow d$, which is a contradiction. In the same way, $f((c,b))\subset(a,b)$ and, so, $J$ is a renormalization interval.
As there are no attracting periodic orbits and $J$ is a renormalization interval, it follows from Lemma~\ref{noitenoite} that $\omega_{f}(x)\ni c, \,\forall x \in J$. By a renormalization and Lemma~\ref{atcher} in the Appendix, there is a compact minimal set $\Lambda$ such that $\omega_{f}(x)=\Lambda$, $\forall\,x\in J$. Then this is a Cherry map, according to the equivalency provided by \cite{GT85}, as observed when we defined Cherry maps. Also, as Lemma \ref{Remark98671oxe} assures us that $\{x\,;\,\mbox{$\mathcal{O}$}_{f}^{+}(x)\cap J\ne\emptyset\}$ is an open and dense set, it is not difficult to conclude that $\Lambda$ is a Cherry attractor, and that it attracts a residual set of the interval.
One can observe that all these features of the Cherry attractor could also be obtained using the semi-conjugation with an irrational rotation.
It may occur that the semi-conjugacy is not surjective, meaning the Cherry map has a gap, that is, there is a wandering interval for the considered map.
For the remaining cases we have, then, that for every $\varepsilon>0$ there exists $p\in B_\varepsilon(c)\cap Per(f)$. Among these, the first situation to consider is that of $\Lambda$ being a solenoidal attractor:
\item
As we have defined, there is a set $\Lambda \subset \bigcap_{n=0}^\infty K_n$, where $K_n=\bigcup_{j=0}^{\operatorname{period}(p_{n})}f^{j}((p_n,c))\cup \bigcup_{j=0}^{\operatorname{period}(q_n)}f^{j}((c,q_n))$, $J_n=(p_{n},q_{n})$, $n\in\NN$, and $J_1 \supset J_2 \supset \cdots $ is the chain of renormalization intervals.
It follows from the construction that $c \in \Lambda$. Moreover, it follows from Lemma \ref{Remark98671oxe} that, given a renormalization interval $J_n$, the set of points that eventually visit it, $V_n=\{x; \exists j \text{ such that }f^j(x)\in J_n\}$, is open and dense. Hence there is a residual set $\bigcap_{n=0}^{\infty}V_{n}$ of points that eventually fall into every renormalization interval; that is, $c \in \omega_f(x)$, $\forall x \in \bigcap_{n=0}^{\infty}V_{n}$, and by Theorem \ref{SOLENOIDETH}, $\omega_f(x)=\Lambda$. Hence this residual set is contained in the basin of $\Lambda$, as stated.
\item Now we come to the situation in which $f$ has no periodic attractor, no Cherry attractor, and no solenoidal attractor. It follows from Theorem~\ref{cicloint} that there exists a compact transitive set $\Lambda$, with $f(\Lambda)=\Lambda$, such that $\omega_{f}(x)=\Lambda$ for a residual set of points of $\Lambda$, and whose basin of attraction $\beta(\Lambda):=\{x;\omega_{f}(x)\subset \Lambda\}$ is an open and dense set. Also, $\exists \lambda >0$ such that $\lim_{n\to\infty} \frac{1}{n}\log |Df^n(x)|=\lambda$ for a dense set of points $x$ in $\Lambda$.
Theorem~\ref{cicloint} also gives two possibilities for this setting:
\begin{enumerate}
\item either $\Lambda$ is a finite union of intervals and $\omega_{f}(x)=\Lambda$ for a residual set of $x$ in $[0,1]$
\item or it is a Cantor set and there is a wandering interval.
\end{enumerate}
In both cases, all that remains to complete the proof of the theorem is to show that $\Lambda$ is a chaotic attractor; for this, it only remains to prove that periodic orbits are dense in it ($\overline {Per(f)\cap \Lambda}=\Lambda$) and that its topological entropy $h_{top}(f|_\Lambda)$ is positive. The condition on the periodic points follows from Lemma~\ref{perdenso}.
The fact that the topological entropy is positive can be obtained by taking arbitrarily small nice intervals whose boundary points are non-periodic (e.g., pre-periodic points), and by observing that the returns to such an interval provide at least two full branches, which create subshifts with positive entropy.
\end{enumerate}
\end{proof}
\begin{proof}[Proof of Theorem~\ref{teoalfalim}]
The existence of a single topological attractor is given by Theorem~\ref{cicloint}. If $\Lambda$ is a Cherry attractor and $f$ does not have a wandering interval, then there is an interval $[a,b]$ such that (identifying $a$ and $b$) the first return map to $[a,b]$, $F:[a,b]\to[a,b]$, is conjugate to an irrational rotation. In particular $\alpha_{f}(x)\supset\alpha_{F}(x)=[a,b]=\omega_{F}(x)\subset\omega_{f}(x)$, $\forall\,x\in[a,b]$. Furthermore, the attractor for the map $f$, $\Lambda$, is given by the itinerary of the interval $[a,b]$, that is, $\Lambda=[a,b]\cup \bigcup_{j=0}^{\ell-1}f^{j}([f(a),f(c_{-})])\cup \bigcup_{j=0}^{r-1}f^{j}([f(c_{+}),f(b)])$, where $\ell$ and $r$ are the smallest integers such that $f^{\ell}((a,c))\subset(a,b)\supset f^{r}((c,b))$. So, $$\alpha_{f}(x)\supset\Lambda\subset\omega_{f}(x)\,\,\forall\,x\in\Lambda.$$ In particular, $$\alpha_{f}(x)\supset(a,b)\subset\omega_{f}(x)\,\,\forall\,x\in\Lambda.$$
Considering $V_{(a,b)}=\{x\in[0,1]\,;\, \exists j \text{ such that }f^j(x)\in(a,b) \}$, Lemma \ref{Remark98671oxe} assures us that this set is open and dense, and then we get $\alpha_{f}(x)=[0,1]$, $\forall\,x\in\Lambda$.
If $\Lambda$ is a Solenoid, then $\Lambda\subset\bigcap_{n=0}^\infty K_n$, where $$K_n=\bigg(\bigcup_{j=0}^{\operatorname{period}(a_{n})}f^j([a_{n},c))\bigg)\cap\bigg(\bigcup_{j=0}^{\operatorname{period}(b_{n})}f^{j}((c,b_{n}])\bigg)$$ and $\{J_n=(a_{n},b_{n})\}_{n}$ is an infinite nested chain of renormalization intervals. Given any $x\in K_{J_{n}}$ and $y \in \Lambda_{J_{n}}$, there are $w\in J_n$ and $\ell \in \NN$ such that $f^\ell(w)=x$. By Lemma \ref{Lemma549164}, for any given $\varepsilon>0$ there exists $z \in B_\varepsilon(y)$ such that $f^k(z)=x$ for some $k>0$. Then, $\alpha_{f}(x)\supset \Lambda_{J_{n}}$, $\forall n \in \NN$. If $f$ does not have any wandering interval, it is easy to show that $\bigcup_{n\ge0}\Lambda_{J_{n}}$ is dense in $[0,1]$: suppose it is not; then there exists an open interval $U\subset [0,1]\setminus \bigcup_{n\ge0}\Lambda_{J_{n}}$. As $f$ has no wandering intervals, Lemma~\ref{homtervals} provides $j$ such that $f^j(U)\ni c$; take $j$ minimal with this property. As $f^j(U)$ is an open neighborhood of $c$, it contains $\overline{J_m}$ for $m$ big enough, where $J_m$ is a renormalization interval. Then, there exist $s,t\in U$ such that $f^j(s)=a_m$ and $f^j(t)=b_m$, which is in contradiction with the definition of $U$, as $\{s,t\}\subset \Lambda_{J_{m}}$. As $\alpha_{f}(x)\supset \Lambda_{J_{n}}$ and $\bigcup_{n\ge0}\Lambda_{J_{n}}$ is dense in $[0,1]$, we have proved that $\alpha_{f}(x)=[0,1]$, $\forall\,x\in\Lambda$.
Finally, if $\Lambda$ is not a Cherry or a Solenoid attractor, the proof follows from Corollary~\ref{sobrealfa} and items (\ref{opcoes}) and (\ref{cantor}) of Theorem~\ref{baciastopologicas}. Indeed, as we are assuming that $f$ does not have wandering intervals, it follows from items (\ref{opcoes}) and (\ref{cantor}) of Theorem~\ref{baciastopologicas} that $\Lambda$ is a cycle of intervals. By Corollary~\ref{sobrealfa} and the fact that $\Lambda=\overline{\UU\cap\Omega(f)}$, we get $\alpha_{f}(x)\supset\Lambda$. As $\Lambda$ contains an open neighborhood of $c$, it follows that the set of $x\in[0,1]$ such that $\mbox{$\mathcal{O}$}_{f}^{+}(x)\cap\Lambda\ne\emptyset$ contains an open and dense set. Thus, $\bigcup_{j\ge0}f^{-j}(\Lambda)$ is dense and so $\alpha_{f}(x)$ is dense $\forall\,x\in\UU$. As the $\alpha$-limit is a closed set, $\alpha_{f}(x)=[0,1]$ for all $x\in\UU$. If $\Lambda=\overline{\UU\cap\Omega(f)}=\UU\cap\Omega(f)\subset\UU$, the proof is done. On the other hand, if $\Lambda\not\subset\UU$, then $\Lambda\setminus\UU\subset(\mbox{$\mathcal{O}$}_{f}^{+}(c_{-})\cap\mbox{$\mathcal{O}$}_{f}^{+}(c_{+}))$. But, as it was defined in the beginning of Section~\ref{MainResults}, $c\in f^{-1}(f(c_{-}))$ and also $c\in f^{-1}(f(c_{+}))$. Thus, $\alpha_{f}(\Lambda\setminus\UU)\supset\alpha_{f}(c)=[0,1]$ (because $c\in\UU$).
\end{proof}
\begin{proof}[Proof of Theorem~\ref{atratortopologico}]
The first statement of the theorem follows straightforwardly from Proposition~\ref{DOISMAX}.
Items (1),(2) and (3) repeat what is said in Theorem~\ref{baciastopologicas}.
In case (4) we have the existence of wandering intervals, so let us consider $V$, the union of all wandering intervals. Lemma~\ref{DenWanInt} says that this set is open and dense in $[0,1]$, and Corollary~\ref{stpremovido} gives the structure of the set $\Lambda$.
\end{proof}
\section{Appendix}
\begin{Lemma}\label{dicotomia}
If $f:U \to \XX$ is a continuous map defined on an open and dense subset $U$ of a compact metric space $\XX$, then either there is no $x \in U$ such that $\omega_{f}(x)=\XX$, or $\omega_{f}(x)=\XX$ for a residual set of $x \in \XX$.
\end{Lemma}
\begin{proof}
Suppose that $\mbox{$\mathcal{O}$}^{+}_{f}(p)$ is dense in $\XX$ for some $p\in \bigcap_{j\ge0}f^{-j}(U)$. Write $p_{\ell}=f^{\ell}(p)$. For each $n,\ell\in\NN$ there is some $k_{n,\ell}$ such that $\{p_{\ell},\dots,f^{k_{n,\ell}}(p_{\ell})\}$ is $(1/2n)$-dense. As $f$ is continuous and $U$ open, there is some $r_{n,\ell}>0$ such that $f^{j}(B_{r_{n,\ell}}(p_{\ell}))\subset B_{1/2n}(f^{j}(p_{\ell}))$, $\forall\,0\le j\le k_{n,\ell}$. Thus, $\{y,\dots,f^{k_{n,\ell}}(y)\}$ is $(1/n)$-dense $\forall\,y\in B_{r_{n,\ell}}(p_{\ell})$. Let $$\XX_{n}=\{x\in\XX\,;\,\mbox{$\mathcal{O}$}^{+}_{f}(x)\text{ is }(1/n)\text{-dense}\}.$$ Therefore $\bigcup_{\ell\in\NN}B_{r_{n,\ell}}(p_{\ell})\subset\XX_{n}$ is an open and dense set. Furthermore, $$\bigcap_{n\in\NN}\bigcup_{\ell\in\NN}B_{r_{n,\ell}}(p_{\ell})$$ is a residual set contained in $\bigcap_{n\in\NN}\XX_{n}=\{x\in\XX\,;\,\omega_{f}(x)=\XX\}$.
\end{proof}
\begin{Lemma}
\label{aeroporto}
Let $\XX$ be a compact metric space and $f:U\to\XX$ be a continuous map defined in a subset $U$. If $x\in\bigcap_{n\ge0}f^{-n}(U)$ and $x\in\omega_f(x)$, then either $\mbox{$\mathcal{O}$}_f^+(x)$ is a periodic orbit (in this case $\omega_f(x)=\mbox{$\mathcal{O}$}_f^+(x)$) or $\omega_{f}(x)$ is a perfect set.
\end{Lemma}
\begin{proof}
Suppose that there exists an isolated point $p \in \omega_{f}(x)$, say $B_{\varepsilon}(p)\cap\omega_{f}(x)=\{p\}$ with $\varepsilon>0$. As $x\in\omega_{f}(x)$ and $f$ is continuous on $\mbox{$\mathcal{O}$}^{+}_{f}(x)$, we have $\mbox{$\mathcal{O}$}^{+}_{f}(x)\subset\omega_{f}(x)$. Thus, $\mbox{$\mathcal{O}$}^{+}_{f}(x)\cap(B_{\varepsilon}(p)\setminus\{p\})=\emptyset$.
As $p \in \omega_{f}(x)$, there exists a sequence $n_{j}\nearrow \infty$ such that $f^{n_{j}}(x)\to p$. Taking $j$ big enough we have $f^{n_{j}}(x)\in B_{\varepsilon}(p)$, and then $f^{n_{j}}(x)=p$ for all big $j$; hence $f^{n_{j+1}-n_{j}}(f^{n_{j}}(x))=p=f^{n_{j}}(x)$, that is, $f^{n_{j}}(x)$ is periodic. As $x \in \omega_{f}(x)=\omega_f(f^{n_{j}}(x))=\mbox{$\mathcal{O}$}^{+}_f(f^{n_{j}}(x))$, we have that $x$ is periodic.
\end{proof}
\begin{Corollary}
\label{CORaeroporto}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a contracting Lorenz map. If $c_-\in\omega_f(c_-)$, then either $f$ has a super-attractor containing $c_-$ or $\omega_f(c_-)$ is a perfect set. Analogously, if $c_+\in\omega_f(c_+)$, then either $f$ has a super-attractor containing $c_+$ or $\omega_f(c_+)$ is a perfect set.
\end{Corollary}
\begin{proof}
Suppose that $f$ does not have a super-attractor containing $c_-$. Thus, $v_1:=f(c_-)\notin\mbox{$\mathcal{O}$}_f^-(c)$. In this case, $\mbox{$\mathcal{O}$}_f^+(c_-)=\{c\}\cup\mbox{$\mathcal{O}$}_f^+(v_1)$ (recall the definition of $\mbox{$\mathcal{O}$}_f^+(c_-)$ in the beginning of Section~\ref{MainResults}). Note that $v_1\in\omega_f(v_1)$, because $c_-\in\omega_f(c_-)$. As $v_1$ cannot be a periodic point and as $v_1\in\bigcap_{n\ge0}f^{-n}([0,1]\setminus\{c\})$, it follows from Lemma~\ref{aeroporto} that $\omega_f(v_1)$ is a perfect set. As $\omega_f(c_-)=\omega_f(v_1)$ (because $c\in\overline{\omega_f(v_1)\cap(0,c)}$), we finish the proof.\end{proof}
\begin{Corollary}
\label{CORaeroporto2}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a contracting Lorenz map without periodic attractors. Suppose $\omega_f(c_-)\ni c\in\omega_{f}(c_{+})$. If $\overline{\mbox{$\mathcal{O}$}_{f}^{+}(p)\cap(0,c)}\ni c\in\overline{(c,1)\cap\mbox{$\mathcal{O}$}_{f}^{+}(p)}$, $p\in(0,1)\setminus\{c\}$, then $\omega_{f}(p)$ is a perfect set and $\overline{\omega_{f}(p)\cap(0,c)}\ni c\in\overline{(c,1)\cap\omega_{f}(p)}$.\end{Corollary}
\begin{proof}
It follows from Corollary~\ref{CORaeroporto} that $\omega_f(c_-)$ and $\omega_f(c_+)$ are perfect sets. Furthermore, $\overline{\omega_f(c_-)\cap(0,c)}\ni c\in\overline{(c,1)\cap\omega_f(c_+)}$.
If $\overline{\mbox{$\mathcal{O}$}_{f}^{+}(p)\cap(0,c)}\ni c\in\overline{(c,1)\cap\mbox{$\mathcal{O}$}_{f}^{+}(p)}$
then $\omega_f(p)\supset\omega_f(c_-)\cup\omega_f(c_+)$ and so,
$\overline{\omega_f(p)\cap(0,c)}\ni c\in\overline{(c,1)\cap\omega_f(p)}$.
Now suppose that $\omega_{f}(p)$ is not perfect. Thus, there is $q\in\omega_{f}(p)$ and $\delta>0$ such that $B_{\delta}(q)\cap\omega_{f}(p)=\{q\}$. Let $J=(a,b)$ be the connected component of $[0,1]\setminus\big(\omega_{f}(p)\setminus\{q\}\big)$ containing $q$. Note that $a,b\in\omega_{f}(p)\cup\{0,1\}$.
\end{proof}
\subsubsection{The attractor for Cherry maps}\label{SecAtCherry}
\begin{Lemma}\label{atcher}
Let $f:[0,1]\setminus\{c\}\to[0,1]$ be a contracting Lorenz map without super-attractors. If $c\in\omega_f(x)$, $\forall\,x\in(0,1)$, then there exists a compact set $\Lambda\subset(0,1)$ such that $\omega_f(x)=\Lambda$, $\forall\,x\in(0,1)$. In particular, $\Lambda$ is a minimal set.
\end{Lemma}
\begin{proof}
As $f$ does not have a super-attractor and $c\in\omega_f(x)$, $\forall\,x\in(0,1)$, we get $$Per(f)=\{0,1\}.$$
Note also that $f([0,c))\ni c\in f((c,1])$, because $c\in\omega_f(x)$, $\forall\,x\in(0,1)$.
Taking $(a,b)=(0,1)$ in Lemma~\ref{Lemma545g55}, we conclude that $\mbox{$\mathcal{O}$}_f^+(x)\cap(0,c)\ne\emptyset\ne(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)$, $\forall\,x\in(0,1)$.
So, by Lemma \ref{omegaemc} we get
\begin{equation}\label{EqCL}
\overline{\mbox{$\mathcal{O}$}_f^+(x)\cap(0,c)}\ni c\in \overline{(c,1)\cap\mbox{$\mathcal{O}$}_f^+(x)},\,\;\forall\,x\in(0,1).
\end{equation}
As a consequence,
\begin{equation}\label{Eq323130}\omega_f(x)\supset\omega_f(c_-)\cup\omega_f(c_+),\,\;\forall\,x\in(0,1).
\end{equation}
In particular, $$c_-\in\omega_f(c_-)\text{ and }c_+\in\omega_f(c_+).$$
Thus, it follows from Corollary~\ref{CORaeroporto2} that
\begin{equation}\label{EqUU21}
\overline{\omega_f(x)\cap(0,c)}\ni c\in\overline{(c,1)\cap\omega_f(x)}\,\,\;\forall\,x\in(0,1).
\end{equation}
Now we will prove that $\omega_f(p)=\omega_f(q)$, $\forall\,p,q\in(0,1)$. If this is not true, then there exist $p,q\in(0,1)$ such that $\omega_f(q)\setminus\omega_f(p)\ne\emptyset$. Let $y\in \omega_f(q)\setminus\omega_f(p)$. Set $[\alpha,\beta]=\big[\min\omega_f(p),\max\omega_f(p)\big]$ (indeed, $[\alpha,\beta]=[f(c_{+}),f(c_{-})]$). It is easy to see that $f([\alpha,\beta])=[\alpha,\beta]$. As $c\in (\alpha,\beta)$ (by (\ref{EqCL})) and as $c\in\omega_f(x)$, $\forall\,x$, we get $\omega_f(x)\subset[\alpha,\beta]$, $\forall\,x\in(0,1)$. As a consequence, $y\in(\alpha,\beta)$.
Let $J=(a,b)$ be the connected component of $[0,1]\setminus\omega_f(p)$ containing $y$. As $y\in(\alpha,\beta)$, we get $a,b\in\omega_f(p)$. As $y\in \omega_f(q)\cap J$, one can find $0\le n_1<n_2$ such that $f^{n_1}(q),f^{n_2}(q)\in(a,b)$. We may suppose that $f^{n_1}(q)<f^{n_2}(q)$ (the case $f^{n_1}(q)>f^{n_2}(q)$ is analogous).
Let $T:=(t,f^{n_1}(q)]$ be the maximal interval contained in $(a,f^{n_1}(q)]$ such that $f^{n_2-n_1}|_T$ is a homeomorphism and that $f^{n_2-n_1}(T)\subset (a,f^{n_2}(q)]$.
\begin{Claim}
$f^{n_2-n_1}(T)=(a,f^{n_2}(q)]$
\end{Claim}
\begin{proof}[Proof of the claim]
If not, there are two possible cases: (1) $f^{s}(t)=c$ for some $0\le s<n_2-n_1$, or (2) $t=a$ and $a<f^{n_2-n_1}(a)<f^{n_{2}}(q)$. As $a<f^{n_{2}-n_{1}}(a)<f^{n_{2}}(q)$ would imply that $\omega_{f}(p)\cap J\ne\emptyset$, which contradicts the fact that $J\subset[0,1]\setminus\omega_{f}(p)$, we only have to analyze the first case.
Thus, $f^s(T)\cap\omega_f(p)=(c,f^{s}(f^{n_{1}}(q)))\cap\omega_f(p)\ne\emptyset$ (because of (\ref{EqUU21})). But this implies that $J\cap\omega_f(p)\supset f^{n_2-n_1}(T)\cap\omega_f(p)\supset f^{n_2-n_1-s}(f^s(T)\cap\omega_f(p))\ne\emptyset$, which is absurd, as $J\subset[0,1]\setminus\omega_f(p)$.\end{proof}
It follows from the claim above that $f^{n_2-n_1}(T)=(a,f^{n_2}(q)]\supset T$. This implies that $f$ has a periodic point in $\overline{T}$ (because $f^{n_2-n_1}|_{T}$ is a homeomorphism). But this is a contradiction with the fact that $Per(f)\cap(0,1)=\emptyset$.
\end{proof}
\end{document}
\begin{document}
\title{Jump-induced mixed-mode oscillations through piecewise-affine maps}
\begin{abstract}
Mixed-mode oscillations (MMOs) are complex oscillatory patterns in which large-amplitude relaxation oscillations (LAOs) alternate with small-amplitude oscillations (SAOs).
MMOs are found in singularly perturbed systems of ordinary differential equations of slow-fast type, and are typically related to the presence of so-called folded singularities and the corresponding canard trajectories in such systems.
Here, we introduce a canonical family of three-dimensional slow-fast systems that exhibit MMOs which are induced by relaxation-type dynamics, and which are hence based on a ``jump mechanism", rather than on a more standard canard mechanism.
In particular, we establish a correspondence between that family and a class of associated one-dimensional piecewise affine maps (PAMs) which exhibit MMOs with the same signature.
Finally, we give a preliminary classification of admissible mixed-mode signatures, and we illustrate our findings with numerical examples.
\end{abstract}
\section{Introduction} \label{intro}
In the theory of dynamical systems, one is generally interested in the qualitative behaviour of solutions of differential equations.
Thus, for instance, one investigates bifurcations of equilibria and periodic orbits in dependence of parameters in these systems.
In this paper, we focus on singularly perturbed three-dimensional systems of ``slow-fast" type, with two slow variables and one fast variable.
Such systems are characterised by the variables evolving on different time-scales, which can, in some circumstances, give rise to canard phenomena.
Canards \cite{BENOIT} arise when trajectories of a singularly perturbed system follow an attracting manifold, pass through a folded singularity and then -- somewhat counterintuitively -- stay close to a repelling slow manifold for some time.
In planar slow-fast systems, the canard phenomenon is often linked to the presence of a (singular) Hopf bifurcation at a turning (fold) point; one typical example is given by the singularly perturbed van der Pol equation \cite{DGKKOW12,DR96}.
Canards have been studied extensively over the past decades; their study has mainly been based on non-standard analysis \cite{BCDD81,D84, D94}, matched asymptotic expansions, and a geometric approach that combines Fenichel's geometric singular perturbation theory (GSPT) \cite{F72,F79}
and the so-called blow-up technique, which was introduced in the pioneering works of Dumortier and Roussarie \cite {DR96}, as well as of Krupa and Szmolyan \cite {KS01}.
In three-dimensional slow-fast systems with two slow variables, the canard phenomenon can give rise to mixed-mode oscillatory dynamics. Mixed-mode oscillations (MMOs) typically consist of
large-amplitude oscillations (LAOs) of relaxation type, followed by small-amplitude oscillations (SAOs). While no generally accepted rigorous definition of MMOs seems to exist, a clear, intuitive separation between LAOs and SAOs seems to be evident in most cases;
what draws immediate attention is the pattern that emerges in the alternation between oscillations
of distinct amplitudes. Specifically, a (periodic) MMO is said to have {\em signature} $L^s$ if
the corresponding orbit undergoes $s$ SAOs, followed by $L$ LAOs, at which point
that sequence repeats. See \cite{DGKKOW12} for a recent review of this complex oscillatory dynamics, as well as \cite{FG11,JMBNR13,chemists,MN09,MSLG97,VL97} for a small selection of biological, chemical, and physical models in which a variety of MMO patterns have been observed.
Among the numerous mechanisms that have been proposed to explain mixed-mode oscillatory dynamics in singularly perturbed systems of slow-fast type, the canard-based mechanism \cite{BKW06,KPK08,W05} has been among the most popular.
Roughly speaking, it combines local passage through the vicinity of a folded singularity -- which explains the SAO component of the corresponding MMO -- with a global return mechanism which results in relaxation (LAO), returning the flow to the basin of attraction of the folded singularity \cite{DGKKOW12}. In the present paper, we introduce an alternative mechanism for the generation of mixed-mode dynamics in three-dimensional slow-fast systems, which we will refer to as the ``jump mechanism". In the process, we will show that the occurrence of MMOs is not necessarily caused by the presence of a folded singularity, as in the canard-based mechanism; in fact, the main characteristic of the MMOs studied in this paper is that
both LAOs and SAOs are now of relaxation type and that the amplitude of the latter is thus of order $O(1)$ in the singular perturbation parameter.
Our study is inspired by previous work of Szmolyan and Wechselberger \cite{SW04}, Krupa, Popovi\'c, and Kopell \cite{KPK08}, and Rajpathak, Pillai, and Bandyopadhyay \cite{RPB12}. By considering a prototypical family of slow-fast systems which incorporates two jump mechanisms of the type studied in \cite{SW04}, we reproduce MMOs that alternate between LAOs and SAOs of relaxation type; see Figure~\ref{fig:assumptions} for an illustration of the resulting geometry, as well as Section~\ref{sec:assumptions} for a precise definition of our family. As we rely on established results from \cite{SW04}, we do not explicitly need to perform a family blow-up in order to desingularise the flow near fold curves along which normal hyperbolicity is lost.
In the process, we reduce the study of mixed-mode dynamics in our prototypical family to that of one-dimensional piecewise affine maps (PAMs) \cite{BKYY2000,DRBA08,PRV16,YO96}. In particular, we show that the singular limit of the corresponding first return (or Poincar\'e) map yields a PAM; see Proposition \ref{return-limit-prop}. Piecewise maps \cite{DIBERNARDO} have been popularised in the study of dynamical systems in recent decades, with a particular focus on models for switching phenomena such as electrical circuits \cite{H00,IK2020,SSI15} and neurons \cite{IMMTDZ11,JMBNR13, GKE17};
such maps are naturally related to the corresponding Poincar\'e maps in oscillatory systems.
Thus, we establish a natural two-way correspondence between the family of three-dimensional slow-fast systems studied here and a suitably defined class of one-dimensional PAMs which is associated to the reduced flow on the critical manifold of that family. Specifically, we show that a slow-fast system which satisfies the assumptions in Section~\ref{sec:assumptions} exhibits a periodic MMO of a given signature $L^s$ if the PAM which is associated to that system exhibits a periodic
MMO with the same signature (Theorem \ref{association_thm}).
Conversely, we show that any PAM within a very broad class of maps is associated to a slow-fast system within the family defined in Section~\ref{sec:assumptions} (Theorem~\ref{existence_thm}).
This paper is organised as follows. In Section~\ref{sec:assumptions}, we define the
three-dimensional family of slow-fast systems which underlies our results; in particular, we
introduce the jump-type mechanism that gives rise to mixed-mode dynamics in our context.
We state our main results in Section~\ref{statement of the results}; before proving those in
Section~\ref{section-proofs-all}, we explain how to compute the associated PAM (Section~\ref{sec:derivePAM}).
In Section~\ref{sec:numerics}, we consider a particular representative from our family of systems to verify our findings numerically, and we present relevant simulations. Moreover, we give numerical evidence of mixed, ``crossover" signatures.
Finally, we end our work with a concluding discussion of our findings, as well as with an outlook to potential future research endeavours.
\section{Slow-fast model and assumptions}\label{sec:assumptions}
We study MMOs in the context of the following canonical three-dimensional family of slow-fast systems in the standard form of geometric singular perturbation theory,
\begin{subequations}\label{VFfast}
\begin{align}
x' &=y-F(x,z,\epsilon,\delta)=:f(x,y,z,\epsilon,\delta), \label{VFfast-a} \\
y' &=\epsilon g_1(x,y,z,\epsilon,\delta), \label{VFfast-b} \\
z' &= \epsilon g_2(x,y,z,\epsilon,\delta); \label{VFfast-c}
\end{align}
\end{subequations}
here, $f$, $g_1$, $g_2$ are $C^{\infty}$-smooth functions in their arguments that will be specified in the following and $\epsilon \geq 0$ is a (small) singular
perturbation parameter.
We emphasise that \eqref{VFfast} contains an additional parameter $\delta$, the relevance of which will become evident below.
Correspondingly, $x \in \mathbb{R}$ is a fast variable, while $(y,z) \in \mathbb{R}^{2}$ are slow variables, all of which depend on the fast time $t$.
To avoid unnecessary abstraction, we have assumed that $f$ is of the specific form $f=y-F$ in \eqref{VFfast-a}; that assumption is made without loss of generality and does not represent a major restriction. Our study
will chiefly be based on Fenichel's geometric singular perturbation theory (GSPT) \cite{F72,F79}; an excellent introduction can be found in \cite{JONES}.
Rewriting the above
{\it fast system} in terms of the slow time variable $\tau=\epsilon t$, we obtain the equivalent
{\it slow system}
\begin{subequations}\label{VFslow}
\begin{align}
\epsilon\dot{x} &=y-F(x,z,\epsilon,\delta), \\
\dot{y} &=g_1(x,y,z,\epsilon,\delta), \\
\dot{z} &=g_2(x,y,z,\epsilon,\delta),
\end{align}
\end{subequations}
where the overdot denotes differentiation with respect to $\tau$.
In the singular limit of $\epsilon=0$, the above systems yield the {\it layer problem}
\begin{subequations}\label{VFlayer}
\begin{align}
x' &=y-F(x,z,0,\delta), \\
y' &=0, \\
z' &=0
\end{align}
\end{subequations}
and the {\it reduced problem}
\begin{subequations}\label{VFreduced}
\begin{align}
0 &=y-F(x,z,0,\delta), \\
\dot{y} &=g_1(x,y,z,0,\delta), \\
\dot{z} &=g_2(x,y,z,0,\delta),
\end{align}
\end{subequations}
respectively. In particular, \eqref{VFreduced} allows us to define the ($\delta$-family of) critical manifolds $\mathcal{S}:= \lbrace (x,y,z) \in \mathbb{R}^{3}: f(x,y,z,0,\delta)=0 \rbrace $, which is of central importance in GSPT: the sign of $\frac{\partial f}{\partial x}$ determines the stability of the steady states of the layer problem in \eqref{VFlayer}, which are located on $\mathcal{S}$.
Specifically, orbits that are initiated away from $\mathcal{S}$ will converge to attracting branches of the critical manifold under the layer flow of \eqref{VFlayer}; on $\mathcal{S}$,
they will then be subject to the reduced flow of \eqref{VFreduced}.
Away from zeros of $\frac{\partial f}{\partial x}$, the critical manifold $\mathcal{S}$ is {\it normally hyperbolic};
by Fenichel's First Theorem, normally hyperbolic segments of $\mathcal{S}$ will perturb, for $\epsilon$ positive and sufficiently small, to a slow manifold $\mathcal{S}_\epsilon$ \cite{F79}.
Correspondingly, the reduced flow on $\mathcal{S}$ will perturb in a regular fashion to the slow flow on $\mathcal{S}_\epsilon$.
Likewise, the fast flow of \eqref{VFfast} will be a regular perturbation of the layer flow off $\mathcal{S}$.
A canonical scenario in which normal hyperbolicity is lost is found at so-called fold curves in \eqref{VFfast}, where orbits exhibit jumping behaviour.
The reduced flow on $\mathcal{S}$ is directed towards (attraction) or away from (repulsion) these fold curves, which results in orbits having to jump to a different segment of the critical manifold there.
When such behaviour occurs in a periodic fashion, {\it relaxation oscillation} is observed.
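To fix ideas, the following minimal sketch (in Python) integrates a system of the form \eqref{VFslow} for illustrative choices of $F$, $g_1$ and $g_2$; these specific functions are assumptions made only for demonstration and are not the canonical family analysed below. Plotting $x$ against $\tau$ for such a choice displays the jumps at the fold curves described above.
\begin{verbatim}
# Minimal numerical sketch of the slow system for illustrative (assumed) choices
# of F, g_1, g_2.  The quintic F gives a critical manifold with four folds at
# x = -2, -1, 1, 2, so orbits jump between attracting sheets as in the text.
import numpy as np
from scipy.integrate import solve_ivp

eps, delta, z0 = 0.01, 0.1, 0.0

def F(x, z):
    return x**5 / 5.0 - 5.0 * x**3 / 3.0 + 4.0 * x + z   # F_x = (x^2-1)(x^2-4)

def rhs(tau, u):
    x, y, z = u
    g1 = 1.5 - x                           # illustrative slow drift in y
    g2 = delta * 1.0 - 0.5 * (z - z0)      # of the form delta*G + (z - z0)*H
    return [(y - F(x, z)) / eps, g1, g2]   # eps * dx/dtau = y - F(x, z)

sol = solve_ivp(rhs, (0.0, 30.0), [2.5, 0.0, 0.05], method="LSODA", max_step=0.01)
x = sol.y[0]
print("x(tau) visits the range [%.2f, %.2f]" % (x.min(), x.max()))
# whether a genuine MMO pattern appears depends on the choices of F, g_1, g_2
# and on the assumptions formulated below
\end{verbatim}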
The emergence of fold-induced relaxation oscillation in three-dimensional slow-fast systems was studied in detail in \cite{SW04}, where the desingularisation technique known as ``blow-up" \cite{DR96} was applied to remedy the loss of normal hyperbolicity.
We follow the same approach here and proceed to make the following (analogous) assumptions on the singular geometry of Equation~\eqref{VFfast}.
\begin{assumption}\label{A1}
Possibly restricting to a part of phase space for $z\in (z_{\rm min},z_{\rm max})$, we assume that the critical manifold $\mathcal{S}$ is $\xi$-shaped, or ``Bactrian''-shaped; see Figure~\ref{fig:assumptions}. In other words, $\mathcal{S}$ can be written as
\begin{align*}
\mathcal{S} = \mathcal S_{a_{1}} \cup L_{1} \cup \mathcal S_{r_{1}} \cup L_{2} \cup \mathcal S_{a_{2}} \cup L_{3} \cup \mathcal S_{r_{2}} \cup L_{4} \cup \mathcal S_{a_{3}},
\end{align*}
where $\mathcal S_{a_{1}} \cup \mathcal S_{a_{2}} \cup \mathcal S_{a_{3}} = \mathcal{S}\cap \lbrace \frac{\partial f}{\partial x}(x,y,z,0, \delta)<0 \rbrace $
and $\mathcal S_{r_{1}} \cup \mathcal S_{r_{2}} = \mathcal{S} \cap \lbrace \frac{\partial f}{\partial x}(x,y,z,0,\delta)>0 \rbrace $ denote the normally attracting and normally repelling segments of
$\mathcal S$, respectively, which are divided by four ($\delta$-families of) fold curves along which normal hyperbolicity is lost, denoted $L_{1}$, $L_{2}$, $L_{3}$ and $L_{4}$ from left to right. These fold curves can be written as graphs
\begin{align*}
L_{i} := \lbrace (x,y,z)\in \mathcal{S}: (x,y,z)=(\nu _{i}(z,\delta),\phi(\nu _{i}(z,\delta),z),z)\rbrace\quad \text{for }i=1,2,3,4;
\end{align*}
here, $\phi$ and $\nu_{i}$ ($i=1,2,3,4$) are appropriately defined functions along which the non-degeneracy conditions
\begin{align*}
\frac{\partial f}{\partial x}\big(\nu _{i}(z,\delta),\phi(\nu _{i}(z,\delta),z),z, 0, \delta\big)=0\quad\text{and}\quad
\frac{\partial^2 f}{\partial x^2}\big(\nu _{i}(z,\delta),\phi(\nu _{i}(z,\delta),z),z, 0, \delta\big)\neq 0
\end{align*}
are satisfied.
As $\nu _{1}<\nu _{2}<\nu _{3}<\nu _{4}$, by assumption, the normally attracting segments of $\mathcal S$ can equally be represented as
\begin{align*}
\mathcal S_{a_{1}} &:= \lbrace (x,y,z)\in \mathcal{S} \cap \lbrace x<\nu _{1} \rbrace \rbrace,\quad
\mathcal S_{a_{2}} := \lbrace (x,y,z)\in \mathcal{S} \cap \lbrace \nu _{2}<x<\nu _{3} \rbrace \rbrace,\quad\text{and} \\
\mathcal S_{a_{3}} &:= \lbrace (x,y,z)\in \mathcal{S} \cap \lbrace x>\nu _{4} \rbrace \rbrace,
\end{align*}
while the normally repelling ones are given by
\begin{align*}
\mathcal S_{r_{1}} := \mathcal{S}\cap \lbrace (x,y,z): \nu _{1}<x<\nu _{2} \rbrace\quad\text{and}\quad
\mathcal S_{r_{2}} := \mathcal{S}\cap \lbrace (x,y,z): \nu _{3}<x<\nu _{4} \rbrace.
\end{align*}
\end{assumption}
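As a concrete illustration of Assumption~\ref{A1}, the following short sketch verifies the fold and non-degeneracy conditions for the same illustrative (assumed) nonlinearity $F(x,z)=x^{5}/5-5x^{3}/3+4x+z$ used in the sketch above; since $z$ enters additively there, the fold positions $\nu_i$ do not depend on $z$ in this example.
\begin{verbatim}
# Sketch: fold and non-degeneracy check for the illustrative F above.
# Folds are the zeros of F_x; non-degeneracy requires F_xx != 0 there.
import numpy as np

Fx  = np.poly1d([1, 0, -5, 0, 4])    # F_x  = x^4 - 5x^2 + 4
Fxx = Fx.deriv()                     # F_xx = 4x^3 - 10x

nu = np.sort(Fx.roots.real)          # fold x-values nu_1 < nu_2 < nu_3 < nu_4
for i, x in enumerate(nu, start=1):
    print(f"nu_{i} = {x:+.1f},  F_xx(nu_{i}) = {Fxx(x):+.1f}  (nonzero: fold)")
\end{verbatim}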
\begin{assumption}[Normal switching condition]\label{A2}
We assume that
\begin{equation}\label{eq:nsc}
f_{y} g_{1} + f_{z} g_{2} \Big\rvert_{p \in L_{i}} \neq 0\qquad\text{for }i=1,2,3,4,
\end{equation}
i.e., that any fold point $p$ on $L_i$ is a jump point.
In other words, \eqref{eq:nsc} asserts that the reduced flow of \eqref{VFreduced} is unbounded on the fold lines $L_i$ and that orbits must hence jump there.
It is therefore required that the reduced flow on both sides of the fold lines $L_{i}$ ($i=1,3,4$) is transverse to, and directed towards, the fold lines $L_{i}$ at all times.
\end{assumption}
We define by $\omega(L_{2})$ the projection of the fold line $L_{2}$ onto the attracting sheet $\mathcal S_{a_{1}}$ of $\mathcal{S}$; moreover, we define $\omega(L_{1})$ and $\omega(L_{3})$ as the projections of the fold lines $L_{1}$ and $L_{3}$ onto $\mathcal S_{a_{3}}$. Likewise, we define the projection $\omega(L_{4})$ of the fold line $L_{4}$ onto both $\mathcal{S}_{a_{1}}$ and $\mathcal{S}_{a_{2}}$. (In spite of the suggestive notation, these projections should not be confused with $\omega$-limit sets of these fold lines.) Then, we assume the following.
\begin{assumption}[Transversality of reduced flow]\label{A4}
For $\delta=0$, the reduced flow of \eqref{VFreduced} is transverse to $\omega(L_{1})$ and $\omega(L_{3})$ on $\mathcal S_{a_{3}}$, transverse to $\omega(L_{2})$ on $\mathcal S_{a_{1}}$, and transverse to $\omega(L_{4})$ on both $\mathcal{S}_{a_{1}}$ and $\mathcal{S}_{a_{2}}$.
\end{assumption}
Let $\Delta$ denote a section between $L_{2}$ and $L_{4}$ that is transverse to the layer flow of \eqref{VFlayer}, and let $P(L_{2})$ and $P(L_{4})$ denote the projections of the fold lines $L_{2}$ and $L_{4}$, respectively, onto $\Delta$.
The two projection lines intersect transversally at a point $P_{c} := P(L_{2}) \cap P({L_{4}})$, as indicated in Figure~\ref{fig:assumptions}.
Then, we make the following assumption:
\begin{assumption}[Breaking mechanism]\label{A3}
For {$\delta>0$} sufficiently small and $\epsilon=0$, the $z$-parametrized curves $P(L_{2})$ and $P(L_{4})$ intersect transversely at some $z$-value $z_0(\delta)$, with $z_0(0)=z_0$ and $z'_0(0)=0$; in particular, the intersection point $P_c$ between the curves thus depends on $\delta$.
Furthermore, we assume that for $z < z_{0}(\delta)$, the evolution of $P(L_{4})$ in forward time remains below the normally hyperbolic sheets $\mathcal S_{r_{1}}$ and $\mathcal S_{a_{2}}$, and that it lands directly on the opposite attracting sheet $\mathcal S_{a_{1}}$.
On the other hand, for $z>z_{0}(\delta)$, the evolution of $P(L_{4})$ lands on $\mathcal S_{a_{2}}$, safely away from the fold line $L_{2}$; see again Figure~\ref{fig:assumptions}.
\end{assumption}
We assume that $z_0'(0)=0$ to ensure that the piecewise affine map (PAM) associated to \eqref{VFfast}, as introduced in Section~\ref{statement of the results}, has a jump at the origin. As will become clear there, that assumption is made without loss of generality: the general case yields a piecewise affine map with a jump at non-zero $z_0'(0)$, which can be studied in an analogous fashion after a translation.
Under the above assumptions, we can already give a partial slow-fast analysis of the system in \eqref{VFfast}; it will be convenient to do so now in order to explain our final assumption. Consider first the fold curve $L_4$ where we assume all orbits to jump. Given Assumption~\ref{A3}, the $z$-coordinate of a given orbit will determine whether it is attracted to $\mathcal{S}_{a_1}$ or to $\mathcal{S}_{a_2}$ under the layer flow. For $z=z_0$, the fate of orbits cannot be decided; consideration of the perturbation terms in \eqref{VFfast} and a blow-up of $L_2$ would be necessary to describe the flow in that case. We expect that in general, canard phenomena are possible near $z=z_0$ where orbits will follow part of $\mathcal{S}_{r_1}$; here, we do not consider that scenario. However, as is clear from the above discussion, the $z$-value $z_0$ will nevertheless play a central role in our analysis. We will highlight two possible singular orbits passing through the transcritical intersection point $P_c=P(L_2)\cap P(L_4)$ at $z=z_0$: one orbit will continue along $\mathcal{S}_{a_1}$, while the other will continue along $\mathcal{S}_{a_2}$. In both instances, we will assume that the sought-after singular orbit follows the reduced flow until a fold line is reached -- $L_1$ in the former case and $L_3$ in the latter -- at which point the orbit jumps to $\mathcal{S}_{a_3}$ and ultimately reaches $L_4$, following again the reduced flow.
This ``ambiguous" behaviour of the singular flow is a key point in our study and motivates the following Assumption~\ref{A5}.
\begin{assumption}[Ambiguous singular orbit]\label{A5}
There exists a singular closed orbit $\Gamma_0^L$ that is defined by concatenating the reduced flow on $\mathcal{S}_{a_1}$ and on $\mathcal{S}_{a_3}$ with the layer flow between $L_1$ and $\mathcal{S}_{a_3}$ and between $L_4$ and $\mathcal{S}_{a_1}$, respectively.
Further, there exists a singular closed orbit $\Gamma_0^S$ that is defined by concatenating the reduced flow on $\mathcal{S}_{a_2}$ and on $\mathcal{S}_{a_3}$ with the layer flow between $L_3$ and $\mathcal{S}_{a_3}$ and between $L_4$ and $\mathcal{S}_{a_2}$, respectively.
Both $\Gamma_0^L$ and $\Gamma_0^S$ contain the point of intersection $P_c$ defined in Assumption~\ref{A3}; see Figure~\ref{fig:assumptions}.
Finally, we define $\Gamma_0 = \Gamma_0^L\cup\Gamma_0^S$ as the ``ambiguous'' singular orbit.
\end{assumption}
\begin{remark}
Any orbit $\Gamma_{0}$ in the above family forms a natural boundary between oscillations of different amplitude.
\end{remark}
For the sake of convenience, we will assume that both singular orbits $\Gamma_0^L$ and $\Gamma_0^S$ lie in a plane $z=z_0$. It follows that $\dot{z}|_{(z,\delta)=(z_0,0)}=0$ and, hence, that we can write
\begin{equation}\label{eq:g2}
g_2(x,y,z,\epsilon,\delta) = \delta G(x,y,z,\epsilon,\delta) + (z-z_0)H(x,y,z,\epsilon,\delta)
\end{equation}
in \eqref{VFfast-c}.
Our aim in this paper is to study the behaviour of orbits near $\Gamma_0$. To that end, we will formulate a first return map on the section $\Delta$ defined above which is transverse to the layer flow. We will present a partial study here: to be precise, we will restrict to characterising orbits that are sufficiently close, but not too close, to the point $P_c = P(L_2)\cap P(L_4)$; in other words, we will consider orbits in a sufficiently small neighbourhood of $P_c$ inside $\Delta$, uniformly away from $P(L_2)$. Our analysis will rely in part on \cite{SW04}, which will allow us to describe the persistence of both $\Gamma_0^L$ and $\Gamma_0^S$.
\begin{figure}
\caption{Critical manifold $\mathcal{S}$.}
\label{fig:assumptions}
\end{figure}
\section{Statement of results}\label{statement of the results}
Recall the definition of the section $\Delta$ which is located between the fold lines $L_2$ and $L_4$ and which is transverse to the layer flow of Equation~\eqref{VFlayer}.
Also, recall that the projection of the fold lines $L_2$ and $L_4$ onto $\Delta$ is denoted by $P(L_2)$ and $P(L_4)$, respectively.
Our first result concerns the well-definedness of the first return map from $\Delta$ to itself under the flow of Equation~\eqref{VFfast} and is relatively straightforward,
since most of the relevant dynamics occurs along hyperbolically attracting parts of the critical manifold $\mathcal{S}$.
We do, however, need to take additional care in a neighbourhood of $P(L_2)$ on $\Delta$, as the fate of orbits sufficiently close to the fold curve $L_2$ is difficult to analyse: such orbits could either jump onto an attracting sheet or follow a repelling sheet of the critical manifold $\mathcal{S}$ after passing near $L_2$, which would give rise to canard behaviour.
As stated above, we will avoid this unpredictability here; we will therefore formulate a result on the first return map that avoids a neighbourhood of $P(L_2)$.
First, we note that the form of the vector field in \eqref{VFfast} allows us to conclude that $P(L_2)$ and $P(L_4)$ are smooth $\delta$-families of graphs
\begin{align*}
y= \phi_{L_2}(z,\delta)\qquad\text{and}\qquad y = \phi_{L_4}(z,\delta)
\end{align*}
which intersect in the point $P_c$ given by $(y_0,z_0,0) + O(\delta)$.
Next, for $r>0$, we define the open neighbourhood
\begin{align*}
B_r(P(L_2)) = \{ (y,z)\in\Delta : |y-\phi_{L_2}(z,\delta)| < r\}
\end{align*}
of $P(L_2)$. Then, we have the following result.
\begin{theorem}\label{firstreturnmap_thm}
There exists an open neighborhood $\mathcal{U}$ of the point $P_c$ in $\Delta$ such that,
for each $\delta>0$ and $r>0$ sufficiently small, there exists $\epsilon>0$ small enough such that the first return map
\begin{align*}
\Pi: \mathcal{U}\setminus B_{r}(P(L_2))\subset \Delta\to\Delta\colon (y,z) \mapsto (\mathcal{Y}(y,z,\epsilon,\delta),\mathcal{Z}(y,z,\epsilon,\delta))
\end{align*}
is well-defined. Here, we write
\begin{align*}
\mathcal{Y}(y,z,\epsilon,\delta) = \phi_{L_4}(\mathcal{Z}(y,z,0,\delta),\delta) + \mathcal{E}(y,z,\epsilon,\delta)
\end{align*}
for some function $\mathcal{E}$ that is uniformly $o(1)$ as $\epsilon\to 0$.
\end{theorem}
Theorem~\ref{firstreturnmap_thm} will be proved in Section~\ref{proof of first return map theorem}. Let us now give some heuristics on how the return map $\Pi$ can be related to a suitably defined PAM, which we require in order to formulate our next result. We will give full proofs in Sections~\ref{sec:derivePAM} and \ref{section-proofs-all} below.
In the singular limit of $\epsilon=0$, the map $\Pi$ is given by
\begin{align*}
\Pi|_{\epsilon=0}: (y,z)\mapsto ( \phi_{L_4}(\mathcal{Z}(y,z,0,\delta),\delta),\mathcal{Z}(y,z,0,\delta) );
\end{align*}
since the image of that map lies on $P(L_4)$, it makes sense to also restrict its domain to the graph $y=\phi_{L_4}(z,\delta)$, reducing it in essence to a one-dimensional map
\begin{align*}
\Pi_0: z\mapsto \mathcal{Z}_{1}(z,\delta) := \mathcal{Z}(\phi_{L_4}(z,\delta),z,0,\delta).
\end{align*}
The map $\Pi_0$ is only defined for values of $z$ that are at least an $O(r)$-distance away from $z_0(\delta)$; however, since we can apply Theorem~\ref{firstreturnmap_thm} for any choice of $r$, the $\epsilon=0$-limit of $\Pi$ is actually defined for all $ z \not= z_0(\delta) $.
Let us now consider the neighbourhood of $z=z_0$ by writing $z=z_0 + \delta Z$. Recalling that $z_0\mapsto z_0$ for $\delta=0$, which marks the transverse intersection point $P_c$ of $P(L_2)$ and $P(L_4)$ in Assumption~\ref{A3}, we arrive at
\begin{align*}
\tilde{\Pi}_0 : Z\mapsto \tilde{\mathcal{Z}}_1(Z,\delta) := \frac{\mathcal{Z}_{1}(z_0+\delta Z,\delta) - \mathcal{Z}_{1}(z_0,0)}{\delta}\qquad\text{for }
z_0+\delta Z\ne z_0(\delta),
\end{align*}
as $z_0=\mathcal{Z}_{1}(z_0,0)$.
Using Assumption~\ref{A3} once more, we can assume the map $\tilde{\Pi}_0$ to have a well-defined limit
\[
\tilde{\Pi}_{00} : Z\mapsto \lim_{\delta\to0}\tilde{\mathcal{Z}}_1(Z,\delta)\qquad\text{for }Z\not=0.
\]
Since $O(Z^2)$-terms are scaled away in the above limit, one expects the map $\tilde{\Pi}_{00}$ to be piecewise affine. While the above argument is heuristic, it can be made rigorous by relation to the vector field in \eqref{VFfast} and on the basis of the two limiting systems that are obtained therefrom for $\epsilon=0$. Below, we express this correspondence in terms of a definition; a rigorous proof of our heuristics can be found in Proposition \ref{return-limit-prop}.
\begin{definition} \label{assosc_def0}
Let $\gamma$ be a curve on a normally hyperbolic segment of the critical manifold $\mathcal{S}$ for Equation~\eqref{VFfast} that is parametrized by $x$. Then, we define the affine map $M_\gamma$ associated to $\gamma$ as
\begin{align*}
M_\gamma(Z) = \big(e^{\int_{\gamma} p(x)dx}\big)Z + \left(\int_{\gamma} q(x) e^{\int_{\gamma_{x}} p(x')dx'}dx \right),
\end{align*}
with
\begin{align*}
p(x) := \frac{H(x,y,z,0,0)}{g_1(x,y,z,0,0)} F_{x}(x,z)
\quad\text{and}\quad
q(x) := \frac{G(x,y,z,0,0)}{g_1(x,y,z,0,0)} F_{x}(x,z),
\end{align*}
where we substitute $y$ and $z$ with the $y$-coordinate and the $z$-coordinate of $\gamma$, respectively. (In particular, we note that $y=F(x,z):=F(x,z,0,0)$.)
\end{definition}
We remark that the coefficient of $Z$ in the definition of $M_\gamma$ is strictly positive, and that the inner integral is taken along the portion $\gamma_{x}$ of the curve that is parametrized from $x$ until the end of the curve; see \eqref{ODE_soln} below.
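To make the definition of $M_\gamma$ concrete, the following minimal sketch computes its slope and intercept by numerical quadrature along a segment of an attracting sheet; the choices of $F_x$, $g_1$, $G$, and $H$ below are purely illustrative assumptions.
\begin{verbatim}
# Minimal sketch: slope A and intercept B of the affine map M_gamma(Z) = A*Z + B,
# obtained by quadrature of p and q along gamma (parametrised by x, with
# y = F(x, z0) substituted).  All model functions below are assumptions.
import numpy as np
from scipy.integrate import quad

Fx = lambda x: x**4 - 5.0 * x**2 + 4.0     # F_x for the illustrative quintic F
g1 = lambda x: 1.5 - x                     # illustrative g_1 along gamma (nonzero)
G  = lambda x: 1.0                         # illustrative G along gamma
H  = lambda x: 0.5                         # illustrative H along gamma

p = lambda x: H(x) / g1(x) * Fx(x)
q = lambda x: G(x) / g1(x) * Fx(x)

def affine_map(x_start, x_end):
    """Return (A, B) with M_gamma(Z) = A*Z + B for gamma from x_start to x_end."""
    A = np.exp(quad(p, x_start, x_end)[0])
    inner = lambda x: q(x) * np.exp(quad(p, x, x_end)[0])
    B = quad(inner, x_start, x_end)[0]
    return A, B

print(affine_map(-0.8, 1.0))   # e.g. a segment of the middle attracting sheet
\end{verbatim}
Note that the slope returned by \texttt{affine\_map} is automatically positive, in line with the remark above.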
Next, we apply Definition~\ref{assosc_def0} in the context of Equation~\eqref{VFfast}, i.e., to the slow portions of the singular orbits $\Gamma_0^S$ and $\Gamma_0^L$, which are given by $\Gamma_0^S\cap\mathcal{S}_{a_2}$, $\Gamma_0^S\cap\mathcal{S}_{a_3}$, $\Gamma_0^L\cap\mathcal{S}_{a_1}$, and $\Gamma_0^L\cap\mathcal{S}_{a_3}$.
\begin{definition}\label{assosc_def}
We define the piecewise affine map (PAM)
\begin{equation}
M(Z)=\begin{cases}
M_{\Gamma_0^L\cap \mathcal{S}_{a_3}} \circ M_{\Gamma_0^L\cap \mathcal{S}_{a_1}}(Z)=:M_{1}(Z) & \quad\text{for }Z<0, \\
M_{\Gamma_0^S\cap \mathcal{S}_{a_3}} \circ M_{\Gamma_0^S\cap \mathcal{S}_{a_2}}(Z)=:M_{2}(Z) & \quad\text{for }Z>0,
\end{cases}
\label{PAM}
\end{equation}
and we say that $M$ is \emph{associated} with the vector field in \eqref{VFfast}.
\end{definition}
We refer to Section~\ref{sec:derivePAM} for specific expressions for $M$ in the context of \eqref{VFfast}.
\begin{remark}
We note that the $Z$-coefficient of the PAM $M$ is strictly positive, as was the case in Definition~\ref{assosc_def0}.
However, the image of the map could contain $Z=0$; in fact, the situation where the images $M|_{Z<0}$ and $M|_{Z>0}$ partly overlap represents the most interesting scenario here.
We will see that the heuristics following the statement of Theorem~\ref{firstreturnmap_thm} can be proved rigorously, and we will show that the limiting map $\tilde{\Pi}_{00}$ is precisely the associated PAM $M$. The fact that the singular limit of the first return map $\Pi$ is not one-to-one causes parts of the remaining analysis to differ from \cite{SW04}.
\end{remark}
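The correspondence between the two branches of $M$ and the two oscillation types can also be read off numerically: iterates with $Z<0$ correspond to LAOs (branch $M_1$, built from $\Gamma_0^L$), while iterates with $Z>0$ correspond to SAOs (branch $M_2$, built from $\Gamma_0^S$). The following minimal sketch, with purely illustrative (assumed) coefficients, iterates such a PAM and compresses the resulting symbol sequence into a mixed-mode signature; for the coefficients chosen here the attracting orbit is a two-cycle, i.e.\ the signature is $1^1$.
\begin{verbatim}
# Minimal sketch: iterate a PAM M and read off the mixed-mode signature, using
# the convention Z < 0 -> LAO ('L') and Z > 0 -> SAO ('s').  The coefficients
# a_ij below are illustrative assumptions.
def M(Z, a11=0.6, a12=0.25, a21=0.6, a22=-0.35):
    return a11 * Z + a12 if Z < 0 else a21 * Z + a22

def signature(Z0, n_transient=200, n_record=60):
    Z = Z0
    for _ in range(n_transient):           # discard the transient
        Z = M(Z)
    symbols = []
    for _ in range(n_record):
        symbols.append('L' if Z < 0 else 's')
        Z = M(Z)
    blocks, i = [], 0                      # compress consecutive equal symbols
    while i < len(symbols):
        j = i
        while j < len(symbols) and symbols[j] == symbols[i]:
            j += 1
        blocks.append((symbols[i], j - i))
        i = j
    return blocks

print(signature(-0.1))
\end{verbatim}
Varying the coefficients $a_{ij}$ yields orbits with different signatures.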
The following is our second main result:
\begin{theorem}\label{association_thm}
Given a slow-fast system of the form in \eqref{VFfast} that satisfies Assumptions~\ref{A1} through \ref{A5}, assume that its associated PAM $M$, as defined in \eqref{PAM}, exhibits a stable periodic MMO with signature ${L_{1}}^{s_{1}}{L_{2}}^{s_{2}}\dots{L_{k}}^{s_{k}}$, for some $k \in \mathbb{N}$. Further, assume that this MMO avoids the discontinuity point at $Z=0$. Then,
\eqref{VFfast} exhibits a stable MMO with the same signature ${L_{1}}^{s_{1}}{L_{2}}^{s_{2}}\dots{L_{k}}^{s_{k}}$, for $\epsilon,\delta>0$ sufficiently small.
\end{theorem}
By requiring that $\epsilon$ and $\delta$ be (sufficiently) small in Theorem~\ref{association_thm}, we mean that for every $\delta>0$ small, there exists $\epsilon_0(\delta)>0$ small such that the result is true for every $\epsilon\in(0,\epsilon_0(\delta)]$.
We prove Theorem~\ref{association_thm} in Section~\ref{proof of assosciation theorem}.
Our theoretical results are complemented by an ``inverse" theorem: any PAM is associated to a suitably chosen slow-fast system of the form in \eqref{VFfast}. In the proof of the following theorem, we will make specific choices for the functions $F$, $g_1$, and $g_2$ therein, which will allow us to obtain convenient expressions for the corresponding vector field.
\begin{theorem}\label{existence_thm}
Assume we are given any piecewise affine map (PAM) of the form
\begin{align*}
M(Z)= \begin{cases}
a_{11}Z + a_{12} & \quad\text{for }Z < 0, \\
a_{21}Z + a_{22} & \quad\text{for }Z > 0,
\end{cases}
\end{align*}
with $a_{11}, a_{21}>0$. Then, there exists a slow-fast system of the form in \eqref{VFfast} which satisfies Assumptions~\ref{A1} through \ref{A5} such that the PAM associated to the vector field is given by $M$.
\end{theorem}
Again, Theorem~\ref{existence_thm} will be proved in Section~\ref{Proof of existence theorem}.
\begin{remark}
Theorem~\ref{association_thm} concerns MMOs that are stable both for the PAM $M$ and the associated slow-fast system in \eqref{VFfast}. Given the form of the Poincar\'e map $\Pi$, it then necessarily follows that $a_{11}^L a_{21}^s <1$ in the definition of $M$; see Section~\ref{proof of assosciation theorem} for details. (Here, $L = \sum^{k}_{i=1} L_{i}$ and $s = \sum^{k}_{i=1} s_{i}$, as per the notation of Theorem~\ref{association_thm}.) Clearly, this condition is implied by the somewhat more generic requirement that $a_{11},a_{21}<1$, which is for instance imposed in \cite{RPB12}; cf.~also Section~\ref{sec:atmost+atleast} below.
\end{remark}
\section{Computation of associated PAM}\label{sec:derivePAM}
In this section, we obtain expressions for the PAM $M$ defined in \eqref{PAM} that is associated to the vector field in \eqref{VFfast}. Moreover, we establish formally that $M$ equals the limit $\tilde{\Pi}_{00}$ of the first return map $\Pi$; see Proposition~\ref{return-limit-prop}. Recall the definition of the system in \eqref{VFfast}, which satisfies Assumptions~\ref{A1} through \ref{A5}. In particular, Assumption~\ref{A1} implies that the function $F(x,z,0,\delta)$ has four distinct local extrema with respect to the variable $x$, which we denote by
\begin{align*}
x_{1}(z,\delta)<x_{2}(z,\delta)<x_{3}(z,\delta)<x_{4}(z,\delta);
\end{align*}
see Figure~\ref{fig:critical_manifold} for an illustration.
\begin{figure}
\caption{The critical manifold of \eqref{VFfast}, with the local extrema $x_{1}(z,\delta)<x_{2}(z,\delta)<x_{3}(z,\delta)<x_{4}(z,\delta)$ of $F(x,z,0,\delta)$ and the points $\hat{x}_{1}$, $\hat{x}_{3}$, and $\hat{x}_{4}$.}
\label{fig:critical_manifold_slice}
\label{fig:critical_manifold}
\end{figure}
Next, we recall that we assumed for the sake of convenience that $\Gamma_0^S$ and $\Gamma_0^L$ lie in the plane $\{z=z_0\}$; see \eqref{eq:g2} and the text above that equation. Hence, we can make the general definition of the PAM $M$ somewhat more explicit in the present context. Specifically, the assumption in \eqref{eq:g2} allows us to introduce the following rescaling: since we are interested in $z\approx z_0$ and $\delta$ small in \eqref{VFfast}, we define the transformation
\begin{align}\label{eq:rescale}
z=z_{0} + \delta Z.
\end{align}
Substituting \eqref{eq:rescale} and \eqref{eq:g2} into \eqref{VFfast}, we have
\begin{align*}
x' &=y-F(x,z_{0}+\delta Z,\epsilon,\delta), \\
y' &=\epsilon g_1(x,y,z_{0}+\delta Z,\epsilon,\delta), \\
Z' &=\epsilon\big[ G(x,y,z_0+\delta Z,\epsilon,\delta)+Z H(x,y,z_0+\delta Z,\epsilon,\delta)\big],
\end{align*}
which implies
\begin{align*}
x' &=y-F(x,z_{0})+ \mathcal{O}(\epsilon,\delta), \\
y' &=\epsilon \big[g_1(x,y,z_0,0,0) + \mathcal{O}(\epsilon,\delta)\big], \\
Z' &=\epsilon\big[G(x,y,z_{0},0,0)+ Z H(x,y,z_{0},0,0) + \mathcal{O}(\epsilon,\delta)\big]
\end{align*}
after Taylor expansion of $F$, $g_1$, $G$, and $H$. (Here, we again write $F(x,z_0)=F(x,z_0,0,0)$.)
Reverting to the ``slow time'' $\tau$ in the above, we find
\begin{subequations}\label{slow_proof}
\begin{align}
\epsilon\dot{x} &=y-F(x,z_{0})+ \mathcal{O}(\epsilon,\delta), \\
\dot{y} &= g_1(x,y,z_{0},0,0) + \mathcal{O}(\epsilon,\delta), \\
\dot{Z} &= G(x,y,z_{0},0,0)+ Z H(x,y,z_{0},0,0) + \mathcal{O}(\epsilon,\delta).
\end{align}
\end{subequations}
In the singular limit of $\epsilon=0$, we obtain
\begin{subequations}\label{singular_proof}
\begin{align}
y &=F(x,z_{0})+ \mathcal{O}(\delta), \label{singular_proofa} \\
\dot {y} &= g_1(x,y,z_{0},0,0) + \mathcal{O}(\delta), \\
\dot{Z} &= G(x,y,z_{0},0,0)+ Z H(x,y,z_{0},0,0) + \mathcal{O}(\delta).
\end{align}
\end{subequations}
Differentiating \eqref{singular_proofa} with respect to $\tau$, we find
\begin{align*}
\dot{y}=F_{x}(x,z_{0})\dot{x}+ \mathcal{O}(\delta)
\end{align*}
which, together with the $(y,Z)$-subsystem of \eqref{singular_proof}, yields the projection of the reduced flow (in $\epsilon$) onto the $(x,Z)$-plane:
\begin{subequations}\label{red:slow_flow}
\begin{align}
\dot{x} &=\frac{g_1(x,F(x,z_{0}),z_{0},0,0)}{F_{x}(x,z_{0})}+ \mathcal{O}(\delta), \\
\dot{Z}&= G(x,F(x,z_{0}),z_{0},0,0)+ Z H(x,F(x,z_{0}),z_{0},0,0) + \mathcal{O}(\delta).
\end{align}
\end{subequations}
The important observation now is that Equation~\eqref{red:slow_flow} is partially decoupled for $\delta=0$. As a consequence, Assumption~\ref{A5} on the existence of a singular orbit $\Gamma_0$ actually implies that $g_1(x,F(x,z_{0}),z_{0},0,0)$ is non-zero. In other words, we may parametrize the reduced flow by the variable $x$. Hence, introducing $x$ as the independent variable in \eqref{red:slow_flow} and noting that $F_{x}(x,z_{0})\ne 0$ away from $L_i$, we obtain an ordinary differential equation
\begin{align}\label{ODE}
\frac{dZ}{dx} = p(x)Z + q(x) + \mathcal{O}(\delta),
\end{align}
with
\begin{align*}
p(x) := \frac{H(x,F(x,z_{0}),z_{0},0,0)}{g_1(x,F(x,z_{0}),z_{0},0,0)} F_{x}(x,z_{0})
\ \text{and}\
q(x) := \frac{G(x,F(x,z_{0}),z_{0},0,0)}{g_1(x,F(x,z_{0}),z_{0},0,0)} F_{x}(x,z_{0}),
\end{align*}
which is linear with respect to $Z$ when $\delta=0$.
In that limit, \eqref{ODE} can hence be solved exactly, with initial condition $Z(x_{\rm init})=Z_{\rm init}$:
\begin{equation}\label{ODE_soln}
Z(x,x_{\rm init},Z_{\rm init})=\big(e^{\int^{x}_{x_{\rm init}} p(s)ds }\big) Z_{\rm init} +\left(\int^{x}_{x_{\rm init}} q(u) e^{ \int^{x}_{u} p(s) ds}du\right).
\end{equation}
Note that \eqref{ODE_soln} is precisely the affine map defined in Definition~\ref{assosc_def0} that is associated to the slow portion of $\Gamma_0^S$ or $\Gamma_0^L$ between $x_{\rm init}$ and $x$.
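As a quick sanity check (not needed for the analysis), the affine structure of \eqref{ODE_soln} can be verified numerically. The following Python sketch compares the formula with a direct integration of \eqref{ODE} at $\delta=0$; the coefficient functions $p$ and $q$ used here are placeholders and are not derived from \eqref{VFfast}.
\begin{verbatim}
# Sketch: verify the affine solution formula (ODE_soln) for dZ/dx = p(x)Z + q(x),
# using placeholder coefficient functions p and q (not derived from the model).
import numpy as np
from scipy.integrate import quad, solve_ivp

p = lambda x: 0.3 * np.cos(x)      # placeholder for p(x)
q = lambda x: 0.5 * x              # placeholder for q(x)

def Z_affine(x, x_init, Z_init):
    """Evaluate the exact affine solution (ODE_soln)."""
    P = lambda a, b: quad(p, a, b)[0]                     # int_a^b p(s) ds
    hom = np.exp(P(x_init, x)) * Z_init                   # homogeneous part
    inhom = quad(lambda u: q(u) * np.exp(P(u, x)), x_init, x)[0]
    return hom + inhom

x_init, x_end, Z_init = -2.5, 1.0, -0.7
numeric = solve_ivp(lambda x, Z: p(x) * Z + q(x), (x_init, x_end), [Z_init],
                    rtol=1e-10, atol=1e-12)
print(Z_affine(x_end, x_init, Z_init), numeric.y[0, -1])  # the two values agree
\end{verbatim}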
Now, we make use of \eqref{ODE_soln} to define a map that encodes the mixed-mode dynamics of our canonical system, Equation~\eqref{VFfast}.
The discussion underneath Assumptions~\ref{A3} and \ref{A4} implies that the sought-after map will have two branches which describe oscillations with different amplitudes as we pass through $Z_{\rm init}=0$. Specifically, for $Z_{\rm init}<0$, we observe \textit{large-amplitude oscillations (LAOs)}, while for $Z_{\rm init}>0$, we have \textit{small-amplitude oscillations (SAOs)};
we hence proceed to define the following one-dimensional piecewise affine map associated with \eqref{VFfast}:
\begin{align} \label{PAM_proof}
M(Z_{\rm init})=
\begin{cases}
Z(x_{4},\hat{x}_{1},Z(x_{1},\hat{x}_{4},Z_{\rm init})) = M_{1}(Z_{\rm init}) & \quad\text{if }Z_{\rm init}<0, \\
Z(x_{4},\hat{x}_{3},Z(x_{3},x_{2},Z_{\rm init}))=M_{2}(Z_{\rm init}) & \quad\text{if }Z_{\rm init}>0,
\end{cases}
\end{align}
where $\hat{x}_{4}$, $\hat{x}_3$ and $\hat{x}_{1}$ are defined as in Figure~\ref{fig:critical_manifold}.
Given \eqref{ODE_soln}, we find the expressions for the affine maps defined in \eqref{PAM_proof} or, equivalently, in \eqref{PAM}:
\begin{multline}\label{PAM_LAO}
Z(x_{4}, \hat{x}_{1},Z(x_{1},\hat{x}_{4},Z_{\rm init})) = \Big({\rm e}^{\big(\int^{x_{1}}_{\hat{x}_{4}}+\int^{x_{4}}_{\hat{x}_{1}}\big)p(s)ds}\Big) Z_{\rm init} \\
+\bigg(\int^{x_{1}}_{\hat{x}_{4}} q(u){\rm e}^{\big(\int^{x_{1}}_{u}+\int^{x_{4}}_{\hat{x}_{1}}\big) p(s) ds}du + \int^{x_{4}}_{\hat{x}_{1}} q(u) {\rm e}^{\int^{x_{4}}_{u} p(s) ds}du\bigg)
\end{multline}
and
\begin{multline}\label{PAM_SAO}
Z(x_{4}, \hat{x}_{3},Z(x_{3},{x}_{2},Z_{\rm init})) = \Big({\rm e}^{\big(\int^{x_{3}}_{x_{2}}+\int^{x_{4}}_{\hat{x}_{3}}\big)p(s)ds}\Big) Z_{\rm init} \\
+\bigg(\int^{x_{3}}_{{x}_{2}} q(u){\rm e}^{\big(\int^{x_{3}}_{u}+\int^{x_{4}}_{\hat{x}_{3}}\big) p(s) ds}du + \int^{x_{4}}_{\hat{x}_{3}} q(u) {\rm e}^{\int^{x_{4}}_{u} p(s) ds}du\bigg).
\end{multline}
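For illustration only, the two branches \eqref{PAM_LAO} and \eqref{PAM_SAO} can be assembled numerically by composing the affine maps from \eqref{ODE_soln}. The Python sketch below does so for the $x$-values used later in Section~\ref{sec:numerics}; the coefficient functions $p$ and $q$ are again placeholders, so the resulting numbers are purely illustrative.
\begin{verbatim}
# Sketch: assemble the two PAM branches (PAM_LAO) and (PAM_SAO) by composing the
# affine maps Z(.,.,.) from (ODE_soln); p and q are placeholder coefficients.
import numpy as np
from scipy.integrate import quad

p = lambda x: 0.3 * np.cos(x)      # placeholder for p(x)
q = lambda x: 0.5 * x              # placeholder for q(x)

def Z_affine(x, x_init, Z_init):
    P = lambda a, b: quad(p, a, b)[0]
    return (np.exp(P(x_init, x)) * Z_init
            + quad(lambda u: q(u) * np.exp(P(u, x)), x_init, x)[0])

# x-values of the extrema and their projections (z_0 = 0), as in the numerics section
xh4, x1, x2, x3, x4, xh3, xh1 = -2.5, -2.0, -1.0, 0.0, 1.0, 1.5, 1.6

M1 = lambda Z0: Z_affine(x4, xh1, Z_affine(x1, xh4, Z0))   # LAO branch
M2 = lambda Z0: Z_affine(x4, xh3, Z_affine(x3, x2, Z0))    # SAO branch

a12, a11 = M1(0.0), M1(1.0) - M1(0.0)   # M1(Z) = a11*Z + a12
a22, a21 = M2(0.0), M2(1.0) - M2(0.0)   # M2(Z) = a21*Z + a22
print(a11, a12, a21, a22)
\end{verbatim}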
\begin{proposition}\label{return-limit-prop}
Let $\tilde{\Pi}_0$ be defined as in Section~\ref{statement of the results}. For each $Z\ne 0$, $\tilde{\Pi}_0$ is well-defined for $\delta>0$ sufficiently small; moreover, the limit $\tilde{\Pi}_{00}(Z)=\lim_{\delta\to 0}\tilde{\Pi}_0(Z)$ exists and is equal to the piecewise affine map given in \eqref{PAM_proof}, i.e., $\tilde{\Pi}_{00}(Z_{\rm init})=M(Z_{\rm init})$, with $Z_{\rm init}\ne 0$.
\end{proposition}
Proposition \ref{return-limit-prop} will be proved in Section~\ref{section-prop-proof}. In the proof, we will use an important observation made in Section~\ref{proof of first return map theorem}: the $\mathcal{Z}$-component of the return map $\Pi$ defined in Theorem \ref{firstreturnmap_thm} is a small $\epsilon$-perturbation of the return map induced by the reduced flow of \eqref{VFfast} near $\Gamma_0^L$ or $\Gamma_0^S$, depending on whether we are below or above $B_{r}(P(L_2))$, respectively.
\section{Proof of main results}\label{section-proofs-all}
In this section, we present rigorous proofs for our main results, as introduced in Section~\ref{statement of the results}.
\subsection{Proof of Theorem~\ref{firstreturnmap_thm}}\label{proof of first return map theorem}
We first prove Theorem~\ref{firstreturnmap_thm}. To that end, we consider Equation~\eqref{VFfast} under Assumptions \ref{A1} through \ref{A5} to show that there exists an open neighborhood $\mathcal{U}$ of the intersection point $P_c$ of $P(L_2)$ and $P(L_4)$ such that, for all $\delta>0$ and $r>0$ small and fixed, the Poincar\'{e} map $\Pi: \mathcal{U}\setminus B_{r}(P(L_2))\subset\Delta\to\Delta$ induced by (\ref{VFfast}) is well-defined for $\epsilon$ sufficiently small.
Our proof is based on the techniques developed in \cite{SW04}, as indicated in Figure~\ref{fig:Thm21-proof-sketch}: ``fast'' orbits of \eqref{VFfast} passing through $\Delta=\tilde{\Delta}^{3}_{\rm out}$ below the tubular neighbourhood $B_{r}(P(L_2))$ are attracted to $\mathcal{S}_{a_1}$, and therefore give rise to LAOs in the resulting mixed-mode time series;
similarly, orbits passing through $\tilde{\Delta}^{3}_{\rm out}$ above $B_{r}(P(L_2))$ are attracted to $\mathcal{S}_{a_2}$, resulting in SAOs. Considered separately, each of these two cases can clearly be reduced to the return map studied in \cite{SW04}, for fixed $\delta>0$ and $r>0$. (Recall that we stay uniformly away from the fold line $L_2$.) We focus on the first case of LAOs here; the
second case, of SAOs, can be studied in an analogous fashion.
Fundamentally, we need to show that, for $r>0$ and $\delta>0$ sufficiently small, the flow of
\eqref{VFfast} stays close to the singular closed orbit $\Gamma_{0}^{L}$ such that the return
map $\Pi$ exists for $\epsilon$ small.
Following \cite{SW04}, the map $\Pi$ is essentially composed of three different types of transition maps: $\pi_T$, $\pi_{S_{a_1}}$, and $\tilde{\pi}_{L_1}$, as illustrated in Figure~\ref{fig:Thm21-proof-sketch}. Here, the map $\pi_T$ is defined by following the fast flow towards the attracting portion $\mathcal{S}_{a_1}$ of $\mathcal{S}$, while $\pi_{S_{a_1}}$ describes the passage near $\mathcal{S}_{a_1}$ away from the fold line $L_1$; the study of $\pi_T$ and $\pi_{S_{a_1}}$ is based on Fenichel's standard GSPT. The map $\tilde{\pi}_{L_1}$, which describes the passage near the fold line $L_1$, is studied via geometric desingularisation, or ``blow-up''.
\begin{figure}
\caption{The return map $\Pi$ induced by \eqref{VFfast}, composed of the transition maps $\pi_T$, $\pi_{S_{a_1}}$, and $\tilde{\pi}_{L_1}$.}
\label{fig:Thm21-proof-sketch}
\end{figure}
Let us now consider the ``half-return" map $\Pi_{H_\alpha}=\tilde{\pi}_{L_{1}} \circ \pi_{S_{a_{1}}} \circ \pi_{T}$ from the portion of $\mathcal{U}$ below $B_{r}(P(L_2))$ to a section $\tilde{\Delta}^{1}_{\rm out}$ transverse to $\Gamma_{0}^{L}$. Following Theorem 2 in \cite{SW04}, the half-return map $\Pi_{H_\alpha}$ is given by
\begin{equation*}
\Pi_{H_\alpha}(y,z)=(\mathcal{Y}_\alpha(y,z,\epsilon,\delta),\mathcal{Z}_\alpha(y,z,\epsilon,\delta)),
\end{equation*}
with $\mathcal{Z}_\alpha(y,z,\epsilon,\delta)=\mathcal{Z}_\alpha(y,z,\delta)+O(\epsilon\ln\epsilon)$ where $\mathcal{Z}_\alpha(y,z,\delta)$ is defined by following the orbit of the reduced flow on the attracting portion $\mathcal{S}_{a_1}$ between the $\omega$-limit of the point $(y,z)$ and $L_1$. Moreover, we have $\mathcal{Y}_\alpha(y,z,\epsilon,\delta)=\phi_\alpha(\mathcal{Z}_\alpha(y,z,\delta),\delta)+o(1)$, where $y=\phi_\alpha(z,\delta)$ describes the projection of the fold $L_1$ onto $\tilde{\Delta}^{1}_{\rm out}$ and where the $o(1)$-term tends uniformly to zero as $\epsilon\to 0$.
The half-return map $\Pi_{H_\beta}=(\mathcal{Y}_\beta,\mathcal{Z}_\beta)$ from $\tilde{\Delta}^{1}_{\rm out}$ back to $\Delta$ can be studied in a similar fashion, as a composition of transition maps that are of the same type as in $\Pi_{H_\alpha}$. Combining the two, the return map $\Pi=\Pi_{H_\beta}\circ\Pi_{H_\alpha}$, which is defined in the region below $B_{r}(P(L_2))$, can be written as
\begin{equation*}
\Pi(y,z)=(\mathcal{Y}(y,z,\epsilon,\delta),\mathcal{Z}(y,z,\epsilon,\delta)),
\end{equation*}
with $\mathcal{Z}(y,z,\epsilon,\delta)=\mathcal{Z}(y,z,\delta)+O(\epsilon\ln\epsilon)$, where
\begin{equation}\label{return-reduced}
\mathcal{Z}(y,z,\delta)=\mathcal{Z}_\beta(\phi_\alpha(\mathcal{Z}_\alpha(y,z,\delta),\delta),\mathcal{Z}_\alpha(y,z,\delta),\delta)
\end{equation}
is the return map defined by the reduced flow on $\mathcal{S}_{a_1}$ and $\mathcal{S}_{a_3}$. We can also conclude that the function $\mathcal{Y}$ has the property given in Theorem \ref{firstreturnmap_thm}, which completes the proof.
\subsection{Proof of Proposition~\ref{return-limit-prop}}\label{section-prop-proof}
Next, we prove Proposition~\ref{return-limit-prop}. Recall that the map
\[
\tilde{\Pi}_0(Z_{\rm init})= \frac{\mathcal{Z}_{1}(z_0+\delta Z_{\rm init},\delta) -z_0}{\delta},
\]
with $\mathcal{Z}_{1}(z,\delta) = \mathcal{Z}(\phi_{L_4}(z,\delta),z,0,\delta)$, is defined for $z\ne z_0(\delta)$; cf.~Section~\ref{statement of the results}. If $Z_{\rm init}\ne 0$ is fixed, then $z=z_0+\delta Z_{\rm init}\ne z_0(\delta)$ for $\delta>0$ sufficiently small due to $z_{0}'(0)=0$; see Assumption~\ref{A3}. Thus, $\tilde{\Pi}_0$ is well-defined for $Z_{\rm init}\ne 0$ provided that $\delta>0$ is small.
First, let us consider $Z_{\rm init}<0$ and fixed. Then, we have that $z=z_0+\delta Z_{\rm init}<z_0(\delta)$ for $\delta>0$ small, i.e., the point $(\phi_{L_4}(z,\delta),z)$ is attracted to $\mathcal{S}_{a_1}$; see again Assumption~\ref{A3}. We therefore observe LAOs and $\mathcal{Z}_{1}(z,\delta) = \mathcal{Z}(\phi_{L_4}(z,\delta),z,\delta)$, where the function $\mathcal{Z}(y,z,\delta)$ is defined in (\ref{return-reduced}).
Now, we note that the system in (\ref{red:slow_flow}) is obtained by applying the coordinate transformation in (\ref{eq:rescale}) to the reduced flow in \eqref{VFreduced} in $(x,z)$-space, where $g_2$ is given in (\ref{eq:g2}). It follows that the orbit of (\ref{red:slow_flow}) which is initiated at $(\hat{x}_{4}(z_0+\delta Z_{\rm init},\delta), Z_{\rm init})$, with $F(\hat{x}_{4}(z,\delta),z,0,\delta)=\phi_{L_4}(z,\delta)$ -- see Figure \ref{fig:critical_manifold} -- intersects the projection of the fold line $L_1$ onto the $(x,Z)$-space in $(x_{1}(z_0+\delta Z_\alpha,\delta),Z_\alpha)$, where
\begin{equation*}
Z_\alpha=\frac{\mathcal{Z}_\alpha(\phi_{L_4}(z_0+\delta Z_{\rm init},\delta),z_0+\delta Z_{\rm init},\delta)-z_0}{\delta}.
\end{equation*}
(Here, $\mathcal{Z}_\alpha$ is defined as in Section \ref{proof of first return map theorem}.)
Thus, $Z_\alpha$ converges to $Z(x_{1},\hat{x}_{4},Z_{\rm init})$ as $\delta\to 0$, with $Z(x,x_{\rm init},Z_{\rm init})$ given in (\ref{ODE_soln}), where we denote by $\hat{x}_{4}$ and $x_{1}$, respectively, the limit of $\hat{x}_{4}(z,\delta)$ and $x_{1}(z,\delta)$, respectively, as $(z,\delta)\to(z_0,0)$. Here, we have used the fact that $\Gamma_0^L$ is located in the plane $\{z=z_0\}$ with $\delta=0$ and, thus, that $\delta Z_\alpha\to 0$ as $\delta\to 0$ in $x_1$. Moreover, we have exploited our observation in Section~\ref{sec:derivePAM} that (\ref{red:slow_flow}) is a $\delta$-perturbation of a linear (in $Z$) differential equation.
Similarly, the orbit of (\ref{red:slow_flow}) that is initiated at $(\hat{x}_{1}(z_0+\delta Z_\alpha,\delta),Z_\alpha)$, with $F(\hat{x}_{1}(z,\delta),z,0,\delta)=\phi_{\alpha}(z,\delta)=F({x}_{1}(z,\delta),z,0,\delta)$, again by Figure \ref{fig:critical_manifold}, intersects the $(x,Z)$-projection of the fold line $L_4$ in $(x_{4}(z_0+\delta Z_\beta,\delta),Z_\beta)$, with
\begin{equation*}
Z_\beta=\frac{\mathcal{Z}_\beta(\phi_{\alpha}(z_0+\delta Z_{\alpha},\delta),z_0+\delta Z_{\alpha},\delta)-z_0}{\delta}.
\end{equation*}
We therefore conclude that $Z_\beta$ converges to (\ref{PAM_LAO}) as $\delta\to 0$. (As above,
we use that $\delta Z_\beta\to 0$ as $\delta\to 0$ in $x_4$.) Now, it suffices to note that $\tilde{\Pi}_0(Z_{\rm init})=Z_\beta$, from (\ref{return-reduced}).
The case where $Z_{\rm init}>0$ can be studied in a similar fashion to show that $\tilde{\Pi}_0(Z_{\rm init})$ tends to (\ref{PAM_SAO}) as $\delta\to 0$, as claimed, which completes the proof.
\subsection{Proof of Theorem~\ref{association_thm}}\label{proof of assosciation theorem}
For the sake of simplicity and readability, we first prove Theorem~\ref{association_thm} for an MMO with signature $1^0$; then, we will indicate how the proof can be extended to the general case, i.e.,
to MMOs with signature $L_1^{s_1}L_2^{s_2}\cdots L_k^{s_k}$, with $k\ge 1$ integer.
Thus, we suppose that the PAM in \eqref{PAM} which is associated with the vector field in \eqref{VFfast} has a stable periodic orbit that undergoes one LAO, i.e., that $M(Z^\ast)=Z^\ast$ for $Z^\ast<0$ and $a_{11}<1$, where $a_{11}$ is the coefficient of $Z$ in $M$. Our goal is to prove that \eqref{VFfast} has a stable periodic orbit with one LAO for $\epsilon,\delta>0$ small. Clearly, periodic orbits for \eqref{VFfast} correspond to fixed points of the first return map $\Pi$ defined in Theorem~\ref{firstreturnmap_thm}. It can easily be seen that $(y,z)$ is a solution of $\Pi(y,z)-(y,z)=(0,0)$ if and only if $(y,Z)$, with $z=z_0+\delta Z$, is a solution of
\begin{align}\label{fixed points-1}
\begin{split}
\phi_{L_4}(z_0+\delta\tilde{Z}(y,Z,0,\delta),\delta)+\mathcal{E}(y,z_0+\delta Z,\epsilon,\delta)-y &=0, \\
\tilde{Z}(y,Z,\epsilon,\delta)-Z &=0,
\end{split}
\end{align}
where
\[
\tilde{Z}(y,Z,\epsilon,\delta):=\frac{\mathcal{Z}(y,z_0+\delta Z,\epsilon,\delta)-z_0}\delta
\]
and $\phi_{L_4}$, $\mathcal{Z}$, and $\mathcal{E}$ are defined as in Theorem \ref{firstreturnmap_thm}. Using the Implicit Function Theorem, we show that the system in \eqref{fixed points-1} has a unique solution $(y^\ast_{\epsilon,\delta},Z^\ast_{\epsilon,\delta})$ for $\epsilon>0$ and $\delta>0$ sufficiently small, with $(y^\ast_{\epsilon,\delta},Z^\ast_{\epsilon,\delta})$ close to $(\phi_{L_4}(z_0,0),Z^\ast)$. (An alternative approach is outlined in Remark~\ref{approach-2}.) Note that $\tilde{Z}(\phi_{L_4}(z_0+\delta Z,\delta),Z,0,\delta)=\tilde{\Pi}_0(Z)$, where $\tilde{\Pi}_0(Z)$ tends to $M(Z)$ as $\delta\to 0$, by Proposition~\ref{return-limit-prop}. More generally, we have $\tilde{Z}(y,Z,0,\delta)\to M(y,Z)$ as $\delta\to 0$, where $M(y,Z)$ is a PAM as in \eqref{PAM_proof} or, equivalently, in \eqref{PAM}, with $\hat{x}_4$ and $x_2$ depending on $y$. (This follows easily from the proof of Proposition \ref{return-limit-prop}.) Now, letting $\epsilon \to 0$ and then $\delta\to 0$, the system in \eqref{fixed points-1} reduces to
\begin{align}\label{fixed points-2}
\begin{split}
\phi_{L_4}(z_0,0)-y &=0, \\
M(y,Z)-Z &=0.
\end{split}
\end{align}
Since $Z=Z^*$ is a fixed point of $M(Z)$ -- or, equivalently, of $M(\phi_{L_4}(z_0,0),Z)$ -- it follows that $(y,Z)=(\phi_{L_4}(z_0,0),Z^*)$ is a solution of \eqref{fixed points-2}. The Jacobian determinant of the left-hand side in \eqref{fixed points-2} evaluated at this solution is $1-a_{11}\ne 0$, where we note that $M'(Z^\ast)=a_{11}$ because $Z^\ast<0$. The Implicit Function Theorem now implies the existence of a solution $(y^\ast_{\epsilon,\delta},Z^\ast_{\epsilon,\delta})$ of \eqref{fixed points-1} for $\epsilon,\delta>0$ small. Thus, $(y,z)=(y^\ast_{\epsilon,\delta},z_0+\delta Z^\ast_{\epsilon,\delta})$ is a fixed point of $\Pi$. It is clear that the corresponding periodic orbit is stable. This completes the proof.
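For orientation, we note that in this simplest case the fixed point is explicit: in the notation of Theorem~\ref{existence_thm}, the LAO branch reads $M(Z)=a_{11}Z+a_{12}$ for $Z<0$, so that
\begin{equation*}
Z^\ast=\frac{a_{12}}{1-a_{11}},
\end{equation*}
which is negative precisely when $a_{12}<0$, since $0<a_{11}<1$ here.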
In the general case, where the given MMO has signature $L_1^{s_1}L_2^{s_2}\cdots L_k^{s_k}$,
we have to study fixed points of the $\kappa$-th iterate of the first return map $\Pi$, where $\kappa:=\sum^{k}_{i=1}{\big( L_{i}+s_{i}}\big)$.
In the limit of $\epsilon=0=\delta$, the $\kappa$-th iterate of $\Pi$ can be written as $(y,Z)\to (\phi_{L_4}(z_0,0),M^{\kappa-1}(M(y,Z)))$ in $(y,Z)$-coordinates.
The Jacobian determinant of the corresponding system $\{\phi_{L_4}(z_0,0)-y=0,M^{\kappa-1}(M(y,Z))-Z=0\}$ is then equal to $1-a_{11}^L a_{21}^s$, with $L=\sum^{k}_{i=1} L_{i}$ and $s=\sum^{k}_{i=1} s_{i}$.
Since we supposed that $M^\kappa(Z^\ast)=Z^\ast$ for some $Z^\ast<0$ with $a_{11}^L a_{21}^s<1$ (stability), the result easily follows.
\begin{remark}\label{approach-2}
Alternatively, Theorem~\ref{association_thm} can be proved via the approach taken in \cite{SW04}.
For $\delta>0$ small, the first return map $\Pi$ from Theorem~\ref{firstreturnmap_thm} contracts its domain to the curve $y=\phi_{L_4}(z,\delta)$, in the limit as $\epsilon\to 0$.
Following Theorem 3 in \cite{SW04}, $\Pi$ admits a one-dimensional attracting invariant manifold $y=m_{\epsilon,\delta}(z)$; the dynamics of $\Pi$ on that manifold is given by $\Pi_0$ in the limit of $\epsilon\to 0$, with $\Pi_0$ as defined underneath Theorem~\ref{firstreturnmap_thm}.
In $(y,Z)$-coordinates, $\Pi_0$ is given by the PAM $M(Z)$ for $\delta\to 0$; see
Proposition~\ref{return-limit-prop}.
Now, it suffices to note that hyperbolic fixed points of $M$ persist under perturbation
of $M$ in $\delta$ -- which gives $\Pi_0$ -- and, subsequently, under perturbation of $\Pi_0$ in $\epsilon$. Thus, we find a fixed point of the one-dimensional map $\Pi_{m_{\epsilon,\delta}}$;
the $\kappa$-th iterate of $\Pi$ can be studied in a similar fashion.
\end{remark}
\subsection{Proof of Theorem~\ref{existence_thm}}\label{Proof of existence theorem}
To prove Theorem~\ref{existence_thm}, we introduce a specific sub-family of slow-fast systems
of the form in \eqref{VFfast} that satisfies Assumptions~\ref{A1} through \ref{A5}. Then, we will
show that a given PAM $M$ can be associated to a representative system from that family.
Specifically, we take
\[
F(x,z,\epsilon,\delta)=F(x,z),\ g_1(x,y,z,\epsilon,\delta)=J(x),\ \text{and}\ g_2(x,y,z,\epsilon,\delta)=\delta G(x)+z H(x)
\]
in \eqref{VFfast}. In particular, we take $F(x,z)$ to be a polynomial of degree $9$ in $x$,
restricted to $(x,z)\in (-3,2)\times (-1,1)$; moreover, we choose the functions $G(x)$, $H(x)$, $J(x)$,
and $Q(x)$ such that the integrals to be evaluated in \eqref{PAM_LAO} and \eqref{PAM_SAO} are as
simple as possible, with convenient substitutions inside the integrands. Also, for simplicity, we take
$z_0=0$.
In sum, we hence have
\begin{subequations}\label{VF_represent}
\begin{align}
\begin{split}
F(x,z) &=a_{9}x^{9}+\sum_{k=2}^8a_{k}(z)x^{k},\ \text{with} \ a_{9}= \tfrac{184180}{67741437},
\ a_{8}(z)=\tfrac{1}{8}\big(\tfrac{138135}{90321916}z+\tfrac{3558512}{22580479}\big), \\
a_{7}(z) &=\tfrac{1}{7}\big(\tfrac{751493}{90321916}z+\tfrac{212863}{22580479}\big), \ a_{6}(z)=-\tfrac{1}{6}\big(\tfrac{2793109}{361287664}z+\tfrac{23361467}{22580479}\big), \\
a_{5}(z) &=-\tfrac{1}{5}\big(\tfrac{10284179}{180643832}z+\tfrac{1224990}{22580479}\big),\
a_{4}(z)=\tfrac{1}{4}\big(\tfrac{2417921}{45160958}z+\tfrac{64963913}{22580479}\big), \\
a_{3}(z) &=\tfrac{1}{3}\big(\tfrac{45620545}{361287664}z+\tfrac{459587}{22580479}\big), \ \text{and} \
a_{2}(z)=-\tfrac{1}{2}\big(\tfrac{1}{8}z+2\big),
\end{split}\\
J(x)&=\frac{1}{2}-x, \ \rho(x)=p+x+qx^2, \ Q(x)=\int^{x}_{0} \rho(s) \frac{\partial F}{\partial s}(s, z_0)ds, \\
G(x)&=\Big[\kappa+\lambda\Big(\frac{\alpha Q(x)^2}{2}+\beta Q(x)\Big)\Big](\alpha Q(x)+\beta)\rho(x)J(x), \ \text{and} \\
H(x)&=\rho(x)\big[\alpha Q(x)+\beta\big]J(x).
\end{align}
\end{subequations}
While the choices in \eqref{VF_represent} seem far from simple at first glance, they are made for
the sole purpose of simplifying the requisite calculations that follow. (A related system will also underlie the numerical simulations presented in the next Section~\ref{sec:numerics}; although that system will mostly be identical to the one in \eqref{VF_represent}, the definition of the function $\rho(x)$ will differ for computational efficiency.)
The given PAM $M(Z)$ is determined by its coefficients $a_{ij}$, for $i,j=1,2$, as in the statement of Theorem~\ref{existence_thm}. We will prove that there exists a slow-fast system of the specific form
in \eqref{VF_represent} which is associated with $M$; to that end, we need to show that the system of equations
\begin{align}\label{eq:aij}
a_{ij}=f_{ij}(\alpha,\beta,\kappa,\lambda,p,q),\quad\text{with }i,j=1,2,
\end{align}
has at least one solution $(\alpha_\ast,\beta_\ast,\kappa_\ast,\lambda_\ast,p_\ast,q_\ast)$ which fully determines the vector field in \eqref{VF_represent}.
(Here, the notation $f_{ij}$ is shorthand for the right-hand sides in the expressions for the coefficients $a_{ij}$ of the associated PAM, as obtained from \eqref{PAM_LAO} and \eqref{PAM_SAO}.)
In a first step, we note that $a_{11}$ and $a_{21}$ depend on $(\alpha,\beta,p,q)$ only, i.e., that
\begin{align*}
a_{11} &=g_{11}(\alpha,\beta,p,q)\quad\text{and} \\
a_{21} &=g_{21}(\alpha,\beta,p,q)
\end{align*}
for some new functions $g_{11}$ and $g_{21}$, as well as that $a_{11}$ and $a_{21}$ are positive by definition:
\begin{align*}
a_{11} &= g_{11}(\alpha,\beta,p,q)= \exp\big(\mathcal{A}_{11}(p,q)\alpha + \mathcal{A}_{12}(p,q)\beta\big)\quad\text{and} \\
a_{21} &= g_{21}(\alpha,\beta,p,q)= \exp\big(\mathcal{A}_{21}(p,q)\alpha + \mathcal{A}_{22}(p,q)\beta\big)
\end{align*}
for some functions $\mathcal{A}_{ij}(p,q)$ which are, in fact, polynomial in $p$ and $q$.
Taking logarithms, we find a linear system in the unknowns $(\alpha,\beta)$ whose principal matrix has determinant $\Delta(p,q)$.
With the aid of the computer algebra package {\sc Maple}, we compute $\Delta$ to be a polynomial
of degree $3$ with positive coefficients. Restricting to the parameter domain $\{p>0,q>0\}$, we conclude that $\Delta$ is non-zero and, hence, that the above system has a solution
\[
\alpha = \alpha_\ast(p,q,a_{11},a_{21})\quad\text{and}\quad\beta = \beta_\ast(p,q,a_{11},a_{21}).
\]
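For concreteness, this step can be sketched numerically as follows; the entries of the matrix $(\mathcal{A}_{ij}(p,q))$ below are placeholder values at some fixed $(p,q)$, not the polynomials produced by {\sc Maple}.
\begin{verbatim}
# Sketch: recover (alpha, beta) from prescribed (a11, a21) by taking logarithms;
# the matrix entries A_ij(p,q) below are placeholders at a fixed choice of (p,q).
import numpy as np

A = np.array([[1.3, -0.4],    # [A_11(p,q), A_12(p,q)]
              [0.2,  0.9]])   # [A_21(p,q), A_22(p,q)]
a11, a21 = 0.3, 0.9           # prescribed PAM coefficients

alpha, beta = np.linalg.solve(A, np.log([a11, a21]))
print(np.exp(A @ np.array([alpha, beta])))   # sanity check: reproduces (a11, a21)
\end{verbatim}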
The next part of the proof is more intricate, and again relies on symbolic computation in {\sc Maple}. Substituting the above expressions for $(\alpha,\beta)=(\alpha_\ast,\beta_\ast)$ into
\eqref{eq:aij}, we obtain
\begin{align}\label{eq:aij2}
a_{12} &=f_{12}(\alpha_\ast,\beta_\ast,\kappa,\lambda,p,q) \\
a_{22} &=f_{22}(\alpha_\ast,\beta_\ast,\kappa,\lambda,p,q),
\end{align}
which is a linear system in $(\kappa,\lambda)$ whose principal matrix has determinant $\tilde\Delta(p,q,a_{11},a_{21})$. The expression for $\tilde\Delta$ can be written as
\begin{align*}
\tilde\Delta(p,q,a_{11},a_{21})=\frac{N( p,q,a_{11},a_{21},\ln a_{11},\ln a_{21},{\rm e}^{E_1},{\rm e}^{E_2})}{\Delta( p,q)},
\end{align*}
where $\Delta$ is defined as above, the exponents of the exponential terms ${\rm e}^{E_i}$ are of the form $E_i=E_i(\alpha,\beta,p,q)$, and $N$ is polynomial in all its $8$ arguments. (In fact,
$N$ has degree 1 with respect to $e^{E_1}$ and $e^{E_2}$.)
It now suffices to show that, for each choice of $(a_{11},a_{21})$, there is at least one choice of $(p,q)$, with $p>0$ and $q>0$, for which $N$ is non-zero. Given the complex algebraic form of $N$, that is a cumbersome task. However, it suffices to argue that almost any choice of $(p,q)$ will be
admissible.
We will outline that argument here. First, we write $N = N_0 + N_1 e^{E_1} + N_2 e^{E_2}$, where
each $N_i$ is a polynomial expression in $(p,q,a_{11},a_{21})$. Using {\sc Maple}, we verify that
\[
\lim_{p\to\pm \infty}\frac{E_i}{p} = \frac{R_i(q,\ln a_{11},\ln a_{21})}{S(q)},
\]
for some strictly positive degree-$2$ polynomial $S$ and some degree-$1$ polynomials (in $q$) $R_1$ and $R_2$. As there is only one choice for $q$ where the asymptotics of $E_1$ coincides with that of $E_2$, we restrict to the generic case where the two limits are strictly different. Then, there are six possibilities,
\begin{subequations}
\begin{align}
\textstyle\lim_{p\to\infty}\frac{E_1}{p} &< \textstyle\lim_{p\to \infty}\frac{E_2}{p} < 0, \\
\textstyle\lim_{p\to\infty}\frac{E_1}{p} &<0 \leq\textstyle\lim_{p\to\infty}\frac{E_2}{p},\quad\text{and} \\
0 &\leq\textstyle\lim_{p\to\infty}\frac{E_1}{p}<\textstyle\lim_{p\to\infty}\frac{E_2}{p},
\end{align}
\end{subequations}
as well as the three possibilities obtained by swapping $E_1$ and $E_2$. Let us consider the third case as an example: in that scenario, as $p\to\infty$, the contributions of $N_0$ and $N_1$ in $N$ become negligible, and it suffices to see whether or not one can find $q$ for which $N_2$ is non-zero. Equally, in the first two scenarios, we see that the contribution of $N_1$ becomes dominant in the limit as $p\to-\infty$.
It now suffices to observe that both $N_1/p$ and $N_2/p$ are asymptotic to a quadratic polynomial in $q$ for large $|p|$ and, hence, that there are many choices of $(p,q)$ for which these expressions are non-zero. Hence, at least for $|p|$ sufficiently large, one can solve Equation~\eqref{eq:aij2} for
$(\kappa,\lambda)$, which, in sum, gives a solution $(\alpha_\ast,\beta_\ast,\kappa_\ast,\lambda_\ast)$ to \eqref{eq:aij}. Hence, generically, given a PAM $M$, one can choose
$(p_\ast,q_\ast)$ such that there exists a slow-fast vector field within the family defined by \eqref{VF_represent} to which $M$ is associated. This completes the proof.
\begin{remark}
In practice, one would not take $|p|$ too large, as that would introduce another layer of time scale separation in the system.
\end{remark}
\section{Numerical verification}\label{sec:numerics}
Finally, in this section, we give a numerical verification of two of our main results, Theorems~\ref{association_thm} and \ref{existence_thm}.
To that end, we consider the family of one-dimensional PAMs of the form
\begin{equation} \label{eq:PAM_prototype}
M(Z)=\begin{cases}
a_{11} Z + a_{12} & \quad\text{for }Z<0, \\
a_{21} Z + a_{22} & \quad\text{for }Z>0,
\end{cases}
\end{equation}
where $a_{ij}=f_{ij}(\alpha,\beta,\kappa,\lambda,p,q)$ with $i,j=1,2$, as introduced in~\eqref{eq:aij}.
For the calculations of the integrals appearing in (\ref{PAM_LAO}) and (\ref{PAM_SAO}), we require the following $x$-values, which are obtained from (\ref{VF_represent}) with $z_0=0$:
\[
\hat{x}_{4}= -\tfrac{5}{2},\ x_{1}=-2,\ x_{2}=-1,\ x_{3}=0,\ x_{4}=1,\ \hat{x}_{3}=\tfrac{3}{2},\ \text{and}\ \hat{x}_{1}=\tfrac{8}{5};
\]
see Figure \ref{fig:critical_manifold}.
Next, and as outlined in the proof of Theorem~\ref{existence_thm} in Section~\ref{Proof of existence theorem}, we have to choose a suitable function $\rho$ in \eqref{VF_represent}. Rather than taking
$\rho$ within the family specified there, we pick the numerically more convenient function
\begin{equation}\label{eq:rho}
\rho(x) = \bigg( \frac{552540}{22580479}x^4+\frac{2453432}{22580479}x^3-\frac{4141461}{22580479}x^2-\frac{11520033}{22580479}x+1 \bigg)^{-1}.
\end{equation}
The choice in \eqref{eq:rho} allows us to determine the four pivotal quantities
$\alpha$, $\beta$, $\kappa$, and $\lambda$ in \eqref{VF_represent}, in agreement with our
expectation that a wide range of functions $\rho(x)$ will yield an admissible solution
$(\alpha_\ast,\beta_\ast,\kappa_\ast,\lambda_\ast)$. That solution then specifies a
three-dimensional slow-fast system from the family determined by \eqref{VF_represent}
that is associated to the given PAM in \eqref{eq:PAM_prototype}.
Below, we showcase a number of examples which verify that the resulting mixed-mode time series
in that system have identical signature to the corresponding periodic orbits for the PAM $M$, thus
verifying Theorem~\ref{association_thm}. Here, we note that the functions $\rho$ and $J$ in
\eqref{VF_represent} are independent of $\alpha$, $\beta$, $\kappa$ and $\lambda$, and that they
hence do not change with the signature. The functions $Q$, $G$, and $H$, on the other hand,
are signature-dependent.
\begin{remark}
Given that $F(x,z)$ in \eqref{VF_represent} is a ninth-degree polynomial in $x$,
we rescaled $x$ and $y$ as
\[
x\mapsto\frac27x \quad \text{and} \quad y\mapsto\frac32y
\]
in our visualisation in order to restrict the area of interest in $x$ to the interval $[-1,1]$.
(We note that $z$ is still represented by the rescaled variable $Z$, as defined by the change of coordinates in \eqref{eq:rescale}, which was used in both Sections~\ref{statement of the results} and \ref{sec:derivePAM}.)
\end{remark}
\subsection{Examples: MMOs of various signatures}
In a first step, we fix the coefficients $a_{11}$, $a_{21}$, and $a_{22}$ in the definition of the
PAM in \eqref{eq:PAM_prototype}, varying only $a_{12}$ as the ``bifurcation parameter". In Table~\ref{table:signatures_PAMS_abkl} below, we list two sequences of mixed-mode signatures that are obtained upon variation of $a_{12}$, with $a_{11}$, $a_{21}$, and $a_{22}$ fixed as stated there. For completeness, and to illustrate the two-way correspondence established in Theorems~\ref{association_thm} and \ref{existence_thm}, we also give the corresponding pivotal quantities $\alpha$, $\beta$, $\kappa$ and $\lambda$ in the definition of the associated vector field in \eqref{VF_represent}.
(We note that, given $a_{11}$, $a_{21}$, and $a_{22}$, $\alpha$ and $\beta$ do not change as $a_{12}$ is varied, in contrast to $\kappa$ and $\lambda$, as is to be expected from the proof of Theorem~\ref{existence_thm}.)
In particular, we thus observe an unfolding of a ``regular'' sequence of signatures which are either
of the form $\{1^s\}$ or $\{L^1\}$ in the bifurcation parameter $a_{12}$.
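The mixed-mode signature of the attracting orbit of the PAM in \eqref{eq:PAM_prototype} can also be read off by direct iteration, independently of the slow-fast system; a minimal Python sketch is given below, applied here to the parameter values of the $1^3$ row of Table~\ref{table:signatures_PAMS_abkl}.
\begin{verbatim}
# Sketch: iterate the PAM (eq:PAM_prototype) and read off the MMO signature by
# counting consecutive LAO iterates (Z < 0) and SAO iterates (Z > 0).
def make_pam(a11, a12, a21, a22):
    return lambda Z: a11 * Z + a12 if Z < 0 else a21 * Z + a22

def signature(M, Z0=-1.0, transient=500, length=40):
    Z = Z0
    for _ in range(transient):                 # discard the transient
        Z = M(Z)
    symbols = []
    for _ in range(length):
        symbols.append('L' if Z < 0 else 's')  # Z<0: LAO next, Z>0: SAO next
        Z = M(Z)
    blocks, i = [], 0
    while i < len(symbols):                    # compress into blocks L^..., s^...
        j = i
        while j < len(symbols) and symbols[j] == symbols[i]:
            j += 1
        blocks.append((symbols[i], j - i))
        i = j
    return blocks

M = make_pam(0.3, 7.0, 0.9, -2.0)              # table row "1^3"
print(signature(M))                            # alternating blocks of 1 'L' and 3 's'
\end{verbatim}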
A selection of (periodic) MMO orbits, both for the PAM in \eqref{eq:PAM_prototype} and the associated vector field, is illustrated graphically in the figures below. We emphasise that we observe the same signature in all three (state) variables $x$, $y$, and $z$ in \eqref{VFfast}, which is due to
the geometry of the underlying critical manifold $\mathcal{S}$; see Figure~\ref{fig:MMO1-3}
below, where we highlight the signature $1^3$ as one particular example.
\begin{table}[H]
\setlength{\tabcolsep}{2pt}
\centering
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\hline
\tiny{\textbf{Signature}}&$a_{11}$&$a_{12}$&$a_{21}$&$a_{22}$ &\textbf{$\alpha$}&\textbf{$\beta$}&\textbf{$\kappa$} &\textbf{$\lambda$} \\
\hline
\hline
$1^1$ &0.3 &1 &0.9 &-2 &0.8743 &0.0240 &27.2674 &-64.5764 \\[2pt]
\hline $1^2$ &0.3 &3 &0.9 &-2 &0.8743 &0.0240 &28.2364 &-73.1866 \\[2pt]
\hline $1^3$ &0.3 &7 &0.9 &-2 &0.8743 &0.0240 &30.1744 &-90.4070 \\[2pt]
\hline $1^4$ &0.3 &10 &0.9 &-2 &0.8743 &0.0240 &31.6279 &-103.3223 \\[2pt]
\hline $1^5$ &0.3 &12 &0.9 &-2 &0.8743 &0.0240 &32.5969 &-111.9325 \\[2pt]
\hline $1^6$ &0.3 &15 &0.9 &-2 &0.8743 &0.0240 &34.0504 &-124.8478 \\[2pt]
\hline $1^7$ &0.3 &20 &0.9 &-2 &0.8743 &0.0240 &36.4729 &-146.3733 \\[2pt]
\hline $1^8$ &0.3 &25 &0.9 &-2 &0.8743 &0.0240 &38.8954 &-167.8987 \\[2pt]
\hline
\hline
\hline $1^1$ &0.9 &3 &0.4 &-3 &-0.5065 &1.0238 &3.2091 &-7.7202 \\[2pt]
\hline $2^1$ &0.9 &1.5 &0.4 &-3 &-0.5065 &1.0238 &3.9766 &-4.4118 \\[2pt]
\hline $3^1$ &0.9 &1 &0.4 &-3 &-0.5065 &1.0238 &4.2325 &-3.3088 \\[2pt]
\hline $4^1$ &0.9 &0.7 &0.4 &-3 &-0.5065 &1.0238 &4.3860 &-2.6471 \\[2pt]
\hline $5^1$ &0.9 &0.5 &0.4 &-3 &-0.5065 &1.0238 &4.4883 &-2.2059 \\[2pt]
\hline $6^1$ &0.9 &0.4 &0.4 &-3 &-0.5065 &1.0238 &4.5395 &-1.9853 \\[2pt]
\hline $7^1$ &0.9 &0.3 &0.4 &-3 &-0.5065 &1.0238 &4.6162 &-1.7647 \\[2pt]
\hline $8^1$ &0.9 &0.25 &0.4 &-3 &-0.5065 &1.0238 &4.6418 &-1.6544 \\[2pt]
\hline
\end{tabular}
\caption{Signatures of the form $L^1$ and $1^s$ generated by (\ref{VF_represent}) and the associated PAM in \eqref{eq:PAM_prototype}.}
\label{table:signatures_PAMS_abkl}
\end{table}
\begin{figure}
\caption{MMO of signature $1^1$: (a) piecewise affine map and (b) associated three-dimensional slow-fast system.}
\label{fig:MMO1-1}
\end{figure}
\begin{figure}
\caption{MMO of signature $1^3$: (a) piecewise affine map and (b) associated three-dimensional slow-fast system, with time series of $x$, $y$, and $Z$ in (c) through (e).}
\label{fig:MMO1-3}
\end{figure}
\begin{figure}
\caption{MMO of signature $3^1$: (a) piecewise affine map and (b) associated three-dimensional slow-fast system.}
\label{fig:MMO3-1}
\end{figure}
\begin{figure}
\caption{MMO of signature $4^1$: (a) piecewise affine map and (b) associated three-dimensional slow-fast system.}
\label{fig:MMO4-1}
\end{figure}
\begin{figure}
\caption{MMO of signature $1^5$: (a) piecewise affine map and (b) associated three-dimensional slow-fast system.}
\label{fig:MMO1-5}
\end{figure}
\begin{figure}
\caption{MMO of signature $1^8$: (a) piecewise affine map and (b) associated three-dimensional slow-fast system.}
\label{fig:MMO1_8}
\end{figure}
\subsection{The At Most \& At Least Lemma}\label{sec:atmost+atleast}
In this section, we give conditions on the coefficients in the definition of the PAM $M$ in \eqref{eq:PAM_prototype} that guarantee the occurrence of certain numbers of
LAOs ($Z<0$) or SAOs ($Z>0$) in a periodic MMO generated by the PAM $M$ in
\eqref{eq:PAM_prototype}. To that end, we apply results of \cite{RPB12}; in a first step, we
transform $M$ into the form considered there, via $a = a_{11}$, $b = a_{21}$, $\mu = a_{12}$, and $l= a_{22}-a_{12}$:
\begin{equation}
M(Z)=\begin{cases}
aZ + \mu & \quad\text{for }Z<0, \\
bZ + \mu + l & \quad\text{for }Z>0.
\end{cases}
\label{trans_PAM}
\end{equation}
To ensure the stability of the resulting MMOs, we assume that $a$ and $b$ take values in the interval $(0,1)$. Then, the parameter $l$ represents the height of the jump at $Z=0$, while the parameter $\mu$ will be varied. As explained in \cite{RPB12}, we restrict to $0< \mu < -l$, in which case \eqref{trans_PAM} has no fixed points and periodic orbits are possible.
The following result then gives conditions on the control parameter $\mu$ for at most, or at least, $L$ consecutive LAOs, respectively $s$ SAOs, to appear in a periodic MMO for $M$.
\begin{proposition}[At Most \& At Least Lemma \cite{RPB12}] Let $M$ be as defined in \eqref{trans_PAM}. Then, the following statements hold true.
\begin{itemize}
\item[(1)] When
$\mu \leq \frac{ -la^{L-1} }{ a^{L-1}b + \sum^{L-1}_{k=0}a^{k} }=:\mu_1$,
then at least $L$ consecutive LAOs appear in a periodic MMO of $M$.
When
$\mu > \frac{ -la^{L} }{ \sum^{L}_{k=0}a^{k} }=:\mu_2$,
then at most $L$ consecutive LAOs appear in a periodic MMO of $M$.
\item[(2)] When
$\mu < \frac{-l \sum^{s-1}_{k=0} b^k}{ \sum^{s}_{k=0} b^k }$,
then at most $s$ consecutive SAOs appear in a periodic MMO of $M$.
When
$\mu\ge \frac{-l\big[\sum^{s-1}_{k=0}b^{k}+b^{s-1}(a-1)\big] }{ b^{s-1}a + \sum^{s-1}_{k=0}b^{k}}$,
then at least $s$ consecutive SAOs appear in a periodic MMO of $M$.
\label{atmost+atleast}
\end{itemize}
\end{proposition}
Given Proposition~\ref{atmost+atleast}, it can be shown \cite{RPB12} that for $\mu\in(\mu_2,\mu_1]$,
the only possible periodic MMO for $M$ is the one with signature $L^1$. Similarly, we can determine intervals for $\mu$ on which periodic MMOs with signature $1^s$ exist. We summarise a sample of MMO signatures, and the corresponding parameter regimes, in Table~\ref{table:param_int} below. Here, the relevant $\mu$-intervals are obtained from Proposition~\ref{atmost+atleast}; throughout, we find agreement between the theory (``Predicted $\mu$") and our numerics (``Actual $\mu$").
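The thresholds in Proposition~\ref{atmost+atleast} are elementary to evaluate; the short Python sketch below reproduces, up to rounding, the predicted $\mu$-intervals for the rows $2^1$ and $1^3$ of Table~\ref{table:param_int}.
\begin{verbatim}
# Sketch: evaluate the thresholds of the At Most & At Least Lemma and compare
# with the predicted mu-intervals listed in the table below.
def lao_thresholds(a, b, l, L):
    """mu_1 (at least L consecutive LAOs) and mu_2 (at most L consecutive LAOs)."""
    mu1 = -l * a**(L - 1) / (a**(L - 1) * b + sum(a**k for k in range(L)))
    mu2 = -l * a**L / sum(a**k for k in range(L + 1))
    return mu1, mu2

def sao_thresholds(a, b, l, s):
    """Lower bound (at least s consecutive SAOs) and upper bound (at most s)."""
    at_least = -l * (sum(b**k for k in range(s)) + b**(s - 1) * (a - 1)) \
               / (b**(s - 1) * a + sum(b**k for k in range(s)))
    at_most = -l * sum(b**k for k in range(s)) / sum(b**k for k in range(s + 1))
    return at_least, at_most

print(lao_thresholds(0.9, 0.8, -7.2, 2))   # row "2^1": approx (2.4733, 2.1520)
print(sao_thresholds(0.3, 0.9, -9.0, 3))   # row "1^3": approx (6.5313, 7.0921)
\end{verbatim}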
{\renewcommand{\arraystretch}{1.2}
\begin{table}[h!]
\setlength{\tabcolsep}{2pt}
\centering
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|}
\hline
\hline
\tiny{\textbf{Signature}} &$a$ &$b$ &$l$ &\tiny{\textbf{Predicted $\mu$}} &\tiny{\textbf{Actual $\mu$}} &\textbf{$\alpha$} &\textbf{$\beta$} &\textbf{$\kappa$} &\textbf{$\lambda$} \\ [0.5ex]
\hline
\hline
$1^2$ &0.3 &0.9 &-5 &$\mu \in \big(2.9262,3.5055 \big]$ &$3$ &0.8743&0.0241&28.2364&-73.1866 \\[2pt]
\hline $1^3$ &0.3 &0.9 &-9 &$\mu \in \big(6.5313,7.0921\big]$ &$7$ &0.8743&0.0241&30.1744&-90.4070 \\[2pt]
\hline $1^4$ &0.3 &0.9 &-11 &$\mu \in \big(8.8076,9.2376\big]$ &$9$ &0.8743&0.0241&31.1434&-99.0172 \\[2pt]
\hline $1^8$ &0.3 &0.9 &-2.28 &$\mu \in (2.0932,2.1197]$ &$2.1$ &0.8743&0.0241&3.4279&-14.4651 \\[2pt]
\hline $1^9$ &0.3 &0.9 &-2.68 &$\mu \in (2.4955,2.5205]$ &$2.5$ &0.8743&0.0241&3.6217&-16.1861 \\[2pt]
\hline $1^{25}$ &0.5 &0.94 &-15.25 &$\mu \in \big(14.9889,15.0064\big]$ &$15$&0.5025&0.0152&15.8532&-177.4797 \\[2pt]
\hline $2^1$ &0.9 &0.8 &-7.2 &$\mu \in \big[2.1520,2.4732\big)$ &$2.2$ &-0.0610&0.2430&24.4916&-96.1819 \\[2pt]
\hline $3^1$ &0.9 &0.8 &-6.5 &$\mu \in \big[1.3778,1.5678\big)$ &$1.5$ &-0.0610&0.2430&24.5673&-81.8569 \\[2pt]
\hline $6^1$ &0.9 &0.8 &-5.6 &$\mu \in \big[0.5704,0.6410\big)$ &$0.6$ &-0.0610&0.2430&24.6646&-63.4391 \\[2pt]
\hline $8^1$ &0.9 &0.9 &-6.5 &$\mu \in [0.4675,0.5075)$ &$0.5$ &0.0147&0.1104&65.5190&-462.9354 \\[2pt]
\hline $9^1$ &0.9 &0.9 &-9.6 &$\mu \in [0.5710,0.6344)$ &$0.6$ &0.0147&0.1104&98.1512&-683.7200 \\[2pt]
\hline
\end{tabular}
\caption{Signatures of the form $L^1$ and $1^s$ generated by (\ref{VF_represent}) and the corresponding $\mu$-intervals, as determined from Proposition~\ref{atmost+atleast}.}
\label{table:param_int}
\end{table}
}
Given the above, it is natural to ask whether MMOs with signature $L^s$ for $L>1$
and $s>1$ can be found in the present context. Following again \cite{RPB12}, it can be shown
that stable periodic MMOs with such signatures cannot occur; we outline the argument here for completeness.
In \cite{RPB12}, an orbit $\mathcal{O}$ is called \textit{admissible} if the $\mu$-interval for which $\mathcal{O}$ exists is non-empty. Then, their Lemma 2 states that ``\textit{for any admissible orbit $\mathcal{O}$, its pattern cannot contain consecutive $L$s and consecutive $R$s simultaneously}'', where $L$ and $R$ denote numbers of LAOs and
SAOs in $\mathcal{O}$, respectively. The proof of Lemma~2 is by contradiction: if one assumes that an orbit with signature $L^R$ is actually possible, one concludes that, necessarily, $a,b>1$ in
Equation~\eqref{trans_PAM}; however, that contradicts the underlying assumption of $a,b \in (0,1)$ which is imposed in \cite{RPB12}.
In fact, since $a=a_{11}$ and $b=a_{21}$, $a,b>1$ would also imply instability of the corresponding MMO in \eqref{trans_PAM}; recall the proof of Theorem~\ref{association_thm} in Section~\ref{proof of assosciation theorem}. Hence, it is natural to assume that $0<a,b<1$ in our own analysis, as well, in which case the existence of more ``exotic" stable periodic MMOs with general signature $L^s$ can be ruled out.
\subsection{Crossover signatures}\label{sec:crossover}
Given our numerical results in the previous two subsections, it is natural to ask what happens between two ``consecutive" signatures, i.e., how the shape of an MMO changes as orbits cross over from a cycle of signature $L^{s}$ to one of signature $L^{s+1}$ or, equivalently, from one of signature $L^{s}$ to one of signature $(L+1)^{s}$.
Motivated again by results of \cite{RPB12} -- see, in particular, Lemma 4, Figure 3, and Note 2 therein -- we observe the existence of so-called ``crossover signatures'' inside ``intermediate neighbourhoods'' for some of the corresponding parameters in the definition of the transformed PAM $M$ in \eqref{trans_PAM}. (In \cite{RPB12}, the existence of similar regions, named ``molecular regions'' there, is established.) These observations lead to the conclusion that MMO signatures are not arranged in a monotone fashion, as far as the number of LAOs or SAOs therein is concerned. For illustration, we showcase a simple case here, namely, an MMO of signature $1^4$, which can be obtained from the following PAM,
\begin{equation}
M_{1^{4}}(Z)=\begin{cases}
0.9 Z + 6 & \quad\text{for }Z<0, \\
0.85Z - 1 & \quad\text{for }Z>0,
\end{cases}
\end{equation}
with corresponding parameter values $\alpha \approx -0.0220$, $\beta \approx 0.1747$, $\kappa \approx 7.7321$, and $\lambda \approx -233.3068$ in the associated slow-fast vector field that is determined by \eqref{VF_represent}.
It is straightforward to obtain an MMO with the ``consecutive signature", namely $1^5$, in the following PAM:
\begin{equation}
M_{1^{5}}(Z) = \begin{cases}
0.9 Z + 7.2 & \quad\text{for }Z<0, \\
0.85Z - 1 & \quad\text{for }Z>0,
\end{cases}
\end{equation}
with parameter values $\alpha \approx -0.0220$, $\beta \approx 0.1747$, $\kappa \approx 7.9239$, and $\lambda \approx -275.3021$.
Noting that the numerical values of the parameters $\alpha$ and $\beta$ that determine $M_{1^{4}}$
and $M_{1^{5}}$ are almost identical while $\kappa$ and $\lambda$ vary, we take
$\kappa \in (7.7321, 7.9239)$ and $\lambda \in (-275.3021, -233.3068)$, which generates
the ``crossover signature'' $1^{4}1^{5}$ for $\mu=6.5$, as shown in Figure~\ref{fig:crossover1415}(a).
Here, it is important to emphasise that these mixed signatures do not contradict the At Most \& At Least Lemma, Proposition~\ref{atmost+atleast}. Rather, for a fixed choice of the pivotal quantities
$\alpha$, $\beta$, $\kappa$, and $\lambda$, we obtain a hierarchy of disjoint $\mu$-intervals that
correspond to mixed-mode signatures of the form $L^s$ from Proposition~\ref{atmost+atleast}.
``Crossover" signatures are found for $\mu$ chosen in the complements of those intervals; from a practical point of view, our choice of the pivotal quantities is guided by where the adjacent, ``simple'' signatures are found, whereupon $\mu$ can be fixed from the At Most \& At Least Lemma. See
Table~\ref{table:param_int} for a specification of the corresponding $\mu$-intervals.
\begin{figure}
\caption{Transition from MMOs with signature $1^4$ to $1^5$ via the ``crossover'' signature $1^41^5$ in (a) the PAM defined in \eqref{eq:PAM_prototype} and (b) the associated three-dimensional slow-fast system.}
\label{fig:crossover1415}
\end{figure}
Following the same procedure as above, we were able to detect intermediate neighbourhoods for the signature $2^1$ crossing over to $3^1$; see Figure~\ref{fig:crossover2131} for an illustration.
Again, we first consider a PAM which realises the signature $2^1$:
\begin{equation}
M_{2^1}(Z)=\begin{cases}
0.9 Z + 2.2 & \quad\text{for }Z<0, \\
0.8 Z - 5 & \quad\text{for }Z>0,
\end{cases}
\end{equation}
where $\alpha = -0.0610$, $\beta = 0.2430$, $\kappa = 24.4916$, and $\lambda = -96.1819$, as well as
a map which generates the ``consecutive" signature $3^1$:
\begin{equation}
M_{3^1}(Z)=\begin{cases}
0.9 Z + 1.5 & \quad\text{for }Z<0, \\
0.8 Z - 5 & \quad\text{for }Z>0,
\end{cases}
\end{equation}
with $\alpha = -0.0610$, $\beta = 0.2430$, $\kappa = 24.5673$, and $\lambda = -81.8569$.
Naturally, here too the numerical values of the parameters $\alpha$ and $\beta$ that determine $M_{2^1}$ and $M_{3^1}$ are almost indistinguishable. If we then pick $\kappa\in (24.4916, 24.5673)$ and $\lambda\in (-96.1819, -81.8569)$, we observe an MMO with crossover signature $2^{1}3^{1}$ for $\mu=1.8$, as shown in Figure~\ref{fig:crossover2131}.
\begin{figure}
\caption{Transition from MMOs with signature $2^1$ to $3^1$ via the ``crossover'' signature $2^13^1$ in (a) the PAM defined in \eqref{eq:PAM_prototype} and (b) the associated three-dimensional slow-fast system.}
\label{fig:crossover2131}
\end{figure}
Note that for both examples, we took $a_{11}=a<1$ and $a_{21}=b<1$, in accordance with \cite{RPB12}; a more general choice of coefficients, with $a^{L}b^s<1$, still yields stable mixed-mode orbits for the PAM $M$ in \eqref{trans_PAM}. However, as stated above, that scenario is excluded in \cite{RPB12} and hence cannot be considered within the framework of the At Most \& At Least Lemma, Proposition~\ref{atmost+atleast}.
\begin{remark}\label{rem:crossover}
Numerical evidence suggests that, between two ``simple'', consecutive signatures, we only encounter combinations of the two adjacent signatures: for example, picking $1^s$ and $1^{s+1}$, in the ``intermediate neighbourhoods'' we can only expect MMOs of signature $(1^{s})^{i}(1^{s+1})^{j}$, with finite multiplicities $0<i,j<\infty$. This last assertion is easier to observe for small $s$, since the parameter intervals corresponding to such ``crossover'' signatures tend to shrink with increasing $s$. An analogous assertion applies to MMOs with signature $L^1$.
\end{remark}
\section{Conclusions}\label{Conclusions}
In this paper, we have introduced a novel ``jump mechanism'' for the generation of mixed-mode oscillations (MMOs) in a family of three-dimensional slow-fast systems of singular perturbation type.
In marked contrast to the canard-based mechanism that is typically invoked to explain mixed-mode dynamics in such systems, we do not assume the presence of a folded singularity on any of the fold lines in our system at which normal hyperbolicity is lost;
in fact, we require all such fold lines to consist of jump points only.
Correspondingly, the SAO component in the resulting mixed-mode trajectories is then also of relaxation type, with an amplitude that is $O(1)$ in the singular perturbation parameter $\epsilon$.
At this point, we remark that it is possible to obtain quantitative information on the MMOs constructed here. In particular, the amplitudes of both the LAO and SAO components can be determined by observing the height of the fold lines $L_1$ and $L_3$ with respect to the $y$-coordinate. The corresponding periods can be approximated from the transition times on normally attracting portions of the critical manifold in a manner similar to that of Section~\ref{sec:derivePAM}; given a mixed-mode trajectory of signature $L^s$, the overall period would be found by multiplying the periods of one LAO and one SAO by $L$ and $s$, respectively, and adding the results. The details are left to the interested reader.
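Schematically, writing $T_{\rm LAO}$ and $T_{\rm SAO}$ for the leading-order periods of one LAO and one SAO, respectively (a notation introduced here merely for the purposes of this remark), the resulting approximation for the period of a trajectory with signature $L^s$ reads
\begin{equation*}
T_{L^s}\approx L\,T_{\rm LAO}+s\,T_{\rm SAO}.
\end{equation*}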
As our principal result, we have established a two-way correspondence between our family of slow-fast systems and a class of one-dimensional piecewise affine maps (PAMs) which are naturally associated to each other.
In particular, we have shown that for every such PAM that exhibits an MMO with a certain given signature, there exists a slow-fast system that can be associated to it and {\it vice versa}, given that certain conditions are met.
Thus, we have reduced the study of MMOs in a relatively broad family of singularly perturbed three-dimensional systems to the well-developed theory of one-dimensional maps.
We were able to verify our own results numerically, showing that they are consistent with those obtained in \cite{RPB12} -- and, in particular, with the At Most \& At Least Lemma -- in the process.
Naturally, a number of questions arise from the present analysis.
The first of these concerns an in-depth investigation of a neighbourhood of the singular orbit $\Gamma_0$ defined in Assumption~\ref{A5}, as well as of the corresponding discontinuity in the associated PAM, where canard phenomena could occur. We conjecture that this discontinuity gives rise to a canard explosion which determines the interchange between LAOs and SAOs in the resulting mixed-mode time series.
Next, it seems natural to comment on the interplay between $\epsilon$ and $\delta$ in
our prototypical family of slow-fast systems in Equation~\eqref{VFfast}, which we restate below for
reference:
\begin{align*}
x' &=y-F(x,z,\epsilon,\delta)=:f(x,y,z,\epsilon,\delta), \\
y' &=\epsilon g_1(x,y,z,\epsilon,\delta), \\
z' &= \epsilon g_2(x,y,z,\epsilon,\delta).
\end{align*}
The form of the above two-parameter singular perturbation problem
implies that our choice of $\delta$ will mostly affect the slow flow along normally hyperbolic (attracting) portions of the corresponding critical manifold $\mathcal{S}$,
away from the fold lines $L_i$. Conversely, it also appears that any restrictions on the magnitude of $\epsilon$ are only due to the jump behaviour at those lines; both observations are corroborated by
numerical experimentation in {\sc Maple}.
Now, the initial rescaling of $z$ with respect to $\delta$, as illustrated in Section~\ref{sec:derivePAM}, implies that the relevant $z$-window for our analysis is $\mathcal{O}(\delta)$ wide.
While $Z<0$ -- in the rescaled $Z$-variable -- results in LAOs in the corresponding mixed-mode time series and $Z>0$ yields SAOs, that classification is only valid in the singular limit, i.e.\ for $\epsilon = 0$. To specify the interplay between $\delta$ and $\epsilon$ away from that limit, one
would need to ``blow up'' (desingularise) the flow of \eqref{VFfast} in the vicinity of the degenerate
point $P_c$; as $\epsilon$ would receive triple the weight of
$\delta$ in the corresponding blow-up transformation, it would then follow that we not only have to avoid $P_c$ itself, but also an $\mathcal{O}(\epsilon^{1/3})$ ``hole'' around that
point, in order not to have to consider canard phenomena in our analysis. When $\epsilon^{1/3} > \delta$, on the other hand, the Poincar\'e map $\Pi$ associated to \eqref{VFfast} is likely to return the flow inside this \textit{canard hole}.
Correspondingly, in Section~\ref{sec:numerics}, we fixed $\epsilon = 10^{-7}$ in our numerics, as we had experimentally concluded that the optimal choice of $\delta$ is somewhere in the region of $5 \times 10^{-3}$, in agreement with the above reasoning.
The next question that comes to mind, which concerns the patterns that the resulting MMO signatures follow, is motivated by results of Freire and Gallas in \cite{FG11}. There, it is shown that the number of SAOs in a given mixed-mode orbit is not arbitrary, but that it is organised in a pattern dictated by a so-called Stern-Brocot tree. It would seem natural to investigate whether similar number-theoretical
arguments can be applied in the context of the family of slow-fast systems studied in the present work. Preliminary analysis seems to suggest that the ``crossover'' signatures observed between
simple patterns of the form $1^s$ or $L^1$ are relatively regular; recall Remark~\ref{rem:crossover}.
Our final remark concerns the prototypical family of slow-fast systems in \eqref{VFfast}, and specifically those which have the property in \eqref{eq:g2}. We are confident that, near the fold lines $L_{1}$, $L_{3}$, and $L_{4}$, a suitable Fenichel-like normal form can be derived. Should that expectation be verified, it would follow that one could apply our approach to connect the well-developed theory of one-dimensional PAMs with the vast family of slow-fast systems that can be brought into said normal form.
\end{document}
\begin{document}
\title{Between graphical zonotope and graph--associahedron}
\begin{abstract}
This manuscript introduces a finite collection of generalized
permutohedra associated to a simple graph. The first polytope of this
collection is the graphical zonotope of the graph and the last is the
graph--associahedron associated to it. We describe the weighted integer
points enumerators for polytopes in this collection as Hopf algebra
morphisms of combinatorial Hopf algebras of decorated graphs. In the last
section, we study some properties related to $\mathcal{H}$--polytopes.
\noindent \textbf{Keywords}:
generalized permutohedron, quasisymmetric function, graph, decorated graph, combinatorial Hopf algebra, $f-$polynomial
\end{abstract}
\section{Introduction }
In this paper we construct a finite collection of generalized permutohedra associated to a simple graph. This collection starts with the graphical zonotope and ends with the graph--associahedron. To each generalized permutohedron $Q$ there is an associated quasisymmetric function $F(Q)$, introduced in~\cite{BJR}. It enumerates positive lattice points in the normal fan $\mathcal{N}_Q$. The weighted analogue $F_q(Q)$ of this enumerator, which takes into account the face structure of the normal fan $\mathcal{N}_Q$, is introduced and studied in~\cite{GPS}. Among its properties is that the $f-$polynomial $f(Q)$ can be obtained as the principal specialization of $F_q(Q)$. For different classes of generalized permutohedra, the algebraic interpretation of these enumerators is given as universal morphisms from appropriately defined combinatorial Hopf algebras to the combinatorial Hopf algebra of quasisymmetric functions $\mathcal{Q}Sym$.
The cases of graphical zonotopes $Z_\Gamma$ and graph--associahedra $P_\Gamma$ are of special interest. The enumerator $F(Z_\Gamma)$ is known to be the Stanley chromatic symmetric function, and the enumerator $F(P_\Gamma)$ is the chromatic quasisymmetric function introduced in~\cite{G}. Our construction produces a finite collection of weighted quasisymmetric functions between $F_q(Z_\Gamma)$ and $F_q(P_\Gamma)$. We show that each of these weighted quasisymmetric functions is in fact derived from a certain combinatorial Hopf algebra of decorated graphs.
\section{Graph polytopes}
For the standard basis vectors $\{e_s\}_{1\leq s\leq n}$ in $\mathbb{R}^n$, let $\Delta_H:=\mathsf{conv}\{e_s\,:\,s\in H\}$ be the simplex determined by a subset $H\subseteq[n]$. The \emph{hypergraphic polytope} of a hypergraph $\mathsf{H}$ on $[n]$ is the Minkowski sum of simplices
$$Q_\mathsf{H}\,:=\,\sum_{H\in\mathsf{H}}\Delta_H.$$
As a generalized permutohedron can be described as the Minkowski sum of dilated simplices (see~\cite{P}), we have that every hypergraphic polytope is a generalized permutohedron, i.e.\ a convex polytope whose normal fan $\mathcal{N}_{Q_\mathsf{H}}$ is refined by the reduced normal fan $\mathcal{N}_{Pe^{n-1}}$ of the standard $(n-1)-$dimensional permutohedron $Pe^{n-1}$. The $(n-d)-$dimensional faces of $Pe^{n-1}$ are in one$-$to$-$one correspondence with set compositions $\mathcal{C}=C_1|C_2|\cdots|C_{d}$ of the set $[n]$ (see~\cite{P}, Proposition 2.6). By this correspondence and the correspondence between set compositions and flags of subsets we identify the face $\mathcal{C}$ of $Pe^{n-1}$ with the flag $\mathcal{F}:\emptyset=F_0\subset F_1\subset\cdots\subset F_{d}=[n]$, where $F_m=\cup_{i=1}^mC_i$ for $1\leq m\leq d.$\\
Next, for a flag $\mathcal{F}$ of subsets let $M_\mathcal{F}$ be the enumerator of positive integer points $\omega=(\omega_1,\omega_2,\ldots,\omega_n)\in\mathbb{Z}^n_+$ in the interior of the normal cone $\mathcal{N}_{Pe^{n-1}}(\mathcal{F})$ at the $(n-d)-$dimensional face $\mathcal{F}$,
$$M_\mathcal{F}\;\;:=\;\sum_{\omega\in\mathbb{Z}^n_+\cap\,\mathcal{N}^\circ_{Pe^{n-1}}(\mathcal{F})}x_{\omega_1}x_{\omega_2}\cdots x_{\omega_n}.$$
The enumerator $M_\mathcal{F}$ is a monomial quasisymmetric function depending only on the composition $\mathsf{type}(\mathcal{F}):=(|F_1|,|F_2|-|F_1|,\ldots,|F_{d}|-|F_{d-1}|).$\\
Further, for a hypergraph $\mathsf{H}$, its \emph{splitting hypergraph} $\mathsf{H}/\mathcal{F}$ by a flag $\mathcal{F}:F_0\subset F_1\subset\cdots\subset F_k$ is defined in~\cite{MP} by
$$\mathsf{H}/\mathcal{F}\;:=\;\bigsqcup_{i=1}^k(\mathsf{H}|_{F_i})/F_{i-1},$$
where the \emph{restriction} $\mathsf{H}|_F$ and the \emph{contraction}
$\mathsf{H}/F$ are defined by $\mathsf{H}|_F:=\{H\in\mathsf{H}\,:\,H\subseteq F\}$ and $\mathsf{H}/F:=\{H\setminus F:H\in\mathsf{H}\}.$ In the same paper, the \emph{weighted integer points enumerator} is defined as
\begin{equation}\label{for1}
F_q(Q_\mathsf{H})\;\;\;:=\sum_{\mathcal{F}\in L(Pe^{n-1})}
q^{\mathsf{rk}(\mathsf{H}/\mathcal{F})}M_\mathcal{F},
\end{equation}
where $\mathsf{rk}(\mathsf{H}/\mathcal{F}):=n-c(\mathsf{H}/\mathcal{F})$, $c(\mathsf{H}/\mathcal{F})$ is the number of connected components of the hypergraph $\mathsf{H}/\mathcal{F}$ and $L(Pe^{n-1})$ is the face lattice of the generalized permutohedron $Pe^{n-1}$.
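To make the quantities entering (\ref{for1}) concrete, the following short Python sketch (our own illustration; all function names are ours) computes the splitting hypergraph $\mathsf{H}/\mathcal{F}$ block by block and returns the rank $\mathsf{rk}(\mathsf{H}/\mathcal{F})=n-c(\mathsf{H}/\mathcal{F})$ for a hypergraph given as a list of hyperedges and a flag given as an increasing list of subsets ending with $[n]$.
\begin{verbatim}
def components(vertices, hyperedges):
    # number of connected components of a hypergraph, via union-find
    parent = {v: v for v in vertices}
    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v
    for H in hyperedges:
        H = list(H)
        for u in H[1:]:
            parent[find(u)] = find(H[0])
    return len({find(v) for v in vertices})

def rank(n, hyperedges, flag):
    # rk(H/F) = n - c(H/F), where H/F is the disjoint union
    # of the contracted restrictions (H|_{F_i}) / F_{i-1}
    c, prev = 0, set()
    for F in flag:
        F = set(F)
        restricted = [set(H) for H in hyperedges if set(H) <= F]
        contracted = [H - prev for H in restricted if H - prev]
        c += components(F - prev, contracted)
        prev = F
    return n - c

# the hypergraph {12,23,34,123,234} and the flag {} < {1,2} < [4]
H = [{1, 2}, {2, 3}, {3, 4}, {1, 2, 3}, {2, 3, 4}]
print(rank(4, H, [{1, 2}, {1, 2, 3, 4}]))   # 2
\end{verbatim}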
\begin{definition}\emph{
For a simple graph $\Gamma=([n],E)$ and an integer $m\in\mathbb{N}$ we define the \emph{$m-$graph polytope}
$$Q_{\Gamma,m}\;\;\;:=\sum_{\substack{S\subseteq[n],\,|S|\leq m+1\\\Gamma|_S\text{ is connected}}}\Delta_S.$$
}\end{definition}
Note that $Q_{\Gamma,1}$ is a graphical zonotope and $Q_{\Gamma,m}$ is a graph$-$associahedron for $m\geq n-1$.
If $\mathsf{H}_{m}(\Gamma):=\left\{S\subseteq[n]\,:\,|S|\leq m+1\text{ and }\Gamma|_{S}\text{ is connected}\right\}$, then $Q_{\Gamma,m}$ is the hypergraphic polytope $Q_{\mathsf{H}_{m}(\Gamma)}$.
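For instance, the hyperedge set $\mathsf{H}_{m}(\Gamma)$ can be generated by brute force as in the following Python sketch (our own illustration; we omit singletons, whose simplices are single points and only translate the polytope).
\begin{verbatim}
from itertools import combinations

def is_connected(edges, S):
    # depth-first search inside the induced subgraph on S
    S = set(S)
    seen, stack = {min(S)}, [min(S)]
    while stack:
        u = stack.pop()
        for a, b in edges:
            for x, y in ((a, b), (b, a)):
                if x == u and y in S and y not in seen:
                    seen.add(y)
                    stack.append(y)
    return seen == S

def H_m(n, edges, m):
    # connected subsets S of [n] with 2 <= |S| <= m+1
    return [S for k in range(2, m + 2)
              for S in combinations(range(1, n + 1), k)
              if is_connected(edges, S)]

edges_L4 = [(1, 2), (2, 3), (3, 4)]
print(H_m(4, edges_L4, 2))
# [(1, 2), (2, 3), (3, 4), (1, 2, 3), (2, 3, 4)]
\end{verbatim}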
\section{Hopf algebras of decorated graphs}
We say that $\Gamma^w=([n],E,w)$ is a \emph{decorated graph} if $\Gamma=([n],E)$ is a simple graph and $w:E\rightarrow\mathbb{N}$ is the \emph{decoration} of $\Gamma$. Let $\Gamma^w|_S$ be the induced decorated subgraph on $S\subseteq [n]$ and let $\Gamma^w/S$ be the induced subgraph on $[n]\setminus S$ with additional edges $uv$ for all pairs of vertices $u,v\in[n]\setminus S$ connected by edge
paths through $S$. The decoration of a new edge $uv$ is the minimal sum of decorations over edge paths through $S$, i.e.
$$w(uv)\;\;\;\;\;:=\min_{\substack{us_1,\,s_1s_2,\,\ldots,\,s_kv\in\, E(\Gamma^w)\\ s_1,s_2,\ldots,s_k\in S}}\{w(us_1)+w(s_1s_2)+\cdots+w(s_kv)\}.$$
We call $\Gamma^w|_S$ the \emph{ripping} of a decorated graph $\Gamma^w$ to $S$ and $\Gamma^w/S$ the \emph{sewing} of a decorated graph $\Gamma^w$ by $S$.
\begin{example}
For the decorated graph $\Gamma^w$ and the subset $S=\{1,4,5,6\}$, the ripping $\Gamma^w|_S$ and the sewing $\Gamma^w/S$ are given in Figure~\ref{fig:slika2}.
\vspace*{-4mm}
\begin{figure}
\caption{Decorated graphs $\Gamma^w$, $\Gamma^w|_S$ and $\Gamma^w/S$.}
\label{fig:slika2}
\end{figure}
\end{example}
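The sewing $\Gamma^w/S$ can be computed mechanically by restricting shortest paths to intermediate vertices lying in $S$, for instance with a Floyd--Warshall step over $S$ only, as in the Python sketch below (an illustration of our reading of the definition; in particular, we assume that an edge of the induced subgraph on $[n]\setminus S$ simply keeps its original decoration).
\begin{verbatim}
from itertools import combinations
INF = float("inf")

def sew(n, wedges, S):
    # wedges: dictionary {frozenset({u,v}): decoration w(uv)}
    S = set(S)
    d = [[INF] * (n + 1) for _ in range(n + 1)]
    for v in range(1, n + 1):
        d[v][v] = 0
    for e, w in wedges.items():
        u, v = tuple(e)
        d[u][v] = d[v][u] = min(d[u][v], w)
    # shortest paths whose intermediate vertices all lie in S
    for s in S:
        for u in range(1, n + 1):
            for v in range(1, n + 1):
                d[u][v] = min(d[u][v], d[u][s] + d[s][v])
    outside = [x for x in range(1, n + 1) if x not in S]
    result = {}
    for u, v in combinations(outside, 2):
        e = frozenset({u, v})
        if e in wedges:              # edge of the induced subgraph is kept
            result[e] = wedges[e]
        elif d[u][v] < INF:          # additional edge through S
            result[e] = d[u][v]
    return result

# the 1-uniform path 1-2-3-4 sewn by S = {2,3}:
# a single new edge 14 with decoration 3
path = {frozenset({1, 2}): 1, frozenset({2, 3}): 1, frozenset({3, 4}): 1}
print(sew(4, path, {2, 3}))   # {frozenset({1, 4}): 3}
\end{verbatim}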
We say that $\Gamma^w$ is a \emph{$1-$uniform decorated graph}, denoted by $\Gamma^\bold{1}$, if $w(uv)=1$ for all $uv\in E$. Then the decoration of an additional edge $uv$ in $\Gamma^{\textbf1}/S$, for $S\subseteq [n]$, is the length of the shortest path through $S$ from $u$ to $v$.
Two decorated graphs are \emph{isomorphic} if there is a graph isomorphism between them which preserves the decoration.
Let $\mathcal{G}^{W}_n$ denote the $\bold{k}-$span of all isomorphism classes of decorated graphs on $n$ vertices, where $\mathcal{G}^{W}_0:=\bold{k}\{\emptyset\}$ and $\emptyset$ is the unique decorated graph on the empty set. For each $m\in\mathbb{N}$ we will endow
$$\mathcal{G}^{W}:=\bigoplus_{n\geq0}\mathcal{G}^{W}_n$$
with the structure of a graded Hopf algebra. The \emph{unit} $u:\bold{k}\rightarrow\mathcal{G}^{W}$, \emph{counit} $\varepsilon:\mathcal{G}^{W}\rightarrow\bold{k}$ and the \emph{product} $\mu:\mathcal{G}^{W}\otimes\mathcal{G}^{W}\rightarrow\mathcal{G}^{W}$
are the same for all $m$ and they are defined by $u(1):=\emptyset$,
$$\varepsilon(\Gamma^w)=\begin{cases}1,&\Gamma^w=\emptyset,\\0,&\text{otherwise},\end{cases}
\qquad\text{and}\qquad
\Gamma^{w_1}_1\cdot \Gamma^{w_2}_2:=(\Gamma_1\sqcup \Gamma_2)^{w_1w_2}.$$
Here, the decoration $w_1w_2:E_1\sqcup E_2\rightarrow\mathbb{N}$ is defined with
$$w_1w_2(uv)=\begin{cases}w_1(uv),&uv\in E_1,\\w_2(uv),&uv\in E_2.
\end{cases}$$
For an integer $m\in\mathbb{N}$ we define the \emph{coproduct} $\Delta_m:\mathcal{G}^{W}\rightarrow\mathcal{G}^{W}\otimes\mathcal{G}^{W}$ by
$$\Delta_m(\Gamma^w)=\sum_{S\subseteq [n]}\mathrm{pr}_m\left(\Gamma^w|_S\right)\otimes\mathrm{pr}_m\left(\Gamma^w/S\right),\;\;\;\Gamma^w\in\mathcal{G}^{W}_{n},$$
where $\mathrm{pr}_m:\mathcal{G}^{W}\rightarrow\mathcal{G}^{W}$ is the map which deletes all edges whose decoration $w$ is greater than $m$.\\
The antipode $\mathcal{S}_m$ of $\Gamma^w$ is determined by Takeuchi's general formula for the antipode of a graded connected bialgebra
$$\mathcal{S}_m(\Gamma^w)=\sum_{k\geq1}(-1)^k\sum_{\mathcal{F}_k}\prod_{i=1}^k\mathrm{pr}_m(\Gamma^w|_{F_{i}}/F_{i-1}),$$
where the inner sum goes over all flags of subsets $\mathcal{F}_k:\emptyset=F_0\subset F_1\subset\cdots\subset F_k=[n].$
\begin{proposition}
For all $m\in\mathbb{N}$,
$\mathcal{G}^{W,m}=(\mathcal{G}^W,\mu,u,\Delta_m,\varepsilon,\mathcal{S}_m)$ is a graded connected Hopf algebra.
\end{proposition}
\begin{proof}
We prove the compatibility of the product and coproduct and the coassociativity of the coproduct, since the other properties are straightforward. For a decorated graph $\Gamma^w$ on the vertex set $[n]$, one has the following identities
{\small
\begin{align*}
((\Delta\otimes\mathsf{Id})\circ\Delta)(\Gamma^w)
&=\sum_{\emptyset\subseteq S_1\subseteq S_2\subseteq [n]}
\mathrm{pr}_m(\Gamma^w|_{S_1})\otimes\mathrm{pr}_m(\Gamma^w|_{S_2}/S_1)\otimes\mathrm{pr}_m(\Gamma^w/S_2)\\
((\mathsf{Id}\otimes\Delta)\circ\Delta)(\Gamma^w)
&=\sum_{\emptyset\subseteq S_1\subseteq S_2\subseteq [n]}
\mathrm{pr}_m(\Gamma^w|_{S_1})\otimes\mathrm{pr}_m(\Gamma^w/S_1|_{S_2\setminus S_1})\\
&\qquad\qquad\qquad\qquad\qquad\;\;\;\;
\otimes\mathrm{pr}_m((\Gamma^w/S_1)/(S_2\setminus S_1)).
\end{align*}}
\vspace*{-3mm}
\noindent
Since $(\Gamma^w/S_1)/(S_2\setminus S_1)=\Gamma^w/S_2$, it is sufficient to show that
$$\mathrm{pr}_m(\Gamma^w|_{S_2}/S_1)=\mathrm{pr}_m(\Gamma^w/S_1|_{S_2\setminus S_1}).$$
Let $uv\in \Gamma^w|_{S_2}/S_1$ and $w(uv)\leq m.$ That means that $u$ and $v$ are connected in $\Gamma^w|_{S_2\setminus S_1}$ or there is a path in $\Gamma^w|_{S_2}$ through $S_1$. In both cases $uv\in\mathrm{pr}_m(\Gamma^w/S_1|_{S_2\setminus S_1})$. Also, if $uv\in\Gamma^w/S_1|_{S_2\setminus S_1}$ and $w(uv)\leq m$, then $u$ and $v$ are connected in $\Gamma^w|_{S_2\setminus S_1}$ or there is a path in $\Gamma^w|_{S_2}$ through $S_1.$ Again, in both cases $uv\in\mathrm{pr}_m(\Gamma^w|_{S_2}/S_1).$
\noindent
Furthermore, for a pair of decorated graphs $\Gamma^{w_1}_1\in\mathcal{G}^{W,m}_{n_1}$ and $\Gamma^{w_2}_2\in\mathcal{G}^{W,m}_{n_2}$ and subsets $S_1\subset [n_1]$ and $S_2\subset [n_2]$ one has isomorphisms
$$(\Gamma^{w_1}_1|_{S_1})\cdot(\Gamma^{w_2}_2|_{S_2})=(\Gamma^{w_1}_1\cdot \Gamma^{w_2}_2)|_{S_1\sqcup S_2}$$
and
$$(\Gamma^{w_1}_1/S_1)\cdot(\Gamma^{w_2}_2/S_2)=(\Gamma^{w_1}_1\cdot \Gamma^{w_2}_2)/(S_1\sqcup S_2),$$
which proves the commutativity of the bialgebra diagram in the definition of a Hopf algebra.
\end{proof}
Now we define $\zeta_q:\mathcal{G}^{W,m}\rightarrow\bold{k}[\,q\,]$ by
$$\zeta_q(\Gamma^w):=q^{n-c(\Gamma^w)},\;\;\;\Gamma^w\in\mathcal{G}^{W,m}_n,$$
where $c(\Gamma^w)$ is the number of connected components of $\Gamma.$ It is straightforward that $\zeta_q$ is a multiplicative morphism, which turns $(\mathcal{G}^{W,m},\zeta_q)$ into a combinatorial Hopf algebra.
By the fundamental theorem of combinatorial Hopf algebras (see~\cite{ABS}, Theorem 4.1), there is a unique morphism
$$\Psi_q^m:(\mathcal{G}^{W,m},\zeta_q)\rightarrow(\mathcal{Q}Sym,\zeta).$$
In the monomial basis it is given by
$$\Psi_q^m(\Gamma^w)=\sum_{\alpha\models n}(\zeta_q)_\alpha(\Gamma^w)M_\alpha.$$
For a composition $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_k)$, the coefficient $(\zeta_q)_\alpha(\Gamma^w)$ is determined by
$$(\zeta_q)_\alpha(\Gamma^w)=\zeta_q^{\otimes k}\circ(p_{\alpha_1}\otimes p_{\alpha_2}\otimes\cdots\otimes p_{\alpha_k})\circ\Delta_m^{k-1}(\Gamma^w),
$$
where $p_i$ is the projection on the $i-$th homogeneous component and $\Delta_m^{k-1}$ is the $(k-1)-$fold coproduct map of $\mathcal{G}^{W,m}$.\\
For a decorated graph $\Gamma^w$ and a flag of subsets $\mathcal{F}:\emptyset=F_0\subset F_1\subset\cdots\subset F_k=[\,n\,]$ let
$$\Gamma^w/\mathcal{F}\;:=\;\bigsqcup_{i=1}^k\Gamma^w|_{F_i}/F_{i-1}.$$
Thus, the coefficient corresponding to a composition $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_k)\models n$ is a
polynomial in $q$ determined by
\begin{align*}
(\zeta_q)_\alpha(\Gamma^w)
=
\sum_{\mathcal{F}:\mathsf{type}(\mathcal{F})=\alpha}\prod_{i=1}^kq^{|F_i/F_{i-1}|-c(\mathrm{pr}_m(\Gamma^w|_{F_i}/F_{i-1}))}
=
\sum_{\mathcal{F}:\mathsf{type}(\mathcal{F})=\alpha}q^{\mathsf{rk}_m(\Gamma^w/\mathcal{F})},
\end{align*}
where
\begin{equation*}
\mathsf{rk}_m(\Gamma^w/\mathcal{F}):=n-\sum_{i=1}^kc(\mathrm{pr}_m(\Gamma^w|_{F_i}/F_{i-1})).
\end{equation*}
Finally, we obtain
\begin{equation}\label{for2}
\Psi_q^m(\Gamma^w)=\sum_{\mathcal{F}\in L(Pe^{n-1})}q^{\mathsf{rk}_m(\Gamma^w/\mathcal{F})}M_\mathcal{F},
\end{equation}
where $L(Pe^{n-1})$ is the face lattice of the generalized permutohedron $Pe^{n-1}.$
\begin{theorem}
Given a simple graph $\Gamma$ and $m\in\mathbb{N}$, let $Q_{\Gamma,m}$ be the corresponding $m-$graph polytope. Then, the following identity holds
$$F_q(Q_{\Gamma,m})=\Psi^m_q(\Gamma^{\bf 1}),$$
where $F_q(Q_{\Gamma,m})$ is the weighted integer points enumerator of the $m-$graph polytope $Q_{\Gamma,m}$.
\end{theorem}
\begin{proof}
From~(\ref{for1}) and~(\ref{for2}), it is sufficient to prove that for any flag of subsets $\mathcal{F}:\emptyset=F_0\subset F_1\subset\cdots\subset F_k=[n]$ and $1\leq i\leq k$ it holds that
$$c(\mathrm{pr}_m(\Gamma^{\bold{1}}|_{F_i}/F_{i-1}))=c(\mathsf{H}_{m}(\Gamma)|_{F_i}/F_{i-1}).$$
All edges in the decorated graph $\mathrm{pr}_m(\Gamma^{\textbf1}|_{F_i}/F_{i-1})$ have decorations less than or equal to $m$, i.e. $uv\in E\left(\mathrm{pr}_m(\Gamma^{\textbf1}|_{F_i}/F_{i-1})\right)$ if and only if there is a path from $u$ to $v$ of length at most $m$ in the graph $\Gamma|_{F_i}$. Equivalently, there exists $H\in\mathsf{H}_{m}(\Gamma)$ such that $u,v\in H.$
\end{proof}
By the general theorem on generalized permutohedra (see~\cite{GPS}, Theorem 4.4), the $f-$polynomial of an $m-$graph polytope $Q_{\Gamma,m}$ is determined by the principal specialization of the enumerator $F_q(Q_{\Gamma,m})$, i.e.
\begin{align}
\label{f_vektor}
f(Q_{\Gamma,m},q)=(-1)^n\mathsf{ps}^1(\Psi_{-q}^m(\Gamma^\bold{1}))(-1).
\end{align}
\begin{example}
For the line graph $L_4=([4],\{12,23,34\})$, we have
\begin{align*}
F_q(Q_{L_4,1})
&=q^3M_4+2q^2(M_{1,3}+M_{3,1}+M_{2,2})\\
&+2q(M_{1,3}+M_{3,1}+M_{2,2}+3M_{2,1,1}+3M_{1,2,1}+3M_{1,1,2})\\
&+2M_{2,2}+6M_{1,1,2}+6M_{1,2,1}+6M_{2,1,1}+24M_{1,1,1,1},\\
F_q(Q_{L_4,2})
&=q^3M_4+2q^2(M_{1,3}+2M_{3,1}+M_{2,2})\\
&+2q(M_{3,1}+2M_{2,2}+5M_{1,1,2}+4M_{1,2,1}+3M_{2,1,1})\\
&+\;\;\;\;\;\;\;\;\;\;\;\;\;\,2M_{1,1,2}+4M_{1,2,1}+6M_{2,1,1}+24M_{1,1,1,1},\\
F_q(Q_{L_4,3})
&=q^3M_4+q^2(4M_{1,3}+2M_{3,1}+3M_{2,2})\\
&+q(2M_{3,1}+3M_{2,2}+12M_{1,1,2}+8M_{1,2,1}+6M_{2,1,1})\\
&+\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;4M_{1,2,1}+6M_{2,1,1}+24M_{1,1,1,1}.
\end{align*}
The corresponding $f$--polynomials are determined by the principal specialization
\vspace*{-9mm}
\begin{align*}
f(Q_{L_4,1},q)
&=q^3+6q^2+12q+8,\\
f(Q_{L_4,2},q)
&=q^3+8q^2+18q+12,\\
f(Q_{L_4,3},q)
&=q^3+9q^2+21q+14.
\end{align*}
\begin{figure}
\caption{Graph polytopes $Q_{L_4,1}$, $Q_{L_4,2}$ and $Q_{L_4,3}$.}
\label{fig:slika1}
\end{figure}
\end{example}
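The specialization (\ref{f_vektor}) can be checked directly on these data. The following sketch (our own illustration; it uses the evaluation $\mathsf{ps}^1(M_\alpha)(-1)=(-1)^{\ell(\alpha)}$, with $\ell(\alpha)$ the number of parts of $\alpha$, which is consistent with the values listed above) recovers $f(Q_{L_4,1},q)=q^3+6q^2+12q+8$ from the monomial expansion of $F_q(Q_{L_4,1})$.
\begin{verbatim}
import sympy as sp

q = sp.symbols('q')

def f_polynomial(Fq, n):
    # f(Q,q) = (-1)^n ps^1(F_{-q})(-1), using ps^1(M_a)(-1) = (-1)^len(a)
    total = sum(sp.sympify(c).subs(q, -q) * (-1) ** len(alpha)
                for alpha, c in Fq.items())
    return sp.expand((-1) ** n * total)

# monomial expansion of F_q(Q_{L_4,1}) from the example above
Fq_L4_1 = {
    (4,): q**3,
    (1, 3): 2*q**2 + 2*q,
    (3, 1): 2*q**2 + 2*q,
    (2, 2): 2*q**2 + 2*q + 2,
    (2, 1, 1): 6*q + 6,
    (1, 2, 1): 6*q + 6,
    (1, 1, 2): 6*q + 6,
    (1, 1, 1, 1): 24,
}
print(f_polynomial(Fq_L4_1, 4))   # q**3 + 6*q**2 + 12*q + 8
\end{verbatim}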
In the sequel, we associate the sequence of quasisymmetric functions
$$(\Psi_q^1(\Gamma^\bold{1}),\Psi_q^2(\Gamma^\bold{1}),\ldots,\Psi_q^{n-1}(\Gamma^\bold{1}))$$
to a simple graph $\Gamma$ on $n$ vertices.
The following theorem describes how this sequence stabilizes.
\begin{theorem}\label{thm_1}
Given a connected simple graph $\Gamma$, let $m$ be the cardinality of the maximal subset $M\subseteq[n]$ such that $\Gamma|_M\cong L_{|M|}$. Then, for all $k\geq |M|$ we have
$$\Psi^{k}_q(\Gamma^{\bold{1}})=\Psi^{|M|-1}_q(\Gamma^{\bold{1}}).$$
\end{theorem}
\begin{proof}
It is sufficient to prove that $c\left(\mathrm{pr}_k(\Gamma^\bold{1}/\mathcal{F})\right)=c\left(\mathrm{pr}_{|M|-1}(\Gamma^\bold{1}/\mathcal{F})\right)$ for all $k\geq|M|$ and for all flags $\mathcal{F}$ of subsets of the set $[n]$.
Let us suppose that $u,v\in F_{i}\setminus F_{i-1}$ for the flag $\mathcal{F}:\emptyset=F_0\subset F_1\subset\cdots\subset F_k=[n]$.
Since the maximal distance between vertices in the graph $\Gamma$ is at most $m-1$, it follows that $w(uv)\leq |M|-1$.
If $u$ and $v$ are connected in $\Gamma|_{F_{i}}$, then $u$ and $v$ are not connected in $\mathrm{pr}_t(\Gamma^\bold{1}/\mathcal{F})$ for $t<w(uv)$, but they are connected in $\mathrm{pr}_t(\Gamma^\bold{1}/\mathcal{F})$ for all $t\geq w(uv)$, and in particular for all $t\geq|M|-1$. Similarly, if $u$ and $v$ are not connected in $\Gamma|_{F_{i}}$, then $u$ and $v$ are not connected in $\mathrm{pr}_k(\Gamma^\bold{1}/\mathcal{F})$ for any $k$.
\end{proof}
\begin{example}
Note that $\Psi^k_q(K_n^\bold{1})=\Psi^1_q(K_n^\bold{1})$ for all $k\geq1$, where $K_n$ is the complete graph on $n$ vertices. For the star graph $S_n$, it holds $\Psi^k_q(S_n^\bold{1})=\Psi^2_q(S_n^\bold{1})$ for all $k\geq2$.
\end{example}
\begin{corollary}
The line graph $L_n$ is the only graph on $n$ vertices such that $\Psi^1_q(L_n^\bold{1}),$ $\Psi^2_q(L_n^\bold{1}),$ $\ldots,$ $\Psi^{n-1}_q(L_n^\bold{1})$
are pairwise different quasisymmetric functions.
\end{corollary}
For a graph $\Gamma$ on $n$ vertices, the enumerator $\Psi^1_0(\Gamma^\bold{1})$ is the Stanley chromatic symmetric function of the graph $\Gamma$ and the enumerator $\Psi^{n-1}_0(\Gamma^\bold{1})$ is the chromatic quasisymmetric function introduced in \cite{G}. There is only one pair of graphs on five vertices with the same Stanley chromatic symmetric functions, but their chromatic quasisymmetric functions are different. On the other hand, there are three pairs of graphs on six vertices whose chromatic quasisymmetric functions are the same, but the corresponding Stanley chromatic symmetric functions are not.
\begin{example}
For the graphs $\Gamma_1$ and $\Gamma_2$ in Figure \ref{fig:slika11}, which have the same Stanley chromatic symmetric function, we have
{\small
\begin{align*}
\Psi^1_0(\Gamma_1)=\Psi^1_0(\Gamma_2)&=4M_{1,2,2}+4M_{2,1,2}+4M_{2,2,1}\\
&+24M_{1,1,1,2}+24M_{1,1,2,1}+24M_{1,2,1,1}+24M_{2,1,1,1}+120M_{1,1,1,1,1}.
\end{align*}
}
\begin{figure}
\caption{Graphs $\Gamma_1$ and $\Gamma_2$ with the same Stanley chromatic symmetric functions}
\label{fig:slika11}
\end{figure}
\vspace*{-4mm}
\noindent
By Theorem \ref{thm_1}, $\Psi_0^k(\Gamma_1)=\Psi_0^2(\Gamma_1)$, for $k\geq2$, and $\Psi_0^k(\Gamma_2)=\Psi_0^3(\Gamma_2)$, for $k\geq3$. Also, the following equations hold
{\small
\begin{align*}
\Psi^2_0(\Gamma_1)&=4M_{2,2,1}\qquad\qquad\;\;\;\;
+\;8\,M_{1,1,2,1}+16M_{1,2,1,1}+24M_{2,1,1,1}+120M_{1,1,1,1,1},\\
\Psi^2_0(\Gamma_2)&=\qquad\qquad\;\, 6M_{1,1,1,2}+10M_{1,1,2,1}+16M_{1,2,1,1}+24M_{2,1,1,1}+120M_{1,1,1,1,1},
\\
\Psi^3_0(\Gamma_2)&=\qquad\qquad\qquad\qquad\;\;\;\;\;\;\; 6M_{1,1,2,1}+16M_{1,2,1,1}+24M_{2,1,1,1}+120M_{1,1,1,1,1}.
\end{align*}}
\end{example}
\begin{example}
For the graphs $\Gamma_3$ and $\Gamma_4$ in Figure 4, the corresponding chromatic quasisymmetric functions are the same, i.e.
$\Psi^3_0(\Gamma_3)=\Psi^3_0(\Gamma_4)$. Note that the coefficient of $M_{1,1,1,1,2}$ in $\Psi^2_0(\Gamma_3)$ is 24 and the coefficient of $M_{1,1,1,1,2}$ in $\Psi^2_0(\Gamma_4)$ is 0, so
$$\Psi^2_0(\Gamma_3)\neq\Psi^2_0(\Gamma_4).$$
\vspace*{-4mm}
\begin{figure}
\caption{Graphs $\Gamma_3$ and $\Gamma_4$ with the same chromatic quasisymmetric functions}
\label{fig:slika1}
\end{figure}
\noindent
Moreover, it is shown in \cite{GPS} that $\Psi_0^1(\Gamma_3)\neq\Psi_0^1(\Gamma_4).$
\end{example}
In the previous theorem we have shown how the quasisymmetric functions $(\Psi_q^1(\Gamma^\bold{1}),$ $\Psi_q^2(\Gamma^\bold{1}),$ $\ldots,$ $\Psi_q^{n-1}(\Gamma^\bold{1}))$
associated to the graph $\Gamma$ stabilize. Now the following question arises: what happens with the sequence of polytopes $(Q_{\Gamma,1}, Q_{\Gamma,2},\ldots, Q_{\Gamma,n-1})$ associated to the graph $\Gamma$? To answer this question, we will need the following lemma.
\begin{lemma}[\cite{DP}, Lemma 2.4]\label{lema_1}
Consider two polytopes $P$ and $Q$. Let $\psi$ be an injection from the
vertex set of $P$ to the vertex set of $P+Q$ such that, for every vertex $u$ of $P$, $\psi(u)=u+v$, where $v$ is a vertex of $Q$. If $\psi$ is a bijection, then the normal fan of $P$ coincides with the normal fan of $P+Q$.
\end{lemma}
\begin{proposition}
Given a connected simple graph $\Gamma$, let $m$ be the cardinality of the maximal subset $M\subseteq[n]$ such that $\Gamma|_M\cong L_{|M|}$. Then, for all $k\geq|M|$ the polytopes $Q_{\Gamma,k}$ and $Q_{\Gamma,|M|-1}$ are normally equivalent, i.e. the normal fan of $Q_{\Gamma,k}$ coincides with the normal fan of $Q_{\Gamma,|M|-1}.$
\end{proposition}
\begin{proof}
From Theorem \ref{thm_1} and (\ref{f_vektor}), we have that for all $k\geq |M|$ the polytopes $Q_{\Gamma,k}$ and $Q_{\Gamma,|M|-1}$ have the same $f-$polynomial. In particular, for all $k\geq |M|$ the polytopes $Q_{\Gamma,k}$ and $Q_{\Gamma,|M|-1}$ have the same number of vertices. Since
$$Q_{\Gamma,k}\;\;\;=\;\;\;Q_{\Gamma,|M|-1}\;\;\;+\sum_{\substack{S\subseteq[n],|M|+1\leq|S|\leq k+1\\ \Gamma|_S\text{ is connected}}}\Delta_S\;\;=\;\;Q_{\Gamma,|M|-1}\;\;+\;\;P,$$
the injection $\psi$ from the previous lemma is in fact a bijection from the vertex set of $Q_{\Gamma,|M|-1}$ to the vertex set of $Q_{\Gamma,|M|-1}+P$, because these two polytopes have the same number of vertices. According to Lemma \ref{lema_1}, the normal fan of the polytope $Q_{\Gamma,|M|-1}$ coincides with the normal fan of the polytope $Q_{\Gamma,|M|-1}+P$.
\end{proof}
\begin{theorem}
For a connected simple graph $\Gamma$ on $n$ vertices and $m\in\mathbb{N}$, the polytopes
$$Q_{\Gamma,m}\;\;\;:=\sum_{\substack{S\subseteq[n],\,|S|\leq m+1\\c(\Gamma|_S)=1}}\Delta_S
\qquad\text{and}\qquad
Q^L_{\Gamma,m}\;\;\;:=\sum_{\substack{S\subseteq[n],\,|S|\leq m+1\\\Gamma|_S\cong L_{|S|}}}\Delta_S$$
are normally equivalent.
\end{theorem}
\begin{proof}
Let $M\subseteq[n]$ be such that $|M|=m$ and $\Gamma|_M\cong L_m.$ If all $S\subseteq[n]$ with $|S|\leq m$ satisfy $\Gamma|_S\cong L_{|S|}$, the statement is true. Otherwise, let $K$ be a subset of maximum cardinality $k$ of the nonempty set
$$\{S\subseteq[n]\,:\,|S|\leq m,\,\Gamma|_S\ncong L_{|S|}\}.$$
It is sufficient to prove that the polytopes $Q_{\Gamma,m}$ and $Q_{\Gamma,m}-\Delta_K$ are normally equivalent. By the previous proposition, there exists $k'<k$ such that the polytopes $Q_{\Gamma|_K,k}$ and $Q_{\Gamma|_K,k'}$ are normally equivalent. In particular, it means that
$$Q_{\Gamma|_K,k}=Q_{\Gamma|_K,k-1}+\Delta_K
\qquad\text{and}\qquad
Q_{\Gamma|_K,k-1}$$
are normally equivalent polytopes. Since $Q_{\Gamma|_K,k}$ is a Minkowski summand of $Q_{\Gamma,m}$, it follows that the polytopes
$Q_{\Gamma,m}$ and $Q_{\Gamma,m}-\Delta_{K}$
are normally equivalent as well.
\end{proof}
\begin{corollary}
For a connected simple graph $\Gamma$ on $n$ vertices, the polytopes
$$Q_{\Gamma}\;\;\;:=\sum_{\substack{S\subseteq[n],\\c(\Gamma|_S)=1}}\Delta_S
\qquad\text{and}\qquad
Q^L_{\Gamma}\;\;\;:=\sum_{\substack{S\subseteq[n],\\\Gamma|_S\cong L_{|S|}}}\Delta_S$$
are normally equivalent.
\end{corollary}
\section{$\mathcal{H}-$posets}
For a given vertex $v$ of an $m-$graph polytope $Q_{\Gamma,m}$ there exists a poset $\mathsf{P}_v$ (described in \cite{PRW}, Corollary 3.9) whose linear extensions correspond to the \emph{Weyl chambers} contained in the normal cone of the vertex $v$.
For example, if $\mathsf{P}_v:(1<2,3<2)$ then the normal cone of the vertex $v$ contains the Weyl chambers determined by $x_1\leq x_3\leq x_2$ and $x_3\leq x_1\leq x_2.$
We can regard a poset $\mathsf{P}$
as a directed graph in which $j<_{\mathsf{P}}i$
if and only if there is a directed path from $i$ to $j$. In general the converse does not hold: not every directed graph defines a poset in this way. If a directed graph is \emph{acyclic}, i.e. there exist no vertices $v_1,v_2,\ldots,v_l$ such that
$v_1\rightarrow v_2\rightarrow\cdots\rightarrow v_l\rightarrow v_1$, we can view this directed graph as a binary relation whose transitive closure defines a poset.
For a given hypergraph $\mathsf{H}$ on the set $[n]$, a $1-$\emph{orientation of a hyperedge} $H\in\mathsf{H}$ is a pair $(h_1,H_2)$, where $h_1$ is a distinguished element of $H\subseteq[n]$ and $H_2=H\setminus\{h_1\}$. A \emph{$1-$orientation $\mathcal{O}$ of the hypergraph} $\mathsf{H}$ is the set of $1-$orientations of all its hyperedges. Then we can construct an oriented multigraph $\mathsf{H}/\mathcal{O}$ on the set $[n]$ with $h_1\rightarrow h_2$ for all $h_2\in H_2$ satisfying $(h_1,H_2)\in\mathcal{O}$. If the oriented multigraph $\mathsf{H}/\mathcal{O}$ has no cycles, we say that the $1-$orientation $\mathcal{O}$ is \emph{acyclic}. In that case $\mathsf{H}/\mathcal{O}$ is an oriented graph.
\begin{example}
For the hypergraph $\mathsf{H}_2(L_4)=([4],\{12,23,34,123,234\})$ the $1$--orientation
$$\mathcal{O}_1=\{(1,\{2\}), (2,\{3\}), (3,\{4\}),(1,\{2,3\}),(2,\{3,4\})\}$$
is acyclic. The $1$--orientation
$$\mathcal{O}_2=\{(1,\{2\}), (2,\{3\}), (3,\{4\}),(3,\{1,2\}),(4,\{2,3\})\}$$
is not acyclic, since $\mathsf{H}/\mathcal{O}_2$ contains the cycle $2\to3\to4\to2$.
\end{example}
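Acyclicity of a $1-$orientation is easy to test mechanically, for example by repeatedly deleting vertices without outgoing arcs in the oriented multigraph $\mathsf{H}/\mathcal{O}$, as in the following sketch (our own illustration; the data are the two $1-$orientations from the example above).
\begin{verbatim}
def arcs_of(orientation):
    # directed arcs h1 -> h2 of the oriented multigraph H/O
    return {(h1, h2) for h1, H2 in orientation for h2 in H2}

def is_acyclic(orientation, n):
    # repeatedly remove vertices with no outgoing arc;
    # the 1-orientation is acyclic iff no arc survives
    arcs = arcs_of(orientation)
    vertices = set(range(1, n + 1))
    changed = True
    while changed:
        changed = False
        for v in list(vertices):
            if all(a != v for a, _ in arcs):
                arcs = {(a, b) for a, b in arcs if b != v}
                vertices.discard(v)
                changed = True
    return not arcs

O1 = [(1, {2}), (2, {3}), (3, {4}), (1, {2, 3}), (2, {3, 4})]
O2 = [(1, {2}), (2, {3}), (3, {4}), (3, {1, 2}), (4, {2, 3})]
print(is_acyclic(O1, 4), is_acyclic(O2, 4))   # True False
\end{verbatim}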
Let $\mathcal{O}$ be an acyclic $1-$orientation of a hypergraph $\mathsf{H}$ on the set $[n]$. The transitive closure of the acyclic oriented graph $\mathsf{H}/\mathcal{O}$ is a poset $\mathsf{P}$ such that for all $H\in\mathsf{H}$ the restriction $\mathsf{P}|_H$ is a poset whose Hasse diagram is a \emph{rooted tree}. The root of this rooted tree is the first component of the $1-$orientation $(h_1,H_2)$ of the hyperedge $H$ in $\mathsf{H}.$
\begin{example}
The $1$--orientation $\mathcal{O}_1$ from the previous example is acyclic, so the transitive closure of the oriented graph $\mathsf{H}/\mathcal{O}_1$ is the poset $\mathsf{P}:(2<1,3<1,4<1,3<2,4<2,4<3)$.
\end{example}
\begin{definition}\label{Hposet}\emph{
A poset $\mathsf{P}$ on the set $[n]$ is a \emph{$\mathcal{H}-$poset} of a hypergraph $\mathsf{H}$ if
\begin{enumerate}
\item for all $H\in\mathsf{H}$ the Hasse diagram of the restriction $\mathsf{P}|_H$ is a rooted tree,
\item $i\lessdot_\mathsf{P}j$ if and only if there exists $H\in\mathsf{H}$ such that $\{i,j\}\subseteq H$, where $i\lessdot_\mathsf{P}j$ means that $i<_\mathsf{P}j$ and there is no $k\in\mathsf{P}$ such that $i<_\mathsf{P}k<_\mathsf{P}j.$
\end{enumerate}}
\end{definition}
Note that the $\mathcal{B}-$trees defined in \cite{P} satisfy the requirements of the previous definition, so $\mathcal{B}-$trees are a special kind of $\mathcal{H}-$posets.
\begin{example}
Let $L_4$ be the line graph on the set $[4]$ with edges $\{12,23,34\}$. For the hypergraph $\mathsf{H}_1(L_4)$ there are eight $\mathcal{H}-$posets:
\[\mathsf{P}_{1},\mathsf{P}_{2},\mathsf{P}_{3},\mathsf{P}_{4},\mathsf{P}_{5},\mathsf{P}_{6},\mathsf{P}_{7},\mathsf{P}_{8}.\]
Also, there are twelve $\mathcal{H}-$posets for the hypergraph $\mathsf{H}_2(L_4)$:
\[\mathsf{P}_{1},\mathsf{P}_{2},\mathsf{P}_{3},\mathsf{P}_{4},\mathsf{P}_{9},\mathsf{P}_{10},\mathsf{P}_{11},\mathsf{P}_{12},\mathsf{P}_{13},\mathsf{P}_{14},\mathsf{P}_{15},\mathsf{P}_{16}.\]
Finally, for the hypergraph $\mathsf{H}_3(L_4)$ we have fourteen $\mathcal{H}-$posets:
\[\mathsf{P}_{1},\mathsf{P}_{2},\mathsf{P}_{3},\mathsf{P}_{4},\mathsf{P}_{9},\mathsf{P}_{10},\mathsf{P}_{11},\mathsf{P}_{12},\mathsf{P}_{13},\mathsf{P}_{14},\mathsf{P}_{17},\mathsf{P}_{18},\mathsf{P}_{19},\mathsf{P}_{20}.\]
\vspace*{-3mm}
\begin{figure}
\caption{The Hasse diagrams of the $\mathcal{H}-$posets $\mathsf{P}_{1},\ldots,\mathsf{P}_{20}$.}
\label{fig:slika1}
\end{figure}
\end{example}
\begin{proposition}
A poset $\mathsf{P}$ is a $\mathcal{H}-$poset of a hypergraph $\mathsf{H}$ if and only if there exists an acyclic $1-$orientation $\mathcal{O}$ of $\mathsf{H}$ such that $\mathsf{P}$ is the transitive closure of $\mathsf{H}/\mathcal{O}$.
\end{proposition}
\begin{proof}
If $\mathsf{P}$ is an $\mathcal{H}-$poset, then for every hyperedge $H\in\mathsf{H}$ there is $h_1\in H$ such that $\mathsf{P}|_H$ is the rooted tree with the root $h_1$. Note that $(h_1,H\setminus\{h_1\})$ is a $1-$orientation of the hyperedge $H$ and that the set of all these $1-$orientations of hyperedges forms an acyclic $1-$orientation $\mathcal{O}$ of the hypergraph $\mathsf{H}$. Indeed, if $\mathcal{O}$ were not an acyclic $1-$orientation, there would exist a path $v_1\rightarrow v_2\rightarrow \cdots\rightarrow v_k\rightarrow v_{k+1}=v_1$ in the directed graph $\mathsf{H}/\mathcal{O}$. Then we would have hyperedges $H_1,H_2,\ldots,H_k$ with $1-$orientations $(v_1,H_{1}\setminus\{v_1\}),(v_2,H_{2}\setminus\{v_2\}),\ldots,(v_k,H_{k}\setminus\{v_k\})$ where $v_{i+1}\in H_{i}\setminus\{v_i\}$ for $1\leq i\leq k.$ Since the $1-$orientations of hyperedges arise from the poset $\mathsf{P}$, it would hold that $v_1>_\mathsf{P}v_2>_\mathsf{P}\cdots>_\mathsf{P}v_k>_\mathsf{P}v_1$, a contradiction.
On the other hand, if $\mathcal{O}$ is an acyclic $1-$orientation of $\mathsf{H}$, then the transitive closure of $\mathsf{H}/\mathcal{O}$ satisfies the requirements of Definition~\ref{Hposet}, so it is an $\mathcal{H}-$poset.
\end{proof}
Let $f:[n]\rightarrow\mathbb{N}$ be a function on the set of vertices of a poset $\mathsf{P}$ on $[n]$. We say that the function $f$ is a \emph{natural $\mathsf{P}-$partition} if
$f(i)\leq f(j)$ for $i\leq_\mathsf{P}j$ and a \emph{strict $\mathsf{P}-$partition} if, additionally, $f(i)<f(j)$ for $i<_\mathsf{P}j$. Denote by $\mathcal{A}(\mathsf{P})$ the set of all natural $\mathsf{P}$-partitions and by $\mathcal{A}_0(\mathsf{P})$ the set of all strict $\mathsf{P}-$partitions. Let $F(\mathsf{P})$ be the \emph{quasisymmetric enumerator of strict $\mathsf{P}-$partitions} defined by
\[F(\mathsf{P})=\sum_{f\in\mathcal{A}_0(\mathsf{P})}x_{f(1)}x_{f(2)}\cdots x_{f(n)}.\]
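For small posets the enumerator $F(\mathsf{P})$ can be explored by brute force: restricting the values of $f$ to $\{1,\ldots,N\}$ amounts to the specialization of $F(\mathsf{P})$ at $x_1=\cdots=x_N=1$ and $x_i=0$ for $i>N$. The sketch below (our own illustration, with the arbitrary choice $N=6$) counts the strict $\mathsf{P}-$partitions of the chain $4<_\mathsf{P}3<_\mathsf{P}2<_\mathsf{P}1$ obtained earlier from the acyclic $1-$orientation $\mathcal{O}_1$.
\begin{verbatim}
from itertools import product

def count_strict(n, relations, N):
    # relations: pairs (i, j) meaning i <_P j; count maps
    # f : [n] -> {1,...,N} with f(i) < f(j) whenever i <_P j
    count = 0
    for f in product(range(1, N + 1), repeat=n):
        if all(f[i - 1] < f[j - 1] for i, j in relations):
            count += 1
    return count

# the chain 4 <_P 3 <_P 2 <_P 1 (all relations listed)
chain = [(4, 3), (3, 2), (2, 1), (4, 2), (4, 1), (3, 1)]
print(count_strict(4, chain, 6))   # 15 = binomial(6, 4)
\end{verbatim}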
\begin{proposition}
For a simple connected graph $\Gamma$ on the set $[n]$ and $m\geq1$ it holds
\begin{align}\label{poset}
\Psi^m_0(\Gamma)\;\;\;=
\sum_{\mathsf{P}\in\mathcal{H}(\mathsf{H}_m(\Gamma))}F(\mathsf{P}),
\end{align}
where $F$ is the quasisymmetric enumerator of strict $\mathsf{P}-$partitions.
\end{proposition}
\begin{proof}
In \cite{BBM}, Theorem 12, it is shown that the vertices of an $m-$graph polytope $Q_{\Gamma,m}$ are naturally labeled by acyclic $1-$orientations of the hypergraph $\mathsf{H}_m(\Gamma)$, i.e. that the cone $C_\mathcal{O}$ defined by $x_i\geq x_j$ for $i\rightarrow j$ in $\mathsf{H}_m(\Gamma)/\mathcal{O}$ is the normal cone of some vertex of the hypergraphic polytope $Q_{\Gamma,m}.$ If $x_i\geq x_j$ and $x_j\geq x_k$ then $x_i\geq x_k$, so the $\mathcal{H}-$poset $\mathsf{P}$ which is the transitive closure of $\mathsf{H}_m(\Gamma)/\mathcal{O}$ induces the same cone $C_\mathcal{O}.$ Since $\Psi^m_0(\Gamma)$ counts points in the normal cones of vertices of the $m-$graph polytope $Q_{\Gamma,m}$, the equation $(\ref{poset})$ holds.
\end{proof}
In the end we describe the $\mathcal{H}-$posets corresponding to the sequence of polytopes $(Q_{\Gamma,1},Q_{\Gamma,2},\ldots,Q_{\Gamma,n-1})$ associated to a simple graph $\Gamma$. The following theorem shows that from the $\mathcal{H}-$posets of $\mathsf{H}_k(\Gamma)$ we can obtain the $\mathcal{H}-$posets of the hypergraph $\mathsf{H}_{k+1}(\Gamma).$ Note that the $\mathcal{H}-$posets of $\mathsf{H}_1(\Gamma)$ are the transitive closures of acyclic orientations of the graph $\Gamma$, and the $\mathcal{H}-$posets of $\mathsf{H}_{n-1}(\Gamma)$ are $\mathcal{B}-$trees.
\begin{theorem}
Let $\mathsf{P}$ be a $\mathcal{H}-$poset of the hypergraph $\mathsf{H}_k(\Gamma)$ with the property that $\mathsf{P}$ is not a $\mathcal{H}-$poset of $\mathsf{H}_{k+1}(\Gamma)$. Then there exists an algorithm that creates $\mathcal{H}-$posets of $\mathsf{H}_{k+1}(\Gamma)$ by adding some relation in the $\mathcal{H}-$poset $\mathsf{P}$.
\end{theorem}
\begin{proof}
Let $\mathsf{P}$ be an $\mathcal{H}-$poset of $\mathsf{H}_k(\Gamma)$. It follows that for all subsets $H\in\mathsf{H}_k(\Gamma)$ the restriction $\mathsf{P}|_{H}$ is a rooted tree. As $\mathsf{P}$ is not an $\mathcal{H}-$poset of $\mathsf{H}_{k+1}(\Gamma)$, we can find $H_1\in\mathsf{H}_{k+1}(\Gamma)$ such that $|H_1|=k+1$ and $\mathsf{P}|_{H_1}$ is not a rooted tree. Since there exist at least two subsets of $H_1$ of cardinality $k$ which belong to $\mathsf{H}_k(\Gamma)$, there are $v_1,v_2\in H_1$ for which there is no element $v_3\in H_1\setminus\{v_1,v_2\}$ satisfying either $v_1<_{\mathsf{P}|_{H_1}}v_3$ or $v_2<_{\mathsf{P}|_{H_1}}v_3$. By adding, respectively, the relations $v_1<v_2$ and $v_2<v_1$ to the poset $\mathsf{P}$ we obtain new binary relations on the set $[n]$ whose transitive closures are new posets $\mathsf{P}_1$ and $\mathsf{P}_2$ such that $\mathsf{P}_1|_{H_1}$ and $\mathsf{P}_2|_{H_1}$ are rooted trees. If $\mathsf{P}_1$ and $\mathsf{P}_2$ are $\mathcal{H}-$posets of the hypergraph $\mathsf{H}_{k+1}(\Gamma)$ we stop this procedure. Otherwise, we continue in the same way, by taking a new hyperedge $H_2\in\mathsf{H}_{k+1}(\Gamma)$ of cardinality $k+1$ such that $\mathsf{P}_1|_{H_2}$ or $\mathsf{P}_2|_{H_2}$ is not a rooted tree and by repeating the previously described procedure. This process terminates, since there are finitely many subsets of cardinality $k+1$ in the hypergraph $\mathsf{H}_{k+1}(\Gamma).$
\end{proof}
\begin{example}
Consider the posets $\mathsf{P}_6$, $\mathsf{P}_{10}$, $\mathsf{P}_{16}$, $\mathsf{P}_{18}$ and $\mathsf{P}_{20}$ from the previous example and recall that $\mathsf{P}_6$ is an $\mathcal{H}$--poset for $\mathsf{H}_1(L_4)$, $\mathsf{P}_{10}$ and $\mathsf{P}_{16}$ are $\mathcal{H}-$posets for $\mathsf{H}_2(L_4)$, and $\mathsf{P}_{18}$ and $\mathsf{P}_{20}$ are $\mathcal{H}-$posets for $\mathsf{H}_3(L_4)$.
From the $\mathcal{H}-$poset $\mathsf{P}_{6}$ we can obtain the posets $\mathsf{P}_{10}$ and $\mathsf{P}_{16}$ by adding, respectively, the relations $1<3$ and $3<1$ to $\mathsf{P}_{6}$, since $\mathsf{P}_6|_{\{1,2,3\}}$ is not a rooted tree. Similarly, from $\mathsf{P}_{16}$ we can obtain the $\mathcal{H}-$posets $\mathsf{P}_{18}$ and $\mathsf{P}_{20}$ by adding, respectively, the relations $4<1$ and $1<4$, since $\mathsf{P}_{16}|_{\{1,2,3,4\}}$ is not a rooted tree.
\end{example}
\end{document}
\begin{document}
\title{Quadratic forms and systems of forms in many variables\thanks{This research was supported by Engineering and Physical Sciences Research Council grants EP/J500495/1 and EP/M507970/1.}
}
\author{\href{https://maths.fan}{Simon L. Rydin Myerson}
}
\maketitle
\begin{abstract}
Let $F_1,\dotsc,F_R$ be quadratic forms with integer coefficients in $n$ variables. When $n\geq 9R$ and the variety $V(F_1,\dotsc,F_R)$ is a smooth complete intersection, we prove an asymptotic formula for the number of integer points in an expanding box at which these forms simultaneously vanish, which in particular implies the Hasse principle for $V(F_1,\dotsc,F_R)$. Previous work in this direction required $n$ to grow at least quadratically with $R$. We give a similar result for $R$ forms of degree $d$, conditional on an upper bound for the number of solutions to an auxiliary inequality. In principle this result may apply as soon as $n> d2^dR$. In the case that $d\geq 3$, several strategies are available to prove the necessary upper bound for the auxiliary inequality. In a forthcoming paper we use these ideas to apply the circle method to nonsingular systems of forms with real coefficients.
\paragraph{Keywords}forms in many variables $\cdot$ Hardy-Littlewood method $\cdot$\\ quadratic forms $\cdot$ rational points
\paragraph{Mathematics Subject Classification (2000)}11D45 $\cdot$ 11P55 $\cdot$ 11D72 $\cdot$ 11G35 $\cdot$ 14G05
\end{abstract}
\section{Introduction}
\subsection{Results}\label{1.sec:main_result}
Our goal is to improve the following classic result of Birch.
\begin{theorem}[Birch~\cite{birchFormsManyVars}]\label{1.thm:birch's_theorem_long}
Let $d\geq 2$ and let $F_1(\vec{x}),\dotsc,F_R(\vec{x})$ be homogeneous forms of degree $d$, with integer coefficients in $n$ variables $x_1,\dotsc,x_n$. Let $\mathscr{B}$ be a box in $\bbR^n$, contained in the box $\clsd{-1}{1}^n$, and having sides of length at most 1 which are parallel to the coordinate axes. For each $ P\geq 1$, write
\begin{equation*}
\numZeroesInBoxOf{F_1,\dotsc,F_R}(P)
=
\#
\set{ \vec{x}\in\bbZ^n :
\vec{x}/P\in\mathscr{B},\,
F_1(\vec{x})=0, \dotsc,F_R(\vec{x})=0
}.
\end{equation*}
Let $W$ be the projective variety cut out in $\bbP_\bbQ^{n-1}$ by the condition that the $R\times n$ Jacobian matrix $\brax{\partial F_i(\vec{x})/ \partial x_j}_{ij}$ has rank less than $R$. If
\begin{equation}\label{1.eqn:birch's_condition_long}
n-1-\dim W
> (d-1)2^{d-1}R(R+1),
\end{equation}
then for all $P\geq 1$, some $\operatorname{Sing}IntegralBox\geq 0$ depending only on the $F_i$ and $\mathscr{B}$, and some $\operatorname{Sing}Series\geq 0$ depending only on the $F_i$, we have
\begin{equation}\label{1.eqn:HL_formula}
\numZeroesInBoxOf{F_1,\dotsc,F_R}(P)
=
\operatorname{Sing}IntegralBox\operatorname{Sing}Series P^{n-dR}
+
O\brax{P^{n-dR-\delta}}
\end{equation}
where the implicit constant depends only on the forms $F_i$ and $\delta$ is a positive constant depending only on $d$ and $R$. If the variety $V(F_1,\dotsc,F_R)$ cut out in $\bbP_\bbQ^{n-1}$ by the forms $F_i$ has a smooth point over $\bbQ_p$ for each prime $p$ then $\operatorname{Sing}Series >0$, and if it has a smooth real point whose homogeneous co-ordinates lie in $\mathscr{B}$ then $\operatorname{Sing}IntegralBox>0$.
\end{theorem}
Here $\operatorname{Sing}IntegralBox,$ $\operatorname{Sing}Series$ are the usual singular integral and series; see \eqref{1.eqn:evaluating_frakI} and \eqref{1.eqn:evaluating_frakS} below.
We focus in particular on weakening the hypothesis \eqref{1.eqn:birch's_condition_long} on the number of variables, when the number of forms $R$ is greater than one. Previous improvements of this type have required $R=1$ or 2. Our first result, proved in \S\ref*{1.sec:main_thm_proof}, is as follows:
\begin{theorem}\label{1.thm:main_thm_short}
When $d=2$ and $\dim V(F_1,\dotsc,F_R)=n-1-R$, we may replace \eqref{1.eqn:birch's_condition_long} with the condition
\begin{equation}
n-\sigma_\bbR
>
8R,
\label{1.eqn:condition_on_n_short}
\end{equation}
where $\sigma_\bbR$ is the element of $\set{0,\dotsc,n}$ defined by \begin{equation}\label{1.eqn:def_of_sigma-sub-R}
\sigma_\bbR=
1+\max_{\vec{\beta}\in\bbR^R\setminus\set{\vec{0}}} \dim\operatorname{Sing} V(\vec{\beta}\cdot\vec{F}),
\end{equation}
and $ V(\vec{\beta}\cdot\vec{F})$ is the hypersurface cut out in $\bbP_\bbR^{n-1}$ by $\beta_1F_1+\dotsc+\beta_RF_R=0$.
\end{theorem}
Note that \eqref{1.eqn:condition_on_n_short} is equivalent to
\begin{equation}\label{1.eqn:condition_on_rank}
\min_{\vec{\beta}\in\bbR^R\setminus\set{\vec{0}}} \operatorname{rank} (\vec{\beta}\cdot\vec{F}) > 8R,
\end{equation}
where $\operatorname{rank} (\vec{\beta}\cdot\vec{F})$ is the rank of the matrix of the quadratic form $ \beta_1F_1+\dotsc+\beta_RF_R$. The hypothesis \eqref{1.eqn:condition_on_n_short} is strictly weaker than the case $d=2$ of the condition \eqref{1.eqn:birch's_condition_long} as soon as $R\geq 4$. Indeed we have $\operatorname{Sing} V(\vec{\beta}\cdot\vec{F})\subset W$ whenever $\vec{\beta}\in\bbR^R\setminus\set{\vec{0}}$, and so
\[
\sigma_\bbR
\leq
1+\dim W.
\]
Thus \eqref{1.eqn:condition_on_n_short} is strictly weaker than \eqref{1.eqn:birch's_condition_long} whenever $2R(R+1)>8R$ holds, that is for $R\geq 4$.
To obtain the result described in the abstract we can simplify \eqref{1.eqn:condition_on_n_short} with the following lemma, proved at the end of \S\ref*{1.sec:main_thm_proof}.
\begin{lemma}\label{1.lem:nonsing_case}
Let $d\geq 2$ and let $F_1,\dotsc,F_R$ and $W$ be as in Theorem~\ref*{1.thm:birch's_theorem_long}. If $V(F_1,\dotsc,F_R)$ is smooth with dimension $n-1-R$, then we have
\begin{equation}\label{1.eqn:nonsing_case}
\sigma_\bbR
\leq
1+\dim W
\leq R-1.
\end{equation}
\end{lemma}
If $V(F_1,\dotsc,F_R)$ is a smooth complete intersection and $n\geq 9R$ then Theorem~\ref*{1.thm:main_thm_short} and Lemma~\ref*{1.lem:nonsing_case} imply that the asymptotic formula \eqref{1.eqn:HL_formula} holds. This in turn implies that $V(F_1,\dotsc,F_R)$ satisfies the Hasse principle, by the last part of Theorem~\ref*{1.thm:birch's_theorem_long}. As is usual with the circle method one also obtains weak approximation for $V(F_1,\dotsc,F_R)$ in this case; see the comments after the proof of Theorem~\ref*{1.thm:main_thm_short} in \S\ref*{1.sec:main_thm_proof}.
The ``square-root cancellation" heuristic discussed around formula (1.12) in Browning and Heath-Brown~\cite{browningHeathBrownDiffDegrees} suggests that the condition $n > 4R$ should suffice in place of the $n\geq 9R$ in the previous paragraph. So \eqref{1.eqn:condition_on_n_short} brings us within a constant factor of square-root cancellation as $R$ grows, while \eqref{1.eqn:birch's_condition_long} misses by a factor of $O(R)$.
We deduce Theorem~\ref*{1.thm:main_thm_short} from the following more general result, proved in \S\ref*{1.sec:main_thm_proof}.
\begin{definition}\label{1.def:aux_ineq}
For each $k \in\bbN\setminus\set{0}$ and $\vec{t}\in\bbR^k$ we write $\supnorm{\vec{t}} = \max_i \abs{t_i}$ for the supremum norm. Let $f(\vec{x})$ be any polynomial of degree $d\geq 2$ with real coefficients in $n$ variables $x_1,\dotsc,x_n$. For $i= 1,\dotsc, n$ we define
\begin{equation*}
m^{( f )}_i \brax{\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)} }
=
\sum_{j_1,\dotsc,j_{d-1}=1}^n
x^{(1)}_{j_1} \dotsm x^{(d-1)}_{j_{d-1}}
\frac{\partial^{d} f(\vec{x})}{\partial x_{j_1} \dotsm \partial x_{j_{d-1}} \partial x_i},
\end{equation*}
where we write $\vecsuper{x}{j}$ for a vector of $n$ variables $(x^{(j)}_1,\dotsc,x^{(j)}_n)^T$. This defines an $n$-tuple of multilinear forms
\[
\vecsuper{m}{f}\brax{\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)} }\in \bbR[\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)}]^n.
\]
Finally, for each $B \geq 1$ we put $\auxIneqOfSomethingNumSolns{f} \brax{ B }$ for the number of $(d-1)$-tuples of integer $n$-vectors $\vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}$ with
\begin{gather}
\supnorm{\vecsuper{x}{1}},\dotsc,\supnorm{\vecsuper{x}{d-1}} \leq B,
\nonumber
\\
\label{1.eqn:aux_ineq}
\supnorm{\vecsuper{m}{f}\brax{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)} }} < \supnorm{ f^{[d]} } B^{d-2}
\end{gather}
where we let $\supnorm{f^{[d]}} = \frac{1}{d!} \max_{\vec{j}\in\set{1,\dotsc,n}^d} \abs[\big]{\frac{\partial^d f(\vec{x})}{\partial x_{j_1}\dotsm\partial x_{j_d}}}$.
\end{definition}
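For quadratic polynomials this definition becomes very concrete: if $d=2$ and $f(\vec{x})=\vec{x}^{T}A\vec{x}$ with $A$ symmetric, then $\vecsuper{m}{f}\brax{\vec{x}}=2A\vec{x}$, $\supnorm{f^{[2]}}=\max_{i,j}\abs{A_{ij}}$ and $B^{d-2}=1$. The following brute-force sketch (our own illustration, feasible only for very small $n$ and $B$) counts the points of Definition~\ref*{1.def:aux_ineq} in this special case; a nonsingular form leaves only $\vec{x}=\vec{0}$, while a highly singular form leaves a whole hyperplane of solutions.
\begin{verbatim}
import itertools
import numpy as np

def aux_count_quadratic(A, B):
    # count x in Z^n with |x|_inf <= B and |2 A x|_inf < max_{i,j}|A_{ij}|
    A = np.asarray(A)
    n = A.shape[0]
    threshold = np.max(np.abs(A))
    count = 0
    for x in itertools.product(range(-B, B + 1), repeat=n):
        if np.max(np.abs(2 * A @ np.array(x))) < threshold:
            count += 1
    return count

# diagonal form x_1^2 + ... + x_4^2: only x = 0 is counted
print(aux_count_quadratic(np.eye(4, dtype=int), B=2))        # 1

# rank-one form (x_1 + x_2 + x_3 + x_4)^2: the hyperplane sum(x) = 0
print(aux_count_quadratic(np.ones((4, 4), dtype=int), B=2))  # 85
\end{verbatim}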
\begin{theorem}\label{1.thm:manin}
Let the forms $F_i$ and the counting function $ \numZeroesInBoxOf{F_1,\dotsc,F_R}(P)$ be as in Theorem~\ref*{1.thm:birch's_theorem_long}, and let $\auxIneqOfSomethingNumSolns{f}(B)$ be as in Definition~\ref*{1.def:aux_ineq}. Suppose that the $F_i$ are linearly independent and that
\begin{equation}\label{1.eqn:aux_ineq_bound_in_manin_thm}
\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}(B)
\leq
C_0 B^{(d-1)n-2^d{\mathscr C}}
\end{equation}
for some $C_0 \geq 1$, ${\mathscr C} > dR$ and all $\vec{\beta}\in\bbR^R$ and $B\geq 1$, where we have written $\vec{\beta}\cdot\vec{F}$ for $ \beta_1F_1+\dotsb+\beta_RF_R$. Then for all $P\geq 1$ we have
\begin{equation*}
\numZeroesInBoxOf{F_1,\dotsc,F_R}(P)
=
\operatorname{Sing}IntegralBox\operatorname{Sing}Series P^{n-dR}
+
O\brax{P^{n-dR-\delta}},
\end{equation*}
where the implicit constant depends at most on $C_0$, ${\mathscr C}$ and the $F_i$, and $\delta$ is a positive constant depending at most on ${\mathscr C}$, $d$ and $R$. Here $\operatorname{Sing}IntegralBox$ and $\operatorname{Sing}Series $ are as in Theorem~\ref*{1.thm:birch's_theorem_long}.
\end{theorem}
One trivially has \[
B^{(d-2)n}
\ll_{d,n}
\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}(B)\ll_{d,n}B^{(d-1)n}.
\]
So \eqref{1.eqn:aux_ineq_bound_in_manin_thm} requires us to save a factor of $B^{2^d{\mathscr C}}$ over the trivial upper bound, while the largest saving possible is of size $O(B^n)$. It follows that we must have $n>d2^{d}R$ in order for both \eqref{1.eqn:aux_ineq_bound_in_manin_thm} and ${\mathscr C}>dR$ to hold.
Counting functions similar to $\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}(B)$ play a similar role in some other applications of the circle method, with the equations
\begin{equation}\label{1.eqn:aux_eq}
{\vecsuper{m}{f}\brax{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)} }}
= \vec{0}
\end{equation}
in place of the inequality \eqref{1.eqn:aux_ineq}. The quantities $M(a_1,\dotsc,a_r;H)$ from formula (9) of Dietmann~\cite{dietmannWeylsIneq}, and $\calM_f(P)$ from Lemma~2 of Schindler~\cite{schindlerWeylsIneq} are both of this type. In this setting one needs to save a factor of size $B^{O(R^2)}$ over the trivial bound.
In forthcoming work we bound the function $\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}(B)$ for degrees higher than 2, with the goal of handling systems $F_i$ in roughly $d2^{d}R$ variables. We will approach this problem variously by using elementary methods, by generalising the argument used in Lemma~3 of Davenport~\cite{davenportSixteen} to treat the equations \eqref{1.eqn:aux_eq}, and by applying the circle method iteratively to the inequalities \eqref{1.eqn:aux_ineq}. We will also combine the ideas used here with the variant of the circle method due to Freeman \cite{freemanAsymptBoundsAndFormulas} to give a version of Theorem~\ref*{1.thm:manin} for systems of forms $F_i$ with real coefficients.
\subsection{Related work}\label{1.sec:related_work}
Theorem~1 of M\"uller~\cite{mullerSystemsQuadIneqsAndValueDistns} gives a result with exactly the same number of variables as Theorem~\ref*{1.thm:main_thm_short}, but for quadratic inequalities with real coefficients rather than quadratic equations with rational coefficients. It is in turn founded on work of Bentkus and G\"otze~\cite{bentkusGotzeEllipsoids,bentkusGotzeDistributionQuadForms} concerning a single quadratic inequality. The method of proof is related to ours, see \S\S\ref*{1.sec:moat_lemmas} and~\ref*{1.sec:weyl_diff} below.
When $d=2$, the forms $F_i$ are diagonal and the variety $V(F_1,\dotsc,F_R)$ is smooth, then the conclusions of Theorem~\ref*{1.thm:birch's_theorem_long} hold whenever $n > 4R$. That is, we have the ``square-root cancellation" situation described at the end of \S\ref*{1.sec:main_result}. This follows by standard methods from a variant of Hua's lemma due to Cook~\cite{cookNoteHuasLemma}.
When $d=2$ Dietmann~\cite{dietmannSystemsQuadForms}, improving work of Schmidt~\cite{schmidtSystemsQuadForms}, gives conditions similar to \eqref{1.eqn:condition_on_n_short} under which the asymptotic formula \eqref{1.eqn:HL_formula} holds and the constant $\operatorname{Sing}Series$ is positive. In particular it is sufficient that either $
\min_{\vec{\beta}\in\bbC^R\setminus\set{\vec{0}}} \operatorname{rank}(\vec{\beta}\cdot\vec{F})>2R^2+3R,
$ or that $\min_{\vec{a}\in\bbQ^R\setminus\set{\vec{0}}} \operatorname{rank}(\vec{a}\cdot\vec{F})
>2R^3 +\tau(R)R$, where $\tau(R) =2$ if $R$ is odd and 0 otherwise. He also shows that if $d=2$, the variety $V(F_1,\dotsc,F_R)$ has a smooth real point and
$
\min_{\vec{a}\in\bbQ^R\setminus\set{\vec{0}}} \operatorname{rank}(\vec{a}\cdot\vec{F})
>2R^3-2R
$
then $V(F_1,\dotsc,F_R)$ has a rational point.
Munshi~\cite{munshiPairsQuadrics11Vars} proves the asymptotic formula \eqref{1.eqn:HL_formula} when $d=2$, $n=11$ and $V(F_1,F_2)$ is smooth. By contrast using Theorem~\ref*{1.thm:birch's_theorem_long} and \eqref{1.eqn:nonsing_case} would require $n \geq 14$. When $d=2$ and $R=1$ we have a single quadratic form $F$. Heath-Brown~\cite{heathBrownNewForm} then proves such an asymptotic formula whenever $V(F)$ is smooth and $n \geq 3$.
If $F$ is a cubic form, Hooley \cite{hooleyOctonaryCubicsII} shows that when $n=8$, the variety $V(F)$ is smooth, and $\mathscr{B}$ is a sufficiently small box centred at a point where the Hessian determinant of $F$ is nonzero, then we have a smoothly weighted asymptotic formula analogous to \eqref{1.eqn:HL_formula}. This result is conditional on a Riemann hypothesis for a certain modified Hasse-Weil $L$-function. For $n=9$ he proves a similar result without any such assumption \cite{hooleyNonaryCubicsIII}, with an error term $O(P^{n-3}(\log P)^{-\delta})$ instead of the $O(P^{n-3-\delta})$ in \eqref{1.eqn:HL_formula}. In this setting Theorem~\ref*{1.thm:birch's_theorem_long} requires $n \geq 17$.
In the case of a single quartic form $F$ such that $V(F)$ is smooth, Hanselmann~\cite{hanselmannQuartics40Vars} gives the condition $n\geq 40$ in place of the $n \geq 49$ required to apply Theorem~\ref*{1.thm:birch's_theorem_long}. Work in progress of Marmon and Vishe yields a further improvement.
When $d\geq 5$ and $R=1$, a sharper condition than \eqref{1.eqn:birch's_condition_long} is available by work of Browning and Prendiville~\cite{browningPrendivilleImprovements}. For $d\leq 10$ and a smooth hypersurface $V(F)$ this is essentially a reduction of one quarter in the number of variables required.
Dietmann~\cite{dietmannWeylsIneq} and Schindler~\cite{schindlerWeylsIneq} show that the condition \eqref{1.eqn:birch's_condition_long} may be replaced with $n-\sigma_\bbZ>(d-1)2^{d-1}R(R+1)$, where we define
\begin{equation}\label{1.eqn:def_of_sigma-sub-Z}
\sigma_\bbZ
=
1+ \max_{\vec{a}\in \bbZ^R\setminus\set{ \vec{0} }} \dim\operatorname{Sing} V(\vec{a}\cdot\vec{f}^{[d]}).
\end{equation}
Note that the maximum here is over integer points, and so we may have $\sigma_\bbZ < \sigma_\bbR$.
Birch's work~\cite{birchFormsManyVars} is generalised to systems of forms with differing degrees by Browning and Heath-Brown~\cite{browningHeathBrownDiffDegrees} over \tbbQ and by Frei and Madritsch~\cite{freiMadritschDifferingDegrees} over number fields. It is extended to linear spaces of solutions by Brandes~\cite{brandesFormsRepresentingForms,brandesLinearSpacesNumberFields}. Versions of the result for function fields are due to Lee~\cite{leeFunctionFields} and to Browning and Vishe~\cite{browningVisheFunctionFields}. A version for bihomogeneous forms is due to Schindler~\cite{schindlerBihomogeneous}, and Mignot~\cite{mignotTridegree111,mignotCircleMethodWithToricHeights} further develops these methods for certain trilinear forms and for hypersurfaces in toric varieties. Liu~\cite{liuQuadricsPrimes} proves existence of solutions in prime numbers to a quadratic equation in 10 or more variables. Asymptotic formulae for systems of equations of the same degree with prime values of the variables are considered by Cook and Magyar~\cite{cookMagyarPrimeVars} and by Xiao and Yamagishi~\cite{xiaoYamagishiPrimeVars}. Magyar and Titchetrakun~\cite{magyarEtAlAlmostPrimes} extend these results to values of the variables with a bounded number of prime factors, while Yamagishi~\cite{yamagishiDifferingDegreesPrimes} treats systems of equations with differing degrees and prime variables. It is natural to ask whether similar generalisations exist for Theorem~\ref*{1.thm:main_thm_short}.
\subsection{Notation}\label{1.sec:notation}
Parts of our work apply to polynomials with general real coefficients. Therefore we let $f_1(\vec{x}),\dotsc,f_R(\vec{x})$ be polynomials with real coefficients, of degree $d \geq 2$ in $n$ variables $x_1,\dotsc,x_n$, and we write $f^{[d]}_1(\vec{x}), \dotsc, f^{[d]}_R(\vec{x})$ for the degree $d$ parts.
Implicit constants in $\ll$ and big-$O$ notation are always permitted to depend on the polynomials $f_i$, and hence on $d,n,$ and $R$. We use scalar product notation to indicate linear combinations, so that for example $\vec{\alpha}\cdot\vec{f}=\sum_{i=1}^R \alpha_i f_i$. Throughout, $\supnorm{\vec{t}}$, $\supnorm{f}$, $\vecsuper{m}{f}$ and $\auxIneqOfSomethingNumSolns{f} \brax{ B }$ are as in Definition~\ref*{1.def:aux_ineq}. We do not require algebraic varieties to be irreducible, and we use the convention that $\dim\emptyset = -1$.
By an \emph{admissible box} we mean a box in $\bbR^n$ contained in the box $\clsd{-1}{1}^n$, and having sides of length at most 1 which are parallel to the coordinate axes. We let $\mathscr{B}$ be an admissible box. For each $\vec{\alpha}\in\bbR^R$ and $P\geq 1$, we define the exponential sum
\begin{equation}\label{1.eqn:def_of_S}
\expSumSBoxAt{\vec{\alpha}}
=
\sum_{\substack{ \vec{x} \in \bbZ^n \\ \vec{x}/P \in \mathscr{B} }}
e( \vec{\alpha}\cdot\vec{f}(\vec{x}) )
\end{equation}
where $e(t) = e^{2\pi i t}$. This depends implicitly on $\mathscr{B}$ and the $f_i$. We often write the expression $\max\setbrax{P^{-d} \supnorm{\vec{\beta}}^{-1}, \supnorm{\vec{\beta}}^{\frac{1}{d-1}}}$, and if $\vec{\beta}=\vec{0}$ this quantity is defined to be $+\infty$.
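For concreteness, \eqref{1.eqn:def_of_S} can be evaluated numerically for small data. The sketch below (our own illustration, with an arbitrary choice of quadratic form, box and $P$) exhibits the familiar behaviour that $\abs{\expSumSBoxAt{\vec{\alpha}}}$ attains its trivial maximum at $\vec{\alpha}=\vec{0}$ and is typically much smaller at a generic point.
\begin{verbatim}
import itertools
import numpy as np

def exp_sum(alpha, fs, P, n, box=(0.0, 1.0)):
    # S(alpha) = sum over x in Z^n with x/P in the box of e(alpha . f(x))
    lo, hi = box
    rng = range(int(np.ceil(lo * P)), int(np.floor(hi * P)) + 1)
    total = 0j
    for x in itertools.product(rng, repeat=n):
        phase = sum(a * f(x) for a, f in zip(alpha, fs))
        total += np.exp(2j * np.pi * phase)
    return total

f1 = lambda x: x[0] ** 2 - 2 * x[1] ** 2      # one quadratic form, R = 1
print(abs(exp_sum([0.0], [f1], P=20, n=2)))   # 441.0, the number of points
print(abs(exp_sum([0.31], [f1], P=20, n=2)))  # much smaller, generic alpha
\end{verbatim}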
\subsection{Structure of this paper}
In \S\ref*{1.sec:circle_method} we apply the circle method to a system of degree $d$ polynomials with integer coefficients, assuming a certain hypothesis \eqref{1.eqn:assumed_arcs} on $\expSumSBoxAt{\vec{\alpha}}$. In \S\ref*{1.sec:aux_ineq} we prove this hypothesis on ${\expSumSBoxAt{\vec{\alpha}}}$ for polynomials with real coefficients, assuming that the bound \eqref{1.eqn:aux_ineq_bound_in_manin_thm} above holds. We then prove Theorems~\ref*{1.thm:main_thm_short} and~\ref*{1.thm:manin} in \S\ref*{1.sec:main_thm_proof}.
\section{The circle method}\label{1.sec:circle_method}
In this section we apply the circle method, assuming that the bound
\begin{equation}\label{1.eqn:assumed_arcs}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }
\leq
C
\max \setbrax[\big]{ P^{-d}\supnorm{\vec{\beta}}^{-1} ,\, \supnorm{\vec{\beta}}^{\frac{1}{d-1}} }^{{\mathscr C}}
\end{equation}
holds for all $\vec{\alpha},\vec{\beta}\in \bbR^R$, $P \geq 1$, some ${\mathscr C}>dR$, $C \geq 1$, and some small $\epsilon>0$. In particular we will show that \eqref{1.eqn:assumed_arcs} implies that the set of points $\vec{\alpha}$ in $\bbR^R$ where $\abs{\expSumSBoxAt{\vec{\alpha}}}$ is large has small measure. Our goal is the result below, which will be proved in \S\ref*{1.sec:completing_the_circle_method}.
\begin{proposition}\label{1.prop:circle_method}
Assume that the polynomials $f_i$ have integer coefficients, and that the leading forms $f^{[d]}_i(\vec{x})$ are linearly independent. Write
\begin{equation}\label{1.eqn:def_of_num_solns_in_box}
N_{f_1,\dotsc,f_R}(P)
=
\#
\set{ \vec{x}\in\bbZ^n :
\vec{x}/P\in\mathscr{B},\,
f_1(\vec{x})=\dotsb= f_R(\vec{x})=0
}.
\end{equation}
Suppose we are given ${\mathscr C}>dR$, $C \geq 1$ and $\epsilon>0$ such that the bound \eqref{1.eqn:assumed_arcs} holds for all $\vec{\alpha},\vec{\beta}\in \bbR^R$, all $P \geq 1$ and all admissible boxes $\mathscr{B}$. If $\epsilon$ is sufficiently small in terms of ${\mathscr C}$, $d$ and $R$, then we have
\begin{equation*}
N_{f_1,\dotsc,f_R}(P)
=
\operatorname{Sing}IntegralBox\operatorname{Sing}Series P^{n-dR}
+
O_{C,f_1,\dotsc,f_R}\brax{P^{n-dR-\delta}}
\end{equation*}
for all $P\geq 1$, all admissible boxes $\mathscr{B}$, and some $\delta>0$ depending only on ${\mathscr C}$, $d$, $R$. Here $\operatorname{Sing}IntegralBox,$ $\operatorname{Sing}Series$ are the usual singular integral and series given by \eqref{1.eqn:evaluating_frakI} and \eqref{1.eqn:evaluating_frakS} below.
\end{proposition}
We comment on the role of \eqref{1.eqn:assumed_arcs}. If the $f_i$ have integer coefficients, then we have
\begin{equation}\label{1.eqn:circle_method}
N_{f_1,\dotsc,f_R}(P)
=
\int_{\clsd{0}{1}^R} \expSumSBoxAt{\vec{\alpha}} \,\mathrm{d}\vec{\alpha}.
\end{equation}
If both $\expSumSBoxAt{\vec{\alpha}}$ and $\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}$ are large then \eqref{1.eqn:assumed_arcs} implies that one of the terms $P^{-d}\supnorm{\vec{\beta}}^{-1}$ or $\supnorm{\vec{\beta}}^{\frac{1}{d-1}}$ must be large. In particular, the points $\vec{\alpha}$ and $\vec{\alpha}+\vec{\beta}$ must either be very close or somewhat far apart. In this sense \eqref{1.eqn:assumed_arcs} is a ``repulsion principle'' for the sum $\expSumSBoxAt{\vec{\alpha}}$. We can use this fact to bound the measure of the set where $\expSumSBoxAt{\vec{\alpha}}$ is large, and this will enable us to reduce \eqref{1.eqn:circle_method} to an integral over major arcs.
To see the source of the condition ${\mathscr C}>dR$ in Proposition~\ref*{1.prop:circle_method}, consider the case
\begin{equation}\label{1.eqn:amount_of_cancellation}
\abs{\expSumSBoxAt{\vec{\alpha}}} = \abs{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}} = CP^{n-{\mathscr C}+\epsilon}.
\end{equation}
In general we always have
\begin{equation*}
\max \setbrax[]{ P^{-d}\supnorm{\vec{\beta}}^{-1} ,\, \supnorm{\vec{\beta}}^{\frac{1}{d-1}} }^{\mathscr C} \geq P^{-{\mathscr C}},
\end{equation*}
with equality when $\supnorm{\vec{\beta}} = P^{1-d}$ holds. So in the case \eqref{1.eqn:amount_of_cancellation}, the assumption \eqref{1.eqn:assumed_arcs} is trivial. In other words \eqref{1.eqn:assumed_arcs} might still be satisfied even if the function $\expSumSBoxAt{\vec{\alpha}}$ had absolute value $P^{n-{\mathscr C}+\epsilon}$ at every point $\vec{\alpha}$ in real $R$-space. This will lead to an error term of size at least $P^{n-{\mathscr C}+\epsilon}$ in evaluating the integral \eqref{1.eqn:circle_method}. Hence we require ${\mathscr C}>dR$ in the proposition above in order for the error term to be smaller than the main term.
\subsection{Mean values from bounds of the form \eqref{1.eqn:assumed_arcs}}\label{1.sec:moat_lemmas}
We show that the bound \eqref{1.eqn:assumed_arcs} implies upper bounds for the integral of the function $\expSumSBoxAt{\vec{\alpha}}$ over any bounded measurable set. M\"uller~\cite{mullerSystemsQuadIneqsAndValueDistns} and Bentkus and G\"otze~\cite{bentkusGotzeEllipsoids,bentkusGotzeDistributionQuadForms} previously used similar ideas to treat quadratic forms with real coefficients.
We begin with a technical lemma.
\begin{lemma}\label{1.lem:from_moats_to_mean_values}
Let $r_1:\open{0}{\infty}\to\open{0}{\infty}$ be a strictly decreasing bijection, and let $r_2:\open{0}{\infty}\to\open{0}{\infty}$ be a strictly increasing bijection. Write $r_1^{-1}$ and $r_2^{-1}$ for the inverses of these maps. Let $\nu>0$ and let $E_0$ be a hypercube in $\bbR^R$ whose sides are of length $\nu$ and parallel to the coordinate axes. Let $E$ be a measurable subset of $E_0$ and let $\varphi: E\to\clsdopen{0}{\infty}$ be a measurable function.
Suppose that for all $\vec{\alpha},\vec{\beta}\in\bbR^R$ such that $\vec{\alpha}\in E$ and $\vec{\alpha}+\vec{\beta}\in E$, we have
\begin{equation}\label{1.eqn:arcs_for_random_function}
\min\setbrax*{ \varphi(\vec{\alpha}) , \, \varphi(\vec{\alpha}+\vec{\beta}) }
\leq
\max \setbrax[\big]{ r_1^{-1}(\supnorm{\vec{\beta}}) ,\, r_2^{-1}(\supnorm{\vec{\beta}}) }.
\end{equation}
Then, for any integers $k$ and $\ell$ with $k<\ell$, we have
\begin{align}
\int_{E} \varphi(\vec{\alpha}) \,\mathrm{d}\vec{\alpha}
\ll_R {}&
\nu^R2^k
+
\sum_{i=k}^{\ell-1}2^i
\brax[\bigg]{
\frac{
\nu r_1(2^i)}{\min\setbrax{r_2(2^i),\nu}}}^R
\nonumber
\\
&+
\brax[\bigg]{\frac{\nu r_1(2^\ell)}{\min\setbrax{r_2(2^\ell),\nu}}}^R \sup_{\vec{\alpha}\in E} \varphi\brax{\vec{\alpha}}
,
\label{1.eqn:from_moats_to_mean_values}
\end{align}
where the implicit constant depends only on $R$.
\end{lemma}
Note that if we choose
\[
\varphi(\vec{\alpha})=\abs{\expSumSBoxAt{\vec{\alpha}}} / CP^{n+\epsilon},
\qquad
r_1(t)
= P^{-d}t^{-1/{\mathscr C}},
\qquad
r_2(t)
=
t^{(d-1)/{\mathscr C}},
\]
then the hypotheses \eqref{1.eqn:assumed_arcs} and \eqref{1.eqn:arcs_for_random_function} become identical. This will enable us to apply Lemma~\ref*{1.lem:from_moats_to_mean_values} to bound the integral $\int_{\frakm_{P,d,\Delta}}\expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha}$, where $\frakm_{P,d,\Delta}$ is a set of minor arcs on which $\expSumSBoxAt{\vec{\alpha}}$ is somewhat small.
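In more detail, this is a routine verification: for these choices the inverse functions are $r_1^{-1}(s)=P^{-d{\mathscr C}}s^{-{\mathscr C}}$ and $r_2^{-1}(s)=s^{\frac{{\mathscr C}}{d-1}}$, so that
\begin{equation*}
\max \setbrax[\big]{ r_1^{-1}(\supnorm{\vec{\beta}}) ,\, r_2^{-1}(\supnorm{\vec{\beta}}) }
=
\max \setbrax[\big]{ P^{-d}\supnorm{\vec{\beta}}^{-1} ,\, \supnorm{\vec{\beta}}^{\frac{1}{d-1}} }^{\mathscr C},
\end{equation*}
and with $\varphi$ as above the inequality \eqref{1.eqn:arcs_for_random_function} is precisely the hypothesis \eqref{1.eqn:assumed_arcs} divided through by $C$.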
\begin{proof}
The strategy of proof is as follows. We deduce from \eqref{1.eqn:arcs_for_random_function} that if both $\varphi(\vec{\alpha})\geq t$ and $\varphi(\vec{\alpha}+\vec{\beta})\geq t$ hold, then either $\supnorm{\vec{\beta}}\leq r_1(t)$ or $\supnorm{\vec{\beta}}\geq r_2(t)$ must hold. From this we will show that the set of points $\vec{\alpha}$ satisfying the bound $\varphi(\vec{\alpha})\geq t$ can be covered by a collection of hypercubes of side $2 r_1(t)$, each of which is separated from the others by a gap of size $\tfrac{1}{2}r_2(t)$. The lemma will follow upon bounding the total Lebesgue measure of this collection of hypercubes.
For each $t>0$ we set
\begin{equation}
\label{1.eqn:def_of_superlevel_set_for_random_function}
D\brax{t}
=
\set{\vec{\alpha}\in E: \varphi(\vec{\alpha})\geq t}.
\end{equation}
Observe that if $\vec{\alpha}$ and $\vec{\alpha}+\vec{\beta}$ both belong to $D\brax{t{}}$, then \eqref{1.eqn:arcs_for_random_function} implies that
\[
\max \setbrax[\big]{ r_1^{-1}(\supnorm{\vec{\beta}}) ,\, r_2^{-1}(\supnorm{\vec{\beta}}) }
\geq
t,
\]
from which it follows that either $\supnorm{\vec{\beta}}\leq r_1(t)$ or $\supnorm{\vec{\beta}}\geq r_2(t)$ must hold.
Let $\frakb$ be any hypercube in $\bbR^R$ whose sides are of length $\frac{1}{2}r_2(t)$ and parallel to the coordinate axes. We claim that $\frakb \cap D\brax{t}$ is contained in a hypercube \tfrakB whose sides are of length $2r_1(t)$. To see this let $\vec{\alpha}$ be any fixed vector lying in $\frakb \cap D\brax{t}$, and set
\begin{equation*}
\frakB
=
\set{\vec{\alpha}+\vec{\beta}: \vec{\beta}\in\bbR^R, \supnorm{\vec{\beta}}\leq r_1(t)}.
\end{equation*}
If $\vec{\alpha}+\vec{\beta}$ belongs to $\frakb \cap D\brax{t}$, then by definition of \tfrakb the bound $\supnorm{\vec{\beta}} \leq \frac{1}{2}r_2(t)$ must hold. In particular $\supnorm{\vec{\beta}} <r_2(t)$, so by the comments after \eqref{1.eqn:def_of_superlevel_set_for_random_function}, the bound $\supnorm{\vec{\beta}} \leq r_1(t)$ must hold. This shows that $\vec{\alpha}+\vec{\beta} \in \frakB$, and hence that $\frakb \cap D\brax{t}\subset \frakB$, as claimed. In particular the Lebesgue measure of $\frakb \cap D\brax{t}$ is at most
$
(2r_1(t))^R.
$
The set $D(t)$ is contained in $E_0$, a hypercube of side $\nu$. So in order to cover the set $D(t)$ with boxes \tfrakb of side $\tfrac{1}{2}r_2(t)$ one needs at most
\[
\ll_R \frac{\nu^R}{\min\setbrax{r_2(t),\nu}^{R}}
\]
boxes. Summing over all the boxes \tfrakb, it follows that
\begin{equation}\label{1.eqn:measure_of_arcs}
L\brax{t}
\ll_R
\brax[\bigg]{\frac{\nu r_1(t)}{\min\setbrax{r_2(t),\nu}}}^R,
\end{equation}
where we write $L\brax{t}$ for the Lebesgue measure of $D\brax{t}$. So we have
\begin{align*}
\int_{E} \varphi (\vec{\alpha})\,\mathrm{d}\vec{\alpha}
={}
&
\int\limits_{ E \setminus D\brax{2^k} }
\varphi(\vec{\alpha})
\,\mathrm{d}\vec{\alpha}
+ \sum_{i=k}^{\ell-1}\,
\int\limits_{ E \cap \brax[]{D\brax{2^{i}} \setminus D\brax{2^{i+1}}}}
\varphi(\vec{\alpha})
\,\mathrm{d}\vec{\alpha}
\nonumber
\\&
+
\int\limits_{E\cap D\brax{2^\ell}}
\varphi(\vec{\alpha})
\,\mathrm{d}\vec{\alpha}
\\
{}\leq{}
&
\nu^R 2^k
+ \sum_{i=k}^{\ell-1}2^{i+1}
L\brax{2^i}
+
L\brax{2^\ell}
\sup_{\vec{\alpha}\in E} \varphi(\vec{\alpha}).
\end{align*}
With \eqref{1.eqn:measure_of_arcs} this yields \eqref{1.eqn:from_moats_to_mean_values}.
\withqed\end{proof}
We now apply Lemma~\ref*{1.lem:from_moats_to_mean_values} to deduce mean values from bounds of the form \eqref{1.eqn:assumed_arcs}. The following result is stated in greater generality than is strictly required here, to facilitate future applications to forms with real coefficients.
\begin{lemma}\label{1.lem:mean_value_from_power-law_moats}
Let $T$ be a complex-valued measurable function on $\bbR^R$. Let $E_0$ be a hypercube in $\bbR^R$ whose sides are of length $\nu$ and parallel to the coordinate axes, and let $E$ be a measurable subset of $E_0$. Suppose that the inequality
\begin{equation}
\label{1.eqn:power-law_moats}
\min\setbrax*{ \abs*{\frac{T\brax{ \vec{\alpha} }}{P^{n}}} , \, \abs*{\frac{T\brax{ \vec{\alpha}+\vec{\beta} }}{P^{n}}} }
\leq
\max \setbrax[\big]{ P^{-d}\supnorm{\vec{\beta}}^{-1} ,\, \supnorm{\vec{\beta}}^{\frac{1}{d-1}} }^{\mathscr C}
\end{equation}
holds for some $P\geq 1$ and ${\mathscr C}>0$ and all $\vec{\alpha}, \vec{\beta} \in \bbR^R$. Suppose further that
\begin{equation}\label{1.eqn:weyl's_ineq_mean_value_lem}
\sup_{\vec{\alpha}\in E} \abs{T\brax{\vec{\alpha}}}
\leq
P^{n-\delta}
\end{equation}
for some $\delta\geq 0$. Then we have
\begin{multline}\label{1.eqn:mean_value_lem}
\int_{E} {T\brax{\vec{\alpha}}} \,\mathrm{d}\vec{\alpha}
\\
\ll_{{\mathscr C},d,R}
\left\{
\begin{array}{@{}l@{}l@{\hspace{1.7em}}l@{}}
\nu^R P^{n-{\mathscr C}}
&{}+
P^{n-{\mathscr C}-(d-1)R}
&\text{if } {\mathscr C} < R
\\
\nu^R P^{n-{\mathscr C}}
&{}+
P^{n-dR}\log P
&\text{if } {\mathscr C} = R
\\
\nu^R P^{n-{\mathscr C}}
&{}+
P^{n-dR-\delta\brax{ 1-\frac{R}{{\mathscr C}}}}
&\text{if } R < {\mathscr C} < dR
\\
\nu^R P^{n-{\mathscr C}}\log P
&{}+
P^{n-dR-\delta\brax{ 1-\frac{R}{{\mathscr C}}}}
&\text{if } {\mathscr C} = dR
\\
\nu^R P^{n-dR-\delta(1-\frac{dR}{{\mathscr C}})}
&{}+
P^{n-dR-\delta\brax{ 1-\frac{R}{{\mathscr C}}}}
&\text{if } {\mathscr C} > dR.
\end{array}
\right.
\end{multline}
\end{lemma}
Later we will take $T\brax{\vec{\alpha}} = C^{-1} P^{-\epsilon} \expSumSBoxAt{\vec{\alpha}}$ where $C$ is as in Proposition~\ref*{1.prop:circle_method}. We will take $E$ to be a set of minor arcs $\frakm_{P,d,\Delta}$, and we will interpret the integral $\int _{\frakm_{P,d,\Delta}} \expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha}$ as an error term, which will need to be smaller than a main term of size around $P^{n-dR}$. As a result, only the case ${\mathscr C}>dR$ of the bound \eqref{1.eqn:mean_value_lem} will be satisfactory for the present application.
\begin{proof}
We apply Lemma~\ref*{1.lem:from_moats_to_mean_values} with
\begin{equation}\label{1.eqn:choice_of_phi,_inradius,_outradius}
\varphi(\vec{\alpha}) = \frac{\abs{T(\vec{\alpha})}}{P^n},
\quad
r_1(t) = P^{-d}t^{-1/{\mathscr C}},
\quad
r_2(t) = t^{(d-1)/{\mathscr C}},
\end{equation}
noting that the bound \eqref{1.eqn:arcs_for_random_function} then follows from \eqref{1.eqn:power-law_moats}.
It remains to choose the parameters $k$ and $\ell$ from \eqref{1.eqn:from_moats_to_mean_values}. We will choose these so that the right-hand side of \eqref{1.eqn:from_moats_to_mean_values} is dominated by the sum $\sum_{i=k}^{\ell-1}$, rather than either of the other two terms. More precisely, take
\begin{equation}\label{1.eqn:def_of_tee-sub-i}
k
=
\floor{\log_2 P^{-{\mathscr C}}},
\qquad
\ell
=
\ceil{\log_2 P^{-\delta} },
\end{equation}
observing that
\begin{equation}
\tfrac{1}{2}P^{-{\mathscr C}}
<
2^k
\leq
P^{-{\mathscr C}},
\quad
P^{-\delta}
\leq
2^\ell
<
2 P^{-\delta}.
\label{1.eqn:tee-sub-k_and_tee-sub-ell}
\end{equation}
We may assume that ${\mathscr C}>\delta$, for otherwise the bound $\int_E \abs{T(\vec{\alpha})}\,\mathrm{d}\vec{\alpha} \leq \nu^R P^{n-\delta}$, which follows from \eqref{1.eqn:weyl's_ineq_mean_value_lem}, is stronger than any of the bounds listed in \eqref{1.eqn:mean_value_lem}. We then have $k < \ell$, and so this choice of $k, \ell$ is admissible in
Lemma~\ref*{1.lem:from_moats_to_mean_values}. Hence \eqref{1.eqn:from_moats_to_mean_values} holds, and substituting in our choices \eqref{1.eqn:choice_of_phi,_inradius,_outradius} for the parameters yields
\begin{align}
\int_{E} \frac{\abs{T\brax{\vec{\alpha}}}}{P^n} \,\mathrm{d}\vec{\alpha}
&\ll_R
\nu^R2^k
+
\sum_{i=k}^{\ell-1}
2^i
\brax[\bigg]{\frac{
\nu
P^{-d} 2^{-{i/{\mathscr C}}}}{ \min\setbrax{2^{{(d-1)i/{\mathscr C}}}, \nu }}}^R
\nonumber
\\
&\hphantom{{}\ll_R{}}+\brax[\bigg]{
\frac{
\nu
P^{-d} 2^{-{\ell/{\mathscr C}}} }{ \min\setbrax{2^{{(d-1)\ell/{\mathscr C}}}, \nu }}}^R
\sup_{\vec{\alpha}\in E} \frac{\abs{T\brax{\vec{\alpha}}}}{P^n}.
\label{1.eqn:mean_value_I}
\end{align}
By \eqref{1.eqn:weyl's_ineq_mean_value_lem} and \eqref{1.eqn:tee-sub-k_and_tee-sub-ell} we have $\sup_{\vec{\alpha}\in E} \frac{\abs{T\brax{\vec{\alpha}}}}{P^n}\leq 2^\ell $, and so we may extend the sum in \eqref{1.eqn:mean_value_I} from $\sum_{i=k}^{\ell-1}$ to $\sum_{i=k}^\ell$ to obtain
\begin{equation*}
\int_{E} \frac{\abs{T\brax{\vec{\alpha}}}}{P^n} \,\mathrm{d}\vec{\alpha}
\ll_R
\nu^R 2^k
+
\sum_{i=k}^{\ell}
2^i
\brax[\bigg]{\frac{
\nu
P^{-d} 2^{-{i/{\mathscr C}}} }{ \min\setbrax{2^{{(d-1)i/{\mathscr C}}}, \nu } }}^R.
\end{equation*}
Since $\min\setbrax{a,b}^{-1}\leq a^{-1}+b^{-1}$ for all $a,b>0$, we have
\[
\frac{ P^{-d} 2^{-{i/{\mathscr C}}}}{ \min\setbrax{2^{{(d-1)i/{\mathscr C}}}, \nu }}
\leq
P^{-d}2^{-di/{\mathscr C}}+\nu^{-1}P^{-d}2^{-i/{\mathscr C}},
\]
and therefore
\begin{equation}\label{1.eqn:mean_value_II}
\int_{E} \frac{\abs{T\brax{\vec{\alpha}}}}{P^n} \,\mathrm{d}\vec{\alpha}
\ll_R
\nu^R2^k +
\sum_{i=k}^{\ell}
\nu^R
P^{-dR} 2^{i(1-dR/{\mathscr C})}
+
\sum_{i=k}^{\ell}
P^{-dR} 2^{i(1-R/{\mathscr C})}.
\end{equation}
Note that
\begin{equation*}
\sum_{i=k}^{\ell}
2^{i(1-dR/{\mathscr C})}
\ll_{{\mathscr C},d,R}
\begin{cases}
2^{k\brax{1-dR/{\mathscr C}}}
&\text{if }{\mathscr C}<dR
\\
\ell-k
&\text{if }{\mathscr C} = dR
\\
2^{\ell\brax{1-dR/{\mathscr C}}}
&\text{if }{\mathscr C}>dR.
\end{cases}
\end{equation*}
Recall from \eqref{1.eqn:tee-sub-k_and_tee-sub-ell} that we have $2^k \geq \tfrac{1}{2} P^{-{\mathscr C}}$ and $2^\ell \leq 2 P^{-\delta}$, and observe that by \eqref{1.eqn:def_of_tee-sub-i} the bound $\ell-k \leq 2+{\mathscr C}\log_2 P$ holds. It follows that
\begin{equation*}
\sum_{i=k}^{\ell}
2^{i(1-dR/{\mathscr C})}
\ll_{{\mathscr C},d,R}
\begin{cases}
P^{dR-{\mathscr C}}
&\text{if }{\mathscr C}<dR
\\
\log P
&\text{if }{\mathscr C} = dR
\\
P^{-\delta\brax{1-dR/{\mathscr C}}}
&\text{if }{\mathscr C}>dR,
\end{cases}
\end{equation*}
and reasoning similarly for $\sum_{i=k}^{\ell}
2^{i(1-R/{\mathscr C})}$, we deduce from \eqref{1.eqn:mean_value_II} that
\begin{multline*}
\int_{E} \frac{\abs{T\brax{\vec{\alpha}}}}{P^n} \,\mathrm{d}\vec{\alpha}
\\
\ll
\left\{
\begin{array}{@{}l@{}l@{\hspace{1.7em}}l@{}}
\nu^R2^k+
\nu^R P^{-{\mathscr C}}
&{}+
P^{-{\mathscr C}-(d-1)R}
&\text{if } {\mathscr C} < R
\\
\nu^R2^k+
\nu^R P^{-{\mathscr C}}
&{}+
P^{-dR}\log P
&\text{if } {\mathscr C} = R
\\
\nu^R2^k+
\nu^R P^{-{\mathscr C}}
&{}+
P^{-dR-\delta(1-R/{\mathscr C})}
&\text{if } R < {\mathscr C} < dR
\\
\nu^R2^k+
\nu^R P^{-{\mathscr C}}\log P
&{}+
P^{-dR-\delta(1-R/{\mathscr C})}
&\text{if } {\mathscr C} = dR
\\
\nu^R2^k+
\nu^R P^{-dR-\delta(1-dR/{\mathscr C})}
&{}+
P^{-dR-\delta(1-R/{\mathscr C})}
&\text{if } {\mathscr C} > dR,
\end{array}
\right.
\end{multline*}
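For the reader's convenience we spell out the ``similar reasoning'' used for the second sum: exactly as above,
\begin{equation*}
\sum_{i=k}^{\ell}
2^{i(1-R/{\mathscr C})}
\ll_{{\mathscr C},d,R}
\begin{cases}
P^{R-{\mathscr C}}
&\text{if }{\mathscr C}<R
\\
\log P
&\text{if }{\mathscr C} = R
\\
P^{-\delta\brax{1-R/{\mathscr C}}}
&\text{if }{\mathscr C}>R,
\end{cases}
\end{equation*}
and multiplying by $P^{-dR}$ gives the third term on each line of the preceding display.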
with an implicit constant depending only on ${\mathscr C},d,$ and $R$. One final application of the bound $2^k \leq P^{-{\mathscr C}}$ from \eqref{1.eqn:tee-sub-k_and_tee-sub-ell} completes the proof of \eqref{1.eqn:mean_value_lem}.
\withqed\end{proof}
\subsection{Notation for the circle method}\label{1.sec:circle_method_notation}
We split the domain $\clsd{0}{1}^R$ into two regions. Let $\Delta \in \open{0}{1}$ and set
\begin{align}\label{1.eqn:def_of_major_arcs}
\frakM_{P,d,\Delta}
&=
\bigcup_{\substack{q \in \bbN \\ q\leq P^\Delta}}
\bigcup_{\substack{0 \leq a_1,\dotsc,a_R \leq q\\ (a_1,\dotsc ,a_R,q) =1}}
\set[\big]{\vec{\alpha}\in\clsdopen{0}{1}^R : \supnormbig{\vec{\alpha}-\tfrac{\vec{a}}{q}} < P^{\Delta-d} },
\\
\nonumber
\frakm_{P,d,\Delta}
&=
\clsd{0}{1}^R\setminus\frakM_{P,d,\Delta}.
\end{align}
We give local analogues of $\expSumSBoxAt{\vec{\alpha}}$ and of the integral $\int_{\frakM_{P,d,\Delta}}\expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha}$. We set
\begin{equation*}
\Sloc{q}{\vec{a}}
=
q^{-n}
\sum_{\vec{y}\in \set{1,\dotsc,q}^n}
e\brax[\big]{\aOverQDotLittleF(\vec{y})}
\end{equation*}
for each $q \in \bbN$ and $\vec{a}\in\bbZ^R$, and we put
\begin{equation*}
\operatorname{Sing}SeriesIncomplete{P}
=
\sum_{q\leq P^\Delta}
\sum_{\substack{\vec{a}\in\set{1,\dotsc,q}^R \\ (a_1,\dotsc ,a_R,q) =1}}
\Sloc{q}{\vec{a}}.
\end{equation*}
For each $\vec{\gamma}\in\bbR^R$, set
\begin{equation*}
\Sloc{\infty}{\vec{\gamma}}
=
\int_{\mathscr{B}}
e\brax[\big]{\vec{\gamma}\cdot\vec{f}^{[d]}(\vec{t})}
\,\mathrm{d}\vec{t},
\end{equation*}
and let
\begin{equation*}
\operatorname{Sing}IntegralBoxIncomplete{P}
=
\int\limits_{\substack{\vec{\alpha}\in\bbR^R \\ \supnorm{\vec{\alpha}}\leq P^{\Delta-d} }} P^n \Sloc{\infty}{P^d\vec{\alpha}} \,\mathrm{d}\vec{\alpha}.
\end{equation*}
Finally we define a quantity $\delta_0$ which in some sense measures the extent to which the system $f_1,\dotsc,f_R$ is singular. Let $\sigma_\bbZ\in \set{ 0,\dotsc,n }$ be as in \eqref{1.eqn:def_of_sigma-sub-Z}, and let
\begin{equation*}
\delta_0
=
\frac{n-\sigma_\bbZ}{(d-1)2^{d-1}R}.
\end{equation*}
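For orientation we note that, as recorded in Lemma~\ref*{1.lem:weyl's_ineq} below, linear independence of the forms $f^{[d]}_i$ forces $\sigma_\bbZ\leq n-1$ and hence $\delta_0\geq \frac{1}{(d-1)2^{d-1}R}$; for a single quadratic form, that is for $d=2$ and $R=1$, this lower bound is $\tfrac{1}{2}$.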
\subsection{The minor arcs}
On the minor arcs $\frakm_{P,d,\Delta}$ we have the following bound; compare \eqref{1.eqn:weyl's_ineq_mean_value_lem} in Lemma~\ref*{1.lem:mean_value_from_power-law_moats}.
\begin{lemma}[Dietmann~\cite{dietmannWeylsIneq}, Schindler~\cite{schindlerWeylsIneq}]\label{1.lem:weyl's_ineq}
Suppose that the polynomials $f_i$ have integer coefficients. Let $\Delta$, $\frakm_{P,d,\Delta}$ and $\delta_0$ be as in \S\ref*{1.sec:circle_method_notation}, and let $\epsilon>0$. Let the sum $\expSumSBoxAt{\vec{\alpha}}$ be as in \eqref{1.eqn:def_of_S}.
Then we have
\begin{equation}\label{1.eqn:weyl's_ineq}
\sup_{\vec{\alpha}\in\frakm_{P,d,\Delta}} \abs{\expSumSBoxAt{\vec{\alpha}}}
\llcv{\epsilon}{d,n,R,\epsilon}
P^{n-\Delta\delta_0 + \epsilon} \end{equation}
where the implicit constant depends only on $d,n, R,$ and $\epsilon$. The constant $\delta_0$ satisfies $\delta_0 \geq \frac{1}{(d-1)2^{d-1}R}$ whenever the forms $f^{[d]}_i$ are linearly independent.
\end{lemma}
\begin{proof}
The bound \eqref{1.eqn:weyl's_ineq} follows either from Lemma~4 in Dietmann~\cite{dietmannWeylsIneq}, or from Lemma~2.2 in Schindler~\cite{schindlerWeylsIneq}, by setting the parameter $\theta$ in either author's work to be
\begin{equation*}
\theta
=
\frac{\Delta-\epsilon}{(d-1)R},
\end{equation*}
and taking $P\ggcv{\epsilon}{d,n,R,\epsilon} 1$ sufficiently large. Provided the forms $f^{[d]}_i$ are linearly independent, the variety $V(\vec{a}\cdot\vec{f}^{[d]})$ is a proper subvariety of $\bbP_\bbQ^{n-1}$ for each $\vec{a}\in\bbZ^R\setminus\set{\vec{0}}$, and so $\sigma_\bbZ \leq n-1$ holds, by \eqref{1.eqn:def_of_sigma-sub-Z}. This implies that $\delta_0 \geq \frac{1}{(d-1)2^{d-1}R}$, as claimed.
\withqed\end{proof}
\subsection{The major arcs}\label{1.sec:major_arcs}
In this section we estimate $\int_{\frakM_{P,d,\Delta}}\expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha}$, the integral over the major arcs.
\begin{lemma}\label{1.lem:Sloc}
Suppose that the polynomials $f_i$ have integer coefficients. Let $\Delta$, $\majorarcs$\xspace, $\Sloc{\infty}{\vec{\gamma}}$, $\Sloc{q}{\vec{a}}$, $\operatorname{Sing}SeriesIncomplete{P}$ and $\operatorname{Sing}IntegralBoxIncomplete{P}$ be as in \S\ref*{1.sec:circle_method_notation}. Then for all $\vec{a}\in\bbZ^R$ and all $q\in\bbN$ such that $q \leq P$, we have
\begin{equation}\label{1.eqn:Sloc}
\expSumSBoxBigAt{\tfrac{\vec{a}}{q}+\vec{\alpha}}
=
P^n\Sloc{q}{\vec{a}} \Sloc{\infty}{P^d\vec{\alpha}}
+
\Ocv{}{\vec{f}}\brax[]{ qP^{n-1}\brax{ 1+P^{d}\supnorm{\vec{\alpha}} }},
\end{equation}
and it follows that
\begin{equation}\label{1.eqn:integral_over_frakM}
\int_{\frakM_{P,d,\Delta}} \expSumSBoxAt{\vec{\alpha}} \,\mathrm{d}\vec{\alpha}
=
\operatorname{Sing}SeriesIncomplete{P}\operatorname{Sing}IntegralBoxIncomplete{P}
+ \Ocv{}{\vec{f}} \brax[\big]{ P^{n-dR+(2R+3)\Delta -1}}.
\end{equation}
\end{lemma}
\begin{proof}
To show \eqref{1.eqn:Sloc} we follow the proof of Lemma~5.1 in Birch~\cite{birchFormsManyVars}. First observe that $\vec{\alpha}\cdot\vec{f}(\vec{x}) = \vec{\alpha}\cdot\vec{f}^{[d]}(\vec{x}) +O(\supnorm{\vec{x}}^{d-1}\supnorm{\vec{\alpha}})$, and so
\begin{align}
\expSumSBoxAt{\tfrac{\vec{a}}{q}+\vec{\alpha}}
&
=
\sum_{1 \leq y_1,\dotsc,y_n \leq q} e\brax[\big]{\aOverQDotLittleF(\vec{y})}
\sum_{\substack{\vec{x}\in\bbZ^n\\ \vec{x}/P\in\mathscr{B} \\ \vec{x} \equiv \vec{y}\,\text{mod}\,q}} e(\vec{\alpha}\cdot\vec{f}^{[d]}(\vec{x}))
\nonumber
\\
&
\hphantom{={}}
+O\brax{P^{n+d-1}\supnorm{\vec{\alpha}}}.
\label{1.eqn:S_near_a_rational}
\end{align}
If $\psi$ is any differentiable complex-valued function on $\bbR^n$, then we have
\begin{equation*}
\psi(\vec{x})
=
q^{-n}\int_{\substack{\vec{u}\in\bbR^n\\ \supnorm{\vec{u}}\leq q/2}}
\psi(\vec{x}+\vec{u})
\,\mathrm{d}\vec{u}
+O_n\brax[\Big]{ q \max_{\substack{\vec{u}\in\bbR^n\\ \supnorm{\vec{u}}\leq q/2}} \supnorm{\vec{\nabla}_{\vec{u}} \psi(\vec{x}+\vec{u})} }.
\end{equation*}
Setting $\psi(\vec{x}) = e(\vec{\alpha}\cdot\vec{f}^{[d]}(\vec{x})) $, we deduce that
\begin{align*}
\sum_{\substack{\vec{x}\in\bbZ^n\\ \vec{x}/P\in\mathscr{B} \\ \vec{x} \equiv \vec{y}\,\text{mod}\,q}} e(\vec{\alpha}\cdot\vec{f}^{[d]}(\vec{x}))
&=
q^{-n}\int_{\substack{\vec{v}\in\bbR^n\\ \vec{v}/P \in\mathscr{B}}}
e(\vec{\alpha}\cdot\vec{f}^{[d]}(\vec{v}))
\,\mathrm{d}\vec{v}
\\
&\hphantom{{}={}}
+O(q^{1-n}P^{n+d-1}\supnorm{\vec{\alpha}}+q^{1-n}P^{n-1}),
\end{align*}
where the term $q^{1-n}P^{n-1}$ allows for errors in approximating the boundary of the box $\mathscr{B}$. Substituting into \eqref{1.eqn:S_near_a_rational} shows that
\begin{equation*}
\expSumSBoxAt{\tfrac{\vec{a}}{q}+\vec{\alpha}}
=
\Sloc{q}{\vec{a}}
\int_{\substack{\vec{v}\in\bbR^n\\ \vec{v}/P \in\mathscr{B}}}
e(\vec{\alpha}\cdot\vec{f}^{[d]}(\vec{v}))
\,\mathrm{d}\vec{v}
+O(qP^{n-1}(1+P^d\supnorm{\vec{\alpha}})).
\end{equation*}
To complete the proof of \eqref{1.eqn:Sloc} it suffices to set $\vec{v}=P\vec{t}$ and use the definition of $\Sloc{\infty}{\vec{\gamma}}$ from \S\ref*{1.sec:circle_method_notation}. Now \eqref{1.eqn:integral_over_frakM} follows from \eqref{1.eqn:Sloc} by the definition \eqref{1.eqn:def_of_major_arcs} of $\majorarcs$\xspace.
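In a little more detail, the bookkeeping behind the error term is as follows. The set $\frakM_{P,d,\Delta}$ is a union of $\Ocv{}{R}\brax[\big]{P^{(R+1)\Delta}}$ boxes, each of measure at most $\brax{2P^{\Delta-d}}^R$, and for $q\leq P^\Delta$ and $\supnorm{\vec{\alpha}}< P^{\Delta-d}$ the error term in \eqref{1.eqn:Sloc} is $\Ocv{}{\vec{f}}\brax[\big]{P^{n-1+2\Delta}}$. Integrating over $\frakM_{P,d,\Delta}$ therefore produces a total error of
\begin{equation*}
\Ocv{}{\vec{f}}\brax[\big]{ P^{(R+1)\Delta}\cdot P^{R(\Delta-d)}\cdot P^{n-1+2\Delta} }
=
\Ocv{}{\vec{f}}\brax[\big]{ P^{n-dR+(2R+3)\Delta-1} },
\end{equation*}
which is the error term claimed in \eqref{1.eqn:integral_over_frakM}.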
\withqed\end{proof}
We remark that in the case when $\vec{a}=\vec{0}$ and $q=1$, the proof of \eqref{1.eqn:Sloc} is valid whether or not the polynomials $f_i$ have integer coefficients. That is, we always have
\begin{equation}\label{1.eqn:Sinfty}
\expSumSBoxAt{\vec{\alpha}}
=
P^n\Sloc{\infty}{P^d\vec{\alpha}}
+
\Ocv{}{\vec{f}}\brax[]{ P^{n-1}\brax{ 1+P^{d}\supnorm{\vec{\alpha}} }}
\end{equation}
for any $f_i$ with real coefficients. Next we treat the quantity $\operatorname{Sing}SeriesIncomplete{P}$ from \eqref{1.eqn:integral_over_frakM}.
\begin{lemma}\label{1.lem:frakS}
Let the polynomials $f_i$ have integer coefficients, let the box $\mathscr{B}$ from \S\ref*{1.sec:notation} be $\clsd{0}{1}^n$, and let $\Sloc{q}{\vec{a}}$ be as in \S\ref*{1.sec:circle_method_notation}.
Suppose we are given $\epsilon \geq 0$, ${\mathscr C}>0$ and $C\geq 1$ such that for all $\vec{\alpha},\vec{\beta}\in \bbR^R$ and all $P\geq 1$ the bound \eqref{1.eqn:assumed_arcs} holds. Then:
\begin{enumerate}[(i)]
\item\label{1.itm:local_moats}
There is $\epsilon'>0$ such that $\epsilon' = O_{{\mathscr C}}(\epsilon)$ and
\begin{equation}
\label{1.eqn:local_moats}
\min\setbrax[\big]{\abs{\Sloc{q}{\vec{a}}},\,\abs{\Sloc{q'}{\vec{a}'}}}
\llcv{C}{C,\vec{f}}
(q'+q)^\epsilon
\supnormbig{\tfrac{\vec{a}}{q}-\tfrac{\vec{a}'}{q'}}^{\frac{{\mathscr C}- \epsilon'}{d-1} }
\end{equation}
for all $\vec{a}\in \set{1,\dotsc,q}^R$ and $\vec{a}'\in \set{1,\dotsc,q'}^R$ such that $\frac{\vec{a}'}{q'}\neq \frac{\vec{a}}{q}$.
\item\label{1.itm:measure_of_localarcs}
If ${\mathscr C}>\epsilon'$, then for all $t>0$ and $q_0\in\bbN$ we have
\[
\#\localarcs{q_0}{t}
\llcv{C}{C,\vec{f}}
(q_0^\epsilon
t)^{-\frac{(d-1)R}{{\mathscr C}-\epsilon'}
},
\]
where it is understood that the fractions $\tfrac{\vec{a}}{q}$ are in lowest terms.
\item\label{1.itm:bound_on_Sq}
Let $\delta_0$ be as in \S\ref*{1.sec:circle_method_notation} and let $\epsilon''>0$. For all $q \in \bbN$ and all $\vec{a} \in \bbZ^R$ such that $(a_1,\dotsc,a_R,q)=1$, we have
\[
\abs{\Sloc{q}{\vec{a}}}
\llcv{\epsilon''}{\vec{f},\epsilon''}
q^{-\delta_0+\epsilon''}.
\]
\item\label{1.itm:frakS_converges}
Let $\Delta$ and $\operatorname{Sing}SeriesIncomplete{P}$ be as in \S\ref*{1.sec:circle_method_notation}. Suppose that $\epsilon$ is sufficiently small in terms of ${\mathscr C}$, $d$ and $R$. Provided the inequality ${\mathscr C}>(d-1)R$ holds and the forms $f^{[d]}_i$ are linearly independent, we have
\begin{equation}\label{1.eqn:frakS_converges}
\operatorname{Sing}SeriesIncomplete{P}-\operatorname{Sing}Series
\llcv{C,{\mathscr C}}{C,{\mathscr C},\vec{f}}
P^{-\Delta \delta_1}
\end{equation}
for some $\operatorname{Sing}Series\in\bbC$ and some $\delta_1 >0$ depending at most on ${\mathscr C}$, $d$ and $R$. We have
\begin{multline}
\label{1.eqn:evaluating_frakS}
\operatorname{Sing}Series
= \prod_p
\lim_{k\to\infty}
\tfrac{1}{p^{k(n-R)}}
\#
\big\{ \vec{b} \in \set{1,2,\dotsc,p^k}^n
:
\\
f_1(\vec{b})
\equiv 0 ,
\dotsc,
f_R(\vec{b})
\equiv
0
\mod{p^k}
\big\}
\end{multline}
where the product is over primes $p$ and converges absolutely.
\end{enumerate}
\end{lemma}
\begin{proof}[Proof of part~\ref*{1.itm:local_moats}]
Provided $P$ is sufficiently large, Lemma~\ref*{1.lem:Sloc} will allow us to approximate the sum $\Sloc{q}{\vec{a}}$ by a multiple of $S\brax[\big]{{\vec{a}/q} ;P}$. This will enable us to transform \eqref{1.eqn:assumed_arcs} into the bound \eqref{1.eqn:local_moats}. Let $P\geq 1$ be a parameter, to be chosen later. Then \eqref{1.eqn:assumed_arcs} gives
\begin{equation}\label{1.eqn:local_moats_first_step}
\min\setbrax*{
\abs*{ \frac{S\brax[\big]{\frac{\vec{a}}{q} ;P} }{P^{n+\epsilon}} }
,\,
\abs*{ \frac{S \brax[\big]{\frac{\vec{a}'}{q'};P} }{P^{n+\epsilon}} }
}
\leq
C
\max\setbrax{P^{-d}\supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{-1} ,\, \supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{\frac{1}{d-1}} }^{{\mathscr C}}.
\end{equation}
Since $\mathscr{B} = \clsd{0}{1}^n$ the equality $\Sloc{\infty}{\vec{0}}=1$ holds, and so \eqref{1.eqn:Sloc} implies that
\begin{equation}\label{1.eqn:Sloc_for_local_moats}
\frac{ S\brax[\big]{\frac{\vec{a}}{q};P} }{P^{n}}
=
\Sloc{q}{ \vec{a} }
+
\Ocv{}{\vec{f}}\brax{qP^{-1}},
\quad
\frac{ S\brax[\big]{\frac{\vec{a}'}{q'};P} }{P^{n}}
=
\Sloc{q'}{ \vec{a}' }
+
\Ocv{}{\vec{f}}\brax{q'P^{-1}}.
\end{equation}
Together \eqref{1.eqn:local_moats_first_step} and \eqref{1.eqn:Sloc_for_local_moats} yield
\begin{multline}\label{1.eqn:local_moats_second_step}
\min\setbrax[\big]{
\abs{ \Sloc{q}{ \vec{a} } }
,\,
\abs{ \Sloc{q'}{ \vec{a}' } }
}
\\
\leq
C
P^{\epsilon-{\mathscr C} d}\supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{-{\mathscr C}} +CP^\epsilon \supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{\frac{{\mathscr C}}{d-1}}
+
\Ocv{}{\vec{f}}\brax[]{ (q'+q)P^{-1}}.
\end{multline}
Observe that for $P$ sufficiently large the term $C P^\epsilon \supnorm{\frac{\vec{a}'}{q'}-\frac{\vec{a}}{q}}^{{\mathscr C}/(d-1)}$ dominates the right-hand side of \eqref{1.eqn:local_moats_second_step}. We claim this is the case for
\begin{equation}\label{1.eqn:choice_of_P_for_local_moats}
P
=
(q'+q)\supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{-\frac{1+{\mathscr C}}{d-1}}.
\end{equation}
Indeed, since $\supnorm{\frac{\vec{a}'}{q'}-\frac{\vec{a}}{q}}\leq 1$, it follows from \eqref{1.eqn:choice_of_P_for_local_moats} and \eqref{1.eqn:local_moats_second_step} that
\begin{align*}
\MoveEqLeft[1]
\min\setbrax[\big]{
\abs{ \Sloc{q}{ \vec{a} } }
,\,
\abs{ \Sloc{q'}{ \vec{a}' } }
}
\\
&\leq
C
P^{\epsilon}
(q'+q)^{-{\mathscr C} d} \supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{\frac{{\mathscr C}+{\mathscr C}^2 d}{d-1}} + C
P^{\epsilon}\supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{\frac{{\mathscr C}}{d-1}}
+
\Ocv{}{\vec{f}}\brax[\Big]{\supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{\frac{1+{\mathscr C}}{d-1}}}.
\\
&\llcv{C}{C,\vec{f}}
P^\epsilon
\supnormbig{ \tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q} }^{\frac{{\mathscr C}}{d-1}},
\end{align*}
which proves the result.
\end{proof}
\begin{proof}[Proof of part~\ref*{1.itm:measure_of_localarcs}]
Suppose that $\epsilon'<{\mathscr C}$. Then by part~\ref*{1.itm:local_moats}, any two distinct points of the set
\[
\localarcs{q_0}{t}
\]
are separated by gaps of size
\[
\supnorm{\tfrac{\vec{a}'}{q'}-\tfrac{\vec{a}}{q}}
\ggcv{C}{C,\vec{f}} (q_0^{-\epsilon} t)^{\frac{d-1}{{\mathscr C}-\epsilon'}}.
\]
At most $\Ocv{C}{C,\vec{f}}(q_0^{\epsilon} t)^{-\frac{(d-1)R}{{\mathscr C}-\epsilon'}}$ such points fit in the box $\clsdopen{0}{1}^R$, proving the claim.
\end{proof}
\begin{proof}[Proof of part~\ref*{1.itm:bound_on_Sq}]
This follows from Lemma~\ref*{1.lem:weyl's_ineq} by an argument which is now standard; see the proof of Lemma~5.4 in Birch~\cite{birchFormsManyVars}.
\end{proof}
\begin{proof}[Proof of part~\ref*{1.itm:frakS_converges}]
In this part of the proof, whenever we write $\vec{a}/q$ it is understood that $\vec{a}\in\bbZ^R$ and $q\in\bbN$ with $(a_1,\dotsc ,a_R,q) =1$. We will show below that
\begin{equation}\label{1.eqn:tail_of_frakS}
s(Q)
=
\sum_{\substack{\vec{a}/q \in\clsdopen{0}{1}^R \\ Q < q \leq 2Q}}
\abs{\Sloc{q}{\vec{a}}}
\llcv{C,{\mathscr C}}{C,{\mathscr C},\vec{f}}
Q^{-\delta_1}
\end{equation}
for all $Q \geq 1$, and some $\delta_1>0$ depending only on ${\mathscr C}$, $d$ and $R$. Since
\begin{align*}
\abs[\bigg]{\operatorname{Sing}SeriesIncomplete{P}
-\sum_{ \vec{a}/q\in\clsdopen{0}{1}^R }
\Sloc{q}{\vec{a}}}
&\leq
\sum_{\substack{\vec{a}/q \in\clsdopen{0}{1}^R \\ q> P^\Delta}}
\abs{\Sloc{q}{\vec{a}}}
\\
&=
\sum_{\substack{ Q = 2^k P^\Delta \\ k = 0, 1, \dotsc}} s(Q),
\end{align*}
this proves \eqref{1.eqn:frakS_converges} with
\begin{equation}
\label{1.eqn:def_of_frakS}
\operatorname{Sing}Series
=
\sum_{ \vec{a}/q\in\clsdopen{0}{1}^R }
\Sloc{q}{\vec{a}},
\end{equation}
where this sum is absolutely convergent. Then \eqref{1.eqn:evaluating_frakS} follows as in \S 7 of Birch~\cite{birchFormsManyVars}.
We prove \eqref{1.eqn:tail_of_frakS}. Let $\ell \in \bbZ$. We have
\begin{align}
s(Q)
={}&
\sum_{\substack{ \vec{a}/q\in \clsdopen{0}{1}^R \\ \abs{\Sloc{q}{\vec{a}}}\geq 2^{-\ell} \\ Q < q \leq 2Q }}
\abs{\Sloc{q}{\vec{a}}}
+
\sum_{i = \ell}^\infty
\sum_{\substack{ \vec{a}/q\in \clsdopen{0}{1}^R \\ 2^{-i}> \abs{\Sloc{q}{\vec{a}}}\geq 2^{-i-1} \\ Q < q \leq 2Q }}
\abs{\Sloc{q}{\vec{a}}}
\nonumber
\\
\leq{}&
\#\localarcs{2Q}{2^{-\ell}} \cdot \sup_{q>Q} \abs{\Sloc{q}{\vec{a}}}
\nonumber
\\
&+
\sum_{i=\ell}^\infty
\#\localarcs{2Q}{2^{-i-1}}\cdot 2^{-i}.
\label{1.eqn:bounding_S(Q)}
\end{align}
Now parts~\ref*{1.itm:measure_of_localarcs} and~\ref*{1.itm:bound_on_Sq} show that
\[
\#\localarcs{2Q}{t} \llcv{C}{C,\vec{f}} (Q^{\epsilon} t)^{-\frac{(d-1)R}{ {\mathscr C}-\epsilon'}}
\]
and that
\[
\sup_{q>Q} \abs{\Sloc{q}{\vec{a}}} \llcv{}{\vec{f}} Q^{-\delta_0/2}.
\]
Substituting these bounds into \eqref{1.eqn:bounding_S(Q)} gives
\begin{equation*}
s(Q)
\llcv{C}{C,\vec{f}}
Q^{\Ocv{{\mathscr C}}{{\mathscr C},d,R}(\epsilon)-\delta_0/2}
2^{\ell \frac{(d-1)R}{{\mathscr C}-\epsilon'}}
+
Q^{\Ocv{{\mathscr C}}{{\mathscr C},d,R}(\epsilon)}
\sum_{i=\ell}^\infty
2^{(i+1) \brax[\big]{\frac{(d-1)R}{{\mathscr C}-\epsilon'}}-i}.
\end{equation*}
We have ${\mathscr C}>(d-1)R$ and we have assumed that $\epsilon'$ is small in terms of ${\mathscr C}$, $d$ and $R$, so we may assume that the bound ${\mathscr C}>(d-1)R+\epsilon'$ holds. So we may sum the geometric progression to find that
\begin{equation*}
s(Q) \ll_{C,{\mathscr C}}
Q^{\Ocv{{\mathscr C}}{{\mathscr C},d,R}(\epsilon)}
2^{\ell \frac{(d-1)R}{{\mathscr C}-\epsilon'}}
\brax[\big]{
Q^{-\delta_0/2} +2^{-\ell} }.
\end{equation*}
Picking $
\ell
=
\floor{\log_2 Q^{\delta_0/2}}
$
shows that
\begin{equation*}
s(Q)
\llcv{C,{\mathscr C}}{C,{\mathscr C},d,R}
Q^{-\delta_0\frac{{\mathscr C}-(d-1)R}{2{\mathscr C}}+\Ocv{{\mathscr C}}{{\mathscr C},d,R}(\epsilon)}.
\end{equation*}
The forms $f^{[d]}_i$ are linearly independent, so $\delta_0\geq \frac{1}{(d-1)2^{d-1}R}$, by Lemma~\ref*{1.lem:weyl's_ineq}. As $\epsilon$ is small in terms of ${\mathscr C}$, $d$ and $R$ it follows that $s(Q)\llcv{C,{\mathscr C}}{C,{\mathscr C},d,R} Q^{-\delta_1}$ for some $\delta_1>0$ depending only on ${\mathscr C}$, $d$ and $R$. This proves \eqref{1.eqn:tail_of_frakS}.
\withqed\end{proof}
We estimate the integral $\operatorname{Sing}IntegralBoxIncomplete{P}$ from \eqref{1.eqn:integral_over_frakM}.
\begin{lemma}\label{1.lem:frakI}
Let $\Sloc{\infty}{\vec{\gamma}}$, $\Delta$ and $\operatorname{Sing}IntegralBoxIncomplete{P}$ be as in \S\ref*{1.sec:circle_method_notation}.
\begin{enumerate}[(i)]
\item\label{1.itm:bound_on_Sinfty}
Suppose that the bound \eqref{1.eqn:assumed_arcs} holds for some $C\geq 1$, ${\mathscr C}>0$ and $\epsilon\geq 0$ and all $\vec{\alpha},\vec{\beta}\in\bbR^R$ and $P\geq 1$. Then for all $\vec{\gamma}\in\bbR^R$ we have
\begin{equation}\label{1.eqn:bound_on_Sinfty}
\Sloc{\infty}{\vec{\gamma}}
\llcv{C}{C,\vec{f}}
\supnorm{\vec{\gamma}}^{-{\mathscr C}+\epsilon'},
\end{equation}
for some $\epsilon'>0$ such that $\epsilon' = O_{{\mathscr C}}(\epsilon)$.
\item\label{1.itm:frakI_converges}
If the conclusion of part~\ref*{1.itm:bound_on_Sinfty} holds and ${\mathscr C}-\epsilon'>R$, then there exists $\operatorname{Sing}IntegralBox \in \bbC$ such that for all $P\geq 1$ we have
\begin{equation}\label{1.eqn:frakI_converges}
\tfrac{1}{P^{n-dR}}\operatorname{Sing}IntegralBoxIncomplete{P}
-
\operatorname{Sing}IntegralBox
\llcv{{\mathscr C},C,\epsilon'}{{\mathscr C},C,\vec{f},\epsilon'}
P^{-\Delta\brax{{\mathscr C}-\epsilon'-R}}.
\end{equation}
Furthermore we have
\begin{equation}\label{1.eqn:evaluating_frakI}
\operatorname{Sing}IntegralBox
=
\lim_{P \to \infty}\tfrac{1}{P^{n-dR}}
\Meas{ \vec{t}\in\bbR^n : \tfrac{1}{P}\vec{t} \in \mathscr{B}, \abs{f^{[d]}_1(\vec{t})} \leq \tfrac{1}{2} ,\dotsc,
\abs{f^{[d]}_R(\vec{t})} \leq \tfrac{1}{2}
}
\end{equation}
where $\meas{\,\cdot\,}$ denotes the Lebesgue measure.
\end{enumerate}
\end{lemma}
\begin{proof}[Proof of part~\ref*{1.itm:bound_on_Sinfty}]
First, for all $\vec{\beta}\in\bbR^R$ we have $\abs{\expSumSBoxAt{\vec{\beta}}}\leq \expSumSBoxAt{\vec{0}}$, from the definition \eqref{1.eqn:def_of_S}. Consequently, taking $\vec{\alpha} = \vec{0}$, $\vec{\beta}= P^{-d}\vec{\gamma} $ in our hypothesis \eqref{1.eqn:assumed_arcs} shows that
\begin{equation*}
\abs{ \expSumSBoxAt{P^{-d}\vec{\gamma}}}
\leq
C
P^{n+\epsilon}
\max\setbrax{\supnorm{\vec{\gamma}}^{-1} ,\, P^{-\frac{d}{d-1}}\supnorm{\vec{\gamma}}^{\frac{1}{d-1}} }^{{\mathscr C}}.
\end{equation*}
Together with the case $\vec{\alpha}= P^{-d}\vec{\gamma} $ of the bound \eqref{1.eqn:Sinfty}, this yields
\begin{equation}\label{1.eqn:bound_on_Sinfty_II}
\Sloc{\infty}{\vec{\gamma}}
\llcv{C}{C,\vec{f}}
P^{\epsilon} \max\setbrax{\supnorm{\vec{\gamma}}^{-1},\,
P^{-\frac{d}{d-1} } \supnorm{\vec{\gamma}}^{\frac{1}{d-1}}}^{\mathscr C}
+
P^{-1}
+
P^{-1}\supnorm{\vec{\gamma}}.
\end{equation}
If we have $\supnorm{\vec{\gamma}} \leq 1$, then we set $P=1$ and~\ref*{1.itm:bound_on_Sinfty} follows at once. Otherwise we put $P = \max\setbrax{1, \supnorm{\vec{\gamma}}^{1+{\mathscr C}}}$, and the result follows since \eqref{1.eqn:bound_on_Sinfty_II} then implies
\begin{align*}
\Sloc{\infty}{\vec{\gamma}}
&\llcv{C}{C,\vec{f}}
P^{\epsilon} \max\setbrax[\big]{\supnorm{\vec{\gamma}}^{-1},\,
\supnorm{\vec{\gamma}}^{-1-\frac{{\mathscr C} d}{d-1}}}^{\mathscr C}
+\supnorm{\vec{\gamma}}^{-1-{\mathscr C}}
+\supnorm{\vec{\gamma}}^{-{\mathscr C}}
\nonumber
\\
&\leq
3 \supnorm{\vec{\gamma}}^{-{\mathscr C}+(1+{\mathscr C})\epsilon}.
\end{align*}
\end{proof}
\begin{proof}[Proof of part~\ref*{1.itm:frakI_converges}]
If the inequality ${\mathscr C}-\epsilon'>R $ holds, then by \eqref{1.eqn:bound_on_Sinfty} we have
\begin{align*}
\brax[\bigg]{\int\limits_{\vec{\gamma}\in\bbR^R } P^{n-dR} \Sloc{\infty}{\vec{\gamma}} \,\mathrm{d}\vec{\gamma}}
-
\operatorname{Sing}IntegralBoxIncomplete{P}
&=
\int\limits_{\substack{\vec{\gamma}\in\bbR^R \\ \supnorm{\vec{\gamma}}> P^{\Delta} }} P^{n-dR} \Sloc{\infty}{\vec{\gamma}} \,\mathrm{d}\vec{\gamma}
\\
&\llcv{{\mathscr C},C,\epsilon'}{{\mathscr C},C,\vec{f},\epsilon'} P^{n-dR-\Delta\brax{{\mathscr C}-\epsilon'-R}} ,
\end{align*}
where the integrals converge absolutely.
This proves \eqref{1.eqn:frakI_converges} with
\begin{equation}\label{1.eqn:def_of_frakI}
\operatorname{Sing}IntegralBox
=
\int\limits_{\vec{\gamma}\in\bbR^R } \Sloc{\infty}{\vec{\gamma}} \,\mathrm{d}\vec{\gamma}.
\end{equation}
It remains to prove \eqref{1.eqn:evaluating_frakI}. Let $\chi : \bbR^R \to \clsd{0}{1}$ be the indicator function of the box $\clsd{-\frac{1}{2}}{\frac{1}{2}}^R$. We must evaluate the limit
\begin{multline}\label{1.eqn:sing_int_as_volume_integral}
\lim_{P \to \infty}
\tfrac{1}{P^{n-dR}}
\Meas{ \vec{t}\in\bbR^n : \tfrac{1}{P}\vec{t} \in \mathscr{B}, \abs{f^{[d]}_1(\vec{t})} \leq \tfrac{1}{2} ,\dotsc,
\abs{f^{[d]}_R(\vec{t})} \leq \tfrac{1}{2} }
\\
=
\lim_{P\to\infty}
\tfrac{1}{P^{n-dR}}
\int_{\substack{ \vec{t}\in\bbR^n \\ \vec{t}/P \in\mathscr{B} }}
\chi\brax[\big]{ f^{[d]}_1(\vec{t}) ,\dotsc,
f^{[d]}_R(\vec{t}) } \,\mathrm{d}\vec{t}.
\end{multline}
Let $\varphi$ be any infinitely differentiable, compactly supported function on $\bbR^R$, taking values in $\clsd{0}{1}$. We evaluate $\frac{1}{P^{n-dR}}\int_{\vec{t}/P\in\mathscr{B}} \varphi( f^{[d]}_1(\vec{t}) ,\dotsc,
f^{[d]}_R(\vec{t}) )\,\mathrm{d}\vec{t}$, which we think of as a smoothed version of \eqref{1.eqn:sing_int_as_volume_integral}. Fourier inversion gives
\begin{align}
\int_{\substack{ \vec{t}\in\bbR^n \\ \vec{t}/P \in\mathscr{B} }}
\varphi\brax[\big]{ f^{[d]}_1(\vec{t}) ,\dotsc,
f^{[d]}_R(\vec{t}) } \,\mathrm{d}\vec{t}
&=
\int_{\substack{ \vec{t}\in\bbR^n \\ \vec{t}/P \in\mathscr{B} }}
\int_{\bbR^R}
\hat{\varphi}(\vec{\alpha})
e\brax{ \vec{\alpha}\cdot\vec{f}^{[d]}(\vec{t}) } \,\mathrm{d}\vec{\alpha} d\vec{t}
\nonumber
\\
&=
\int_{\bbR^R}
\hat{\varphi}(\vec{\alpha})
\int_{\substack{ \vec{t}\in\bbR^n \\ \vec{t}/P \in\mathscr{B} }}
e\brax{ \vec{\alpha}\cdot\vec{f}^{[d]}(\vec{t}) } \,\mathrm{d}\vec{t} d\vec{\alpha}
\nonumber
\\
&=
\int_{\bbR^R}
\hat{\varphi}(\vec{\alpha})
P^n \Sloc{\infty}{P^d\vec{\alpha}} d\vec{\alpha}
\label{1.eqn:fourier_transform_frakI-sub-varphi}
\end{align}
where $\hat{\varphi}(\vec{\alpha})$ is the Fourier transform $ \int_{\bbR^R} \varphi(\vec{\gamma}) e\brax{- \vec{\alpha}\cdot\vec{\gamma} } \,\mathrm{d}\vec{\gamma}$.
Since $ {\mathscr C}-\epsilon' >R$ holds by assumption, it follows from \eqref{1.eqn:bound_on_Sinfty} that the function $S_\infty$ is Lebesgue integrable. Hence \eqref{1.eqn:def_of_frakI} implies
\begin{align}
\hat{\varphi}(\vec{0}) \operatorname{Sing}IntegralBox
\nonumber
&=
\int_{\bbR^R}
\hat{\varphi}(\vec{0})
\Sloc{\infty}{ \vec{\gamma}} \,\mathrm{d}\vec{\gamma}
\nonumber
\\
&=
\lim_{P\to\infty}
\int_{\bbR^R}
\hat{\varphi}(P^{-d}\vec{\gamma})
\Sloc{\infty}{ \vec{\gamma}} \,\mathrm{d}\vec{\gamma}
\nonumber
\\
&=
\lim_{P\to\infty}
P^{dR}
\int_{\bbR^R}
\hat{\varphi}(\vec{\alpha})
\Sloc{\infty}{P^d \vec{\alpha}} \,\mathrm{d}\vec{\alpha}.
\label{1.eqn:dominated_convergence_trick}
\end{align}
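Here the second equality is an instance of dominated convergence, which we record for clarity: $\hat{\varphi}$ is continuous and bounded (by $\int_{\bbR^R}\abs{\varphi}$), so $\hat{\varphi}(P^{-d}\vec{\gamma})\to\hat{\varphi}(\vec{0})$ pointwise as $P\to\infty$, while $\abs[\big]{\hat{\varphi}(P^{-d}\vec{\gamma})\Sloc{\infty}{\vec{\gamma}}}$ is dominated by an integrable multiple of $\abs{\Sloc{\infty}{\vec{\gamma}}}$; the third equality is the substitution $\vec{\gamma}=P^{d}\vec{\alpha}$.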
Together \eqref{1.eqn:fourier_transform_frakI-sub-varphi} and \eqref{1.eqn:dominated_convergence_trick} show that for any infinitely differentiable, compactly supported $\varphi$ taking values in $\clsd{0}{1}$, we have
\begin{equation}\label{1.eqn:evaluation_of_frakI-sub-varphi}
\lim_{P\to\infty}
\tfrac{1}{P^{n-dR}}
\int_{\substack{ \vec{t}\in\bbR^n \\ \vec{t}/P \in\mathscr{B} }}
\varphi\brax{ f^{[d]}_1(\vec{t}) ,\dotsc,
f^{[d]}_R(\vec{t}) } \,\mathrm{d}\vec{t}
=
\hat{\varphi}(\vec{0}) \operatorname{Sing}IntegralBox.
\end{equation}
With $\chi$ as in \eqref{1.eqn:sing_int_as_volume_integral}, choose $\varphi$ such that $\varphi(\vec{\gamma})\leq \chi(\vec{\gamma})$ for all $\vec{\gamma}\in \bbR^R$. Then by \eqref{1.eqn:sing_int_as_volume_integral} and \eqref{1.eqn:evaluation_of_frakI-sub-varphi} we have
\begin{equation*}
\liminf_{P \to \infty}\tfrac{1}{P^{n-dR}}
\Meas{ \vec{t}\in\bbR^n : \tfrac{1}{P}\vec{t} \in \mathscr{B}, \abs{f^{[d]}_1(\vec{t})} \leq \tfrac{1}{2} ,\dotsc,
\abs{f^{[d]}_R(\vec{t})} \leq \tfrac{1}{2} }
\geq
\hat{\varphi}(\vec{0})
\operatorname{Sing}IntegralBox.
\end{equation*}
Letting $\varphi \to \chi$ almost everywhere gives $
\hat{\varphi}(\vec{0}) \to 1$, so $\operatorname{Sing}IntegralBox$ is a lower bound for the limit inferior in \eqref{1.eqn:sing_int_as_volume_integral}. Repeating the argument with $\varphi(\vec{\gamma})\geq \chi(\vec{\gamma})$ instead of $\varphi(\vec{\gamma})\leq \chi(\vec{\gamma})$ shows that $\operatorname{Sing}IntegralBox$ is also an upper bound for the corresponding limit superior, so the limit exists and is equal to $\operatorname{Sing}IntegralBox$.
\withqed\end{proof}
\subsection{The proof of Proposition~\ref*{1.prop:circle_method}}\label{1.sec:completing_the_circle_method}
In this section we deduce Proposition~\ref*{1.prop:circle_method} from Lemmas~\ref*{1.lem:mean_value_from_power-law_moats},~\ref*{1.lem:weyl's_ineq},~\ref*{1.lem:Sloc},~\ref*{1.lem:frakS}, and ~\ref*{1.lem:frakI}.
\begin{proof}[Proof of Proposition~\ref*{1.prop:circle_method}]
Let $P\geq 1$ and $\Delta =\frac{1}{4R+6}$. By \eqref{1.eqn:circle_method} we have
\begin{equation*}
N_{f_1,\dotsc,f_R}(P)
=
\int_{\frakM_{P,d,\Delta}} \expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha}
+\int_{\frakm_{P,d,\Delta}} \expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha},
\end{equation*}
where $\frakM_{P,d,\Delta}$, $\frakm_{P,d,\Delta}$ are as in \S\ref*{1.sec:circle_method_notation}. We apply Lemma~\ref*{1.lem:mean_value_from_power-law_moats} with
\[
T(\vec{\alpha}) = C^{-1}P^{-\epsilon}\expSumSBoxAt{\vec{\alpha}}
,\quad
E_0 = \clsd{0}{1}^R,\quad
E = \frakm_{P,d,\Delta},\quad
\delta=\Delta\delta_0.
\]
With these choices for $T$, $E_0$, $E$ and $\delta$ we see that \eqref{1.eqn:power-law_moats} follows from \eqref{1.eqn:assumed_arcs}. Lemma~\ref*{1.lem:weyl's_ineq} shows that $\sup_{\vec{\alpha}\in\frakm_{P,d,\Delta}}C\abs{T(\vec{\alpha})} \ll_\epsilon P^{n-\delta}$, and after increasing $C$ if necessary this gives us \eqref{1.eqn:weyl's_ineq_mean_value_lem}. This verifies the hypotheses of Lemma~\ref*{1.lem:mean_value_from_power-law_moats}. Since we have ${\mathscr C}>dR$ by assumption, \eqref{1.eqn:mean_value_lem} gives
\begin{equation}\label{1.eqn:mean_value_on_minor_arcs}
\int_{\frakm_{P,d,\Delta}}\expSumSBoxAt{\vec{\alpha}}\,\mathrm{d}\vec{\alpha}
\llcv{C,{\mathscr C}}{C,{\mathscr C},d,R}
P^{n-dR-\Delta\delta_0\brax{1-\frac{dR}{{\mathscr C}}}+\epsilon}.
\end{equation}
For the major arcs, since $\Delta=\frac{1}{4R+6}$ we have $(2R+3)\Delta=\tfrac{1}{2}$, and so Lemma~\ref*{1.lem:Sloc} gives
\begin{equation}\label{1.eqn:integral_over_frakM_application}
\int_{\frakM_{P,d,\Delta}} \expSumSBoxAt{\vec{\alpha}} \,\mathrm{d}\vec{\alpha}
=
\operatorname{Sing}SeriesIncomplete{P}\operatorname{Sing}IntegralBoxIncomplete{P}
+ \Ocv{}{\vec{f}} \brax[\big]{ P^{n-dR-\frac{1}{2}}},
\end{equation}
where $\operatorname{Sing}SeriesIncomplete{P}$, $\operatorname{Sing}IntegralBoxIncomplete{P}$ are as in \S\ref*{1.sec:circle_method_notation}. Since ${\mathscr C}>dR$ holds, the forms $f^{[d]}_i$ are linearly independent, and $\epsilon$ is small in terms of ${\mathscr C}$, $d$ and $R$, both of Lemmas~\ref*{1.lem:frakS} and~\ref*{1.lem:frakI} apply. In particular \eqref{1.eqn:frakS_converges} and \eqref{1.eqn:frakI_converges} show that
\begin{equation}\label{1.eqn:product_of_densities}
\operatorname{Sing}SeriesIncomplete{P}\operatorname{Sing}IntegralBoxIncomplete{P}
=
\operatorname{Sing}Series\operatorname{Sing}IntegralBox P^{n-dR}
+\Ocv{{\mathscr C},C}{{\mathscr C},C,\vec{f}}
\brax[\big]{
P^{n-dR-\Delta({\mathscr C}-R)/2}
}
+\Ocv{{\mathscr C},C}{{\mathscr C},C,\vec{f}}
\brax[\big]{
P^{n-dR-\Delta\delta_1}
}
\end{equation}
where $\delta_1 >0$ depends at most on ${\mathscr C}$, $d$ and $R$. By \eqref{1.eqn:mean_value_on_minor_arcs}, \eqref{1.eqn:integral_over_frakM_application}, and \eqref{1.eqn:product_of_densities}, the result holds.
\withqed\end{proof}
\section{The auxiliary inequality}\label{1.sec:aux_ineq}
In this section we verify the hypothesis \eqref{1.eqn:assumed_arcs}, assuming a bound on the number of solutions to the auxiliary inequality from Definition~\ref*{1.def:aux_ineq}. The goal is the following result, proved at the end of \S\ref*{1.sec:proof_of_moat_lemma}.
\begin{proposition}\label{1.prop:moat_lemma}
Let $\auxIneqOfSomethingNumSolns{f}\brax{ B }$, $\supnorm{f}$ be as in Definition~\ref*{1.def:aux_ineq}. Suppose that we are given $C_0\geq 1$ and ${\mathscr C}>0$ such that for all $\vec{\beta}\in\bbR^R$ and $B\geq 1$ we have
\begin{equation}\label{1.eqn:aux_ineq_bound_moat_lemma}
N^{\operatorname{aux}}_{\vec{\beta}\cdot\vec{f}} \brax{ B }
\leq
C_0
B^{(d-1)n-2^{d}{\mathscr C}}.
\end{equation}
Further let $M > \mu >0$ be such that for all $\vec{\beta}\in\bbR^R$ we have
\begin{equation}\label{1.eqn:norm_of_|b.f|}
\mu \supnorm{ \vec{\beta} }
\leq
\supnorm{ \vec{\beta}\cdot\vec{f}^{[d]} }
\leq
M \supnorm{ \vec{\beta} },
\end{equation}
noting that some such $M, \mu$ exist whenever the forms $f^{[d]}_i$ are linearly independent. Let $\epsilon>0$. Then there exists $C\geq 1$, depending only on $C_0,d,n,\mu,M$ and $\epsilon$, such that the bound \eqref{1.eqn:assumed_arcs} holds for all $P\geq 1 $ and all $\vec{\alpha},\vec{\beta}\in\bbR^R$.
\end{proposition}
\subsection{Weyl differencing}\label{1.sec:weyl_diff}
We prove \eqref{1.eqn:assumed_arcs} using the following estimate, which combines work of Birch~\cite[Lemma~2.4]{birchFormsManyVars} and Bentkus and G\"otze~\cite[Theorem~5.1]{bentkusGotzeEllipsoids}.
\begin{definition}\label{1.def:weyl_diff_ineq}
Let $f$, $\vec{\nabla}SomethingMultilinear{f}{\vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}$ be as in Definition~\ref*{1.def:aux_ineq}. Given $B \geq 1$ and $\delta>0$, we let $\weylDiffIneqOfSomethingNumSolns{f} \brax{B,\delta}$ be the number of $(d-1)$-tuples of integer $n$-vectors $\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)}$ such that
\begin{equation*}
\supnorm{\vec{x}^{(1)}},\dotsc,\supnorm{\vec{x}^{(d-1)}} \leq B,
\qquad
\min_{\vec{v}\in\bbZ^n} \supnormbig{\vec{v}-\vec{\nabla}SomethingMultilinear{ f }{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}} < \delta.
\end{equation*}
\end{definition}
\begin{lemma}\label{1.lem:weyl_diff_ineq}
Let $\weylDiffIneqOfSomethingNumSolns{f} \brax{ B,\delta }$ be as in Definition~\ref*{1.def:weyl_diff_ineq}. For all $\epsilon>0$, $\vec{\alpha}, \vec{\beta}\in\bbR^R$ and $\theta \in \openclsd{0}{1}$, we have
\begin{equation}
\label{1.eqn:from_min_to_prod}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }^{2^d}
\ll_{d,n,\epsilon}
\frac{
U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta, P^{(d-1)\theta-d} }
}{P^{(d-1)\theta n}}
\end{equation}
where the implicit constant depends only on $d,n,\epsilon$.
\end{lemma}
\begin{proof}
Observe that \eqref{1.eqn:from_min_to_prod} will follow if we can prove that
\begin{equation*}
\abs*{\frac{\expSumSBoxAt{\vec{\alpha}} \expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{2(n+\epsilon)}}}^{2^{d-1}}
\ll_{d,n,\epsilon}
\frac{
U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta, P^{(d-1)\theta-d} }
}{P^{(d-1)\theta n}}.
\end{equation*}
First we use an idea from the proof of Theorem~5.1 in Bentkus and G\"otze~\cite{bentkusGotzeEllipsoids}, also found in Lemma~2.2 of M\"uller~\cite{mullerSystemsQuadIneqs}, to eliminate $\vec{\alpha}$.
We have
\begin{align*}
\MoveEqLeft[2]
\expSumSBoxAt{\vec{\alpha}+\vec{\beta}} \widebar{\expSumSBoxAt{\vec{\alpha}}}
\nonumber
\\
&=
\sum_{\substack{\vec{x} \in \bbZ^n \\ \vec{x}/P\in\mathscr{B} }}
\sum_{\substack{ \vec{z} \in \bbZ^n \\ \brax{\vec{x}+\vec{z}}/P\in\mathscr{B} }}
e\brax[\big]{\brax{\vec{\alpha}+\vec{\beta}}\cdot\vec{f}(\vec{x}) -\vec{\alpha}\cdot\vec{f}(\vec{x}+\vec{z})}
\nonumber
\\
&\leq
\sum_{\substack{ \vec{z} \in \bbZ^n \\ \supnorm{\vec{z}}\leq P }}
\abs[\bigg]{
\sum_{\substack{\vec{x} \in \bbZ^n \\ \vec{x}/P\in\mathscr{B}_{\vec{z}} }}
e\brax[\big]{\brax{\vec{\alpha}+\vec{\beta}}\cdot\vec{f}(\vec{x}) -\vec{\alpha}\cdot\vec{f}(\vec{x}+\vec{z})}
}
\nonumber
\\
&=
\sum_{\substack{ \vec{z} \in \bbZ^n \\ \supnorm{\vec{z}}\leq P }}
\abs[\bigg]{
\sum_{\substack{\vec{x} \in \bbZ^n \\ \vec{x}/P\in\mathscr{B}_{\vec{z}} }}
e\brax[\big]{\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x}) + g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x})
}
}
\end{align*}
for some real polynomials $g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x})$ of degree at most $d-1$ in $\vec{x}$, and some boxes $\mathscr{B}_{\vec{z}}\subset\mathscr{B}$. Now by the special case of Cauchy's inequality
$
\abs{\sum_{i\in\calI} \lambda_i}^2
\leq
\brax{\#\calI} \cdot \sum_{i\in\calI} \abs{\lambda_i}^2
$, we have
\begin{align}
\MoveEqLeft[1]
\abs{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}} \widebar{\expSumSBoxAt{\vec{\alpha}}}}^{2^{d-1}}
\nonumber
\\
&\leq
\brax[\bigg]{\sum_{\substack{ \vec{z} \in \bbZ^n \\ \supnorm{\vec{z}}\leq P }}
\abs[\bigg]{
\sum_{\substack{\vec{x} \in \bbZ^n \\ \vec{x}/P\in\mathscr{B}_{\vec{z}} }}
e\brax[\big]{\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x}) + g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x}) }
}
}^{2^{d-1}}
\nonumber
\\
&\llcv{}{d,n}
P^{(2^{d-1}-1)n}
\sum_{\substack{ \vec{z} \in \bbZ^n \\ \supnorm{\vec{z}}\leq P }}
\abs[\bigg]{
\sum_{\substack{\vec{x} \in \bbZ^n \\ \vec{x}/P\in\mathscr{B}_{\vec{z}} }}
e\brax[\big]{\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x}) + g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x}) }
}^{2^{d-1}}.
\label{1.eqn:WD2}
\end{align}
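For clarity we note how the constant arises here: the quoted inequality is applied $d-1$ times, which gives $\brax[\big]{\sum_{i\in\calI}\abs{\lambda_i}}^{2^{d-1}}\leq\brax{\#\calI}^{2^{d-1}-1}\sum_{i\in\calI}\abs{\lambda_i}^{2^{d-1}}$, and there are $\Ocv{}{n}\brax{P^{n}}$ vectors $\vec{z}\in\bbZ^n$ with $\supnorm{\vec{z}}\leq P$, which accounts for the factor $P^{(2^{d-1}-1)n}$.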
Bentkus and G\"otze used the double large sieve of Bombieri and Iwaniec \cite{bombieriDoubleSievePaper} to bound the inner sum in \eqref{1.eqn:WD2} in the case when $d=2$. We extend the argument to higher $d$ by employing Lemma~2.4 of Birch~\cite{birchFormsManyVars}, which states that\footnote{Birch writes $N \brax{ P^\theta ; P^{(d-1)\theta-d} ; {\alpha} }$ for our $\weylDiffIneqOfSomethingNumSolns{\vec{\alpha}\cdot\vec{f}} \brax{ P^\theta, P^{(d-1)\theta-d} }$ and $S({\alpha})$ for our $\expSumSBoxAt{\vec{\alpha}}$.}
\[
\abs{\expSumSBoxAt{\vec{\alpha}}}^{2^{d-1}}
\ll_{d,n,\epsilon}
P^{2^{d-1}n-(d-1)n\theta +\epsilon} \weylDiffIneqOfSomethingNumSolns{\vec{\alpha}\cdot\vec{f}} \brax{ P^\theta, P^{(d-1)\theta-d} }.
\]
The innermost sum in \eqref{1.eqn:WD2} has the same form as $\expSumSBoxAt{\vec{\alpha}}$, with $\mathscr{B}_{\vec{z}}$ in place of $\mathscr{B}$ and $\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x}) + g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x}) $ in place of $\vec{\alpha}\cdot\vec{f}$ as the underlying polynomial. The degree of $ g_{\vec{\alpha},\vec{\beta},\vec{z}}$ is at most $d-1$, so $\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x})$ is the leading part of this polynomial. So applying Birch's result to the innermost sum in \eqref{1.eqn:WD2} shows
\begin{align*}
\MoveEqLeft[6]
\abs[\bigg]{
\sum_{\substack{\vec{x} \in \bbZ^n \\ \vec{x}/P\in\mathscr{B}_{\vec{z}} }}
e\brax[\big]{\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x}) + g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x}) }
}^{2^{d-1}}
\\
& \llcv{\epsilon}{d,n,\epsilon}
P^{2^{d-1}n-(d-1)\theta n+\epsilon}
\weylDiffIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{f}^{[d]}(\vec{x}) + g_{\vec{\alpha},\vec{\beta},\vec{z}}(\vec{x})} \brax{ P^\theta, P^{(d-1)\theta-d} }
\\
&=
P^{2^{d-1}n-(d-1)\theta n+\epsilon}
U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta, P^{(d-1)\theta-d} },
\end{align*}
as $\weylDiffIneqOfSomethingNumSolns{f}$ depends only on the degree $d$ part of $f$. With \eqref{1.eqn:WD2} this proves the result.
\withqed\end{proof}
\subsection{Proof of Proposition~\ref*{1.prop:moat_lemma}}\label{1.sec:proof_of_moat_lemma}
\begin{proof}[Proof of Proposition~\ref*{1.prop:moat_lemma}]
Let us first suppose that for some $\theta >0$ we have
\begin{equation}\label{1.eqn:M<N}
N^{\operatorname{aux}}_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta }
<
U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta , P^{(d-1)\theta-d} }.
\end{equation}
Then there must be a $(d-1)$-tuple of vectors $\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)} \in \bbZ^n$ which is included in the count $U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta , P^{(d-1)\theta-d} }$ but not in $N^{\operatorname{aux}}_{\vec{\beta}\cdot\vec{f}} \brax{P^\theta }$.
Since the $(d-1)$-tuple $(\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)})$ is counted by $U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta , P^{(d-1)\theta-d} }$, the inequality $\supnorm{\vec{x}^{(i)}}\leq P^\theta $ holds for each $i=1,\dotsc,d-1$, and we have the bound
\begin{align}
\label{1.eqn:||f(xhat)||<stuff}
\supnormbig{
\vec{v}-\vec{\nabla}FMultilinear{ \vec{\beta} }{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}
}
&<
P^{(d-1)\theta-d},
\intertext{for some $\vec{v}\in \bbZ^n$. Since this $(d-1)$-tuple $(\vec{x}^{(1)},\dotsc,\vec{x}^{(d-1)})$ is not counted by $N^{\operatorname{aux}}_{\vec{\beta}\cdot\vec{f}} \brax{P^\theta }$, we must also have}
\label{1.eqn:|f(xhat)|>alpha}
\supnorm{
\vec{\nabla}FMultilinear{ \vec{\beta} }{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}
}
&\geq
\supnorm{\vec{\beta}\cdot\vec{f}^{[d]}} P^{(d-2)\theta}.
\end{align}
We use \eqref{1.eqn:||f(xhat)||<stuff} and \eqref{1.eqn:|f(xhat)|>alpha} to relate $P^\theta$ and $\supnorm{\vec{\beta}}$. It follows from \eqref{1.eqn:||f(xhat)||<stuff} that either
\begin{gather}
\label{1.eqn:|f(xhat)|<theta^d-1_/_P}
\supnormbig{\vec{\nabla}FMultilinear{ \vec{\beta} }{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}} < P^{(d-1)\theta-d}
\intertext{or}
\label{1.eqn:|f(xhat)|>1/2}
\supnormbig{\vec{\nabla}FMultilinear{ \vec{\beta} }{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}} \geq \frac{1}{2}.
\end{gather}
If \eqref{1.eqn:|f(xhat)|<theta^d-1_/_P} holds, then \eqref{1.eqn:|f(xhat)|>alpha} implies
\begin{equation}\label{1.eqn:alpha-alpha'_<_P^-d_(theta_P)^d-1}
\supnorm{\vec{\beta}\cdot\vec{f}^{[d]}}
<
\frac{P^{(d-1)\theta-d}}{P^{(d-2)\theta}}
=
P^{\theta-d}.
\end{equation}
If on the other hand \eqref{1.eqn:|f(xhat)|>1/2} holds, then the bound $\supnorm{\vec{x}^{(i)}}\leq P^\theta $ implies
\begin{equation*}
\supnorm{
\vec{\nabla}FMultilinear{ \vec{\beta} }{ \vec{x}^{(1)}, \dotsc, \vec{x}^{(d-1)}}
}
\llcv{}{d,n}
\supnorm{\vec{\beta}\cdot\vec{f}^{[d]}}
P^{(d-1)\theta},
\end{equation*}
and it follows by \eqref{1.eqn:|f(xhat)|>1/2} that
\begin{equation}\label{1.eqn:alpha-alpha'_>_(theta_P)^1-d}
\supnorm{\vec{\beta}\cdot\vec{f}^{[d]}}
\ggcv{}{d,n}
P^{-(d-1)\theta}.
\end{equation}
Either \eqref{1.eqn:alpha-alpha'_<_P^-d_(theta_P)^d-1} or \eqref{1.eqn:alpha-alpha'_>_(theta_P)^1-d} holds. So by rearranging and applying \eqref{1.eqn:norm_of_|b.f|} we infer
\begin{equation}\label{1.eqn:bound_on_theta_P}
P^{-\theta}
\ll_{d,n,\mu, M}
\max\setbrax{P^{-d}\supnorm{\vec{\beta}}^{-1},\,\supnorm{\vec{\beta}}^{\frac{1}{d-1}}}.
\end{equation}
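To spell out the rearrangement: if \eqref{1.eqn:alpha-alpha'_<_P^-d_(theta_P)^d-1} holds then the lower bound in \eqref{1.eqn:norm_of_|b.f|} gives
\begin{equation*}
\mu\supnorm{\vec{\beta}}
\leq
\supnorm{\vec{\beta}\cdot\vec{f}^{[d]}}
<
P^{\theta-d},
\qquad\text{so}\qquad
P^{-\theta}
<
\mu^{-1}P^{-d}\supnorm{\vec{\beta}}^{-1},
\end{equation*}
while if \eqref{1.eqn:alpha-alpha'_>_(theta_P)^1-d} holds then the upper bound in \eqref{1.eqn:norm_of_|b.f|} gives
\begin{equation*}
P^{-(d-1)\theta}
\llcv{}{d,n}
\supnorm{\vec{\beta}\cdot\vec{f}^{[d]}}
\leq
M\supnorm{\vec{\beta}},
\qquad\text{so}\qquad
P^{-\theta}
\llcv{}{d,n}
M^{\frac{1}{d-1}}\supnorm{\vec{\beta}}^{\frac{1}{d-1}}.
\end{equation*}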
We have shown that \eqref{1.eqn:M<N} implies \eqref{1.eqn:bound_on_theta_P}. Now Lemma~\ref*{1.lem:weyl_diff_ineq} shows that for $\theta\in\openclsd{0}{1}$ we have
\begin{equation*}
U_{\vec{\beta}\cdot\vec{f}} \brax{ P^\theta, P^{(d-1)\theta-d} }
\ggcv{\epsilon}{d,n,\epsilon}
P^{(d-1)\theta n}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }^{2^d},
\end{equation*}
and together with our assumption \eqref{1.eqn:aux_ineq_bound_moat_lemma} this implies that \eqref{1.eqn:M<N} will hold provided that $\theta\in\openclsd{0}{1}$ and that
\begin{equation}\label{1.eqn:M<N_sufficient_condition}
(P^\theta)^{(d-1)n - 2^d {\mathscr C} }
\leq
C_1^{-1}
P^{(d-1)\theta n}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }^{2^d}
\end{equation}
for some $C_1 \geq 1$ depending only on $C_0,d,n$ and $\epsilon$. Define $\theta$ by
\begin{equation}\label{1.eqn:choice_of_theta}
P^\theta
=
C_1^{1/(2^d {\mathscr C})}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }^{-1/{\mathscr C}},
\end{equation}
so that equality holds in \eqref{1.eqn:M<N_sufficient_condition}. We consider three cases.
The first case is when $\theta\leq 0$ holds. We can rule this out. If $\theta \leq 0$ then \eqref{1.eqn:choice_of_theta} gives
\begin{equation}
\label{1.eqn:theta<0}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }
\geq
C_1^{-1/2^d}.
\end{equation}
To prove \eqref{1.eqn:assumed_arcs}, we can assume without loss of generality that $P \ggcv{\epsilon}{n,\epsilon} 1$ holds. But then \eqref{1.eqn:theta<0} is false, since $\abs{\expSumSBoxAt{\vec{\alpha}}}\leq (P+1)^n$ by the definition \eqref{1.eqn:def_of_S}.
The second case is when $0 < \theta\leq 1$ holds. Our choice \eqref{1.eqn:choice_of_theta} for the parameter $\theta$ then ensures that \eqref{1.eqn:M<N_sufficient_condition} holds. We saw above that for $\theta\in\openclsd{0}{1}$ the bound \eqref{1.eqn:M<N_sufficient_condition} implies the inequality \eqref{1.eqn:M<N}, and that \eqref{1.eqn:M<N} leads to the estimate \eqref{1.eqn:bound_on_theta_P}. This estimate implies the conclusion \eqref{1.eqn:assumed_arcs} of the proposition upon substituting in the value of~$\theta$ from \eqref{1.eqn:choice_of_theta} and choosing $C$ to satisfy the bound $C \gg_{d,n,\mu,M} C_1^{1/2^d}$.
The third and last case is when $\theta> 1$ holds. In this case we have by \eqref{1.eqn:choice_of_theta} that
\begin{equation}
\label{1.eqn:theta>1}
\min\setbrax*{ \abs*{\frac{\expSumSBoxAt{\vec{\alpha}}}{P^{n+\epsilon}}} , \, \abs*{\frac{\expSumSBoxAt{\vec{\alpha}+\vec{\beta}}}{P^{n+\epsilon}}} }
<
C_1^{1/2^d}
P^{-{\mathscr C}}.
\end{equation}
Now for any $t >0$ we have $\max\setbrax{P^{-d} t^{-1},\,t^{\frac{1}{d-1}}} \geq P^{-1}$ (consider separately the cases $t\geq P^{-(d-1)}$ and $t< P^{-(d-1)}$), and hence
\begin{equation*}
\max\setbrax{P^{-d}\supnorm{\vec{\beta}}^{-1},\,\supnorm{\vec{\beta}}^{\frac{1}{d-1}}}^{\mathscr C}
\geq P^{-{\mathscr C}}.
\end{equation*}
So \eqref{1.eqn:assumed_arcs} follows from \eqref{1.eqn:theta>1} on choosing $C$ such that $C\geq C_1^{1/2^d}$ holds.\withqed
\end{proof}
\section{The proof of Theorems~\ref*{1.thm:main_thm_short} and~\ref*{1.thm:manin}}\label{1.sec:main_thm_proof}
\begin{proof}[Proof of Theorem~\ref*{1.thm:manin}]
Let ${\numZeroesInBoxOf{F_1,\dotsc,F_R}}(P)$ be as in \eqref{1.eqn:def_of_num_solns_in_box}. Set $f_i = F_i$, and apply Propositions~\ref*{1.prop:circle_method} and~\ref*{1.prop:moat_lemma}. This shows that
\begin{equation}\label{1.eqn:HL_formula_application}
\numZeroesInBoxOf{f_1,\dotsc,f_R}(P)
=
\operatorname{Sing}Series\operatorname{Sing}IntegralBox P^{n-dR}
+O_{C,f_1,\dotsc,f_R}(P^{n-dR-\delta}),
\end{equation}
where $\delta= \delta({\mathscr C},d,R)$ is positive. It remains to prove that $\operatorname{Sing}IntegralBox$ and $\operatorname{Sing}Series$ are positive under the conditions given in the theorem. Note that since $V(F_1,\dotsc,F_R)$ has dimension $n-1-R$, a smooth point corresponds to a solution of the equations
\begin{equation}
F_1(\vec{x}) = 0,\dotsc,F_R(\vec{x})=0
\label{1.eqn:F=0}
\end{equation}
at which the $R\times n$ Jacobian matrix $ \brax{\partial F_i(\vec{x})/\partial x_j}_{ij}$ has full rank.
Let $\vec{x}=\vec{r}$ be a real solution to \eqref{1.eqn:F=0} at which the matrix $ \brax{\partial F_i(\vec{x})/\partial x_j}_{ij}$ has full rank, and for which $\vec{r}\in\mathscr{B}$. Applying the Implicit Function Theorem to the equations \eqref{1.eqn:F=0} at the point $\vec{r}$, we find an open set $U\subset \mathscr{B}$ in which the solutions to \eqref{1.eqn:F=0} form an $(n-R)$-dimensional real manifold. Considering a small neighbourhood of this manifold shows that for all $\epsilon\in\openclsd{0}{1}$ we have
\begin{equation*}
\Meas{ \vec{s} \in U : \abs{F_1(\vec{s})} \leq \epsilon, \dotsc, \abs{F_R(\vec{s})} \leq \epsilon}
\gg_{F_1,\dotsc,F_R}
\epsilon^R
\end{equation*}
where $\meas{\,\cdot\,}$ denotes the Lebesgue measure, as in Lemma~\ref*{1.lem:frakI}. Setting $\vec{t} = P\vec{s}$ and $\epsilon = \tfrac{1}{2}P^{-d}$, and using the fact that the forms $F_i$ are homogeneous of degree $d$, we see that
\[
\Meas{ \vec{t}\in\bbR^n: \vec{t}/P \in U, \abs{F_1(\vec{t})} \leq \tfrac{1}{2}, \dotsc, \abs{F_R(\vec{t})} \leq \tfrac{1}{2}}
\gg_{F_1,\dotsc,F_R}
P^{n-dR},
\]
and \eqref{1.eqn:evaluating_frakI} from Lemma~\ref*{1.lem:frakI} then shows that $\mathfrak{I}(\mathscr{B})$ is positive.
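For the reader's convenience we spell out the bound displayed above: taking $\epsilon = \tfrac{1}{2}P^{-d}\in\openclsd{0}{1}$ for $P\geq 1$, and using that each $F_i$ is a form of degree $d$ (as in the setting above), so that $F_i(P\vec{s})=P^{d}F_i(\vec{s})$, the substitution $\vec{t}=P\vec{s}$ scales Lebesgue measure by $P^{n}$ and gives
\begin{align*}
\Meas{ \vec{t}\in\bbR^n: \vec{t}/P \in U,\ \abs{F_i(\vec{t})} \leq \tfrac{1}{2} \text{ for } 1\leq i\leq R}
&=
P^{n}\,\Meas{ \vec{s} \in U : \abs{F_i(\vec{s})} \leq \tfrac{1}{2}P^{-d} \text{ for } 1\leq i\leq R}\\
&\gg_{F_1,\dotsc,F_R}
P^{n}\brax{\tfrac{1}{2}P^{-d}}^{R}
\gg_{R}
P^{n-dR}.
\end{align*}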
To show that $\mathfrak{S}$ is positive under the conditions given in the theorem we use a variant of Hensel's Lemma. Let $p$ be a prime and let $\vec{a}\in\bbZ_p^n$. Suppose that $\vec{x}=\vec{a}$ is a solution to the system $f_i(\vec{x})=0$ at which the $R\times n$ Jacobian matrix $\brax{\partial f_i(\vec{x})/\partial x_j}_{ij}$ has full rank. After permuting the variables $x_j$ if necessary, we may assume that the $R\times R$ submatrix $M(\vec{x})$ consisting of the last $R$ columns of $\brax{\partial f_i(\vec{x})/\partial x_j}_{ij}$ is nonsingular at $\vec{x}=\vec{a}$.
The so-called valuation theoretic Implicit Function Theorem then applies to the polynomials $f_i$ with the common zero $\vec{a}$ over the valued field $\bbQ_p$. This is essentially a version of Hensel's Lemma; see Kuhlmann~\cite[Theorem~25]{kuhlmannHensel}. If we write $\abs{\det M(\vec{a})}_p = p^{-\alpha}$, the theorem states that for all $p$-adic numbers $a'_1,\dotsc,a'_{n-R}\in \bbQ_p$ with $\abs{a'_i-a_i}_p < p^{-2\alpha}$, there are unique $p$-adic numbers $a'_{n-R+1},\dotsc,a'_{n}\in\bbQ_p$ with $\abs{a'_i-a_i}_p < p^{-\alpha}$ such that each $f_i(\vec{a}')=0$.
Now let $a'_1,\dotsc,a'_{n-R}$ be $p$-adic integers satisfying $a'_i \equiv a_i$ modulo $p^{2\alpha+1}$. For each $k\in\bbN$ with $k\geq 2\alpha+1$ there are $p^{(k-2\alpha-1)(n-R)}$ choices for $a'_1,\dotsc,a'_{n-R}$ which are distinct modulo $p^k$, and by the theorem above each one extends to a vector of $p$-adic integers $\vec{a}'$ satisfying $\vec{f}(\vec{a}')=0$.
If such a point $\vec{a}$ exists for each prime $p$, then $\mathfrak{S}$ is positive. Indeed, reducing the vectors $\vec{a}'$ modulo $p^k$ gives $\gg_{\vec{f},p} p^{k(n-R)}$ distinct vectors $\vec{b}\in\set{1,\dotsc,p^k}^n$ satisfying the system of congruences $f_i(\vec{b})\equiv 0$ modulo $p^k$. The equality \eqref{1.eqn:evaluating_frakS} then shows that $\mathfrak{S}>0$.
\withqed\end{proof}
\begin{proof}[Proof of Theorem~\ref*{1.thm:main_thm_short}]
We let ${\mathscr C} = \frac{n-R+1}{4}$, and apply Theorem~\ref*{1.thm:manin} to the system of forms $F_i$. The result will follow if we can show that \eqref{1.eqn:aux_ineq_bound_in_manin_thm} holds, which is to say that
\begin{equation}\label{1.eqn:aux_ineq_bound}
\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}
(B)
\ll
B^{\sigma_\bbR}
\end{equation}
for all $\vec{\beta}\in\bbR^R$ and all $B\geq 1$. Here the quantity $\sigma_\bbR$ is defined by \eqref{1.eqn:def_of_sigma-sub-R}.
For each $\vec{\beta}\in\bbR^R$, let the matrix of the quadratic form $\vec{\beta}\cdot\vec{F}$ be $M\brax{\vec{\beta}}$. That is, $M\brax{\vec{\beta}}$ is the unique real $n\times n$ symmetric matrix with
\[
\vec{\beta}\cdot\vec{F}(\vec{x})
=
\vec{x}^T M\brax{\vec{\beta}}\vec{x}.
\]
Then we have
\[
\vec{\nabla}SomethingMultilinear{\vec{\beta}\cdot\vec{F}}{\vec{u}}
=
2M\brax{\vec{\beta}}\vec{u},
\]
so $\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}\brax{B}$ counts vectors $\vec{u}\in \bbZ^n$ satisfying
\begin{equation*}
\supnorm{\vec{u}}
\leq B,
\qquad
\supnorm{M\brax{\vec{\beta}}\vec{u}}
\leq
\tfrac{1}{2}\supnorm{\vec{\beta}\cdot\vec{F}}.
\end{equation*}
These vectors $\vec{u}$ are all contained in the box $\supnorm{\vec{u}}\leq B$, and in the ellipsoid
\begin{equation*}
E(\vec{\beta})
=
\set[]{\vec{t}\in\bbR^n : \vec{t}^T { M\brax{\vec{\beta}}^T M\brax{\vec{\beta}} } \vec{t}
<
n\cdot\supnorm{\vec{\beta}\cdot\vec{F}}^2}.
\end{equation*}
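Indeed, if $\vec{\beta}\cdot\vec{F}\neq 0$, then any $\vec{u}$ counted above lies in $E(\vec{\beta})$: writing $\brax{M\brax{\vec{\beta}}\vec{u}}_j$ for the $j$th coordinate of $M\brax{\vec{\beta}}\vec{u}$, we have
\begin{equation*}
\vec{u}^T { M\brax{\vec{\beta}}^T M\brax{\vec{\beta}} } \vec{u}
=
\sum_{j=1}^{n}\brax{M\brax{\vec{\beta}}\vec{u}}_j^2
\leq
n\supnorm{M\brax{\vec{\beta}}\vec{u}}^2
\leq
\tfrac{n}{4}\supnorm{\vec{\beta}\cdot\vec{F}}^2
<
n\cdot\supnorm{\vec{\beta}\cdot\vec{F}}^2.
\end{equation*}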
The ellipsoid has principal radii $\abs{\lambda}^{-1}\sqrt{n}\supnorm{\vec{\beta}\cdot\vec{F}}$ where $\lambda$ runs over the eigenvalues of the real symmetric matrix $M\brax{\vec{\beta}}$, counted with multiplicity.
Hence
\begin{equation*}
\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}\brax{B}
\ll_n
\prod_{\lambda} \min\setbrax{ \abs{\lambda}^{-1}\supnorm{\vec{\beta}\cdot\vec{F}}+1,\, B}
\end{equation*}
where $\lambda$ is as before. So to prove \eqref{1.eqn:aux_ineq_bound} it suffices to show that at least $n-\sigma_\bbR$ of the eigenvalues $\lambda$ satisfy $\abs{\lambda} \gg \supnorm{\vec{\beta}\cdot\vec{F}}$.
Suppose for a contradiction that this is false. Then there exists a sequence $\vecsuper{\beta}{i}\in\bbR^R$ such that at least $\sigma_\bbR+1$ of the eigenvalues of $M\brax{\vecsuper{\beta}{i}}$ satisfy $\lambda = o(\supnorm{\vecsuper{\beta}{i}\cdot\vec{F}})$. By passing to a subsequence, we can assume $\vecsuper{\beta}{i}/\supnorm{\vecsuper{\beta}{i}} \to \vec{\beta}$, and then, by linearity of $\vec{\beta}\mapsto M\brax{\vec{\beta}}$ and continuity of the eigenvalues, at least $\sigma_\bbR+1$ of the eigenvalues of $M\brax{\vec{\beta}}$ must be zero. In other words,
\begin{equation*}
\dim \operatorname{Sing} V(\vec{\beta}\cdot\vec{F})
\geq
\sigma_\bbR.
\end{equation*}
But this contradicts the definition \eqref{1.eqn:def_of_sigma-sub-R}. So \eqref{1.eqn:aux_ineq_bound} holds as claimed.
\withqed\end{proof}
As alluded to after Lemma~\ref*{1.lem:nonsing_case}, the argument used to prove Theorems~\ref*{1.thm:main_thm_short} and~\ref*{1.thm:manin} also yields weak approximation for $V(F_1,\dotsc,F_R)$ if that variety is smooth. It suffices to show that if the system $F_i(q\vec{x}-\vec{a})=0$ has solutions in the $p$-adic integers for each $p$, then it has integral solutions $\vec{x}$ with $\frac{\vec{x}}{\supnorm{\vec{x}}}$ arbitrarily close to $\frac{\vec{r}}{\supnorm{\vec{r}}}$, for any fixed real solution $\vec{r}$ to the system $F_i(\vec{r})=0$. For this one can let $\mathscr{B}$ be a sufficiently small box containing $\frac{\vec{r}}{\supnorm{\vec{r}}}$, and repeat the proof of Theorems~\ref*{1.thm:main_thm_short} and \ref*{1.thm:manin} with the choice ${f}_i(\vec{x}) = F_i(q\vec{x}-\vec{a})$ instead of $f_i=F_i$ at the start of the proof of Theorem~\ref*{1.thm:manin}. Since $N^{\operatorname{aux}}_{\vec{\beta}\cdot\vec{f}}(B)=\auxIneqOfSomethingNumSolns{\vec{\beta}\cdot\vec{F}}(B)$ we obtain \eqref{1.eqn:HL_formula_application} as before. Recalling that any real or $p$-adic point of $V(F_1,\dotsc,F_R)$ must be smooth, the argument to prove that $\mathfrak{I}(\mathscr{B})$ and $\mathfrak{S}$ are positive goes through, and we obtain the existence of an integral solution of the required kind.
\begin{proof}[Proof of Lemma~\ref*{1.lem:nonsing_case}]
We prove the first inequality in \eqref{1.eqn:nonsing_case}. Let $\vec{\beta}\in\bbR^R\setminus\set{\vec{0}}$ be such that
\[
\sigma_\bbR
=\dim \operatorname{Sing} V(\vec{\beta}\cdot\vec{F}).
\]
Without loss of generality we may suppose that $\beta_R $ is nonzero. Then we have
\[
V(F_1,\dotsc,F_R)
=
V(F_1,\dotsc,F_{R-1},\vec{\beta}\cdot\vec{F}).
\]
Since $V(F_1,\dotsc,F_{R})$ has dimension $n-1-R$, it follows that
\[
V(F_1,\dotsc,F_{R-1})
\cap
\operatorname{Sing} V(\vec{\beta}\cdot\vec{F})
\subset
\operatorname{Sing} V(F_1,\dotsc,F_R)
\]
and so
$
V(F_1,\dotsc,F_{R-1})
\cap
\operatorname{Sing} V(\vec{\beta}\cdot\vec{F})
=\emptyset,
$
as $V(F_1,\dotsc,F_{R})$ is smooth. It follows that $\dim \operatorname{Sing} V(\vec{\beta}\cdot\vec{F})\leq R-1$, which proves the first inequality in \eqref{1.eqn:nonsing_case}.
The second inequality in \eqref{1.eqn:nonsing_case} follows from the work of Browning and Heath-Brown~\cite{browningHeathBrownDiffDegrees}. In those authors' formula (1.3), set
\begin{gather*}
D=2,
\quad
r_1=0,
\quad
r_2=R,
\quad
F_{i,2} =F_i.
\end{gather*}
Now the $R\times n$ Jacobian matrix $ \brax{\partial F_i(\vec{x})/\partial x_j}_{ij}$ has full rank at every nonzero solution $\vec{x}\in\widebar{\bbQ}^n$ to $F_1(\vec{x})=\dotsb=F_R(\vec{x})=0$, because $V(F_1,\dotsc,F_R)$ is smooth of dimension $n-1-R$. This makes the $F_{i,2}$ a ``nonsingular system'' in the sense of Browning and Heath-Brown, as defined in their formula (1.7). The next step is to replace the $F_{i,2}$ with an ``equivalent optimal system''. The comments after formula (1.7) of those authors show that in our case this means replacing $F_i$ with $\sum_j A_{ij}F_j$, where $A$ is an invertible linear transformation. In particular this preserves $V(F_1,\dotsc,F_R)$ and $W$. Now their formulae (1.4) and (1.8) show that
$B_2 \leq R-1$, where $B_2 = 1+ \dim (W)$.
This proves the second inequality in \eqref{1.eqn:nonsing_case}.
\withqed\end{proof}
\end{document}
\begin{document}
\title[Uniqueness in discrete tomography of algebraic Delone sets]{Solution of a uniqueness
problem in the discrete tomography of algebraic Delone sets}
\author{Christian Huck}
\author{Michael Spie\ss}
\address{Fakult\"{a}t f\"{u}r Mathematik,
Universit\"{a}t Bielefeld, Postfach 100131, 33501 Bielefeld, Germany}
\email{[email protected]}
\email{[email protected]}
\keywords{Discrete tomography, X-ray, algebraic Delone set, model set,
$U$-polygon, convex body.}
\subjclass[2010]{Primary 52C23; Secondary 11R04, 11R06, 11R18, 12F10, 52A10, 52C05,82D99}
\begin{abstract}
We consider algebraic Delone sets $\varLambda$ in the
Euclidean plane and address the problem of
distinguishing convex subsets of $\varLambda$ by X-rays in prescribed $\varLambda$-directions, i.e., directions parallel to nonzero
interpoint vectors of $\varLambda$. Here, an X-ray in direction
$u$ of a finite
set gives the number of points in the set on each line
parallel to $u$. It is shown
that for any algebraic Delone set $\varLambda$ there are four
prescribed $\varLambda$-directions such that any two convex subsets of
$\varLambda$ can be distinguished by the corresponding X-rays. We
further prove the existence of a natural number
$c_{\varLambda}$ such that any two convex subsets of $\varLambda$ can be distinguished by their X-rays in any set of
$c_{\varLambda}$ prescribed
$\varLambda$-directions. In particular, this extends a well-known result of Gardner
and Gritzmann on the corresponding problem for planar lattices to
nonperiodic cases that are relevant in quasicrystallography.
\end{abstract}
\maketitle
\section{Introduction}\label{intro}
{\em Discrete tomography} is concerned with the
inverse problem of retrieving information about some {\em finite}
object in Euclidean space from
(generally noisy) information about its slices. One important problem
is the {\em unique reconstruction} of a finite point set in Euclidean $3$-space
from its {\em (discrete parallel) X-rays} in a small number of directions, where the
{\em X-ray} of the finite set in a certain direction is the {\em line sum
function} giving the
number of points in the set on each line parallel to this direction.
The interest in the discrete tomography of planar Delone sets $\varLambda$
with long-range order is motivated by the requirement in materials
science for the unique reconstruction of solid state materials like {\em quasicrystals}
slice by slice from their images under quantitative {\em high
resolution transmission electron microscopy} (HRTEM). In fact,
in~\cite{ks}, \cite{sk} a technique
is described, which can, for certain
crystals, effectively measure the number of atoms lying on densely occupied
columns. It is reasonable to expect that future developments in
technology will extend this situation to other solid state
materials. The aforementioned density condition forces us to consider only $\varLambda$-directions, i.e.,
directions parallel to nonzero interpoint vectors of $\varLambda$. Further, since typical objects may be damaged or even destroyed by the radiation
energy after about $3$ to $5$ images taken by
HRTEM, applicable results may only use a small
number of X-rays. It is actually this restriction to few high-density
directions that makes the
problems of discrete tomography mathematically challenging, even if
one assumes the absence of noise.
In the
traditional setting, motivated by {\em crystals}, the positions to be
determined form a finite subset of a
three-dimensional lattice, the latter allowing a slicing into equally
spaced
congruent copies of a planar lattice. In the crystallographic
setting, by the affine nature
of the problem, it therefore suffices to study
the discrete tomography of the square lattice;
cf.~\cite{GG}, \cite{GG2}, \cite{GGP}, \cite{Gr}, \cite{HT}, \cite{HK},
\cite{HK2} for an
overview. For the
{\em quasicrystallographic} setting, the
positions to be determined form a finite subset of a {\em nonperiodic Delone
set} with {\em long-range order}
(more precisely, a {\em mathematical
quasicrystal} or {\em model set}~\cite{BM}, \cite{Moody}) which on the other hand is contained
in a free additive subgroup of $\mathbb{R}^3$ of finite rank $r> 3$. These model sets
possess, as is the case for lattices, a dimensional hierarchy, i.e., they allow a slicing into
planar model sets. However, the slices are in general no longer pairwise congruent or equally
spaced in $3$-space; cf.~\cite{PABP2}. Still, most of
the model sets that describe real quasicrystallographic structures
allow a slicing such that each slice is, when seen from a common
perpendicular viewpoint, a (planar) {\em $n$-cyclotomic model set},
where $n=5$, $n=8$ and $n=12$, respectively (Example~\ref{algex});
cf.~\cite[Sec.~1.2]{H}, \cite{H2}, \cite[Sec.~4.5]{H5} and \cite{St}
for details. These cyclotomic
model sets thus take over the role played by the planar lattices in
the crystallographic case. In the present text, we shall focus
on the larger class of {\em algebraic Delone sets} (Definition~\ref{algdeldef}).
Since different finite subsets of a Delone set $\varLambda$ may have the same X-rays in several
$\varLambda$-directions (in other words, the above problem of uniquely
reconstructing a finite point set from its X-rays is an
{\em ill-posed}\/ problem in general), one is naturally interested in conditions
to be imposed
on the set of $\varLambda$-directions together with restrictions on
the possible finite subsets of
$\varLambda$ such that the latter phenomenon
cannot occur. Here, we consider the {\em convex subsets}
of $\varLambda$ (i.e., bounded subsets of $\varLambda$ with
the property that their convex hull contains no new points of $\varLambda$) and show that for any algebraic Delone
set $\varLambda$ there are four prescribed $\varLambda$-directions such that any two convex subsets of
$\varLambda$ can be distinguished by the corresponding X-rays,
whereas fewer than four $\varLambda$-directions never suffice for this
purpose (Theorem~\ref{dtmain}(a)). We
further prove the existence of a finite number
$c_{\varLambda}$ such that any two convex subsets of $\varLambda$
can be distinguished by their X-rays in {\em any} set of
$c_{\varLambda}$ prescribed
$\varLambda$-directions (Theorem~\ref{dtmain}(b)). Moreover, we
demonstrate that the
least possible numbers $c_{\varLambda}$ in the
case of the practically most relevant examples of $n$-cyclotomic model
sets $\varLambda$ with $n=5$, $n=8$ and $n=12$ are (in that
very order) $11$, $9$ and $13$ (Theorem~\ref{dtmain2}(b) and Remark~\ref{rembest}). This extends a well-known result of Gardner
and Gritzmann (cf.~\cite[Thm.~5.7]{GG}) on the corresponding
problem for planar lattices $\varLambda$ ($c_{\varLambda}=7$) to
cases that are relevant in quasicrystallography and particularly solves Problem
4.34 of~\cite{H5}. The above results and their continuous analogue
(Theorem~\ref{tmain}) follow from deep insights into the existence
of certain {\em $U$-polygons} in the plane (cf.~Sec.~\ref{sec1}). We believe that our main
result on these polygons (Theorem~\ref{main}) is of independent
interest from a purely geometrical point of view. For the algorithmic
reconstruction problem in the quasicrystallographic setting, we refer
the reader to~\cite{BG2},~\cite{H2}.
\section{Preliminaries and notation}\label{sec1}
Natural numbers are always assumed to be positive. We denote the norm in Euclidean $d$-space by $\Arrowvert \cdot \Arrowvert$. The Euclidean plane will occasionally be
identified with the complex numbers. For $z\in\mathbb{C}$, $\bar{z}$ denotes the
complex conjugate of $z$ and $|z|=\sqrt{z\bar{z}}$ its modulus. The unit circle in $\mathbb{C}$ is
denoted by $\mathbb{S}^{1}$ and its elements are also called
{\em directions}. For $z\in \mathbb{C}^*$, we denote
by $\operatorname{sl}(z)$ the slope of $z$, i.e., $\operatorname{sl}(z)=-i(z-\bar{z})/(z+\bar{z})\in
\mathbb{R}\cup\{\infty\}$. For $r>0$ and $z\in\mathbb{C}$,
$B_{r}(z)$ is the open ball of radius $r$ about $z$. Recall that an
($\mathbb{R}$-){\em linear endomorphism} (resp., {\em affine endomorphism})
of $\mathbb{C}$ is given
by $z \mapsto az+b\bar{z}$ (resp., $z \mapsto az+b\bar{z}+t$), where $a,b,t\in\mathbb{C}$. In both cases,
it is an automorphism if and only if $az+b\bar{z}=0$ only holds for $z=0$. A {\em homothety} $h\!:\, \mathbb{C} \rightarrow
\mathbb{C}$ is given by $z \mapsto \lambda z + t$, where
$\lambda \in \mathbb{R}$ is positive and $t\in \mathbb{C}$. In the following, let
$\varLambda$ be a subset of $\mathbb{C}$. A direction
$u\in\mathbb{S}^{1}$ is called a $\varLambda${\em-direction} if it is
parallel to a nonzero element of the difference set
$\varLambda-\varLambda=\{v-w\,|\,v,w\in\varLambda\}$ of $\varLambda$. A {\em convex polygon} is the convex hull of a finite set of points in $\mathbb{C}$. A {\em polygon in} $\varLambda$ is a convex polygon with all vertices in $\varLambda$. Further, a bounded subset $C$ of $\varLambda$ is called a {\em convex subset of} $\varLambda$ if $C =
\operatorname{conv}(C)\cap \varLambda$, where $\operatorname{conv}(C)$
denotes the convex hull of $C$. Let $U\subset \mathbb{S}^{1}$ be
a finite set of directions. A nondegenerate convex polygon $P$ is
called a {\em $U$-polygon} if it has the property that whenever $v$ is
a vertex of $P$ and $u\in U$, the line in the complex plane in direction $u$
which passes through $v$ also meets another vertex $v'$ of $P$. By a
{\em regular polygon} we shall always mean a nondegenerate convex regular polygon. An
{\em affinely regular polygon} is the image of a
regular polygon under an affine automorphism of the complex plane. $\varLambda$ is called {\em uniformly discrete} if there is a radius
$r>0$ such that every ball $B_{r}(z)$ with $z\in\mathbb{C}$ contains at most one point of
$\varLambda$. Note that the bounded
subsets of a uniformly discrete set $\varLambda$ are precisely the
finite subsets of $\varLambda$. $\varLambda$ is called {\em relatively dense} if there is a radius $R>0$
such that every ball $B_{R}(z)$ with $z\in\mathbb{C}$ contains at least one point of
$\varLambda$. $\varLambda$ is called a {\em Delone set} if it is both uniformly
discrete and relatively dense. $\varLambda$ is said to be of
\emph{finite local complexity} if $\varLambda-\varLambda$ is
discrete and closed. Note that $\varLambda$ is of finite local complexity if and
only if for every $r>0$ there
are, up to translation, only finitely many \emph{patches of radius
$r$}, i.e., sets of the form $\varLambda\cap
B_{r}(z)$, where $z\in\mathbb{C}$; cf.~\cite{Moody}. A Delone set $\varLambda$ is a \emph{Meyer set} if $\varLambda-\varLambda$ is
uniformly discrete. Trivially, any Meyer set
is of finite local complexity. $\varLambda$ is called {\em periodic} if it has
nonzero translation symmetries. Finally, we denote by $K_{\varLambda}$
the intermediate field of $\mathbb{C}/\mathbb{Q}$ that is given by
$$
K_{\varLambda}\,\,:=\,\,\mathbb{Q}\left(\left(\varLambda-\varLambda\right)\cup\left(\overline{\varLambda-\varLambda}\right)\right)\,.
$$
\subsection{Recollections from the theory of cyclotomic fields}
Let $K\subset\mathbb{C}$ be a field and let $\mu$ be the group of roots of unity in
$\mathbb{C}$. We denote the maximal real subfield $K\cap\mathbb{R}$ of $K$ by $K^+$ and set $\mu(K):=\mu\cap K$. For $n\in \mathbb{N}$, we always let $\zeta_n := e^{2\pi
i/n}$, a primitive $n$th root of unity in $\mathbb{C}$. Then, $\mathbb{Q}(\zeta_n)$ is the
$n$th cyclotomic field. Further, $\phi$ will always denote Euler's totient function, i.e., $$\phi(n) =
\operatorname{card}\left(\big\{k \in \mathbb{N}\,\big |\,1 \leq k \leq n
\textnormal{ and } \operatorname{gcd}(k,n)=1\big\}\right)\,.$$
Recall that $\phi$ is multiplicative with $\phi(p^r)=p^{r-1}(p-1)$ for
$p$ prime and $r\in\mathbb{N}$.
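For instance, for the values $m=60$ and $m=48$ occurring in Corollary~\ref{coro8125} below, one has $\phi(60)=\phi(2^2)\phi(3)\phi(5)=2\cdot 2\cdot 4=16$ and $\phi(48)=\phi(2^4)\phi(3)=8\cdot 2=16$.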
\begin{fact}[Gau\ss]\cite[Thm.~ 2.5]{Wa}\label{gau}
$[\mathbb{Q}(\zeta_n) :
\mathbb{Q}] = \phi(n)$ and the field extension $\mathbb{Q}(\zeta_n)/ \mathbb{Q}$
is a Galois extension with Abelian Galois group $G(\mathbb{Q}(\zeta_n)/
\mathbb{Q}) \simeq (\mathbb{Z} / n\mathbb{Z})^{\times}$,
with $a \pmod n$ corresponding to the automorphism given by $\zeta_n \mapsto \zeta_n^{a}$.\qed
\end{fact}
Note that the compositum $\mathbb{Q}(\zeta_n)\mathbb{Q}(\zeta_m)=\mathbb{Q}(\zeta_n,\zeta_m)$
of cyclotomic fields is equal to the cyclotomic field
$\mathbb{Q}(\zeta_{\operatorname{lcm}(n,m)})$. Further, the intersection
$\mathbb{Q}(\zeta_n)\cap\mathbb{Q}(\zeta_m)$ of cyclotomic fields is equal to the cyclotomic field
$\mathbb{Q}(\zeta_{\operatorname{gcd}(n,m)})$. Note that $\mathbb{Q}(\zeta_n)^+=\mathbb{Q}(\zeta_n+\bar{\zeta}_n)=\mathbb{Q}(\zeta_n+\zeta_n^{-1})$. Clearly, if $n$ divides $m$ then
$\mathbb{Q}(\zeta_n)$ is a subfield of $\mathbb{Q}(\zeta_m)$. Since
$\mathbb{Q}(\zeta_n)=\mathbb{Q}(\zeta_{2n})$ for odd $n$ by Fact~\ref{gau}, we may sometimes restrict ourselves to $n\in\mathbb{N}$ with $n\not\equiv 2 \pmod 4$.
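For instance, $\mathbb{Q}(\zeta_4)\mathbb{Q}(\zeta_6)=\mathbb{Q}(\zeta_{12})$ and $\mathbb{Q}(\zeta_4)\cap\mathbb{Q}(\zeta_6)=\mathbb{Q}(\zeta_2)=\mathbb{Q}$, while $\mathbb{Q}(\zeta_3)=\mathbb{Q}(\zeta_6)$ illustrates the last observation.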
\subsection{Cross ratios}
Let $(t_1,t_2,t_3,t_4)$ be an ordered tuple of four pairwise distinct
elements of $\mathbb{R}\cup\{\infty\}$. Then, its {\em cross ratio}
$\langle t_1,t_2,t_3,t_4\rangle$ is the nonzero real number defined by
$$
\langle t_1,t_2,t_3,t_4\rangle := \frac{(t_3 - t_1)(t_4 - t_2)}{(t_3 - t_2)(t_4 - t_1)}\,,
$$
with the usual conventions if one of the $t_i$ equals
$\infty$. We need the following invariance property of cross ratios of slopes.
\begin{fact}\cite[Lemma 2.17]{H}\label{crossratio}
Let $z_1,z_2,z_3,z_4\in \mathbb{C}^*$ be pairwise
nonparallel and let $\Psi$ be a linear automorphism of
the complex plane. Then, one has
$$
\big\langle \operatorname{sl}(z_{1}),\operatorname{sl}(z_{2}),\operatorname{sl}(z_{3}),\operatorname{sl}(z_{4})\big\rangle = \big\langle \operatorname{sl}(\Psi(z_1)),\operatorname{sl}(\Psi(z_2)),\operatorname{sl}(\Psi(z_3)),\operatorname{sl}(\Psi(z_4))\big\rangle\,.\qed
$$
\end{fact}
\begin{fact}\cite[Lemma 2.20]{H}\label{crkn4gen}
Let $\varLambda\subset\mathbb{C}$. Then the
cross ratio of slopes of four pairwise nonparallel
$\varLambda$-directions is an element of $K_{\varLambda}^+$.\qed
\end{fact}
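To illustrate Fact~\ref{crkn4gen}, consider the square lattice $\varLambda=\mathbb{Z}[\zeta_4]=\mathbb{Z}[i]$, for which $K_{\varLambda}=\mathbb{Q}(i)$ and thus $K_{\varLambda}^+=\mathbb{Q}$. The four pairwise nonparallel $\varLambda$-directions parallel to $1$, $1+i$, $i$ and $1-i$ have slopes $0$, $1$, $\infty$ and $-1$, and indeed
$$
\big\langle 0,1,\infty,-1\big\rangle=\frac{-1-1}{-1-0}=2\in\mathbb{Q}\,,
$$
where the two factors involving $\infty$ cancel according to the usual conventions.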
\section{Algebraic Delone sets}
The following notions will
be useful; see also~\cite{H3}, \cite{H4}, \cite{H5} for
generalisations and for results related to
those presented below.
\begin{defi}\label{algdeldef}
A Delone set $\varLambda\subset\mathbb{C}$ is called an {\em algebraic Delone
set} if it satisfies the following properties:
\begin{eqnarray*}
\mbox{(Alg)}&&[K_{\varLambda}:\mathbb{Q}]<\infty\,.\\
\mbox{(Hom)}&&\mbox{For any finite subset $F$ of $K_{\varLambda}$, there is a
homothety}\\&&\mbox{$h$ of the complex plane such that $h(F)\subset \varLambda$\,.
}
\end{eqnarray*}
Moreover, $\varLambda$ is called an {\em $n$-cyclotomic
Delone set} if it satisfies the property
$$\mbox{($n$-Cyc)}\quad\quad K_{\varLambda}\subset\mathbb{Q}(\zeta_n)$$
for some $n\geq 3$
and has property (Hom). Further,
$\varLambda$ is called a {\em cyclotomic Delone set} if it is an
$n$-cyclotomic Delone set for a suitable $n\geq 3$.
\end{defi}
\begin{rem}\label{remalg}
Algebraic Delone
sets were already introduced in~\cite[Definition
4.1]{H5}. Clearly, for every algebraic Delone set $\varLambda$, the
field
extension
$K_{\varLambda}/\mathbb{Q}$ is an imaginary extension (due to $\varLambda$
being relatively dense) with $\overline{K_{\varLambda}}=K_{\varLambda}$. By the Kronecker-Weber theorem (cf.~\cite[Thm.~
14.1]{Wa}) and Fact~\ref{gau}, the cyclotomic Delone sets are precisely the algebraic
Delone sets $\varLambda$ with the additional property that
$K_{\varLambda}/\mathbb{Q}$ is an Abelian extension.
\end{rem}
Following Moody~\cite{Moody}, modified along the lines of the algebraic setting of Pleasants~\cite{PABP}, we
define as follows.
\begin{defi}\label{algmodel}
Let $K\subset\mathbb{C}$ be an imaginary quadratic extension
of a real algebraic
number field (necessarily, this real algebraic number field is $K^+$) of degree
$[K:\mathbb{Q}]=:d$ over $\mathbb{Q}$ (in particular, $d$ is even). Let
$\mathcal{O}_{K}$ be the ring of integers in $K$ and let $.^{\star}\!:\,
\mathcal{O}_{K}\rightarrow \mathbb{C}^{s-1}\times\mathbb{R}^t$ be any map of the form $z\mapsto
(\sigma_{2}(z),\dots,\sigma_{s}(z),\sigma_{s+1}(z),\dots,\sigma_{s+t}(z))$,
where
$\sigma_{s+1},\dots,\sigma_{s+t}$ are the real embeddings of
$K/\mathbb{Q}$ into $\mathbb{C}/\mathbb{Q}$ and $\sigma_{2},\dots,\sigma_{s}$ arise from the complex embeddings of
$K/\mathbb{Q}$ into $\mathbb{C}/\mathbb{Q}$ except the identity and the complex conjugation by
choosing exactly one embedding
from each pair of complex conjugate ones (in particular, $d=2s+t$ and
$s\geq 1$). Then, for any such choice,
each translate $\varLambda$ of
$$
\varLambda(W):=\{z\in\mathcal{O}_{K}\,|\,z^{\star}\in W\}\,,
$$
where $W\subset
\mathbb{C}^{s-1}\times\mathbb{R}^t\simeq\mathbb{R}^{d-2}$ is a relatively compact set with nonempty interior, is called a \emph{$K$-algebraic model set}. Moreover, $.^{\star}$ and $W$ are called the \emph{star
map} and the \emph{window} of $\varLambda$, respectively.
\end{defi}
\begin{rem}\label{modelrem}
Algebraic number fields $K$ as above may be obtained by starting with
a real algebraic number field $L$ and adjoining the square root of a
negative number from $L$. Note that, in the situation of Definition~\ref{algmodel}, the
quadratic extension $K/K^+$ is a Galois extension with $G(K/K^+)$
containing the identity and the complex conjugation (in particular,
one has $\overline{K}=K$). We use the
convention that for $d=2$ (meaning that $s=1$ and $t=0$), $\mathbb{C}^{s-1}\times\mathbb{R}^t$ is the trivial group $\{0\}$ and the star map is the zero
map. Due to the Minkowski
representation
$
\{(z,z^{\star})\,|\,z\in\mathcal{O}_{K}\}
$ of the maximal order $\mathcal{O}_{K}$ of $K$
being a (full) lattice in $\mathbb{C}\times\mathbb{C}^{s-1}\times\mathbb{R}^t\simeq \mathbb{R}^d$ (cf.~\cite[Ch.~2,
Sec.~3]{Bo}) that is in one-to-one correspondence with
$\mathcal{O}_{K}$ via the canonical
projection on the first factor and due to
$\mathcal{O}_{K}^{\star}$ being a dense subset of
$\mathbb{C}^{s-1}\times\mathbb{R}^t$ (see Lemma~\ref{dense} below),
$K$-algebraic model sets are indeed {\em model sets} and thus
are
Meyer sets; cf.~\cite{BM}, \cite{BM2}, \cite{Moody}, \cite{Schl2}, \cite{Schl} for the
general setting and further properties of model sets. Since the
star map is a monomorphism of Abelian groups for $d>2$ and since the
window is a bounded set, a $K$-algebraic model set
$\varLambda$ is periodic if and only if $d=2$, in which case $\varLambda$ is a
translate of the planar lattice $\mathcal{O}_K$.
\end{rem}
A real algebraic integer $\lambda$ is called a {\em Pisot-Vijayaraghavan number} ({\em
PV-number}\/) if $\lambda>1$ while all other conjugates of
$\lambda$ have moduli strictly less than $1$.
\begin{fact}\cite[Ch.~1, Thm.~ 2]{Sa}\label{pisot}
Every real algebraic number field contains a primitive element that is
a PV-number.\qed
\end{fact}
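For instance, the golden ratio $\tau=\tfrac{1+\sqrt{5}}{2}=1+\zeta_5+\bar{\zeta}_5$ is a PV-number and a primitive element of $\mathbb{Q}(\zeta_5)^+=\mathbb{Q}(\sqrt{5}\,)$: it is a root of $X^2-X-1$, one has $\tau>1$, and its only other conjugate $\tfrac{1-\sqrt{5}}{2}$ has modulus $\tfrac{\sqrt{5}-1}{2}<1$.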
Before we can show that $K$-algebraic model sets are
algebraic Delone sets, we need the following lemmata.
\begin{lem}\label{r2}
Let $\varLambda$ be a nonperiodic $K$-algebraic model set
with star map $.^{\star}$. Then, there is an algebraic integer
$\lambda\in K^+$ such that a suitable power of the $\mathbb{Z}$-module
endomorphism $m_{\lambda}^{\star}$ of $\mathcal{O}_K^{\star}$,
defined by $m_{\lambda}^{\star}(z^{\star})=(\lambda z)^{\star}$,
is contractive, i.e., there is an $l\in\mathbb{N}$ and a real number
$c \in (0,1)$ such that $\Arrowvert (m_{\lambda}^{\star})^l(z^{\star})\Arrowvert\leq c\, \Arrowvert z^{\star}\Arrowvert$ holds for all $z\in \mathcal{O}_{K}$.
\end{lem}
\begin{proof}
By Fact~\ref{pisot}, we may choose a
PV-number $\lambda$ of degree $d/2=[K^+:\mathbb{Q}]$ in
$K^+$, where $d=[K:\mathbb{Q}]\geq 4$ due to the
nonperiodicity; see Remark~\ref{modelrem}. Since all norms on
$\mathbb{C}^{s-1}\times\mathbb{R}^t \simeq \mathbb{R}^{d-2}$ are equivalent, it suffices to prove the
assertion in case of the maximum norm on $\mathbb{C}^{s-1}\times\mathbb{R}^t$ with
respect to the absolute value on $\mathbb{C}$ and $\mathbb{R}$, respectively, rather than considering the Euclidean norm itself. But in that case, the assertion
follows immediately with $l:=1$ and $$c:=\operatorname{max}\big\{\lvert \sigma_{j}(\lambda)\rvert\,\big|
\, j\in\{2,\dots,s+t\}\big\}\,,$$ since the set
$\{\sigma_{2}(\lambda),\dots,\sigma_{s+t}(\lambda)\}$
of
conjugates of $\lambda$ does not contain $\lambda$ itself. To see this, note
that $\sigma_j(\lambda)=\lambda$, where $j\in\{2,\dots,s+t\}$, implies
that $\sigma_j$ fixes $K^+$ whence $\sigma_j$ is the identity
or the complex conjugation, a contradiction; see~Definition~\ref{algmodel} and Remark~\ref{modelrem}.
\end{proof}
\begin{lem}\label{dense}
Let $\varLambda$ be a $K$-algebraic model set
with star map $.^{\star}$ and let $d:=[K:\mathbb{Q}]$. Then
$\mathcal{O}_{K}^{\star}$ is dense in $\mathbb{C}^{s-1}\times\mathbb{R}^t\simeq\mathbb{R}^{d-2}$.
\end{lem}
\begin{proof}
If $d=2$, one even has $\mathcal{O}_{K}^{\star}=\mathbb{C}^{s-1}\times\mathbb{R}^t=\{0\}$.
Otherwise, choose a PV-number $\lambda$
of degree $d/2$ in $K^+$; cf.~Fact~\ref{pisot}. Since
$\mathcal{O}_{K}$ is a full $\mathbb{Z}$-module in $K$, the set
$\{\lambda^kz\,|\,z\in\mathcal{O}_{K}\}$ is a full
$\mathbb{Z}$-module in $K$ for any $k\in\mathbb{N}$. Thus the set $$\{(\lambda^k
z,(m_{\lambda}^{\star})^k(z^{\star}))\,|\,z\in\mathcal{O}_{K}\}$$
is a (full) lattice in
$\mathbb{C}^s\times\mathbb{R}^t\simeq\mathbb{R}^d$ for any $k\in\mathbb{N}$, where
$m_{\lambda}^{\star}$ is the $\mathbb{Z}$-module
endomorphism of $\mathcal{O}_K^{\star}$ from Lemma~\ref{r2}; cf.~\cite[Ch.~2,
Sec.~3]{Bo}. In conjunction with Lemma~\ref{r2}, this implies that,
for any $\varepsilon>0$, the $\mathbb{Z}$-module $\mathcal{O}_{K}^{\star}$
contains an $\mathbb{R}$-basis of $\mathbb{C}^{s-1}\times\mathbb{R}^t$ whose elements have
norms $\leq\varepsilon$. The assertion follows.
\end{proof}
\begin{lem}\label{dilate}
Let $\varLambda$ be a $K$-algebraic model set.
Then, for any finite set $F\subset K$, there is a homothety $h$ of
the complex plane such that $h(F)\subset \varLambda$. Moreover, $h$
can be chosen such that $h(z)=
\kappa z + v$, where $\kappa\in K^+$ is an algebraic integer with $\kappa\geq 1$ and
$v\in\varLambda$.
\end{lem}
\begin{proof}
Without loss of generality, we may assume that $\varLambda$ is of the
form $\varLambda(W)$ (see Definition~\ref{algmodel}) and that
$F\neq\varnothing$. Note that there is an $l\in
\mathbb{N}$ such that $\{lz\,|\,z\in F\} \subset
\mathcal{O}_{K}$. Let $d:=[K:\mathbb{Q}]$ and let
$.^{\star}$ be the star map of $\varLambda$. If $d=2$, we are done by
setting $h(z):=lz$. Otherwise, since $W$ has nonempty interior, Lemma~\ref{dense} shows the existence of a
suitable $z_{0}\in \mathcal{O}_{K}$ with $z_{0}^{\star}\in W^{\circ}$. Consider the open neighbourhood $V:= W^{\circ} - z_{0}^{\star}$ of $0$ in
$\mathbb{C}^{s-1}\times\mathbb{R}^t$ and choose a PV-number $\lambda$
of degree $d/2$ in $K^+$;
cf.~Fact~\ref{pisot}. By virtue of Lemma~\ref{r2}, there is a
$k\in\mathbb{N}$ such
that $$(m_{\lambda}^{\star})^{k}\big((lF)^{\star}\big)\subset V\,.$$
It
follows that $\{(\lambda^{k} z + z_{0})^{\star}\, |\, z\in lF\}\subset
W^{\circ}$ and, further, that $h(F)\subset \varLambda$,
where $h$ is the homothety given by $z
\mapsto (l\lambda^{k}) z + z_{0}$. The additional statement follows
immediately from the observation that $z_0\in\varLambda$.
\end{proof}
\begin{prop}\label{cmsads}
$K$-algebraic model sets are algebraic Delone
sets. Moreover, any $K$-algebraic model set $\varLambda$
satisfies $K_{\varLambda}=K$.
\end{prop}
\begin{proof}
Since $K_{\varLambda}=K_{t+\varLambda}$ for any $t\in\mathbb{C}$, we may
assume that $\varLambda$ is of the
form $\varLambda(W)$ (see Definition~\ref{algmodel}). Any $K$-algebraic model set $\varLambda$ is a Delone
set by Remark~\ref{modelrem}. Property (Alg) follows from the
observation that $K_{\varLambda}\subset K$ (recall that
$\varLambda-\varLambda\subset\mathcal{O}_K$ and that $\overline{K}=K$). Further,
property (Hom) is an immediate consequence of Lemma~\ref{dilate}. Let
$\{\alpha_1,\dots,\alpha_d\}$ be a $\mathbb{Q}$-basis of $K/\mathbb{Q}$. By the additional statement of
Lemma~\ref{dilate} there is a nonzero element $\kappa\in K^+$ and a
point
$v\in\varLambda$ such
that the
$\mathbb{Q}$-linearly independent set
$\{\kappa\alpha_1,\dots,\kappa\alpha_d\}$ is contained in
$\varLambda-\{v\}\subset K_{\varLambda}$. Since
$K_{\varLambda}\subset K$, this shows that $K_{\varLambda}=K$.
\end{proof}
\begin{rem}\label{okdirections}
As another immediate consequence of Lemma~\ref{dilate}, one verifies
that, for any $K$-algebraic model set $\varLambda$, the set of
$\varLambda$-directions is precisely the set of
$\mathcal{O}_{K}$-directions.
\end{rem}
\begin{ex}\label{algex}
Standard examples of $n$-cyclotomic Delone sets are the $\mathbb{Q}(\zeta_n)$-algebraic
model sets, where $n\geq 3$, which from now on are called {\em $n$-cyclotomic
model sets}; cf.~Fact~\ref{gau} and Proposition~\ref{cmsads} (note
also that $\mathbb{Q}(\zeta_n)$ is obtained from $\mathbb{Q}(\zeta_n)^+$ by adjoining
the square root of the negative number $\zeta_n^2+\zeta_n^{-2}-2\in
\mathbb{Q}(\zeta_n)^+$, the latter being the discriminant of $X^2-(\zeta_n+\zeta_n^{-1})X+1$). These sets were also called {\em cyclotomic model sets with underlying
$\mathbb{Z}$-module $\mathbb{Z}[\zeta_n]$} in~\cite[Sec.~4.5]{H5}, since $\mathbb{Z}[\zeta_n]$
is the ring of integers in the $n$th cyclotomic field;
cf.~\cite[Thm.~ 2.6]{Wa}. These $n$-cyclotomic model sets
range from periodic examples like the fourfold square lattice ($n=4$)
or the sixfold triangular lattice ($n=3$) to nonperiodic
examples like the vertex set of the tenfold T\"ubingen triangle
tiling~\cite{bk1}, \cite{bk2} ($n=5$), the eightfold Ammann-Beenker tiling
of the plane~\cite{am}, \cite{bj}, \cite{ga} ($n=8$) or the twelvefold
shield tiling~\cite{ga}
($n=12$);
see~\cite[Fig.~1]{H4}, ~\cite[Fig.~2]{H5} and Fig.~\ref{fig:tilingupolygon} below for
illustrations. In general, for any divisor $m$
of $\operatorname{lcm}(n,2)$, one can choose the window such that the
corresponding $n$-cyclotomic
model sets have $m$-fold cyclic symmetry in the
sense of symmetries of LI-classes, meaning that a discrete structure
has a certain symmetry if the original and the transformed structure
are locally indistinguishable; cf.~\cite{B} for details. Note that the vertex sets of the famous Penrose tilings of the
plane fail to be $5$-cyclotomic model sets but can still be seen to be $5$-cyclotomic Delone sets; see~\cite{bh}
and references therein.
\end{ex}
\section{A cyclotomic theorem}\label{cyc}
\begin{defi}\label{fmddefi}
Let $m\geq 4$ be a natural number. Set
$$
D_{m}:=\big\{(k_1,k_2,k_3,k_4)\in \mathbb{N}^4 \,\big|\, k_3<k_1\leq
k_2<k_4\leq m-1 \mbox{ and } k_1+k_2=k_3+k_4\big\}
$$
and define the function $f_{m}\,:\, D_{m}\rightarrow \mathbb{C}^*$ by
\begin{equation}\label{fmd}
f_{m}(k_1,k_2,k_3,k_4):=\frac{(1-\zeta_{m}^{k_1})(1-\zeta_{m}^{k_2})}{(1-\zeta_{m}^{k_3})(1-\zeta_{m}^{k_4})}.
\end{equation}
We further set $\mathcal{C}_m:=f_m(D_m)$ (note that $\mathcal{C}_m\subset
\mathcal{C}_{m'}$ for any multiple $m'$ of $m$) and
$\mathcal{C}:=\bigcup_{m\geq 4}\mathcal{C}_m$. Moreover, for a subset
$K$ of $\mathbb{C}$, we set $\mathcal{C}(K):=\mathcal{C}\cap K$ and
$\mathcal{C}_m(K):=\mathcal{C}_m\cap K$.
\end{defi}
\begin{fact}\label{fmdg1}\cite[Lemma 3.1]{GG}
Let $m\geq 4$. The function
$f_{m}$ is real-valued. Moreover, one has $f_{m}(d)>1$ for all $d\in
D_{m}$.\qed
\end{fact}
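To illustrate these notions, note that $\zeta_{12}^{8}=\bar{\zeta}_{12}^{4}$ and $\zeta_{12}^{10}=\bar{\zeta}_{12}^{2}$, whence
$$
f_{12}(4,8,2,10)=\frac{|1-\zeta_{12}^{4}|^2}{|1-\zeta_{12}^{2}|^2}=\frac{2-2\cos\tfrac{2\pi}{3}}{2-2\cos\tfrac{\pi}{3}}=3\,,
$$
in accordance with Fact~\ref{fmdg1}; cf.~solution (iv) in Theorem~\ref{intersectq} below.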
For our application to discrete tomography, we shall below show the
{\em finiteness} of the
set $\mathcal{C}(L)$
for all real algebraic number fields $L$ and provide
explicit results in the three cases $\mathbb{Q}(\zeta_{5})^+=\mathbb{Q}(\sqrt{5})$,
$\mathbb{Q}(\zeta_8)^+=\mathbb{Q}(\sqrt{2})$ and
$\mathbb{Q}(\zeta_{12})^+=\mathbb{Q}(\sqrt{3})$. Gardner and Gritzmann showed the
following result for the field $\mathbb{Q}=\mathbb{Q}(\zeta_3)^+=\mathbb{Q}(\zeta_4)^+$.
\begin{theorem}\cite[Lemma 3.8, Lemma 3.9 and Thm.~ 3.10]{GG}\label{intersectq}
$$
\mathcal{C}(\mathbb{Q})=\mathcal{C}_{12}(\mathbb{Q})=\big\{\tfrac{4}{3},\tfrac{3}{2},2,3,4\big\}\,.
$$
Moreover, all solutions of $f_{m}(d)=q\in\mathbb{Q}$, where $m\geq 4$ and
$d\in D_{m}$, are either given, up to multiplication of $m$
and $d$ by the same factor, by $m=12$ and one of the following
$$
\begin{array}{rlrl}
\textnormal{(i)}&d=(6,6,4,8),q=\frac{4}{3};&\textnormal{(ii)}&d=(6,6,2,10),q=4;\\
\textnormal{(iii)}&d=(4,8,3,9),q=\frac{3}{2};&\textnormal{(iv)}&d=(4,8,2,10),q=3;\\
\textnormal{(v)}&d=(4,4,2,6),q=\frac{3}{2};&\textnormal{(vi)}&d=(8,8,6,10),q=\frac{3}{2};\\
\textnormal{(vii)}&d=(4,4,1,7),q=3;&\textnormal{(viii)}&d=(8,8,5,11),q=3;\\
\textnormal{(ix)}&d=(3,9,2,10),q=2;&\textnormal{(x)}&d=(3,3,1,5),q=2;\\
\textnormal{(xi)}&d=(9,9,7,11),q=2;&&
\end{array}
$$
or by one of the following
$$
\begin{array}{rl}
\textnormal{(xii)}&d=(2k,s,k,k+s),q=2, \mbox{ where } s\geq 2, m=2s \mbox{
and } 1\leq k\leq \frac{s}{2};\\
\textnormal{(xiii)}&d=(s,2k,k,k+s),q=2, \mbox{ where } s\geq 2, m=2s \mbox{
and } \frac{s}{2}\leq k< s.\qed
\end{array}
$$
\end{theorem}
The next three lemmata are the key tools for our approach.
\begin{lem}\label{l1}
Let $a\in\mathbb{R}^*$. If $a=\tfrac{1+x}{1+y}$ for $x,y\in\mu\cup\{0\}$ with
$y\neq -1$ then
$a\in\{\tfrac{1}{2},1,2\}$.
\begin{proof}
If $x=y=0$ then $a=1$, so it suffices to consider the cases $a=1+\omega$,
$a=\tfrac{1}{1+\omega}$ and $a=\tfrac{1+\omega_1}{1+\omega_2}$ with
$\omega,\omega_1,\omega_2\in\mu$ and $\omega,\omega_2\neq -1$. In the
first case, one has $\omega=a-1\in\mu(\mathbb{R})=\{\pm 1\}$ whence $\omega=1$ (due
to $a\neq 0$) and $a=2$. The second case reduces to the first one applied to $1/a$, whence $a=\tfrac{1}{2}$. In the third case, one has
$$
a=\bar{a}=\frac{1+\bar{\omega}_1}{1+\bar{\omega}_2}=\omega_2\omega_1^{-1}\frac{1+\omega_1}{1+\omega_2}=\omega_2\omega_1^{-1}a
$$
wherefore $\omega_1=\omega_2$ and $a=1$.
\end{proof}
\end{lem}
\begin{lem}[Comparison of coefficients]\label{l2}
Let $K\subset\mathbb{C}$ be a field, let $m\in\mathbb{N}$, and let $\zeta\in\mu$ with
$\zeta^m\in K$. Let
$a_0,\dots,a_{m-1},b_0,\dots,b_{m-1}\in K$ with
$$
\sum_{i=0}^{m-1}a_i\zeta^i=\sum_{i=0}^{m-1}b_i\zeta^i\,.
$$
Then one has $a_i=b_i$ for all $i=0,\dots,m-1$ if one of the following
conditions holds.
\begin{itemize}
\item[(a)]
$[K(\zeta) :K]=m$.
\item[(b)]
$[K(\zeta) :K]=m-1$ and at most $m-1$ of $a_0,\dots,a_{m-1},b_0,\dots,b_{m-1}$ are nonzero.
\end{itemize}
Moreover, if $[K(\zeta) :K]=m-1$ and $a_k-b_k\neq 0$ for some $k$ then
$|a_i-b_i|=|a_j-b_j|\neq 0$ for all $i,j$.
\end{lem}
\begin{proof}
In case (a), the assertion follows immediately from the linear independence
of $1,\zeta,\dots,\zeta^{m-1}$ over $K$. If $[K(\zeta) :K]=m-1$, set $\omega:=\zeta^m\in K$. The minimum
polynomial $f\in K[X]$ of $\zeta$ over $K$ has degree $m-1$ and one
has $X^{m}-\omega=(X-\epsilon)f$ with $\epsilon\in K$, hence
$\omega=\epsilon^m$ (in particular, $\epsilon \in\mu(K)$) and
$$
f=\frac{X^m-\epsilon^m}{X-\epsilon}=\sum_{i=0}^{m-1}\epsilon^{m-1-i}X^i\,.
$$
If $\sum_{i=0}^{m-1}(a_i-b_i)\zeta^i=0$ then there is an element $c\in K$ with
$a_i=b_i+c\epsilon^{m-1-i}$ for all $i=0,\dots,m-1$. By assumption
(b), at least $m+1$ of the $2m$ numbers $a_0,\dots,a_{m-1},b_0,\dots,b_{m-1}$ vanish, whence $a_i=0=b_i$ for some $i$. This implies $c=0$ and therefore
the assertion. For the additional statement, first observe that due to
$a_k\neq b_k$ for some $k$ one has $c\neq 0$. Thus
$|a_i-b_i|=|c\epsilon^{m-1-i}|=|c|=|c\epsilon^{m-1-j}|=|a_j-b_j|\neq
0$ for all $i,j$.
\end{proof}
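The restriction on the number of nonzero coefficients in condition~(b) cannot be dropped: for $K=\mathbb{Q}$, $\zeta=\zeta_3$ and $m=3$ one has $[K(\zeta):K]=2=m-1$ and
$$
1+\zeta_3+\zeta_3^2=0=0+0\cdot\zeta_3+0\cdot\zeta_3^2\,,
$$
although the coefficients on the two sides are different. Note that, in accordance with the additional statement of Lemma~\ref{l2}, one has $|a_i-b_i|=1$ for all $i$ in this example.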
\begin{lem}\label{l3}
Let $K\subset\mathbb{C}$ be a field, let $m\in\mathbb{N}$, and let $\zeta\in\mu$ with
$\zeta^m\in K$. Further, let
$\omega_1,\omega_2,\omega_3,\omega_4\in\mu(K)$ and
$k_1,k_2,k_3,k_4\in\{0,\dots,m-1\}$ satisfy the following conditions.
\begin{itemize}
\item
$\operatorname{gcd}(k_i,m)=1$ for some $i\in\{1,2,3,4\}$.
\item
$k_1+k_2\equiv k_3+k_4 \pmod m$
\item
$\omega_3\zeta^{k_3},\omega_4\zeta^{k_4}\neq 1$ and $a:=\frac{(1-\omega_1\zeta^{k_1})(1-\omega_2\zeta^{k_2})}{(1-\omega_3\zeta^{k_3})(1-\omega_4\zeta^{k_4})}\in
K\cap (\mathbb{R}^*\setminus\{\pm 1 \})$.
\end{itemize}
Then one has $a\in\{\tfrac{1}{2},2\}$ if one of the following
conditions holds.
\begin{itemize}
\item[(a)]
$[K(\zeta) :K]=m$ and $m\geq 3$.
\item[(b)]
$[K(\zeta) :K]=m-1$ and $m\geq 5$.
\end{itemize}
\end{lem}
\begin{proof}
Without restriction, we may assume that
$\operatorname{gcd}(k_1,m)=1$. Then, for $i=2,3,4$,
there are $a_i,b_i\in\mathbb{Z}$ such that $k_i=a_ik_1+b_i m$ and, with
$\zeta':=\zeta^{k_1}$, $\zeta^{k_i}=(\zeta')^{a_i}(\zeta^m)^{b_i}$. Since one has $(\zeta')^m\in K$, $K(\zeta')=K(\zeta)$ and
$$
\frac{(1-\omega_1\zeta^{k_1})(1-\omega_2\zeta^{k_2})}{(1-\omega_3\zeta^{k_3})(1-\omega_4\zeta^{k_4})}=\frac{(1-\omega'_1\zeta')(1-\omega'_2\zeta'^{k'_2})}{(1-\omega'_3\zeta'^{k'_3})(1-\omega'_4\zeta'^{k'_4})}
$$
for suitable $\omega'_1,\omega'_2,\omega'_3,\omega'_4\in\mu(K)$ and
$k'_2,k'_3,k'_4\in\{0,\dots,m-1\}$ with $1+k'_2\equiv k'_3+k'_4 \pmod
m$, we may further assume that
$k_1=1$. We thus obtain
$$
1-\omega_1\zeta-\omega_2\zeta^{k_2}+\omega_1\omega_2\zeta^{k_2+1}=a-a\omega_3\zeta^{k_3}-a\omega_4\zeta^{k_4}+a\omega_3\omega_4\zeta^{k_3+k_4}\,,
$$
where, without restriction, $k_3\leq k_4$. From now on, let $[k]\in\{0,\dots,m-1\}$ denote the canonical
representative of the equivalence class of $k\in\mathbb{Z}$ modulo
$m$. We may finally write
$$
1-\omega_1\zeta-\omega_2\zeta^{k_2}+\omega_1\omega_2\omega\zeta^{[k_2+1]}=a-a\omega_3\zeta^{k_3}-a\omega_4\zeta^{[1+k_2-k_3]}+a\omega_3\omega_4\omega'\zeta^{[k_2+1]}
$$
with $k_3\leq [1+k_2-k_3]$ and suitable $\omega,\omega'\in\mu(K)$.
{\bf Case 1.} $k_2=0$. Then
$$
1-\omega_1\zeta-\omega_2+\omega_1\omega_2\omega\zeta=a-a\omega_3\zeta^{k_3}-a\omega_4\zeta^{[1-k_3]}+a\omega_3\omega_4\omega'\zeta
$$
If $k_3=0$ then $a=\frac{1-\omega_2}{1-\omega_3}$ by Lemma~\ref{l2}
and the
assertion follows from Lemma~\ref{l1}. The case $k_3=1$ cannot occur
(due to $k_3\leq [1-k_3]$), whereas $k_3\geq 2$ implies $a=1-\omega_2$ by Lemma~\ref{l2}. The assertion follows from Lemma~\ref{l1}.
{\bf Case 2.} $k_2=1$. Then
$$
1-(\omega_1+\omega_2)\zeta+\omega_1\omega_2\omega\zeta^2=a-a\omega_3\zeta^{k_3}-a\omega_4\zeta^{[2-k_3]}+a\omega_3\omega_4\omega'\zeta^2
$$
If $k_3=0$ then $a=\frac{1}{1-\omega_3}$ by
Lemma~\ref{l2} and the assertion follows from
Lemma~\ref{l1}. If $k_3=1$ then Lemma~\ref{l2} implies $a=1$, which
is excluded by assumption. The
case $k_3=2$ is impossible (due to $k_3\leq [2-k_3]$). Let $k_3\geq 3$
(hence $m\geq 4$). Under condition~(a), this implies $a=1$ by
Lemma~\ref{l2}, which
is excluded by assumption. Under
condition~(b), $k_3= 3$ implies
$$
1-(\omega_1+\omega_2)\zeta+\omega_1\omega_2\omega\zeta^{2}=a-a\omega_3\zeta^{3}-a\omega_4\zeta^{m-1}+a\omega_3\omega_4\omega'\zeta^{2}
$$
with $m-1\geq 4$ (due to $m\geq 5$). The
additional statement of Lemma~\ref{l2} implies $m=5$ and $|1-a|=|a\omega_4|=|a|$, wherefore $a=1/2$. If $k_3\geq 4$
then $m\geq 6$ (due to $k_3\leq [2-k_3]$) and Lemma~\ref{l2} implies $a=1$, which
is excluded by assumption.
{\bf Case 3.} $k_2\in\{2,\dots,m-2\}$ (hence $m\geq 4$ and $2\leq
k_2<k_2+1\leq m-1$). Then
$$
(1-a)-\omega_1\zeta-\omega_2\zeta^{k_2}+(\omega_1\omega_2\omega-a\omega_3\omega_4\omega')\zeta^{k_2+1}=-a\omega_3\zeta^{k_3}-a\omega_4\zeta^{[1+k_2-k_3]}
$$
Under condition~(a), Lemma~\ref{l2} shows that this is impossible, since there are at least three
nontrivial coefficients on the left-hand side and at
most two nontrivial coefficients on the right-hand side of this
equation. Under condition~(b) (hence $m\geq 5$), $k_3=0$
implies $a-1=a\omega_3$ by Lemma~\ref{l2} wherefore
$a=\frac{1}{1-\omega_3}$ and the assertion follows from
Lemma~\ref{l1}. If $k_3=1$ then $a=1$ by Lemma~\ref{l2}, which
is excluded by assumption. If $k_3\geq 2$
and $m\geq 7$ then $a=1$ by Lemma~\ref{l2}, which
is excluded by assumption. Employing the additional statement of
Lemma~\ref{l2}, we shall now see that the
missing cases ($k_3\geq 2$ and $m\in\{5,6\}$) are either impossible or yield
$|1-a|=1$ and thus $a=2$ (due to $a\neq 0$). In fact, $m=5$
and $k_3= 2$ imply $k_2=3$ (due to $k_3\leq [1+k_2-k_3]$) and, further,
$|1-a|=|\omega_1|=1$. The case $m=5$
and $k_3= 3$ cannot occur (due to $k_3\leq [1+k_2-k_3]$). If $m=5$
and $k_3= 4$ then $k_2=2$ (due to $k_3\leq [1+k_2-k_3]$) and, further,
$|1-a|=|\omega_1|=1$. If $m=6$
and $k_3= 2$ then $k_2\in\{3,4\}$ (due to $k_3\leq [1+k_2-k_3]$). The
case $k_2= 3$ is impossible, whereas the case $k_2= 4$ yields $|1-a|=|\omega_1|=1$. The case $m=6$
and $k_3= 3$ is impossible (due to $k_3\leq [1+k_2-k_3]$). The case $m=6$
and $k_3= 4$ implies $k_2=2$ (due to $k_3\leq [1+k_2-k_3]$) and, further, $|1-a|=|\omega_1|=1$. Finally, the case $m=6$
and $k_3= 5$ implies $k_2= 3$ (due to $k_3\leq [1+k_2-k_3]$) and, once
again, $|1-a|=|\omega_1|=1$.
{\bf Case 4.} $k_2=m-1$. Then
$$
(1+\omega_1\omega_2\omega)-\omega_1\zeta-\omega_2\zeta^{m-1}=a(1+\omega_3\omega_4\omega')-a\omega_3\zeta^{k_3}-a\omega_4\zeta^{[m-k_3]}
$$
Under condition~(a), Lemma~\ref{l2} implies
$\{k_3,[m-k_3]\}=\{1,m-1\}$, wherefore $k_3=1$ and $[m-k_3]=m-1$ (due
to $k_3\leq [m-k_3]$). Further, Lemma~\ref{l2} yields
$a=\omega_2/\omega_4$, a contradiction (due to $|a|\neq 1$). By the
additional statement of Lemma~\ref{l2}, condition~(b) (hence $m\geq 5$)
implies $m=5$, $k_3=2$ and, further, $|a\omega_3|=|\omega_2|=1$, a contradiction
(due to $|a|\neq 1$).
\end{proof}
We are now in a position to prove the following extension of Theorem~\ref{intersectq}.
\begin{theorem}\label{t1}
For $n\in\mathbb{N}$, one has
$$\mathcal{C}(\mathbb{Q}(\zeta_n)^+)=\mathcal{C}_{\operatorname{lcm}(2n,12)}(\mathbb{Q}(\zeta_n)^+)\,.$$
In particular, the last set is finite. Moreover, all solutions of $f_{m}(d)\in\mathbb{Q}(\zeta_n)^+$, where $m\geq 4$ and
$d\in D_{m}$, are either of the form
(xii) or (xiii) of Theorem~\ref{intersectq} or are given, up to multiplication of $m$
and $d$ by the same factor, by $m=\operatorname{lcm}(2n,12)$ and $d$
from a finite list.
\end{theorem}
\begin{proof}
Since $\mathbb{Q}(\zeta_n)^+=\mathbb{Q}(\zeta_{2n})^+$ for odd $n$ it suffices to
consider the case where $n$ is even (hence
$\operatorname{lcm}(2n,12)=\operatorname{lcm}(2n,3)$). Let $m\geq 4$
and $d:=(k_1,k_2,k_3,k_4)\in D_{m}$ such that
$$a:=f_m(d)=\frac{(1-\zeta_{m}^{k_1})(1-\zeta_{m}^{k_2})}{(1-\zeta_{m}^{k_3})(1-\zeta_{m}^{k_4})}\in\mathbb{Q}(\zeta_n)^+$$
Recall that $a>1$ by Fact~\ref{fmdg1}. We may
assume that $\operatorname{gcd}(m,k_1,k_2,k_3,k_4)=1$. By virtue of Theorem~\ref{intersectq}, we
may also assume that $a\not\in\mathbb{Q}$. Observe that
$$a\in\mathbb{Q}(\zeta_n)^+\cap \mathbb{Q}(\zeta_m)^+= \mathbb{Q}(\zeta_{\operatorname{gcd}(m,n)})^+$$
Claims 1 and 2 below show that $\operatorname{lcm}(2n,3)$ is a
multiple of $m$, hence the assertion.
{\bf Claim 1.} Let $p$ be an odd prime number and assume that
$\operatorname{ord}_p(n)<\operatorname{ord}_p(m)$. Then one has $p=3$,
$\operatorname{ord}_p(m)=1$ and $\operatorname{ord}_p(n)=0$.
To see this, set $r:=\operatorname{ord}_p(m)\geq 1$,
$K:=\mathbb{Q}(\zeta_{m/p})$ and note that $
\mathbb{Q}(\zeta_{\operatorname{gcd}(m,n)})\subset K$. Let $m=p^rm'$,
where $\operatorname{gcd}(p,m')=1$. Then, for $i=1,2,3,4$,
there are $a_i,b_i\in\mathbb{Z}$ such that $k_i=a_ip+b_i m'$ and, further,
$\zeta_m^{k_i}=\zeta_{m/p}^{a_i}\zeta_{p^r}^{b_i}$. Since $\zeta_{p^r}^p=\zeta_{p^{r-1}}\in K$ one has
$$
a=\frac{(1-\omega_1\zeta_{p^r}^{l_1})(1-\omega_2\zeta_{p^r}^{l_2})}{(1-\omega_3\zeta_{p^r}^{l_3})(1-\omega_4\zeta_{p^r}^{l_4})}
$$
for suitable $\omega_1,\omega_2,\omega_3,\omega_4\in\mu(K)$ and
$l_1,l_2,l_3,l_4\in\{0,\dots,p-1\}$ with $\operatorname{gcd}(l_i,p)=1$
for some $i\in\{1,2,3,4\}$ and $l_1+l_2\equiv l_3+l_4 \pmod
p$. Further, by Fact~\ref{gau}, one has
$$
[K(\zeta_{p^r}):K]=[\mathbb{Q}(\zeta_{m}):\mathbb{Q}(\zeta_{m/p})]=\frac{\phi(p^r)}{\phi(p^{r-1})}=\left\{\begin{array}{ll}
p-1 & \mbox{if $r=1$;}\\
p & \mbox{if $r\geq 2$.}
\end{array}\right.
$$
Lemma~\ref{l3} (via condition~(b) in the case $r=1$, $p\geq 5$, and via condition~(a) in the case $r\geq 2$) implies that $a\in\{\tfrac{1}{2},2\}$, hence $a=2\in\mathbb{Q}$ by Fact~\ref{fmdg1}, a contradiction. Therefore $p=3$,
$r=\operatorname{ord}_p(m)=1$ and consequently $\operatorname{ord}_p(n)=0$.
{\bf Claim 2.} $\operatorname{ord}_2(m)\leq
\operatorname{ord}_2(n)+1$.
Assume that $r:=\operatorname{ord}_2(m)\geq
\operatorname{ord}_2(n)+2\geq 3$. Set
$K:=\mathbb{Q}(\zeta_{m/4})$ and note that $\mathbb{Q}(\zeta_{\operatorname{gcd}(m,n)})\subset
K$. As above, since $\zeta_{2^{r}}^4=\zeta_{2^{r-2}}\in K$, one has
$$
a=\frac{(1-\omega_1\zeta_{2^r}^{l_1})(1-\omega_2\zeta_{2^r}^{l_2})}{(1-\omega_3\zeta_{2^r}^{l_3})(1-\omega_4\zeta_{2^r}^{l_4})}
$$
for suitable $\omega_1,\omega_2,\omega_3,\omega_4\in\mu(K)$ and
$l_1,l_2,l_3,l_4\in\{0,1,2,3\}$ with $\operatorname{gcd}(l_i,4)=1$
for some $i\in\{1,2,3,4\}$ and $l_1+l_2\equiv l_3+l_4 \pmod
4$. Further, by Fact~\ref{gau}, one has
$$[K(\zeta_{2^{r}}):K]=[\mathbb{Q}(\zeta_{m}):\mathbb{Q}(\zeta_{m/4})]=\frac{\phi(2^r)}{\phi(2^{r-2})}=4\,.$$
Lemma~\ref{l3} (via condition~(a)) now implies $a\in\{\tfrac{1}{2},2\}$, hence $a=2\in\mathbb{Q}$, a contradiction. This proves the
claim.
\end{proof}
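For instance, since $\operatorname{lcm}(10,12)=60$, $\operatorname{lcm}(16,12)=48$ and $\operatorname{lcm}(24,12)=24$, Theorem~\ref{t1} yields
$$
\mathcal{C}(\mathbb{Q}(\zeta_5)^+)=\mathcal{C}_{60}(\mathbb{Q}(\zeta_5)^+),\quad
\mathcal{C}(\mathbb{Q}(\zeta_8)^+)=\mathcal{C}_{48}(\mathbb{Q}(\zeta_8)^+),\quad
\mathcal{C}(\mathbb{Q}(\zeta_{12})^+)=\mathcal{C}_{24}(\mathbb{Q}(\zeta_{12})^+)\,;
$$
these three cases are made explicit in Corollary~\ref{coro8125} below.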
\begin{rem}
Similar to the proof of Theorem~\ref{t1}, one can also use
Lemma~\ref{l3} to give another proof of the fact shown in~\cite{GG} that all solutions of $f_{m}(d)\in\mathbb{Q}\setminus\{2\}$, where $m\geq 4$ and
$d\in D_{m}$, are given, up to multiplication of $m$
and $d$ by the same factor, by $m=12$. Thus the number $2$ plays a special role
in this context. Indeed this number leads to infinite families of
solutions (see Theorem~\ref{intersectq}(xii)-(xiii) above) that can be
found by using the $2$-adic valuation;
cf.~\cite{GG} for details.
\end{rem}
One even has the following result, which improves~\cite[Thm.~ 4.19]{H5}.
\begin{theorem}\label{algcoro}
For any real algebraic number field $L$, the set
$\mathcal{C}(L)$ is finite. Moreover, there is a number
$m_L\in\mathbb{N}$ such that all solutions of $f_{m}(d)\in L$, where $m\geq 4$ and
$d\in D_{m}$, are either of the form
(xii) or (xiii) of Theorem~\ref{intersectq} or are given, up to multiplication of $m$
and $d$ by the same factor, by $m=m_L$ and $d$
from a finite list.
\end{theorem}
\begin{proof}
Since the extension $L/\mathbb{Q}$ is finite, the subfield $L\cap\mathbb{Q}(\mu)^+$ of $L$ is generated over $\mathbb{Q}$ by finitely many elements. Together with the identity
$$\mathbb{Q}(\mu)^+=\bigcup_{n\in\mathbb{N}}\mathbb{Q}(\zeta_n)^+\,,$$ in which the union is directed, this implies that $L \cap \mathbb{Q}(\mu)^+ = L\cap \mathbb{Q}(\zeta_n)^+$ for
some $n\in\mathbb{N}$. Since $\mathcal{C} \subset \mathbb{Q}(\mu)^+$ by Fact~\ref{fmdg1} it follows that
$$
\mathcal{C}(L) = L \cap \mathcal{C} = L \cap \mathcal{C} \cap \mathbb{Q}(\mu)^+= L\cap \mathcal{C}\cap \mathbb{Q}(\zeta_n)^+ \subset
\mathcal{C}(\mathbb{Q}(\zeta_n)^+)\,.
$$
By virtue of Theorem~\ref{t1}, the assertion follows with $m_L:=\operatorname{lcm}(2n,12)$.
\end{proof}
\begin{coro}\label{coro8125}
\begin{itemize}
\item[(a)]
\begin{eqnarray*}
\mathcal{C}(\mathbb{Q}(\sqrt{5}))&=&\mathcal{C}_{60}(\mathbb{Q}(\sqrt{5}))\\&=&
\big\{\tfrac{10-2\sqrt{5}}{5},\tfrac{\sqrt{5}}{2},\tfrac{9-3\sqrt{5}}{2},\tfrac{5+3\sqrt{5}}{10},\tfrac{5+\sqrt{5}}{6},-1+\sqrt{5},\tfrac{3+\sqrt{5}}{4},\tfrac{4}{3},
\tfrac{5-\sqrt{5}}{2},\tfrac{5+\sqrt{5}}{5},\\
&&\hphantom{\big\{}\tfrac{3}{2},6-2\sqrt{5},\tfrac{1+\sqrt{5}}{2},\tfrac{5+\sqrt{5}}{4},\tfrac{-3+3\sqrt{5}}{2},\tfrac{5+2\sqrt{5}}{5},2,\tfrac{2+\sqrt{5}}{2},\tfrac{15+3\sqrt{5}}{10},\sqrt{5},\\
&&\hphantom{\big\{}\tfrac{3+\sqrt{5}}{2},\tfrac{10+2\sqrt{5}}{5},3,1+\sqrt{5},\tfrac{5+\sqrt{5}}{2},4,2+\sqrt{5},3+\sqrt{5},\tfrac{5+3\sqrt{5}}{2},\tfrac{7+3\sqrt{5}}{2},\\
&&\hphantom{\big\{} \tfrac{9+3\sqrt{5}}{2},5+2\sqrt{5},6+2\sqrt{5}\big\}
\end{eqnarray*}
Moreover, all solutions of $f_{m}(d)\in\mathbb{Q}(\sqrt{5})$, where $m\geq 4$ and
$d\in D_{m}$, are either of the form
(xii) or (xiii) of Theorem~\ref{intersectq} or are given, up to multiplication of $m$
and $d$ by the same factor, by $m=60$ and $d$ from the following list.
$$
\begin{tabular}{|r|c|r|c|r|c|r|c|}
\hline
$1$&$(12, 36, 6, 42)$&$2$&$(24, 24, 9, 39)$&$3$& $(24, 48, 18, 54)$&$4$& $(36, 36, 21, 51)$\\\hline $5$& $(24, 24, 18,
30)$&$6$& $(36, 36, 30, 42)$&$7$& $(4, 8, 2, 10)$&$8$& $(5, 25, 3, 27)$\\\hline $9$& $(6, 42, 4, 44)$&$10$&
$(8, 14, 4, 18)$&$11$& $(8, 32, 5, 35)$&$12$& $(8, 50, 6, 52)$\\\hline $13$& $(9, 21, 5, 25)$&$14$& $(9,
39, 6, 42)$&$15$& $(10, 10, 4, 16)$&$16$& $(10, 28, 6, 32)$\\\hline $17$& $(10, 52, 8, 54)$&$18$& $(12,
18, 6, 24)$&$19$& $(14, 26, 8, 32)$&$20$& $(14, 34, 9, 39)$\\\hline $21$& $(14, 42, 10, 46)$&$22$& $(16,
32, 10, 38)$&$23$& $(18, 18, 8, 28)$&$24$& $(18, 26, 10, 34)$\\\hline $25$& $(18, 36, 12, 42)$&$26$&
$(18, 46, 14, 50)$&$27$& $(18, 54, 16, 56)$&$28$& $(21, 51, 18, 54)$\\\hline $29$& $(24, 24, 12,
36)$&$30$& $(24, 42, 18, 48)$&$31$& $(26, 32, 16, 42)$&$32$& $(26, 46, 21, 51)$\\\hline $33$& $(28, 34,
18, 44)$&$34$& $(28, 44, 22, 50)$&$35$& $(28, 52, 25, 55)$&$36$& $(32, 50, 28, 54)$\\\hline $37$& $(34,
42, 26, 50)$&$38$& $(34, 46, 28, 52)$&$39$& $(35, 55, 33, 57)$&$40$& $(36, 36, 24, 48)$\\\hline $41$&
$(39, 51, 35, 55)$&$42$& $(42, 42, 32, 52)$&$43$& $(42, 48, 36, 54)$&$44$& $(46, 52, 42,
56)$\\\hline $45$& $(50, 50, 44, 56)$&$46$& $(52, 56, 50, 58)$&$47$& $(18, 18, 6, 30)$&$48$& $(42, 42,
30, 54)$\\\hline $49$& $(4, 52, 2, 54)$&$50$& $(5, 35, 2, 38)$&$51$& $(6, 18, 2, 22)$&$52$& $(8, 10, 2,
16)$\\\hline $53$& $(8, 28, 3, 33)$&$54$& $(8, 46, 4, 50)$&$55$& $(8, 56, 6, 58)$&$56$& $(9, 21, 3, 27)$\\\hline $57$&
$(9, 39, 4, 44)$&$58$& $(10, 32, 4, 38)$&$59$& $(10, 50, 6, 54)$&$60$& $(12, 42, 6, 48)$\\\hline $61$&
$(14, 18, 4, 28)$&$62$& $(14, 26, 5, 35)$&$63$& $(14, 34, 6, 42)$&$64$& $(14, 52, 10, 56)$\\\hline $65$&
$(16, 28, 6, 38)$&$66$& $(18, 24, 6, 36)$&$67$& $(18, 34, 8, 44)$&$68$& $(18, 42, 10, 50)$\\\hline $69$&
$(18, 48, 12, 54)$&$70$& $(21, 51, 16, 56)$&$71$& $(24, 36, 12, 48)$&$72$& $(25, 55, 22,
58)$\\\hline $73$& $(26, 28, 10, 44)$&$74$& $(26, 42, 16, 52)$&$75$& $(26, 46, 18, 54)$&$76$& $(28, 50,
22, 56)$\\\hline $77$& $(32, 34, 16, 50)$&$78$& $(32, 44, 22, 54)$&$79$& $(32, 52, 27, 57)$&$80$& $(34,
46, 25, 55)$\\\hline $81$& $(36, 42, 24, 54)$&$82$& $(39, 51, 33, 57)$&$83$& $(42, 46, 32, 56)$&$84$&
$(42, 54, 38, 58)$\\\hline $85$& $(50, 52, 44, 58)$&$86$& $(12, 12, 2, 22)$&$87$& $(12, 24, 3, 33)$&$88$&
$(12, 36, 4, 44)$\\\hline $89$& $(12, 48, 6, 54)$&$90$& $(24, 24, 6, 42)$&$91$& $(24, 36, 10, 50)$&$92$&
$(24, 48, 16, 56)$\\\hline $93$& $(36, 36, 18, 54)$&$94$& $(36, 48, 27, 57)$&$95$& $(48, 48, 38,
58)$&$96$& $(8, 28, 6, 30)$\\\hline $97$& $(14, 26, 10, 30)$&$98$& $(18, 24, 12, 30)$&$99$& $(18, 42, 15,
45)$&$100$& $(32, 52, 30, 54)$\\\hline $101$& $(34, 46, 30, 50)$&$102$& $(36, 42, 30, 48)$&$103$& $(12, 24,
6, 30)$&$104$& $(24, 36, 15, 45)$\\\hline $105$& $(36, 48, 30, 54)$&$106$& $(15, 45, 12, 48)$&$107$& $(18,
30, 12, 36)$&$108$& $(30, 42, 24, 48)$\\\hline $109$& $(24, 36, 20, 40)$&$110$& $(10, 30, 8, 32)$&$111$&
$(15, 15, 9, 21)$&$112$& $(18, 30, 14, 34)$\\\hline $113$& $(24, 30, 18, 36)$&$114$& $(30, 36, 24,
42)$&$115$& $(30, 42, 26, 46)$&$116$& $(30, 50, 28, 52)$\\\hline $117$& $(45, 45, 39, 51)$&$118$& $(15, 15,
3, 27)$&$119$& $(18, 30, 6, 42)$&$120$& $(30, 42, 18, 54)$\\\hline $121$& $(45, 45, 33, 57)$&$122$& $(8, 32,
2, 38)$&$123$& $(14, 34, 4, 44)$&$124$& $(18, 18, 3, 33)$\\\hline $125$& $(18, 36, 6, 48)$&$126$& $(24, 42,
12, 54)$&$127$& $(26, 46, 16, 56)$&$128$& $(28, 52, 22, 58)$\\\hline $129$& $(42, 42, 27, 57)$&$130$& $(10,
30, 2, 38)$&$131$& $(15, 45, 6, 54)$&$132$& $(18, 30, 4, 44)$\\\hline $133$& $(24, 30, 6, 48)$&$134$& $(30,
36, 12, 54)$&$135$& $(30, 42, 16, 56)$&$136$& $(30, 50, 22, 58)$\\\hline $137$& $(24, 36, 6, 54)$&$138$&
$(30, 30, 6, 54)$&$139$& $(30, 30, 18, 42)$&$140$& $(12, 12, 6, 18)$\\\hline $141$& $(12, 24, 8, 28)$&$142$&
$(12, 36, 9, 39)$&$143$& $(12, 48, 10, 50)$&$144$& $(24, 24, 14, 34)$\\\hline $145$& $(24, 36, 18,
42)$&$146$& $(24, 48, 21, 51)$&$147$& $(36, 36, 26, 46)$&$148$& $(36, 48, 32, 52)$\\\hline $149$& $(48, 48,
42, 54)$&$150$& $(14, 26, 2, 38)$&$151$& $(18, 42, 6, 54)$&$152$& $(34, 46, 22, 58)$\\\hline $153$& $(14, 34,
12, 36)$&$154$& $(18, 18, 12, 24)$&$155$& $(26, 46, 24, 48)$&$156$& $(42, 42, 36, 48)$\\\hline $157$& $(18,
42, 12, 48)$&$158$& $(20, 20, 2, 38)$&$159$& $(20, 40, 6, 54)$&$160$& $(40, 40, 22, 58)$\\\hline $161$& $(20,
20, 8, 32)$&$162$& $(40, 40, 28, 52)$&$163$& $(20, 20, 14, 26)$&$164$& $(20, 40, 18, 42)$\\\hline $165$&
$(40, 40, 34, 46)$&$166$& $(20, 40, 12, 48)$&$167$& $(24, 24, 4, 44)$&$168$& $(36, 36, 16,
56)$\\\hline $169$& $(30, 30, 12, 48)$&$170$& $(30, 30, 24, 36)$&$171$&$(30,30,20,40)$&$172$&$(30,30,10,50)$\\\hline $173$&$(20,40,15,45)$&$174$&$(20,40,10,50)$&$175$&$(20,20,10,30)$&$176$&$(40,40,30,50)$\\\hline $177$&$(20,20,5,35)$&$178$&$(40,40,25,55)$&$179$&$(15,45,10,50)$&$180$&$(15,15,5,25)$\\\hline $181$&$(45,45,35,55)$&&&&&&\\
\hline
\end{tabular}
$$
\item[(b)]
\begin{eqnarray*}
\mathcal{C}(\mathbb{Q}(\sqrt{2}))&=&\mathcal{C}_{48}(\mathbb{Q}(\sqrt{2}))\\&=&
\big\{\tfrac{2+\sqrt{2}}{3},4-2\sqrt{2},\tfrac{1+\sqrt{2}}{2},-3+3\sqrt{2},\tfrac{4}{3},\sqrt{2},\tfrac{3}{2},\tfrac{2+\sqrt{2}}{2},2,1+\sqrt{2},
\\
&&\hphantom{\big\{}3,2+\sqrt{2}, 4,\tfrac{6+3\sqrt{2}}{2},3+2\sqrt{2},4+2\sqrt{2},4+3\sqrt{2}\big\}\,.
\end{eqnarray*}
Moreover, all solutions of $f_{m}(d)\in\mathbb{Q}(\sqrt{2})$, where $m\geq 4$ and
$d\in D_{m}$, are either of the form
(xii) or (xiii) of Theorem~\ref{intersectq} or are given, up to multiplication of $m$
and $d$ by the same factor, by $m=48$ and $d$ from the following list.
$$
\begin{tabular}{|r|c|r|c|r|c|r|c|}
\hline
$1$&$(6, 18, 4, 20)$&$2$& $(10, 36, 8, 38)$&$3$& $(12, 12, 6, 18)$&$4$& $(12, 22, 8,
26)$\\\hline $5$& $(12, 30, 9, 33)$&$6$& $(12, 38, 10, 40)$&$7$& $(18, 18, 10, 26)$&$8$& $(18, 24,
12, 30)$\\\hline $9$& $(18, 36, 15, 39)$&$10$& $(24, 30, 18, 36)$&$11$& $(26, 36, 22, 40)$&$12$& $(30,
30, 22, 38)$\\\hline $13$& $(30, 42, 28, 44)$&$14$& $(36, 36, 30, 42)$&$15$& $(4, 10, 2, 12)$&$16$& $(8,
40, 6, 42)$\\\hline $17$& $(9, 33, 6, 36)$&$18$& $(10, 26, 6, 30)$&$19$& $(12, 18, 6, 24)$&$20$& $(15,
39, 12, 42)$\\\hline $21$& $(18, 30, 12, 36)$&$22$& $(20, 26, 12, 34)$&$23$& $(22, 28, 14,
36)$&$24$& $(22, 38, 18, 42)$\\\hline $25$& $(30, 36, 24, 42)$&$26$& $(38, 44, 36, 46)$&$27$& $(10, 22,
8, 24)$&$28$& $(18, 18, 12, 24)$\\\hline $29$& $(26, 38, 24, 40)$&$30$& $(30, 30, 24, 36)$&$31$& $(18,
30, 16, 32)$&$32$& $(4, 38, 2, 40)$\\\hline $33$& $(8, 8, 2, 14)$&$34$& $(9, 15, 3, 21)$&$35$& $(10, 22,
4, 28)$&$36$& $(10, 44, 8, 46)$\\\hline $37$& $(12, 30, 6, 36)$&$38$& $(18, 18, 6, 30)$&$39$& $(18, 36,
12, 42)$&$40$& $(20, 22, 8, 34)$\\\hline $41$& $(26, 28, 14, 40)$&$42$& $(26, 38, 20, 44)$&$43$& $(30,
30, 18, 42)$&$44$& $(33, 39, 27, 45)$\\\hline $45$& $(40, 40, 34, 46)$&$46$& $(6, 30, 2,
34)$&$47$& $(10, 12, 2, 20)$&$48$& $(12, 18, 3, 27)$\\\hline $49$& $(12, 26, 4, 34)$&$50$& $(12, 36, 6,
42)$&$51$& $(18, 24, 6, 36)$&$52$& $(18, 30, 8, 40)$\\\hline $53$& $(18, 42, 14, 46)$&$54$& $(22, 36,
14, 44)$&$55$& $(24, 30, 12, 42)$&$56$& $(30, 36, 21, 45)$\\\hline $57$& $(36, 38, 28, 46)$&$58$& $(10,
26, 2, 34)$&$59$& $(18, 30, 6, 42)$&$60$& $(22, 38, 14, 46)$\\\hline $61$& $(12, 24, 2, 34)$&$62$& $(24,
24, 6, 42)$&$63$& $(24, 36, 14, 46)$&$64$& $(12, 24, 10, 26)$\\\hline $65$& $(24, 24, 18,
30)$&$66$& $(24, 36, 22, 38)$&$67$& $(16, 16, 10, 22)$&$68$& $(32, 32, 26, 38)$\\\hline $69$& $(18, 18,
2, 34)$&$70$& $(30, 30, 14, 46)$&$71$& $(16, 32, 6, 42)$&$72$&$(24,24,16,32)$\\\hline $73$&$(24,24,8,40)$&$74$&$(16,32,12,36)$&$75$&$(16,32,8,40)$&$76$&$(16,16,8,24)$\\\hline $77$&$(32,32,24,40)$&$78$&$(16,16,4,28)$&$79$&$(32,32,20,44)$&$80$&$(12,36,8,40)$\\\hline $81$&$(12,12,4,20)$&$82$&$(36,36,28,44)$&&&&\\
\hline
\end{tabular}
$$
\item[(c)]
\begin{eqnarray*}
\mathcal{C}(\mathbb{Q}(\sqrt{3}))&=&\mathcal{C}_{24}(\mathbb{Q}(\sqrt{3}))\\&=&
\big\{8-4\sqrt{3},\tfrac{3+2\sqrt{3}}{6},\tfrac{-3+3\sqrt{3}}{2},\tfrac{3+\sqrt{3}}{4},\tfrac{2+\sqrt{3}}{3},3-\sqrt{3},\tfrac{4}{3},\tfrac{1+\sqrt{3}}{2},-2+2\sqrt{3},\\
&&\hphantom{\big\{}
\tfrac{3}{2},\tfrac{3+\sqrt{3}}{3},\sqrt{3},\tfrac{2+\sqrt{3}}{2},2,\tfrac{3+2\sqrt{3}}{3},\tfrac{3+\sqrt{3}}{2},1+\sqrt{3},3,\tfrac{6+2\sqrt{3}}{3},2+\sqrt{3},4,\\
&&\hphantom{\big\{}3+\sqrt{3},\tfrac{5+3\sqrt{3}}{2},3+2\sqrt{3},4+2\sqrt{3},6+3\sqrt{3}, 7+4\sqrt{3},8+4\sqrt{3}\big\}\,.
\end{eqnarray*}
Moreover, all solutions of $f_{m}(d)\in\mathbb{Q}(\sqrt{3})$, where $m\geq 4$ and
$d\in D_{m}$, are either of the form
(xii) or (xiii) of Theorem~\ref{intersectq} or are given, up to multiplication of $m$
and $d$ by the same factor, by $m=24$ and $d$ from the following list.
$$
\begin{tabular}{|r|c|r|c|r|c|r|c|}
\hline
$1$&$(4, 8, 2, 10)$&$2$&$(6, 16, 4, 18)$&$3$&$(8, 10, 4, 14)$&$4$&$(8, 14, 5, 17)$\\\hline $5$&$(8,
18, 6, 20)$&$6$&$(10, 16, 7, 19)$&$7$&$(14, 16, 10, 20)$&$8$&$(16, 20, 14, 22)$\\\hline $9$&$(3,
15, 2, 16)$&$10$&$(4, 6, 2, 8)$&$11$&$(6, 14, 4, 16)$&$12$&$(9, 21, 8, 22)$\\\hline $13$&$(10, 12, 6,
16)$&$14$&$(10, 18, 8, 20)$&$15$&$(12, 14, 8, 18)$&$16$&$(18, 20, 16, 22)$\\\hline $17$&$(10, 14, 8,
16)$&$18$&$(4, 4, 2, 6)$&$19$&$(4, 14, 3, 15)$&$20$&$(5, 7, 3, 9)$\\\hline $21$&$(5, 17, 4, 18)$&$22$&$(6,
10, 4, 12)$&$23$&$(7, 19, 6, 20)$&$24$&$(8, 14, 6, 16)$\\\hline $25$&$(10, 10, 6, 14)$&$26$&$(10, 16,
8, 18)$&$27$&$(10, 20, 9, 21)$&$28$&$(14, 14, 10, 18)$\\\hline $29$&$(14, 18, 12, 20)$&$30$&$(17, 19,
15, 21)$&$31$&$(20, 20, 18, 22)$&$32$&$(4, 10, 2, 12)$\\\hline $33$&$(10, 14, 6, 18)$&$34$&$(14, 20,
12, 22)$&$35$&$(4, 16, 2, 18)$&$36$&$(6, 8, 2, 12)$\\\hline $37$&$(8, 10, 3, 15)$&$38$&$(8, 14, 4,
18)$&$39$&$(8, 20, 6, 22)$&$40$&$(10, 16, 6, 20)$\\\hline $41$&$(14, 16, 9, 21)$&$42$&$(16, 18, 12,
22)$&$43$&$(8, 10, 6, 12)$&$44$&$(14, 16, 12, 18)$\\\hline $45$&$(3, 9, 1, 11)$&$46$&$(4, 18, 2, 20)$&$47$&
$(6, 10, 2, 14)$&$48$&$(6, 20, 4, 22)$\\\hline $49$&$(10, 12, 4, 18)$&$50$&$(12, 14, 6, 20)$ &$51$&
$(14, 18, 10, 22)$&$52$&$(15, 21, 13, 23)$\\\hline $53$&$(4, 10, 1, 13)$&$54$&$(4, 20, 2, 22)$&$55$&
$(5, 7, 1, 11)$&$56$&$(5, 17, 2, 20)$\\\hline $57$&$(6, 14, 2, 18)$&$58$&$(7, 19, 4, 22)$&$59$&$(8,
10, 2, 16)$&$60$&$(10, 14, 4, 20)$\\\hline $61$&$(10, 18, 6, 22)$&$62$&$(14, 16, 8, 22)$&$63$&$(14,
20, 11, 23)$&$64$&$(17, 19, 13, 23)$\\\hline $65$&$(6, 8, 1, 13)$&$66$&$(6, 16, 2, 20)$&$67$&$(8, 12,
2, 18)$&$68$&$(8, 18, 4, 22)$\\\hline $69$&$(12, 16, 6, 22)$&$70$&$(16, 18, 11, 23)$&$71$&$(6, 8, 4,
10)$&$72$&$(6, 16, 5, 17)$\\\hline $73$&$(8, 12, 6, 14)$&$74$&$(8, 18, 7, 19)$&$75$&$(12, 16, 10,
18)$&$76$&$(16, 18, 14, 20)$\\\hline $77$&$(6, 12, 4, 14)$&$78$&$(12, 18, 10, 20)$&$79$&$(6, 18, 2,
22)$&$80$&$(10, 12, 2, 20)$\\\hline $81$&$(12, 14, 4, 22)$&$82$&$(8, 14, 2, 20)$&$83$&$(10, 16, 4,
22)$&$84$&$(8, 16, 2, 22)$\\\hline $85$&$(10, 14, 2, 22)$&$86$&$(12, 12, 2, 22)$&$87$&$(12, 12, 10,
14)$&$88$&$(4, 14, 2, 16)$\\\hline $89$&$(10, 10, 4, 16)$&$90$&$(10, 20, 8, 22)$&$91$&$(14, 14, 8,
20)$&$92$&$(6, 12, 2, 16)$\\\hline $93$&$(12, 18, 8, 22)$&$94$&$(8, 8, 6, 10)$&$95$&$(16, 16, 14,
18)$&$96$&$(10, 10, 2, 18)$\\\hline $97$&$(14, 14, 6, 22)$&$98$&$(10, 10, 8, 12)$&$99$&$(14, 14, 12, 16)$&$100$&$(12,12,8,16)$\\\hline $101$&$(12,12,4,20)$&$102$&$(8,16,6,18)$&$103$&$(8,16,4,20)$&$104$&$(8,8,4,12)$\\\hline $105$&$(16,16,12,20)$&$106$&$(8,8,2,14)$&$107$&$(16,16,10,22)$&$108$&$(6,18,4,20)$\\\hline $109$&$(6,6,2,10)$&$110$&$(18,18,14,22)$&&&&\\
\hline
\end{tabular}
$$
\end{itemize}
\end{coro}
\begin{proof}
Applying Theorem~\ref{t1} to the cases $n=5,8,12$, the assertions
follow from a direct computation. Note that in each case the last eleven
entries of the lists above derive from (i)-(xi) of Theorem~\ref{intersectq}.
\end{proof}
\section{Determination of convex subsets of algebraic Delone
sets by X-rays}
\begin{defi}\label{xray..}
\begin{itemize}
\item[(a)]
Let $F$ be a finite subset of $\mathbb{C}$, let $u\in
\mathbb{S}^{1}$ be a direction, and let $\mathcal{L}_{u}$ be the set
of lines in the complex plane in direction $u$. Then the {\em
(discrete parallel)}\/ {\em X-ray} of $F$ {\em in direction} $u$ is
the function $X_{u}F: \mathcal{L}_{u} \rightarrow
\mathbb{N}_{0}:=\mathbb{N} \cup\{0\}$, defined by $$X_{u}F(\ell) :=
\operatorname{card}(F \cap \ell\,)\,.$$
\item[(b)]
Let $\mathcal{F}$ be a collection of finite subsets of
$\mathbb{C}$ and let $U\subset\mathbb{S}^{1}$ be a finite set
of directions. We say that the elements
of $\mathcal{F}$ are {\em determined} by the X-rays in the directions of $U$ if, for all $F,F' \in \mathcal{F}$, one has
$$
(X_{u}F=X_{u}F'\;\,\forall u \in U) \; \Rightarrow\; F=F'\,.
$$
\end{itemize}
\end{defi}
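
To make the definition concrete, the following small sketch (our own illustration, in Python with no external libraries; the function name \texttt{xray} and the sample configuration are ours) computes the discrete parallel X-ray of a finite planar point set, encoded as complex numbers, in a given direction by grouping the points according to the line of direction $u$ on which they lie.
\begin{verbatim}
from collections import Counter

def xray(points, direction):
    # points: finite collection of complex numbers; direction: nonzero complex u.
    # Two points z, w lie on the same line of direction u iff Im((z-w)*conj(u)) = 0,
    # so the value Im(z*conj(u)) labels the line through z; counting labels gives X_u F.
    labels = [(z * direction.conjugate()).imag for z in points]
    return Counter(labels)

# Example: a 3x3 patch of the square lattice Z[i], direction parallel to 1+1j.
F = [complex(a, b) for a in range(3) for b in range(3)]
print(xray(F, 1 + 1j))   # counts 1, 2, 3, 2, 1 along the five occupied diagonals
\end{verbatim}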
The following negative result shows that, for algebraic Delone sets $\varLambda$, one has to impose some restriction on the finite subsets of
$\varLambda$ to
be determined. The proof only needs property (Hom).
\begin{fact}\cite[Prop.~ 3.1 and Remark 3.2]{H5}\label{source}
Let $\varLambda$ be an algebraic Delone set and let
$U\subset\mathbb{S}^{1}$ be a finite set of pairwise nonparallel
$\varLambda$-directions. Then the finite subsets of $\varLambda$ are
not determined by the X-rays in the directions of $U$. \qed
\end{fact}
Here, we shall focus on the convex subsets of algebraic Delone
sets. One has the following fundamental
result which even holds for Delone sets $\varLambda$ with property
(Hom). See Figure~\ref{fig:tilingupolygon} for an
illustration of the direction
(i)$\Rightarrow$(ii).
\begin{fact}\cite[Prop.~ 4.6 and Lemma 4.5]{H5}\label{characungen}
Let $\varLambda$ be an algebraic Delone set and let $U\subset\mathbb{S}^{1}$ be a set of two or more pairwise nonparallel $\varLambda$-directions. The following statements are equivalent:
\begin{itemize}
\item[(i)]
The convex subsets of $\varLambda$ are determined by the X-rays in the directions of $U$.
\item[(ii)]
There is no $U$-polygon in $\varLambda$.
\end{itemize}
In addition, if $\operatorname{card}(U)<4$, then there is a $U$-polygon in $\varLambda$. \qed
\end{fact}
The proof of the following central result uses Darboux's theorem on second
midpoint polygons; see~\cite{D}, ~\cite{GM} or~\cite[Ch.~1]{G}.
\begin{fact}\cite[Prop.~ 4.2]{GG}\label{uaffine}
Let $U\subset\mathbb{S}^1$ be a finite set of directions. Then there
exists a $U$-polygon if and only if there is an affinely regular
polygon such that each direction in $U$ is parallel to one of its edges. \qed
\end{fact}
\begin{rem}\label{urem}
Clearly, $U$-polygons have an even number of vertices. Moreover, an
affinely regular polygon with an even number of vertices is a
$U$-polygon if and only if each direction of $U$ is parallel to one of
its edges. On the other hand, it is important to note that a $U$-polygon need not be affinely regular, even if it is a
$U$-polygon in an algebraic Delone set. For example, there is a
$U$-icosagon in the vertex set of the
T\"ubingen triangle tiling of the plane (a $5$-cyclotomic
model set; see~\cite[Fig.~1,
Corollary 14 and Example 15]{H4}), which cannot be affinely regular
since that restricts the number of vertices to $3$, $4$, $5$, $6$ or $10$ by~\cite[Corollary 4.2]{H3}; see also~\cite[Example
4.3]{GG} for an example in the case of the square lattice. In
general, there is an affinely regular polygon with $n\geq 3$ vertices in
an algebraic Delone set $\varLambda$ if and only if
$\mathbb{Q}(\zeta_n)^+\subset K_{\varLambda}^+$, the latter being a relation
which (due to property (Alg)) can only hold
for finitely many values of $n$; cf.~\cite[Thm.~3.3]{H3}.
\end{rem}
We can now prove our main result on $U$-polygons which is an extension
of~\cite[Thm.~
4.5]{GG}. In fact, we use the same
arguments as introduced by Gardner and Gritzmann in conjunction with
Fact~\ref{crkn4gen} and Theorem~\ref{algcoro}. Note that the result
even holds for arbitrary sets $\varLambda$ with property (Alg).
\begin{theorem}\label{main}
Let $\varLambda$ be an algebraic Delone set. Further, let $U\subset\mathbb{S}^1$ be a set of four or more pairwise
nonparallel $\varLambda$-directions and suppose the existence of a
$U$-polygon. Then the
cross ratio of slopes of any four directions of $U$, arranged in order
of increasing angle with the positive real axis, is an element of the
set
$\mathcal{C}(K_{\varLambda}^+)$. Moreover,
$\mathcal{C}(K_{\varLambda}^+)$ is finite and $\operatorname{card}(U)$ is bounded above by a finite number
$b_{\varLambda}\in\mathbb{N}$ that only depends on $\varLambda$.
\end{theorem}
\begin{proof}
Let $U$ be as in the assertion. By Fact~\ref{uaffine}, $U$ consists
of directions parallel to the edges of an affinely regular
polygon. There is thus a linear automorphism $\Psi$ of
the complex
plane such that
$$
V:= \big\{ \Psi(u)/| \Psi(u)|\, \big |\, u\in U \big \}
$$
is contained in a set of directions that are equally spaced
in $\mathbb{S}^{1}$, i.e., the angle between each pair of adjacent
directions is the same. Since the directions of $U$ are pairwise
nonparallel, we may assume that there is an $m\in\mathbb{N}$ with $m\geq 4$ such that
each direction of $V$ is given by $e^{h\pi i/m}$, where
$h\in\mathbb{N}_{0}$ satisfies $h\leq m-1$. Let $u_{j}$, $1\leq
j\leq 4$, be four
directions of $U$, arranged in order
of increasing angle with the positive real axis. By
Fact~\ref{crkn4gen}, one has
$$q:=\big\langle
\operatorname{sl}(u_{1}),\operatorname{sl}(u_{2}),\operatorname{sl}(u_{3}),\operatorname{sl}(u_{4})\big\rangle\in K_{\varLambda}^+\,.$$ We may assume that $\Psi(u_{j})/| \Psi(u_{j})| =e^{h_{j}\pi i/m}$, where $h_j \in\mathbb{N}_{0}$,
$1\leq j \leq 4$, and $h_1<h_2<h_3<h_4\leq
m-1$. Fact~\ref{crossratio} now implies
\begin{eqnarray*}
q&=&\big \langle \operatorname{sl}(\Psi(u_1)),\operatorname{sl}(\Psi(u_2)),\operatorname{sl}(\Psi(u_3)),\operatorname{sl}(\Psi(u_4))\big\rangle\\&=&\frac{(\tan (\frac{h_3 \pi}{m})- \tan (\frac{h_1 \pi}{m}))(\tan (\frac{h_4 \pi}{m})- \tan (\frac{h_2 \pi}{m}))}{(\tan (\frac{h_3 \pi}{m})- \tan (\frac{h_2 \pi}{m}))(\tan (\frac{h_4 \pi}{m})- \tan (\frac{h_1 \pi}{m}))}\\
&=&\frac{\sin(\frac{(h_3-h_1)\pi}{m})\sin(\frac{(h_4-h_2)\pi}{m})}{\sin(\frac{(h_3-h_2)\pi}{m})\sin(\frac{(h_4-h_1)\pi}{m})}\,.
\end{eqnarray*}
Setting $k_1:=h_3-h_1$, $k_2:=h_4-h_2$, $k_3:=h_3-h_2$ and
$k_4:=h_4-h_1$, one gets $1\leq k_3<k_1, k_2<k_4\leq m-1$ and
$k_1+k_2=k_3+k_4$. Using
$\sin(\theta)=-e^{-i\theta}(1-e^{2i\theta})/2i$, one finally
obtains
$$
K_{\varLambda}^+\owns q=\frac{(1-\zeta_{m}^{k_1})(1-\zeta_{m}^{k_2})}{(1-\zeta_{m}^{k_3})(1-\zeta_{m}^{k_4})}=f_m(d)\,,
$$
with $d:=(k_1,k_2,k_3,k_4)$, as in~(\ref{fmd}). Then $d\in D_m$, after
interchanging its first two coordinates if necessary to ensure that
$k_1\leq k_2$; note that this operation does not change the value of
$f_m(d)$. This proves the first assertion.
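
As a quick sanity check (not part of the argument), the chain of identities above can be verified numerically; the sketch below is our own illustration in Python, with an arbitrary sample choice of $m$ and of the $h_j$, and confirms that the cross ratio of the tangents, the sine ratio and the cyclotomic expression coincide when $k_1+k_2=k_3+k_4$.
\begin{verbatim}
import cmath, math

m = 30
h = [2, 7, 11, 19]                 # h_1 < h_2 < h_3 < h_4 <= m-1 (arbitrary example)
k1, k2 = h[2] - h[0], h[3] - h[1]
k3, k4 = h[2] - h[1], h[3] - h[0]
assert k1 + k2 == k3 + k4

tan = [math.tan(hj * math.pi / m) for hj in h]
cross_ratio = ((tan[2] - tan[0]) * (tan[3] - tan[1])) / \
              ((tan[2] - tan[1]) * (tan[3] - tan[0]))

sine_ratio = (math.sin(k1 * math.pi / m) * math.sin(k2 * math.pi / m)) / \
             (math.sin(k3 * math.pi / m) * math.sin(k4 * math.pi / m))

zeta = cmath.exp(2j * math.pi / m)  # primitive m-th root of unity
f_m_d = ((1 - zeta**k1) * (1 - zeta**k2)) / ((1 - zeta**k3) * (1 - zeta**k4))

print(cross_ratio, sine_ratio, f_m_d)  # all three agree; f_m_d has zero imaginary part
\end{verbatim}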
Suppose that $\operatorname{card}(U)\geq 7$. Let
$U'$ consist of seven directions of $U$ and let $V':= \{
\Psi(u)/| \Psi(u)|\, |\, u\in U' \}$. We may assume
that all the directions of $V'$ are in the first two quadrants, so one
of these quadrants, say the first, contains at least four directions
of $V'$. Application of the above argument to these four directions gives integers $h_j$ satisfying $0\leq h_1<h_2<h_3<h_4\leq m/2$, where we may also assume, by rotating the directions of $V'$ if
necessary, that $h_1=0$. As above, we obtain a corresponding solution of
$f_m(d)=q\in K_{\varLambda}^+$, where $d\in D_m$.
By property (Alg) and Theorem~\ref{algcoro}, the set $\mathcal{C}(K_{\varLambda}^+)$
is finite and there is a number
$m_{\varLambda}\in\mathbb{N}$ such that all solutions of $f_{m}(d)\in K_{\varLambda}^+$, where $m\geq 4$ and
$d\in D_{m}$, are either of the form
(xii) or (xiii) of Theorem~\ref{intersectq} or are given, up to multiplication of $m$
and $d$ by the same factor, by $m=m_{\varLambda}$ and $d$
from a finite list. Without restriction, we may assume that $m_{\varLambda}$ is even.
Suppose that the above solution is of the form
(xii) or (xiii) of Theorem~\ref{intersectq}. Then using $h_1=0$, one
obtains $h_4=k_4=k+s>m/2$, a contradiction. Thus, our solution derives from
$m=m_{\varLambda}$ and finitely many values of $d\in D_m$. Since
this applies to any four directions of $V'$ lying in the first
quadrant, all such directions correspond to angles with the positive
real axis which are integer multiples of $\pi/m_{\varLambda}$.
We claim that all directions of $V'$ have the latter property. To see
this, suppose that there is a direction $v\in V'$ in the second
quadrant, and consider a set of four directions $v_j$, $1\leq j\leq
4$, in $V'$, where $v_4=v$ and $v_j$, $1\leq j\leq
3$, lie in the first quadrant. Suppose that $v_j=e^{h_j\pi i/m}$, $1\leq
j\leq 4$. Then $h_j$ is an integer multiple of $m/m_{\varLambda}$, for $1\leq j\leq
3$. Again, we obtain a corresponding solution of
$f_m(d)=q\in K_{\varLambda}^+$, where $d\in D_m$. If this solution derives
from the finite list guaranteed by Theorem~\ref{algcoro},
then clearly $h_4$ is also an integer multiple of
$m/m_{\varLambda}$. Otherwise, by Theorem~\ref{algcoro}, this
solution is of the form (xii) or (xiii) of Theorem~\ref{intersectq} and we
can take $h_1=0$ as before, whence either $h_2=k$, $h_3=2k$ and
$h_4=k+s$, $1\leq k\leq s/2$, or $h_2=s-k$, $h_3=s$ and
$h_4=k+s$, $s/2\leq k< s$, where $m=2s$. Since
$s=m/2=(m_{\varLambda}/2)(m/m_{\varLambda})$ is an integer
multiple of $m/m_{\varLambda}$, we conclude in either case that $k$, and hence
$h_4=k+s$, is also an integer multiple of $m/m_{\varLambda}$. This
proves the claim.
It thus remains to examine the case $m=m_{\varLambda}$ in more detail. Let $h_j$,
$1\leq j\leq 4$, correspond to the four directions of $V'$ having the
smallest angles with the positive real axis, so that $h_1=0$ and
$h_j\leq m/2$, $2\leq j\leq 4$. We have already shown that the
corresponding $d=(k_1,k_2,k_3,k_4)$ must occur in the finite list
guaranteed by Theorem~\ref{algcoro}. Since $h_j\leq m/2$, $1\leq j\leq 4$, we
also have $k_j\leq m/2$, $1\leq j\leq 4$. This yields only
finitely many quadruples $(h_1,h_2,h_3,h_4)=(0,k_1-k_3,k_1,k_4)$.
Suppose that $h$ corresponds to any other direction of $V'$ and replace $(h_1,h_2,h_3,h_4)$ by
$(h_2,h_3,h_4,h)$. We obtain finitely many
$d=(h_4-h_2,h-h_3,h_4-h_3,h-h_2)\in D_m$, which, by
Theorem~\ref{algcoro}, either occur in (xii) or (xiii) of
Theorem~\ref{intersectq} with $m=m_{\varLambda}$ or occur
in the finite list guaranteed by that result. This gives only finitely many
possible finite sets of more than four directions, which implies that
$\operatorname{card}(U)$ is bounded from above by a finite number that
only depends on $\varLambda$ (since the above analysis only depends on $\varLambda$).
\end{proof}
Similarly, the next result even holds for arbitrary sets $\varLambda$
with property ($n$-Cyc), where $n\geq 3$.
\begin{theorem}\label{finitesetncr0gen}
Let $n\geq 3$ and let $\varLambda$ be an $n$-cyclotomic Delone set. Further, let $U\subset\mathbb{S}^1$ be a set of four or more pairwise
nonparallel $\varLambda$-directions and suppose the existence of a
$U$-polygon. Then the
cross ratio of slopes of any four directions of $U$, arranged in order
of increasing angle with the positive real axis, is an element of the subset
$\mathcal{C}(K_{\varLambda}^+)$ of\/
$
\mathcal{C}(\mathbb{Q}(\zeta_n)^+)
$. Moreover
$$\mathcal{C}(\mathbb{Q}(\zeta_n)^+)=\mathcal{C}_{\operatorname{lcm}(2n,12)}(\mathbb{Q}(\zeta_n)^+)$$
is finite
and $\operatorname{card}(U)$ is bounded above by a finite number
$b_n\in\mathbb{N}$ that only depends on $n$. In particular, one can choose
$b_3=b_4=6$, $b_5=10$, $b_8=8$ and $b_{12}=12$.
\end{theorem}
\begin{proof}
Employing Theorem~\ref{t1} together with the trivial observation that
$K_{\varLambda}^+\subset\mathbb{Q}(\zeta_n)^+$ for any
$n$-cyclotomic Delone set, the general result follows from the same
arguments as used in the proof of Theorem~\ref{main}. The work of
Gardner and Gritzmann shows that one can choose $b_3=b_4=6$; cf.~\cite[Thm.~ 4.5]{GG}. The specific
bounds $b_n$ for $n=5,8,12$ are obtained by following the proof of
Theorem~\ref{main} and employing Corollary~\ref{coro8125}.
More precisely, let $n=8$ (whence $\operatorname{lcm}(2n,12)=48$) and suppose that $\operatorname{card}(U)\geq 7$. Let
$U'$ consist of seven directions of $U$ and let $V':= \{
\Psi(u)/| \Psi(u)|\, |\, u\in U' \}$, with $\Psi$ as described in the
proof of Theorem~\ref{main}. Then all
directions of $V'$ correspond to angles with the positive
real axis which are integer multiples of $\pi/48$ and it suffices to examine the case $m=48$ in more detail. Let $h_j$,
$1\leq j\leq 4$, correspond to the four directions of $V'$ having the
smallest angles with the positive real axis, so that $h_1=0$ and
$h_j\leq m/2=24$, $2\leq j\leq 4$. The corresponding $d=(k_1,k_2,k_3,k_4)$ must occur in (1)-(82) of
Corollary~\ref{coro8125}(b). Since $h_j\leq 24$, $1\leq j\leq 4$, we
also have $k_j\leq 24$, $1\leq j\leq 4$. The only possibilities are (1), (3), (15), (19), (27), (28), (33), (34),
(47), (67), (76) and (81) of
Corollary~\ref{coro8125}(b). These yield
\begin{eqnarray*}
(h_1,h_2,h_3,h_4)&\in&\big\{(0,2,6,20), (0,6,12,18), (0,2,4,12),
(0,6,12,24),\\&& \hphantom{\big\{} (0,2,10,24),(0,6,18,24), (0,6,8,14),
(0,6,9,21), \\&& \hphantom{\big\{}(0,8,10,20), (0,6,16,22), (0,8,16,24), (0,8,12,20)\big\}\,.
\end{eqnarray*}
Suppose that $h$ corresponds to any other direction of $V'$ and replace $(h_1,h_2,h_3,h_4)$ by
$(h_2,h_3,h_4,h)$. The corresponding $d$ either occur in (xii) or (xiii) of
Theorem~\ref{intersectq} with $m=48$ or occur in (1)-(82) of
Corollary~\ref{coro8125}(b). We obtain $(18,h-6,14,h-2)$, $(12,h-12,6,h-6)$,
$(10,h-4,8,h-2)$, $(18,h-12,12,h-6)$, $(22,h-10,14,h-2)$,
$(18,h-18,6,h-6)$, $(8,h-8,6,h-6)$, $(15,h-9,12,h-6)$,
$(12,h-10,10,h-8)$, $(16,h-16,6,h-6)$, $(16,h-16,8,h-8)$ and
$(12,h-12,8,h-8)$. The only possibilities are $h=24,30,36,42$ for $(12,h-12,6,h-6)$, $h=26,40$ for $(10,h-4,8,h-2)$,
$h=30,36,42$ for $(18,h-12,12,h-6)$, $h=38,46$ for $(22,h-10,14,h-2)$,
$h=36,42$ for $(18,h-18,6,h-6)$, $h=34$ for $(12,h-10,10,h-8)$,
$h=32,40$ for $(16,h-16,8,h-8)$ and $h=34$ for $(12,h-12,8,h-8)$. It
follows that the only possible sets of more than four directions only comprise
directions of the form $e^{h\pi i/48}$ and
are given by the ranges
\begin{eqnarray*}&&\{0,8,16,24,32,40\},
\{0,8,12,20,34\},
\{0,6,12,18,24,30,36,42\},\\&& \{0,2,4,12,26,40\},
\{0,6,12,24,30,36,42\}, \{0,2,10,24,38,46\},\\&& \{0,6,18,24,36,42\}, \{0,8,10,20,34\}
\end{eqnarray*}
of $h$. In particular, $\operatorname{card}(U)\leq 8$.
With the help of
Corollary~\ref{coro8125},
the cases $n=5,12$ can be treated analogously with the following
results.
For $n=12$, the only possible sets of more than four directions only comprise
directions of the form $e^{h\pi i/24}$ and are given by the ranges
\begin{eqnarray*}&&\{0,4,8,12,16,18,20,22\}, \{0,4,6,10,14,16,18,20,22\},\\&& \{0,2,4,10,12,14,18,20,22\},
\{0,2,4,8,12,14,16,18,20,22\},\\&& \{0,2,4,6,8,10,12,14,16,18,20,22\},
\{0,2,6,12,16,18,20,22\},\\&& \{0,2,4,12,14,20,22\}, \{0,4,6,12,14,16,18,20,22\},
\{0,2,8,12,18,20,22\},\\&& \{0,2,6,10,14,16,18,20,22\},\{0,2,8,10,16,18,20,22\},
\{0,2,10,12,20,22\}
\end{eqnarray*}
of $h$, whence $\operatorname{card}(U)\leq 12$.
For $n=5$, the only possible sets of more than four directions only comprise
directions of the form $e^{h\pi i/60}$ and are given by the ranges
\begin{eqnarray*}&&
\{0,10,20,30,40,50\},\{0,6,24,30,48,54\},
\{0,2,4,10,32,54\},\\&& \{0,4,8,18,34,50\},
\{0,6,10,16,38\},\{0,6,12,24,30,36,42,48,54\},\\&& \{0,10,18,28,44\},
\{0,12,18,30,36,42,48,54\},\{0,6,8,16,38\},\\&& \{0,10,14,28,34\},\{0,2,8,30,52,58\},
\{0,4,14,30,46,56\},\\&& \{0,6,18,30,42,48,54\},\{0,6,12,30,36,42,48,54\},\\&&
\{0,6,12,18,24,30,36,42,48,54\}, \{0,6,18,24,36,42,48,54\}
\end{eqnarray*}
of $h$, whence $\operatorname{card}(U)\leq 10$ in this case.
\end{proof}
Without further mention, the following result will be used in Remark~\ref{maxrem} below.
\begin{lem}\label{upollem}
Let $\varLambda$ be a $K$-algebraic model set and let
$U\subset\mathbb{S}^1$ be a finite set of directions. The following
statements are equivalent:
\begin{itemize}
\item[(i)]
There is a $U$-polygon in $\varLambda$.
\item[(ii)]
For any $K$-algebraic model set $\varLambda'$, there is a $U$-polygon
in $\varLambda'$.
\end{itemize}
\end{lem}
\begin{proof}
The assertion follows immediately from Proposition~\ref{cmsads} together with~\cite[Fact 4.4]{H5}.
\end{proof}
\begin{figure}
\caption{The boundary of a $U$-polygon in
the vertex set $\varLambda$ of the twelvefold
shield tiling, where $U$ is the set of
twelve pairwise nonparallel $\varLambda$-directions
given by the edges and diagonals of the central regular
dodecagon. The vertices of $\varLambda$ in the interior of
the $U$-polygon together with the vertices indicated by the black and grey dots, respectively,
give two different convex subsets of $\varLambda$ with the same X-rays in the
directions of $U$.}
\label{fig:tilingupolygon}
\end{figure}
\begin{rem}\label{maxrem}
The work of
Gardner and Gritzmann shows that $b_3=b_4=6$ is best possible for any
$3$- or $4$-cyclotomic model set; cf.~\cite[Example 4.3]{GG}. The $U$-icosagon in the vertex set of the T\"ubingen
triangle tiling from Remark~\ref{urem} has the property that
$\operatorname{card}(U)=10$; see~\cite[Figure 1]{H4}. This shows that, for any $5$-cyclotomic model set, the number $b_5=10$ is best
possible. Fig.~\ref{fig:tilingupolygon} shows a $U$-polygon with $24$ vertices
in the vertex set of the
shield tiling with $\operatorname{card}(U)=12$, wherefore $b_{12}=12$
is best possible for any $12$-cyclotomic model set. A similar example
of a $U$-polygon with $16$ vertices in the vertex set of the
Ammann-Beenker tiling with $\operatorname{card}(U)=8$ shows that $b_{8}=8$ is
best possible for any $8$-cyclotomic model set; cf.~\cite[Fig.~2]{H5}. $U$-polygons
of {\em class $c\geq 4$} (i.e., $U$-polygons with $4$ {\em
consecutive edges} parallel to directions of $U$) in cyclotomic
model sets were studied in~\cite{H4}. By~\cite[Corollary 14]{H4} (see
also~\cite[Thm.~ 12]{DP}), the
existence of a
$U$-polygon of class $c\geq 4$ in an $n$-cyclotomic model set with
$n\not\equiv 2\pmod 4$ having the property that $\phi(n)/2$ is equal to one or a prime number
implies that $\operatorname{card}(U)\leq a_n$, where $a_3=a_4=6$, $a_8=8$,
$a_{12}=12$ and $a_n=2n$ for all other such values of $n$. In particular,
one observes the coincidence $b_n=a_n$ for $n=3,4,5,8,12$; cf.~Theorem~\ref{finitesetncr0gen}. However, there does not seem to be a reason why the least
possible numbers $b_n$ in Theorem~\ref{finitesetncr0gen} should not be
larger than $a_n$ for other $n\geq 3$ with the above property.
\end{rem}
Summing up, we finally obtain our main result on the
determination of convex subsets of algebraic Delone sets;
see~\cite[Thm.~ 4.21]{H5} for a
weaker version.
\begin{theorem}\label{dtmain}
Let $\varLambda$ be an algebraic Delone set.
\begin{itemize}
\item[(a)]
There are sets of four pairwise nonparallel $\varLambda$-directions
such that the convex subsets of $\varLambda$ are determined by the corresponding
X-rays. In addition, less than four pairwise nonparallel $\varLambda$-directions never
suffice for this purpose.
\item[(b)]
There is a finite number $c_{\varLambda}\in\mathbb{N}$ such that the convex subsets of
$\varLambda$ are determined by the X-rays in any set of $c_{\varLambda}$
pairwise nonparallel $\varLambda$-directions.
\end{itemize}
\end{theorem}
\begin{proof}
To prove~(a), it suffices by Fact~\ref{characungen} and
Theorem~\ref{main} to take any set of four pairwise
nonparallel $\varLambda$-directions such that the cross ratio of their
slopes, arranged in order
of increasing angle with the positive real axis, is not an element of
the finite set $\mathcal{C}(K_{\varLambda}^+)$. Since
$\varLambda$ is relatively dense, the set of $\varLambda$-directions
is dense in $\mathbb{S}^1$. In particular, this shows that the set of
slopes of $\varLambda$-directions is infinite. For example, by fixing three
pairwise nonparallel
$\varLambda$-directions and letting the fourth one vary, one sees that
the set of cross ratios of
slopes of four pairwise
nonparallel $\varLambda$-directions, arranged in order
of increasing angle with the positive real axis, is infinite as well.
The assertion follows. The additional statement follows immediately
from Fact~\ref{characungen}. Part~(b) is a direct consequence of
Fact~\ref{characungen} and Theorem~\ref{main}.
\end{proof}
The following result improves~\cite[Thm.~4.33]{H5} and, in particular, solves Problem 4.34
of~\cite{H5}; cf.~Example~\ref{algex} and compare~\cite[Thm.~ 5.7]{GG}.
\begin{theorem}\label{dtmain2}
Let $n\geq 3$ and let $\varLambda$ be an $n$-cyclotomic Delone set.
\begin{itemize}
\item[(a)]
There are sets of four pairwise nonparallel $\varLambda$-directions
such that the convex subsets of $\varLambda$ are determined by the corresponding
X-rays. In addition, less than four pairwise nonparallel $\varLambda$-directions never
suffice for this purpose.
\item[(b)]
There is a finite number $c_n\in\mathbb{N}$ that only depends on $n$ such that the convex subsets of
$\varLambda$ are determined by the X-rays in any set of $c_n$
pairwise nonparallel $\varLambda$-directions. In particular, one can
choose $c_3=c_4=7$, $c_5=11$, $c_8=9$ and $c_{12}=13$.
\end{itemize}
\end{theorem}
\begin{proof}
Part~(a) follows immediately from Theorem~\ref{dtmain}(a). Note that, by Fact~\ref{characungen} and
Theorem~\ref{finitesetncr0gen}, it suffices to take any set of four pairwise
nonparallel $\varLambda$-directions such that the cross ratio of their
slopes, arranged in order
of increasing angle with the positive real axis, is not an element of
the finite set $\mathcal{C}(\mathbb{Q}(\zeta_n)^+)$. Part~(b) is a direct consequence of
Fact~\ref{characungen} in conjunction with
Theorem~\ref{finitesetncr0gen}.
\end{proof}
\begin{rem}\label{rembest}
Remark~\ref{maxrem} shows that, for any $n$-cyclotomic
model set with $n=3,4,5,8,12$, the
number $c_n$ above is best possible with respect to the numbers of X-rays used. As already explained in the introduction, for practical applications, one additionally has to make sure that the
$\varLambda$-directions used yield densely occupied lines in
$\varLambda$. For the practically most relevant case of $n$-cyclotomic
model sets with $n=3,4,5,8,12$, this
can actually be achieved; cf.~\cite[Remark 5.8]{GG} and~\cite[Sec.~4]{H5} for examples of suitable sets of four pairwise nonparallel
$\varLambda$-directions in these cases. For the latter examples also recall that, for any $n$-cyclotomic model set $\varLambda$, the set of
$\varLambda$-directions is precisely the set
of $\mathbb{Z}[\zeta_n]$-directions; cf. Remark~\ref{okdirections} and
Example~\ref{algex}. It was shown in~\cite[Prop.~ 3.11]{H2} that {\em icosahedral model sets}
$\varLambda\subset\mathbb{R}^3$ can be sliced orthogonal to a fivefold axis of
their underlying $\mathbb{Z}$-module
into $5$-cyclotomic model sets. Applying Theorem~\ref{dtmain2} to
each such slice, one sees that the convex subsets of $\varLambda$
are determined by the X-rays in suitable four and
any eleven pairwise
nonparallel $\varLambda$-directions orthogonal to
the slicing axis.
\end{rem}
\section{Determination of convex bodies by continuous X-rays}
In~\cite{GM}, the following continuous version of
Fact~\ref{characungen} was
shown; compare Fact~\ref{uaffine}. Here, the {\em continuous X-ray} of a
{\em convex body\/} $K\subset\mathbb{C}$ (i.e., $K$ is compact with nonempty interior) in direction $u\in\mathbb{S}^1$ gives the length of
each chord of $K$ parallel to $u$ and the concept of determination is
defined as in the discrete case; cf.~ \cite{G}, \cite{GM} for details.
\begin{fact}\label{characuncont}
Let $U\subset\mathbb{S}^{1}$ be a set of two or more pairwise nonparallel directions. The following statements are equivalent:
\begin{itemize}
\item[(i)]
The convex bodies in $\mathbb{C}$ are determined by the continuous X-rays in the directions of $U$.
\item[(ii)]
There is no $U$-polygon.
\end{itemize}
In addition, if $\operatorname{card}(U)<4$, then there is a $U$-polygon. \qed
\end{fact}
Employing Fact~\ref{characuncont} instead of Fact~\ref{characungen},
the following result follows from the same arguments as used in the proofs
of Theorems~\ref{dtmain} and~\ref{dtmain2};
compare~\cite[Thm. 6.2]{GG}. Note that neither the uniform
discreteness of $\varLambda$ nor property (Hom)
is needed in the proof. More precisely, our proof of part~(a) needs property (Alg)
and the relative denseness of $\varLambda$, whereas part~(b) and the
additional statement
hold for arbitrary sets $\varLambda$ with property (Alg) and
($n$-Cyc) (where $n\geq 3$), respectively.
\begin{theorem}\label{tmain}
Let $\varLambda$ be an algebraic Delone set.
\begin{itemize}
\item[(a)]
There are sets of four pairwise nonparallel $\varLambda$-directions
such that the convex bodies in $\mathbb{C}$ are determined by the
corresponding continuous
X-rays. In addition, less than four pairwise nonparallel $\varLambda$-directions never
suffice for this purpose.
\item[(b)]
There is a finite number $c_{\varLambda}\in\mathbb{N}$ such that the convex
bodies in $\mathbb{C}$ are determined by the continuous X-rays in any set of $c_{\varLambda}$
pairwise nonparallel $\varLambda$-directions.
\end{itemize}
Moreover, for any
$n$-cyclotomic Delone set $\varLambda$, there is a finite number
$c_n\in\mathbb{N}$ that only depends on $n$ such that the convex
bodies in $\mathbb{C}$ are determined by the continuous X-rays in any set of $c_n$
pairwise nonparallel $\varLambda$-directions. In particular, one can choose $c_3=c_4=7$, $c_5=11$, $c_8=9$ and $c_{12}=13$.\qed
\end{theorem}
\begin{rem}
Employing the $U$-polygons from
Remark~\ref{maxrem}, it is straightforward to show
that the above numbers $c_n$, where $n=3,4,5,8,12$, are best
possible.
\end{rem}
\end{document}
\begin{document}
\title{Real-time covariance estimation for the local level model}
\author{K. Triantafyllopoulos\\ {\it Department of
Probability and Statistics, University of Sheffield, UK} \\ Email: {\tt [email protected]} }
\date{\today}
\maketitle
\begin{abstract}
This paper develops on-line inference for the multivariate local level model, with the focus being placed on covariance estimation of the innovations. We assess the application of the inverse Wishart prior distribution in this context
and find it too restrictive since the serial correlation structure of the observation
and state innovations is forced to be the same. We generalize the inverse Wishart distribution to allow for a more convenient correlation structure, but still retaining approximate conjugacy. We prove some relevant results for the new distribution and we develop approximate Bayesian inference, which allows simultaneous forecasting of time series data and estimation of the covariance of the innovations of the model. We provide results on the steady state of the level of the time series, which are deployed to achieve computational savings. Using Monte Carlo experiments, we compare the proposed methodology with existing estimation procedures. An example with real data consisting of production data from an industrial process is given.
\textit{Some key words:} multivariate time series, covariance estimation, adaptive estimation, dynamic linear models, multivariate control charts.
\end{abstract}
\section{Introduction}\label{model}
Let $\{y_t\}$ be a $p\times 1$ vector process,
generated from the state space model:
\begin{equation}\label{model2}
y_t=\theta_t+\epsilon_t \quad \textrm{and} \quad \theta_t=\phi
\theta_{t-1}+\omega_t,
\end{equation}
where $\theta_t$ is the conditional level of $y_t$, $\phi$ is a scalar
hyperparameter, and the innovation vectors $\epsilon_t$ and
$\omega_t$ follow $p$-variate
Gaussian distributions $\epsilon_t\sim N_p(0,\Sigma)$ and
$\omega_t\sim N_p(0,\Omega)$, for some covariance matrices
$\Sigma$ and $\Omega$, and for some integer $p\geq 1$. It is assumed that the sequences
$\{\epsilon_t\}$ and $\{\omega_t\}$ are individually and mutually
uncorrelated and they are also uncorrelated with the initial state
$\theta_0$, which follows a $p$-variate Gaussian distribution too. For $\phi=1$ the
above model gives the popular local level model, known also as
random walk plus noise model or as steady forecasting model, which is extensively covered in Harvey
(1986, 1989) and in West and Harrison (1997). If $\phi$ lies inside the unit circle, but $\phi\neq 0$, then
(\ref{model2}) can be interpreted as a vector autoregressive model (L\"utkepohl, 2005) with common structure over the component time series. In this paper we focus on the local level model, but the choice $\phi\neq 1$ may allow some small flexibility around it, for example a nearly local level model when $\phi\approx 1$.
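
For concreteness, a minimal simulation of model (\ref{model2}) with $\phi=1$ might look as follows; this is our own sketch, assuming Python with \texttt{numpy}, and the chosen dimension, sample size and covariance matrices are arbitrary.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
p, T, phi = 2, 200, 1.0                      # bivariate local level model
Sigma = np.array([[1.0, 0.5], [0.5, 2.0]])   # covariance of the observation innovations
Omega = np.array([[0.2, 0.0], [0.0, 0.1]])   # covariance of the state innovations

theta = np.zeros(p)                          # theta_0 = 0 for simplicity
y = np.empty((T, p))
for t in range(T):
    theta = phi * theta + rng.multivariate_normal(np.zeros(p), Omega)  # state equation
    y[t] = theta + rng.multivariate_normal(np.zeros(p), Sigma)         # observation equation

print(y[:3])
\end{verbatim}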
Despite its simplicity, the local level model can be used to analyze real data sets in various settings and scenarios, as has been pointed out by many authors; see e.g. Durbin (2004, p. 6). In the context of model (\ref{model2}) with $\phi\approx 1$, $\theta_t$ is referred to as the conditional level or simply level of $y_t$, since $E(y_t|\theta_t)=\theta_t$ and then $\theta_t$ is local as $\theta_t\approx \theta_{t-1}$, where $E(.)$ denotes expectation. The local level model has been used to analyze the volume of the river Nile (Pole {\it et al.}, 1994, \S7.1; Durbin and Koopman, 2001, \S2.2.2), market research data for a drug development (West and Harrison, 1997, \S2.3), temperature data for assessing global warming (Shumway and Stoffer, 2006, \S6.1), and annual precipitation at Lake Superior (Petris {\it et al.}, 2009, \S3.2.1). A detailed account of the local level model in econometrics, including many examples, is given in Commandeur and Koopman (2007, Chapters 1-7). Furthermore, local level models play a significant role in financial econometrics as they form basic components for unit root tests (Kwiatkowski, 1992). Finally, as pointed out by Triantafyllopoulos (2006), model (\ref{model2}) is a generalization of the Shewhart-Deming model for quality control, and it can be deployed in the context of multivariate control charts for autocorrelated processes (Bersimis {\it et al.}, 2007), where the aim is to signal deviations from the mean and the covariance matrix of these processes.
A central problem associated with inference of model (\ref{model2}) is
the specification or estimation of the covariance matrices $\Sigma$
and $\Omega$. For the estimation of these matrices there are several algorithms based on direct likelihood maximization (Harvey, 1986, 1989) and in particular using analytical score functions (Shephard and Koopman, 1992). Iterative methods for indirect likelihood maximization are also available, e.g. Newton-Raphson algorithms (Shephard and Koopman, 1992; Shumway and Stoffer, 2006, \S6.3) and expectation maximization (EM) algorithms (Dempster {\it et al.}, 1977; Shumway and Stoffer, 1982; Koopman, 1993); Fahrmeir and Tutz (2001, \S8.1.2) and Shumway and Stoffer (2006, \S6.3) have detailed discussions of these algorithms with useful references, and recursive versions of the EM algorithm are also possible for on-line application. Simulation based methods, such as Markov chain Monte Carlo (MCMC) (Carter and Kohn, 1994; Gamerman and Lopes, 2006) and sequential Monte Carlo methods (Doucet {\it et al.}, 2001) are also available. Although in recent years the advance in computing power has resulted in sophisticated simulation based and iterative estimation procedures, such as those discussed in the above references, it is still desirable to develop inference that is not based on simulation or on iterative methods, in particular for enabling fast statistical analysis of high dimensional data and sequential model monitoring in real-time (Harrison and West, 1991). The need for real-time estimation has been pointed out in Cantarelis and Johnston (1982) and in many references in machine learning and signal processing; see e.g. Haykin (2001) and Malik (2006). Furthermore, MCMC and maximum likelihood based methods, such as those mentioned above, are effectively designed for a ``static'' or in-sample application where a complete set of data is available and the interest is focused on smoothing, rather than on forecasting. Instead, our interest is centred on sequential or ``dynamic'' application, where each time we collect a new observation vector $y_t$ we update the estimates or predictions in an adaptive way. For the remainder of this paper we discuss approximate conjugate estimation procedures, but in section \ref{examples} we also consider the EM algorithm for comparison purposes.
Assuming that $\Omega$ is proportional to $\Sigma$ in
the sense that $\Omega=w \Sigma$, for some scalar $w>0$,
learning for $\Sigma$ is possible either by adopting Bayesian
methods, considering a Wishart prior for $\Sigma^{-1}$ (West and
Harrison, 1997), or by adopting maximum likelihood estimation
procedures (Harvey, 1986; 1989). The above
proportional structure of $\Sigma$ and $\Omega$ can be seen as a
matrix generalization of the proportionality in the univariate case
$(p=1)$ that leads to the scale observational dynamic model (West
and Harrison, 1997; Triantafyllopoulos and Harrison, 2008), but when
$p>1$ it imposes the restrictive assumption that the correlation
matrix of $\omega_t$ is equal to the correlation matrix of
$\epsilon_t$. This limitation can be understood by noting that the above model belongs to the relatively restricted class of ``seemingly unrelated time series equations'' (SUTSE) (Harvey, 1989, \S8.2), which is a time series extension of the
``seemingly unrelated regression equation models''
(Zellner, 1962). In our opinion, efforts devoted to the estimation of
the above models have been focused primarily on mathematical convenience, and the correlation structure problem mentioned above appears to have been overlooked.
The purpose of this paper is to develop an on-line estimation procedure for adaptive and fast estimation of $\Sigma$ and forecasting
of $\{y_t\}$. The adaptive estimation methods proposed in this paper may allow for
analysis of high dimensional data, although in this paper this is only briefly explored via Monte Carlo experiments. In order to achieve the above goal we propose the deterministic
specification of $\Omega$ as $\Omega=\Sigma^{1/2}W\Sigma^{1/2}$,
where $\Sigma^{1/2}$ denotes the symmetric square root of $\Sigma$
and $W$ is a covariance matrix to be specified. In our development, $W$ is initially assumed known, but we propose an application of the Newton-Raphson method for adaptive estimation of this matrix in real problems. We observe that when
$W=wI_p$, where $I_p$ is the $p\times p$ identity matrix, then
$\Omega=w\Sigma$ (leading to $\Omega$ being proportional to
$\Sigma$), but when $W$ is not proportional to $I_p$, then the
correlation matrices of $\epsilon_t$ and $\omega_t$ are different.
Thus we extend the SUTSE models of Harvey (1989, \S8.2) and West and Harrison (1997, \S16.4) to
allow for a more general covariance setting. For estimation purposes,
we deploy approximate Bayesian inference, by adopting a prior
distribution for $\Sigma$ which leads to a generalization of the
inverse Wishart distribution. We provide convergence results of the
posterior covariance matrix of $\theta_t$ leading to the steady
state of $\theta_t$ and this is used in the estimation algorithm in
order to increase its computational speed.
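
The covariance specification above is straightforward to implement; the sketch below (our own illustration, assuming Python with \texttt{numpy}; the helper functions and matrices are ours) builds $\Omega=\Sigma^{1/2}W\Sigma^{1/2}$ from the symmetric square root of $\Sigma$, checks that $\Omega=w\Sigma$ when $W=wI_p$, and shows that for a non-diagonal $W$ the correlation matrices of $\epsilon_t$ and $\omega_t$ differ.
\begin{verbatim}
import numpy as np

def sym_sqrt(M):
    # symmetric square root of a covariance matrix via its spectral decomposition
    vals, vecs = np.linalg.eigh(M)
    return vecs @ np.diag(np.sqrt(vals)) @ vecs.T

def corr(M):
    d = 1.0 / np.sqrt(np.diag(M))
    return M * np.outer(d, d)

Sigma = np.array([[1.0, 0.5], [0.5, 2.0]])
root = sym_sqrt(Sigma)

# W proportional to the identity: Omega = w * Sigma, same correlation matrix as Sigma
Omega1 = root @ (0.3 * np.eye(2)) @ root
print(np.allclose(Omega1, 0.3 * Sigma))      # True

# general W: the correlation matrices of the two innovation vectors differ
W = np.array([[0.3, 0.2], [0.2, 0.5]])
Omega2 = root @ W @ root
print(corr(Sigma), corr(Omega2), sep="\n")
\end{verbatim}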
The remainder of the paper is organized as follows. Section \ref{s2s1} generalizes the inverse Wishart distribution and discusses
some properties of the new distribution. In section \ref{s2s2} we develop approximate Bayesian inference for model (\ref{model2}) and section
\ref{examples} includes two illustrations, consisting of Monte Carlo experiments that compare and contrast the performance of our algorithms with existing methods in the literature and an example of monitoring a 5-dimensional process in quality control. Finally, section \ref{discussion} gives concluding comments.
\section{Generalized inverse Wishart distribution}\label{s2s1}
Let $X\sim IW_p(n,A)$ denote that the matrix $X$ follows an
inverse Wishart distribution with $n$ degrees of freedom and with
parameter matrix $A$. Given $A$, we use the notation $|A|$ for the
determinant of $A$, $\textrm{tr}(A)$ for the trace of $A$, and $\textrm{exp}(x)$ for the
exponential of the scalar $x$. The following theorem introduces a generalization of the inverse Wishart distribution.
\begin{thm}\label{lem1}
Consider the $p\times p$ random covariance matrix $X$ and denote
with $X^{1/2}$ the symmetric square root of $X$. Given $p\times p$
covariance matrices $A$ and $S$ and a positive scalar $n>2p$, define
$Y=X^{1/2}S^{-1}X^{1/2}$ so that $Y$ follows an inverse Wishart
distribution $Y\sim IW_p(n,A)$. Then the density function of $X$ is
given by
$$
p(X)=\frac{|A|^{(n-p-1)/2}|S|^{(n-p-1)/2}}{2^{p(n-p-1)/2}\Gamma_p\{(n-p-1)/2\}
|X|^{n/2}}\exp(\textrm{tr} (-AX^{-1/2}SX^{-1/2}/2)),
$$
where $\Gamma_p(.)$ denotes the multivariate gamma function.
\end{thm}
\begin{proof}
From Olkin and
Rubin (1964) the determinant of the Jacobian matrix of $X$ with
respect to $Y$ is $J(Y\rightarrow X)=J(Y\rightarrow
X^{1/2})J(X^{1/2}\rightarrow X)=\prod_{1\leq i\leq j\leq p}(\lambda_i+\lambda_j)(\xi_i+\xi_j)^{-1}$, where
$\lambda_1,\ldots,\lambda_p$ are the eigenvalues of
$S^{-1/2}X^{1/2}S^{-1/2}$ and $\xi_1,\ldots,\xi_p$ are the
eigenvalues of $X^{1/2}$. We observe that if $A=I_p$, then $p(X)$ is
an inverse Wishart distribution, since
$\textrm{tr}(-X^{-1/2}SX^{-1/2}/2)=\textrm{tr}(-SX^{-1}/2)$. The
Jacobian $J(Y\rightarrow X)$ does not depend on $A$ and so we can
determine $J(Y\rightarrow X)$ from the special case of $A=I_p$. With
$A=I_p$, $X\sim IW_p(n,S)$ and $Y\sim IW_p(n,I_p)$ and from the
transformation $Y=X^{1/2}S^{-1}X^{1/2}$ we get
$$
p(Y)=\frac{|S|^{(n-p-1)/2}\exp(\textrm{tr}(-Y^{-1}/2))J(X\rightarrow
Y)}{2^{p(n-p-1)/2}\Gamma_p\{(n-p-1)/2\}|S|^{n/2}|Y|^{n/2}}.
$$
Since $Y\sim IW_p(n,I_p)$ it must be
$|S|^{-n/2}|S|^{(n-p-1)/2}J(X\rightarrow Y)=1$ and so
$J(X\rightarrow Y)=|S|^{(p+1)/2}$.
Now, in the general case of a covariance matrix $A$, we see
$$
\int_{X>0} p(X)\,dX=\int_{Y>0}
\frac{|A|^{(n-p-1)/2}}{2^{p(n-p-1)/2}\Gamma_p\{(n-p-1)/2\}|Y|^{n/2}}
\exp(\textrm{tr}(-AY^{-1}/2))\,dY=1,
$$
since $Y\sim IW_p(n,A)$.
\end{proof}
The distribution of the above theorem provides a generalization of
the inverse Wishart distribution, since if $A=I_p$ we have $X\sim
IW_p(n,S)$ and if $S=I_p$, we have $X\sim IW_p(n,A)$. This is
a new generalization of the
inverse Wishart distribution, differing clearly from the generalizations of Dawid and Lauritzen (1993), Brown
{\it et al.} (1994), Roverato (2002), and Carvalho and West (2007).
In the following we refer to the distribution of Theorem \ref{lem1}
as {\it generalized inverse Wishart} distribution, and we write
$X\sim GIW_p(n,A,S)$. The next result gives some expectations of
the $GIW$ distribution.
\begin{thm}\label{lem2a}
Let $X\sim GIW_p(n,A,S)$ for some known $n,A$ and $S$. Then we have
\begin{enumerate}
\item [(a)] $E(X^{1/2}S^{-1}X^{1/2})=(n-2p-2)^{-1}A$;
$E(X^{-1/2}SX^{-1/2})=(n-p-1)A^{-1}$; \item [(b)]
$E|X|^{\ell}=2^{-p\ell} \prod_{i=1}^p\prod_{j=1}^\ell \{(n-p-i)/2-j\}^{-1} |A|^{\ell}|S|^{\ell}$,
\end{enumerate}
where $E(.)$ denotes expectation and $0<\ell<(n-2p)/2$.
\end{thm}
\begin{proof}
First we prove (a). From the proof of Theorem \ref{lem1} we have that\\
$Y=X^{1/2}S^{-1}X^{1/2}\sim IW_p(n,A)$ and so
$E(Y)=(n-2p-2)^{-1}A$ and $E(Y^{-1})=(n-p-1)A^{-1}$. Proceeding
with (b) we note from the proof of Theorem \ref{lem1} that for any
$n>2p$
$$
\int_{X>0}|X|^{-n/2}\exp(\textrm{tr}(-AX^{-1/2}SX^{-1/2}/2))\,dX=c^{-1},
$$
where $c$ is the normalizing constant of the distribution of $X$.
Then
$$
E|X|^{\ell}=c\int_{X>0}|X|^{-(n-2\ell)/2}\exp(\textrm{tr}(-AX^{-1/2}SX^{-1/2}/2))\,dX=\frac{c}{c^*},
$$
where
$$
c^*=\frac{2^{p\ell}|A|^{(n-p-1)/2}|S|^{(n-p-1)/2}}{2^{p(n-p-1)/2}|A|^{\ell}|S|^{\ell}\Gamma_p\{(n-2\ell-p-1)/2\}}
$$
and the range of $\ell$ makes sure that $n-2\ell>2p$. The result
follows by eliminating the factor $2^{p(n-p-1)/2}$ in the fraction
$c/c^*$, and by noting that from well known properties of the multivariate gamma function we have
$$
\Gamma_p\left(\frac{n-p-1}{2}\right)=\prod_{i=1}^p\Gamma\left(\frac{n-p-i}{2}\right)=\prod_{i=1}^p\prod_{j=1}^\ell \left(\frac{n-p-i}{2}-j\right)\Gamma\left(\frac{n-p-i}{2}-\ell\right)
$$
where $\Gamma(.)$ denotes the gamma function.
\end{proof}
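
The transformation $Y=X^{1/2}S^{-1}X^{1/2}$ of Theorem \ref{lem1} also suggests a simple way to simulate from the $GIW$ distribution and hence to check the moments above numerically: draw $Y\sim IW_p(n,A)$ and invert the transformation via $X^{1/2}=S^{1/2}(S^{-1/2}YS^{-1/2})^{1/2}S^{1/2}$. The sketch below is our own illustration, assuming Python with \texttt{numpy} and \texttt{scipy} and assuming the usual identification of $Y\sim IW_p(n,A)$ with $Y^{-1}$ following a Wishart distribution with $n-p-1$ degrees of freedom and scale matrix $A^{-1}$; the function names and the numerical values are ours.
\begin{verbatim}
import numpy as np
from scipy.stats import wishart

def sym_sqrt(M):
    vals, vecs = np.linalg.eigh(M)
    return vecs @ np.diag(np.sqrt(vals)) @ vecs.T

def rgiw(n, A, S, rng):
    # draw X ~ GIW_p(n, A, S): sample Y ~ IW_p(n, A), then invert Y = X^{1/2} S^{-1} X^{1/2}
    p = A.shape[0]
    Y = np.linalg.inv(wishart.rvs(df=n - p - 1, scale=np.linalg.inv(A), random_state=rng))
    Sm, Sp = sym_sqrt(np.linalg.inv(S)), sym_sqrt(S)
    Xroot = Sp @ sym_sqrt(Sm @ Y @ Sm) @ Sp   # X^{1/2} = S^{1/2}(S^{-1/2} Y S^{-1/2})^{1/2} S^{1/2}
    return Xroot @ Xroot

rng = np.random.default_rng(1)
p, n = 2, 12
A = np.array([[2.0, 0.3], [0.3, 1.0]])
S = np.array([[1.0, 0.6], [0.6, 1.5]])

draws = [rgiw(n, A, S, rng) for _ in range(20000)]
mc_det = np.mean([np.linalg.det(X) for X in draws])
exact = np.linalg.det(A) * np.linalg.det(S) / ((n - p - 3) * (n - p - 4))
print(mc_det, exact)   # Monte Carlo estimate of E|X| against part (b) with ell = 1
\end{verbatim}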
The following property reflects on the symmetry of $A$ and $S$ in
the $GIW$ distribution.
\begin{thm}\label{th:commute}
If $X\sim GIW_p(n,A,S)$, for some known $n$, $A$ and $S$, then
$X\sim GIW_p(n,S,A)$.
\end{thm}
\begin{proof}[Proof of Theorem \ref{th:commute}]
Suppose that $X\sim GIW_p(n,A,S)$. From the normalizing constant of
the density $p(X)$ of Theorem \ref{lem1}, we can exchange the roles
of $|A|$ and $|S|$. And from $\textrm{tr}(-AX^{-1/2}SX^{-1/2}/2)=\textrm{tr}(-SX^{-1/2}AX^{-1/2}/2)$ we have that $X\sim
GIW_p(n,S,A)$.
\end{proof}
Next we show that the mode of a $GIW$ distribution can be obtained by the solution of a
matrix equation. First we give the following lemma.
\begin{lem}\label{lem:trace}
If $X$ is a $p\times p$ real-valued symmetric matrix of variables and $A,S$ are $p\times p$ symmetric
matrices of constants, then
\begin{equation}
\frac{\partial \textrm{tr}(AXSX)}{\partial X} =2\{K+K'-diag(k_{11},\ldots,k_{pp})\}, \label{eq:lem:trace}
\end{equation}
where $K=AXS$ and $K=(k_{ij})_{i,j=1,\ldots,p}$.
\end{lem}
\begin{proof}
Let $X=(x_{ij})_{i,j=1,\ldots,p}$ and thus
$$
\frac{\partial \textrm{tr}(AXSX)}{\partial x_{ij}} = \textrm{tr}\left(A\frac{\partial X}{\partial x_{ij}} SX\right) +
\textrm{tr}\left(AXS \frac{\partial X}{\partial x_{ij}}\right).
$$
Now let $u_j$ be the $j$-th column vector of the identity matrix $I_p$ (a zero vector having one unit in its $j$-th position).
For $i=j$ we have
\begin{equation}
\frac{\partial \textrm{tr}(AXSX)}{\partial x_{ii}} = \textrm{tr}(Au_iu_i'SX) + \textrm{tr}(AXSu_iu_i') = u_i'SXAu_i+u_i'AXSu_i=2k_{ii}, \label{lem:partial1}
\end{equation}
where $u_i'$ denotes the transpose of $u_i$.
For $j<i$ we have
\begin{eqnarray}
\frac{\partial \textrm{tr}(AXSX)}{\partial x_{ij}} &=& \textrm{tr}(SXAu_ju_i') + \textrm{tr}(AXSu_ju_i') + \textrm{tr}(SXAu_iu_j')+\textrm{tr}(AXSu_iu_j') \nonumber \\ &=& 2k_{ji} + 2k_{ij}. \label{lem:partial2}
\end{eqnarray}
Putting together (\ref{lem:partial1}) and (\ref{lem:partial2}) we obtain (\ref{eq:lem:trace}).
\end{proof}
\begin{thm}\label{th:mode}
The mode $\widehat{X}$ of $X\sim GIW(n,A,S)$ satisfies the matrix equation
\begin{equation}\label{mode}
A\widehat{X}^{-1/2}S+S\widehat{X}^{-1/2}A=2n\widehat{X}^{1/2}.
\end{equation}
Furthermore, $\widehat{X}$ is unique, i.e. $GIW$ is a unimodal distribution.
\end{thm}
\begin{proof}
From the density of $X$ we have
$$
\log p(X)=c+n\log |X^{-1/2}|-\frac{1}{2} \textrm{tr}(AX^{-1/2}SX^{-1/2}).
$$
To find $\widehat{X}$ we need to maximize $\log p(X)$. Using Lemma \ref{lem:trace} we have
$$
\frac{\partial \log p(X)}{\partial X^{-1/2}} = n\left\{ 2X^{1/2}-diag\left(x_{11}^{(1/2)},\ldots,x_{pp}^{(1/2)}\right)\right\} - K-K'+diag(k_{11},\ldots,k_{pp}),
$$
where $X^{1/2}=\left\{x_{ij}^{(1/2)}\right\}_{i,j=1,\ldots,p}$, $K=AX^{-1/2}S$ and $K=(k_{ij})_{i,j=1,\ldots,p}$.
Now
$$
\frac{\partial \log p(\widehat{X})}{\partial \widehat{X}^{-1/2}} = 0 \Rightarrow K+K'=2n\widehat{X}^{1/2} \Rightarrow A\widehat{X}^{-1/2}S+S\widehat{X}^{-1/2}A=2n\widehat{X}^{1/2}.
$$
Next we show that at $X=\widehat{X}$ the second partial derivative of $\log p(X)$ is a negative definite matrix.
Let $x=\textrm{vech}(X^{1/2})$ and $x^*=\textrm{vech}(X^{-1/2})$, where $\textrm{vech}(.)$ denotes the vec permutation operator of symmetric matrices. Also, let $D_p$ be the duplication matrix and $H_p$ be any left inverse of it and denote with $\otimes$ the Kronecker product of two matrices. Then
\begin{eqnarray*}
\frac{\partial^2 \log p(X)}{\partial x^* \partial (x^*)'} &=& -n H_p (X^{1/2}\otimes X^{1/2}) D_p - \frac{n}{2} \frac{\partial\, \textrm{vech}\{ diag(x_{11}^{(1/2)},\ldots,x_{pp}^{(1/2)})\} }{\partial (x^*)' } \\ &=& -n H_p (X^{1/2}\otimes X^{1/2}) D_p - \frac{n}{2} \frac{\partial\, \textrm{vech}\{ diag(x_{11}^{(1/2)},\ldots,x_{pp}^{(1/2)})\} }{\partial x' } \frac{\partial x}{\partial (x^*)'} \\ &=& -n H_p (X^{1/2}\otimes X^{1/2}) D_p + \frac{n}{2} diag\{\textrm{vech}(I_p)\} H_p (X^{1/2}\otimes X^{1/2}) D_p \\ &=& -\frac{n}{2} \left[ 2I_{p(p+1)/2} -diag\{\textrm{vech}(I_p)\}\right] H_p (X^{1/2}\otimes X^{1/2}) D_p <0,
\end{eqnarray*}
since both $2I_{p(p+1)/2} -diag\{\textrm{vech}(I_p)\}$ and $H_p (X^{1/2}\otimes X^{1/2})D_p$ are positive definite matrices.
To prove the second part of the theorem, we write for simplicity $X=\widehat{X}^{1/2}$ so that from (\ref{mode}) we have $AX^{-1}S+SX^{-1}A=2nX$. For $Y \neq X$, with $AY^{-1}S+SY^{-1}A=2nY$, let $\textrm{vec}(.)$ denote the column stacking operator of an unrestricted matrix. Then $A(X^{-1}-Y^{-1})S+S(X^{-1}-Y^{-1})A=2n(X-Y)$ or $Y^{-1}\otimes X^{-1} \textrm{vec}(X-Y)=-2n(S\otimes A+A\otimes S)^{-1}\textrm{vec}(X-Y)$, which leads to the contradiction that $Y^{-1}\otimes X^{-1}$ is a negative definite matrix. Thus $X=Y$ and so the solution $\widehat{X}$ of (\ref{mode}) is unique.
\end{proof}
Some comments are in order.
\begin{enumerate}
\item If $S=I_p$, then $GIW$ is reduced to an inverted Wishart distribution $X\sim IW(n,A)$ with mode $\widehat{X}=A/n$ and this satisfies
equation (\ref{mode}).
\item If $S=\lambda A$, for some $\lambda>0$, then equation (\ref{mode}) can be solved analytically as $\widehat{X}=\lambda n^{-1}A^2$. To see this define $\widehat{Y}^2=\widehat{X}$ so that (\ref{mode}) is satisfied for $\widehat{Y}^{-1}=\sqrt{n/\lambda} A^{-1}$.
\item If $AS$ is symmetric (i.e. if $A$ and $S$ commute), then $\widehat{X}=AS/n$; a numerical check of this case is sketched after this list. To prove this first we show that if $A$ and $S$ commute, then $A^{1/2}$ and $S^{1/2}$ commute too. Indeed, commuting symmetric positive definite matrices can be diagonalized by a common orthogonal matrix, and the same matrix diagonalizes their square roots, so $A^{1/2}S^{1/2}=S^{1/2}A^{1/2}$. Now define $\widehat{Y}^2=\widehat{X}$ with $\widehat{Y}=A^{1/2}S^{1/2}/\sqrt{n}$ and then substitute $\widehat{Y}$ into (\ref{mode}), i.e.
$$
A\widehat{Y}^{-1}S+S\widehat{Y}^{-1}A=\sqrt{n}AA^{-1/2}S^{-1/2}S+\sqrt{n}SS^{-1/2}A^{-1/2}A=2\sqrt{n}A^{1/2}S^{1/2}=2n\widehat{Y}.
$$
Note that cases (1) and (2) above are embedded in (3).
\item In the general case we can obtain the solution of (\ref{mode}) numerically, by considering it
as a special case of generalized Sylvester matrix equations (Wu {\it et al.}, 2008).
\end{enumerate}
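
As a complement to case (3) above, the following sketch (our own illustration, assuming Python with \texttt{numpy}; the construction of the commuting pair is arbitrary) verifies numerically that $\widehat{X}=AS/n$ satisfies (\ref{mode}) when $A$ and $S$ commute.
\begin{verbatim}
import numpy as np

def sym_sqrt(M):
    vals, vecs = np.linalg.eigh(M)
    return vecs @ np.diag(np.sqrt(vals)) @ vecs.T

rng = np.random.default_rng(2)
p, n = 3, 20
B = rng.standard_normal((p, p))
B = B @ B.T + p * np.eye(p)                                   # common symmetric p.d. matrix
A = B + 2 * np.eye(p)                                         # polynomials in B commute,
S = 0.5 * B @ B + np.eye(p)                                   # so here A S = S A

X_hat = A @ S / n                                             # candidate mode for case (3)
R = sym_sqrt(np.linalg.inv(X_hat))                            # X_hat^{-1/2}
residual = A @ R @ S + S @ R @ A - 2 * n * sym_sqrt(X_hat)    # difference of the two sides
print(np.max(np.abs(residual)))                               # numerically zero
\end{verbatim}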
The next result proposes a way to obtain the unique solution of (\ref{mode}), avoiding numerical methods.
\begin{thm}\label{th:mode2}
The mode $\widehat{X}$ of Theorem \ref{th:mode} satisfies $\textrm{vec}(\widehat{X}^{-1/2}\otimes \widehat{X}^{-1/2})=c$, where
\begin{gather*}
c=2n(b\otimes B +d\otimes D)(b'b\otimes B^2+b'd\otimes BD +d'b\otimes DB+d'd\otimes D^2)^{-1}\textrm{vec}(I_p),
\end{gather*}
for $b=\textrm{vec}(S)$, $B=I_p\otimes A$, $d=\textrm{vec}(A)$ and $D=I_p\otimes S$.
\end{thm}
\begin{proof}
For simplicity we write $X=\widehat{X}^{1/2}$. Then equation (\ref{mode}) becomes $AX^{-1}SX^{-1}+SX^{-1}AX^{-1}=2nI_p$, which by taking the vectorized operator in both sides, can be written as
\begin{equation}\label{th:mode2:eq1}
(I_p\otimes A)(X^{-1}\otimes X^{-1})\textrm{vec}(S)+(I_p\otimes S)(X^{-1}\otimes X^{-1})\textrm{vec}(A)=2n\textrm{vec}(I_p)
\end{equation}
With $b,B,d,D$ as in the theorem and by taking again the vectorized operator in both sides of (\ref{th:mode2:eq1}) we have
$$
(b'\otimes B+d'\otimes D)\textrm{vec}(X^{-1}\otimes X^{-1})=2n\textrm{vec}(I_p)
$$
Now we can see that the $p^2\times p^4$ matrix $b'\otimes B+d'\otimes D$ is of full rank $p^2$ and so the solution of the above system is given by
$$
\textrm{vec}(X^{-1}\otimes X^{-1}) = 2n (b\otimes B+d\otimes D)\{ (b'\otimes B+d'\otimes D)(b\otimes B+d\otimes D)\}^{-1} \textrm{vec}(I_p) =c,
$$
as required.
\end{proof}
In order to find the mode $\widehat{X}$ using Theorem \ref{th:mode2} we follow these steps: first we calculate $c$, then we extract the matrix $\widehat{X}^{-1/2}\otimes \widehat{X}^{-1/2}$ from $c$, then using the formula $\textrm{vec}(\widehat{X}^{-1})=(\widehat{X}^{-1/2}\otimes \widehat{X}^{-1/2})\textrm{vec}(I_p)$ and rearranging again we find $\widehat{X}^{-1}$, and finally by inverting this matrix we obtain $\widehat{X}$.
However, the above method for the computation of the mode may not be efficient for high dimensional data. Even in low dimensions, as the time series problem we consider in the next section has a sequential application, if we want to use the above procedure for the determination of the mode or if we want to solve the matrix equation of Theorem \ref{th:mode2} using numerical methods, we will have to perform these operations at each time $t$. In our experience this is a heavy computational job, even for relatively short time series. In order to circumvent this difficulty we propose instead to use the estimator
\begin{equation}\label{eq:est:tilda}
\widetilde{X}=\frac{AS+SA}{2n},
\end{equation}
which is motivated by noting that for cases (1)-(3) above, we have $\widetilde{X}=\widehat{X}$. Even when $AS\neq SA$, we have $ASA^{-1}\approx S$; the approximation here refers to matrix similarity, meaning that the matrices $ASA^{-1}$ and $S$ have the same determinant, the same trace, the same eigenvalues, and the same spectrum (see Theorem 21.3.1 of Harville, 1997, p. 525). Thus $AS$ and $SA$ can be thought of as being close to each other and estimator \eqref{eq:est:tilda} basically suggests considering the average of $AS/n$ and $SA/n$. Moreover, a close look at $AS$ and $SA$ shows that the diagonal elements of $AS$ and $SA$ are the same and that in \eqref{eq:est:tilda} the off-diagonal elements of $\widetilde{X}$ are averages of the off-diagonal elements of $AS/n$ and $SA/n$. When $AS\approx SA$, and for large $n$, the determinant $|\widetilde{X}|$ is close to $E|X|$, which from (b) of Theorem \ref{lem2a} is equal to $\prod_{i=1}^p(n-p-2-i)^{-1}|A||S|$.
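
To get a feel for the determinant comparison just mentioned, the following sketch (our own illustration, assuming Python with \texttt{numpy}; the matrices and the values of $n$ are arbitrary) computes the ratio of $|\widetilde{X}|$ to the exact value of $E|X|$ for a nearly commuting pair; the ratio approaches one as $n$ grows, in line with the large-$n$ qualification above.
\begin{verbatim}
import numpy as np

p = 2
A = np.array([[2.0, 0.3], [0.3, 1.0]])
S = A + 0.05 * np.array([[0.0, 1.0], [1.0, 0.0]])     # S nearly commutes with A

for n in (20, 100, 500):
    X_tilde = (A @ S + S @ A) / (2 * n)
    E_detX = np.linalg.det(A) * np.linalg.det(S) \
             / np.prod([n - p - 2 - i for i in range(1, p + 1)])
    print(n, np.linalg.det(X_tilde) / E_detX)         # ratio tends to 1 as n grows
\end{verbatim}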
It is easy to verify that if $X\sim GIW_p(n,A,S)$ and we consider the partition
$$
X=\left[\begin{array}{cc} X_{11} & 0 \\ 0 & X_{22}\end{array}\right], \quad A=\left[\begin{array}{cc} A_{11} & 0 \\ 0 & A_{22}\end{array}\right] \quad \textrm{and} \quad S=\left[\begin{array}{cc} S_{11} & 0 \\ 0 & S_{22}\end{array}\right],
$$
where $X_{11}$, $A_{11}$, $S_{11}$ are $q\times q$ and $X_{22}$, $A_{22}$, $S_{22}$ are $(p-q)\times (p-q)$ covariance matrices, then $X_{11}\sim GIW_q(n,A_{11},S_{11})$ and $X_{22}\sim GIW_{p-q}(n,A_{22},S_{22})$, for $1\leq q\leq p-1$. The verification of this is just by noting that $|X|=|X_{11}||X_{22}|$ and $\textrm{tr}(AX^{-1/2}SX^{-1/2})=\textrm{tr}(A_{11}X_{11}^{-1/2}S_{11}X_{11}^{-1/2})+\textrm{tr}(A_{22}X_{22}^{-1/2}S_{22}X_{22}^{-1/2})$ so that $p(X)=p(X_{11})p(X_{22})$. From the latter it follows that $X_{11}$ and $X_{22}$ are independent. This result has the following interesting consequence. Suppose that $X_{ii}$ are independent $q_i\times q_i$ random covariance matrices, each following an inverse Wishart distribution $X_{ii}\sim IW_{q_i}(n,k_iS_i)$, for some $k_i>0$ and some covariance matrix $S_i$, with $i=1,\ldots,s$. Then the random matrix $X=\textrm{block diag}(X_{11},\ldots,X_{ss})$ (the block diagonal matrix of $X_{11},\ldots,X_{ss}$) follows the GIW distribution, $X\sim GIW_p(n,A,S)$, where $A=\textrm{block diag}(k_1I_{q_1},\ldots,k_sI_{q_s})$, $S=\textrm{block diag}(S_{1},\ldots,S_{s})$ and $p=q_1+\cdots+q_s$. In words, the GIW distribution with the above block diagonal structure on $A$ and $S$ is generated from the superposition of independent inverse Wishart matrices. This gives an interpretation of the matrices $A$ and $S$ in GIW, as well as a useful model-building approach when we wish to consider the superposition of local level models as in West and Harrison (1997, Chapter 6).
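As a small illustration of this block-diagonal construction, the following Python sketch (with illustrative values of $k_i$ and $S_i$ that are not taken from the text) simply assembles the matrices $A$ and $S$.
\begin{verbatim}
import numpy as np
from scipy.linalg import block_diag

# Independent blocks X_ii ~ IW_{q_i}(n, k_i S_i) stack into
# X = block diag(X_11, ..., X_ss) ~ GIW_p(n, A, S).
k = [0.5, 2.0]                           # illustrative k_i > 0
S_blocks = [np.eye(2), 2.0 * np.eye(3)]  # illustrative S_i
A = block_diag(*[ki * np.eye(Si.shape[0]) for ki, Si in zip(k, S_blocks)])
S = block_diag(*S_blocks)
p = A.shape[0]                           # p = q_1 + ... + q_s = 5
\end{verbatim}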
It is also easy to verify that if $X\sim GIW_p(n,A,S)$, then the
density of $Y=X^{-1}$ is
$$
p(Y) = \frac{|A|^{(n-p-1)/2} |S|^{(n-p-1)/2}
|Y|^{(n-2p-2)/2}}{2^{p(n-p-1)/2}\Gamma_p\{(n-p-1)/2\} }
\exp(\textrm{tr}(-AY^{1/2}SY^{1/2}/2)).
$$
This distribution generalizes the Wishart distribution; we will say
that $Y$ follows the {\it generalized Wishart} distribution with
$n-p-1$ degrees of freedom, covariance matrices $A^{-1}$ and
$S^{-1}$, and we will write $Y\sim GW_p(n-p-1,A^{-1},S^{-1})$. We can observe that when $A=I_p$ or $S=I_p$, the above density reduces
to a Wishart density. Again, our terminology and notation should not
cause any confusion with other generalizations of the Wishart
distribution proposed in the literature (Letac and Massam, 2004).
The next theorem is a generalization of the convolution of the
Wishart and multivariate singular beta distributions (Uhlig, 1994; D\'{i}az-Garc\'{i}a and Guti\'{e}rrez, 1997; Srivastava, 2003).
For some integers $m,n$, let the $p\times p$ random matrix $B$ follow the
multivariate singular beta distribution with $m$ and $n$ degrees of
freedom, respectively, writing $B\sim B_p(m/2,n/2)$. The singularity of the beta distribution considered here is due to $n$ being
smaller than $p$, meaning that $I_p-B$ is singular (a similar argument can be made for the singularity of $B$, if $m<p$), and thus the density of $B$ does not exist with respect to the Lebesgue measure on the space of the $p\times p$ real-valued covariance matrices, but it does exist
on the Stiefel manifold. Under this consideration the density function of $B$ is obtained by replacing the determinant of $I_p-B$ (which is zero) with the product of the positive eigenvalues of $I_p-B$; for more details the reader is referred to the above references.
\begin{thm}\label{th:uhlig}
Let $p$ and $n$ be positive integers and let $m>p-1$. Let
$H\sim GW_p(m+n,A,S)$ and $B\sim B_p(m/2,n/2)$ be independent, where
$A$ and $S$ are known covariance matrices. Then
$$
G\equiv \mathcal{U}(H)'B\mathcal{U}(H) \sim GW_p(m,A,S),
$$
where $\mathcal{U}(H)$ denotes the upper triangular matrix of the
Choleski decomposition of $H$.
\end{thm}
In order to prove this theorem, we prove the somewhat more
general result in the following lemma.
\begin{lem}\label{lemma:uhlig}
Let $A_1\sim W_p(m,I_p)$, $A_2=\sum_{j=1}^nY_jY_j'$, with
$Y_j\sim N_p(0,I_p)$ and $H\sim GW_p(m+n,A,S)$, where $A_1$, $Y_j$
$(j=1,\ldots,n)$ and $H$ are independent. Define $C=A_1+A_2$,
$B=\{\mathcal{U}(C)'\}^{-1}A_1\{\mathcal{U}(C)\}^{-1}$, $G=\mathcal{U}(H)'B\mathcal{U}(H)$ and
$D=H^{1/2}AH^{1/2}-G^{1/2}AG^{1/2}$. Then $C,G,D$ are independent and $C\sim W_p(m+n,I_p)$, $G\sim GW_p(m,A,S)$,
$D=\sum_{j=1}^n Z_jZ_j'$, where $Z_j\sim N_p(0,S)$.
\end{lem}
\begin{proof}
The proof mimics the proof of Uhlig (1994) for the Wishart case. Define
$Z_j=\mathcal{U}(H^{1/2}AH^{1/2})'\{\mathcal{U}(C)'\}^{-1}Y_j$ and note that
$D=\sum_{j=1}^nZ_jZ_j'$. From Theorem \ref{lem1} and from Uhlig
(1994), the Jacobian $J(A_1,H,Y_1,\ldots,Y_n\rightarrow
C,G,Z_1,\ldots,Z_n)$ is $|H|^{-n/2}|C|^{n/2}|A|^{-(p+1)/2}$. Then,
the joint density function of $A_1,H,A_2$ can be written as
\begin{gather*}
p(A_1,H,A_2) = p(A_1) p(H) p(A_2) = \left\{ 2^{pm/2}
\Gamma_p(m/2)\right\}^{-1} \exp(\textrm{tr}(-A_1/2)) |A_1|^{(m-p-1)/2} \\
\times \left[ 2^{p(m+n)/2} \Gamma_p\{(m+n)/2\}|A|^{(m+n)/2} |S|^{(m+n)/2}
\right]^{-1} \exp(\textrm{tr}(-A^{-1}H^{1/2}S^{-1}H^{1/2}/2))
|H|^{(m+n-p-1)/2} \\ \times (2\pi)^{-pn/2} \exp(\textrm{tr}(-A_2/2))
|A|^{-(p+1)} \\ = \left[
2^{p(m+n)/2} \Gamma_p\{(m+n)/2\}\right]^{-1}
\exp(\textrm{tr}(-C/2))|C|^{(m+n-p-1)/2} \\ \times \left\{ 2^{pm/2}
\Gamma_p(m/2)|A|^{m/2}|S|^{m/2} \right\}^{-1}
\exp(\textrm{tr}(-A^{-1}G^{1/2}S^{-1}G^{1/2}/2)) |G|^{(m-p-1)/2} \\ \times
(2\pi)^{-pn/2} |S|^{-n/2} \exp(\textrm{tr}(-S^{-1}D/2)) |A|^{(n-p-1)/2} =
p(C)p(G)p(D),
\end{gather*}
where $|A_1|=|C||B|$, $H^{1/2}AH^{1/2}=G^{1/2}AG^{1/2}+D$ and
$|H|=|G|/|B|$ are used.
\end{proof}
\begin{proof}[Proof of Theorem \ref{th:uhlig}]
The proof is immediate from Lemma \ref{lemma:uhlig}, after noticing
that with the definition of the multivariate singular beta
distribution (Uhlig, 1994), $B\sim B_p(m/2,n/2)$.
\end{proof}
\section{Bayesian inference}\label{s2s2}
\subsection{Estimation forward in time}
In this section we consider estimation for model (\ref{model2}). The
prior distributions of $\theta_0|\Sigma$ and $\Sigma$ are chosen to
be Gaussian and generalized inverse Wishart respectively, i.e.
\begin{gather}
\theta_0|\Sigma\sim
N_p(m_0,\Sigma^{1/2}P_0\Sigma^{1/2})\quad\textrm{and}\quad
\Sigma\sim GIW_p(n_0,Q^{-1},S_0),\label{eq8}
\end{gather}
for some known parameters $m_0$, $P_0=p_0I_p$, $n_0>0$ and
$S_0$. $Q$ is the limit of $Q_t=P_{t-1}+W+I_p$, where $P_{t-1}$ is
a covariance matrix. The next result states that the limit of
$\{P_t\}$ (and hence the limit of $\{Q_t\}$) exists and provides
the value of this limit as a function of $\phi$ and $W$.
\begin{thm}\label{lem:limit}
If $P_{t}=R_{t}(R_{t}+I_p)^{-1}$, with $R_{t}=\phi^2P_{t-1}+W$,
where $W$ is a positive definite matrix, and considering the prior
$P_0=p_0I_p$, for a known constant $p_0>0$, then
$$
P=\lim_{t\rightarrow\infty}P_t=\frac{1}{2\phi^2}\left[
\left\{(W+(1-\phi^2)I_p)^2+4\phi^2W\right\}^{1/2}-W-(1-\phi^2)I_p\right],
$$
for $\phi\neq 0$ and $P=W (W+I_p)^{-1}$, for $\phi=0$.
\end{thm}
Before we prove this result we give some background on the limit of
covariance matrices. Let $A\geq 0$ denote that the matrix $A$ is
non-negative definite, let $A>0$ denote that the matrix $A$ is
positive definite and let $A>B$ denote that the matrices $A>0$ and
$B>0$ satisfy $A-B>0$. If $A>B$, then $A^{-1}<B^{-1}$ (Horn and
Johnson, 1999). The sequence of symmetric matrices $\{P_t\}$ is said
to be monotonic and bounded if the scalar sequence
$\{\ell'P_t\ell\}$ is monotonic and bounded, for all real-valued vectors $\ell$. If for all $t$ the
matrix $P_t$ is a non-negative definite matrix, then the above
definition implies that $\{P_t\}$ is bounded if there exist matrices
$L$ and $U$ satisfying $L\leq P_t\leq U$ and monotonic if
$P_{t+1}<P_t$ or $P_{t+1}>P_t$, for any $t>t_0$ and $t_0>0$. If $\{P_t\}$ is both
monotonic and bounded, then it is convergent, since the sequence
$\{\ell'P_t\ell\}$ is also monotonic and bounded and so it is
convergent. The following two lemmas are needed in order to prove
the limit of Theorem \ref{lem:limit}.
\begin{lem}\label{lem3}
If $P_{t}=R_{t}(R_{t}+I_p)^{-1}$, with $R_{t}=\phi^2 P_{t-1}+W$,
where $W$ is a positive definite matrix and $\phi$ is a real number,
then the sequence of $p\times p$ positive matrices $\{P_t\}$ is
convergent.
\end{lem}
\begin{proof}
First suppose that $\phi=0$. Then $R_t=W$, for all $t$, and so
$P_t=W (W+I_p)^{-1}$, which of course is convergent.
Suppose now that $\phi\neq 0$. It suffices to prove that $\{P_t\}$
is bounded and monotonic. Clearly, $0\leq P_{t}$ and since
$\phi^2>0$ and $W$ is positive definite $0<P_{t}$, for all $t>0$.
Since $(R_{t}+I_p)^{-1}>0$,
$(R_{t}+I_p-R_{t})(R_{t}+I_p)^{-1}>0\Rightarrow
P_{t}=R_{t}(R_{t}+I_p)^{-1}<I_p$ and so $0<P_{t}<I_p$. For the
monotonicity it suffices to prove that, if $P_{t-1}^{-1}>
P_{t-2}^{-1}$ (respectively $P_{t-1}^{-1}< P_{t-2}^{-1}$), then
$P_{t}^{-1}> P_{t-1}^{-1}$ (respectively $P_{t}^{-1}< P_{t-1}^{-1}$).
From $P_{t-1}^{-1}> P_{t-2}^{-1}$ we have $P_{t-1}<
P_{t-2}\Rightarrow R_{t}< R_{t-1}\Rightarrow R_{t}^{-1}>
R_{t-1}^{-1}\Rightarrow
P_{t}^{-1}-P_{t-1}^{-1}=R_{t}^{-1}-R_{t-1}^{-1}> 0$, since
$P_{t}^{-1}=(R_{t}+I_p)R_{t}^{-1}=I_p+R_{t}^{-1}$. With an analogous
argument we have that, if $P_{t-1}^{-1}< P_{t-2}^{-1}$, then
$P_{t}^{-1}-P_{t-1}^{-1}< 0$, from which the monotonicity follows.
\end{proof}
\begin{lem}\label{lem3post}
Let $\{P_t\}$ be the sequence of Lemma \ref{lem3} and suppose that
$P_0=p_0I_p$, for a known constant $p_0>0$. Then, with $W$ as in
Lemma \ref{lem3}, the limiting matrix
$P=\lim_{t\rightarrow\infty}P_t$ commutes with $W$.
\end{lem}
\begin{proof}
First we prove that if $P_{t-1}$ commutes with $W$, then $P_{t}$
also commutes with $W$. Indeed from $P_{t}=(\phi^2 P_{t-1}+W)(\phi^2
P_{t-1}+W+I_p)^{-1}$ we have that $P_{t}^{-1}=I_p+(\phi^2
P_{t-1}+W)^{-1}$ and then
$$
P_{t}^{-1}W^{-1}=W^{-1}+(\phi^2W P_{t-1}+W^2)^{-1}=W^{-1}+(\phi^2
P_{t-1}W+W^2)^{-1}=W^{-1}P_{t}^{-1}
$$
which implies that $W
P_{t}=(P_{t}^{-1}W^{-1})^{-1}=(W^{-1}P_{t}^{-1})^{-1}=P_{t}W$ and so
$P_{t}$ and $W$ commute. Because $P_0=p_0I_p$, $P_0$ commutes with
$W$ and so by induction it follows that the sequence of matrices
$\{P_t, t\geq 0\}$ commutes with $W$. Since
$P=\lim_{t\rightarrow\infty}P_t$ exists (Lemma \ref{lem3}) we have
$$
PW=\lim_{t\rightarrow\infty}(P_tW)=\lim_{t\rightarrow\infty}(W
P_t)=W P
$$
and so $P$ commutes with $W$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{lem:limit}]
If $\phi=0$, then from Lemma \ref{lem3} we have $P_t=P=W(W+I_p)^{-1}$.
Let $\phi\neq 0$; from Lemma \ref{lem3} we have that $P$ exists and from Lemma
\ref{lem3post} we have that $P$ and $W$ commute. From
$P_{t}=(\phi^2P_{t-1}+W)(\phi^2P_{t-1}+W+I_p)^{-1}$ we have
$P=(\phi^2P+W)(\phi^2P+W+I_p)^{-1}$ from which we get the equation
$P^2+\phi^{-2}P(W+I_p-\phi^2I_p)-\phi^{-2}W=0$. Now since $P$ and
$W$ commute we can write
\begin{gather*}
P^2+\phi^{-2}P(W+I_p-\phi^2I_p)-\phi^{-2}W=0 \Rightarrow
P^2+\frac{1}{2\phi^2}P(W+(1-\phi^2)I_p) \\
+\frac{1}{2\phi^2}(W+(1-\phi^2)I_p)
P+\frac{1}{4\phi^4}(W+(1-\phi^2)I_p)^2-
\frac{1}{4\phi^4}(W+(1-\phi^2)I_p)^2-\phi^{-2}W=0\\
\Rightarrow \left(
P+\frac{1}{2\phi^2}(W+(1-\phi^2)I_p)\right)^2=\frac{1}{4\phi^4}(W+(1-\phi^2)I_p)^2+\phi^{-2}W
\\ \Rightarrow P=\frac{1}{2\phi^2}\left[
\left\{(W+(1-\phi^2)I_p)^2+4\phi^2W\right\}^{1/2}-W-(1-\phi^2)I_p\right],
\end{gather*}
after rejecting the negative definite root.
\end{proof}
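As a quick numerical illustration (not part of the proof), the limit can be checked by iterating the recursion of Lemma \ref{lem3} and comparing with the closed form of Theorem \ref{lem:limit}; a minimal Python sketch with hypothetical names, assuming $\phi\neq 0$ and a positive definite $W$, is the following.
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def limit_P_iterative(phi, W, p0=1.0, iters=1000):
    # Iterate P_t = R_t (R_t + I_p)^{-1}, R_t = phi^2 P_{t-1} + W (Lemma lem3).
    p = W.shape[0]
    P = p0 * np.eye(p)
    for _ in range(iters):
        R = phi ** 2 * P + W
        P = R @ np.linalg.inv(R + np.eye(p))
    return P

def limit_P_closed_form(phi, W):
    # Closed form of Theorem lem:limit, via a matrix square root.
    p = W.shape[0]
    M = W + (1 - phi ** 2) * np.eye(p)
    return (sqrtm(M @ M + 4 * phi ** 2 * W) - M) / (2 * phi ** 2)
\end{verbatim}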
Theorem \ref{lem:limit} generalizes relevant convergence results
for the univariate random walk plus noise model (Anderson and
Moore, 1979, p. 77; Harvey, 1989, p. 119). Following a similar
argument as in Harvey (1989, p. 119), we can see that the speed of
convergence is exponential; for a related discussion on the rate
of convergence the reader is referred to Chan {\it et al.} (1984).
Let $Y\sim t_p(n,m,P)$ denote that the $p$-dimensional random
vector $Y$ follows a multivariate Student $t$ distribution with $n$
degrees of freedom, mean $m$ and scale or spread matrix $P$ (Gupta
and Nagar, 1999, Chapter 4). Let $y^t=(y_1,\ldots,y_t)$ be the information set at time $t$, comprising
data up to time $t$, for $t=1,2,\ldots$. The next result gives an approximate
Bayesian algorithm for the posterior distributions of $\theta_t$
and $\Sigma$ as well as for the one-step forecast distribution of
$y_t$.
\begin{thm}\label{th3}
In the local level model (\ref{model2}), let the initial priors for
$\theta_0|\Sigma$ and $\Sigma$ be specified as in equation
(\ref{eq8}). The one-step
forecast and posterior distributions are approximately given, for
each $1\leq t\leq N$, as follows:
\begin{enumerate}
\item [(a)] One-step forecast at time $t$: $y_t|y^{t-1}\sim
t_p(n_{t-1},\phi m_{t-1},S_{t-1})$, where $S_{t-1}$,
$m_{t-1}$ are known at time $t-1$. \item [(b)] Posteriors at time $t$:
$\theta_{t}|\Sigma,y^{t}\sim
N_p(m_{t},\Sigma^{1/2}P_{t}\Sigma^{1/2})$ and
$\Sigma|y^{t}\sim GIW_p(n_t+2p,Q^{-1},S_{t})$, with
$m_{t}=\phi m_{t-1}+A_{t}e_{t}$, $P_{t}=(\phi^2P_{t-1}+W)(\phi^2P_{t-1}+W+I_p)^{-1}$,
$e_{t}=y_{t}-\phi m_{t-1}$, $S_{t}=S_{t-1}+e_{t}e_{t}'$,
where $A_{t}=\Sigma^{1/2}P_{t}\Sigma^{-1/2}$ is approximated
by $A_{t}=\widetilde{\Sigma}^{1/2} P \widetilde{\Sigma}^{-1/2}$, with
$\widetilde{\Sigma}$ the estimator of
$\Sigma|y^{t}$ as in (\ref{eq:est:tilda}), and
$Q_t=P_{t-1}+W+I_p$ being approximated by its limit
$Q=P+W+I_p$, where $P$ is given by Theorem \ref{lem:limit}.
\end{enumerate}
\end{thm}
\begin{proof}
The proof is inductive in the distribution of $\Sigma|y^t$. Assume
that given $y^{t-1}$ the distribution of $\Sigma$ is
$\Sigma|y^{t-1}\sim GIW(n_{t-1}+2p,Q^{-1},S_{t-1})$.
From the Kalman filter, conditionally on $\Sigma$, the one-step
forecast density of $y_t$ is
$$
y_t|\Sigma,y^{t-1}\sim
N_p(\phi m_{t-1},\Sigma^{1/2}Q_t\Sigma^{1/2})\approx
N_p(\phi m_{t-1},\Sigma^{1/2}Q\Sigma^{1/2}),
$$
where $m_{t-1}$, $Q_t$ and $Q$ are as in the theorem.
Given $y^{t-1}$ the joint distribution of $y_t$ and $\Sigma$ is
\begin{eqnarray}
p(y_t,\Sigma|y^{t-1})&=&p(y_{t}|\Sigma,y^{t-1})p(\Sigma|y^{t-1})\nonumber\\
&=&c_1\frac{\exp(\textrm{tr}(-Q^{-1}\Sigma^{-1/2}
(e_te_t'+S_{t-1})\Sigma^{-1/2}/2))}{|\Sigma
|^{(n_{t-1}+1+2p)/2}},\label{eq9}
\end{eqnarray}
where
$$
c_1=\frac{|S_{t-1}|^{(n_{t-1}+p-1)/2}}{(2\pi)^{p/2}2^{p(n_{t-1}+p-1)/2}|Q|^{(n_{t-1}+p)/2}
\Gamma_p\{(n_{t-1}+p-1)/2\}}.
$$
The one-step forecast density of $y_t$ is
\begin{eqnarray*}
p(y_t|y^{t-1})&=&\int_{\Sigma >0} p(y_t,\Sigma |y^{t-1})\,d\Sigma \\
&=& c_1 \int_{\Sigma >0} |\Sigma |^{-(n_{t-1}+2p+1)/2}
\exp(\textrm{tr}(-Q^{-1}\Sigma^{-1/2}(e_te_t'+S_{t-1}
)\Sigma^{-1/2}/2)) \,d\Sigma \\ &=& c_1 \frac{2^{p(n_{t-1}+p)/2}
\Gamma_p\{(n_{t-1}+p)/2\} }{ |Q|^{-(n_{t-1}+p)/2}
|e_te_t'+S_{t-1}|^{(n_{t-1}+p)/2} } \\ &=&
\frac{\Gamma_p\{(n_{t-1}+p)/2\} } { \pi^{p/2}
\Gamma_p\{(n_{t-1}+p-1)/2\} } |S_{t-1}|^{(n_{t-1}+p-1)/2}
|e_te_t'+S_{t-1}|^{-(n_{t-1}+p)/2},
\end{eqnarray*}
and so $y_{t}|y^{t-1}\sim t_p (n_{t-1}, \phi m_{t-1}, S_{t-1})$, as
required.
Now we derive the distribution of $\Sigma |y^{t}$. Applying Bayes
theorem we have
$$
p(\Sigma |y^t)=\frac{p(y_t|\Sigma ,y^{t-1})p(\Sigma
|y^{t-1})}{p(y_{t}|y^{t-1})}
$$
and from equation (\ref{eq9}) we have
$$
p(\Sigma |y^t)=c_2|\Sigma
|^{-(n_t+2p)/2}\exp(\textrm{tr}(-Q^{-1}\Sigma^{-1/2}S_t \Sigma^{-1/2}/2))
$$
and $n_t=n_{t-1}+1$, where $S_t$ is as in the theorem and the
proportionality constant is $c_2=c_1/p(y_{t}|y^{t-1})$, not
depending on $\Sigma$. Thus $\Sigma |y^t\sim
GIW_p(n_t+2p,Q^{-1},S_t)$ as required. Conditionally on $\Sigma$,
the distribution of $\theta_t$ follows directly from application of
the Kalman filter and this provides the stated posterior
distribution of $\theta_t$.
\end{proof}
From Theorem \ref{th3}, if $W=I_p$, the posterior distribution of
$\Sigma$ reduces to an inverse Wishart, i.e. $\Sigma
|y^t\sim IW_p(n_t+2p,Q^{-1}S_{t})$, where now $Q$ is a scalar multiple of $I_p$.
In this case Theorem \ref{th3} reduces to the well known variance
learning of the random walk plus noise model of West and Harrison
(1997). For the application of Theorem \ref{th3}, one can use any estimator of
$\Sigma|y^t$, e.g. its mode $\widehat{\Sigma}$; here, following the motivation of $\widetilde{\Sigma}$ on page \pageref{eq:est:tilda}, we have used $\widetilde{\Sigma}$, and
this is the estimator we have tested in Section \ref{examples}.
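For concreteness, a minimal sketch of one recursion of Theorem \ref{th3} is given below (Python, with hypothetical names); it assumes that $\widetilde{\Sigma}$ has already been computed from the current GIW posterior parameters via (\ref{eq:est:tilda}) and that $P$ is the limit of Theorem \ref{lem:limit}.
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def filter_step(y_t, m_prev, S_prev, n_prev, phi, P, Sigma_tilde):
    # One approximate filtering recursion of Theorem th3 (illustrative sketch).
    e_t = y_t - phi * m_prev                      # one-step forecast error
    root = sqrtm(Sigma_tilde)                     # Sigma_tilde^{1/2}
    A_t = root @ P @ np.linalg.inv(root)          # approximation to the gain A_t
    m_t = phi * m_prev + A_t @ e_t                # posterior mean of theta_t
    S_t = S_prev + np.outer(e_t, e_t)             # updated scale matrix
    n_t = n_prev + 1                              # updated degrees of freedom
    return m_t, S_t, n_t
\end{verbatim}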
\subsection{Choice of hyperparameters}\label{section:hyperp}
The hyperparameter $\phi$ can be chosen \emph{a priori}, e.g. the
application may require a local level model so that $\phi=1$. The covariance matrix $W$ can be optimized by indirect maximization of the log-likelihood
function, which using the prediction decomposition can be expressed
as
\begin{eqnarray}
\ell(W;y^N) &=& \log p(y_1)+\sum_{t=2}^N \log p(y_t|y^{t-1}) \nonumber \\ &=&
c+\frac{1}{2}\sum_{t=1}^N (c_1+t)\log |S_{t-1}| -\frac{1}{2}\sum_{t=1}^N (c_1+t+1)\log |e_te_t'+S_{t-1}|\nonumber \\ &=& c +\frac{(c_1+1)\log |S_0|}{2} -\frac{(c_1+N+1)\log |S_N|}{2}, \label{logl}
\end{eqnarray}
where $c=\sum_{t=1}^N \log \Gamma_p\{(n_0+p+t-1)/2\} - \sum_{t=1}^N \log \Gamma_p\{(n_0+p+t-2)/2\} - 2^{-1}Np \log \pi $ and $c_1=n_0+p-2$. Maximizing the above likelihood is equivalent to minimizing $\log|S_N|=\log|S_0+\sum_{t=1}^Ne_te_t'|$, given that $S_0$ is bounded. In this objective function to be minimized, $W$ is involved in $e_t$ via the recursion of $m_t$. To simplify notation we consider $\phi=1$. From Theorem \ref{lem:limit}, $W$ is obtained as a function of $P$ as $W=(I_p-P)^{-1}P^2$, given that $I_p-P$ is non-singular. Thus we propose finding the $W$ which maximizes the log-likelihood function, conditional on a value of $\Sigma$; this conditioning is proposed for simplification reasons. Instead of working with $W$, we work with $P$, because from the above relationship we can calculate $W$ from $P$. Even with these simplifications in place, the $P$ that minimizes $\log|S_N|$, given $\Sigma$, cannot be obtained by direct differentiation. Thus we use a Newton-Raphson method to achieve this.
We start by writing $m_t$ from Theorem \ref{th3} recursively as
\begin{eqnarray*}
m_t&=&m_{t-1}+\Sigma^{1/2}P\Sigma^{-1/2}(y_t-m_{t-1})=(I_p-\Sigma^{1/2}P\Sigma^{-1/2})m_{t-1}+\Sigma^{1/2}P\Sigma^{-1/2}y_t \\ &=& \Sigma^{1/2}(I_p-P)^t\Sigma^{-1/2}m_0+\sum_{i=0}^{t-1}\Sigma^{1/2}(I_p-P)^iP\Sigma^{-1/2}y_{t-i}
\end{eqnarray*}
and then by ignoring the first term (which is justified if $m_0=0$ or if the eigenvalues of $I_p-P$ lie inside the unit circle), we obtain $\log|S_N|$ as
\begin{eqnarray}
\log|S_N|&=&\log\bigg|S_0+\sum_{t=1}^Ny_ty_t'-\sum_{t=2}^N\sum_{i=0}^{t-2}y_ty_{t-1-i}'\Sigma^{-1/2}P(I_p-P)^i\Sigma^{1/2} \nonumber \\ && -\sum_{t=2}^N\sum_{i=0}^{t-2}\Sigma^{1/2}(I_p-P)^iP\Sigma^{-1/2}y_{t-1-i}y_t' \nonumber \\ && +\sum_{t=2}^N \sum_{i=0}^{t-2} \Sigma^{1/2}(I_p-P)^iP
\Sigma^{-1/2}y_{t-1-i}y_{t-1-i}'\Sigma^{-1/2}P(I_p-P)^i\Sigma^{1/2} \bigg|. \label{sn1}
\end{eqnarray}
Since neither $W$ nor $P$ depends on $\Sigma$, we proceed by estimating $P$ independently of $\Sigma$, as if $\Sigma$ were proportional to $I_p$. With this in place, using the chain rule of matrix differentiation (Harville, 2007, \S15.7), we obtain the first partial derivative of \eqref{sn1} as $$
\frac{\partial \log|S_N|}{\partial p_{kl} } =\textrm{tr}\left[\left(\frac{\partial \log |S_N|}{\partial S_N}\right)' \frac{\partial S_N}{\partial p_{kl}}\right] =\textrm{tr}\left[\left(2S_N^{-1}-\textrm{diag}(s_{11,N}^{(-1)},\ldots,s_{pp,N}^{(-1)})\right)(\Lambda_N+\Lambda_N')\right],
$$
where $\Lambda_N=\sum_{t=2}^N\sum_{i=0}^{t-2}(y_t-(I_p-P)^iPy_{t-1-i})y_{t-1-i}'K_i$, $K_i$ is the partial derivative of $P(I_p-P)^i$, $S_N^{-1}=\big(s_{kl,N}^{(-1)}\big)$, and
$\partial \log|S_N| / \partial p_{kl}$ is the $(k,l)$ element of the matrix derivative $\partial \log|S_N| / \partial P$, for $k,l=1,\ldots,p$. For the calculation of $K_i$ we can see that $K_i=K_{i-1}(I_p-P)-P(I_p-P)^{i-1}K_0$, where $K_0=\partial P/\partial p_{kl}$, which, defining $u_k$ as the $p$-dimensional column vector of zeros with a unit in the $k$th place, is equal to $u_ku_k'$ when $k=l$ and to $u_ku_l'+u_lu_k'$ when $k\neq l$ (Harville, 1997, p. 300). The recursion for $K_i$ follows by applying the product rule of differentiation to $P(I_p-P)^i$ and writing $K_i$ as a function of $K_{i-1}$.
For the second derivative we have
$$
\frac{\partial^2 \log|S_N|}{\partial p_{kl}\partial p_{rs}}=\textrm{tr}\left(S_N^{-1}\frac{\partial^2S_N}{\partial p_{kl}\partial p_{rs}}\right) - \textrm{tr}
\left(S_N^{-1}\frac{\partial S_N}{\partial p_{kl}}S_N^{-1}\frac{\partial S_N}{\partial p_{rs}}\right)
$$
From before we know $\partial S_N /\partial p_{kl}=\Lambda_N+\Lambda_N'$ and so $\partial^2S_N/\partial p_{kl}\partial p_{rs}=\partial \Lambda_N/\partial p_{rs}+\partial \Lambda_N'/\partial p_{rs}$. Thus
$$
\frac{\partial\Lambda_N}{\partial p_{rs}}=\sum_{t=2}^N\sum_{i=0}^{t-2}\left[y_t-(I_p-P)^iPy_{t-1-i}\right]y_{t-1-i}'\frac{\partial K_i}{\partial p_{rs}} -\sum_{t=2}^N\sum_{i=0}^{t-2}K_i'y_{t-1-i}y_{t-1-i}'K_i,
$$
where from the recursion of $K_i$ we have
$$
\frac{\partial K_i}{\partial p_{rs}} = \frac{\partial K_{i-1}}{\partial p_{rs}} (I_p-P)-2K_{i-1}K_0
$$
with $\partial K_0/\partial p_{rs}=0$. This completes the first and second partial derivatives of $\log|S_N|$ with respect to elements of $P$. Then the Newton-Raphson method at each time $t$ and for iterations $j=1,2,\ldots$, approximates the true minimum $\hat{P}$ by $\hat{P}^{(j)}$, using the formula
\begin{equation}\label{NR1}
\textrm{vec}(\hat{P}^{(j)}) = \textrm{vec}(\hat{P}^{(j-1)}) - \left( \frac{\partial^2 \log |S_N| }{ \partial \textrm{vec}(P) \partial \textrm{vec}(P)' } \right)^{-1}\bigg|_{P=\hat{P}^{(j-1)}} \frac{\partial \log |S_N|}{\partial \textrm{vec}(P)} \bigg|_{P=\hat{P}^{(j-1)}},
\end{equation}
where $\hat{P}^{(0)}=I_p$ and $\textrm{vec}(\cdot)$ denotes the column stacking operator of an unrestricted matrix. Under some regularity conditions (Shumway and Stoffer, 2006, \S6.3), the algorithm converges to the true minimum $\hat{P}$. Convergence is assumed at iteration $j$, for which $\parallel \hat{P}^{(j)}-\hat{P}^{(j-1)}\parallel_2\leq Tol$, for some small tolerance value $Tol$, where $\parallel \cdot \parallel_2$ denotes the Frobenius norm or distance; similar stopping rules are discussed in Shumway and Stoffer (2006, \S6.3). Note that typically not many iterations are needed for convergence, although this may depend on the specific application and on the dimension of the data; for the examples in the next section we have used $Tol=0.001$.
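The iteration can be organized as in the following minimal Python sketch (hypothetical names), where grad\_fn and hess\_fn are user-supplied functions returning the first and second derivatives of $\log|S_N|$ with respect to $\textrm{vec}(P)$, computed from the expressions above.
\begin{verbatim}
import numpy as np

def newton_raphson_P(grad_fn, hess_fn, p, tol=0.001, max_iter=50):
    # grad_fn(P): gradient of log|S_N| w.r.t. vec(P), shape (p*p,)
    # hess_fn(P): Hessian of log|S_N| w.r.t. vec(P), shape (p*p, p*p)
    P = np.eye(p)                                    # starting value P^(0) = I_p
    for _ in range(max_iter):
        step = np.linalg.solve(hess_fn(P), grad_fn(P))
        P_new = P - step.reshape(p, p, order='F')    # Newton-Raphson update (NR1)
        if np.linalg.norm(P_new - P, 'fro') <= tol:  # Frobenius-norm stopping rule
            return P_new
        P = P_new
    return P
\end{verbatim}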
An alternative approach is to consider indirect optimization of the conditional log-likelihood function using the Expectation-Maximization (EM) algorithm. A recursive or on-line version of the EM algorithm is possible (not discussed further in this paper), but the typical off-line EM algorithm described in Koopman (1993) and Shumway and Stoffer (2006, \S6.3) would prevent the application of real-time estimation.
Finally, we discuss the specification of $W$ using discount factors (West and Harrison, 1997, Chapter 6). According to this, we introduce $p$ (not necessarily distinct) discount factors $\delta_1,\ldots,\delta_p$, forming a discount matrix $\Delta=\textrm{diag}(\delta_1,\ldots,\delta_p)$. The idea of this specification is that the prior covariance matrix $\text{Var}(\theta_t|\Sigma,y^{t-1})=\phi^2\Sigma^{1/2}\Delta^{-1/2}P\Delta^{-1/2}\Sigma^{1/2}$ is increased compared to $\text{Var}(\theta_{t-1}|\Sigma,y^{t-1})$, reflecting the increased uncertainty or loss of information going from $t-1$ to $t$, prior to observing $y_t$. From the above, the expression of $P$ in Theorem \ref{lem:limit} and by equating $\phi^2P+W=\phi^2\Delta^{-1/2}P\Delta^{-1/2}$, we obtain the matrix equation $(W^2+4W)^{1/2}+W=\Delta^{-1/2}(W^2+4W)^{1/2}\Delta^{-1/2}-\Delta^{-1/2}W\Delta^{-1/2}$. For $\delta_1=\ldots=\delta_p=\delta$ (known as single discounting), the solution of this equation is $W=\delta^{-1}(1-\delta)^2I_p$, which is proportional to $I_p$, and so, in this case, the GIW distribution reduces to an IW, as discussed in section \ref{s2s1}. In the general case, it can be shown that the solution of the above matrix equation yields a diagonal $W$ (but not necessarily proportional to $I_p$), i.e. $W=\textrm{diag}(\delta_1^{-1}(1-\delta_1)^2,\ldots,\delta_p^{-1}(1-\delta_p)^2)$. However, there still remains the problem of the specification of the discount factors. A commonly adopted approach is to include the discount factors in the likelihood function and to maximize it with respect to them, but this takes us back to the indirect maximization procedure. In this paper we favour the Newton-Raphson methodology as described above, but we do recognize its limitations, in particular regarding high dimensional data, where the inversion of the Hessian matrix may be difficult or even impossible. In such cases a suitable approach involving discount factors may be favoured.
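For example, under the discount specification $W$ is built directly from the discount factors, as in the following small sketch (Python, hypothetical names).
\begin{verbatim}
import numpy as np

def discount_to_W(deltas):
    # W = diag(delta_i^{-1} (1 - delta_i)^2); single discounting
    # (all delta_i equal) gives W proportional to I_p.
    d = np.asarray(deltas, dtype=float)
    return np.diag((1.0 - d) ** 2 / d)

# e.g. discount_to_W([0.95, 0.95, 0.95]) is approximately 0.00263 * I_3
\end{verbatim}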
\subsection{Time-varying covariance matrices}
So far our discussion has been focused on situations where $\Sigma$, the conditional covariance matrix of $y_t$, is time-invariant. However, in many situations, in particular in finance, this is not the case. For example, consider that $y_t$ denotes the logarithm of the price of $p$ assets, or the logarithm of $p$ foreign exchange rates. It is evident that model (\ref{model2}) would not be an appropriate model to consider as $\Sigma$, interpreted here as the volatility of $y_t$, should be time-varying. We can thus extend model (\ref{model2}) by replacing $\Sigma$ by a time-varying $\Sigma_t$ and including a stochastic process to describe the evolution of $\Sigma_t$. For the volatility covariance matrix $\Sigma_t$, we propose a
multiplicative stochastic law of its precision $\Sigma_t^{-1}$, i.e.
\begin{equation}\label{evol}
\Sigma_{t}^{-1}=k\mathcal{U}(\Sigma_{t-1}^{-1})'B_t\mathcal{U}(\Sigma_{t-1}^{-1}),\quad
t=1,\ldots,N,
\end{equation}
where $k=\{\delta(1-p)+p\}\{\delta(2-p)+p-1\}^{-1}$, for a discount
factor $0<\delta\leq 1$, and $\mathcal{U}(\Sigma_{t-1}^{-1})$ denotes the unique
upper triangular matrix based on the Choleski decomposition of
$\Sigma_{t-1}^{-1}$. Here $B_t$ is a $p\times p$ random matrix
following the multivariate singular beta distribution
$B_t\sim B_p(m/2,1/2)$, where $m=\delta(1-\delta)^{-1}+p-1$. The motivation behind the above evolution has been discussed in the literature, see e.g. Uhlig (1994, 1997). Here $k,\delta,m$ are chosen so that a random walk type evolution for $\Sigma_t^{-1}$ is preserved, i.e. $E(\Sigma_t^{-1}|y^{t-1})=E(\Sigma_{t-1}^{-1}|y^{t-1})$. This model is a generalization of Shephard's local scale models (Shephard, 1994), which were suggested as an alternative to integrated GARCH modelling and which exploit the gamma/beta convolution proposed by Smith and Miller (1986).
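As a small numerical illustration of these constants (with hypothetical values of $\delta$ and $p$, chosen only for the example):
\begin{verbatim}
def volatility_evolution_constants(delta, p):
    # k = {delta(1-p)+p}{delta(2-p)+p-1}^{-1} and m = delta/(1-delta) + p - 1
    # from the evolution (evol).
    k = (delta * (1 - p) + p) / (delta * (2 - p) + p - 1)
    m = delta / (1 - delta) + p - 1
    return k, m

# e.g. volatility_evolution_constants(0.95, 5) gives k = 1.2/1.15 (about 1.043)
# and m = 23
\end{verbatim}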
If we combine Theorems \ref{th:uhlig} and \ref{th3} we can obtain the full estimation of the above model: in brief, Theorem \ref{th:uhlig} is responsible for the prior estimation or prediction of $\Sigma_t$ given data $y^{t-1}$, while Theorem \ref{th3} is responsible for the posterior estimation of $\Sigma_t$ given $y^t$, for the estimation of $\theta_t$ and for the prediction of $y_t$. Next we give the result, the proof of which is immediate from the discussion above.
\begin{thm}\label{th3:vol}
In the local level model (\ref{model2}) with a time-varying volatility covariance matrix $\Sigma=\Sigma_t$ and evolution (\ref{evol}), let the initial priors for
$\theta_0|\Sigma_0$ and $\Sigma_0$ be specified as in equation
(\ref{eq8}). The one-step
forecast and posterior distributions are approximately given, for
each $1\leq t\leq N$, as follows:
\begin{enumerate}
\item [(a)] One-step forecast at time $t$: $\Sigma_{t}|y^{t-1}\sim GIW_p(\delta
(1-\delta)^{-1}+2p,Q^{-1},k^{-1} S_{t-1})$ and $y_{t}|y^{t-1}\sim
t_p(\delta(1-\delta)^{-1},m_{t-1},k^{-1}S_{t-1})$, where
$k=(\delta(1-p)+p)(\delta(2-p)+p-1)^{-1}$ and $\delta$, $S_{t-1}$,
$m_{t-1}$ are known at time $t-1$. \item [(b)] Posteriors at time $t$:
$\theta_{t}|\Sigma_{t},y^{t}\sim
N_p(m_{t},\Sigma_{t}^{1/2}P_{t}\Sigma_{t}^{1/2})$ and
$\Sigma_{t}|y^{t}\sim GIW((1-\delta)^{-1}+2p,Q^{-1},S_{t})$, with
$m_{t}=\phi m_{t-1}+A_{t}e_{t}$, $P_{t}=(\phi^2P_{t-1}+W)(\phi^2P_{t-1}+W+I_p)^{-1}$,
$e_{t}=y_{t}-\phi m_{t-1}$, $S_{t}=k^{-1}S_{t-1}+e_{t}e_{t}'$,
where $A_{t}=\Sigma_{t}^{1/2}P_{t}\Sigma_{t}^{-1/2}$ is approximated
by $A_{t}=\widetilde{\Sigma}_t^{1/2} P \widetilde{\Sigma}_t^{-1/2}$, with
$\widetilde{\Sigma}_t$ the estimator of
$\Sigma_{t}|y^{t}$ as in (\ref{eq:est:tilda}), and
$Q_t=P_{t-1}+W+I_p$ being approximated by its limit
$Q=P+W+I_p$, where $P$ is given by Theorem \ref{lem:limit}.
\end{enumerate}
\end{thm}
Some comments are in order. First note that if we set $\delta=1$, then $k=1$ and $B_t=I_p$ with probability 1, and Theorem \ref{th3:vol} is very similar to Theorem \ref{th3}, the only difference being that the finite $n_t$ in Theorem \ref{th3} becomes $\infty$ in Theorem \ref{th3:vol} and this means that the $t$ distribution of $y_t|y^{t-1}$ practically becomes a normal distribution under Theorem \ref{th3:vol}. Another point refers to the suitability of the evolution (\ref{evol}) and the related local level model. Multivariate stochastic volatility models that allow for $y_t$ and/or for $\Sigma_t$ to follow vector or matrix autoregressive processes have been proposed in the literature (Chib {\it et al.}, 2006; Philipov, 2006; Maasoumi and McAleer, 2006), but they have to rely on simulation-based methods, typically on Markov chain Monte Carlo or on particle filters, and they may not be suitable for real-time prediction of high dimensional data. Such a demand has recently become more and more prevalent as hedge funds and other investment boutiques require reliable automatic forecasting procedures that are suitable for algorithmic statistical arbitrage (Montana {\it et al.}, 2009). In this direction the above algorithm offers an option, which extends a series of papers in this area, see e.g. Quintana and West (1987), Quintana {\it et al.} (2003), Soyer and Tanyeri (2006), Carvalho and West (2007) and references therein.
\section{Illustrations}\label{examples}
In this section we report on Monte Carlo experiments, in order to compare the performance of the proposed algorithm with existing estimation procedures, and also we present an application to multivariate control charting.
\subsection{Monte Carlo experiments}\label{montecarlo}
We have generated realizations of observation and evolution covariance matrices $\Sigma$ and $\Omega$ according to the following scheme: for each covariance matrix, first we generate independently $p(p-1)/2$ correlations from a beta distribution and we multiply them by $+1$ or $-1$ generated by a Bernoulli distribution with probability 1/2. Next we generate independently $p$ variances from a gamma distribution, and then we use the correlation decomposition of the covariance matrix, i.e. $\Sigma=V C V$, where $V$ is the diagonal matrix with elements the square roots of the $p$ simulated variances and $C$ is the correlation matrix with off-diagonal elements the $p(p-1)/2$ simulated correlations and with units in the main diagonal.
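A minimal sketch of this generation scheme is the following (Python, hypothetical names; the beta and gamma parameters shown are illustrative choices, since the text does not fix them, and in practice one would also check the resulting matrix for positive definiteness).
\begin{verbatim}
import numpy as np

def random_covariance(p, rng):
    # Signed beta correlations, gamma variances, then Sigma = V C V.
    r = rng.beta(2.0, 2.0, size=p * (p - 1) // 2)   # correlations in (0, 1)
    s = rng.choice([-1.0, 1.0], size=r.size)        # random signs, probability 1/2
    C = np.eye(p)
    C[np.triu_indices(p, k=1)] = s * r
    C = C + C.T - np.eye(p)                         # correlation matrix C
    V = np.diag(np.sqrt(rng.gamma(shape=2.0, scale=1.0, size=p)))
    return V @ C @ V

# usage: random_covariance(10, np.random.default_rng(1))
\end{verbatim}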
With this scheme in place we have performed a Monte Carlo study, over a set of 100 simulated $p$-variate time series vectors according to the local level model (\ref{model2}) with $\phi=1$ and for three time series lengths $N=100,500,1000$. We have considered $p=10,50,100$, covering from low to relatively high dimensional time series, and for their estimation we contrast the algorithm of the previous section (this model is referred to as GIW) with a local level model where the observation covariance matrix is estimated via an inverted Wishart distribution (this model is referred to as IW), a local level model where both $\Sigma$ and $\Omega$ are estimated using the EM algorithm of Shumway and Stoffer (1982) (this model is referred to as EM), and the local level model using the true simulated values of $\Sigma$ and $\Omega$ (this model is referred to as Kalman). For the IW model $\Sigma$ was estimated by assuming an inverse Wishart prior and $\Omega=w\Sigma$, where $w$ was estimated by direct maximum likelihood methods as in Harvey (1986, 1989). For all models we used the priors \eqref{eq8} with $m_0=(0,\ldots,0)'$ and $p_0=1000$, the latter of which reflects a weakly informative or vague prior specification for $\theta_0$. Also, for both the IW and GIW models we used the prior \eqref{eq8} for $\Sigma$, the difference being that when $W$ is a covariance matrix (for the GIW) this prior becomes $\Sigma\sim GIW_p(n_0,Q^{-1},S_0)$, while when $W=w$ is a scalar variance (for the IW), this prior reduces to $\Sigma\sim IW_p(n_0,Q^{-1}S_0)$; for both cases $n_0=1/100$ and $S_0=I_p$. For the estimation of the GIW, at each time $t$, $W$ is estimated by the Newton-Raphson method of section \ref{section:hyperp}.
Table \ref{table1} reports on the average mean of squared standardized one-step forecast errors (MSSE), which, if the fit is perfect, should be equal to one. Here, due to the high dimensions considered, we witness the quality of the estimation of $\Sigma$ and $\Omega$ via the accuracy of the one-step forecast covariance matrix for each model. First of all we note that the values of the MSSE for the Kalman model are nearly equal to one and clearly this model is the benchmark or the gold standard here, but artificial as in practice we will not know these covariance matrices. We observe that the GIW produces consistent results, outperforming the IW, and producing MSSE close to the gold standard. In comparison with the EM we observe that at low dimensions $p$ and for small values of $N$, the GIW is better, although as $N$ increases the performance of EM is improved and for $N=1000$ the EM model produces marginally better results than the GIW. The improved performance of the EM model at $N=1000$ is expected as it is well known that, under certain conditions, the EM estimators of $\Sigma$ and $\Omega$ converge to their true values (Shumway and Stoffer, 2006, \S6.3). But as $p$ increases we observe a deterioration in the performance of the EM model as compared to the GIW; in particular for $p=100$ and $N=100$ we still obtain reasonable performance with the GIW model, while both the EM and IW models clearly overestimate $\Sigma$ and $\Omega$. Here it should be noted that in our setup both the IW and the EM models are aimed at off-line application, since they need the whole data path $y^N$ for the computation of maximum likelihood estimates. For the EM algorithm we used the convergence criterion used in Shumway and Stoffer (2006, p. 345) that convergence is assumed when the likelihood function does not change by more than 0.001. However, this has resulted in slower algorithms, in particular at the higher dimensions considered here. For a single model, the algorithm of the GIW ran in 1 minute and 31 seconds (for $p=10$ and $N=100$) and in 3 minutes and 19 seconds (for $p=100$ and $N=1000$); the respective results for the other models were, for the IW 41 seconds (for $p=10$ and $N=100$) and 1 minute and 43 seconds (for $p=100$ and $N=1000$), for the EM 1 minute and 47 seconds (for $p=10$ and $N=100$) and 3 minutes and 53 seconds (for $p=100$ and $N=1000$), and for the Kalman 11 seconds (for $p=10$ and $N=100$) and 55 seconds (for $p=100$ and $N=1000$). The experiments were run on an Intel(R) Celeron(R) M processor 1.60GHz, 504MB of RAM and the software we used was the freeware R, version 2.9.1, downloadable from {\tt http://www.r-project.org/}.
\begin{table}[t]
\caption{Performance of the algorithm of the previous section (GIW), against the local level models using the inverse Wishart distribution (IW), using estimates of the covariances matrices from the EM algorithm (EM), and using the true covariance matrices (Kalman). Shown are the Monte Carlo averages of the mean of standardized one-step ahead forecast errors for each model and their respective standard errors, given in brackets.}\label{table1}
\begin{center}
\begin{tabular}{|rr|rrrr|}
\hline $p$ & $N$ & GIW & IW & EM & Kalman \\ \hline 10 & 100 & 0.983 (0.002) & 0.963 (0.003) & 0.972 (0.050) & 0.999 (0.000) \\ & 500 & 0.995 (0.002) & 0.975 (0.003) & 0.996 (0.003) & 0.998 (0.000) \\ & 1000 & 0.997 (0.000) & 0.988 (0.001) & 0.998 (0.001) & 1.001 (0.000) \\ \hline \hline 50 & 100 & 0.969 (0.001) & 0.911 (0.001) & 1.060 (0.003) & 0.998 (0.001) \\ & 500 & 0.985 (0.004) & 1.045 (0.004) & 1.066 (0.002) & 1.002 (0.000) \\ & 1000 & 1.011 (0.003) & 1.039 (0.001) & 1.009 (0.002) & 1.002 (0.000) \\ \hline \hline 100 & 100 & 0.969 (0.001) & 0.899 (0.001) & 1.160 (0.002) & 0.995 (0.001) \\ & 500 & 0.972 (0.003) & 1.074 (0.002) & 1.082 (0.001) & 1.003 (0.000) \\ & 1000 & 1.005 (0.001) & 1.032 (0.001) & 1.004 (0.003) & 1.001 (0.000) \\ \hline
\end{tabular}
\end{center}
\end{table}
To mark the quality of the estimation for the GIW model, Figure \ref{fig1a} plots the Frobenius distance of the estimated $\Sigma$ at each time point ($t=1-1000$) from its true simulated value, for $p=10,50,100$. We note that in all three cases the algorithm achieves an upper bound of 0.008 quite quickly. The distances of $p=50$ and $p=100$ are much more volatile in comparison to the distance of $p=10$, but all eventually converge. The means of the three distances were 0.0050, 0.0053 and 0.0054, and their respective variances were $6.25\times 10^{-7}$, $2.16\times 10^{-6}$ and $2.60\times 10^{-6}$. The respective distances of the estimated $W$ follow a similar pattern to that of Figure \ref{fig1a} and their accurate estimation appears to be an important element of the successful estimation of $\Sigma$.
\begin{figure}
\caption{Frobenius distance over time of the GIW model using simulation from a single iteration. Shown are the three distances
corresponding to simulations with $p=10,50,100$.}
\label{fig1a}
\end{figure}
\subsection{Multivariate control charts}\label{controlcharts}
In this section we consider a multivariate control charting scheme for autocorrelated data (Bersimis {\it et al.}, 2007). Typically multivariate control charts focus on the detection of signals of multivariate processes, which may exhibit out of control behaviour, defined as deviating from some prespecified target mean vector and a target covariance matrix. The Hotelling $T$ chart is the standard control chart as it is capable of detecting out of control signals of the joint effects of the variables of interest.
\begin{figure}
\caption{Production time series data.}
\label{fig1}
\end{figure}
However, many authors have pointed out that in the presence of autocorrelation, this chart is a poor performer (Vargas, 2003). As a result, over the past decade researchers have focused considerable efforts on the development of control charts for multivariate time series data (Bersimis {\it et al.}, 2007). Pan and Jarrett (2004) point out the importance of accurate estimation of the observation covariance matrix and they study the effects its mis-specification has on the detection of out of control signals. These authors suggest using the $T$ chart as above, after estimating the covariance matrix by deploying some suitable time series method.
The multivariate local level model is a natural candidate model for the above situation, as it is a generalization of the popular Shewhart-Deming model, according to which the observed data $y_t$ are modelled as noisy versions of a constant level $\theta$, or $y_t=\theta+\epsilon_t$, where $\epsilon_t\sim N_p(0,\Sigma)$. This model is valid for serially uncorrelated data, but it is clearly not suitable for time series data. In this context, the motivation for the local level model is that the level of the time series at time $t$, $\theta_t$, follows a slow evolution described by a random walk. Using this model and considering an inverted Wishart distribution for $\Sigma$, Triantafyllopoulos (2006) proposes that first the one-step forecast distribution is sequentially produced, then the logarithm of the Bayes factors of the current forecast distribution against a prespecified target distribution forms a new univariate non-Gaussian time series, for which a control chart is designed using the modified exponentially weighted moving average (EWMA) control chart. If the process is on target, then the log Bayes factor (West and Harrison, 1997, \S11.4) will fluctuate around zero and the EWMA control chart will not signal significant deviations from this target. If, on the other hand, the EWMA signals out of control points, this will in turn signal deviations of the original process from its target. In the above reference, the target distribution is chosen to be a multivariate normal distribution, but, depending on experimentation and historical information, other distributions may be selected. As in Pan and Jarrett (2004) and in other studies, a critical stage in the application of this method is that the estimate of $\Sigma$ and the forecast of $y_t$ are accurate, so that the fitted model is a good representation of the generating process.
We consider data from an experiment of production of a plastic mould, the quality of which is
centered on the control of temperature and its variation. For this
purpose five measurements of the temperature of the mould have
been taken, for $276$ time points; for more details on the set up of this
experiment the reader is referred to Pan and Jarrett (2004). From Figure \ref{fig1}, which is a plot of the data, we can argue that this data possesses a local level type evolution. We have applied the above control charting methodology using the local level model with
the GIW distribution. For the model fit we note that the MSSE is $\textrm{MSSE}=[0.952~ 0.966~ 0.985~ 1.110~ 0.994]'$, which marks a much improved performance compared to Pan and Jarrett (2004) and to Triantafyllopoulos (2006); similar improved results (not shown here) apply considering other measures of goodness of fit, e.g. the mean of squared forecast errors and the mean absolute deviation. For the design of the control chart, with a small smoothing factor equal to 0.05 we use the EWMA chart, whose control limits are modified from the usual ones to accommodate both the non-Gaussianity of the Bayes factor series and its autocorrelation. Figure \ref{fig2} shows the EWMA control chart, from which we can see the improved behaviour: in Phase I, where the model is applied and tested, we see that all EWMA points are within the control limits, and in Phase II we see that the model signals a clear out of control behaviour. In contrast to the studies above, our model manages to avoid having out of control signals in Phase I, which reflects the more accurate estimation of the observation covariance matrix and of the overall fit. In Phase II it shows a deterioration of the process, which is not signaled in Pan and Jarrett (2004) as very few out of control points are detected in that study. We also note that this deterioration cannot be detected or suspected by either looking at the time series plot in Figure \ref{fig1} or performing univariate control charts for each of the individual series. For this data set, applying the $T$ control chart after estimating $\Sigma$ using our method and Pan and Jarrett (2004) again favoured our proposal (results not shown here). Finally we report that the improved performance of our chart in Phase I is evident, by noting that the control limits are much tighter as compared to those in Triantafyllopoulos (2006) and thus the deployed fitted model here is a more accurate representation of the data.
\begin{figure}
\caption{Modified EWMA control chart for the log Bayes factor of the Production process, using a smoothing
parameter $\lambda=0.05$; the dotted horizontal
line indicates the target mean $-0.141$ and the dashed horizontal lines indicate the
control limits; the vertical line separates Phase I (for $t=1-180$) and Phase II
(for $t=181-276$).}
\label{fig2}
\end{figure}
\section{Conclusions}\label{discussion}
In this paper we propose on-line estimation for the multivariate local level model with the focus placed on the estimation of the covariance matrix of the innovations of the model. We criticize the application of the inverse Wishart prior distribution in this context as restrictive and often lacking empirical justification. Motivated by the conjugate model, we generalize the inverse Wishart distribution to account for wider application, but still manage to achieve approximate conjugacy, which is useful for real-time estimation. This approach results in fast recursive estimation, which resembles the Kalman filter but allows for covariance learning too. It is shown that our proposal performs well in Monte Carlo experiments and in comparison with existing methods. An application of multivariate control charts is used to illustrate the proposed methodology. Future research efforts will be devoted to the application of this methodology to high dimensional data.
\end{document}
\begin{document}
\title{On the dissipative dynamics of entangled states in coupled-cavity quantum electrodynamics arrays}
\author{Imran M. Mirza, and Adriana S. Cruz}
\affiliation{Macklin Quantum Information Sciences, \\
Department of Physics, Miami University, Oxford, Ohio 45056, USA}
\email{[email protected]}
\begin{abstract}
We examine the dissipative dynamics of N00N states with an arbitrary photon number $\mathcal{N}$ in two architectures of fiber-coupled optical ring resonators (RRs) interacting with two-level quantum emitters. One architecture consists of a two-way cascaded array of emitter-cavity systems, while in the other architecture we consider two fiber-coupled RRs each coupled to multiple dipole-dipole interacting (DDI) quantum emitters (QEs). Our focus in this paper is to study how an initially prepared multiple-excitation atomic N00N state transfers to the RRs and then how rapidly it decays in these open cavity quantum electrodynamics (CQED) setups, while varying the emitter-cavity coupling strengths, emitter-cavity detuning, and backscattering from cavity modes. We present a general theoretical formalism valid for arbitrary numbers of QEs, RRs, and excitation number $\mathcal{N}$ in the N00N state for both schemes. As examples, we discuss the cases of single- and two-excitation N00N states and report the comparison of our findings in both schemes. As one of the main results, we conclude that the array scheme tends to store the N00N state for longer times while the DDI scheme supports higher fidelity values. The results of this study may find applications in designing new multiparty entanglement-based protocols in quantum metrology and quantum lithography.
\end{abstract}
\maketitle
\section{Introduction}
Nonclassical states of light and matter play a decisive role in the processing of information in a quantum-mechanical manner. Despite a broad range of proven and proposed applications in quantum information processing \cite{nielsen2010quantum, bouwmeester2000physics}, quantum computation \cite{bennett2000quantum} and quantum metrology \cite{giovannetti2006quantum}, these nonclassical states exhibit an extremely fragile behavior when exposed to the surrounding environment. This openness to the outside environment results in a fast decay and (in most of the situations) in an irrecoverable loss of information stored in these states. Among disparate types of nonclassical states, N00N states \cite{boto2000quantum,sanders1989quantum} represent a unique kind of genuine many-body entangled state with two superimposed orthogonal components. The N00N state is generally expressed as
\begin{equation}
\ket{\Psi}=\frac{1}{\sqrt{2}}\left(\ket{\mathcal{N},0}+e^{i\varphi}\ket{0,\mathcal{N}}\right),
\end{equation}
where $0$ and $\mathcal{N}$ represent zero and $\mathcal{N}$ excitations in the N00N state, and $\varphi$ is a relative phase between the two orthogonal state components.
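For instance, a N00N state vector can be written down explicitly in a truncated two-mode Fock basis, as in the following short sketch (Python, hypothetical names; the truncation dim must exceed $\mathcal{N}$).
\begin{verbatim}
import numpy as np

def noon_state(N, phi, dim):
    # |psi> = (|N,0> + e^{i phi} |0,N>) / sqrt(2) in a two-mode Fock basis
    # truncated at dim levels per mode.
    ket = lambda n: np.eye(dim)[n]      # Fock state |n>
    psi = (np.kron(ket(N), ket(0))
           + np.exp(1j * phi) * np.kron(ket(0), ket(N))) / np.sqrt(2)
    return psi                          # normalized state vector of length dim**2
\end{verbatim}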
Remarkable applications of N00N states have already been found in quantum optical lithography \cite{d2001two}, quantum metrology \cite{joo2011quantum}, precision measurements \cite{kok2002creation}, multiparticle entanglement based quantum interferometry \cite{hyllus2012fisher} and quantum-enhanced information processing (in general) \cite{gisin2007quantum}.
Consequently, different experimental demonstrations and theoretical proposals have recently been carried out for the successful generation of N00N states in diverse systems. Some captivating examples include: fast generation of photonic N00N states with two artificial atoms in two microwave cavities \cite{su2014fast, merkel2010generation}, a three-photon N00N state generation scheme based on spontaneous parametric down-conversion \cite{kim2009three}, creating high fidelity N00N states through mixing classical and quantum light \cite{afek2010high}, production of atomic N00N states via phase super-resolution measurements \cite{chen2010heralded} and realization of mechanical N00N states in microcavity optomechanics \cite{ren2013single}.
Once generated, a crucial question is how fast a multi-photon N00N state decays, given the time requirement set by the quantum information protocol. In this paper, we address this question by analyzing and comparing two setups to transfer the emitter-generated multi-excitation N00N states to RRs (or optical cavities). One scheme is a two-way cascaded Jaynes-Cummings (JC) array \cite{hartmann2006strongly,greentree2006quantum,mirza2013single,mirza2015nonlinear,mirza2015bi} and the other scheme consists of multiple DDI emitters in two fiber-coupled RRs. To the best of our knowledge, such a study using these particular architectures has not been reported before in the present context.
\begin{figure*}
\includegraphics[width=6.8in,height=1.2in]{Fig1.pdf}
\captionsetup{
format=plain,
margin=1em,
justification=raggedright,
singlelinecheck=false
}
\caption{Architecture-I: An array of $N$ emitter-cavity systems and two output photodetectors. Due to the two-way cascaded coupling all $\hat{a}_{2n-1}$ (and similarly $\hat{a}_{2n}$) modes can interact with each other in a uni-directional manner, where $n=1,2,\ldots,N$. The fiber time delay between two consecutive emitter-cavity systems is $\tau=L/c$, where $L$ is the distance between two cavities and $c$ is the group velocity of light in the fiber. For simplicity, one can either consider all emitter-cavity systems identical or can assume a mirror-symmetric situation (mirror lying in the middle of the array with an even number of emitter-cavity subsystems). The detectors count photons in the two output modes, described by the continua annihilation operators $\hat{a}_{out}$ and $\hat{b}_{out}$. For further details regarding the system see Sec.~II(A).}\label{Fig1}
\end{figure*}
In our models, no external source of single-photons is included rather excited atoms coupled with their respective cavities serve as the sources of the single photons. The produced photons are then transferred from one emitter-cavity system to the other through the fiber route. For realistic treatment, we have included the effects of different decoherence mechanisms (including the photon leakage from the cavity mirrors and spontaneous emission from QEs) on the time evolution of obtained N00N states. To this end, a quantum jump approach (QJA) combined with the input-output theory \cite{gardiner1985input} for cascaded quantum systems is employed to incorporate uni-directional coupling between the consecutive cavity modes. Furthermore, we utilize the quantum state fidelity \cite{nielsen2010quantum} as a measure to keep track of the decay of the generated N00N state.
We find that, in general, the JC array scheme can sustain the N00N states for longer times, whereas the scheme with DDI emitters coupled to the RRs yields the N00N state with higher fidelity. In both schemes, we find that under strong emitter-cavity coupling the fidelity manifests oscillatory behavior originating from Rabi oscillations. We also notice that, with an increase in the excitation number from $\mathcal{N}=1$ to $\mathcal{N}=2$, the maximum fidelity achieved decreases markedly in both schemes (ranging from a 50\% reduction to almost an order of magnitude).
The paper is organized as follows. In Sec.~II, we outline the model of our JC array scheme. We also present the dissipative dynamics in the same section through the quantum Langevin equations \cite{gardiner2004quantum} and the quantum trajectory/jump method \cite{carmichael1993open, daley2014quantum}. In Sec.~III we introduce the setup for scheme-II involving quantum emitters that are directly coupled through the DDI. Next, in Sec.~IV we report the main findings of our study by comparing the time-evolution and maximum fidelity achieved for the uni- and bi-photon N00N states in both schemes. Finally, in Section V, we close with a summary of the main findings of this work.
\section{Architecture-I: JC array}
\subsection{System Hamiltonian and Dissipative Dynamics}
The system under consideration is shown in Fig.~\ref{Fig1}. Under the rotating wave approximation, dipole approximation, and Markov approximation (collectively referred to as the quantum white-noise limit \cite{baragiola2012n}), the Hamiltonian for the global system (system, bath, and system-bath interaction) is expressed as
\begin{widetext}
\begin{equation}\label{H}
\begin{split}
& \hat{H}=\hbar\sum^{N}_{n=1}\left[-\omega_{egn}\hat{\sigma}_{n}\hat{\sigma}^{\dagger}_{n}+
\omega_{cn}\hat{a}_{n}^{\dagger}\hat{a}_{n}\right]+\hbar\sum^{N}_{n=1}\left[g_{n}\hat{a}_{2n-1}^{\dagger}\hat{\sigma}_{n}+g^{\ast}_{n}\hat{a}_{2n-1}\hat{\sigma}^{\dagger}_{n}\right]+\hbar\sum^{N}_{n=1}\left[g^{\ast}_{n}\hat{a}_{2n}^{\dagger}\hat{\sigma}_{n}+g_{n}\hat{a}_{2n}\hat{\sigma}^{\dagger}_{n}\right]\\
&+\hbar\sum^{N}_{n=1}\eta \left(\hat{a}^{\dagger}_{2n-1}\hat{a}_{2n}+\hat{a}^{\dagger}_{2n}\hat{a}_{2n-1}\right)+\hbar\int_{-\infty}^{+\infty}\omega_{1} \hat{a}^\dagger(\omega_{1})\hat{a}(\omega_{1})d\omega_{1}
+\hbar\int_{-\infty}^{+\infty}\omega_{2} \hat{b}^\dagger(\omega_{2})\hat{b}(\omega_{2})d\omega_{2}\\
&+i\hbar\sum^{N}_{n=1}\sqrt{\frac{\kappa_{2n-1}}{2\pi}}\int_{-\infty}^{+\infty}\left(\hat{a}_{2n-1}\hat{a}^{\dagger}(\omega_{1})-\hat{a}_{2n-1}^{\dagger}\hat{a}(\omega_{1})\right)d\omega_{1}+i\hbar\sum^{N}_{n=1}\sqrt{\frac{\kappa_{2n}}{2\pi}}\int_{-\infty}^{+\infty}\left(\hat{a}_{2n}\hat{b}^{\dagger}(\omega_{2})-\hat{a}_{2n}^{\dagger}\hat{b}(\omega_{2})\right)d\omega_{2}.
\end{split}
\end{equation}
\end{widetext}
The setup consists of $N$ number of emitter-cavity subsystems coupled through a dispersion-less optical fiber. QEs have a two-level energy structure with $n$th emitter transition frequency and spontaneous emission rate given by $\omega_{egn}$ and $\gamma_{n}$, respectively. Raising (lowering) of $n$th emitter is described by the atomic raising (lowering) operator $\hat{\sigma}^{\dagger}_{n} (\hat{\sigma}_{n})$. The coupling between the $n$th emitter and the $n$th cavity is quantified through the emitter-cavity coupling rate $g_{n}$ while each $n$th cavity is assumed to support a single isolated resonant mode with frequency $\omega_{cn}$. When the $n$th emitter de-excites and a photon is emitted into the cavity, any one of the two counter-propagating modes (described by annihilation operators $\hat{a}_{2n-1}$ and $\hat{a}_{2n}$) can be excited. Due to cavity modes back reflections, the modes $\hat{a}_{2n-1}$ and $\hat{a}_{2n}$ can directly mix --- a process characterized by the backscattering rate $\eta$. Once a cavity is populated with a photon, it can leak the photon into the tapered fiber, and from the $2n-1$ to the $2n$th cavity modes this leakage is described in terms of rates $\kappa_{2n-1}$ and $\kappa_{2n}$, respectively. Dispersion-less fiber is modeled to have two continua of modes. The annihilation of photons in the left (right) direction continuum is described by the operator $\hat{b}(\omega_{2})$ ($\hat{a}(\omega_{1})$) as shown in Fig.~\ref{Fig1}.
The first and second terms on the right-hand side of Eq.~\eqref{H} represent the free Hamiltonian for the QEs and cavity modes, respectively. Here we have chosen the energy of the emitters' ground states to be negative, such that the initial state has zero energy. We have also neglected the zero-point energy in the cavity mode free Hamiltonian. The next terms, with prefactors $g_n$ and $g^\ast_n$, constitute the emitter-cavity interaction Hamiltonian, following the standard Jaynes-Cummings model. The first term on the second line of Eq.~\eqref{H} shows the cavity backscattering Hamiltonian. The last two terms on the same line are the free bath Hamiltonians, while the terms on the last line describe the coupling of the cavity modes with their respective baths. The nonvanishing commutation and anticommutation relations among system and bath operators are given by:
\begin{align}
&\lbrace\hat{\sigma}_{n},\hat{\sigma}^{\dagger}_{n^{'}}\rbrace=\delta_{nn^{'}},~~ [\hat {a}(\omega_{i}),\hat {a}^{\dagger}(\omega_{j})]=\delta(\omega_{i}-\omega_{j})\nonumber,\\
& [\hat {b}(\omega_{i}),\hat {b}^{\dagger}(\omega_{j})]=\delta(\omega_{i}-\omega_{j}),~~\text{and}~~ [\hat {a}_{n},\hat {a}_{n^{'}}^{\dagger}]=\delta_{nn^{'}}.
\end{align}
The imposition of time-reversal symmetry on the Hamiltonian requires all cavity decay rates to be identical (i.e. $\kappa_{1}=\kappa_{2}=\ldots=\kappa_{2N}\equiv\kappa$). Here we nevertheless keep differently subscripted cavity decay rates, simply to keep track of the different coupling terms. Later, in the plots, we further simplify the situation by taking all emitter-cavity subsystems to be identical.
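To make the structure of Eq.~\eqref{H} concrete, the snippet below is a minimal numerical sketch (ours, not part of the original analysis) of the discrete part of the Hamiltonian for a single emitter-cavity subsystem ($N=1$), written with the QuTiP library; the Fock-space cutoff, the parameter values, and the variable names are illustrative assumptions, and we set $\hbar=1$ with a real coupling $g$.
\begin{verbatim}
# Sketch: discrete part of Eq. (H) for N = 1 (one emitter, two counter-propagating modes).
# Assumptions: hbar = 1, real coupling g, truncated Fock space, illustrative parameter values.
from qutip import destroy, qeye, tensor

n_fock = 3                                  # photon-number cutoff per cavity mode
w_eg, w_c, g, eta = 1.0, 1.0, 0.1, 0.05     # transition/cavity frequencies, coupling, backscattering

sm = tensor(destroy(2), qeye(n_fock), qeye(n_fock))   # emitter lowering operator sigma
a1 = tensor(qeye(2), destroy(n_fock), qeye(n_fock))   # cavity mode a_1
a2 = tensor(qeye(2), qeye(n_fock), destroy(n_fock))   # cavity mode a_2

H_sys = (-w_eg * sm * sm.dag()                        # emitter term, ground-state energy negative
         + w_c * (a1.dag()*a1 + a2.dag()*a2)          # free cavity modes
         + g * (a1.dag()*sm + a1*sm.dag())            # Jaynes-Cummings coupling to mode a_1
         + g * (a2.dag()*sm + a2*sm.dag())            # Jaynes-Cummings coupling to mode a_2
         + eta * (a1.dag()*a2 + a2.dag()*a1))         # backscattering between the two modes
\end{verbatim}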
\subsection{Bi-directional quantum Langevin equation and input-output relations}
The interaction of the emitters and intracavity modes with the environment makes our emitter-cavity array essentially an open quantum system. To describe the dynamics of such a quantum system, we start off by working in the Heisenberg picture. Therein, by following the standard procedure \cite{gardiner2004quantum, carmichael1993open} of eliminating the continua from the system dynamics, we can identify the two input and two output operators \cite{gardiner1985input} corresponding to the two fiber continua as
\begin{subequations}
\begin{eqnarray}
\hat{a}_{in}(t):=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\hat{a}_{0}(\omega_{1})e^{-i\omega_{1}(t-t_{0})}d\omega_{1},\\
\hat{b}_{in}(t):=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\hat{b}_{0}(\omega_{2})e^{-i\omega_{2}(t-t_{0})}d\omega_{2}.
\end{eqnarray}
\end{subequations}
\begin{subequations}
\begin{eqnarray}
\hat{a}_{out}(t):=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\hat{a}_{1}(\omega_{1})e^{-i\omega_{1}(t-t_{1})}d\omega_{1},\\
\hat{b}_{out}(t):=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\hat{b}_{1}(\omega_{2})e^{-i\omega_{2}(t-t_{1})}d\omega_{2},
\end{eqnarray}
\end{subequations}
where $t_{0}$ and $t_{1}$ are some arbitrary initial and final times, with $\hat{a}_{0}(\omega_{1})\equiv\hat{a}(\omega_{1};t_{0})$, $\hat{b}_{0}(\omega_{2})\equiv\hat{b}(\omega_{2};t_{0})$, $\hat{a}_{1}(\omega_{1})\equiv\hat{a}(\omega_{1};t_{1})$, and $\hat{b}_{1}(\omega_{2})\equiv\hat{b}(\omega_{2};t_{1})$ being the continua operators conditioned on the past and future times. The temporal evolution of any arbitrary system operator $\hat{X}$ (which may belong to any one of the emitter-cavity systems in the array) can then be expressed through the following quantum Langevin equation
\begin{widetext}
\begin{equation}\label{Lang}
\begin{split}
& \frac{d\hat{X}(t)}{dt}=-\frac{i}{\hbar}[\hat{X}(t),\hat{H}_{sys}]
-\sum_{n=1}^{N}\Bigg\lbrace\left[\hat{X}(t),\hat{a}_{2n-1}^\dagger\right]\left(\frac{\kappa_{2n-1}}{2}\hat{a}_{2n-1}+\sqrt{\kappa_{2n-1}}\hat{a}_{in}(t-(n-1)\tau)\right)
-\Big(\frac{\kappa_{2n-1}}{2}\hat{a}_{2n-1}^{\dagger}+\sqrt{\kappa_{2n-1}}\\
&\times\hat{a}_{in}^{\dagger}(t-(n-1)\tau)\Big)
\left[\hat{X}^\dagger(t),\hat{a}_{2n-1}\right]\Bigg\rbrace-\sum^{N-1}_{n=1}\sum^{N-1}_{m=1}\Bigg\lbrace\sqrt{\kappa_{2n-1}\kappa_{2m+1}}~\delta_{m>n-1}\left[\hat{X}(t),\hat{a}_{2m+1}^{\dagger}\right]\hat{a}_{2n-1}(t-(n-m+1)\tau)\\
&-\sqrt{\kappa_{2n-1}\kappa_{2m+1}}~\delta_{m>n-1}\hat{a}_{2n-1}^{\dagger}(t-(n-m+1)\tau)\left[\hat{X}(t),\hat{a}_{2m+1}\right]\Bigg\rbrace-\sum_{n=1}^{N}\Bigg\lbrace\left[\hat{X}(t),\hat{a}_{2n}^\dagger\right]\Big(\frac{\kappa_{2n}}{2}\hat{a}_{2n}+\sqrt{\kappa_{2n}}\\
&\times\hat{b}_{in}(t-(N-n)\tau)\Big)-\Big(\frac{\kappa_{2n}}{2}\hat{a}_{2n}^{\dagger}+\sqrt{\kappa_{2n}}\hat{b}_{in}^{\dagger}(t-(N-n)\tau)\Big)\left[\hat{X}^\dagger(t),\hat{a}_{2n}\right]\Bigg\rbrace-\sum^{N-1}_{n=1}\sum^{N-1}_{m=1}\Bigg\lbrace\sqrt{\kappa_{2n}\kappa_{2m+2}}~\delta_{m<n-1}\\
&\times\left[\hat{X}(t),\hat{a}_{2m+2}^{\dagger}\right]\hat{a}_{2n}(t-(n-m+1)\tau)-\sqrt{\kappa_{2n}\kappa_{2m+2}}~\delta_{m<n-1}\hat{a}_{2n}^{\dagger}(t-(n-m+1)\tau)\left[\hat{X}(t),\hat{a}_{2m+2}\right]\Bigg\rbrace.
\end{split}
\end{equation}
\end{widetext}
Here $\hat{H}_{sys}$ is the atom-cavity system Hamiltonian, which consists of the discrete terms in Eq.~(\ref{H}), with $\delta_{m\lessgtr n-1}=1$ for all $m\lessgtr n-1$ and zero otherwise. The above Langevin equation is a generalization of the usual cascaded quantum system Langevin equation \cite{gardiner2004quantum} in which we have included a bidirectional coupling among the atom-cavity systems in the array. The JC array is a cascaded quantum network in which the input of one cavity (in one direction) essentially serves as a time-delayed output from the other (nearest-neighbor) cavity, i.e. for any arbitrary $n$th cavity $\hat{a}^{(n)}_{in}(t)=\hat{a}^{(n-1)}_{out}(t-\tau)$ and, similarly, for the opposite direction $\hat{b}^{(n-1)}_{in}(t)=\hat{b}^{(n)}_{out}(t-\tau)$.
Corresponding to the two input field operators $\hat{a}_{in}(t)$ and $\hat{b}_{in}(t)$ appearing in the above Langevin equation, there are two output operators $\hat{a}_{out}(t)$ and $\hat{b}_{out}(t)$, which are related to the time-delayed input and intracavity field operators through the following input-output relations \cite{carmichael2009statistical,gardiner1985input,carmichael1993open}:
\begin{align}
\hat{a}_{out}(t)=& ~\hat{a}_{in}(t-N\tau)+\sqrt{\kappa_{2n-1}}\hat{a}_{2n-1}(t)\nonumber\\
&+\sqrt{\kappa_{2n-3}}\hat{a}_{2n-3}(t-\tau)+...+\sqrt{\kappa_{1}}\hat{a}_{1}(t-N\tau),\\
\hat{b}_{out}(t)=&~\hat{b}_{in}(t)+\sqrt{\kappa_{2}}\hat{a}_{2}(t)\nonumber\\
&+\sqrt{\kappa_{4}}\hat{a}_{4}(t-\tau)+...+\sqrt{\kappa_{2n}}\hat{a}_{2n}(t-N\tau).
\end{align}
From now on, we ignore the retardation/delay times, assuming that the time scale of the system dynamics is much slower than the time required for photons to propagate from one RR to another through the optical fiber. Mathematically, this means working in a regime where $\kappa,g,\eta \gg\tau^{-1}$, with $\tau=L/c$. The non-vanishing commutation relations among input and output operators follow the causality conditions, i.e.
\begin{align}
[\hat{a}_{{\rm in}}(t),\hat{a}_{{\rm in}}^{\dagger}(t')]=\delta(t-t'), ~[\hat{a}_{{\rm out}}(t),\hat{a}_{{\rm out}}^{\dagger}(t')]=\delta(t-t'),
\end{align}
and similarly for the $\hat{b}_{in}(t)$ and $\hat{b}_{out}(t)$ operators. Moreover, throughout this work we assume that initially there are no photons in the fiber, so that the input operators do not contribute to the expectation values of all relevant normally ordered observables and can thus be neglected altogether.
\subsection{Cascaded master equation and the quantum trajectory analysis}
Transforming now to the Schr\"odinger picture with an arbitrary density operator $\hat{\rho}(t)$, and exploiting the identity ${\rm d\langle \hat{X}(t)\rangle/dt=Tr\lbrace\hat{X}(t_{0})d\hat{\rho}(t)/dt\rbrace}$ along with the cyclic property of the trace (${\rm Tr\lbrace...\rbrace}$) operation, one can derive the master equation in the Lindblad form from the Langevin equation given in Eq.~(\ref{Lang}). The resultant master equation describing the time evolution of the system density operator $\hat{\rho}_{s}(t)$ takes the following form
\begin{widetext}
\begin{equation}\label{mas-(1)}
\begin{split}
&\frac{d\hat{\rho}_s(t)}{dt}=\frac{-i}{\hbar}\left[\hat{H}_{sys},\hat{\rho}_s(t)\right]+\sum_{n=1}^{N}\Bigg\lbrace\kappa_{2n-1}\left(\hat{a}_{2n-1}\hat{\rho}_{s}(t)\hat{a}^{\dagger}_{2n-1}-\frac{1}{2}\hat{\rho}_{s}(t)\hat{a}^{\dagger}_{2n-1}\hat{a}_{2n-1}-\frac{1}{2}\hat{a}^{\dagger}_{2n-1}\hat{a}_{2n-1}\hat{\rho}_{s}(t)\right)\Bigg\rbrace \\
&+\sum^{N-1}_{n=1}\sum^{N-1}_{m}\sqrt{\kappa_{2n-1}\kappa_{2m+1}}~\delta_{m>n-1}\left(\left[\hat{a}_{2n-1}\hat{\rho}_{s}(t),\hat{a}^{\dagger}_{2m+1}\right]-\left[\hat{a}_{2m+1},\hat{\rho}_{s}(t)\hat{a}^{\dagger}_{2n-1}\right]\right)+\sum_{n=1}^{N}\Bigg\lbrace\kappa_{2n}\Big(\hat{a}_{2n}\hat{\rho}_{s}(t)\hat{a}^{\dagger}_{2n}\\
&-\frac{1}{2}\hat{\rho}_{s}(t)\hat{a}^{\dagger}_{2n}\hat{a}_{2n}-\frac{1}{2}\hat{a}^{\dagger}_{2n}\hat{a}_{2n}\hat{\rho}_{s}(t)\Big)\Bigg\rbrace+\sum^{N-1}_{n=1}\sum^{N-1}_{m}\sqrt{\kappa_{2n}\kappa_{2m+2}}~\delta_{m<n-1}\left(\left[\hat{a}_{2m+2}\hat{\rho}_{s}(t),\hat{a}^{\dagger}_{2n}\right]-\left[\hat{a}_{2n},\hat{\rho}_{s}(t)\hat{a}^{\dagger}_{2m+2}\right]\right).
\end{split}
\end{equation}
\end{widetext}
Now, to apply the quantum trajectory method (or quantum jump approach (QJA)) \cite{carmichael1993open, molmer1993monte,van2000quantum}, which is an appropriate formalism for the description of cascaded open quantum systems, we rewrite the above master equation in a more suggestive form as
\begin{align}
\frac{d\hat{\rho}_s(t)}{dt}&=\frac{-i}{\hbar}\left[\hat{H}^{'},\hat{\rho}_s(t)\right] \nonumber\\
&+\sum_{i=o,e}\left(\hat{J}_{i}\hat{\rho}_s(t)\hat{J}^{\dagger}_{i}-\frac{1}{2}\hat{J}^{\dagger}_{i}\hat{J}_{i}\hat{\rho}_s(t)-\frac{1}{2}\hat{\rho}_s(t)\hat{J}^{\dagger}_{i}\hat{J}_{i}\right),
\end{align}
where $\hat{J}_{e}=\sum^{N}_{n=1}\sqrt{\kappa_{2n}}\hat{a}_{2n}$ and $\hat{J}_{o}=\sum^{N}_{n=1}\sqrt{\kappa_{2n-1}}\hat{a}_{2n-1}$ are the so-called ``jump operators" in the terminology of the QJA, describing the cascaded coupling among the right and left cavity modes, respectively, and the Hamiltonian $\hat{H}^{'}$ is given by
\begin{align}
\hat{H}^{'}=&~\hat{H}_{sys}-\frac{i\hbar}{2}\sum^{N-1}_{n=1}\sum^{N-1}_{m=1}\sqrt{\kappa_{2n-1}\kappa_{2m+1}}~\delta_{n<m+1}\Big(\hat{a}_{2n-1}\nonumber\\
&\times\hat{a}^{\dagger}_{2m+1}-\hat{a}^{\dagger}_{2n-1}\hat{a}_{2m+1}\Big).
\end{align}
Combining terms we can rewrite the above master equation in a more compact form as
\begin{equation}\label{mas-(1U)}
\frac{d\hat{\rho}_s(t)}{dt}=\frac{-i}{\hbar}\left[\hat{H}_{NH},\hat{\rho}_s(t)\right]+\sum_{i=o,e}\hat{J}_{i}\hat{\rho}_s(t)\hat{J}^{\dagger}_{i}.
\end{equation}
We notice that if we just consider the first term on the right side of the above equation, then it represents a Liouville-von Neumann type equation with the difference that instead of the system Hamiltonian we now have an effective non-Hermitian Hamiltonian ($\hat{H}_{NH}$). In the QJA this non-Hermitian Hamiltonian consists of a Hermitian (system Hamiltonian) part and an anti-Hermitian part constructed from the jump operators. The explicit form of this non-Hermitian Hamiltonian for the system under consideration is given as follows
\begin{align}
\hat{H}_{NH}&=\hat{H}_{sys}-\frac{i\hbar}{2}\sum^{N}_{n=1}\left(\kappa_{2n-1}\hat{a}^{\dagger}_{2n-1}\hat{a}_{2n-1}+\kappa_{2n}\hat{a}^{\dagger}_{2n}\hat{a}_{2n}\right)\nonumber\\
&-i\hbar\sum^{N-1}_{n=1}\sum^{N-1}_{m=1}\sqrt{\kappa_{2n-1}\kappa_{2m+1}}~\delta_{n<m+1}\hat{a}^{\dagger}_{2m+1}\hat{a}_{2n-1}\nonumber\\
&-i\hbar\sum^{N-1}_{n=1}\sum^{N-1}_{m=1}\sqrt{\kappa_{2n}\kappa_{2m+2}}~\delta_{n>m+1}\hat{a}^{\dagger}_{2m}\hat{a}_{2n+2}.
\end{align}
Note that in the above Hamiltonian we have three types of terms on the right-hand side: the first is the usual system Hamiltonian, the next two describe the decay of energy from the cavity modes, and the last two separately express a uni-directional coupling among the even and odd cavity modes through the fiber.
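As an illustration of how the Lindblad form of the cascaded master equation above can be integrated numerically, the following hedged sketch (ours, with illustrative parameters, identical subsystems, real $g$, and $\hbar=1$) builds the Hermitian Hamiltonian $\hat H^{'}$, the collective jump operators $\hat J_o$ and $\hat J_e$, and calls QuTiP's \texttt{mesolve} for the smallest array, $N=2$; the even-mode cascade term ($\hat a_4\to\hat a_2$) is written by mirroring the odd-mode term and is an assumption on our part.
\begin{verbatim}
# Sketch: cascaded master equation for N = 2 subsystems of scheme-I, in Lindblad form.
import numpy as np
from qutip import destroy, qeye, tensor, basis, mesolve

nf = 2                                    # photon cutoff per mode (single-excitation regime)
kappa, g, eta, w = 0.1, 1.0, 0.0, 1.0     # illustrative parameters, hbar = 1

def embed(site, dims):                    # destroy() acting on one factor of the tensor space
    ops = [qeye(d) for d in dims]
    ops[site] = destroy(dims[site])
    return tensor(ops)

dims = [2, 2, nf, nf, nf, nf]             # two emitters, four cavity modes a1..a4
s1, s2 = embed(0, dims), embed(1, dims)
a1, a2, a3, a4 = (embed(i, dims) for i in range(2, 6))

H_sys = (w*(a1.dag()*a1 + a2.dag()*a2 + a3.dag()*a3 + a4.dag()*a4)
         - w*(s1*s1.dag() + s2*s2.dag())
         + g*(a1.dag()*s1 + a1*s1.dag() + a2.dag()*s1 + a2*s1.dag())
         + g*(a3.dag()*s2 + a3*s2.dag() + a4.dag()*s2 + a4*s2.dag())
         + eta*(a1.dag()*a2 + a2.dag()*a1 + a3.dag()*a4 + a4.dag()*a3))

H_prime = (H_sys - 0.5j*kappa*(a1*a3.dag() - a1.dag()*a3)      # odd-mode cascade 1 -> 3
                 - 0.5j*kappa*(a4*a2.dag() - a4.dag()*a2))     # assumed even-mode cascade 4 -> 2
J_o = np.sqrt(kappa)*(a1 + a3)            # collective jump operator, odd modes
J_e = np.sqrt(kappa)*(a2 + a4)            # collective jump operator, even modes

psi0 = tensor(basis(2, 1), basis(2, 0), *[basis(nf, 0)]*4)     # emitter 1 excited, rest empty
tlist = np.linspace(0.0, 10.0, 201)
result = mesolve(H_prime, psi0, tlist, c_ops=[J_o, J_e],
                 e_ops=[a1.dag()*a1 + a2.dag()*a2, a3.dag()*a3 + a4.dag()*a4])
\end{verbatim}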
\begin{figure*}
\includegraphics[width=6.5in,height=2.65in]{Fig2.pdf}
\captionsetup{
format=plain,
margin=1em,
justification=raggedright,
singlelinecheck=false
}
\caption{Architecture-II: Two fiber-coupled RRs (left or L and right or R), each coupled to $N$ QEs positioned on the circumference of the RR and capable of direct coupling through the DDI. As in scheme-I, the fiber delay has been ignored in the development of mode couplings between the two cavities. Again, photo-detectors $D_{a}$ and $D_{b}$ are placed at the ends of the fiber, where they can register any number of the emitted photons involved in the problem. The black dots in the figure represent the repetition of the emitters on the RR circumference.}\label{Fig2}
\end{figure*}
One can then interpret the commutator term appearing in the master equation (Eq.~(\ref{mas-(1U)})) as a non-unitary evolution of the system under the action of the non-Hermitian Hamiltonian, following the so-called non-unitary Schr\"odinger equation in the terminology of the QJA
\begin{equation}\label{NUSE}
i\hbar~\frac{d\ket{\tilde{\psi}(t)}}{dt}=\hat{H}_{NH}{\ket{\tilde{\psi}(t)}}.
\end{equation}
The last term in Eq.~\eqref{mas-(1U)} contains the jump terms that describe the decay of energy from the system in a stochastic manner, such that the probability of occurrence of a jump during an infinitesimal time interval $[t,t+dt]$ is given by
\begin{equation}\label{pi}
P_{j}(t)={\bra{\tilde{\psi}}}\hat{J_{j}}^{\dagger}
\hat{J_{j}}{\ket{\tilde{\psi}}}dt=:\Pi_j dt,
\end{equation}
for $j=o,e$, with $\hat{J}_{o}\equiv\hat{a}_{out}(t)$ and $\hat{J}_{e}\equiv\hat{b}_{out}(t)$. After a jump is recorded, the state of the system is re-normalized (reset) according to the transformation: $\ket{\psi}\mapsto
\frac{\hat{J}_j\ket{\tilde{\psi}}}{\sqrt{\Pi_j}}.$
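For concreteness, a bare-bones first-order Monte-Carlo wave-function step in the spirit of \cite{molmer1993monte} could look as follows. This is our own generic sketch (plain NumPy arrays for $\hat H_{NH}$, $\hat J_j$ and the state, $\hbar=1$), not code used in the paper; note that here the state is renormalized at every step, whereas in the analytical treatment above $\ket{\tilde{\psi}}$ is kept unnormalized so that its norm encodes the no-jump probability.
\begin{verbatim}
# Sketch: one stochastic step of the quantum-jump unravelling described above.
import numpy as np

def mcwf_step(psi, H_NH, jumps, dt, rng):
    """One first-order Monte-Carlo wave-function step; psi is kept normalized."""
    dp = [np.real(psi.conj() @ (J.conj().T @ J) @ psi) * dt for J in jumps]   # Pi_j dt
    if rng.random() < sum(dp):                           # a jump is recorded at a detector ...
        j = rng.choice(len(jumps), p=np.array(dp)/sum(dp))   # ... of type j, weighted by Pi_j
        psi = jumps[j] @ psi                             # reset rule |psi> -> J_j|psi>
    else:                                                # no jump: evolve under H_NH
        psi = psi - 1j * dt * (H_NH @ psi)
    return psi / np.linalg.norm(psi)

rng = np.random.default_rng(seed=0)                      # reproducible trajectory realizations
\end{verbatim}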
\section{Architecture-II: DDI emitters in two coupled cavities}
In scheme-II (shown in Fig.~\ref{Fig2}), we consider just two fiber-coupled RRs, but each coupled to $N$ emitters that are close enough to couple directly to each other through the DDI. The DDI between the $n$th and $(n+1)$th QEs belonging to the $j$th RR is characterized by the parameter $\xi^{(j)}_{n,n+1}$. The absolute position of each atom on the circumference of the RR is not relevant to the present problem, but it can easily be included in the model by introducing an appropriate phase in the emitter-cavity coupling rate. Under the rotating wave approximation, one can express the Hamiltonian for this scheme as
\begin{widetext}
\begin{equation}
\begin{split}
&\hat{H}_{sys}=-\sum_{j=L,R}\sum^{N}_{n=1}\frac{\hbar\omega_{eg}}{2}\hat{\sigma}^{(j)}_{z,n}+\hbar\omega_{c1}\left(\hat{a}^{\dagger}_{1}\hat{a}_{1}+\hat{a}^{\dagger}_{2}\hat{a}_{2} \right)+\hbar\omega_{c2}\left(\hat{a}^{\dagger}_{3}\hat{a}_{3}+\hat{a}^{\dagger}_{4}\hat{a}_{4} \right)+\sum_{j=L,R}\sum^{N-1}_{n=1}\hbar\xi^{(j)}_{n,n+1}\Big(\hat{\sigma}^{\dagger (j)}_{n}\hat{\sigma}^{(j)}_{n+1} +\\
&+ \hat{\sigma}^{\dagger (j)}_{n+1}\hat{\sigma}^{(j)}_{n}\Big)+\hbar\eta^{(L)}\left(\hat{a}^{\dagger}_{1}\hat{a}_{2}+\hat{a}^{\dagger}_{2}\hat{a}_{1}\right)+\hbar\eta^{(R)}\left(\hat{a}^{\dagger}_{3}\hat{a}_{4}+\hat{a}^{\dagger}_{4}\hat{a}_{3}\right)+\sum^{N}_{n=1}\hbar\left(g_{L}\hat{a}_{1}\hat{\sigma}^{\dagger(L)}_{n} + g^{\ast}_{L}\hat{a}^{\dagger}_{1}\hat{\sigma}^{(L)}_{n}+g^{\ast}_{L}\hat{a}_{2}\hat{\sigma}^{\dagger(L)}_{n} + g_{L}\hat{a}^{\dagger}_{2}\hat{\sigma}^{(L)}_{n} \right)\\
&+\sum^{N}_{n=1}\hbar\left(g_{R}\hat{a}_{3}\hat{\sigma}^{\dagger(R)}_{n} + g^{\ast}_{L}\hat{a}^{\dagger}_{3}\hat{\sigma}^{(R)}_{n}+g^{\ast}_{R}\hat{a}_{4}\hat{\sigma}^{\dagger(R)}_{n} + g_{R}\hat{a}^{\dagger}_{4}\hat{\sigma}^{(R)}_{n} \right).
\end{split}
\end{equation}
\end{widetext}
The dissipative dynamics of the system in this scheme also follows the cascaded master equation formalism discussed in detail in the last section. The main difference (from the perspective of the QJA) is that we now have just two pairs of cavity modes that are cascade-coupled (i.e. $\hat{a}_{1}\leftrightarrow\hat{a}_{3}$ and $\hat{a}_{4}\leftrightarrow\hat{a}_{2}$). Note that in the QJA one can easily incorporate the spontaneous emission rate of the QEs (not drawn in the figure).
Equipped with this theoretical formalism, in the next section we discuss how one can transfer the atomic excitation-based N00N state to a multiphoton N00N state of the RRs in both architectures. We also present a comparison (in terms of the cavity N00N state fidelity) of how the generated N00N state evolves in the two schemes.
\section{Results and Discussion}
To quantify and examine the time evolution of the obtained state in comparison to the desired multiphoton N00N state, we make use of the Uhlmann fidelity \cite{nielsen2010quantum}, defined as
\begin{equation}
\mathcal{F}=\Bigg[{\rm Tr}\Bigg
\lbrace\sqrt{\sqrt{\hat{\rho}_{g}}\hat{\rho}_{id}\sqrt{\hat{\rho}_{g}}}\Bigg\rbrace\Bigg]^{2}=\Bigg[\sqrt{\bra{\Psi}\hat{\rho}_{g}\ket{\Psi}}\Bigg]^{2},
\end{equation}
where $\hat{\rho}_{g}$ ($\hat{\rho}_{id}$) is the generated (required/ideal) state. Since our required state is always a N00N state, which is a pure state $\ket{\Psi}$, the above definition simplifies to the last term in the above equation.
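In practice, for a pure target the fidelity reduces to a single matrix element; a tiny helper function (ours, for a density matrix and target ket stored as NumPy arrays) is
\begin{verbatim}
import numpy as np

def noon_fidelity(rho_g, psi_target):
    """F = <Psi|rho_g|Psi> for a pure target ket |Psi> (1-D array) and density matrix rho_g."""
    return float(np.real(psi_target.conj() @ rho_g @ psi_target))
\end{verbatim}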
Note that the aforementioned open quantum system framework is valid for any number of QEs and RRs in both schemes. In the following sections we focus on applying this framework to the two relatively simple examples of single- and two-photon N00N states, utilizing the above-defined fidelity as a quantitative measure to describe the N00N state dissipative dynamics.
\subsection{Uni-photon N00N state}
First of all, we'll consider a uni-photon N00N state, which (neglecting the local phase between orthogonal components) takes the form:
\begin{align}
\ket{\Psi}=\frac{1}{\sqrt{2}}(\ket{1,0}+\ket{0,1}).
\end{align}
This state is one of the four maximally entangled Bell states, which have their own significance due to a wide range of applications in quantum information processing \cite{nielsen2010quantum}, linear optics quantum computation \cite{knill2001scheme}, quantum cryptography \cite{bennett1993teleporting}, quantum dense coding \cite{mattle1996dense} and quantum teleportation \cite{furusawa1998unconditional} (to name a few).
To start with such a state in the emitters, we consider just two emitter-cavity subsystems (subsystems (L) and (R)), with one of the emitters initially excited, but without knowing which one. As time evolves, such a state can be transferred from the emitters to any two cavity modes belonging to different emitter-cavity subsystems, or even between the hybrid emitter-cavity systems. Note that this setup is valid for both scheme-I and scheme-II. In the next section, when we consider the two-excitation case, the setups for scheme-I and scheme-II become different and will be compared. The purpose of this single-excitation case is to introduce the notation as well as to study how a single-excitation atomic N00N state can be relocated from the emitters to the cavities and to hybrid emitter-cavity subsystems, and then stored.
Employing the machinery of QJA, for the pre-photodetection times the system under consideration evolves according to the following no-jump state
\begin{align}
\ket{\tilde{\psi}}=&\Bigg(c_{1}(t)\hat{\sigma}^\dagger_1 +c_2(t)\hat{a}^\dagger_1+c_3(t)\hat{a}^\dagger_2+ c_{4}(t)\hat{\sigma}^\dagger_2\nonumber\\
&+c_5(t)\hat{a}^\dagger_3+c_6(t)\hat{a}^\dagger_4\Bigg)\ket{\varnothing},
\end{align}
where $\ket{\varnothing}$ represents the ground state with both QEs unexcited and no photons in the cavity modes. Inserting the no-jump state along with the non-Hermitian Hamiltonian $\hat{H}_{NH}$ into non-unitary Schr\"odinger equation yields the following set of coupled differential equations for no-jump probability amplitudes
\begin{subequations}
\begin{eqnarray}
\overbigdot{c}_1(t) = -i g^* c_{2}(t)-i g c_{3}(t),~\\
\overbigdot{c}_{2}(t) = -i( \Delta_{ac}-\frac{i}{2} \kappa) c_{2}(t)-i g c_{1}(t),~\\
\overbigdot{c}_{3}(t) = -i(\Delta_{ac}-\frac{i}{2} \kappa) c_{3}(t)- \kappa c_{6}(t)-i g^* c_{1}(t),~\\
\overbigdot{c}_{4}(t) = -i g^* c_{5}(t)-ig c_{6}(t),~\\
\overbigdot{c}_{5}(t) = -i(\Delta_{ac}-\frac{i}{2} \kappa) c_{5}(t)-i g c_{4}(t)-i \kappa c_{2}(t),~\\
\overbigdot{c}_{6}(t) = -i(\Delta_{ac}-\frac{i}{2} \kappa) c_{6}(t)-i g^* c_{4}(t).~
\end{eqnarray}
\end{subequations}
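These linear equations can be integrated directly; the sketch below (ours) transcribes them verbatim into \texttt{scipy.integrate.solve\_ivp}, with $\hbar=1$, identical subsystems, and illustrative parameter values, starting from the emitter Bell state $c_1(0)=c_4(0)=1/\sqrt{2}$.
\begin{verbatim}
# Sketch: numerical integration of the six no-jump amplitude equations above.
import numpy as np
from scipy.integrate import solve_ivp

g, kappa, Delta = 1.0, 0.1, 0.5                  # illustrative values (units of g)

def rhs(t, c):
    c1, c2, c3, c4, c5, c6 = c
    dc1 = -1j*np.conj(g)*c2 - 1j*g*c3
    dc2 = -1j*(Delta - 0.5j*kappa)*c2 - 1j*g*c1
    dc3 = -1j*(Delta - 0.5j*kappa)*c3 - kappa*c6 - 1j*np.conj(g)*c1
    dc4 = -1j*np.conj(g)*c5 - 1j*g*c6
    dc5 = -1j*(Delta - 0.5j*kappa)*c5 - 1j*g*c4 - 1j*kappa*c2
    dc6 = -1j*(Delta - 0.5j*kappa)*c6 - 1j*np.conj(g)*c4
    return [dc1, dc2, dc3, dc4, dc5, dc6]

c0 = np.array([1, 0, 0, 1, 0, 0], dtype=complex)/np.sqrt(2)   # emitter Bell (N00N) state
sol = solve_ivp(rhs, (0.0, 10.0), c0, t_eval=np.linspace(0.0, 10.0, 401), rtol=1e-8)
\end{verbatim}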
After solving the above set of equations, one can follow the standard procedure of the QJA \cite{di2008photon} to obtain the density operator by performing an ensemble average over the possible realizations of the conditioned output photo-detection. For the present case, the density operator can then be divided into a jump part (with subscript $J$) and a no-jump part (subscript $NJ$) as
\begin{equation}
\hat{\rho}(t)=\mathcal{P}_{NJ}(t)\hat{\rho}_{NJ}(t)+\mathcal{P}_{J}(t)\hat{\rho}_{J}(t).
\end{equation}
For the uni-photon case, only one jump can ever be recorded, hence we can write the density operator more explicitly as
\begin{equation}
\hat{\rho}_s(t)=\ket{\tilde{\psi}}\bra{\tilde{\psi}}+\left|c_{g}(t)\right|^{2}\ket{\varnothing}\bra{\varnothing},
\end{equation}
where we have used $\hat{\rho}_{NJ}(t)=\frac{\ket{\tilde{\psi}}\bra{\tilde{\psi}}}{||\tilde{\psi}||^{2}}$ and $\mathcal{P}_{NJ}(t)=||\tilde{\psi}||^{2}$, and defined $\mathcal{P}_{J}(t)=|c_{g}(t)|^{2}=1-\sum^{6}_{i=1}|c_{i}(t)|^{2}$. The physical interpretation of $|c_{g}(t)|^{2}$ is the probability of finding the system in the ground state once the single photon has been detected. Note that the definition of $|c_{g}(t)|^{2}$ ensures the correct normalization of the obtained density operator.
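In terms of the amplitudes returned by the integration sketched earlier, $\mathcal{P}_{J}(t)$ is obtained by a one-line bookkeeping function (ours):
\begin{verbatim}
import numpy as np

def jump_probability(c):
    """c: array of shape (6, nt) with amplitudes c_i(t); returns P_J(t) = |c_g(t)|^2."""
    return 1.0 - np.sum(np.abs(c)**2, axis=0)   # e.g. jump_probability(sol.y) in the sketch above
\end{verbatim}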
\begin{figure*}
\begin{center}
\begin{tabular}{cccc}
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig3a.pdf}} &
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig3b.pdf}}&
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig3c.pdf}}\\
\end{tabular}
\captionsetup{
format=plain,
margin=1em,
justification=raggedright,
singlelinecheck=false
}
\caption{Single-photon N00N state dissipative dynamics, when the N00N state is prepared between (a) the QEs, (b) cavity modes belonging to different cavities, and (c) hybrid atom-cavity systems. Parameters used are: $\Delta_{ac}\equiv(\omega_{eg}-\omega_{c})=0.5g$, and the value of the cavity decay rate $\kappa$ is varied between 0 and 1.5 in units of the emitter-cavity coupling rate $g$ to examine the strong and weak coupling regimes in detail. Note that all decay rates are assumed to be identical here, i.e. $\kappa_{1}=\kappa_{2}=\ldots=\kappa_{2n}=\kappa_{2n-1}\equiv\kappa$, while cavity backscattering is neglected ($\eta=0$). Spontaneous emission rates from all QEs have been completely ignored in this and all other plots.}\label{Fig3}
\end{center}
\end{figure*}
Once the full density operator of the system $\hat{\rho}_s(t)$ is obtained, we can calculate the time evolution of the fidelity of the N00N states. To study how the initial emitter N00N state evolves as a function of time, we trace out all four cavity modes from the system density operator $\hat{\rho}_s(t)$ and, as a result, obtain the following emitter density operator $\hat{\rho}_E(t)$
\begin{equation}
\begin{split}
&\hat{\rho}_E(t)=Tr_c\lbrace\hat{\rho}_s(t)\rbrace\\
&=|c_{1}(t)|^{2}\ket{e_{1},g_{2}}\bra{e_{1},g_{2}}+|c_{4}(t)|^{2}\ket{g_{1},e_{2}}\bra{g_{1},e_{2}}\\
&+c_{1}(t)c^{\ast}_{4}(t)\ket{e_{1},g_{2}}\bra{g_{1},e_{2}}+c^{\ast}_{1}(t)c_{4}(t)\ket{g_{1},e_{2}}\bra{e_{1},g_{2}}\\
&+\left(1-|c_{1}(t)|^{2}-|c_{4}(t)|^{2}\right)\ket{g_{1},g_{2}}\bra{g_{1},g_{2}}.
\end{split}
\end{equation}
The fidelity ($\mathcal{F}_E(t)$) of such an emitter N00N state (Bell state: $\ket{\Psi}=\frac{1}{\sqrt{2}}(\ket{e_{1},g_{2}}+\ket{g_{1},e_{2}})$) then turns out to be
\begin{equation}
\mathcal{F}_E(t)=\frac{1}{2}\left|c_{1}(t)+c_{4}(t)\right|^{2}.
\end{equation}
Similarly the fidelity ($\mathcal{F}_c(t)$) of generating single-photon N00N state between cavity modes of left (L) and right (R) cavities ( $\ket{\Psi}=\frac{1}{\sqrt{2}}(\ket{1_{L},0_{R}}+\ket{0_{L},1_{R}})$) takes the form
\begin{equation}
\mathcal{F}_c(t)=\frac{1}{4}|c_{2}(t)+c_{3}(t)+c_{5}(t)+c_{6}(t)|^{2}.
\end{equation}
Finally, the single-excitation (an emitter excitation or a single photon in any cavity mode) N00N state between the hybrid left and right atom-cavity systems has the fidelity
\begin{equation}
\mathcal{F}_{Ec}(t)=\frac{1}{6}|c_{1}(t)+c_{2}(t)+c_{3}(t)+c_{4}(t)+c_{5}(t)+c_{6}(t)|^{2}.
\end{equation}
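Given the no-jump amplitudes, the three fidelities above are simple quadratic forms; for reference, these are our helper functions, with indices $0,\dots,5$ corresponding to $c_1,\dots,c_6$:
\begin{verbatim}
import numpy as np

def F_E(c):  return 0.5  * np.abs(c[0] + c[3])**2                 # emitter N00N state
def F_c(c):  return 0.25 * np.abs(c[1] + c[2] + c[4] + c[5])**2   # cavity-mode N00N state
def F_Ec(c): return np.abs(c[0]+c[1]+c[2]+c[3]+c[4]+c[5])**2/6.0  # hybrid atom-cavity N00N state
\end{verbatim}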
Starting with the single-excitation N00N state, in Fig.~\ref{Fig3} we plot all three fidelities $\mathcal{F}_{E}(t)$, $\mathcal{F}_{c}(t)$, and $\mathcal{F}_{Ec}(t)$ as a function of time. We notice that in all panels the obtained fidelities oscillate in the strong coupling regime $\kappa<g$. These are the well-known Rabi oscillations, which describe an almost reversible exchange of photons between the emitter and the cavity. In the weak coupling regime $\kappa>g$, on the other hand, the fidelities exhibit a purely decaying trend. Another noticeable point is that, when the excitations of the initially prepared emitter N00N state are transferred to the cavities, the resultant maximum fidelity decreases slightly, to 96\%, due to the small photon leakage from the imperfect cavity walls. In the cavities, the single-photon N00N state tends to stay inside the cavity modes for at least a time $6g^{-1}$ in the strong coupling regime ($g=10\kappa$) with a 75\% fidelity. Finally, we observe that one can store this single-excitation N00N state in the hybrid atom-cavity systems, achieving a fidelity of 50\% (as expected) for almost a time $6g^{-1}$.
\begin{figure*}
\begin{center}
\begin{tabular}{cccc}
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig4a.pdf}} &
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig4b.pdf}}&
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig4c.pdf}}\\
\end{tabular}
\captionsetup{
format=plain,
margin=1em,
justification=raggedright,
singlelinecheck=false
}
\caption{Fidelity of the single-photon cavity N00N state under the variation of (a) the coupling of the emitters with the cavity modes, (b) the emitter-cavity detuning (far-detuned case $\Delta_{ac}=5g$), and (c) strong cavity mode backscattering with $\eta=1.5g$. Similar to Fig.~\ref{Fig3}, the value of the cavity decay rate $\kappa$ is varied between 0 and 1.5 in units of $g$ in parts (b) and (c) of the above figure (to examine the strong and weak coupling regimes of CQED in the presence of large detunings and cavity mode backscattering).}\label{Fig4}
\end{center}
\end{figure*}
From now on we concentrate on the cavity N00N state only, i.e. in each case we start with an emitter N00N state and study how the cavities can store the photonic N00N state for protracted times. To this end, in Fig.~\ref{Fig4} we vary the parameter $\kappa/g$, the atom-cavity detuning, and the cavity backscattering, to further examine the dependence of the cavity N00N state fidelity on the strong coupling regime of CQED, on frequency mismatch, and on the mixing of modes in each cavity, respectively.
In Fig.~\ref{Fig4}(a) we notice that, as we push further into the strong coupling regime, although we obtain a high fidelity ($\sim$ 80\%), $\mathcal{F}_{c}$ starts to oscillate more rapidly, since the period of the oscillations depends on the parameter $g$. Due to the strong coupling between the emitter and the cavity modes (and not among the cavity modes), the single-photon N00N state vanishes quickly ($\sim t=2\kappa^{-1}$). In all of the previous plots we were considering a small detuning between the atom and the cavity ($\omega_{c}-\omega_{eg}\equiv\Delta_{ac}=0.5g$). In Fig.~\ref{Fig4}(b) we introduce a large detuning ($\Delta_{ac}=5g$). As a result, we notice that the fidelity remains trapped for longer times (more than $6g^{-1}$), but this happens at the cost of the fidelity dropping to 25\%. In Fig.~\ref{Fig4}(c) we introduce a strong backscattering ($\eta=1.5 g$) between the cavity modes and, as a result, we notice that the highest fidelity achieved reaches $75\%$. Compared to the no-backscattering situation (Fig.~\ref{Fig3}(b)), we notice the appearance of an additional half blob of fidelity at the $\kappa=0$ point. This manifests a control over the collapse and revival pattern of the photonic N00N state fidelity, due to the possibility of populating both cavity modes by altering the backscattering parameter.
\begin{figure*}
\begin{center}
\begin{tabular}{cccc}
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig5a.pdf}} &
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig5b.pdf}}&
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig5c.pdf}}\\
\end{tabular}
\captionsetup{
format=plain,
margin=1em,
justification=raggedright,
singlelinecheck=false
}
\caption{Two-photon N00N state generation and evolution in scheme-I. All emitter-cavity system parameters are chosen to be identical for simplicity and particular attention has been paid to (a) large atom-cavity detuning (b) cavity mode backscattering and (c) stronger coupling regime. The rest of the parameters are the same as used in Fig.~\ref{Fig4}.}\label{Fig5}
\end{center}
\end{figure*}
\subsection{Bi-photon N00N state}
After the discussion of the single-photon N00N state (Bell states), in this section we focus on the two-photon N00N states of the form
\begin{align}
\ket{\Psi}=\frac{1}{\sqrt{2}}(\ket{2,0}+\ket{0,2}).
\end{align}
In the literature, this state is also known as the Hong-Ou-Mandel state (after the well-known 1987 Hong-Ou-Mandel experiment \cite{hong1987measurement}) and has significance in the context of pure quantum interference effects among indistinguishable photons \cite{di2014observation, lewis2014proposal,lopes2015atomic,branczyk2017hong}. At the two-photon level (and for higher photon-number N00N states), scheme-I and scheme-II start to differ. Therefore, we now discuss the two schemes separately.
\subsubsection{Setup for Scheme-I}
In this scheme, we consider four emitter-cavity subsystems in the JC array, which we divide into two groups. The first two emitter-cavity systems form one group and the last two form the other. Initially, we start with a two-excitation N00N state constructed through the QEs, in which either both QEs of the first group are excited and the QEs of the other group are in the ground state, or vice versa. Again, our key interest is to generate a photonic N00N state in the cavities and to examine the parameter regimes where both the highest fidelity and the longest survival time can be realized. To this end, we rewrite the master equation in a more suggestive form (using the general form of the equation worked out in Eq.~\eqref{mas-(1)}), identifying the following set of jump/output operators
\begin{subequations}
\begin{eqnarray}
\hat{J}_{e}=\sum_{j=2,4,6,8}\sqrt{\kappa_{j}}\hat{a}_{j},\\
\hat{J}_{o}=\sum_{j=1,3,5,7}\sqrt{\kappa_{j}}\hat{a}_{j}.
\end{eqnarray}
\end{subequations}
The no-jump state ($\ket{\tilde{\psi}}$) now has seventy-four different possibilities of distributing the two excitations among twelve slots in the kets (four QEs and eight intra-cavity modes). Still, one can work out the ensemble-averaged density operator, which symbolically appears as
\begin{equation}
\hat{\rho}(t)=\ket{\tilde{\psi}}\bra{\tilde{\psi}}+|c_{g}(t)|^{2}\ket{\varnothing}\bra{\varnothing},
\end{equation}
where $\ket{\varnothing}$ is the ground state of the full system (all QEs unexcited and no photons in the cavity modes).
Following the same line of calculation adopted for the single-photon N00N state case, we trace out the emitter part from $\hat{\rho}_s(t)$ and obtain $\hat{\rho}_c(t)$. Using $\hat{\rho}_c(t)$, in Fig.~\ref{Fig5} we numerically plot the two-photon N00N state fidelity for the present scheme. We notice an overall marked decrease in the two-photon N00N state fidelity when compared to the single-photon case, but an overall enhanced storage ability (in all three parts of Fig.~\ref{Fig5} the required state is trapped for longer than $6$ time units). Fig.~\ref{Fig5}(a) shows an almost order-of-magnitude reduction in $\mathcal{F}_{c}$ (compared to the Bell-state case) but larger-amplitude oscillations in the fidelity when a large emitter-cavity detuning is introduced. This happens because, with increasing detuning, the atoms are far from resonance and do not emit their photons quickly into the corresponding cavity modes, which results in an overall suppression of the cavity-mode-based N00N state.
\begin{figure*}
\begin{center}
\begin{tabular}{cccc}
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig6a.pdf}} &
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig6b.pdf}}&
\subfloat{\includegraphics[width=5.5cm,height=6cm]{Fig6c.pdf}}\\
\end{tabular}
\captionsetup{
format=plain,
margin=1em,
justification=raggedright,
singlelinecheck=false
}
\caption{Two-photon N00N state decay employing scheme-II: (a) the large atom-cavity detuning case, (b) cavity mode backscattering, and (c) the strong coupling regime. In all of the plots, a small DDI $\xi$ has been included between the two QEs; specifically, $\xi=0.5g$ for parts (a) and (b) of the figure and $\xi=0.5\kappa$ for part (c). The rest of the parameters are the same as used in Fig.~\ref{Fig5}.}\label{Fig6}
\end{center}
\end{figure*}
Next, the introduction of cavity backscattering enhances the fidelity from 3.3\% to 10\% (compare Fig.~\ref{Fig5}(a) and Fig.~\ref{Fig5}(b)). Again the fidelity shows blobs of collapse and revival, but with the passage of time the blobs start to separate, due to an increase in the destructive interference of the many different probability-amplitude pathways in which the two photons can shuttle back and forth in the JC array. Finally, as shown in Fig.~\ref{Fig5}(c), entering a stronger coupling regime causes a slight increase in the maximum fidelity (from 10\% to 13\%), while the two-photon N00N state fidelity does not oscillate in the usual manner but rather exhibits a more involved quantum interference pattern.
\subsubsection{Setup for Scheme-II}
In scheme-II we always consider two coupled cavities, but for a two-photon state we couple each RR to two dipole-dipole interacting emitters. We assume that initially either both QEs coupled to the left cavity are excited and the emitters coupled to the right cavity are in their ground state, or vice versa. The set of jump operators for this case takes the form
\begin{subequations}
\begin{eqnarray}
\hat{J}_{o}=\sqrt{\kappa_{1}}\hat{a}_{1}+\sqrt{\kappa_{3}}\hat{a}_{3},\\
\hat{J}_{e}=\sqrt{\kappa_{2}}\hat{a}_{2}+\sqrt{\kappa_{4}}\hat{a}_{4}.
\end{eqnarray}
\end{subequations}
The no-jump state now allows thirty-two different ways of finding the system with two excitations before the recording of any photon. Here we explicitly report this state as
\begin{widetext}
\begin{align}
&\ket{\tilde{\psi}}= ~\Big(c_{1}(t)\hat{\sigma}^\dagger_1\hat{\sigma}^\dagger_2+c_{2}(t)\hat{\sigma}^\dagger_1\hat{\sigma}^\dagger_3+c_{3}(t)\hat{\sigma}^\dagger_1\hat{\sigma}^\dagger_4+c_{4}(t)\hat{\sigma}^\dagger_2\hat{\sigma}^\dagger_3+c_{5}(t)\hat{\sigma}^\dagger_2\hat{\sigma}^\dagger_4+c_{6}(t)\hat{\sigma}^\dagger_3\hat{\sigma}^\dagger_4+c_7(t)\hat{\sigma}^\dagger_1\hat{a}^\dagger_1+c_8(t)\hat{\sigma}^\dagger_1\hat{a}^\dagger_2+c_9(t)\hat{\sigma}^\dagger_1\hat{a}^\dagger_3\nonumber\\
&+c_{10}(t)\hat{\sigma}^\dagger_1\hat{a}^\dagger_4+c_{11}(t)\hat{\sigma}^\dagger_2\hat{a}^\dagger_1+c_{12}(t)\hat{\sigma}^\dagger_2\hat{a}^\dagger_2+c_{13}(t)\hat{\sigma}^\dagger_2\hat{a}^\dagger_3+c_{14}(t)\hat{\sigma}^\dagger_2\hat{a}^\dagger_4+c_{15}(t)\hat{\sigma}^\dagger_3\hat{a}^\dagger_1+c_{16}(t)\hat{\sigma}^\dagger_3\hat{a}^\dagger_2+c_{17}(t)\hat{\sigma}^\dagger_3\hat{a}^\dagger_3+c_{18}(t)\hat{\sigma}^\dagger_3\hat{a}^\dagger_4\nonumber\\
&+c_{19}(t)\hat{\sigma}^\dagger_4\hat{a}^\dagger_1+c_{20}(t)\hat{\sigma}^\dagger_4\hat{a}^\dagger_2+c_{21}(t)\hat{\sigma}^\dagger_4\hat{a}^\dagger_3+c_{22}(t)\hat{\sigma}^\dagger_4\hat{a}^\dagger_4+c_{23}(t)\hat{a}^\dagger_1\hat{a}^\dagger_1+c_{24}(t)\hat{a}^\dagger_2\hat{a}^\dagger_2+c_{25}(t)\hat{a}^\dagger_3\hat{a}^\dagger_3+c_{26}(t)\hat{a}^\dagger_4\hat{a}^\dagger_4+c_{27}(t)\hat{a}^\dagger_1\hat{a}^\dagger_2\nonumber\\
&+c_{28}(t)\hat{a}^\dagger_1\hat{a}^\dagger_3+c_{29}(t)\hat{a}^\dagger_1\hat{a}^\dagger_4+c_{30}(t)\hat{a}^\dagger_2\hat{a}^\dagger_3+c_{31}(t)\hat{a}^\dagger_2\hat{a}^\dagger_4+c_{32}(t)\hat{a}^\dagger_3\hat{a}^\dagger_4\Big)\ket{\varnothing}.
\end{align}
\end{widetext}
In the same manner as before, we numerically calculate and plot the desired fidelity of the two-photon N00N state in Fig.~\ref{Fig6}. When we compare each plot in Fig.~\ref{Fig6} with the corresponding plot in Fig.~\ref{Fig5}, we notice an overall trend of increase in the fidelity, but the storage of the required state is diminished considerably. In Fig.~\ref{Fig6}(a), a large detuning again causes the two-photon state to survive for a longer time, but the maximum fidelity achieved is limited to only 8.2\%, which is nevertheless greater than the corresponding maximum fidelity of $\sim$3\% obtained in the JC array case. Inclusion of the backscattering between the cavity modes causes a considerable enhancement in the fidelity (the maximum value jumps to $50\%$), but the regular oscillatory pattern is disturbed (as seen, for instance, in Fig.~\ref{Fig6}(b)) due to an increase in the number of ways in which destructive interference among the different photonic paths can occur. Finally, in Fig.~\ref{Fig6}(c) we find a wave-like oscillatory profile resembling Fig.~\ref{Fig5}(c), but a decrease in the maximum fidelity (compared to Fig.~\ref{Fig6}(b)) to 37\%. The non-linearities produced by the QEs seem to destroy the two-photon detection at the same detector (Hong-Ou-Mandel interference) in this case, similar to what has been reported in Ref.~\cite{mirza2015nonlinear}.
\section{Summary and Conclusions}
By using the QJA combined with the input-output formalism of quantum optics, in this paper we have studied the transfer and decay of multiphoton N00N states in two CQED architectures/schemes. Scheme-I consists of a two-way cascaded JC array, whilst scheme-II makes use of multiple dipole-dipole interacting QEs coupled to two fiber-coupled cavities. After describing a general theoretical treatment valid for any number of QEs and RRs, as working examples we presented and examined the cases of single- and two-photon N00N states. For the single-photon N00N state (a Bell state), for which both schemes coincide, we demonstrated that, starting from an emitter Bell state, one can transfer this state to the cavity modes with a maximum fidelity of 96\%, and also to the emitter-cavity hybrid system with a fidelity of 50\%. We then focused on the cavity mode case. There we found that cavity mode backscattering serves as the best option to preserve the Bell state, with a maximum fidelity of $\mathcal{F}_c\sim$ 80\% and up to a 6$g^{-1}$ storage time.
For the two-photon N00N state scenario, the two setups become geometrically different. We compared the required state fidelities in both schemes and found that, while working in the strong coupling regime, the JC array scheme is (overall) better for the longer storage of the two-photon state, with the storage time increased by a factor of $6$ and a maximum fidelity of up to 12\%. The multiple-DDI scheme (scheme-II) is better suited for gaining higher fidelity (up to 50\% in the backscattering case), though for shorter periods, due to the unavailability of a chain of JC subsystems.
Finally, we remark that our study has shown that, unless clever ways of state manipulation are utilized, this trend of fast decay and lower fidelity values would extend to N00N states with higher $\mathcal{N}$ values. In this context, there have been proposals reported in the last five years or so (see for instance \cite{su2014fast, xiong2015efficient, qi2020generating}) to achieve higher N00N state fidelities. However, none of those studies has focused on the multi-emitter, many-RR cascaded schemes considered in this work. We point out that such many-body schemes have practical relevance to recent advancements in quantum computing (with $50$ to $100$ qubits) \cite{preskill2018quantum} and in the development of multi-node quantum communication protocols. We therefore leave the task of achieving higher fidelities and retaining N00N states (with $\mathcal{N}>2$) in the presence of spontaneous emission for longer times, within the two-way cascaded JC setups considered in this study, as a possible future direction of this work.
\end{document}
\begin{document}
\begin{abstract} We study certain moduli spaces of sheaves on Enriques surfaces thereby obtaining, in every odd dimension, new examples of Calabi--Yau manifolds. We describe the geometry (canonical bundle, fundamental group, second Betti number and certain Hodge numbers) of these moduli spaces showing, in partial analogy to the well--known case of sheaves on K3 or Abelian surfaces, how the geometry of the surface reflects that of the moduli space itself.
\end{abstract}
\maketitle
\section*{Introduction}
Moduli spaces of sheaves on K3 surfaces are among the most studied objects in algebraic geometry. Part of their interest lies in that they inherit the rich structure coming from the K3 surface itself. For example, by work of Mukai \cite{Mukai} the symplectic structure on the surface induces a holomorphic symplectic structure on the smooth locus of the moduli space. When smooth and projective, these moduli spaces provide examples of compact irreducible hyperk\"ahler manifolds \cite{Beauville83}, \cite{Huybrechts2}, \cite{Mukai}, \cite{Kieran7}. On the other hand, not much work has been done regarding the geometry of moduli spaces of sheaves on Enriques surfaces, even though it is natural to expect that their geometry is tightly related to that of the Enriques surface itself and of the moduli spaces of sheaves on the covering K3 surface.
The present paper describes the geometry of a certain class of moduli spaces of sheaves on an Enriques surface $T$, namely the case of moduli spaces parametrizing pure dimension one sheaves on $T$. By considering pure dimension one sheaves whose support is linearly equivalent to a given curve $C$, these moduli spaces may be viewed as the relative compactified Jacobian of the linear system $|C|$. As such, they have a structure of fibration in abelian varieties.
One of the results of the paper is to show that the canonical bundle of these moduli spaces is \emph{trivial}. Though it is not hard to see that the canonical bundle is a torsion element in the Picard group, it is an interesting surprise that it is actually trivial, and not $2$--torsion as is true in the case of Enriques surfaces. This produces a series of new examples (in every odd dimension) of Calabi--Yau manifolds. Recall that one of the reasons why hyperk\"ahler manifolds have attracted attention is that they are, together with irreducible Calabi-Yau manifolds and complex tori, the building blocks of K\"ahler manifolds with trivial first Chern class \cite{Beauville83}. By proving that the universal covers of these moduli spaces are \emph{irreducible} Calabi--Yau manifolds, this paper thus produces a new series of building blocks for $c_1$--trivial manifolds.
The fact that the properties of these moduli spaces do not fully reflect those of the underlying surface makes the study of their geometry even more compelling.
The other main results of the paper, all of which use the abelian fibration structure, are the computations of the fundamental group, of the cohomology of the structure sheaf, and of the second Betti number.
To fix ideas, let us assume that we are considering a moduli space $N$ parametrizing sheaves whose Fitting support belongs to a linear system $|C|$ of genus $g \ge 2$. Associating to every sheaf its Fitting support defines the support (or Le Potier) morphism $N \to |C|=\mathbb{P}^{g-1}$, and endows the moduli space with a fibration structure in the $g$--dimensional Jacobians of the curves belonging to $|C|$. Notice that these moduli spaces are always non-empty. To sum up the main results, we have the following theorem.
\begin{thmintro}
Let $|C|$ be a genus $g \ge 2$ linear system on a general Enriques surface $T$, let $d \neq g-1$ be an integer, let $H$ be a generic polarization, and let $N \to |C|$ be the component of the moduli space of $H$--semistable sheaves on $T$ with Fitting support in $|C|$ and Euler characteristic equal to $\chi=d-g+1$ that contains sheaves supported on irreducible curves. Suppose that the divisibility of $C$ in $NS(T)$ is coprime with $2(d-g+1)$. Then:
\begin{enumerate}
\item $N$ is a smooth $(2g-1)$-dimensional Calabi--Yau variety, i.e.,
\[
\omega_{N}\cong {\mathcal O}_{N}, \quad \text{and } \quad h^{p,0}(N)=0 \quad \text{ for } \,\,\, p\neq 0, 2g-1.
\]
\item There is a surjection $ \mathbb{Z}/(2) \twoheadrightarrow \pi_1(N)$ which, under a natural assumption that holds in many cases (e.g. for low values of $g$ and in the case $|C|$ is a primitive linear system) and that is expected to hold in general (see Assumption \ref{assumption} and subsequent discussion), turns out to be an isomorphism.
\item Under the same assumption, we show that for $g \ge 3$
\[
h^2(N)=11.
\]
\item For $g=2$, we get Calabi--Yau $3$--folds with the following Hodge diamond
\[
\begin{array}{ccccccc}
&&&1&&&\\
&&0&&0&&\\
&0&&10&&0&\\
1&&10&&10&&1\\
\end{array}
\]
\end{enumerate}
\end{thmintro}
Geometrically, we can realize the universal cover $\widetilde N$ of $N$ via the Stein factorization of the norm map. Since $\widetilde N$ is simply connected, it is an \emph{irreducible} Calabi--Yau manifold of dimension $2g-1$.
Further content of the paper regards the support morphism which, though appearing ubiquitously in algebraic geometry (e.g. the Hitchin system, the Beauville--Mukai integrable system), is not very well understood (especially over the locus of non--reduced curves). We give some factual and conjectural properties of the support morphism in the case of a linear system on a smooth projective surface (Section \ref{fibration in jacobians}). We also mention a recent result of Yoshioka \cite{yoshioka-enriques} regarding this morphism in the case of primitive linear systems.
In the specific case of Enriques surfaces, we study the corresponding variation of Hodge structures (via degeneration of Hodge bundles) and compute the push--forward of the structure sheaf (see \ref{higher direct images}). This involves monodromy calculations that are of independent interest (see \ref{irreducible monodromy non hyp}).
Beyond the case of the Hilbert scheme $T^{[n]}$ of $n$ points on $T$, whose canonical bundle (which is shown to be of $2$--torsion) and fundamental group are computed in \cite{Oguiso_Schroer11}, this paper is the first one that studies geometric properties of a moduli space of sheaves on an Enriques surface. The previously existing literature, \cite{Kim98}, \cite{Kim06}, and \cite{Zowislok}, studies smoothness and irreducibility properties of moduli spaces of sheaves on Enriques surfaces, by realizing them as double covers of Lagrangian subvarieties of moduli spaces on the covering K3 surface (and after the first version of this paper appeared, also \cite{Nuer}, where non-emptiness is studied, and \cite{Nuer2}, where the birational geometry of these moduli spaces is studied). Finally, \cite{Hauzer} finds an explicit parametrization in the case when the moduli spaces are one--dimensional and shows how to relate moduli spaces of sheaves of arbitrary rank to those of low rank.
The techniques of this paper can be used also to study relative compactified Jacobians of linear systems on bi-elliptic surfaces. These moduli spaces produce another series of Calabi-Yau manifolds, whose geometry is the subject of a forthcoming paper by the author.
\section{Preliminaries} \label{prelims}
\subsection{Set up and notation} \label{notation}
\subsubsection{}Throughout the paper $T$ will denote an Enriques surface, that is a smooth projective surface with
\[
H^1(T, \mathcal O_T)=0,
\]
and whose canonical bundle $\omega_T$ defines a non-trivial $2$-torsion element of the Picard group. It is well known that $\pi_1(T)=\mathbb{Z}/(2)$ and that its universal cover, which will be denoted by $S$, is a K3 surface. The covering morphism
\[
f: S \to T,
\]
is the double covering induced by $\omega_T$. The deck involution
\[
\quad \iota: S \to S,
\]
is antisymplectic, i.e., if $\sigma$ denotes the holomorphic symplectic form on $S$, then $\iota^* \sigma=-\sigma$.
By $C$ we will denote a curve in $T$.
Using the Riemann-Roch theorem one can see that if $C^2 \ge0$, then the line bundle ${\mathcal O}(C)\otimes \omega_T$ is also effective. We will denote by $C'$ a curve in $|\mathcal O(C)\otimes \omega_T|$.
We also set
\[
D:=f^{-1}(C) \subset S,
\]
By the Hodge index theorem, if the arithmetic genus $g$ of $C$ satisfies $g \ge 2$ and $C$ is connected and reduced, then the covering
\[
f: D \to C,
\]
is connected. In particular, the two-torsion line bundle
\begin{equation} \label{eta}
\eta:={\omega_T}_{|C},
\end{equation}
is not trivial.
When this is the case the genus of $D$ is equal to
\[
h= 2g-1.
\]
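One way to see this: since $f: D\to C$ is \'etale of degree two with $f_*\mathcal O_D\cong \mathcal O_C\oplus\eta$, and $\deg\eta=0$, we get
\[
1-h=\chi(\mathcal O_D)=\chi(\mathcal O_C)+\chi(\eta)=2(1-g).
\]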
If $g\ge 2$, then $\dim |C|=\dim |C'|=g-1$ and $\dim |D|=h$.
Moreover, we see that $|C|$ and $|C'|$, identified with their images in $|D|$ under the pullback morphism,
are the two $\iota$-invariant linear subspaces of $|D|$.
\subsection{Pure dimension one sheaves on surfaces}
\subsubsection{}Let $(X,H)$ be a smooth projective polarized surface. Associated to any coherent sheaf $F$ on $X$, is the ideal sheaf
\[
\mathcal I_F:=\ker [\mathcal O_X \to \mathcal{E}nd(F)],
\]
defining the $\mathcal O_X$-module structure on $F$. The \emph{scheme theoretic support} of $F$, denoted $\Supp(F)$, is the scheme defined by $\mathcal I_F$.
A sheaf is called \emph{pure of dimension $d$} if for any subsheaf $0 \neq G \subseteq F$, $\dim \Supp(G)=d$. If $F$ is a pure dimension one sheaf on $X$, then $\Supp(F)$ is a (possibly non-integral) curve, and $F=i_*L$, where $i: \Supp(F) \to X$ is the natural embedding and where $L$ is a sheaf on $\Supp(F)$ having no subsheaves that are supported on points.
For pure dimension one sheaves, we will also consider another type of support, the \emph{Fitting support}, which is defined in the following way. A pure dimension one sheaf on a smooth projective surface has homological dimension one (\cite{Huybrechts-Lehn}, Chapter 1), i.e., there exists a length one locally free resolution of $F$,
\[
0 \to L_1 \stackrel{a}{\rightarrow} L_0 \to F \to 0.
\]
The Fitting support of $F$, denoted $\supp(F)$, is the subscheme of $X$ defined by the equation $\det a=0$. Contrary to the case of the scheme theoretic support, the Fitting support behaves well in families. It is important to point out that the class in cohomology of the pure dimension one scheme $\supp(F)$ is exactly the first Chern class $c_1(F)$.
\subsubsection{} For pure dimension one sheaves, stability with respect to the Hilbert polynomial defined by $H$ amounts to considering stability with respect to the slope function
\[
\mu_H(F)=\ff{\chi(F)}{c_1(F) \cdot H},
\]
where $\chi(F)$ denotes the Euler characteristic of $F$. So if $F$ is pure of rank one supported on a reduced curve $\Gamma$, $F$ is $H$--semistable if and only if for every subcurve $\Gamma' \subset \Gamma$ we have
\begin{equation} \label{stability pure rank one sheaves}
\ff{\chi(F)}{\Gamma \cdot H} \le \ff{\chi(F_{\Gamma'})}{\Gamma' \cdot H},
\end{equation}
where
\[
F_{\Gamma'}:=F \res{\Gamma'} \slash Tor(F \res{\Gamma'} )
\]
is the restriction of $F$ to the subcurve, modulo its torsion. We say that $H$ is $\chi$--general for a curve $\Gamma$ and an integer $\chi$ (or $d$--general for $d:=\chi-\chi(\mathcal O_\Gamma)$) if for every subcurve $\Gamma'\subset \Gamma$ the rational number $ \chi \ff{\Gamma' \cdot H}{\Gamma \cdot H}$ is not an integer. This guarantees that $H$--semistability is equivalent to $H$--stability.
Recall that for any coherent sheaf $F$ on $X$, one can define the Mukai vector $v(F) \in H^*_{alg}(T, \mathbb{Z})$. When $F$ is pure of dimension one it is given by
\[
v=v(F)=(0,c_1(F), ch_2(F))=(0, c_1(F), \chi(F)-\ff{1}{2}c_1(X)c_1(F)).
\]
Here $ch_2(F)$ denotes the degree two part of the Chern character of $F$. Let
\[
M_{v,H}(X)
\]
be the moduli space of $H$-semistable sheaves on $X$ with Mukai vector $v$.
Let $F$ be a sheaf with $v(F)=v$ and let $\mathcal H_{c_1(v)}$ be the Hilbert scheme parametrizing subschemes with cohomology class equal to that of $\supp(F)$.
Since the Fitting support behaves well in families, we can define the Le Potier \cite{LePotier} or support morphism
\[
\begin{aligned}
\pi: M_{v,H}(X) & \longrightarrow \mathcal H_{{c_1(v)}},\\
F & \longmapsto \supp(F)
\end{aligned}
\]
which associates to a pure sheaf of dimension one its Fitting support.
For a curve $\Gamma \subset X$ defining a point $[\Gamma] \in \mathcal H_{c_1(v)}$, the fiber $M_{v,H}(X)_{[\Gamma]}:=\pi^{-1}([\Gamma])$ is the Simpson moduli space of $H_{|\Gamma}$-semistable sheaves on $\Gamma$. For example, the fiber over a nodal curve is isomorphic to an appropriate compactified Jacobian in the sense of Oda and Seshadri \cite{Oda_Seshadri79} (cf. Alexeev \cite{Alexeev04}).
If $h^1(X, \mathcal O_X)=0$, then every component of $\mathcal H_{c_1(v)}$ is just the linear system of some line bundle with that cohomology class. Hence, if $v=(0,\Gamma, \chi)$ and we let
\[
M_{v,H}(X, |\Gamma|)
\]
be the irreducible component of $\pi^{-1}(|\Gamma|)$ containing the locus of locally free sheaves with integral Fitting support, we can think of
\begin{equation} \label{support morphism}
\pi: M_{v,H}(X, |\Gamma|)=\overline J_{H,d}(|\Gamma|) \to |\Gamma|
\end{equation}
as the relative compactified Jacobian of degree $d=\chi-\chi(\mathcal O_\Gamma)$ of the linear system $|\Gamma|$. Indeed, the fiber over a reduced curve $\Gamma$ is just the degree $d$ compactified Jacobian of that curve with respect to the polarization $H$, i.e.
\[
\pi^{-1}([\Gamma])=\overline J_{H,d}(\Gamma)
\]
where $d=\chi-\chi(\mathcal O_\Gamma)$. Notice that if the curve is integral then the compactified Jacobian does not depend on $H$.
\begin{lemma} \label{existence stable sheaves}
If $\Gamma$ is an integral curve, the fiber $\pi^{-1}([\Gamma])$ is contained in the stable locus $M^s_{v,H}(X, |\Gamma|)$ of $M_{v,H}(X, |\Gamma|)$. In particular, if there exists an integral curve in the linear system $|\Gamma|$, then $M^s_{v,H}(X, |\Gamma|)$ is non-empty and changing the polarization only changes $M_{v,H}(X, |\Gamma|)$ within its birational class.
\end{lemma}
\begin{proof}
If $\supp(F)$ is integral then there is no condition (\ref{stability pure rank one sheaves}) to be checked, i.e. any surjection $F \to G$ is an isomorphism.
\end{proof}
Often in this paper, when we consider the restriction of (\ref{support morphism}) to a locus in $|\Gamma|$ parametrizing irreducible curves, we omit the polarization from the notation.
If the curve $\Gamma$ is reducible, then stability depends on the degree of $H$ on each component of $\Gamma$. For later use, we work out the characterization of semistability for pure rank one sheaves supported on a curve that is the union of two smooth components meeting transversally.
\begin{lemma} \label{stability on two components}
Let $\Gamma=\Gamma_1+\Gamma_2$ be a curve that is the union of two smooth components meeting transversely in $\delta$ points. Let $F$ be a pure rank one sheaf on $\Gamma$ with $\chi(F)=\chi$ and let $\delta' \le \delta$ be the number of nodes where $F$ is locally free. Then $F$ is $H$--semistable if and only if
\[
\ff{h_1}{h} \chi \le \chi_1 \le \ff{h_1}{h} \chi+\delta',
\]
where $h_1=H \cdot \Gamma_1$, $h_2=H \cdot \Gamma_2$, $h=h_1+h_2$, and where $\chi_i=\chi(F_{\Gamma_i})$. Furthermore, $F$ is $H$-stable if and only if the inequalities are strict. As a consequence, if $H$ is general then semistability is equivalent to stability.
\end{lemma}
\begin{proof}
This follows readily from (\ref{stability pure rank one sheaves}) and the fact that $F$ fits into a short exact sequence
\[
0 \to F \to F_{\Gamma_1} \oplus F_{\Gamma_2} \to \mathbb{C}^{\delta'} \to 0
\]
so that $\chi+\delta'=\chi_1+\chi_2$ and we can rewrite inequality (\ref{stability pure rank one sheaves}) for $i=2$ in terms of $h_1$ and $\chi_1$.
\end{proof}
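To make the inequality concrete, here is a purely illustrative numerical instance (the numbers are ours and are not tied to a specific geometric situation): take $\delta=2$, a polarization with $h_1=h_2$, $\chi=1$, and $F$ locally free at both nodes, so that $\delta'=2$. The inequality of Lemma \ref{stability on two components} becomes
\[
\ff{1}{2} \le \chi_1 \le \ff{1}{2}+2,
\]
so a pure rank one sheaf $F$ as above is $H$--semistable if and only if $\chi_1\in\{1,2\}$, and in that case it is automatically $H$--stable, since both bounds are non-integral and hence the inequalities are strict. Note also that this $H$ is $\chi$--general in the sense above, as $\chi \ff{h_1}{h}=\ff{1}{2}\notin\mathbb{Z}$.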
We will also need the following important result by Melo, Rapagnetta, and Viviani.
\begin{prop} \label{MRV on compactified Jac} \cite{Melo-Rapagnetta-Viviani14} Let $\Gamma$ be a reduced locally planar curve of genus $g$, let $d$ be an integer, let $H$ be a $d$--general polarization for $\Gamma$, and let $\overline J_{H,d}(\Gamma)$ be the compactified Jacobian of degree $d$. Then $\overline J_{H,d}(\Gamma)$ is l.c.i.\ of dimension $g$ and its smooth locus is precisely the locus
\[
J_{H,d}(\Gamma)
\]
parametrizing line bundles.
\end{prop}
As a consequence of these considerations we highlight the following well known Corollary that will be used in Section \ref{fundamental group}.
\begin{cor} \label{number of components}
Let $\mc{G}amma=\mc{G}amma_1+\mc{G}amma_2$ be the union of two smooth components meeting in $\delta$ points. Then $\overline J_{H,d}(\mc{G}amma)$ has $\delta$ irreducible components, parametrized by the $\delta$ pairs $(\chi_1, \chi_2)$ satisfying the condition of Lemma \ref{stability on two components}: For every such pair $(\chi_1, \chi_2)$ the corresponding component contains as a dense open subset the locus of line bundles whose restrictions to $\mc{G}amma_1$ and $\mc{G}amma_2$ have Euler characteristics $\chi_1$ and $\chi_2$, respectively.
\end{cor}
Suppose now that $\chi(F)=-g+1$, where $g$ is equal to the genus of $\mc{G}amma=\supp(F)$. In this case, there is a rational section
\begin{equation} \label{section on irreducible}
s: \mathcal H_{c_1(v)} \dasharrow M_{v,H}(X),
\end{equation}
defined on an open subset containing integral curves.
\begin{rem} \label{section defined} If $\mc{G}amma$ is ample then we can also consider $H=\mc{G}amma$ and it is not hard to see using (\ref{stability pure rank one sheaves}) that the structure sheaf of every curve is stable. It follows that the section is a regular morphism. This guarantees that there is a non--empty open set in the ample cone of polarizations for which the section is a regular morphism.
\end{rem}
\subsubsection{} Recall that if $F$ is a stable sheaf, the tangent space to the moduli space at a point $[F]$ is canonically isomorphic to $\mc{E}xt^1(F,F)$.
Moreover (\cite{Artamkin88}, \cite{Mukai}), the obstructions to deforming $F$ on $X$ lie in
\begin{equation} \label{obstructions}
\mc{E}xt^2(F, F)_0:=\ker[ \tr: \mc{E}xt^2(F,F) \to H^2(X, \mathcal O_X)],
\end{equation}
where $\tr: \mc{E}xt^2(F,F) \to H^2(X, \mathcal O_X)$ is the trace morphism (cf. \cite{Huybrechts-Lehn}). Hence, by Serre duality, if $(X, H)$ is a polarized surface with $\omega_X=\mathcal O_X$ (i.e. a K3 or an abelian surface) and $F$ is a pure $H$-stable sheaf on $X$ with $v=v(F)$, then the moduli space $M_{v,H}$ is smooth at the point $[F]$.
\begin{thm}[\cite{Mukai},\cite{Gottsche-Huybrechts}, \cite{Kieran7},\cite{Huybrechts2},\cite{Yoshioka99}, \cite{Yoshioka}] \label{theorem-definition walls}
Let $X$ be a K3 surface, let $H$ be a polarization on $X$, let $F$ be a pure sheaf on $X$ and set $v=v(F)$. The locus $M_{v,H}^s(X)$ of $H$-stable sheaves on $X$ has a holomorphic symplectic form. If $v$ is primitive, then there exists a locally finite collection of real codimension one linear subspaces (called the $v$-walls) in the ample cone $\Amp(X)\otimes \mathbb{Q}$ such that, if $H$ is chosen outside the union of the $v$-walls then $H$--stability coincides with $H$--semistability (an $H$ satisfying this condition is called $v$-generic, see Definition \ref{definition of walls} below) and $M_{v,H}$ is an irreducible holomorphic symplectic manifold of K3$^{[n]}$-type.\footnote{By definition, this means that it is deformation equivalent to a Hilbert scheme of points on a K3 surface.}
\end{thm}
Hence, if $v=(0, \mc{G}amma, \chi)$ and $H$ is chosen to be $v$--generic, then $H$ is $\chi$--general for every curve in $|\mc{G}amma|$.
Finally, we will need the following expression for the symplectic form on $M_{v,H}$:
on the tangent space $T_{[F]} M_{v,H}(X)=\mc{E}xt^1(F,F)$ at a point $[F]$ corresponding to a stable sheaf $F$, the symplectic form is given by the composition
\begin{equation} \label{symplectic form}
\begin{aligned}
\sigma: \mc{E}xt^1(F, F) \times \mc{E}xt^1(F, F) & \stackrel{\cup}{\longrightarrow} & \mc{E}xt^2(F, F) & \stackrel{\tr}{\longrightarrow} H^2(S, {\mathcal O}_S)\cong \mathbb{C}, \\
(e, f) & \longmapsto & e\cup f \,\,\,\,\,\,\, & \longmapsto \,\,\,\,\, \tr(e\cup f)
\end{aligned}
\end{equation}
where the identification $H^2(S, {\mathcal O}_S)=H^2(S, \omega_S) \cong \mathbb{C}$ is Serre dual to the isomorphism
\begin{equation} \label{isomorphism induced by sigma}
H^0(S, \omega_S)=\mathbb{C} \sigma \cong \mathbb{C},
\end{equation}
defined by the choice of a symplectic form $\sigma \in H^0(S, \omega_S)$, which is unique up to scalar.
\subsection{Some facts about linear systems on Enriques surfaces.} \label{linear systems enriques}
In this section we collect a few (mostly known) results about linear systems on an Enriques surface that will be needed in the rest of the paper. After a general introduction, we will focus on linear systems on a general Enriques surface. For a more complete treatment we refer to \cite{Cossec} and \cite{Cossec-Dolgachev}.
\subsubsection{} \label{primitive} \label{general enriques 1} The N\'eron-Severi group\footnote{Following Definition 1.1.13 of \cite{Lazarsfeld1}, we let the N\'eron-Severi group of a smooth projective variety $X$ be the group of line bundles on $X$ modulo numerical equivalence. In particular, it is torsion free.} of $T$ has rank $10$ and is isomorphic to the abstract lattice $U\oplus E_8(-1)$, where $U$ and $E_8(-1)$ denote the hyperbolic lattice and the positive definite $E_8$ root lattice, respectively.
The pullback homomorphism
\begin{equation}
f^*: \mathbb{N}S(T) \to \mathbb{N}S(S),
\end{equation}
is injective and in \cite{Namikawa1} it is shown that the image of the N\'eron-Severi group of $T$ in the N\'eron-Severi group of $S$ is a primitive sub lattice (of rank $10$). In particular, if we choose $C$ so that its class is primitive in $\mathbb{N}S(T)$, then so is the class of $D$ in $\mathbb{N}S(S)$. By abuse of notation, we say that a curve or a line bundle is primitive if its class in the N\'eron-Severi group is a primitive element of the lattice.
Moreover (\cite{Namikawa1}), by choosing the Enriques surface general in moduli we can ensure that
\begin{equation} \label{NS general enriques}
f^*(\mathbb{N}S(T))=\mathbb{N}S(S).
\end{equation}
When this is the case, $\iota^*$ acts as the identity on $\mathbb{N}S(S)$ and there are no smooth rational curves on $S$ or on $T$.
In particular, there are no effective line bundles of negative self-intersection.
From now on, when we say that $T$ is general we will assume that (\ref{NS general enriques}) holds.
\begin{lemma} Let $T$ be a general Enriques surface.
\begin{enumerate}
\item If $L$ is a line bundle on $T$ with $L^2>0$ (respectively $L^2\ge 0$), then $\pm L$ is ample (respectively nef).
\item If $L^2>0$ then the general member of $|L|$ is irreducible.
\end{enumerate}
\end{lemma}
\begin{proof}
Item (1) follows immediately from the Hodge index theorem and the fact that there are no curves with negative self intersection on a general $T$. Item (2) is \cite[Prop. 3.1.6]{Cossec-Dolgachev} and \cite[Cor 3.1.2]{Cossec-Dolgachev}.
\end{proof}
\begin{lemma} \label{irreducibility} Let $T$ be a general Enriques surface, and let $C \subset T$ be a primitive curve of genus $g \ge 2$. If $C$ is irreducible, then so is its preimage $D=f^{-1}(C)$.
\end{lemma}
\begin{proof}
Suppose $D$ breaks into the sum of
two irreducible components $D_1$ and $D_2$. Since $D_1$ and $D_2$ are interchanged by the involution and since by assumption $\iota^*$ acts as the identity on $\mathbb{N}S(S)$, it follows that $D_1 \sim D_2$ and hence that $D\sim 2D_i$, contradicting the fact that $C$, and thus $D$ by \ref{primitive}, is primitive.
\end{proof}
We should also point out that for the conclusion of this lemma to hold, it is sufficient to assume that the class of $C$ is not divisible by $2$.
Contrary to what happens for positive genus where the dimension of an effective linear system of genus $g$ is equal to $g-1$, if $L$ is an effective linear system with $L^2=0$, then the dimension of $|L|$ depends on the divisibility of the class of $L$ in $\mathbb{N}S(T)$. It is well known \cite[\S 1.6]{Cossec} that if $L$ is primitive, then $L=\mathcal O(e)$ for a primitive elliptic curve $e$ and $|L|=\{e\}$. According to the notation introduced in Subsection \ref{notation}, we denote by
\[
e'
\]
the (unique) curve in the linear system $L \otimes \omega_T$. If $L=\mathcal O(2e)$, then $|L|$ is a pencil, whose general fiber is a smooth elliptic curve and that has exactly two double fibers
\[
e \quad \text{ and } \quad e'.
\]
This shows that the canonical bundle of $T$ is equal to the difference of the two half fibers, i.e., $\omega_T=\mathcal O(e-e')$.
From these considerations one can deduce (see \cite[Thm 1.5.1]{Cossec}) that if $L=\mathcal O(ke)$ with $k \ge 2$, then
\[
|L|\cong \Sym^{\lfloor \ff{k}{2} \rfloor}|2e|
\]
and $L$ has a fixed component if and only if $k$ is odd. Primitive elliptic curves and elliptic pencils play a big role in the study of linear systems on Enriques surfaces.
\begin{defin}
A genus $g \ge 2$ linear system $|C|$ on an Enriques surface $T$ is called hyperelliptic if $g=2$ or if the map $T \dashrightarrow \mathbb{P}^{g-1}$ defined by $|C|$ is of degree $2$ onto a rational normal scroll of degree $g-2$ in $\mathbb{P}^{g-1}$.
\end{defin}
The following proposition gives a very useful characterization of hyperelliptic linear systems.
\begin{prop}\cite[Prop. 4.5.1 and Cor. 4.5.1]{Cossec-Dolgachev} \label{hyperelliptic enriques}
Let $T$ be an Enriques surface, and let $C\subset T$ be an irreducible curve with $C^2=2g-2 \ge 2$. The following are equivalent:
\begin{enumerate}
\item The linear system $|C|$ is hyperelliptic;
\item $|C|$ has base points;
\item There exists a primitive elliptic curve $e_1$ such that $C\cdot e_1 =1$.
\end{enumerate}
Moreover, if $T$ is general then $|C|$ is hyperelliptic if and only if $C\equiv (g-1)e_1+e_2$ for two primitive elliptic curves with $e_1 \cdot e_2=1$.
\end{prop}
Up to tensoring by $\omega_T$, the two elliptic curves are determined by $|C|$.
It is worth mentioning here that if $|C|$ is hyperelliptic then its base locus (which is non-empty by the proposition above) consists of two simple points, described by the following lemma.
\begin{lemma} \label{base points}
Let $C=(g-1)e_1+e_2$ be a hyperelliptic linear system. The two simple base points of $|C|$ are
\[ \begin{aligned}
e_1 \cap e_2' \text{ and } e'_1 \cap e_2 & \text{ if } g-1 \text{ is odd }\\
e_1 \cap e_2 \text{ and } e'_1 \cap e_2 & \text{ if } g-1 \text{ is even }
\end{aligned}
\]
\end{lemma}
\begin{proof}
The proof is straightforward after noticing that $\mathcal O(C) \res{e_1}$ is equal to $\mathcal O(e_2') \res{e_1}$ or $\mathcal O(e_2) \res{e_1}$ depending on whether $g-1$ is odd or even, and similarly for the restriction to $e_1'$.
\end{proof}
\begin{cor} \label{section}
Let $p: \mathcal C \to |C|$ be the universal family of curves of a hyperelliptic linear system. Then $\mathcal C$ is smooth and $p$ has two sections.
\end{cor}
\begin{proof}
By the Lemma above, $|C|$ has two simple base points and hence we can identify this linear system with a base point free linear system on the blow up $\widetilde T$ of $T$ at the two base points. Since the universal family $ \mathcal C \to T$ factors via the blow up morphism $\widetilde T \to T$, we see that $\mathcal C$ is smooth. The statement about the sections also follows readily from the Lemma above.
\end{proof}
Finally, it is known that the general curve in $|C|$ is a smooth hyperelliptic curve \cite[Cor 4.5.1]{Cossec-Dolgachev}.
\begin{cor} \label{smooth general member}
Let $L$ be an effective line bundle on a general $T$ with $L^2>0$. Then the general member of $|L|$ is a smooth connected curve.
\end{cor}
We will need the following observation regarding intersections of curves on a general $T$. If $C$ and $\mc{G}amma$ are two curves such that $C \cdot \mc{G}amma=1$, then one of them has to be a primitive elliptic curve and the other one either another primitive elliptic curve or a hyperelliptic curve: If they are both of genus $1$ there is nothing to show. So let us suppose that one of them, say $C$, is of genus $g \ge 2$. Up to moving $C$ in its linear system we can assume it to be smooth. Suppose that $\mc{G}amma$ is not primitive elliptic so that it moves in a positive dimensional linear system. If $|\mc{G}amma|$ is base point free, then $\mathcal O(\mc{G}amma) \res{C}$ would cut a positive dimensional degree one linear system on $C$, providing a contradiction since $C$ is not rational. If $|\mc{G}amma|$ has a base point, then by the proposition above it has to be hyperelliptic. It follows that there are two primitive elliptic curves such that $|\mc{G}amma|=|ne_1+e_2|$, with $n=g(\mc{G}amma)-1$. But since by the Hodge index theorem $C \cdot e_i >0$, for $i=1$ and $2$, we get a contradiction to the fact that $C \cdot \mc{G}amma=1$.
In the rest of the paper we will need some knowledge about singular curves in linear systems on a general $T$. Given a linear system $|C|$ on $T$ with smooth connected general member, we define the discriminant
\[
\Delta \subset |C|
\]
of $|C|$ to be the closed codimension one subset of $|C|$ parametrizing singular members (for the sake of this paper it will be enough to consider $\Delta$ with its reduced induced structure). The following three propositions describe the curves parametrized by the general points of the discriminant.
\begin{prop} \label{reducible in codimension one}
Let $|C|$ be a genus $g \ge 2$ linear system on a general Enriques surface $T$. Then $|C|$ has reducible members in codimension one if and only if $|C|$ is hyperelliptic. \end{prop}
\begin{proof}
Consider a reducible member of the form
\begin{equation} \label{decomposition}
C_1+C_2,
\end{equation}
where $C_1$ and $C_2$ have no common components
and set $\nu=C_1\cdot C_2$.
For $i=1,2$, we let $g_i$ be the arithmetic genus of $C_i$. We have
\[
g=g_1+g_2+\nu-1,
\]
so $\dim |C|=g_1-1+g_2-1+\nu$.
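(A sketch of this standard genus count: from the sequence $0 \to \mathcal O_{C_1}(-C_2) \to \mathcal O_{C_1+C_2} \to \mathcal O_{C_2} \to 0$ one gets
\[
1-g=\chi(\mathcal O_{C_1+C_2})=\chi(\mathcal O_{C_1})-\nu+\chi(\mathcal O_{C_2})=(1-g_1)+(1-g_2)-\nu,
\]
which gives $g=g_1+g_2+\nu-1$ and hence $\dim|C|=g-1=(g_1-1)+(g_2-1)+\nu$.)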
Since the irregularity of $T$ is zero, the locus of curves in $|C|$ having a decomposition like that in (\ref{decomposition}) admits a finite surjective morphism from the product of linear systems $|C_1|\times |C_2|$.
Case by case, we will compare the dimension of $|C_1|\times |C_2|$ with that of $|C|$. Clearly, if $\dim |C_i|=g_i-1$ for both $i=1$ and $2$ (as is the case when the two curves either have genus greater than one or are primitive elliptic), then
\[
\dim |C|= \dim |C_1|+ \dim |C_2|+\nu=\dim |C_1|\times |C_2|+\nu.
\]
It follows that the codimension of the locus of curves of this type is equal to one if and only if $\nu=1$. If this is the case, then by the remarks following Corollary \ref{smooth general member} at least one of the two curves, say $C_1$, has to be a primitive elliptic curve. This implies that $C \cdot C_1=1$ and hence by the remarks following Corollary \ref{smooth general member}, that $|C|$ is hyperelliptic.
Next, consider the case where $C_1\in |s e_1|=\mathbb{P}^{\lfloor \ff{s}{2}\rfloor}$ for some primitive elliptic curve $e_1$ and some integer $s \ge 1$. If $g_2 \ge 2$, we have $\dim |C_1|\times |C_2|=\lfloor \ff{s}{2}\rfloor+g_2-1$ and $\dim |C|= g_2-1+ \nu$. If $C_2=te_2$ with $t \ge 1$, then $\dim |C_1|\times |C_2|=\lfloor \ff{s}{2}\rfloor+\lfloor \ff{t}{2}\rfloor$ and $\dim|C|=\nu$.
Either way we have
\[
\dim |C| - \dim (|C_1|\times |C_2|)=\left\{\begin{aligned} & \nu-\lfloor \ff{s}{2}\rfloor, && \text{if} \,\, g_2\ge 2\\ &\nu-\lfloor \ff{s}{2}\rfloor-\lfloor \ff{t}{2}\rfloor, && \,\text{if} \,\, g_2=1. \end{aligned} \right.
\]
In the first case, since $\nu=s\nu'$ and $\nu' \ge 1$, we are done, unless $a)$ $s=1$ and $\nu'=1$, or $b)$ $s=2$ and $\nu'=1$. In case $a)$, $C_1$ is primitive elliptic and $C \cdot C_1=1$, so we are in the hyperelliptic case. In case $b)$, $C_2 \cdot e_1=1$ so the curve $C_2$ is hyperelliptic and we can write it as $\nu e_1+e_2$, with $e_1\cdot e_2=1$. It follows that $|C|=|(\nu+s)e_1+e_2|$ is hyperelliptic.
As for the second case, we can set $\nu=st\nu'$. It follows that we are done unless $\nu'=1$, $s=2$ and $t=1$ (or $s=1$ and $t=2$). This means that $C_2=e_2$, with $e_1\cdot e_2=1$ and that $|C|=|2e_1+e_2|$ is hyperelliptic (or $|C|=|2e_2+e_1|$).
\end{proof}
\begin{prop} \label{Verra}
Let $|C|$ be a genus $g \ge 3 $ non--hyperelliptic linear system on a general Enriques surface. If $|C|\neq |2(e_1+e_2)|$ for two primitive elliptic curves $e_1$ and $e_2$ with $e_1 \cdot e_2=1$, then there is an open dense subset of the discriminant parametrizing irreducible curves with one single node.
\end{prop}
\begin{proof}
It is well known that if $|C|$ is very ample then $|C|$ contains a Lefschetz pencil (e.g. \cite[\S 2.1 II]{Voisin2}), so we only need to prove the statement in the case where $|C|$ is not very ample. By \cite[Thm 1.2]{Knutsen} a curve $C$ of genus $g \ge 2$ on a general Enriques surface is very ample if and only if there is no primitive elliptic curve $e$ such that $C \cdot e =1$ or $C \cdot e =2$. Since in the first case $|C|$ is hyperelliptic, we only need to prove the statement in the second case.
Under this assumption, we claim that the linear system $|C|$ satisfies one of the following
\begin{enumerate}
\item $|C|$ defines a degree $4$ morphism $\psi: T \to \mathbb{P}^2$.
\item $|C|$ defines a degree one morphism $T \to R \subset \mathbb{P}^{g-1}$ onto a non normal surface.
\item $|C|=|2(e_1+e_2)|$ for some primitive elliptic curves $e_1$ and $e_2$ with $e_i \cdot e_j=1$, for $i\neq j$.
\end{enumerate}
Indeed, by \cite[Theorem 4.6.3]{Cossec-Dolgachev} either (1) happens, or $|C|$ defines a birational morphism onto a non normal surface with double lines, or $|C|$ is base point free and defines a degree two map. These linear systems are called superelliptic (see page 228 of \cite{Cossec-Dolgachev}). If this is the case, Proposition 4.7.1, Theorem 4.7.1, and Theorem 4.7.2 of \cite{Cossec-Dolgachev} imply that case (3) occurs (case (i) of Thm. 4.7.2 is excluded using the fact that $T$ contains no rational curves). For the first two cases above, let us now prove that in codimension one only curves with one single node can occur. Case (1) uses the description of the ramification locus of $\psi$ provided in \cite{Verra-83}. Verra shows that, generically, the ramification locus is equal to a degree $12$ curve $\mc{G}amma \subset \mathbb{P}^2$ that has $36$ cuspidal points and no other singularity. Since lines that are tangent to a smooth point of $\mc{G}amma$ appear only in codimension one and since a plane curve has only a finite number of bitangents or flexes, we only need to show that if $\ell$ is a general line through a cusp $\gamma \in \mc{G}amma$, then $\psi^{-1}(\ell)$ has at worst one node. This is proved in Lemma \ref{cuspidi} below.
Let us pass to case (2). Since the map is birational, a curve in $|C|$ can be singular only if it is the preimage $\psi^{-1}(H)$ of a hyperplane section $H$ of $R$ that either is tangent to $R$ at a smooth point (or is the limit of such), or passes through the singular locus of $R$. We can argue as in \cite[\S 2.1 II]{Voisin2} (see remarks after Cor. 2.8 of loc.\ cit., which apply to a smooth quasi--projective variety) and conclude that the general hyperplane that is tangent to the smooth locus of $R$ has one single ordinary double point (i.e. hyperplanes that have two ordinary double points or other singularities appear in codimension $2$). We are left with analyzing what happens over the hyperplane sections through the singular locus $\Sing R$. Notice that, by Bertini, these hyperplane sections are smooth outside of $\Sing R$ so we only have to understand what happens over the singular locus.
To do so, we first have to understand what the singular locus of $R$ looks like: using Propositions 3.6.2 and 3.6.3 of \cite{Cossec-Dolgachev} we may assume that $|C|$ is one of the following
\begin{enumerate}
\item[(a)] $|ke+2e_1|$, where $e$ and $e_1$ are primitive elliptic curves with $e \cdot e_1=1$ and $k \ge 3$ (genus $g=2k+1$).
\item[(b)] $|ke+e_1|$, where $e$ and $e_1$ are primitive elliptic curves with $e \cdot e_1=2$ and $k \ge 2$ (genus $g=2k+1$).
\item[(c)] $|ke+e_1+e_2|$, where $e,e_1$, and $e_2$ are primitive elliptic curves with $e \cdot e_1=e \cdot e_2=e_1 \cdot e_2=1$ and $k \ge 1$ (genus $g=2k+2$).
\end{enumerate}
(in the first two cases we have set $k \ge 3$ and $k \ge 2$ to prevent falling into cases (3) and (1) above). Let us do the case (c) with $k=1$, which is the well known realization of an Enriques surface as the normalization of a sextic surface $R$ in $\mathbb{P}^3$ that passes doubly through the edges $l_1, \dots, l_6$ of a tetrahedron \cite{Dolgachev16}. The edges of the tetrahedron are the images of the elliptic curves $e, e', e_1, e_1', e_2, e_2'$ (indeed, the linear system $|C|$ restricts to a $g^1_2$ on each of these elliptic curves). In addition to the double lines, the surface $R$ has $4$ pinch points on each of the edges of the tetrahedron (the ramification points of the $g^1_2$'s on the elliptic curves) and $4$ triple points at the $4$ vertices of the tetrahedron. In particular, the preimage of a general point on one of the lines $l_i$ consists of $2$ points, the preimage of a pinch point consists of a single point, and the preimage of a triple point consists of $3$ points. If $H$ is a general hyperplane section through one of the pinch points, then it acquires a cusp. However, since $T \to R$ is the normalization morphism, we can see that in this case $\psi^{-1}(H)$ is smooth. Indeed, $T$ can be locally identified with the proper transform of $R$ under the blow up of $\mathbb{P}^3$ along the double line, and it is clear that this blow up normalizes a general cuspidal curve passing through a pinch point. A general hyperplane section through one of the triple points will be a curve with a triple point with three distinct branches, which are separated under the map $\psi$. It follows that curves in $|C|$ that have worse singularities than one simple node appear in codimension two.
The other cases can be dealt with analogously: using Lemma \ref{birational map} below and Definition 1.43 (and discussion thereafter) of \cite{Kollar-Kovacs}, $R$ has two double lines (and is generically normal crossing along them) and pinch points. Hence, a hyperplane section $\mc{G}amma$ of $R$ is singular wherever it is tangent to the smooth locus of $R$ and is also singular along its intersection with the two double lines. A tangent hyperplane section that does not contain the double locus will thus be normalized under the morphism $T \to R$.
It follows that the discriminant locus of these linear systems is equal to the closure of the locus of hyperplane sections of $R$ that are tangent along the smooth locus of $R$ which, again using \cite[\S 2.1 II]{Voisin2}, is irreducible.
\end{proof}
\begin{cor} \label{discriminant locus irr}
Let $|C|$ be as in case $(2)$ of Proposition \ref{Verra} above. Then the discriminant locus of $|C|$ is irreducible. Moreover, if $|C|$ is as in case $(1)$ of Proposition \ref{Verra}, the number of irreducible components of the discriminant locus is equal to $37$.
\end{cor}
\begin{proof}
The second statement follows from the discussion of Verra's result in the proof of Proposition \ref{Verra}. As for the first statement, we can argue as follows. First recall that the closure of the locus of hyperplane sections that are tangent to a smooth quasi--projective variety (as is the smooth locus of $R$) is irreducible. Second, notice that for any hyperplane $H \subset \mathbb{P}^{g-1}$ that does not contain the two lines of the non--normal locus of $R$, the curve $H\cap R$ has only nodes or cusps along the two lines (depending on whether it meets a line at a regular double point or at a pinch point) and hence it is normalized under the map $ T \to R$ (notice that the hyperplane sections that contain the lines appear in higher codimension).
\end{proof}
We remark that since in case (3) the class of $C$ is divisible by $2$, this is not a case we will consider (a necessary condition for the assumptions of Theorem \ref{thm smooth} to hold is that the class of $C$ is not divisible by $2$).
\begin{lemma} \label{cuspidi}
Let $|C|$, $\psi: T \to \mathbb{P}^2$, and $\mc{G}amma$ be as in case (1) above, and let $\gamma \in \mc{G}amma$ be a cuspidal point. Then for a general line $\ell$ through $\gamma$, the curve $C=\psi^{-1}(\ell)$ has at worst one node.
\end{lemma}
\begin{proof} Since $\ell$ is a general line through $\gamma$, we may assume that it is not tangent to $\mc{G}amma$ away from $\gamma$ and hence that $C$ is smooth away from $\psi^{-1}( \gamma)$.
Let $\widetilde C \to C$ be the normalization of $C$ and let us consider the induced morphism $ \widetilde \psi: \widetilde C \to C \to \ell$. Suppose that $C$ has a cusp over $\gamma$, so that $\widetilde C$ has genus $3-1=2$ and $\widetilde \psi$ has a ramification point over $\gamma$. Applying Riemann--Hurwitz to $\widetilde \psi$, we can compute the ramification $r$ of $\widetilde \psi$: $r=2 \deg \psi + 2g-2= 2 \cdot 4+2=10$. Since $\ell$ already meets the ramification curve $\mc{G}amma$ in $10$ points other than $\gamma$, $\widetilde \psi$ cannot ramify over $\gamma$. Hence, if $C$ has a double point over $\gamma$ it must be a node. The case where $C$ has worse singularities is dealt with analogously, using Riemann--Hurwitz to compute the ramification divisor and finding a contradiction on the number of ramification points of $\widetilde \psi$ outside of $\gamma$.
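For the reader's convenience, the Riemann--Hurwitz count used above reads
\[
2g(\widetilde C)-2=\deg(\widetilde \psi)\,\big(2g(\mathbb{P}^1)-2\big)+r,
\qquad \text{i.e.} \qquad
r=2\deg \widetilde \psi+2g(\widetilde C)-2=2\cdot 4+2=10,
\]
with $\deg \widetilde \psi=\deg \psi=4$ and $g(\widetilde C)=2$ in the cuspidal case considered above.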
\end{proof}
\begin{lemma} \label{birational map}
Let $|C|$ and $\psi: T \to R \subset \mathbb{P}^{g-1}$ be as in (a), (b) or in (c) with $k \ge 2$ above (so that $g \ge 5$). Then $\psi$ is an isomorphism outside of the two elliptic curves $e$ and $e'$, which are mapped $2:1$ onto two double lines in $R$.
\end{lemma}
\begin{proof}
It is easy to check that the restriction of $|\mathcal O(C)|$ to $ {e}$ and $e'$ is a $g^1_2$. Also notice that since the two sublinear systems $|I_e(C)|=|\mathcal O(C-e)|$ and $|I_{e'}(C)|=|\mathcal O(C-e')|$ are different subspaces of $|\mathcal O(C)|$, it follows that $|\mathcal O(C)|$ separates the two curves $e$ and $e'$. In the case when $C^2 \ge 10$ (which corresponds precisely to $k \ge 3$ in cases $a)$ and $b)$ above and to $k \ge 2$ in case $c)$), we can use Reider's theorem together with our assumptions on $T$ and on $|C|$, to conclude that $\psi$ separates points and tangent directions outside of an effective curve whose components $E$ satisfy $E^2=0$ and $E\cdot C=2$. Since the components of such a curve have to be equal to $e$ or to $e'$, this solves the question in the case when $C^2 \ge 10$. We thank the referee for suggesting the use of Reider's theorem.
We are thus only left to consider the case $b)$, with $k=2$.
This corresponds to the case when $|C|=|2e+e_1|$ and $e$ and $e_1$ are primitive elliptic curves with $e \cdot e_1=2$. As mentioned on page 278 of \cite{Cossec-Dolgachev} the projective model associated to this linear system is a non--normal octic surface in $\mathbb{P}^4$ with two double lines (images of $e$ and $e'$). We include a proof of the fact that, for general $T$, such an octic surface is smooth away from the two lines. We need to show that for every length $2$ point $z $ on $ T$ that is scheme theoretically not contained in $ \{e \cup e'\}$, there is a surjective map
\[
\alpha_z: H^0(\mathcal O(C)) \to H^0(\mathcal O_z(C)).
\]
We will prove this with the help of the linear systems $D:=C-e=e+e_1$ and $D'=C-e'=e'+e_1$. Notice that, as in case $(1)$ of Proposition \ref{Verra}, $|D|$ defines a degree $4$ morphism $\varphi_D: T \to \mathbb{P}^2$ (cf. \cite[Thm 4.6.3]{Cossec-Dolgachev}). Given $z$ as above, then either $z$ is scheme-theoretically contained in a fiber of $\varphi_D$, or it is not. Suppose it is, so that $ z \subset \varphi_D^{-1}(p)$, for some $p \in \mathbb{P}^2$. Then $|I_{z|T}(D)|=\varphi_D^*|I_{p|\mathbb{P}^2}(1)|$ is a pencil and hence there is an irreducible curve $D_z \in |D|$ containing $z$ (here we denote by $I_{X|Y}$ the ideal sheaf of a closed subscheme $X \subset Y$). We claim that $H^1(D_z,I_{z|D_z}(C))=H^0(D_z,I_{z|D_z}^\vee(-C)\otimes \omega_{D_z})=0$. This immediately implies that $H^0(D_z, \mathcal O_{D_z}(C)) \to H^0(\mathcal O_z(C))$ is surjective, which, since $H^1(T, \mathcal O(C-D_z))=H^1(T, \mathcal O(e))=0$, implies that $\alpha_z$ is also surjective. To prove the claim, suppose by contradiction that there is a non zero section $\sigma \in H^0(D_z,I_{z|D_z}^\vee (-C)\otimes \omega_{D_z})= H^0(D_z,I_{z|D_z}^\vee(-e'))$. Then $\sigma $ induces an injective morphism $\mathcal O_{D_z} \to I_{z|D_z}^\vee(-e')$ which we can dualize (notice that $ I_{z|D_z}$ is reflexive because it is a torsion free sheaf on a locally planar curve)
to get an injection
\[
\sigma^\vee: I_{z|D_z}(e') \to \mathcal O_{D_z}.
\]
Using the fact that $H^1(e_1)=0$, we see that $|\mathcal O(e') \res{D_z}|=\{e'\cap D_z\}$. The existence of a non--zero $\sigma^\vee$ as above hence implies that $z\supset e'\cap D_z$. Since the length of $z$ and of $e'\cap D_z$ are both equal to $2$, this implies that $z= e'\cap D_z$, which contradicts the fact that $ z$ is scheme theoretically not contained in $e' \cup e$.
Let us now suppose that $z $ is not contained in a fiber of $\varphi_D$. Then the morphism $ \beta_z: H^0(\mathcal O(D)) \to H^0(\mathcal O_z(D))$ is surjective. Consider the morphisms
\[
\gamma: H^0(\mathcal O(D)) \to H^0(\mathcal O(C)), \quad \text{and} \quad \gamma_z: H^0(\mathcal O_z(D))=H^0(\mathcal O_z(C-e)) \to H^0(\mathcal O_z(C)).
\]
Since $ \gamma_z \beta_z=\alpha_z \gamma$ and $\beta_z$ is surjective, we have $\im (\gamma_z) \subset \im (\alpha_z)$. Notice that $\gamma_z$ vanishes along $e\cap z$. There are three cases. The first is when $z \cap e= \emptyset$ so that $\gamma_z$ is an isomorphism and hence $\alpha_z$ is surjective. The second is when $z$ is a length two point supported on $e$ (but not scheme theoretically contained in $e$). Then, under the identification $\mathcal O_z(C) \cong \mathbb{C}[\epsilon]\slash (\epsilon^2)$, we see that $\im(\gamma_z)$ is the maximal ideal $(\epsilon)$ and to conclude that $\alpha_z$ is surjective we only need to notice that $|C|$ is base point free and hence that there is a section not vanishing on the support of $z$. The third case is when $z=z_1 \cup z_2$ with $z_1 \in e$ and $z_2 \notin e$. Then $\im(\gamma_z) =\mathcal O_{z_2}$. In this case, we can use the linear system $D'=C-e'=e'+e_1$ and consider instead the morphism $\gamma'_z: H^0(\mathcal O_z(D'))=H^0(\mathcal O_z(C-e')) \to H^0(\mathcal O_z(C))$. The same reasoning as above, together with the fact that $ z_1 \notin e'$, shows that we have $\im(\gamma'_z)\supset \mathcal O_{z_1} $. Since $\im(\alpha_z) \supset \im(\gamma'_z)$, this shows that also in the third case $\alpha_z$ is surjective and concludes the proof.
\end{proof}
\begin{prop} \label{discriminant locus}
(1) Let $|C|$ be a hyperelliptic linear system of genus $g \ge 3$. Then $\Delta \subset |C|$ is the union of four irreducible components $\Delta_1, \Delta_2, \Delta_3, \Delta_4$. The general point of the first two components parametrizes curves that are the union of two smooth curves meeting transversally in one point, the general point of the third component parametrizes curves that are the union of two smooth components meeting transversally in two points, and the general point of the fourth component parametrizes singular, but irreducible, curves. Moreover, the general curve parametrized by this component has only one node. \\
(2) If $g(C)=2$, then $|C|$ is a pencil which for general $T$ has exactly $18$ singular members, $16$ of which are irreducible with one node and $2$ of which are reducible, consisting of two elliptic curves meeting transversely in one point.
\end{prop}
\begin{proof}
(1) It is clear that the two hyperplanes,
\begin{equation} \label{two hyperplane components}
\Delta_1:=\{e_1\}\times |(n-1)e_1+e_2|,\quad \text{ and } \quad \Delta_2:=\{e_1'\}\times |(n-1)e_1+e_2'|,
\end{equation}
constitute two components of the discriminant locus, and also that they parametrize curves of the form $e_1\cup \mc{G}amma$, with $\mc{G}amma$ a curve in $ |(n-1)e_1+e_2|$ (resp. $e_1'\cup \mc{G}amma$, with $\mc{G}amma \in |(n-1)e_1+e'_2|$). Since the genus of $\mc{G}amma$ is $\ge 2$, the general curve in these linear systems is smooth by Corollary \ref{smooth general member}.
For the third irreducible component, consider the natural map
\[
\begin{aligned}
\psi : \mathbb{P}^1 \times \mathbb{P}^{n-2}=|2e_1| \times |(n-2)e_1 + e_2| & \longrightarrow \Delta \subset \mathbb{P}^n \\
(C_1, C_2) & \longmapsto C_1+C_2
\end{aligned}
\]
which is finite and birational. In particular, the image of $\psi$ defines a component of $\Delta$ which we denote by $\Delta_3$. The general curve parametrized by this component is therefore the union of a smooth curve in $|2e_1| $ and of a smooth curve in $ |(n-2)e_1 + e_2| $, which generically meet transversally in two distinct points.
We are left with proving that the remaining part $\Delta_4$ of the discriminant is irreducible and that the general curve parametrized by it is irreducible with one single node. By definition of hyperelliptic linear system, and the fact that $T$ contains no rational curves, the rational map $\varphi_{|C|}$ associated to the linear system maps $T$ generically $2:1$ onto a degree $n-1$ smooth rational surface $R \subset \mathbb{P}^{n}$. We now recall some geometry of $\varphi_{|C|}$, following \cite[Thm 4.5.2]{Cossec-Dolgachev}.
We already saw that the linear system has two base points, which were described in Lemma \ref{base points}.
Since the degree one linear systems induced by restricting $|C|$ to $e_1$ and $e_1'$ both have a base point, $\varphi_{|C|}$ contracts these two curves to two distinct points, denoted by $P$ and $Q$. By Lemma \ref{base points}, when $n$ is even both base points of $|C|$ lie on $e_2$, so that when $n=2$ the degree two linear system $|C| \res{e_2}$ is trivial and the curve $e_2$ also gets contracted.
Let $T' \to T$ be the blow up of the two base points of $|C|$. We get a generically $2:1$ morphism $ T' \to R$ which contracts the proper transforms of $e_1 $ and $ e_1'$ (and also $e_2$ if $n=2$).
The ramification curve of $\varphi_{|C|}$ is described in Theorem 4.5.2 of \cite{Cossec-Dolgachev}.
It consists of the union of two lines $\ell_1$ and $\ell_2$ (the images of the exceptional divisors of the blow up $T' \to T$) and of an irreducible curve $B \subset R$. The irreducible curve $B$ has two tacnodes in $P$ and $Q$ and is otherwise non singular ($T$ contains no rational curves), except in the case $n=2$ where it has a simple node at the intersection $O$ of the two lines.
A curve in $|C|$ is singular in the following cases:
if it covers a singular (hence reducible) hyperplane section of $R$; if it covers a smooth curve that is tangent to the ramification curve; if its image contains one of the two points $P$ and $Q$; or, when $n=2$, if it covers a line passing through the intersection $O$ of $\ell_1$ and $\ell_2$.
The preimages, under $\varphi_{|C|}$, of the hyperplane sections through $P$ and $Q$ are the curves in $\Delta_1$ and $\Delta_2$, respectively.
The hyperplane sections of $R$ that are tangent to $\ell_1$ (resp. $\ell_2$) contain $\ell_1$ (resp. $\ell_2$) and hence form a set of codimension $2$. For $n=2$, we also have to consider the set of hyperplanes through $O$, which is nothing but $\Delta_3$, namely the sublinear system $e_2+|2e_1|$ of curves containing $e_2$. For $n \ge 3$, the component $\Delta_3$ corresponds to the curves covering the reducible hyperplane sections of $R$.
Finally, we observe that the closure of the set of hyperplane sections that are tangent to $B$ at smooth points is irreducible, since it is dominated by a $\mathbb{P}^{n-2}$--bundle over the smooth locus of $B$. Moreover, generically it parametrizes hyperplane sections that are tangent but not bi--tangent to $B$, so that the corresponding curve in $|C|$ has one simple node.
(2) A genus $2$ linear system has two simple base points $p$ and $q$, so that if $\mathcal C \to |C|$ denotes the universal family, we have $\chi_{top}(\mathcal C)=\chi_{top}(BL_{p,q}T)=14$. We use this to count the number of singular curves in $|C|$. By Proposition \ref{hyperelliptic enriques}, $|C|=|e_1+e_2|$ for two primitive elliptic curves $e_1$ and $e_2$, with $e_1 \cdot e_2=1$. It follows that there are exactly two reducible curves in $|C|$, namely $e_1+e_2$ and $e'_1+e'_2$. By \S 8.1.4 (i) of \cite{Cossec}, the linear system $|C|$ is the pullback under a degree two map $\psi: T \dashrightarrow |C| \times |C'|=\mathbb{P}^1 \times \mathbb{P}^1 \subset \mathbb{P}^3$ of one of the two rulings of the quadric. The map $\psi$, which is defined away from the four intersection points $e_i \cap e'_j$, ramifies over the union of a square of lines (the images of the four exceptional divisors of the blow up of $T$ at $e_i \cap e'_j$) and of a degree $(4,4)$ curve $B$ that has a simple node at each edge of the square. Counting parameters we can see that the general Enriques surface can be constructed in this way, and that generically $B$ will have no other singularities. Moreover, we can also assume that no line in the ruling is bitangent (for more details see \cite{thesis}). Since the singular, but irreducible, members of $|C|$ arise from lines that are tangent to $B$, we see that generically they all have one simple node and no other singularity. Hence all the singular curves in $|C|$ have Euler number equal to $-1$. If $N$ denotes the number of singular fibers, we have $14=-2(2-N)-N$, and there are exactly $18$ singular fibers, two of which are reducible.
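For completeness, the Euler number count can be spelled out as follows (each singular member, nodal irreducible or two elliptic curves glued at a point, has topological Euler characteristic $-1$):
\[
14=\chi_{top}(\mathcal C)=(2-N)\cdot(-2)+N\cdot(-1)=N-4,
\]
so that $N=18$, in agreement with the count above.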
\end{proof}
\section{Smoothness and first properties of $N$} \label{fibration in jacobians}
Let $\chi$ be a non-zero integer and set
\begin{equation} \label{w e v}
w=(0,[C], \chi), \,\,\,\, \text{ and } \,\, v=f^*w=(0,[D], 2\chi).
\end{equation}
Let $H$ denote a polarization on $T$, and set
\[
A=f^*H.
\]
The Hilbert scheme $\mathcal H_{c_1(w)}$ has two components: $|C|$ and $|C'|$. Without loss of generality we can consider only one of them and set
\begin{equation} \label{N}
N=M_{w,H}(T, |C|), \,\,\,\, \text{ and } \,\, M=M_{v,A}(S).
\end{equation}
In order to study $N$, we will look at the natural pullback morphism from $N$ to $M$, which to a sheaf $F$ on $T$ with $v(F)=w$ associates the sheaf $f^*F$ on $S$ with $v(f^* F)=v$. We start with a few well known lemmas.
\begin{lemma}[\cite{Gieseker}] \label{pullback stable}
Let $G$ be a pure dimension one sheaf on $T$, let $H$ be an ample line bundle on $T$, and set $A=f^*H$. If $G$ is $H$-semistable, then $f^*G$ is $A$-semistable on $S$.
\end{lemma}
\begin{proof}
Since $\omega_T$ is numerically trivial, tensoring a sheaf by $\omega_T$ does not change the numerical invariants of the sheaf itself. It follows that the operation of tensoring by $\omega_T$ preserves not only the slope, but also stability and semi-stability with respect to any line bundle. Let $E \subset f^* G$ be a subsheaf. Using the projection formula for $f$, we see that $f_* E$ is a subsheaf of the $H$-semistable sheaf $G\oplus (G\otimes \omega_T) $.
Clearly, $\chi(E)=\chi (f_* E)$. Moreover, since $E$ is pure of dimension one, $f_* c_1(E)=c_1(f_* E)$. By the projection formula, $c_1(E)\cdot A=f_*c_1(E)\cdot H$, so that $\mu_A(E)=\mu_{H}(f_*E)$. Since $\mu_{H}(G\oplus (G\otimes \omega_T))=\mu_{H}(G)=\mu_{A}(f^*G)$, the lemma is proved.
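In displayed form, with the slope convention $\mu_H(\,\cdot\,)=\chi(\,\cdot\,)/(c_1(\,\cdot\,)\cdot H)$ for pure dimension one sheaves (which we are assuming here), the chain of (in)equalities reads
\[
\mu_A(E)=\ff{\chi(E)}{c_1(E)\cdot A}=\ff{\chi(f_*E)}{c_1(f_*E)\cdot H}=\mu_H(f_*E)\le \mu_H\big(G\oplus (G\otimes \omega_T)\big)=\mu_H(G)=\mu_A(f^*G).
\]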
\end{proof}
\begin{lemma} \label{F and F'}
Let $E$ and $G$ be two non isomorphic $H$-stable sheaves on $T$. Suppose that $f^*E \cong f^*G$. Then
$$G\cong E \otimes\omega_T.$$
\end{lemma}
\begin{proof}
If $f^*E \cong f^*G$, then we also have an isomorphism $E\otimes (\mathcal O_T \oplus \omega_T) = f_*f^*E \cong f_*f^*G=G\otimes (\mathcal O_T \oplus \omega_T) $. Since all maps from $E$ to $G$ are trivial, it follows that the composition $E \to E\otimes f_* \mathcal O_S \to G\otimes \omega_T$ has to be non-zero. However, since $E$ and $G\otimes \omega_T$ are stable with the same reduced Hilbert polynomial, we have
$$E \cong G\otimes \omega_T.$$
\end{proof}
\begin{lemma} \cite{Takemoto73} \label{takemoto}
Let $G$ be a sheaf on $T$. If $G \cong G \otimes \omega_T$ then $f^* G$ is not simple. In particular, it cannot be stable.
\end{lemma}
\begin{proof}
We have
\begin{equation}
\begin{aligned}
\Hom(f^*G, f^*G)&=\Hom(G, f_* f^*G)=\\
&=\Hom(G, G)\oplus \Hom(G, G\otimes \omega_T)\\
\end{aligned}
\end{equation}
Since, by assumption, $\Hom(G, G\otimes \omega_T)$ is at least one-dimensional, the Lemma is proved.
\end{proof}
By Lemma \ref{pullback stable} the pullback map
\begin{equation} \label{pullback morphism}
\begin{aligned}
\mathbb{P}hi: N=M_{w,H}(T, |C|)&\longrightarrow M=M_{v,A}(S),\\
[G] &\longmapsto [f^* G]\\
\end{aligned}
\end{equation}
is a regular morphism.
\begin{lemma}[\cite{Kim98}]\label{pullback generically 2 to 1}
The pullback morphism $\mathbb{P}hi: N \to M$ is generically $2:1$.
\end{lemma}
\begin{proof}
By Lemma \ref{F and F'} the morphism $\mathbb{P}hi$ is of degree $ \le 2$ so that we only need to prove that, for a sheaf $G$ corresponding to a general point in $N$, the sheaf $G$ is not isomorphic to $G \otimes \omega_T$. By Corollary \ref{smooth general member} the general member of $|C|$ is smooth. By Lemma \ref{existence stable sheaves}, if the Fitting support of $G$ is a smooth curve, then $f^* G$ is $A$-stable so that we may use Lemma \ref{takemoto} and conclude that $\mathbb{P}hi$ is generically $2:1$.
\end{proof}
As we remarked above, tensoring by $\omega_T$ preserves stability, hence the involution
\begin{equation} \label{involution on N}
\begin{aligned}
\epsilon: N &\rightarrow N,\\
G &\mapsto G \otimes \omega_T,
\end{aligned}
\end{equation}
is well defined. It clearly commutes with $\mathbb{P}hi$.
\begin{lemma} \label{N smooth at stable points}
Let $G$ be an $H$-semistable sheaf such that $f^*G$ is $A$-stable. Then $G$ is $H$-stable and $N$ is smooth at $[G]$ of dimension $2g-1$.
\end{lemma}
\begin{proof}
The first statement is clear. The obstructions to deforming $G$ on $T$ lie in $\mc{E}xt^2(G,G)$, which is dual, by Serre duality, to
\[
\Hom(G, G \otimes \omega_T).
\]
This space is zero by Lemma \ref{takemoto}, as we are assuming $f^*G$ to be simple.
\end{proof}
Since $\iota^* f^* G=f^* G$, the image of $\mathbb{P}hi$ is contained in the closure of the fixed locus of the birational involution
\begin{equation} \label{iota star}
\begin{aligned}
\iota^*: M=M_{v,A}(S) & \dasharrow M_{v,A}(S)=M.\\
F &\longmapsto \iota^* F
\end{aligned}
\end{equation}
Notice that this involution is regular on an open subset containing sheaves with irreducible support.
\begin{lemma} \label{involution anti-symplectic}
The involution $\iota^*: M \dasharrow M$ is anti-symplectic, i.e. if $\sigma$ denotes the symplectic form on the smooth locus of $M$, then $\iota^* \sigma=-\sigma$. Moreover, the fibration $\pi: M \to |D|$ is equivariant with respect to the involution $\iota^*$ defined above.
\end{lemma}
\begin{proof} Let $F$ be an $A$-stable sheaf corresponding to a point $[F]$ in $M$.
By functoriality of the cup product and of the trace map, the following diagram is commutative,
\begin{equation}
\xymatrix{
\mc{E}xt^1(F, F) \times \mc{E}xt^1(F, F) \ar[d]_{\iota^*} \ar[r] ^{\phantom{ghim}\cup} & \mc{E}xt^2(F, F) \ar[r]^\tr \ar[d]_{\iota^*} & H^2(S, {\mathcal O}_S)\ar[d]^{\iota^*}\\
\mc{E}xt^1(\iota^*F, \iota^*F) \times \mc{E}xt^1(\iota^*F, \iota^* F) \ar[r] ^{\phantom{ffjgjgjf} \cup} &\mc{E}xt^2(\iota^*F,\iota^* F) \ar[r]^\tr & H^2(S, {\mathcal O}_S).
}
\end{equation}
Hence by Mukai's description of $\sigma$ (cf. (\ref{symplectic form})), to prove the Lemma we only need to prove that the identification $H^2(S, {\mathcal O}_S)\cong \mathbb{C}$ changes sign if we compose it with $\iota^*$. This follows from the fact that, since $\iota$ is an anti-symplectic involution on $S$, the identification $H^0(S, \omega_S)=\mathbb{C} \sigma \cong \mathbb{C}$ changes sign once we compose it with $\iota^*$. The second statement follows from the definitions of $\iota^*$ and $\pi$.
\end{proof}
\begin{lemma} \label{fix iota smooth}
If $H$ is $\iota^*$-invariant, then the involution (\ref{iota star}) is regular.
If $Z$ is any component of $\mc{F}ix(\iota^*) \subset M$, then $Z \cap(M\setminus \Sing(M))$ is smooth. Moreover, if $Z\cap(M\setminus \Sing(M))$ is non-empty, then $Z$ is an isotropic subvariety of $M$.
\end{lemma}
\begin{proof}
The first statement is clear, since if $F$ is $H$-stable then $\iota^*F$ is $\iota^*H$-stable. The second statement follows from the well known fact that the fixed locus of the action of a finite group on a smooth variety is smooth. As for the third statement, it is an immediate consequence of Lemma \ref{involution anti-symplectic}.
\end{proof}
\begin{lemma} \cite{Kim98} \label{simple and invariant}
Let $F$ be a pure sheaf of dimension one on $X$ and assume that it is $\iota^*$-invariant. If $F$ is simple, then
\[
F=f^*(G),
\]
for some sheaf $G$ on $Y$.
\end{lemma}
\begin{proof}
Since $Y$ is a quotient of $X$ by a $\mathbb{Z}/(2)$ action, the descent data translates into the existence of a morphism $\varphi: \iota^* F \to F$, such that the following diagram is commutative
\[
\xymatrix{
\iota^* \iota^* F \ar@{=}[d] \ar[r]^{\iota^*\varphi } & \iota^* F \ar[r]^\varphi & F \\
F \ar[urr]_\id &&
}
\]
Since $F$ is simple, this can always be achieved by multiplying any given isomorphism $\iota^* F \to F$ by a suitable scalar.
\end{proof}
Now let
\begin{equation} \label{Y}
Y:=Y_{v,A} \subset \mc{F}ix(\iota^*),
\end{equation}
be the component of $\mc{F}ix(\iota^*)$ containing the image of $\mathbb{P}hi$. The lemma above says that the restriction
\[
\mathbb{P}hi: N \to Y \subset M,
\]
(which by abuse of notation we still denote by $\mathbb{P}hi$) is surjective.
Before stating the main result of this section, we recall the following definition.
\begin{defin} \cite[Def. 3.8]{Yoshioka-FM} \cite[Theorem-Definition 2.4]{AS} \label{definition of walls}
Let $v$ be a primitive Mukai vector. A polarization $H$ is called $ v$--generic if any $H$--semistable sheaf with Mukai vector $v$ is actually $H$--stable.
\end{defin}
By \S 1.4 of \cite{Yoshioka}, if $v=(0, D, \chi)$ is a primitive Mukai vector and $\chi \neq 0$, then the locus of $v$--generic polarizations is non--empty. More precisely, this locus is equal to the complement of a finite union of real codimension one subsets of $\Amp_\mathbb{R}(T)$. A \emph{wall of $v$} is defined to be an irreducible component of the complement of the locus of $v$--generic polarizations. In \cite[Prop 2.5]{AS} explicit equations for the walls are given for primitive Mukai vectors of pure dimension one (notice that the set of walls could, a priori, be a proper subset of the linear subspaces appearing in \S 1.4 of \cite{Yoshioka}). It is not hard to see that if $\chi=0$, then the set of $v$--generic polarizations can be empty.
\begin{thm} \label{thm smooth}
Let $C$ be a curve of genus $g \ge 2$ on an Enriques surface $T$. Let $\chi$ be a non-zero integer, set $w=(0,[C], \chi)$ and $v=(0,[D], 2\chi)$, where $D=f^{-1}(C)$. Assume that $v$ is primitive, and let $H$ be an ample line bundle on $T$ such that $A=f^*H$ is $v$-generic. The moduli space
\[
N=M_{w,H}(T, |C|),
\]
is a smooth, irreducible, projective variety of dimension $2g-1$, and it admits an \'etale double cover onto the Lagrangian subvariety
$ Y_{v,A}(S) \subset M_{v,A}(S)$.
\end{thm}
\begin{proof}
The smoothness follows from Lemmas \ref{N smooth at stable points}, \ref{fix iota smooth} and \ref{simple and invariant} above.
The fact that the pullback morphism is unramified follows from Lemma \ref{F and F'}.
Notice that since $\dim M_{v,A}(S)=2h$ and $h=2g-1$, the isotropic subvariety $Y_{v,A}(S) \subset M_{v,A}(S)$ is indeed Lagrangian. The fact that $N$ is irreducible follows from \cite[Thm 0.2]{yoshioka-enriques}.
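Spelling out the dimension count: since $\mathbb{P}hi$ is finite onto $Y_{v,A}(S)$,
\[
\dim Y_{v,A}(S)=\dim N=2g-1=h=\tfrac{1}{2}\dim M_{v,A}(S),
\]
so an isotropic subvariety of this dimension is automatically Lagrangian.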
\end{proof}
\begin{rem}
In the rest of the paper we will usually refer to $N$ without mention of the dependency on $w$ and $H$. The phrase ``let $N$ be as in Theorem \ref{thm smooth}'' will mean ``let $w$ and $H$ be as in Theorem \ref{thm smooth} and set $N= M_{w,H}(T, |C|)$''. If we refer to $N$ as a relative compactified Jacobian of specific degree $d$, then it means that we have chosen $\chi=d-g+1$ in $w=(0, C, \chi)$.
\end{rem}
\begin{rem}
In the theorem above we have asked $\chi \neq 0$. This is because otherwise the canonical bundle of a reducible curve would be strictly semistable. This condition appears also in \cite{Yoshioka}.
\end{rem}
One can also verify directly that if $G \ncong G \otimes \omega_T$, then the differential
\[
d\mathbb{P}hi: \mc{E}xt^1_T(G,G) \to \mc{E}xt^1_S(f^*G,f^*G),
\]
is injective.
Notice that if $w$ is primitive, then so is $v$ as soon as $C$ is not divisible by $2$ in $\mathbb{N}S(T)$. Moreover, if $T$ is general, then the general $H$ in $\Amp(T)$ will be such that $f^*H$ is $v$-generic.
\begin{rem}
If the assumptions of the theorem above are not satisfied, the singular locus of $M$ and of $N$ may be non-empty. For vector bundles, this singular locus has been described by Kim in \cite{Kim98}.
\end{rem}
\subsection{On the support morphism}
Regarding the relative compactified Jacobian over the locus of reduced curves, we have the following result of Melo, Rapagnetta, and Viviani.
\begin{prop}[\cite{Melo-Rapagnetta-Viviani14}] \label{flat on reduced locus}
The restriction
\[
N_{V} \to V,
\]
of $\nu$ to the open locus $V$ of reduced curves is equidimensional. In particular, if $N$ is smooth then $N_V \to V$ is flat.
\end{prop}
\begin{proof}
This follows from the cited result, Proposition \ref{MRV on compactified Jac}.
\end{proof}
Problems, however, may arise when dealing with non-reduced curves. In general, the Simpson moduli spaces of sheaves on a non-reduced curve may have higher dimensional components, as the following example shows, and are not well understood.
\begin{example}\label{jesse}{\rm Consider a smooth curve $\mc{G}amma'$ of genus $\gamma' \ge 2$, and let $\mc{G}amma$ denote the scheme obtained by considering a non-reduced double structure on $\mc{G}amma'$. Let $\gamma$ be the genus\footnote{By genus we mean the \emph{arithmetic} genus of $\mc{G}amma$, i.e., the integer $\gamma$ defined by $\chi(\mathcal O_\mc{G}amma)=1-\gamma$.} of $\mc{G}amma$. It was shown by Chen and Kass in \cite{Chen_Kass11} that all the components of the Simpson moduli space have dimension $\gamma$ except, possibly, a $(4\gamma'-3)$-dimensional component, which exists when $4\gamma'-3 \ge \gamma$. This component parametrizes rank 2 semistable sheaves on $\mc{G}amma'$.
Suppose now that $\mc{G}amma$ and $\mc{G}amma'$ are contained in a smooth surface $X$, so that the scheme structure defining $\mc{G}amma$ is the one induced by the ideal sheaf ${\mathcal O}(-2\mc{G}amma')$. By the adjunction formula,
\[
\gamma=4\gamma'-3-\deg {\omega_X}_{|\mc{G}amma'},
\]
so the dimension of the Simpson moduli space does not jump when
\begin{equation} \label{canonical bundle numerically trivial}
\deg {\omega_X}_{|\mc{G}amma'}\le 0.
\end{equation}
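For completeness, the genus formula above follows from adjunction on $X$ (a sketch): since $\mc{G}amma=2\mc{G}amma'$ as divisors,
\[
2\gamma-2=2\mc{G}amma'\cdot(2\mc{G}amma'+K_X)=4\,{\mc{G}amma'}^2+2\,\mc{G}amma'\cdot K_X,
\qquad
2\gamma'-2={\mc{G}amma'}^2+\mc{G}amma'\cdot K_X,
\]
so that $2\gamma-2=4(2\gamma'-2)-2\,\mc{G}amma'\cdot K_X$ and $\gamma=4\gamma'-3-\deg{\omega_X}_{|\mc{G}amma'}$; in particular the possible extra component of dimension $4\gamma'-3$ has dimension at most $\gamma$ precisely when (\ref{canonical bundle numerically trivial}) holds.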
}
\end{example}
In particular, as soon as the canonical bundle of $X$ is numerically trivial (K3, abelian, Enriques, or bi-elliptic surfaces), condition (\ref{canonical bundle numerically trivial}) is satisfied for any curve contained in $X$.
\begin{conjecture} \label{expectation}
Let $(X,H)$ be a polarized smooth projective surface and let $C \subset X$ be a curve of arithmetic genus $g$. If $\deg {\omega_X}_{|\mc{G}amma}\le 0$ for every subcurve $\mc{G}amma \subset C$, then any component of the Simpson moduli space of pure dimension one sheaves with support equal to $C$ is $g$-dimensional.
\end{conjecture}
Evidence for this conjecture is given by the following examples.
Suppose $(X, H)$ is a polarized K3 or abelian surface and let $v=(0, D, \chi)$ be a primitive Mukai vector with $\chi \neq 0$. Matsushita proved in \cite{Matsushita1} that the support morphism
\[
\pi: M_{v,H} \to |D|,
\]
is equidimensional. The proof, however, relies on the existence of a symplectic structure on these moduli spaces and cannot be applied to moduli spaces of sheaves on other surfaces. Indeed, using Koll\'ar's theorem on the torsion freeness of the higher direct images of the structure sheaf, Matsushita proves that \emph{ every } fiber of $\pi$, and not just the general one, is Lagrangian and hence of dimension equal to $\dim M_{v,H}(X)/2$. Here, by Lagrangian, we mean that the pullback of the symplectic form to any resolution of a fiber, considered with its reduced induced structure, vanishes identically.
Another example where the conjecture holds true is the Hitchin system for the group $GL(r)$, which can be thought of as the relative compactified Jacobian of a linear system on the ruled surface $X$ associated to the canonical bundle of a curve. The spectral curves are multisections of the ruling and hence satisfy $\omega_X\cdot C =0$. Also in this case, the proof of flatness comes from the existence of a symplectic form with respect to which the Jacobian fibration is Lagrangian \cite{Laumon}. The condition $\deg {\omega_X}_{|\mc{G}amma}\le 0$ appears also in the recent paper \cite{CL}, which provides further evidence for the conjecture.
However, it is natural to expect that the dimension of the fibers of the support map should not depend on the existence of a symplectic structure but only on discrete invariants such as the rank of the sheaves and the arithmetic genera of their supports.
The last example is provided by Del Pezzo surfaces. In \cite{LePotier} Le Potier shows that for $\mathbb{P}^2$ the Picard group of the moduli spaces has two generators: the pullback of the hyperplane section under the support morphism and the determinant line bundle. Looking into the proof, however, one realizes that for any generically polarized Fano surface the fibers of the support morphism are not too big. More specifically, one can use Lemmas 3.2 and 3.3 of loc. cit., and the fact that in this setting one can choose the Quot scheme so that it is a principal bundle over the moduli space, to show that the locus of sheaves supported on non-reduced curves has codimension greater or equal to two.
Since we were not yet able to prove Conjecture \ref{expectation} for Enriques surfaces, we will need the following assumption when computing the fundamental group and the second Betti number in Sections \ref{fundamental group} and \ref{second betti number}.
\begin{assumption}\label{assumption}
As above, let $N_V \to V$ be the restriction of $\nu$ to the locus $V \subset |C|$ of reduced curves. The linear system $|C|$ is such that
\[
\codim(N \setminus N_V, N) \ge 2.
\]
\end{assumption}
Since $\codim (|C| \setminus V, |C|)\ge 2$, this assumption is equivalent to asking that there are no irreducible components of $N_\Delta$ which map to codimension $\ge 2$ subsets of $\Delta$.
In some cases of low genus, where the curves of the linear system do not degenerate too much, one can show directly that the relative compactified Jacobian is equidimensional. For example, if there are no non-reduced curves, or if all the non-reduced curves have at worst a double structure, one can use Proposition \ref{flat on reduced locus} and Example \ref{jesse}. Some examples of linear systems all of whose members are reduced are
\begin{equation} \label{low genus linear systems}
\begin{aligned}
&\vert e_1+e_2 \vert, \,\,\, \text{with} \,\,\, e_1\cdot e_2=1,& g(C)= 2, & \,\, \dim N=3, \\
&|e+f| \,\,\, \text{with} \,\,\, e\cdot f=2,&g(C)=3, & \,\, \dim N=5,\\
&\vert e_1+e_2+e_3 \vert \,\,\, \text{with} \,\,\, e_i\cdot e_j=1 \,\, \text{for}\,\, i\neq j,& g(C)=4, & \,\, \dim N=7.
\end{aligned}
\end{equation}
where $e_1$, $e_2$, $e_3$, $e$ and $f$ are primitive elliptic curves.
More generally, as Yoshioka has pointed out to me, this assumption is satisfied whenever $|C|$ is primitive:
\begin{prop} \cite{yoshioka-enriques} \label{Yoshioka assumption} Let $|C|$ be a primitive linear system on a general Enriques surface $T$, and let $N$ be as in Theorem \ref{thm smooth} (i.e. let $w$ and $H$ be as in Theorem \ref{thm smooth} and set $N= M_{w,H}(T, |C|)$). Then Assumption \ref{assumption} is satisfied.
\end{prop}
\begin{proof} This is Proposition 4.4 of \cite{yoshioka-enriques}.
\end{proof}
\begin{cor} \label{assumption hyperelliptic}
Let $|C|$ be a hyperelliptic linear system on a general Enriques surface $T$, and let $N$ be as in Theorem \ref{thm smooth}. Then Assumption \ref{assumption} is satisfied.
\end{cor}
\begin{proof}
By Proposition \ref{hyperelliptic enriques}$(3)$, a hyperelliptic linear system is primitive, and hence we may use Proposition \ref{Yoshioka assumption}.
\end{proof}
\section{The fundamental group} \label{fundamental group}
This section is devoted to computing the fundamental group of the relative compactified Jacobian variety $N$ constructed in Section \ref{fibration in jacobians}. We show that there is a surjection $\mathbb{Z}/(2) \twoheadrightarrow \pi_1(N)$ which, under Assumption \ref{assumption}, is actually an isomorphism. Under this assumption, we can also identify the universal covering space, which can be described using the norm map. At the end of the section, we also prove some results on the vanishing cycles of these families. These results will be used to calculate the second Betti number of $N$.
The main result of the section is the following
\begin{thm} \label{fundamental group thm} Let $|C|$ be a genus $g \ge 2$ linear system on a general Enriques surface $T$, and let $v$ and $N$ be as in Theorem \ref{thm smooth}.
Then there is a surjection
\[
\mathbb{Z}/(2) \twoheadrightarrow \pi_1(N)
\]
which is an isomorphism in case
$|C|$ satisfies Assumption \ref{assumption}.
\end{thm}
\begin{rem}
Recall that a hyperelliptic linear system on a general Enriques surface $T$ is always primitive (Prop. \ref{hyperelliptic enriques}). Hence, the theorem above holds unconditionally if $|C|$ is hyperelliptic.
\end{rem}
There are two main ingredients in the proof of this result. The first is a theorem of Leibman \cite{Leibman}, as used also in \cite{Mark_Tik} and in \cite{ASF12}. We combine this with the second ingredient, which is the Abel--Jacobi map and which allows the comparison of the fundamental group of a family of mildly singular curves with the fundamental group of the corresponding relative compactified Jacobian. We followed an idea of the referee to use the Abel--Jacobi map, as it seemed to be very natural. I am grateful to the referee for this suggestion.
At the end of the section we also prove a result on the vanishing cycles of these families (correcting a mistake that appeared in the first version and that was pointed out to us by the referee). This will be used in Section \ref{second betti number}.
\subsection{Preliminaries}
By abuse of notation, let us denote by
\[
\pi: Y \to |C|,
\]
the map induced by the support morphism $M \to |D|$, and by
\[
\nu: N \to |C|,
\]
the support morphism for the moduli space of sheaves on $T$.
There is a commutative diagram,
\begin{equation} \label{fibration N and Y}
\xymatrix{
N \ar[dr]_\nu \ar[rr]^\Phi && Y \ar[dl]^\pi\\
& |C|
}
\end{equation}
which shows that the double cover $\Phi$ restricts fiberwise to a non-trivial double cover.
For later use, let us define a torsion line bundle on $Y$ by setting
\begin{equation} \label{define L}
\Phi_*{\mathcal O}_N\cong {\mathcal O}_Y\oplus L.
\end{equation}
Notice that $L^{\otimes 2}\cong{\mathcal O}_Y$, and that $L$ generates the kernel of $\Phi^*: \Pic(Y) \to \Pic(N)$.
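If $\sigma$ denotes the covering involution of the double cover $\Phi$ (a notation used only here), the splitting (\ref{define L}) can be viewed as the decomposition of $\Phi_*\mathcal O_N$ into $\sigma$--eigensheaves,
\[
\Phi_*\mathcal O_N=(\Phi_*\mathcal O_N)^{\sigma=+1}\oplus(\Phi_*\mathcal O_N)^{\sigma=-1}\cong \mathcal O_Y\oplus L,
\]
and the relation $L^{\otimes 2}\cong\mathcal O_Y$ reflects the fact that the cover has no branch divisor.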
Let
\[
U \subset |C|, \quad \text{ and } \quad V \subset |C|,
\]
be the open loci of smooth and reduced curves respectively. For any $t \in U$, let $C_t$ be the smooth member of $|C|$ corresponding to $t$, and set
\[
D_t=f^{-1}(C_t).
\]
By \cite{MumfordPryms}, (vi) Section 2 and Corollary 2 Section 3, the fixed locus $\operatorname{Fix}(\iota)$ of $\iota^*$ acting on $\Jac(D_t)$ is exactly $f^*(\Jac(C_t))$ and the double cover $N_t \to Y_t$ is induced by the sequence
\begin{equation} \label{pullback jacobians}
1 \to \mathbb{Z}/(2) \to \Jac(C_t) \stackrel{f^*}{\rightarrow} \operatorname{Fix}(\iota)\subset \Jac(D_t).
\end{equation}
\subsection{Leibman}
Let us start by considering the setting of \cite{Leibman}, which we formulate directly in the context of algebraic varieties. Let $p\colon E \to B$ be a surjective morphism of smooth connected varieties. Assume that $p$ has a section $s$. Let $W \subset B$ be a locally closed smooth subvariety of codimension at least one. Set $U = B \setminus W$, $E_U = p^{-1}(U)$, and $E_W=p^{-1}(W)$. Assume that $E_U \to U$ is a smooth fibration that is topologically locally trivial with path connected fiber $F \stackrel{j}{\hookrightarrow} E_U$. We will say that a morphism $E \to B$ satisfies Leibman's condition if it satisfies the assumptions just mentioned.
Fix base points $o \in E_U$ and $p(o) \in U$ with respect to which we consider fundamental groups. We have the following commutative diagram
\begin{equation} \label{leibman diagram}
\xymatrix{
1 \ar[r] & R \ar[d] \ar[r] & G \ar[r] \ar[d] & H \ar[d] & \\
1 \ar[r] & \pi_1(F) \ar[r]^{j_*} \ar[d] & \pi_1(E_U) \ar[r]^{p_*} \ar[d] & \pi_1(U) \ar[r] \ar[d] & 1\\
1 \ar[r] & K \ar[r] & \pi_1(E) \ar[r]^{p_*} & \pi_1(B) &\\
}
\end{equation}
where $G=\ker[ \pi_1(E_U) \to \pi_1(E)]$, $H=\ker[\pi_1(U) \to \pi_1(B)]$, $K=\ker[\pi_1(E) \to \pi_1(B)]$, and $R=\ker [\pi_1(F) \to K]$. Since removing closed algebraic subsets only makes the fundamental group larger, the two vertical arrows on the bottom left are surjective and hence so is $\pi_1(E) \to \pi_1(B)$.
Following Leibman, let us select a set of generators of $H$ which we will then lift to $G$. A loop in $U$ that can be closed in $B$ can be represented as the image of the boundary of a map $D \to B$ from a two-dimensional disk $D$. Choose a general point $x_i$ on every irreducible component $W_i \subset W$ and a small two-dimensional disk $D_i \subset B$ transversal to $W_i$ at $x_i$ and such that $D_i \cap W_i=\{x_i\}$. By transversality, any map from a two-dimensional disc to $B$ with boundary contained in $U$ can be moved, up to homotopy, to a map whose image is a disc that is transversal to every component of $W$. Moreover, it can be arranged so that this disc meets every component at the chosen points (cf. \cite[(1.11)]{Leibman}). For every $i$, join the base point $p(o)$ with $\partial D_i$ in every possible way (up to homotopy) via paths $\gamma$ in $U$. The set of loops of the form $\gamma \partial D_i \gamma^{-1}$, together with their inverses, gives a set of generators for $H$. Since $E \to B$ has a section, the set of loops in $E_U$ of the form $s_*(\gamma \partial D_i \gamma^{-1})$ is a lift to $G$ of the generators of $H$. Hence the morphism $G \to H$ is surjective. As a consequence, $\pi_1(F) \to K$ is also surjective and there is an exact sequence
\begin{equation} \label{R for general case}
1 \to R=\pi_1(F) \cap G \to \pi_1(F) \to \pi_1(E) \to \pi_1(B) \to 1.
\end{equation}
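Note, for later use, that when the base is simply connected, as will be the case for the complete linear systems and the pencils considered below, the sequence (\ref{R for general case}) reduces to a quotient description (this is the form used, for instance, in the proof of Lemma \ref{lemma R for C}):
\[
\pi_1(B)=1 \quad \Longrightarrow \quad \pi_1(E)\cong \pi_1(F)\slash R.
\]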
Our aim is to describe the group $R$ more explicitly when $E \to B$ is a family of curves or its relative compactified Jacobian. Before doing so, let us point out two important facts.
\begin{rem} \label{remark on birational class}
The first remark is that so far we have only used that the section is defined at the general point of each component of $W$. The second is that if $E'$ is a smooth variety and $h: E \dashrightarrow E'$ is a birational map, then $\pi_1(E) \cong \pi_1(E')$. In particular, if $h$ restricts to an isomorphism over the general fiber of $E \to B$, we are free to consider $\pi_1(E')$ instead of $ \pi_1(E)$ in the exact sequence above.
\end{rem}
As usual, let $|C|$ be a linear system on a general Enriques surface $T$ of genus $g \ge 2$. Let $B \subset |C|$ be a general linear subsystem with the property that the universal family
\[
\mathcal C_B \subset B \times T, \quad p: \mathcal C_B \to B,
\]
of curves is smooth and has a section $s$. This is the case, for example, if $|C|$ is a hyperelliptic linear system (Corollary \ref{section}), or if $B \subset |C|$ is a general pencil in an arbitrary linear system of genus $g \ge 3$ (a general pencil in a genus $g$ linear system has $2g-2$ simple base points). Then $\mathcal C_B \to B$ satisfies the assumptions of Leibman and we can consider the corresponding sequence (\ref{R for general case}). For the base point $o \in \mathcal C$, let $C_{t_o}$ be the fiber of $p$ over $t_o:=p(o)$. We have the following
\begin{lemma} \label{lemma R for C}
Let $D_{t_o}=f^{-1}(C_{t_o})$ be the inverse image of $C_{t_o}$ under the universal cover $f: S \to T$. There is an exact sequence
\begin{equation} \label{R for C}
1 \to R_{\mathcal C} \to \pi_1(C_{t_o}) \to \pi_1(\mathcal C_B) \to 1,
\end{equation}
where
\[
R_{\mathcal C} =f_*( \pi_1(D_{t_o})).
\]
\end{lemma}
\begin{proof}
This is just (\ref{R for general case}) applied to $\mathcal C_B \to B$ together with the fact that $\pi_1(B)$ is trivial.
It is easy to see that the second projection $\mathcal C_B \to T$ induces an isomorphism at the level of fundamental groups so
\[
\pi_1(\mathcal C_B ) \cong \mathbb{Z} \slash (2).
\]
As a consequence, the two morphisms $\pi_1(C_{t_o}) \to \pi_1(\mathcal C_B)$ and $\pi_1(C_{t_o}) \to \pi_1(T)$ have the same kernel. Since the $2:1$ cover $S \to T$ restricts to the non--trivial $2:1$ cover $D_{t_o} \to C_{t_o}$, it is clear that $\ker[\pi_1(C_{t_o}) \to \pi_1(T)]=f_*(\pi_1(D_{t_o}))$ and the Lemma follows.
\end{proof}
Now let $H$ be a polarization on $T$ and let
\[
\nu: \overline J_{H,0}(\mathcal C_B) \to B
\]
be the degree zero relative compactified Jacobian of $\mathcal C_B \to B$. In other words, $ \overline J_{H,0}(\mathcal C_B)=N \times_{|C|} B$, where $N$ is the moduli space for the Mukai vector $w=(0, C, -g+1)$. Let us assume that $H$ is $v$--generic (so that $N$ and $\overline J$ are smooth) and also such that $\nu$ has a section. Such a polarization exists because of Remark \ref{section defined}.
We are thus in the setting of Leibman. Sequence (\ref{R for general case}) becomes
\begin{equation} \label{R for J}
0 \to R_{ \overline J_{H,0}(\mathcal C_B)} \to \pi_1(J_{t_o})=H_1(C_{t_o}, \mathbb{Z}) \to \pi_1( \overline J_{H,0}(\mathcal C_B)) \to 0.
\end{equation}
\begin{lemma} \label{independent of H and d}
Let $\mathcal C_B \to B$ be as above. For any polarization $H$ and any degree $d$ such that $\overline J_{H,d}(\mathcal C_B)$ is smooth, there is a short exact sequence (\ref{R for J}) with first term $R_{ \overline J_{H,0}(\mathcal C_B)} $ independent of $H$ and $d$.
\end{lemma}
\begin{proof}
Using Remark \ref{remark on birational class} we only need to check that the birational class of $\overline J_{H,0}(\mathcal C_B)$ is independent of $H$ and $d$. Independence of $H$ follows from Lemma \ref{existence stable sheaves} and the independence of $d$ follows from the existence of a section.
\end{proof}
As a consequence, to compute $\pi_1( \overline J_{H,d}(\mathcal C_B))$ we can drop $H$ from the notation and only consider the degree $0$ compactified Jacobian. This will be denoted by
\[
\overline J_B:= \overline J_{H,0}(\mathcal C_B)
\]
Our aim is to use Lemma \ref{lemma R for C} to compute $R_{\overline J_B}=R_{\overline J_{H,0}(\mathcal C_B)}$ and we will use Abel--Jacobi maps to compare (\ref{R for C}) and (\ref{R for J}).
Let $U_B=U \cap B \subset B$ be the open locus parametrizing smooth curves and consider the restriction $J_{U_B}=\nu^{-1}(U_B)$ of $\overline J_B \to B$ to $U_B$.
Using the section $s : B \to \mathcal C_B$ we can define an Abel--Jacobi map
\begin{equation} \label{Abel Jacobi}
\begin{aligned}
A=A_{U_B,s}: \mathcal C_{U_B} &\longrightarrow J_{U_B},\\
c & \longmapsto m_{C_b, c} \otimes \mathcal O_{C_b}(s(b)),
\end{aligned}
\end{equation}
which is well known to be an embedding. We can view $A$ as a rational map
\[
\mathcal C_B \dashrightarrow \overline J_B
\]
which induces, since we are assuming $\overline J_B$ to be smooth (so the fundamental group is a birational invariant; $A$ is defined on an open set whose complement has codimension at least $2$), a morphism $A_*: \pi_1(\mathcal C_B ) \to \pi_1(\overline J_B)$ which fits into the following commutative diagram
\begin{equation} \label{Leibman seq for C and J}
\xymatrix{
1 \ar[r] & R_{\overline J_B} \ar[r]& \pi_1(J_{t_o}) \ar@{->>}[r] & \pi_1(\overline J_B) \ar[r]& 1\\
1 \ar[r] & R_{\mathcal C_B} \ar[r] \ar[u]^{r} & \pi_1(C_{t_o}) \ar@{->>}[u]^{t}\ar@{->>}[r]& \pi_1(\mathcal C_B)\ar[u]^{A_*} \ar[r]&1
}
\end{equation}
\begin{lemma} \label{if r surj then okay}
There exists a surjection $\pi_1(\mathcal C_B)=\mathbb{Z} \slash (2) \twoheadrightarrow \pi_1(\overline J_B)$ which is an isomorphism if and only if $R_{\mathcal C_B} \to R_{\overline J_B} $ is surjective. If this is the case, then $R_{\overline J_B}=f_* H_1(D_{t_o}, \mathbb{Z})$.
\end{lemma}
\begin{proof}
The first two statements are proved by diagram chasing, while the third follows from the fact that, since $R_{\mathcal C_B}=f_*( \pi_1(D_{t_o}))$, we have $r(R_{\mathcal C_B})=f_* H_1(D_{t_o}, \mathbb{Z}) \subset R_{\overline J_B}$, and this inclusion is an equality if and only if $r$ is surjective.
\end{proof}
To show that the map $\pi_1(\mathcal C_B) \to \pi_1(\overline J_B)$ is actually an isomorphism, we will need the following Lemma, which we will use after showing that the Abel--Jacobi map embeds $\mathcal C_B$ in $\overline J_B$. Before stating the Lemma, let us introduce some more notation to add to the one defined at the beginning of the Section.
Again, we follow Leibman ($(\gamma')$ on pp.~102 and 104). For this part we also need that $p: E \to B$ is smooth at the general point of $E_W$ (in the case of $N \to |C|$ this follows from Proposition \ref{MRV on compactified Jac}).
For every component $E^j_{W_i}$ of $p^{-1}(W_i)$ we can choose a general point $q_{ij}$, lifting $x_i$, and a small disk $D_{ij}$, lifting $D_i$, which is transversal to $E^j_{W_i}$ and only meets it in $q_{ij}$. This is possible because $p$ admits local sections at the general point of every component of $E_W$. For every $i$, we can choose these lifts so that the one corresponding to the component meeting the section $s(B)$ is precisely $s(D_i)$. Moreover, for any path $\eta$ joining $p(o)$ to $\partial D_i$ as above we can choose (since the fibers over $U$ are path connected) a path $\gamma$ in $E_U$ which lifts $\eta$ and which joins the base point in $E_U$ to a fixed point $o_{ij} \in \partial D_{ij}$. Notice that we can choose such points $o_{ij}$ so that for fixed $i$ they lie over the same point $o_i \in \partial D_i$ and so that the point lying in $s(D_i)$ is precisely $s(o_i)$.
This defines other lifts of the generators of $H$, which are not necessarily contained in the image of the section. The observation, which we will make more precise in Subsection \ref{section vanishing cycles}, is that different lifts of the same generator of $H$ differ by a vanishing cycle of the family. Since for the moment we do not need this, we postpone the discussion of the vanishing cycles to a separate subsection.
\begin{lemma} \label{comparing R}
Let us be given two morphisms $p: E \to B$ and $p':E' \to B'$ satisfying the conditions of Leibman and suppose that there are locally closed embeddings $E' \subset E $ and $B' \subset B$ commuting with $p$ and $p'$. Let $U' \subset B'$ be the locus where $p'$ is smooth and suppose that $U' \subset U$. Denote by $F'$ the fiber of the topologically locally trivial fibration $E'_{U'}$ and suppose that the inclusion $F' \subset F$ induces a surjection at the level of fundamental groups. Suppose furthermore that every component of $E_{W}$ contains a component of $E'_{W'}={p'}^{-1}(W')$, where $W'=B' \setminus U'$, and that both $p$ and $p'$ are smooth at the general point of every component of $E'_{W'}$. Let $R=\ker[\pi_1(F) \to \pi_1(E)]$ and $R'=\ker[\pi_1(F') \to \pi_1(E')]$ be as in (\ref{leibman diagram}). Then the natural morphism $R' \to R$ is surjective.
\end{lemma}
\begin{proof}
We know that $R=G \cap \pi_1(F)$, where $G=\ker[\pi_1(E_U) \to \pi_1(E) ]$ and similarly $R'=G' \cap \pi_1(F')$, where $G'=\ker[\pi_1(E'_{U'}) \to \pi_1(E')]$. We claim that it is enough to show that the natural morphism $G' \to G$ is surjective. Indeed, consider the short exact sequences $1 \to R \to G \to H \to 1$ and the corresponding primed one $1 \to R' \to G' \to H' \to 1$. Both are exact on the right because $p: E \to B$ and $p':E' \to B'$ satisfy the conditions of Leibman and because of the discussion after diagram (\ref{leibman diagram}). Moreover, the morphism between the two fibrations induces a morphism of complexes between the two short exact sequences. It follows that if $G' \to G$ is surjective, then the cokernel of $R' \to R$ is surjected upon by the kernel of the natural morphism $H' \to H$. Since $\pi_1(F') \to \pi_1(F)$ is surjective, $\pi_1(U')\to \pi_1(U)$ is injective and hence so is $H' \to H$.
As above, we can write any element $\alpha \in G$ as a product of loops of the form $\gamma \partial D_{ij} \gamma^{-1}$, where the discs $D_{ij}$ are as above and the $\gamma$'s are paths in $E_U$ connecting $o$ to $o_{ij} \in \partial D_{ij}$. Since $p$ is smooth at the general point of $E_{W_i}^j$, we can choose the $D_{ij}$ to be centered at points $x'_{ij} \in E'_{W'}$. Moreover, if locally we trivialize the embedding $E' \to E$, we can use a homotopy to move the disk $D_{ij}$ so that it is actually contained in $E'$. To show that $G' \to G$ is surjective it is therefore sufficient to show that the paths $\gamma$ joining $o$ to $\partial D_{ij} $ are homotopic to paths $\gamma'$ in $E'_{U'}$. Since $F'$ is path connected we can choose a path $\gamma''$ joining $o$ to $\partial D_{ij}$ and lifting $p(\gamma)$. Then $\gamma {\gamma''}^{-1}$ is a loop in $E_U$ which lies in the kernel of $p_*$, i.e., $\gamma {\gamma''}^{-1}=f$ for some $f \in \pi_1(F)$. Since $\pi_1(F') \to \pi_1(F)$ is surjective by assumption, $f$ is homotopic to a loop in $F'$ and hence $\gamma$ is homotopic to $ \gamma':= \gamma'' f$ which is a path in $E'_{U'}$.
\end{proof}
\subsection{Abel--Jacobi maps and the proof of the Theorem}
The next step will be to show, given a family of curves as above, that the Abel--Jacobi map (\ref{Abel Jacobi}) can be extended to an embedding satisfying the assumptions of this lemma.
Before doing so, we need to recall a few facts about the extension of Abel--Jacobi maps to singular curves. This topic has been extensively studied. We refer to \cite{Melo-Rapagnetta-Viviani14} and \cite{Caporaso-Coelho-Esteves-08} and the references therein for a more thorough treatment of the topic. Here we limit ourselves to the most basic facts. We start by considering the extension over the locus $V \subset B$ parametrizing singular but integral curves. Following \cite{Melo-Rapagnetta-Viviani14} and \cite{Caporaso-Coelho-Esteves-08}, over $V$
one can extend (\ref{Abel Jacobi}) by considering on $\mathcal C_V \times_V \mathcal C_V$ the sheaf
\[
I_\Delta \otimes p_1^* \mathcal O_{\mathcal C_V} (\Sigma), \quad \Sigma:=s(B) \subset \mathcal C_B,
\]
where $I_\Delta$ is the ideal sheaf of the diagonal in $\mathcal C_V \times_V \mathcal C_V$. This sheaf defines a flat family of rank one torsion free sheaves of degree zero, parametrized by the second factor $\mathcal C_V$. As such, it defines a morphism, extending $A_{U_B,s}$, from $\mathcal C_V$ into the relative compactified Jacobian. For reference, we highlight the following proposition
\begin{prop} \label{abel jacobi for non hyper}
Let $\mathcal C_B \to B$ be the family over a general pencil $B \subset |C|$ in a non--hyperelliptic linear system on a general Enriques surface, and let $\overline J_B \to B$ be the degree zero relative compactified Jacobian of this family. Choose a section of the family and consider the Abel--Jacobi map (\ref{Abel Jacobi}) with respect to this section. Then $A$ extends to an embedding
\[
\mathcal C_B \hookrightarrow \overline J_B
\]
over $B$.
\end{prop}
\begin{proof}
The fact that the morphism $A$ extends was discussed above and the fact that it is an embedding follows from \cite[Thm 1]{Caporaso-Coelho-Esteves-08}.
\end{proof}
To extend the Abel--Jacobi map over the locus of reducible curves one needs to be more careful as the sheaves of the form $m_{C_b, c} \otimes \mathcal O_{C_b}(s(b))$ will in general not be semistable. Melo, Rapagnetta, and Viviani have shown in \cite[Lem 6.1 and Prop. 6.7]{Melo-Rapagnetta-Viviani14} that on a \emph{fixed} curve one can always find a polarization which guarantees stability and hence that, up to suitably choosing the polarization, the assignment
\[
C_b \ni c \mapsto m_{C_b, c} \otimes \mathcal O_{C_b}(s(b)) \in \overline J_{H,0}(C_b)
\]
defines an extension of the Abel--Jacobi morphism (notice, however, that if the curve has separating nodes, then the definition has to be tweaked (\cite[\S 9-10]{Caporaso-Coelho-Esteves-08} \cite[Prop. 6.7]{Melo-Rapagnetta-Viviani14})). However, the polarization for which the Abel--Jacobi map is defined depends on the given curve and hence the construction does not in general work in families. Luckily, for a hyperelliptic linear system on a general Enriques surface we have the following proposition.
\begin{prop} \label{abel jacobi for hyper}
Let $\mathcal C \to |C|$ be the universal family of a hyperelliptic linear system of genus $g\ge 2$ on a general Enriques surface. Fix one of the two sections $s: |C| \to \mathcal C$, and let $B \subset |C|$ be the open subset parametrizing curves that are: irreducible; or the union of two smooth curves meeting in two points (as is the general curve of the component $\Delta_3$ of the discriminant); or the union of two smooth curves meeting in one point (as is the general curve of the components $\Delta_1$ and $\Delta_2$ of the discriminant).
There exists a $(-g+1)$--general polarization $H$ such that $A$ extends to a regular embedding over $B$
\[
\mathcal C_B \mc{H}ookrightarrow \overline J_{H,0}(\mathcal C_B).
\]
\end{prop}
\begin{proof}
By the remarks before the proposition, we only need to check the extension of the morphism on the locus parametrizing reducible curves. By Proposition \ref{discriminant locus}, in codimension one the only curves that appear are of the form $\Gamma_1+\Gamma_2$, where
\begin{enumerate}
\item [(i)] $\Gamma_1 \in |2e_1|$ and $\Gamma_2 \in |(n-2)e_1+e_2|$;
\item [(ii)] $\Gamma_1 = e_1$ and $\Gamma_2 \in |(n-1)e_1+e_2|$;
\item [(iii)] $\Gamma_1 = e'_1$ and $\Gamma_2 \in |((n-1)e_1+e_2)'|$.
\end{enumerate}
(if $g=2$ only the last two cases occur).
Recall that the two sections of a hyperelliptic linear system come from its base points which were described in Lemma \ref{base points}. Let $\Sigma:= s(|C|)$ be the image of the section.
Up to switching cases $(ii)$ and $(iii)$ we can assume that the following intersection numbers hold: in case $(i)$: $\Sigma \cdot \Gamma_1=0$ and $\Sigma \cdot \Gamma_2=1$; in case $(ii)$: $\Sigma \cdot \Gamma_1=0$ and $\Sigma \cdot \Gamma_2=1$; in case $(iii)$: $\Sigma \cdot \Gamma_1=1$ and $\Sigma \cdot \Gamma_2=0$.
We will use Lemma \ref{stability on two components} to check whether in the three cases $(i), (ii)$, and $(iii)$ the sheaves of the form $m_p \otimes \mathcal O_\Gamma(\Sigma)$ are stable. Notice that the only things that matter for stability are the intersection numbers
\[
a=H \cdot e_1, \quad \text{and} \quad b=H \cdot e_2
\]
so we will drop $H$ from the notation and only use $a$ and $b$. Since $g(C)=n+1$ and we are considering the degree zero Jacobian, $\chi=-n$. For curves of type $(i)$, stability for line bundles becomes
\[
-\frac{2na}{na+b} \le \chi_1 \le - \frac{2na}{na+b} +2.
\]
If $a$ and $b$ are such that $na>b$ then
\begin{equation} \label{H general}
1<\frac{2na}{na+b}<2
\end{equation}
(by choosing $H$ to be an appropriate combination of $e_1$ and $e_2$ this can certainly be achieved) so $H$ is $\chi$--general for $\Gamma$ and a line bundle is stable if and only if $-1 \le \chi_1 \le 0$, i.e. if and only if
\[
(\chi_1, \chi_2)=(-1, -n+3) \quad \text{ or } \quad
(\chi_1, \chi_2)=(0, -n+2).
\]
This means that the sheaf $m_p \otimes \mathcal O_\Gamma(\Sigma \res{\Gamma})$ is stable when $p$ is a smooth point on either of the two components and hence $A$ extends over the smooth locus of curves of type $(i)$. When $p \in \Gamma_1 \cap \Gamma_2$, then the sheaf $m_p \otimes \mathcal O_\Gamma(\Sigma \res{\Gamma})$ is locally free at only one node. By Lemma \ref{stability on two components} stability is equivalent to $\chi_1=-1$ and hence $m_p \otimes \mathcal O_\Gamma(\Sigma \res{\Gamma})$ is stable, since the Euler characteristics of the restrictions to the two components are $(\chi_1, \chi_2)=(-1, -n+2)$.
It is more subtle to extend the morphism over the locus of curves with separating nodes \cite[Prop. 6.7]{Melo-Rapagnetta-Viviani14} (as are the curves of cases $(ii)$ and $(iii)$). To fix ideas, let us consider case $(ii)$. Stability requires $(\chi_1, \chi_2)=(0, -n+1)$, which is satisfied for sheaves of the form $m_p \otimes \mathcal O_\Gamma(\Sigma \res{\Gamma})$ if and only if $p$ is a point belonging to $\Gamma_2 \setminus (\Gamma_1 \cap \Gamma_2)$. Indeed, in this case $(\chi_1, \chi_2)=(0,-n+1)$, otherwise $(\chi_1, \chi_2)=(-1,-n+2)$. Following \cite[6.7]{Melo-Rapagnetta-Viviani14} and \cite[\S 9-10]{Caporaso-Coelho-Esteves-08}, for $p \in \Gamma_1$ we can set $A(p)=m_p \otimes \mathcal O_\Gamma(\Sigma \res{\Gamma}) \otimes \mathcal O_\Gamma(\Gamma_1)$. Here, $\mathcal O_\Gamma(\Gamma_i)$ denotes the restriction to $\Gamma$ of the divisor in $\mathcal C_B \to B$ that lives over $\Delta_1$ and is swept out by the components $\Gamma_i$ of the curves parametrized by $\Delta_1$. Since $\deg \mathcal O_\Gamma(\Gamma_i)_{|\Gamma_i}=-1$ and $\deg \mathcal O_\Gamma(\Gamma_i)_{|\Gamma_j}=1$ for $j\neq i$, the sheaf $m_p \otimes \mathcal O_\Gamma(\Sigma \res{\Gamma}) \otimes \mathcal O_\Gamma(\Gamma_1)$ is stable.
Similarly, to extend the morphism over the second component of curves of type $(iii)$, we need to twist by $\mathcal O_\Gamma(\Gamma_2)$.
For a hyperelliptic linear system this can work in families since there is no monodromy among the irreducible components so over $\Delta_1$ and $\Delta_2$ we can single out the first and second components of every curve. More precisely, the morphism $\mathcal C_B \to \overline J_B$ will be determined, using the universal property of the moduli space, by the sheaf
\[
\mathcal I_\Delta \otimes p_1^*\mathcal O_{\mathcal C}(\Sigma) \otimes \mathcal O_{\mathcal C_B \times_B \mathcal C_B}(D_1) \otimes \mathcal O_{\mathcal C_B \times_B \mathcal C_B}(D_2)
\]
on $\mathcal C_B \times_B \mathcal C_B$, viewed as a family of sheaves parametrized by the second factor $\mathcal C_B$. Here $D_1 \subset \mathcal C_B \times_B \mathcal C_B$ is the component of $(p \times p)^{-1} (\Delta_1 \times \Delta_1)$ parametrizing pairs of points $(x, y) \in \mathcal C_B \times_B \mathcal C_B$ both of which belong to $e_1$ and $D_2 \subset \mathcal C_B \times_B \mathcal C_B$ is the component of $(p \times p)^{-1}(\Delta_2 \times \Delta_2)$
parametrizing pairs of points $(x, y) \in \mathcal C_B \times_B \mathcal C_B$ both of which belong to $|((n-1)e_1+e_2)'|$.
Finally, by \cite[Thm 1]{Caporaso-Coelho-Esteves-08} (see also \cite[Fact 6.10]{Melo-Rapagnetta-Viviani14}) the Abel--Jacobi map is an embedding precisely away from the components of a curve which are smooth rational curves whose intersection with the rest of the curve consists of two separating nodes.
\end{proof}
From the proof of the above proposition we may deduce the following Corollary.
\begin{cor} \label{components C and J}
Let $\Gamma=\Gamma_1+\Gamma_2$ be a general curve in $\Delta_3$, let $H$ be a general polarization satisfying $na>b$ as above and let $\overline J_H(\Gamma)$ be the compactified Jacobian of degree $0$ pure sheaves on $\Gamma$, stable with respect to $H$. Then, the Abel--Jacobi map defined above embeds
$\Gamma_1$ in the component corresponding to $(-1, -n+3)$ and embeds $\Gamma_2$ in the component corresponding to $(0, -n+2)$.
\end{cor}
We can finally show
\begin{thm} \label{pi one for hyperelliptic}
Let $|C|$ be a hyperelliptic linear system of genus $g \ge 2$, and let $N$ be as in Theorem \ref{thm smooth}. Then
\[
\pi_1(N) \cong \mathbb{Z} \slash (2),
\]
and the kernel of the natural morphism $\pi_1(C_{t_o}) \to \pi_1(N)$ is equal to $f_*(\pi_1(D_{t_o}))$.
\end{thm}
\begin{proof} Recall that $|C|$ is a primitive linear system and hence by Proposition \ref{Yoshioka assumption} it satisfies Assumption \ref{assumption}.
By Lemma \ref{independent of H and d} it is enough to look at the case when the degree is zero and when $H$ is general and satisfies (\ref{H general}). Then by Proposition \ref{abel jacobi for hyper} we can apply Lemma \ref{comparing R} to $\mathcal C_B \to B$ and $N=\overline J_B \to B$, which gives surjectivity of the map $r$ in diagram (\ref{Leibman seq for C and J}). Hence, by Lemma \ref{if r surj then okay} the morphism $A_*: \pi_1(\mathcal C_B )=\mathbb{Z} \slash (2) \to \pi_1(N)$ is an isomorphism and $\ker[\pi_1(C_{t_o}) \to \pi_1(N)]=f_*(\pi_1(D_{t_o}))$.
\end{proof}
Let us now come to the case of a non--hyperelliptic linear system. Since there is no section, the strategy is to first compute the fundamental group of the relative compactified Jacobian of a general pencil (which admits a section) and then to pass from there to the family over the complete linear system.
\begin{lemma} \label{from pencil to whole}
Let $|C|$ be a genus $g \ge 3$ non--hyperelliptic linear system on a general Enriques surface $T$ and let $N$ be as in Theorem \ref{thm smooth}. Let $B \subset |C|$ be a general pencil, let $\mathcal C_B \to B$ be the universal family of curves and let $\overline J_B=\overline J(\mathcal C_B)=N \times_{|C|} B \to B$ be its degree $d$ relative compactified Jacobian. Then
\[
\pi_1(\overline J_B)\cong \mathbb{Z} \slash(2),
\]
and there is a surjection
\[
\pi_1(\overline J_B) \twoheadrightarrow \pi_1(N).
\]
\end{lemma}
\begin{proof} The proof is the same as for the case of a hyperelliptic linear system.
Since $\mathcal C_B \to B$ has a section, the birational class of $\overline J_B$ does not depend on the degree (nor on the polarization) and hence we can assume that $d=0$. By Proposition \ref{abel jacobi for non hyper} there is an embedding $A: \mathcal C_B \to \overline J_B$ which, by Lemmas \ref{comparing R} and \ref{if r surj then okay}, induces an isomorphism $\pi_1(\overline J_B) \cong \pi_1 (\mathcal C_B) \cong \mathbb{Z} \slash (2)$. The second statement is standard. It can, for example, be proved by comparing diagram (\ref{leibman diagram}) for the two families and using the fact that $\pi_1(U \cap B) \to \pi_1(U)$ is surjective (or also using \cite[Thm 1.1]{Goreski-MacPherson}).
\end{proof}
This shows that there is a $2:1$ cover of $\overline J_B$. Using the norm map we wish to extend this cover to all of $N$.
Let $U' \subset |C|$ be the locus parametrizing integral curves. The norm map will allow us to extend the covering to $N_{U'}=\overline J(\mathcal C_{U'})$, showing that the surjection $\mathbb{Z} \slash (2)=\pi_1(\overline J_B) \to \pi_1(N_{U'})$ is an isomorphism. To extend this result to all of $N$, we need to assume that $|C|$ satisfies Assumption \ref{assumption}. Indeed, if this holds then $\codim(N\setminus N_{U'}, N ) \ge 2$ and hence $\pi_1(N_{U'})=\pi_1(N)$.
\begin{prop} \label{norm map}
Let
\[
\xymatrix{
\mathcal D \ar[dr] \ar[rr]_{2:1}^f && \mathcal C \ar[dl] \\
& B &
}
\]
be a family of \'etale double covers between reduced and irreducible curves with locally planar singularities. Let $d$ be an integer and let $\overline J_d({\mathcal D}) \to B$ and $\overline J_d({\mathcal C}) \to B$ be the relative degree $d$ compactified Jacobians of the families $\mathcal D$ and $\mathcal C$. There is a natural fiberwise \'etale double cover $\widetilde {J_d(\mathcal C)} \to J_d(\mathcal C)$, which on the Jacobian of a smooth curve $C_b$ is induced by the index two subgroup
\begin{equation} \label{fiberwise cover}
f_* H_1(D_b, \mathbb{Z}) \subset H_1(C_b, \mathbb{Z}).
\end{equation}
\end{prop}
\begin{proof}
The proof is based on the norm map, see \cite[\S 6.5]{EGAII} and \cite[\S 21.5]{EGAIViv}. Let $J_d({\mathcal D}) \subset \overline J_d(\mathcal D)$ and $J_d({\mathcal C}) \subset \overline J_d(\mathcal C)$ be the open loci parametrizing locally free sheaves. For simplicity, let us consider the case where the general curve is smooth and where the total space $J_d({\mathcal C})$ is also smooth (this is the only case we will need; in any event, the general case can be deduced from this using versal deformation spaces). Since the complement of $J_d({\mathcal C})$ in $ \overline J_d(\mathcal C)$ has codimension $\ge 2$, we have $\pi_1(J_d({\mathcal C}))=\pi_1(\overline J_d({\mathcal C}))$ and it will be enough to study the double cover over $J_d({\mathcal C})$, as it will automatically extend to the whole of $\overline J_d({\mathcal C})$. The norm map is the morphism defined by (cf. \cite[\S 6.5]{EGAII})
\begin{equation} \label{norm map 1}
\begin{aligned}
N_{\mathcal D \slash \mathcal C}: J_d({\mathcal D}) & \longrightarrow J_d({\mathcal C}) \\
L & \longmapsto \det f_* L \otimes (\det f_* \mathcal O_{\mathcal D})^{-1}
\end{aligned}
\end{equation}
For $b \in B$, let $D_b \to C_b$ be the restriction to $b$ of the $2:1$ cover. If $L=\mathcal O_{D_b}(\sum n_i x_i)$, then \cite[21.5.5]{EGAIViv}
\begin{equation} \label{explicit norm}
N_{{\mathcal D \slash \mathcal C}}(L)=\mathcal O_{C_b}(\sum n_i f(x_i)).
\end{equation}
It is well known \cite{MumfordPryms} that over the locus of smooth curves the kernel of the norm map has two connected components. To show that this is the case also for a singular (but irreducible) curve we can argue in the following way. Suppose that $D_b$ and $C_b$ are singular curves and let $n: \widetilde C_b \to C_b$ and $m: \widetilde D_b \to D_b$ be their normalizations. The norm map is compatible with the pullback to the normalizations (cf. \cite[Prop. 6.5.8]{EGAII}), in the sense that the following is a commutative diagram of short exact sequences of groups
\begin{equation} \label{norm map 2}
\xymatrix{
0 \ar[r] & G \times G \ar[r] \ar[d]^\sum & J_{ D_b} \ar[d]^{N} \ar[r]^{m^*} & J_{\widetilde D_b} \ar[d]^{\widetilde N} \ar[r] & 0 \\
0 \ar[r] & G \ar[r] &J_{ C_b} \ar[r]^{n^*} & J_{\widetilde C_b} \ar[r] & 0
}
\end{equation}
Here $G\cong n_* \mathcal O^*_{\widetilde C_b} \slash \mathcal O^*_{C_b}$ and the two factors of $G \times G \cong m_* \mathcal O^*_{\widetilde D_b} \slash \mathcal O^*_{D_b}$ are exchanged by the involution. By definition of the norm map, if $z=(x,y) \in G \times G$ then $N(z)$ is the determinant of the endomorphism $\mu_z: G \times G \to G \times G$ determined by the multiplication by $z$. Given that the algebra structure on $G \times G$ is simply the product structure, multiplication by $z=(x, y)$ is just multiplication by $x$ on the first component and by $y$ on the second, so $\det \mu_z=xy$. This shows that the restriction of $N$ to $G \times G$ is the product map. From (\ref{norm map 2}) it then follows that $\ker N$ has two connected components, which shows that if we consider the quotient $\widetilde{J_d({\mathcal C})} $ of $J_d(\mathcal D)$ by the identity component of the kernel of the norm map we get a commutative diagram
\begin{equation} \label{two to one norm}
\xymatrix{
J_d({\mathcal D}) \ar[dr] \ar[rr] && J_d({\mathcal C}) \\
& \widetilde{J_d({\mathcal C})} \ar[ur]_{2:1}^\Phi &
}
\end{equation}
which is the desired $2:1$ fiberwise cover and which can be interpreted as the Stein factorization of (\ref{norm map 1}).
We are left with determining what this double cover is for the Jacobian of a smooth curve $C_b$. Since by construction $ J_d({\mathcal D}) \to \widetilde{J_d({\mathcal C})} $ has connected fibers, it is clear that $\im[ \pi_1(J_{ D_b}) \to \pi_1(J_{ C_b})]=\im[ \pi_1(\widetilde{J_{\mathcal C}} \res{b}) \to \pi_1(J_{ C_b})]$. If we consider degree $d$ Abel--Jacobi maps $D_b \to J_d({ D_b})$ and $C_b \to J_d({ C_b})$ with respect to points $x \in D_b$ and $f(x) \in C_b$, then they will be compatible with $f$ and with the norm map. Using this we can see that on each smooth fiber $\im[ \pi_1(\widetilde{J_{\mathcal C} }\res{b}) \to \pi_1(J_{ C_b})]=f_* H_1(D_b, \mathbb Z) \subset H_1(C_b, \mathbb Z) $ as desired.
\end{proof}
\begin{cor} \label{pi one non hyper}
Let $|C|$ be a non--hyperelliptic linear system on a general Enriques surface and let $N$ be as in Theorem \ref{thm smooth}. Then there is a surjection
\[
\mathbb{Z} \slash (2) \to \pi_1(N)
\]
which is an isomorphism in case $|C|$ satisfies Assumption \ref{assumption}.
\end{cor}
\begin{proof} The existence of a surjection $\mathbb{Z} \slash (2) \to \pi_1(N)$ for a non--hyperelliptic linear system holds, unconditionally, thanks to Lemma \ref{from pencil to whole}.
If a non--hyperelliptic linear system satisfies Assumption \ref{assumption}, then the locus in $N$ parametrizing sheaves supported on non-integral curves has codimension $\ge 2$, so we can remove it without affecting the fundamental group. Proposition \ref{norm map} shows that there is a surjection $\pi_1(N) \to \mathbb{Z} \slash (2)$, which means that the morphism $\mathbb{Z} \slash (2)=\pi_1(\overline J_B) \to \pi_1(N)$ has to be an isomorphism.
\end{proof}
Using the norm map we can also give the following geometric interpretation to the universal cover
\begin{equation} \label{psi}
\Psi: \widetilde N \to N,
\end{equation}
of $N$ (under the hypothesis that $|C|$ satisfies Assumption \ref{assumption} so that $\pi_1(N)=\mathbb{Z} \slash (2)$).
\begin{prop} \label{universal cover and norm map}
Let $|C|$ be a genus $g \ge 2$ linear system on a general $T$ and suppose that it satisfies Assumption \ref{assumption}. For every $t$ in the open set $U$ parametrizing smooth curves, we let $C_t$ be the corresponding curve and we set $D_t=f^{-1}(C_t)$. Then
\[
\ker[\pi_1(N_t) \to \pi_1(N)]=f_* H_1(D_t, \mathbb{Z}).
\]
In addition, over the locus $U'$ of integral curves the universal cover (\ref{psi}) agrees with the $2:1$ cover $\Phi$ induced by the norm map (\ref{two to one norm}).
\end{prop}
\begin{proof}
If $|C|$ is not hyperelliptic, then this is a corollary of the construction of $\Psi$ as an extension of $\Phi$. Suppose therefore that $|C|$ is hyperelliptic. The first statement follows from Proposition \ref{norm map} and from the second statement of Theorem \ref{pi one for hyperelliptic}. Let us consider the two fiberwise coverings $\Psi$ and $\Phi$ of $N_{U'}$. They are defined by two surjections $\eta_\Phi: \pi_1(N_{U'}) \to \mathbb{Z} \slash (2)$ and $\eta_\Psi: \pi_1(N_{U'}) \to \mathbb{Z} \slash (2)$, which we want to prove are the same morphism. By the first statement we know that $\Psi$ and $\Phi$ induce the same cover over the Jacobians of smooth curves (in fact, it is not hard to show that they induce the same covering also for $1$--nodal curves). Since the surjection $\pi_1(N_U) \to \pi_1(U)$ is split, $\Psi$ and $\Phi$ define the same covering of $N_U$.
Letting $j_*: \pi_1(N_U) \twoheadrightarrow \pi_1(N_{U'}) $ be the natural morphism associated to the open embedding $j: N_U \to N_{U'}$, we may deduce that $\eta_\Phi \circ j_*= \eta_\Psi \circ j_* $. Since $j_*$ is surjective this shows that $\eta_\Phi=\eta_\Psi$ and hence that $\Psi$ and $\Phi$ define the same covering of $N_{U'}$.
\end{proof}
We finish this section with a result that will be used in Section \ref{second betti number}.
\subsection{Vanishing cycles} \label{section vanishing cycles}
Let us now come to the result on vanishing cycles which will be used in Section \ref{second betti number}. We use the notation introduced at the beginning of this Section and before Lemma \ref{comparing R}.
Recall that for every irreducible component $W_i$ of $W$ we have chosen a disc $D_i\subset B$ which is transversal to $W_i$ at a chosen point $x_i$, and we have picked a point $o_i \in \partial D_i$.
For every $i$, let us consider the restriction
\[
E_{D_i} \to D_i
\]
and choose $s(o_i)$ as base point for $\pi_1(E_{D_i})$. Consider the usual specialization map
\begin{equation} \label{local vanishing cycles}
{sp_i}_*: \pi_1(F) \to \pi_1(E_{D_i}) \cong \pi_1(E_{x_i}), \quad E_{x_i}=p^{-1}(x_i),
\end{equation}
where the isomorphism $\pi_1(E_{D_i}) \cong \pi_1(E_{x_i})$ comes from a retraction $E_{D_i} \to E_{x_i}$.
Let $V_i$ be a set of generators of $ \ker {sp_i}_*$ (which by definition is the group of vanishing cycles of the family $E_{D_i}$). Given a loop $v_i \in V_i$ based at $s(o_i)$ and any path $\gamma$ joining $p(o)$ to $ o_i \in \partial D_i$ as above, we can form a loop $v $ in $ E_U$ by setting
\[
v=s_*(\gamma) v_i s_*(\gamma)^{-1}.
\]
Denote by $V$ the set of loops obtained by doing this for every component of $W$, and by $V^\rho$ the normal subgroup generated by $V$ and all its conjugates under the monodromy action of $\pi_1(U)$ on $\pi_1(F)$.
\begin{prop} \cite{Leibman} \label{what is R}
$R=[\pi_1(F), s_*H] \cdot V^\rho$ (here the commutator $[\pi_1(F), s_*H]$ is taken in $\pi_1(E_U)$ and, since $\pi_1(F) \subset \pi_1(E_U)$ is normal, it is contained in $\pi_1(F)$).
\end{prop}
\begin{proof} This proof imitates the proofs of \cite[Lem. 1.2 and 1.7]{Leibman}.
An element $\alpha$ in $R=\pi_1(F) \cap G$ can be represented as the boundary of a map $\varphi: D \to E$, with $o \in \varphi (\partial D) \subset E_U$ and such that $p_*(\varphi (\partial D))=1$ in $\pi_1(U)$. By transversality, $\varphi(D)$ can be made transversal to each component $E^j_{W_i}$ at the chosen points $q_{ij}$ and such that the intersection of $\varphi(D)$ with a suitable neighborhood of $q_{ij}$ is contained in $D_{ij}$ for every $i$ and $j$. Choose an orientation for $\partial D_{ij}$. We can write $\alpha=\prod \alpha_{ij}$ where
\[
\alpha_{ij}=\beta_{ij} \partial D_{ij} {\beta_{ij}}^{-1}
\]
for a path $\beta_{ij}$ in $E_U$ joining $o$ to the chosen point $o_{ij} \in \partial D_{ij} $. Notice that to write $\alpha$ as a product we have defined an ordering on the set of bi-indices $ij$, i.e. we have chosen a bijection
$\nu:\{ ij\} \to L=\{1, \dots, N\}$ such that we can write
\[
\alpha=\prod_{\nu(ij) =1}^{N} \alpha_{ij}.
\]
Join $s(o_i) \in s(D_i)$ to $o_{ij}$ via a path $\epsilon_{ij}$ in the fiber $p^{-1}(o_i)$. Set $\gamma_{ij}:=p_* \beta_{ij}$, so that $\gamma_{ij}$ is a path in $U$ joining $p(o)$ to $o_i$. Then $\beta_{ij} {\epsilon_{ij}}^{-1}(s_*\gamma_{ij})^{-1}=f_{ij}$, for some $f_{ij} \in \pi_1(F)$. Since $p_*(\epsilon_{ij} \partial D_{ij} {\epsilon _{ij}}^{-1})=p_*(s_* \partial D_i)$, the difference
\[
v_{ij}:=s_*( \partial D_i)^{-1} \epsilon_{ij} \partial D_{ij} {\epsilon _{ij}}^{-1}
\]
lies in $\ker({sp_i}_*)$. This shows we can write
\[
\alpha_{ij} =\beta_{ij} \partial D_{ij} {\beta_{ij}}^{-1}= \beta_{ij} {\epsilon _{ij}}^{-1} \underbrace{ {\epsilon _{ij}} \partial D_{ij} {\epsilon _{ij}}^{-1}}_{s_*( \partial D_i )v_{ij} } {\epsilon _{ij}} {\beta_{ij}}^{-1}=
\]
\[
=\beta_{ij} {\epsilon _{ij}}^{-1} s_*( \partial D_i )v_{ij} {\epsilon _{ij}}{\beta_{ij}}^{-1}=
f_{ij} s_*(h_{ij}) {f_{ij}}^{-1} w_{ij},
\]
where $h_{ij}= \gamma_{ij} \partial D_i \gamma_{ij}^{-1}$ and
\[
w_{ij}={\beta_{ij}} {\epsilon _{ij}}^{-1} v_{ij} {\epsilon _{ij}} {\beta_{ij}}^{-1}.
\]
We can write
\[
w_{ij}=s_*(\gamma_{ij}) d_{ij} v_{ij} {d_{ij}}^{-1}s_*(\gamma_{ij})^{-1},
\]
where $d_{ij}:=s_*(\gamma_{ij})^{-1} \beta_{ij} {\epsilon _{ij}}^{-1} $ is homotopic to a loop in the fiber $p^{-1}(o_i)$. Since clearly $d_{ij} v_{ij} {d_{ij}}^{-1} \in \ker({sp_i}_*)$, we have $w_{ij} \in V$. We can thus write
\[
\alpha=\prod_{ij} [f_{ij}, s_*(h_{ij}) ] s_*(h_{ij}) w_{ij}=\prod_{\nu=1}^N [f_{\nu}, s_*(h_{\nu}) ] s_*(h_{\nu}) w_{\nu},
\]
where in the last equality we have used the ordering of the bi--indices introduced above.
Following Leibman (pp. 100 and 103) we can write
\[
\alpha=\prod_{\nu=1}^N [{f'}_{\nu}, s_*(h'_\nu) ] {w'}_{\nu} \prod_{\nu=1}^N s_*(h_\nu),
\]
where
\[
f'_\nu:=\Big(\prod_{\xi=1}^{\nu-1} s_*(h_\xi) \Big) f_\nu \Big(\prod_{\xi=1}^{\nu-1} s_*(h_\xi) \Big)^{-1}
\]
\[
s_*h'_\nu:= \Big(\prod_{\xi=1}^{\nu-1} s_*(h_\xi) \Big) s_*h_\nu \Big(\prod_{\xi=1}^{\nu-1} s_*(h_\xi) \Big)^{-1}
\]
\[
w'_\nu:= \Big(\prod_{\xi=1}^{\nu} s_*(h_\xi) \Big) w_\nu \Big(\prod_{\xi=1}^{\nu} s_*(h_\xi) \Big)^{-1}
\]
Notice also that $p_* \prod_{\nu=1}^N s_*(h_\nu)=1$ and since $p_* s_*=\id$, then $\prod_{\nu=1}^N s_*(h_\nu)=1$, and hence
\[
\alpha=\prod_{\nu=1}^N [{f'}_{\nu}, s_*(h'_\nu) ] {w'}_{\nu}.
\]
To finish the proof we only have to notice that ${w'}_{\nu} \in V^\rho$. This follows from the fact that if $h \in \pi_1(U)$ and $w \in \pi_1(F)$, then $s_*(h) w s_*(h)^{-1}$ is exactly the monodromy action $\rho_h(w)$ of $h$ on $w$ (cf. \cite{ASF12}, \S 7, in particular, (7.7) and (7.9)). Since we have already observed that $w_\nu \in V$, it follows that ${w'}_{\nu} \in V^\rho$.
\end{proof}
Let us now apply this result when $[E \to B]=[\mathcal C\to B]$ is a smooth family of reduced curves whose general member is smooth of genus $g$ and such that in codimension one we have only nodal curves of geometric genus $g-1$ (this is indeed the case for the families we consider). Suppose the family has a section, so that we can apply the remarks above. Consider, as we did before Lemma \ref{comparing R}, generators of $H$ of the form $h_{i, \gamma}= \gamma \partial D_i \gamma^{-1}$. For each $h_i$, the monodromy operator $\rho_{h_i}$ on the fiber $ C_{t_o}$, for $t_o:=p(o)$, is the Dehn twist around a closed loop $c_i=c_{i, \gamma} \subset C_{t_o}$, called the vanishing cycle associated to $h_i$ (see \cite[Chapter XI]{GACII}). The choice of $c_i$ depends on the choice of local coordinates, but its homology class is well defined (up to a sign). To see how it compares to the local vanishing cycles of $\ker({sp_i}_*)$ (cf. (\ref{local vanishing cycles})), consider a $v_i \in V_i$ and the loop $ s_*( \gamma) v_i s_*(\gamma)^{-1}$, which clearly belongs to $R$. Since we are assuming that in codimension one the genus drops only by one, the image in homology of $\ker({sp_i}_*)$ has rank one and hence the classes of $s_*( \gamma) v_i s_*(\gamma)^{-1}$ in $H_1(C_{t_o}, \mathbb{Z})$ are all multiples of $c_i$. In particular, the classes of the ${w'}_{ij}$ defined in the previous Proposition will also be multiples of $c_i$.
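For the next computation we use the Picard--Lefschetz description of the monodromy around such a $1$-nodal degeneration: on $H_1(C_{t_o}, \mathbb{Z})$, and up to the sign fixed by the choice of orientations, the operator $\rho_{h_i}$ acts by
\[
\rho_{h_i}(f)=f+(f, c_i)\,c_i, \qquad f\in H_1(C_{t_o},\mathbb{Z}),
\]
where $(\,\cdot\,,\,\cdot\,)$ denotes the intersection pairing; this is the formula behind (\ref{commutator and monodromy}) below.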
Also, for any $f \in \pi_1(C_{t_o})$ we have, passing to $H_1(C_{t_o}, \mathbb{Z})$, switching to the additive notation, and using that $[f, s_*(h_i)]=f \rho_{h_i}(f^{-1})$:
\begin{equation} \label{commutator and monodromy}
[f, s_*(h_i) ] =f-f-(f, c_i)c_i=-(f, c_i)c_i \in H_1(C_{t_o}, \mathbb{Z}).
\end{equation}
\begin{cor} \label{generated by vanishing cycles}
Let $p: \mathcal C \to B$ be a family of curves as above, and let $R_{\mathcal C}=\ker[\pi_1(C_{t_o}) \to \pi_1(\mathcal C)]$ be as in (\ref{leibman diagram}).
Then the image of $R_{\mathcal C}$ in $H_1(C_{t_o}, \mathbb{Z})$ is generated by the vanishing cycles associated with a set of generators of $H$.
\end{cor}
\begin{proof}
This is an immediate consequence of Proposition \ref{what is R}, of the definition of vanishing cycle, and of (\ref{commutator and monodromy}).
\end{proof}
\section{The canonical bundle} \label{canonical bundle}
The aim of this section is to show that the canonical bundle of $N$ is trivial. We start with the following adaptation of Theorem 8.3.3 of \cite{Huybrechts-Lehn} to our context
\begin{prop} [\cite{Huybrechts-Lehn}]
Let $T$ be an Enriques surface, and let $M$ be a component of a moduli space parametrizing stable sheaves $F$ such that $F \ncong F \otimes \omega_T$.
Then the canonical bundle of $M$ is torsion, i.e.,
\[
\omega_M=0 \,\, \text{ in } \Pic(M)_\mathbb{Q}.
\]
\end{prop}
\begin{proof}
First, notice that $M$ is smooth since by assumption the obstructions vanish. Even though Chapter 8 of \cite{Huybrechts-Lehn} is formulated for sheaves of positive rank, one can go through all the results needed for the proof of Theorem 8.3.3 and check that they work, with the appropriate modifications, also in the case of pure dimension one sheaves.
\end{proof}
Recall from (\ref{N}), (\ref{Y}) and (\ref{psi}) the definitions of $N$, $Y$ and $\mathbb{N}t$.
\begin{cor} With the assumptions of Theorem \ref{thm smooth}, we have
$\omega_{\widetilde N} \cong \mathcal O_{\widetilde N}$.
\end{cor}
\begin{proof}
By the proposition above, $\omega_{\widetilde N}$ is a torsion class in $\Pic(\widetilde N)$, but since $\widetilde N$ is simply connected, this class has to be trivial.
\end{proof}
\begin{prop} \label{comparing the canonical} Let the assumptions be as in Theorem \ref{thm smooth}. The following are equivalent:\\
$i)$ The canonical bundle of $Y$ is trivial;\\
$ii)$ The canonical bundle of $N$ is trivial;\\
$iii)$ The canonical bundle of $\widetilde N$ is trivial.
\end{prop}
\begin{proof}
Since $\Phi: N \to Y$ and $\Psi: \widetilde N \to N$ are \'etale, we only need to prove that $ii)$ implies $i)$ and that $iii)$ implies $ii)$. We start with the first implication, so let us suppose that $\omega_N \cong \mathcal O_N$.
Recall from formula (\ref{define L}) the definition of $L$. Then $\Phi^*\omega_Y\cong \mathcal O_N$, implying that either $\omega_Y$ is trivial, or that it is isomorphic to $L$. Consider a point $t \in U$ and denote, as usual, by $Y_t$ the fiber over $t$. To conclude we make the following two claims. The first is that $L_{|Y_t}$ is not trivial, which follows immediately from (\ref{pullback jacobians}).
The second claim is that $({\omega_Y})_{|Y_t}$ is trivial, which follows immediately from the fact that $\omega_{Y_t}$ and $N_{Y_t|Y}$ are trivial. Hence, $\omega_Y \ncong L$.
The same argument applies to show that the canonical bundle of $\widetilde N$ is trivial if and only if the canonical bundle of $N$ is trivial. In fact, the only thing we need is that on each fiber the double cover $\widetilde N_t \to N_t$ is non-trivial, and this is a consequence of Proposition \ref{universal cover and norm map}.
\end{proof}
The steps above prove the following theorem
\begin{thm} \label{thm canonical}
Let $|C|$ be a genus $g \ge 2$ linear system on a general Enriques surface $T$ and let $N$ be as in Theorem \ref{thm smooth}. Then
\[
\omega_N\cong \mathcal O_N.
\]
\end{thm}
Notice that Proposition \ref{comparing the canonical}, and hence Theorem \ref{thm canonical}, is \emph{not} conditional on Assumption \ref{assumption}.
\begin{cor}\emph{
With the same assumptions as in the theorem above,
\[
\chi({\mathcal O}_Y)=\chi({\mathcal O}_N)=0.
\]}
\end{cor}
\begin{proof}
By Serre duality, this is true for any odd--dimensional Calabi--Yau manifold.
\end{proof}
Moduli spaces of sheaves on a K3 surface share many properties of the surface itself. As we saw in Theorem \ref{thm canonical}, this is not the case for Enriques surfaces. Another instance of this lack of analogy is the fact that the universal cover of $N$ induces a \emph{non-trivial} cover of every fiber: any Enriques surface $T$ admits an elliptic fibration $T \to \mathbb{P}^1$ with exactly two multiple (double) fibers. The canonical bundle of $T$ is the difference of the two half fibers and, moreover, the universal cover $f: S \to T$ induces a \emph{trivial} cover of every reduced fiber; indeed, if $e$ is a primitive elliptic curve in $T$, then for any reduced curve $\Gamma$ belonging to $|2e|$, $f^{-1}(\Gamma)$ is the disjoint union of two members of $|f^*e|$. In fact, the covering $S \to T$ is induced by base change via a degree two morphism $\mathbb{P}^1 \to \mathbb{P}^1$ (ramified at the two points corresponding to the non-reduced fibers) whereas in the case of $\nu: N \to |C|$, as we have already mentioned, the restriction of the universal cover to the fibers of $\nu$ is non-trivial.
This difference in behavior appears also in comparison to other types of moduli spaces of sheaves. In \cite{Oguiso_Schroer11}, Oguiso and Schr\"oer prove that the Hilbert scheme of $n$ points on a given Enriques surface $T$ has the property that the canonical bundle is not trivial, but twice the canonical bundle is trivial. It would be interesting to know if one could extract a general principle from this phenomenon, i.e., that the canonical bundle of a moduli space depends on the parity of its dimension. It would also be interesting to study, given a genus $g$ linear system $|C|$, the geometry of the rational Abel-Jacobi maps
\[
T^{[g-1]} \dashrightarrow N^{g-1} \quad \text{ and } \quad N^g \dashrightarrow T^{[g]}.
\]
Here $N^d$ denotes the degree $d$ relative compactified Jacobian of $|C|$.
We end the section with the main result
\begin{thm} \label{calabi-yau}
Let $N$ be as in Theorem \ref{thm smooth}. Then the Calabi-Yau manifold $N$ is irreducible. By this we mean that
\[
H^p(N, {\mathcal O}_N)\cong \left\{\begin{aligned}
&\mathbb{C} \,\,\,\, \text{if} \,\,\,p=0, \text{ or } p=2g-1,\\
& 0 \,\,\,\,\, \text{otherwise}.
\end{aligned}
\right.
\]
\end{thm}
The main step in proving the theorem is the following proposition that computes the higher direct images of the structure sheaf by using the corresponding result of Matsushita \cite{Matsushita05} for the morphism $\pi: M \to |D|$.
\begin{prop} \label{higher direct images} Let $N$ be as in Theorem \ref{thm smooth}. Then,
\begin{equation}
R^i\nu_*{\mathcal O}_N\cong \wedge ^i \Big(\oplus_{j=1}^g {\mathcal O}_{\mathbb{P}^{g-1}}(-1)\Big).
\end{equation}
\end{prop}
Since
\[
H^k(\mathbb{P}^{g-1}, {\mathcal O}_{\mathbb{P}^{g-1}}(-p+k))\cong \left\{\begin{aligned}
&\mathbb{C} \quad \quad \text{if } (k,p)=(0,0), \quad \,\, \text{or } \,\,\, (k,p)=(g-1, 2g-1)\\
&0 \quad \quad \,\,\, \text{otherwise}
\end{aligned}
\right.
\]
the spectral sequence calculating $H^p(N, {\mathcal O}_N)$ degenerates, and the theorem easily follows.
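To spell out the degeneration, recall from Proposition \ref{higher direct images} that $R^{q}\nu_*{\mathcal O}_N\cong \wedge^{q}\big(\oplus_{j=1}^g {\mathcal O}_{\mathbb{P}^{g-1}}(-1)\big)\cong {\mathcal O}_{\mathbb{P}^{g-1}}(-q)^{\oplus\binom{g}{q}}$, so that the $E_2$--terms of the Leray spectral sequence for $\nu$ read
\[
E_2^{k,q}=H^k\big(\mathbb{P}^{g-1}, R^{q}\nu_*{\mathcal O}_N\big)\cong H^k\big(\mathbb{P}^{g-1},{\mathcal O}_{\mathbb{P}^{g-1}}(-q)\big)^{\oplus\binom{g}{q}}.
\]
By the displayed vanishing (with $q=p-k$), the only non-zero terms are $E_2^{0,0}$ and $E_2^{g-1,g}$, so there is no room for non-trivial differentials and these two terms compute $H^0(N,{\mathcal O}_N)$ and $H^{2g-1}(N,{\mathcal O}_N)$ respectively.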
\begin{proof}[Proof of Proposition \ref{higher direct images}]
Since the canonical bundles of $N$ and of $Y$ are trivial, by Theorem 2.1 in \cite{Kollar1} the sheaves $R^i\nu_*{\mathcal O}_N$ and $R^i\pi_*{\mathcal O}_Y$ are torsion free and by Corollary 3.9 in \cite{Kollar86} they are reflexive.
Moreover, $\Phi$ is a finite morphism so the spectral sequence associated to the composition of functors yields an isomorphism (recall that $L$ was defined in (\ref{define L}))
\[
R^i\nu_*{\mathcal O}_N \cong R^i\pi_*{\mathcal O}_Y \oplus R^i\pi_*L.
\]
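Spelling out the intermediate step: since $\Phi$ is finite, and hence affine, we have $R^j\Phi_*{\mathcal O}_N=0$ for $j>0$, so the spectral sequence for $\nu=\pi\circ\Phi$ collapses to
\[
R^i\nu_*{\mathcal O}_N\cong R^i\pi_*\big(\Phi_*{\mathcal O}_N\big)\cong R^i\pi_*{\mathcal O}_Y\oplus R^i\pi_*L.
\]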
However, since $L_{|Y_t}$ is a non-trivial torsion line bundle, the higher direct images $R^i\pi_*L$ are supported on the discriminant locus of $|C|$ and since the sheaf $R^i\nu_*{\mathcal O}_N $ is torsion free, they have to be identically zero. It follows that
\begin{equation} \label{N and Y}
R^i\nu_*{\mathcal O}_N \cong R^i\pi_*{\mathcal O}_Y.
\end{equation}
The proof of the proposition can then be deduced from the following three claims, whose proof uses Proposition \ref{degenerations} below.
\subsection*{Claim I} $R^1\nu_*{\mathcal O}_N \cong R^1p_*{\mathcal O}_{\mathcal C}$.
\subsection*{Claim II} $R^1\nu_*{\mathcal O}_N\cong \oplus_{i=1}^g {\mathcal O}_{\mathbb{P}^{g-1}}(-1)$.
\subsection*{Claim III} $R^i\nu_*{\mathcal O}_N \cong \wedge ^i R^1\nu_*{\mathcal O}_N$.
For Claim I, first notice that there is a natural isomorphism over the locus $U \subset |C|$ of smooth curves (e.g. see Lemma \ref{higher direct images different degree} below).
By Proposition \ref{degenerations} below, the isomorphism extends naturally over the general point of every component of the discriminant. Hence, there is an open subset $W \subset |C|$, whose complement has codimension greater or equal to two, over which the sheaves in question are isomorphic. Since they are reflexive sheaves, this isomorphism extends to an isomorphism over all of $|C|$.
To show Claim III, we first use Proposition \ref{degenerations} as well as Claim I to find an isomorphism which is defined over an open set $W$ as above. Since by Claim II $R^1\nu_*{\mathcal O}_N$, and hence also its exterior powers, are locally free, the isomorphism defined over $W$ extends to the whole of $|C|$ and we have proved the claim.
For Claim II, we argue as follows. Let $\mathcal I$ denote the ideal sheaf of $|C|$ in $|D|$. Recall that $ \mathcal I/ \mathcal I^2\cong \oplus_{i=1}^g {\mathcal O}_{\mathbb{P}^{g-1}}(-1)$ and that the short exact sequence (on which the involution $\iota^*$ acts)
\[
0 \to \mathcal I/ \mathcal I^2 \to (\Omega^1_{|D|})\res{|C|} \to \Omega^1_{|C|} \to 0
\]
is split. The sheaf $ \mathcal I/ \mathcal I^2$ is the $\iota^*$--anti--invariant part of $(\Omega^1_{|D|})\res{|C|}$ and the sheaf $ \Omega^1_{|C|}$ is the $\iota^*$--invariant part. By Theorem 1.3 of \cite{Matsushita05}, there is an isomorphism
\begin{equation} \label{matsushita iso}
\Omega^1_{|D|} \cong R^1\pi_* {\mathcal O}_M.
\end{equation}
Since this isomorphism is induced by the symplectic form $\sigma$ of $M$, it interchanges the invariant and anti-invariant subbundles of $\Omega^1_{|D|} \res{|C|}$ and of $R^1\pi_* {\mathcal O}_M \res{|C|}$. In particular, the composition of the inclusion $\mathcal I/ \mathcal I^2 \to (\Omega^1_{|D|})\res{|C|} \cong R^1\pi_* {\mathcal O}_M\res{|C|}$ with the natural morphism $R^1\pi_* {\mathcal O}_M \res{|C|} \to R^1\pi_* {\mathcal O}_Y$ is non-zero and generically surjective. We need to show that it is an isomorphism. Consider a general line $\ell \subset |C|$ and let $p': \mathcal C' \to \ell$ be the restriction of the family of curves to $\ell$, so that $\mathcal C'$ is just the blow up of $T$ at the $2g-2$ base points of the pencil. By base change (over $W$ all families in question are flat, so we can apply base change) and by Claim I there is an isomorphism
\[
R^1\nu_* {\mathcal O_N} \res{\ell}=R^1\pi_* {\mathcal O_Y}\res{\ell}\cong R^1p'_* {\mathcal O_{\mathcal C'}}.
\]
Since $R^1p'_* {\mathcal O_{\mathcal C'}}$ is locally free of rank $g$, we can write
\[
R^1p'_* {\mathcal O_{\mathcal C'}}=\oplus_{i=1}^g \mathcal O_{\mathbb{P}^1}(a_i), \quad \text{ for some } a_i \in \mathbb{Z}.
\]
Since the base is one-dimensional, the Leray spectral sequence degenerates and we can use the Hodge numbers of $\mathcal C'$ to calculate the $a_i$'s. From
\begin{equation}
\begin{aligned}
&0=H^1(\mathcal C', {\mathcal O}) \cong H^1(\mathbb{P}^1, {\mathcal O})\oplus H^0(\mathbb{P}^1, R^1 {p}_*{\mathcal O}_{\mathcal C'}), \,\,\,\,\,\text{and}\\
&0=H^2(\mathcal C', {\mathcal O})\cong H^2(\mathbb{P}^1, {\mathcal O})\oplus H^1(\mathbb{P}^1, R^1 {p}_*{\mathcal O}_{\mathcal C'})\oplus H^0(\mathbb{P}^1, R^2 {p}_*{\mathcal O}_{\mathcal C'}),
\end{aligned}
\end{equation}
we deduce that
\[
a_i < 0, \,\,\,\,\text{and }\,\,\,\,\,\,a_i>-2.
\]
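In more detail, by the standard cohomology of line bundles on $\mathbb{P}^1$,
\[
H^0(\mathbb{P}^1,\mathcal O_{\mathbb{P}^1}(a_i))=0 \iff a_i\le -1, \qquad H^1(\mathbb{P}^1,\mathcal O_{\mathbb{P}^1}(a_i))=0 \iff a_i\ge -1,
\]
so the two vanishings above are exactly the two stated inequalities.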
We conclude that $a_i=-1$ for every $i$, so that $R^1p'_* {\mathcal O_{\mathcal C'}}=\oplus_{i=1}^g \mathcal O_{\mathbb{P}^1}(-1)$. It follows that the morphism
\[
\mathcal I/ \mathcal I^2 \to R^1\pi_* {\mathcal O}_Y
\]
defined above is an isomorphism over an open subset whose complement has codimension greater or equal to two. Hence it extends to a global isomorphism and Claim II follows using (\ref{N and Y}).
\end{proof}
\begin{rem} Analogously to the cases of ${\mathcal O}_N$ and ${\mathcal O}_Y$, one can show that the higher direct images of ${\mathcal O}_N$ and of ${\mathcal O}_{\widetilde N}$ are also isomorphic, so that
\[
H^p(\widetilde N, {\mathcal O}_{\widetilde N})\cong \left\{\begin{aligned}
&\mathbb{C} \,\,\,\, \text{if} \,\,\,p=0, 2g-1,\\
& 0 \,\,\,\,\, \text{otherwise},
\end{aligned}
\right.
\]
and $\widetilde N$ is an irreducible Calabi-Yau manifold.
\end{rem}
\begin{prop} \label{degenerations}
Let $ \mathcal C \to B$ be a projective family of smooth genus $g$ curves parametrized by a smooth projective curve (or a disc), and let $p: \overline{ \mathcal C} \to \overline{ B}$ be a smooth compactification of the family such that for every point $a_i \in \overline{ B} \setminus B$ the curve $\overline {\mathcal C}_{a_i}=p^{-1}(a_i)$ is reduced and nodal. Let
$q: \overline{\mathcal J} \to \overline{B}$ be a relative compactified Jacobian of the family $ \overline{ \mathcal C}$ and suppose that it is smooth.
There is a natural isomorphism
\begin{equation}
R^i q_* \mathcal O_{\overline{\mathcal J}} \cong \wedge^i R^1 p_* \mathcal O_{\overline{\mathcal C}},
\end{equation}
\end{prop}
The proof of the proposition uses the following lemma and some results about Hodge bundles and their degenerations, which we recall in the next subsection.
\begin{lemma} \label{higher direct images different degree}
Let $q: \mathcal C \to B$ be a family of smooth curves and let $\nu: J^d_{\mathcal C} \to B$ be the degree $d$ relative compactified Jacobian. For every $i$ there is a natural morphism
\[
R^i \nu_* \mathbb{Q}_{J^d_{\mathcal C}} \to R^i q_* \mathbb{Q}_{\mathcal C},
\]
which is an isomorphism for $i=1$. Moreover, the same holds for the higher direct images of the structure sheaves.
\end{lemma}
\begin{proof}
Suppose $\mathcal C \to B$ has a section $s$. Then as in (\ref{Abel Jacobi}), we can consider an Abel--Jacobi map $A_s: \mathcal C \to J^d_{\mathcal C}$ whose pull--back $A_s^*$ induces a morphism between the local systems. Though the map itself depends on the section $s$, the morphism $A_s^*$ does not, since translation by a point on an abelian variety induces the identity in cohomology. It follows that even if $q$ does not have a section we can choose local sections to define local morphisms which, since they are independent of the section, can be glued to define a global morphism. The same argument can be applied to the direct images of the structure sheaf.
\end{proof}
In Section \ref{second betti number}, there will be a more refined version of this Lemma (Proposition \ref{J and J' MHS}).
\subsection{Degeneration of Hodge bundles} We follow \cite{Zucker}, \cite{Katz71}, \cite{Kollar86}, \cite{Steenbrink77}, and \cite{Peters-Steenbrink}, to which we refer for more details and complete proofs.
Let $B$ be a smooth curve and let $f: Z \to B$ be a smooth projective morphism. The $i$th cohomology of this family determines a degree $i$ variation of Hodge structures (VHS for short) on $B$ whose underlying local system is $R^i f_* \mathbb{C}$. By \cite[I.2.28]{Deligne70} the locally free sheaf
\begin{equation}
\mc{H}^i:=R^i f_* \mathbb{C} \otimes {\mathcal O}_B
\end{equation}
is isomorphic to the hypercohomology sheaf
\begin{equation}
R^if_* \Omega^\bullet_{Z|B},
\end{equation}
where $\Omega^\bullet_{Z|B}$ is the complex of relative differentials of the family. We denote by
\begin{equation} \label{Gauss-Maninn}
\nabla: \mc{H}^i \to \mc{H}^i\otimes\Omega^{1}_{B}
\end{equation}
the Gauss-Manin connection associated to $R^i f_* \mathbb{C}$. Consider the so-called \emph{filtration b\^ete}
\begin{equation} \label{filtration bete1}
\mc{F}^p \Omega^{\bullet}_{Z|B}:=\Omega^{\bullet\ge p}_{Z|B},
\end{equation}
of the complex of relative differentials.
The spectral sequence in hypercohomology associated to this filtration has $E_1^{p,q}$ term equal to
\begin{equation} \label{E2}
R^qf_* \Omega^p_{Z|B},
\end{equation}
and abuts to (the associated graded pieces of)
\begin{equation} \label{hi}
R^{i}f_* \Omega^\bullet_{Z|B}= \mc{H}^{i}.
\end{equation}
Since $f$ is smooth these sheaves are locally free and, just as in the case of a smooth projective variety (see for example \cite[Prop. 10.29]{Peters-Steenbrink}), one can show that the spectral sequence degenerates at $E_1$ and that the maps
\begin{equation}
R^if_* \Omega^{\bullet\ge p}_{Z|B} \to R^if_* \Omega^{\bullet}_{Z|B}
\end{equation}
are injective. In other words, the filtration induced on $\mc{H}^{i}$ is
\begin{equation}\label{filtration}
\mc{F}^{p}\mc{H}^i=R^if_* \Omega^{\bullet\ge p}_{Z|B}\subset \mc{H}^i,
\end{equation}
and its associated graded pieces are the sheaves
\[
R^qf_* \Omega^p_{Z|B}, \quad \text{with}\quad p+q=i.
\]
The filtration (\ref{filtration}) is precisely the Hodge filtration of the variation of Hodge structures of the family $Z \to B$.
Now consider a smooth projective compactification
\[
f\colon \overline{Z} \to \overline{B},
\]
and suppose that $D:=f^{-1}(\overline{B} \setminus B)$ is a reduced divisor with normal crossing. Let $\Omega_{\overline{Z}|\overline{B}}^\bullet(\log D)$ be the complex of relative logarithmic differentials (\cite[(21)]{Zucker}).
By a classical theorem \cite[II.7.9]{Deligne70}, it is known that $(\mc{H}^i, \nabla)$ is an algebraic differential equation with regular singular points. By definition \cite[(2.1) (i)]{Kollar86}, this means that $\nabla$ has logarithmic poles at every point $b$ in $\overline{B} \setminus B$, that is to say, there exists a vector bundle extension $\overline{\mc{H}}^i$ of $\mc{H}^i$ to all of $\overline{B}$, such that the connection $\nabla$ extends to a morphism
\[
\overline \nabla: \overline{\mc{H}}^i \to \overline{\mc{H}}^i \otimes \Omega_{\overline{B}}^1(\log A ), \quad A:=\overline B \setminus B.
\]
Such an extension is not unique, but as we will see there is a unique one satisfying some additional conditions. Recall \cite[(2.1) (iii)]{Kollar86} that the \emph{residue} $\mathrm{Res}_b (\overline \nabla)$ of $\overline \nabla$ at a point $b$ in $\overline{B} \setminus B$ is defined to be the endomorphism of the fiber $\overline{\mc{H}}^i_b$ induced by restricting $\overline \nabla$ to $b$ and composing this restriction with $\id_{\overline{\mc{H}}^i_b}\otimes \mathrm{Res}$, where $\mathrm{Res}$ is the Poincar\'e residue map $ \Omega_{\overline{B}}^1(\log b) \to \mathbb C_b $
\[
\mathrm{Res}_b (\overline \nabla): \overline{\mc{H}}^i_b \to \overline{\mc{H}}^i_b \otimes \Omega_{\overline{B}}^1(\log b) \to \overline{\mc{H}}^i_b \otimes \mathbb{C}_b.
\]
In other words, if we fix a local trivialization of $\overline{\mc{H}}^i$ around $b$ and if $z$ is a local coordinate on $\overline{B}$, the residue is defined by the equation $\overline \nabla=dz\otimes (\mathrm{Res}_b (\overline \nabla) 1/z + \dots)$. Another important property of the residue \cite[Lem. 2.2]{Kollar86} is that if $T$ denotes the local monodromy operator, then
\begin{equation} \label{monodromy and residue}
T=\exp(-2\pi i \mathrm{Res}_b(\overline \nabla)).
\end{equation}
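As a rank one illustration on the disc (not needed in what follows): for the logarithmic connection $\overline\nabla = d + \alpha\,\frac{dz}{z}$ on ${\mathcal O}_{\Delta}$ with $\alpha\in\mathbb{C}$, the residue at $0$ is multiplication by $\alpha$, the flat sections over $\Delta^*$ are the multiples of $z^{-\alpha}$, and continuing such a section once around the origin multiplies it by $e^{-2\pi i\alpha}$, in agreement with (\ref{monodromy and residue}):
\[
\overline\nabla\,(c\,z^{-\alpha}) = c\,\big(-\alpha z^{-\alpha-1}\,dz + \alpha z^{-\alpha}\tfrac{dz}{z}\big)=0, \qquad T = \exp(-2\pi i\,\mathrm{Res}_0(\overline\nabla)) = e^{-2\pi i \alpha}.
\]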
By \cite[(2.11)]{Steenbrink77} and \cite{Steenbrink_Zucker80} ((5.1) for the notation and (5.3) for the result; for a clear exposition see \cite{Zucker}), the sheaves
$$R^if_* \Omega^\bullet_{\overline{Z}|\overline{B}}(\log D)$$
and
$$R^qf_* \Omega^p_{\overline{Z}|\overline{B}}(\log D), \quad \,\,\,p+q=i,$$
are \emph{locally free} extensions of (\ref{hi}) and (\ref{E2}), respectively. Moreover, Katz proved in \cite[V]{Katz71} (see also \cite[Thm. 10.28]{Peters-Steenbrink}) that there is a natural morphism
\begin{equation} \label{extending nabla}
\overline \nabla: R^if_* \Omega^\bullet_{Z|B}(\log D) \to R^if_* \Omega^\bullet_{Z|B}(\log D) \otimes\Omega_{\overline{B}}^1(\log \overline{B} \setminus B),
\end{equation}
which extends the Gauss-Manin connection.
It follows that we can set
\[
\overline{\mc{H}}^i:= R^if_* \Omega^\bullet_{Z|B}(\log D).
\]
On the other hand, the filtration b\^ete (\ref{filtration bete1}) extends to a filtration
\begin{equation} \label{filtration bete2}
\mc{F}^p \Omega^{\bullet}_{\overline{Z}|\overline{B}}(\log D):=\Omega^{\bullet\ge p}_{\overline{Z}|\overline{B}}(\log D),
\end{equation}
of $R^if_* \Omega^\bullet_{Z|B}(\log D)$. This defines a spectral sequence whose $E_1^{p,q}$ terms are
\[
R^q f_* \Omega^p_{\overline{Z}|\overline{B}}(\log D).
\]
Since these sheaves are locally free and the differential is generically zero, the differential is identically zero. Hence the spectral sequence, which abuts to the associated graded pieces of $R^if_* \Omega^\bullet_{Z|B}(\log D)$, degenerates at $E_1$. Moreover, these graded pieces $R^qf_* \Omega^p_{\overline{Z}|\overline{B}}(\log D)$ are locally free and hence the extensions
\[
\mc{F}^p(\overline{\mc{H}}^i):= R^{i} f_* \Omega^{\ge p}_{\overline{Z}|\overline{B}}(\log D)
\]
of the sheaves $\mc{F}^p(\mc{H}^i)$ are actually extensions as vector \emph{sub-bundles} of $\overline{\mc{H}}^i$ (this is a particular case of Schmid's Nilpotent Orbit Theorem). The last ingredient is the following classical theorem.
\begin{thm}[Manin, \cite{Deligne70}, Prop. 5.4]\label{deligne} Let $(\mc{H}, \nabla)$ be an algebraic differential equation with regular singular points on the pointed disc $\Delta^*$. Then $(\mc{H}, \nabla)$ admits a \emph{unique} locally free extension $(\overline{\mc{H}}, \overline \nabla)$ to the disc $\Delta$ satisfying the following two properties
\begin{enumerate}
\item $\overline \nabla: \overline{\mc{H}} \to \overline{\mc{H}} \otimes \Omega^1_{\Delta}(\log 0)$ has logarithmic poles;
\item The eigenvalues $\lambda$ of $\mathrm{Res}_0 (\overline \nabla) \in \mc{E}nd (\overline{\mc{H}}_0)$ satisfy $0\le \re (\lambda)<1$.
\end{enumerate}
\end{thm}
An extension of an algebraic differential equation $\mc{H}$ as in the Theorem is called the \emph{canonical extension}. For example, if $(\mc{H}, \nabla)=(\mathcal{O}_{\Delta^*}, d)$, then the trivial extension $(\overline \mc{H}=\mathcal{O}_{\Delta}, \overline \nabla=d)$ has no poles and is the canonical extension.
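To see how condition (2) singles out one extension among many, here is a standard rank one illustration: take $(\mc{H},\nabla)=(\mathcal{O}_{\Delta^*},\, d+\alpha\tfrac{dz}{z})$ with $0\le \re(\alpha)<1$. The trivial extension $\mathcal{O}_{\Delta}$, with the same connection, has residue $\alpha$ at the origin and is therefore the canonical extension; replacing the trivializing section $e$ by $z\,e$ gives another logarithmic extension, but its residue is $\alpha+1$, which violates (2):
\[
\overline\nabla(z\,e) = dz\otimes e + z\,\overline\nabla e = (\alpha+1)\,(z\,e)\,\frac{dz}{z}.
\]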
\begin{thm}[\cite{Katz71}, VII] \label{Katz monodromy theorem}
Let $\overline{Z} \to \overline{B}$ be as above, where $D=\overline{Z} \setminus Z$ is a reduced divisor with normal crossing. Then for every $b \in \overline B \setminus B$, the extension $(R^if_* \Omega^\bullet_{Z|B}(\log D), \overline \nabla)$, with $\overline \nabla$ as in (\ref{extending nabla}), of $(R^if_* \Omega^\bullet_{Z|B}, \nabla)$ satisfies the assumptions of Theorem \ref{deligne} with the eigenvalues of the residue $\mathrm{Res}_b (\overline \nabla)$ equal to zero. In particular, $R^if_* \Omega^\bullet_{Z|B}(\log D)$ is the canonical extension of $R^if_* \Omega^\bullet_{Z|B}$.
\end{thm}
\begin{cor} \cite[Cor. 11.18]{Peters-Steenbrink} \label{monodromy unipotent}
The monodromy of a one parameter family degenerating to a reduced normal crossing central fiber is unipotent.
\end{cor}
\begin{proof}
This follows immediately from (\ref{monodromy and residue}).
\end{proof}
Under these circumstances we say, by abuse of notation, that $R^{i} f_* \Omega^{\ge p}_{\overline{Z}|\overline{B}}(\log D)$ and $R^qf_* \Omega^p_{\overline{Z}|\overline{B}}(\log D)$ are the \emph{canonical extensions} of $R^if_* \Omega^{\bullet\ge p}_{Z|B}$ and $R^qf_* \Omega^p_{Z|B}$, respectively.
Notice that we can say ``canonical extension'' also for $\mc{F}^p(\overline{\mc{H}}^i)$ and for $\mc{G}r^p(\mc{H}^i)$ because such extensions are uniquely determined by the canonical extension. Indeed, let $j: B \to \overline{B}$ be the open immersion, let $\mc{H}$ be a vector bundle on $B$, and let $E\subset \mc{H}$ be a sub-bundle. Suppose we are given a vector bundle extension $\overline{\mc{H}}$ of $\mc{H}$ on the whole of $\overline{B}$. Any extension of $E$ to $\overline{B}$ as a sub-bundle of $\overline{\mc{H}}$ is always contained in the saturation of $\overline{\mc{H}} \cap j_*E$ in $\overline{\mc{H}}$, and thus has to be isomorphic to the saturation itself. In particular, since the extension of $E$ as a vector sub-bundle of $\overline{\mc{H}}$ is unique, so is the extension of the quotient $\mc{H}/E$.
We now go back to our situation, applying these remarks to the families $\overline{\mathcal C} \to \overline{B}$ and $\overline{\mathcal J}\to \overline{B}$.
\begin{proof}[Proof of Proposition \ref{degenerations}]
Our aim is to prove that the natural isomorphism $R^i q_* \mathcal O_{\overline{\mathcal J}} \res{B} \cong \wedge^i R^1 p_* \mathcal O_{\overline{\mathcal C}}\res{B}$ extends over $\overline{B}$.
As should be clear by now, we will show this by using the canonical extensions of the VHS associated to the families $p: \overline{ \mathcal C} \to \overline{ B}$ and $q: \overline{\mathcal J} \to \overline{B}$. Indeed, by Proposition \ref{MRV on compactified Jac} they both have singular fibers that are reduced and normal crossing, so that we can apply the theory of degeneration of Hodge bundles. Set
\[
A= \overline B \setminus B, \quad \text{ and } \quad \overline{\mathcal C}_A=p^{-1}(A), \,\, \overline{\mathcal J}_A=q^{-1}(A).
\]
The sheaves
\[
\begin{aligned}
\overline{\mc{H}}_{\overline{\mathcal J}}^1&= R^1 q_* \Omega^{\bullet}_{\overline{\mathcal J}|\overline{B}}(\log ( \overline{\mathcal J}_A))\\
\overline{\mc{H}}_{\overline{\mathcal C}}^1&= R^1 p_* \Omega^{\bullet}_{\overline{\mathcal C}|\overline{B}}(\log ({\overline{\mathcal C}}_A))
\end{aligned}
\]
both extend $R^1q_* \mathbb{C}\otimes {\mathcal O}_B\cong {R^1p_*\mathbb{C}\otimes {\mathcal O}_B}$, and by Theorem \ref{Katz monodromy theorem} they are both isomorphic to the canonical extension. Hence, there is an isomorphism
\begin{equation} \label{hX e hC}
\overline{\mc{H}}_{\overline{\mathcal J}}^1 \cong \overline{\mc{H}}_{\overline{\mathcal C}}^1,
\end{equation}
which extends the existing isomorphism over $B$. As a consequence, we also get an isomorphism of the canonical extensions of $\mc{F}^1(\mc{H}_{\overline{\mathcal J}}^1)$ and $\mc{F}^1(\mc{H}_{\overline{\mathcal C}}^1)$, i.e.,
\[
\mc{F}^1(\overline{\mc{H}}_{\overline{\mathcal J}}^1)\cong \mc{F}^1(\overline{\mc{H}}_{\overline{\mathcal C}}^1),
\]
which in turn implies that there is an isomorphism of the first graded pieces
\[
\mc{G}r^0(\overline{\mc{H}}_{\overline{\mathcal J}}^1)\cong \mc{G}r^0(\overline{\mc{H}}_{\overline{\mathcal C}}^1),
\]
i.e., an isomorphism
\[
R^1q_* {\mathcal O}_{\overline{\mathcal J}}\cong R^1p_*{\mathcal O}_{\overline{\mathcal C}}.
\]
We now want to prove that
\[
R^gq_* {\mathcal O}_{\overline{\mathcal J}}\cong \wedge^g R^1p_*{\mathcal O}_{\overline{\mathcal C}}.
\]
A connection $\nabla$ on a vector bundle $\mc{H}$ naturally induces connections, denoted by $\nabla_j$, on all the exterior powers $\wedge^j \mc{H}$ of $\mc{H}$ by setting $\nabla_j(h_{i_1} \wedge \dots \wedge h_{i_j})= \sum \pm h_{i_1} \wedge \dots \wedge \nabla(h_{i_k}) \wedge \dots \wedge h_{i_j}.$ By construction, if $(\mc{H}, \nabla)$ admits an extension $(\overline{\mc{H}}, \overline \nabla)$ with only logarithmic poles, then so does $(\wedge^j \mc{H}, \nabla_j)$, as we can set $\overline {( \wedge^j \mc{H})}=\wedge^j \overline{\mc{H}}$ with the obvious definition for $\overline \nabla_j$. This also shows that if the eigenvalues of the residue of $\overline \nabla$ are zero, then the same is true for the eigenvalues of $\mathrm{Res}(\overline \nabla_j)$ (indeed, both operators will be nilpotent). In particular, the sheaf $\wedge^j \overline{\mc{H}}$ is the canonical extension of $\wedge^j \mc{H}$.
Since $\overline{\mathcal J} \to \overline{B}$ and $\overline{\mathcal C} \to \overline{B} $ have normal crossing boundary, by Theorem \ref{Katz monodromy theorem}, by the discussion above, and by uniqueness of the canonical extension we find
\begin{equation}
\wedge^i \, \overline{\mc{H}}^1_{\overline{\mathcal J}} \cong \wedge^i \, \overline{\mc{H}}^1_{\overline{\mathcal C}}.
\end{equation}
Moreover, since $\mathcal J \to B$ is a family of abelian varieties, we have a natural isomorphism of VHS $\wedge^g \mc{H}^1_{\mathcal J} \cong \mc{H}^g_{\mathcal J}$. Notice that the Hodge filtration on $\mc{H}^g_{\mathcal J}$ is simply the exterior power of the filtration on $\mc{H}^1_{\mathcal J}$. Using again the result of Katz, we know that
\[
\overline{\mc{H}}^g_{\overline{\mathcal J}}:=R^g q_* \Omega^{\bullet}_{\overline{\mathcal J}|\overline{B}}(\log \overline{\mathcal J}_A)
\]
is also the canonical extension. It follows that,
\[
\wedge^g \overline{\mc{H}}^1_{\overline{\mathcal C}} \cong \overline{\mc{H}}^g_{\overline{\mathcal J}}.
\]
This induces an isomorphism of the respective Hodge filtrations and thus also of the respective graded pieces.
Since $\mc{G}r^0(\wedge^g\mc{H}^1_{\mathcal {J}})\cong \wedge^g \mc{G}r^0(\mc{H}^1_\mathcal J)$, we conclude
\[
R^gq_* {\mathcal O}_{\overline {\mathcal {J}}}\cong \wedge^g R^1p_*{\mathcal O}_{\overline{\mathcal {C}}},
\]
and the proposition is proved.
\end{proof}
The referee pointed out that this proposition can also be proved using \cite[Cor B]{MRVFMAI}.
\section{The second Betti number} \label{second betti number}
This section is devoted to calculating the second Betti number of the relative compactified Jacobian $N$ (assumptions as in Theorem \ref{thm smooth}). We will assume that $|C|$ satisfies Assumption \ref{assumption} (recall that this assumption holds for primitive linear systems, in particular, it holds also for hyperelliptic linear systems). The strategy is to compare this cohomology group with that of the universal family of curves in the linear system and, in fact, we will prove that the two groups have the same dimension. In the whole section, unless otherwise stated, cohomology should be understood with complex coefficients.
As usual, let $\chi$ be a non-zero integer, consider the Mukai vector
\[
w=(0, [C], \chi),
\]
and assume that $v=(0, D, 2\chi)$ is primitive. Let $A$ be an ample line bundle on $T$ such that $H=f^*A$ is $v$-generic and set
\[
N=N_{w,A}, \quad \text{ and } Y=\mathbb{P}hi(N) \subset M=M_{v,H}.
\]
\begin{thm} \label{betti N}
Let $|C|$ be a linear system of genus $g \ge 3$ on a general Enriques surface $T$, and let $N$ be as above. Suppose that $|C|$ satisfies Assumption \ref{assumption}. Then
\[
h^2(N)=11.
\]
\end{thm}
The case when $|C|$ has genus $2$ is treated at the end of this section. We also recall that, thanks to Prop. 4.4 of \cite{yoshioka-enriques}, Assumption \ref{assumption} is satisfied in many cases, for example in low genus and for primitive linear systems.
The proof of the Theorem uses the long exact sequence in cohomology associated to the pair $(N, N_U)$, as done in \cite{Rapagnetta} (for a linear system of curves on a K3 surface), and then relies on the comparison of the local systems associated to the family of curves and its relative compactified Jacobian.
As usual, we denote by $\mathcal C \subset |C| \times T$ the universal family of curves. Consider the second projection
\[ \label{C to T}
p\colon \mathcal C \to T,
\]
which is a fibration in $\mathbb P^{g-2}$'s outside of the base locus of $|C|$, over which the fiber is isomorphic to $\mathbb P^{g-1}$. Using this we easily see that
\begin{equation} \label{b2 C}
H^1(\mathcal C)=0, \quad \text{ and that } \quad H^2(\mathcal C)= \left\{\begin{aligned}
&\mathbb{C}^{11} \,\,\,\, \text{if} \,\,\,|C| \text{ is not hyperelliptic},\\
& \mathbb{C}^{13} \,\,\,\, \text{if} \,\,\,|C| \text{ is hyperelliptic}.
\end{aligned}
\right.
\end{equation}
Recall that $U \subset |C|$ denotes the locus parametrizing smooth curves. Notice that if $|C|$ is hyperelliptic then $U$ is strictly contained in the locus $U'$ parametrizing smooth fibers of $\nu: N \to |C|$, since the general fiber of $\nu$ over the two components $\Delta_1$ and $\Delta_2$ is smooth.
\begin{lemma} \label{H1} Let $|C|$ be a genus $g \ge 2$ linear system on a general Enriques surface $T$, and let $k$ be the number of irreducible components of $\Delta$. Then $H^1(\mathcal C_U)\cong H^1(N_U)\cong H^1(U)=\mathbb{C}^{k-1}$.
\end{lemma}
\begin{proof}
The equality $H^1(U)=\mathbb{C}^{k-1}$ is well known, see for example \cite[Prop. 1.3]{Dimca}. Since $q: \mathcal C_U \to U$ is a smooth morphism,
\[
H^1(\mathcal C_U)\cong H^0(U, R^1q_* \mathbb{C})\oplus H^1(U),
\]
so we only need to show that $H^0(U, R^1q_* \mathbb{C})=0$. This follows from the invariant cycle theorem \cite{DeligneHodgeII}, which in our setting asserts that
\[
H^0(U, R^1q_* \mathbb{C})=H^1(C_t)^{\inv}=\im[H^1(\mathcal C) \to H^1(\mathcal C_U)],
\]
where $H^1(C_t)^{\inv}$ denotes the monodromy invariant part of the first cohomology of a smooth curve $C_t$. Since $H^1(\mathcal C)=0$, this gives the required vanishing. To finish the proof we only need to invoke Lemma \ref{higher direct images different degree}, which guarantees that
\[
R^1q_* \mathbb{C}_{\mathcal C}\res{U}=R^1\nu_* \mathbb{C}_N \res{U}.
\]
\end{proof}
\begin{rem}
By the results of Subsection \ref{linear systems enriques} (cf. Corollary \ref{discriminant locus irr} and Proposition \ref{discriminant locus}), $k=1$ unless either $|C|$ is hyperelliptic, in which case $k=4$, or $|C|$ is of genus $3$ and defines a degree $4$ morphism to $\mathbb P^2$ as in case $(1)$ of Proposition \ref{Verra}, in which case $k=37$.
\end{rem}
We will now consider the long exact sequences in cohomology for the pairs $(N, N_U)$ and $(\mathcal C, \mathcal C_U)$. Set
\[
\begin{aligned}
j=\#\text{ of irreducible components of } N_{\Delta},\\
\ell=\#\text{ of irreducible components of } \mathcal C_{\Delta}.
\end{aligned}
\]
So, for example, if $|C|$ is non--hyperelliptic then $k=j=\ell$. Indeed, by Proposition \ref{Verra} the general point of every component of the discriminant parametrizes irreducible curves and hence the preimage in $N$ of every component of the discriminant is irreducible. In Corollary \ref{discriminant locus irr} we remarked that in the non--hyperelliptic case $k=1$, unless $|C|$ is of genus $3$, in which case $k=37$. If $|C|$ is hyperelliptic, of genus $g \ge 3$, then by Proposition \ref{discriminant locus} and Corollary \ref{number of components}, $j=5$ and $\ell=7$.
\begin{lemma} \label{H2} There are exact sequences
\begin{equation} \label{relative cohomology N}
\begin{aligned}
0 \to \mathbb{C}^{j-k+1} \to H^2(N) \to H^2(N_U),\\
0 \to \mathbb{C}^{\ell-k+1} \to H^2(\mathcal C) \to H^2(\mathcal C_U).
\end{aligned}
\end{equation}
\end{lemma}
\begin{proof}
By the long exact sequence in cohomology of pairs and by the previous Lemma, we only need to show that $H^2(N, N_U)=\mathbb{C}^{j}$ and that $H^2(\mathcal C, \mathcal C_U)=\mathbb{C}^\ell$. By Poincar\'e-Lefschetz duality (\cite{Spanier} Chapter 6, Section \S 2 Thm 17 or also \cite{Peters-Steenbrink} Thm B.28),
\[
H_i(N, N_U) \cong H^{2n-i}(N_\Delta).
\]
Letting $S(N_\Delta)$ be the singular locus of $N_\Delta$ and setting $i=2$ we get, since the real codimension of $S(N_\Delta)$ in $N_\Delta$ is greater or equal to $2$,
\begin{equation} \label{relative cohomology}
H^{2n-2}(N_\Delta)\cong H^{2n-2}(N_\Delta, S(N_\Delta)) \cong H_0(N_\Delta\setminus S(N_\Delta))\cong \mathbb{C}^j,
\end{equation}
where the second to last isomorphism is again given by Poincar\'e-Lefschetz duality\footnote{\cite{Spanier}, Thm 19, plus the fact that the pair $(N_\Delta, S(N_\Delta))$ is taut in $N$.} and the last isomorphism holds because by assumption $N_\Delta$ has $j$ irreducible components. The same argument applies for showing that $H^2(\mathcal C, \mathcal C_U)=\mathbb{C}^\ell$.
\end{proof}
\begin{cor} \label{kernel of N to NU}
The dimension of the kernel of the natural morphism $H^2(N) \to H^2(N_U)$ is equal to $1$ if $|C|$ is not hyperelliptic and equal to $2$ if $|C|$ is hyperelliptic.
\end{cor}
\begin{rem}
This corollary and the lemma before it are the places where we use Assumption \ref{assumption}. Clearly, if this assumption does not hold, the number of irreducible components of $N_\Delta$ cannot be computed in the same way.
\end{rem}
Recall that the degree $i$ cohomology groups of a smooth quasi-projective variety $Z$ are endowed with a canonical mixed Hodge structure (MHS for short) \cite{DeligneHodgeII} of weight $\ge i$. Moreover, if $\overline Z$ is a smooth projective compactification of $Z$, by Corollary 3.2.17 of \cite{DeligneHodgeII} we have
\[
W_i H^i(Z)=\im [ H^i(\overline Z) \to H^i(Z)]
\]
where $W_i H^i(Z)$ denotes the weight $i$ part of the MHS on $H^i(Z)$.
Applying this to $N_U$ and $\mathcal C_U$ we find that there are short exact sequences
\[
\begin{aligned}
0 \to \mathbb{C}^{j-k+1} \to H^2(N) \to W_2H^2(N_U) \to 0,\\
0 \to \mathbb{C}^{\ell-k+1} \to H^2(\mathcal C) \to W_2H^2(\mathcal C_U) \to 0.
\end{aligned}
\]
The theorem will follow once we prove the following
\begin{prop} \label{H2 MHS}
There is an isomorphism of MHS
\[
H^2(N_U) \cong H^2(\mathcal C_U)
\]
\end{prop}
\begin{proof}[Proof of Theorem \ref{betti N}]
By (\ref{b2 C}) we see that $W_2H^2(\mathcal C_U)=W_2(H^2(N_U))$ has dimension $10$ in the non--hyperelliptic case and $9$ in the hyperelliptic case, while by Corollary \ref{kernel of N to NU} we see that $j-k+1$ is equal to $1$ in the non--hyperelliptic case and to $2$ in the hyperelliptic case. In either case, we see that $\dim H^2(N)=11$.
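Spelled out, the two cases give the same count:
\[
\dim H^2(N)=\dim W_2H^2(N_U)+(j-k+1)=
\left\{\begin{aligned}
&10+1 \quad \text{if } |C| \text{ is not hyperelliptic},\\
&9+2 \quad\,\, \text{if } |C| \text{ is hyperelliptic},
\end{aligned}\right.
\]
and both totals equal $11$.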
\end{proof}
The proof of Proposition \ref{H2 MHS} will take the rest of this section. We start with the following general statement
\begin{prop} \label{J and J' MHS}
Let $p:\mathcal C \to B$ be a family of smooth connected curves of genus $g$ over a smooth quasi--projective variety $B$. For every pair of integers $d$ and $d' $ let $q: \mathcal J=\mathcal J_d \to B$ and $q': \mathcal J'=\mathcal J_{d'} \to B$ be the relative Jacobians of degree $d$ and $d'$, respectively. There exist cycles $Z \in CH^g(\mathcal J \times_B \mathcal J')_\mathbb{Q}$ and $P \in CH^2(\mathcal C \times_B \mathcal J)_\mathbb{Q}$ inducing
\begin{enumerate}
\item natural isomorphisms $[Z]_*: R^k q_* \mathbb{Q}_{\mathcal J} \cong R^k q'_* \mathbb{Q}_{\mathcal J'}$ of local systems for every $k$;
\item an isomorphism of MHS $[Z]_*: H^k(\mathcal J) \cong H^k(\mathcal J')$ for every $k$.
\item natural morphisms $[P]_*: R^k q_* \mathbb{Q}_{\mathcal J} \to R^k p_* \mathbb{Q}_{\mathcal C}$ of local systems for every $k$ (isomorphism for $k=1$);
\item a morphism of MHS $[P]_*: H^k(\mathcal J) \to H^k(\mathcal C)$ for every $k$.
\end{enumerate}
Moreover, the morphisms of MHS in $(2)$ and $(4)$ are compatible with smooth base change and with the Leray filtrations of the two sides.
\end{prop}
\begin{proof} Let $\Sigma \subset \mathcal C$ be a multisection of the family and let $\varphi: \Sigma \to B$ be the induced morphism. Let $r$ be its degree. If we base change $\mathcal C$ to $\Sigma$, there is a tautological section and hence all relative Jacobians are isomorphic. In particular, we can find the graph of an isomorphism $\Gamma \subset (\mathcal J\times_B {\Sigma}) \times_\Sigma (\mathcal J'\times_B \Sigma)$. Let $\xi:(\mathcal J\times_B {\Sigma}) \times_\Sigma (\mathcal J'\times_B \Sigma) \to \mathcal J\times_B \mathcal J'$ be the natural projection and set
\[
Z:= \ff{1}{r} \xi_*\Gamma \in CH^g(\mathcal J \times_B \mathcal J')_\mathbb{Q}.
\]
This cycle can be viewed also as an element of $H^0(B, R^{2g} (q,q')_* \mathbb{Q})=\oplus H^0(R^{2g-k}q_* \mathbb{Q}_{\mathcal J} \otimes R^k q'_* \mathbb{Q}_{\mathcal J'})$, where $(q, q'): \mathcal J\times_B \mathcal J' \to B$ is the natural morphism. This defines for every $k$ an element in $H^0(B, R^{2g-k}q_* \mathbb{Q}_{\mathcal J} \otimes R^k q'_* \mathbb{Q}_{\mathcal J'})= Hom_B(R^k q_* \mathbb{Q}_{\mathcal J} , R^k q'_* \mathbb{Q}_{\mathcal J'})$, hence a morphism
\[
[Z]_*: R^k q_* \mathbb{Q}_{\mathcal J} \to R^k q'_* \mathbb{Q}_{\mathcal J'}
\]
which, by \cite[Lemma 5.4]{Arapura05}, is the composition
\begin{equation} \label{lemma arapura}
R^k q_* \mathbb{Q}_{\mathcal J} \stackrel{p_1^*}{\to} R^{k} (q,q')_* \mathbb{Q} \stackrel{\cup Z}{\to} R^{k+2g} (q,q')_* \mathbb{Q} \stackrel{{p_2}_*}{\to} R^k q'_* \mathbb{Q}_{\mathcal J'},
\end{equation}
where $p_1$ and $p_2$ are the first and second projection from $ \mathcal J\times_B \mathcal J' $. To show this is an isomorphism let us look at the stalks of $[Z]_*$.
Let $b \in B$ be a point and let $\sigma \in \Sigma$ be such that $\varphi(\sigma)=b$.
By (\ref{lemma arapura}), the stalk at $b$ of the morphism $[Z]_*$ is precisely the correspondence $[Z_b]_*$ induced by the cycle $Z_b=(\ff{1}{r}\xi_*\Gamma)_b$. To understand what this is, recall that by construction, the isomorphism $[\Gamma_\sigma]_*: H^k(\mathcal J_b) \to H^k(\mathcal J'_b)$ is the isomorphism in cohomology induced by
\[
\begin{equation}gin{aligned}
\mathcal J_b &\to \mathcal J'_b,\\
L &\mapsto L \otimes \mathcal O_{\mathcal C_b}((d'-d)\sigma ).
\end{aligned}
\]
As in Lemma \ref{higher direct images different degree} we can see that $[\Gamma_\sigma]_*$ is independent of the point $\sigma$ for $\sigma \in \varphi^{-1}(b)$.
Since $[\Gamma_\sigma]_*$ is independent of $\sigma$, this implies that $[Z_b]_*=r[\ff{1}{r}\Gamma_\sigma]_*: H^k(\mathcal J_b) \to H^k(\mathcal J'_b)$. In particular, $[Z]_*$ is an isomorphism.
Under the cycle map $CH^g(\mathcal J \times_B \mathcal J')_\mathbb{Q} \to H^{BM}_{2\dim B+2g}(\mathcal J \times_B \mathcal J') $ we can also view $Z$ as a class in Borel--Moore homology. This defines a map
\[
\begin{aligned}
{[Z]_*}: H^k(\mathcal J) &\to H^k(\mathcal J')\\
\alpha & \mapsto {p_2}_*( [Z] \cup p_1^*(\alpha))
\end{aligned}
\]
By compatibility of MHS with cup--product, this is a morphism of MHS (e.g. see \cite[\S 6.3]{Peters-Steenbrink}). Lemmas 5.2 and 5.3 of \cite{Arapura05} show that the Leray filtration is compatible both with cup product and with pushforward under smooth projective morphisms, such as $p_2$. This shows (cf. \cite{Arapura05}, pg. 586) that $[Z]_*$ is compatible with the Leray filtrations on both $H^k(\mathcal J)$ and $H^k(\mathcal J')$ and hence induces morphisms
\begin{equation} \label{}
H^i(B, R^j q_* \mathbb{Q}_{\mathcal J} ) \to H^i(B, R^j q'_* \mathbb{Q}_{\mathcal J'} ).
\end{equation}
which by $(1)$ are isomorphisms.
This shows that $(2)$ is an isomorphism as well.
For the last two statements, we may consider the cycle $P \in CH^2(\mathcal C \times_B \mathcal J)_\mathbb{Q}$ obtained by considering the Poincar\'e line bundle on $(\mathcal C\times_B {\Sigma}) \times _\Sigma (\mathcal J_r \times_B \Sigma)$, pushing it forward to $\mathcal C \times_B \mathcal J_r$ and then dividing by $r$. With this definition, the proof of $(3)$ and $(4)$, and of compatibility with the Leray filtrations, follows from the general theory as in $(1)$ and $(2)$.
\end{proof}
\begin{proof}[Proof of Proposition \ref{H2 MHS}]
By the last statement of Proposition \ref{J and J' MHS}, there is a morphism of MHS
\[
\varphi: H^2(N_U) \to H^2(\mathcal C_U)
\]
which is compatible with the Leray filtrations of $N_U \to U$ and $\mathcal C_U \to U$. To show that $\varphi$ is an isomorphism, it is sufficient to show that the natural morphisms of local systems of Proposition \ref{J and J' MHS} induce isomorphisms
\begin{equation} \label{iso local systems}
H^i(U, R^j\nu_* \mathbb{C}_N) \cong H^i(U, R^jq_* \mathbb{C}_{\mathcal C}).
\end{equation}
Since for $j=1$ there is an isomorphism
\[
R^1 \nu_* \mathbb{C}_{N_U}\cong R^1 p_* \mathbb{C}_{\mathcal C_U},
\]
we only need to prove (\ref{iso local systems}) for $(i,j)=(0,2)$. By Proposition \ref{J and J' MHS} there is a (non--zero) morphism of local systems $R^2\nu_* \mathbb{C}_{N_U} \to R^2 p_* \mathbb{C}_{\mathcal C_U}$, so we only have to show that it induces an isomorphism at the level of global sections. By the invariant cycle theorem we know that
\[
H^0(U, R^2\nu_* \mathbb{C}_{N_U})=H^2(N_t, \mathbb{C})^{\inv}, \quad \text{ and } \quad H^0(U, R^2 p_* \mathbb{C}_{\mathcal C_U})=H^2(C_t, \mathbb{C})^{\inv}.
\]
where, since $\mathcal C_U \to U$ is a family of smooth connected curves, we know that $H^0(U, R^2 p_* \mathbb{C}_{\mathcal C_U})$ is one dimensional. So we only have to prove that $H^0(U, R^2\nu_* \mathbb{C}_{N_U})$, too, is one dimensional. Let
\[
\rho: \pi_1(U) \to \Aut(H^1(C_t, \mathbb{C}))
\]
be the monodromy representation. By Proposition \ref{irreducible monodromy non hyp} below, this representation is irreducible.
Since $\rho$ preserves the symplectic pairing $(\cdot,\cdot)$, the isomorphism $w: H^1(C_t, \mathbb{C}) \cong H_1(C_t, \mathbb{C})$ induced by $(\cdot,\cdot)$ is $\rho$-equivariant. By composing with $w$, it follows that any $\rho$-invariant element of $ \wedge^2H^1(C_t, \mathbb{C})$ can be thought of as a $\rho$-invariant morphism
\[
\varphi: H^1(C_t, \mathbb{C}) \to H^1(C_t, \mathbb{C}).
\]
By Schur's Lemma, $\varphi=\lambda \id$ for some $\lambda$ in $\mathbb{C}$, and hence
\[
( \wedge^2H^1(C_t, \mathbb{C}))^{\inv}\cong \mathbb{C}
\]
is one--dimensional, generated by the class of the intersection pairing (viewed as the theta divisor of $N_t$ via the natural isomorphism $\wedge^2H^1(C_t, \mathbb{C}) \cong H^2(N_t, \mathbb{C})$).
\end{proof}
\begin{prop} \label{irreducible monodromy} \label{irreducible monodromy non hyp}
Let $|C|$ be a linear system of genus $g \ge 2$ on a general Enriques surface $T$, and let $t \in U \subset |C|$ be a point. The monodromy representation
\begin{equation}
\rho: \pi_1(U) \to \Aut(H^1(C_t, \mathbb{C})),
\end{equation}
is irreducible.
\end{prop}
\begin{proof}
Let $N$ be the degree zero relative compactified Jacobian of $|C|$. By Corollary \ref{generated by vanishing cycles} we know that
the kernel of the natural morphism
\[
\pi_1(N_t)=H^1(C_t, \mathbb{Z}) \to \pi_1(N) \to 1,
\]
is generated by vanishing cycles. Hence so is $H^1(C_t, \mathbb{C})$ (we freely identify $H^1(C_t, \mathbb{C})$ with $H_1(C_t, \mathbb{C})$ via the monodromy invariant isomorphism defined by Poincar\'e duality).
If the discriminant locus $\Delta \subset |C|$ is irreducible, then we can conclude using Theorem 3.4 in \cite{Voisin2} which shows that the restriction of the monodromy representation to the subspace generated by the vanishing cycles is irreducible provided that $\Delta$ is irreducible. If not, we argue as follows.
Let
\[
\{c_i\},
\]
be the set of vanishing cycles associated to a set of generators $\{ h_i \}$ of $H=\pi_1(U)$ as in Subsection \ref{section vanishing cycles}.
To prove that there are no invariant subspaces, we argue by contradiction and suppose that there is a non-trivial invariant subspace
\[
F \subset H^1(C_t, \mathbb{C}).
\]
First of all, we check that the intersection pairing $(\cdot, \cdot)$ on $H^1(C_t, \mathbb{C})$ restricts to a symplectic pairing on $F$. Indeed, since the $\{c_i\}$ generate $H^1(C_t, \mathbb{C})$ and the intersection pairing is non-degenerate, for every non-zero $\beta \in F$ there exists an $i$ such that $(\beta, c_i) \neq 0$. Since $F$ is a $\rho$-invariant subspace, it follows that the image of $\beta$ under the Picard--Lefschetz monodromy transformation along $h_i$, which is $\rho_{h_i}(\beta)=PL_{h_i}(\beta)=-\beta+(\beta,c_{i})c_{i}$, lies in $F$, and thus
\[
c_{i} \in F.
\]
Hence $F$ and its orthogonal complement $F^\perp$, which is also monodromy invariant, are two symplectic vector spaces. Set
\[
2n=\dim F, \quad 2m=\dim F^\perp.
\]
The above argument also shows that every vanishing cycle $ c_i$ lies either in $F$ or in $F^\perp$. In particular, we can decompose
\[
R:=\ker[\pi_1(N_t) \to \pi_1(N)]=R_F\oplus R_{F^\perp},
\]
where $R_F$ and $R_{F^\perp}$ are the (non--degenerate) sublattices generated by the vanishing cycles that lie in $F$ and $F^\perp$ respectively.
Since $R\subset H^1(C_t, \mathbb{Z})$ has index two and $( \cdot, \cdot)$ is unimodular, it follows that the determinant of the intersection matrix for $R$ is $4$ (cf. \cite{BHPV} $\S$ 1.2). Since $R_F$ and $R_{F^\perp}$ are symplectic lattices, the determinants of their intersection matrices are squares and hence, up to switching $F$ and $F^\perp$, we may assume that one determinant is $1$ and the other is $4$. In particular, up to switching $F$ and $F^\perp$, we may assume that $R_F \subset H^1(C_t, \mathbb{Z})$ is primitive. Set $F_\mathbb{Z}:=R_F$ and $E_\mathbb{Z}:=(F_\mathbb{Z})^\perp$, where the orthogonal complement is taken in $H^1(C_t, \mathbb{Z})$ so that we have decomposed
\begin{equation}
H^1(C_t, \mathbb{Z})=F_\mathbb{Z} \oplus E_\mathbb{Z}
\end{equation}
as a direct sum of two primitive lattices.
Consider now the following commutative diagram,
\[
\xymatrix{
H^1(C_t, \mathbb{Z}) \ar@{^{(}->}[r]^j \ar@{^{(}->}[d] & H^1(C_t, {\mathcal O}_{C_t}) \ar[d]_D^\sim\\
H^1_{dR}(C_t, \mathbb{C}) \ar@{->>}[r]_p & H^{0,1}_{\overline \partial }(C_t)
}
\]
where the left hand side vertical arrow is the composition of the base change inclusion $H^1(C_t, \mathbb{Z})\subset H^1(C_t, \mathbb{C})$ and the De Rham isomorphism. The top horizontal arrow is given by the exponential sequence, the right hand side vertical arrow is the Dolbeault isomorphism, and the bottom arrow is the projection onto the Dolbeault group.
The two spaces
\[
F':=D^{-1}p(F) \quad \text{ and} \quad E':=D^{-1}p(E),
\]
contain the lattices $j(F_\mathbb{Z})$ and $j(E_\mathbb{Z})$, of rank respectively equal to $2n$ and $2m$ and are thus of dimension equal to $n$ and $m$ respectively. It follows that
\[
F_t:= F'/j(F_\mathbb{Z}), \quad \text{ and }E_t:= E'/j(E_\mathbb{Z}),
\]
are two smooth abelian varieties of dimensions equal to $n$ and $m$ respectively. They are both principally polarized since $F_\mathbb{Z}$ and $E_\mathbb{Z}$ are unimodular. In particular
\begin{equation} \label{decomposition ppav}
\Jac^0(C_t) \cong F_t \times E_t.
\end{equation}
However, since the intersection product on $H^1(C_t, \mathbb{Z})$ can be viewed, via the isomorphism $\wedge^2 H^1(C_t, \mathbb{Z}) \cong H^2(\Jac^0(C_t) , \mathbb{Z})$, as the theta divisor of $\Jac^0(C_t)$, it follows that (\ref{decomposition ppav}) is actually a decomposition as principally polarized abelian varieties. We have thus reached a contradiction since the Jacobian variety of a smooth curve is irreducible as a principally polarized abelian variety.
\end{proof}
\subsection{The genus two case} \label{genus two case}
When $|C|$ is a genus two linear system on a general Enriques surface $T$, we get a Calabi-Yau three-fold whose Hodge numbers are described by the following theorem.
\begin{thm} \label{genus two thm}
Let $|C|$ be a genus two linear system on a general Enriques surface $T$. Then $\nu: N \to \mathbb{P}^1=|C|$ has exactly $16$ singular fibers, each of which is a rank one degeneration of an abelian surface, $\pi_1(N)=\mathbb{Z}/(2)$, and the Hodge diamond of $N$ is the following:
\[
\begin{array}{cccccc}
&1&&\\
&0\,\,\,\,\,\,0&&\\
\,\,\,\,0&10&0\,\,\,\,&\\
1\,\,\,\,&10\,\,\,\,\,\,10&\,\,\,\,1&\\
\end{array}
\]
The singular fibers of the natural abelian surface fibration on $\widetilde N$ are of the same kind, and $\widetilde N$ has the same Hodge numbers.
\end{thm}
\begin{proof}
Since $N$ is smooth and $|C|$ is one-dimensional, the support morphism is flat and thus Theorem \ref{fundamental group} holds unconditionally.
As for the singular fibers, the claim follows from Proposition \ref{discriminant locus} together with two facts: the Jacobian of a union of two smooth curves meeting transversally in one point is smooth and projective, and the compactified Jacobian of an irreducible nodal curve is a rank one degeneration of an abelian variety. As for the second Betti number, the only difference is that now
\[
H^1(\mathcal C_U)\cong H^1(N_U)\cong H^1(U)=\mathbb{C}^{17}, \quad \text{ and } \quad H^2(\mathcal C)\cong \mathbb{C}^{12}.
\]
Moreover,
\[
\begin{aligned}
h^2(\widetilde N, \widetilde N_U)=h^2(Y, Y_U)=h^2(N, N_U)&=\#\text{ of irreducible components of } N_{\Delta}=18,\\
h^2(\mathcal C, \mathcal C_U)&=\#\text{ of irreducible components of } \mathcal C_{\Delta}=20.
\end{aligned}
\]
so that (notation as above) $\ell-k+1=3$ and $j-k+1=1$. Hence
\[
W_2(H^2( N_U))=W_2(H^2(\mathcal C_U))=\mathbb{C}^{9}
\]
and
\[
H^2(N)\cong \mathbb{C}^{10}.
\]
To compute the remaining Hodge number $h^{2,1}(N)$, it is sufficient to notice that $\chi_{top}(N)=0$, which follows from the fact that all the fibers of $N \to |C|$ have trivial topological Euler characteristic. Hence $b_3(N)=2b_2(N)+2=22$, and since $b_3(N)=2+2h^{2,1}(N)$ we conclude that $h^{2,1}(N)=10$.
\end{proof}
The fact that the second Betti number of this three-dimensional moduli space is $10$ is reminiscent of what happens in the case of K3 surfaces. Indeed, the second Betti number of the higher dimensional examples is equal to $23=b_2(K3)+1$, whereas that of the two dimensional moduli spaces is equal to $22$.
\end{document}
\begin{document}
\title[Shilnikov problem in Filippov dynamical systems]
{Shilnikov problem in Filippov\\ dynamical systems}
\author[D.D. Novaes and M.A. Teixeira]
{Douglas D. Novaes and Marco A. Teixeira}
\address{Departamento de Matem\'{a}tica, Universidade
Estadual de Campinas,\\ Rua S\'{e}rgio Buarque de Holanda, 651, Cidade Universit\'{a}ria Zeferino Vaz, 13083-859, Campinas, SP,
Brazil} \email{[email protected]} \email{[email protected]}
\subjclass[2010]{34A36,37C29,37H20}
\keywords{Filippov systems, sliding dynamics, sliding homoclinic orbits, Shilnikov homoclinic orbits, sliding Shilnikov orbits, piecewise linear differential systems}
\maketitle
\begin{abstract}
In this paper we introduce the concept of sliding Shilnikov orbits for $3$D Filippov systems. In short, such an orbit is a piecewise smooth closed curve, composed of Filippov trajectories, which slides on the switching surface and connects a Filippov equilibrium to itself, namely a pseudo saddle-focus. A version of Shilnikov's Theorem is provided for such systems. Particularly, we show that sliding Shilnikov orbits occur in generic one-parameter families of Filippov systems, and that arbitrarily close to a sliding Shilnikov orbit there exist countably infinitely many sliding periodic orbits. Here, no additional Shilnikov-like assumption is needed in order to get this last result. In addition, we show the existence of sliding Shilnikov orbits in discontinuous piecewise linear differential systems. As far as we know, the examples of Filippov systems provided in this paper are the first exhibiting such a sliding phenomenon.
\end{abstract}
\noindent{\bf
Piecewise smooth differential systems have become a frontier between mathematics and the sciences in general. The study of such systems contributes to the understanding of a wide range of phenomena in several areas of physics, engineering, biology, economy, etc \cite{BBCK,physDspecial}. The dynamics of piecewise smooth differential systems is ruled by the Filippov convention \cite{F}. In this case, they are called Filippov systems. In such a context, the trajectories are allowed to slide on the switching surface, giving rise to ``sliding phenomena'', which are inherent to Filippov systems. In this paper, a study is carried out on a nonlinear sliding phenomenon that we call a ``Shilnikov sliding orbit''. This phenomenon bears a striking resemblance to ``Shilnikov homoclinic orbits'' for smooth differential systems \cite{S1,S2,S3}. Our main result states a version of Shilnikov's Theorem for such orbits. More specifically, we show that arbitrarily close to a sliding Shilnikov orbit there exist countably infinitely many sliding periodic orbits. In the smooth context, this result is true under a certain assumption (Shilnikov condition). Here, no additional Shilnikov-like assumption is needed. Finally, we analyze a family of piecewise linear differential systems and we analytically show that a Shilnikov sliding orbit exists for such a family.}
\section{Introduction}
The study of piecewise smooth dynamical systems produces interesting mathematical challenges (see, for instance, \cite{T,NTZ}). These systems are widely used in several branches of science (see, for instance, \cite{BBCK,physDspecial} and the references therein). The present work focuses on the analysis of a nonlinear phenomenon that occurs in such systems, which bears a striking resemblance to the {\it Shilnikov homoclinic orbit} in smooth differential systems.
We start by defining the concept of {\it Shilnikov homoclinic orbit} for smooth vector fields. Consider a smooth three-dimensional vector field $X$ for which $p\in\mathbb{R}^3$ is a hyperbolic saddle-focus equilibrium. The hyperbolic saddle-focus has a two-dimensional invariant manifold $W^2,$ associated with the complex conjugate eigenvalues, $\lambda_{1,2}\in\mathbb{C},$ and a one-dimensional invariant manifold $W^1,$ associated with the real eigenvalue $\lambda_0\in\mathbb{R}.$ These two invariant manifolds have opposite stability.
A {\it Shilnikov homoclinic orbit} $\Gamma$ is a trajectory of $X$ connecting $p$ to itself, bi-asymptotically. Thus, $\Gamma\subset W^1\cap W^2.$ Under suitable genericity conditions, this orbit is a codimension one scenario and its unfolding depends on the {\it saddle quantity} $\sigma=\lambda_0+\mathrm{Re}(\lambda_{1,2}).$ We say that the {\it Shilnikov condition} {\bf (S)} holds when, among the eigenvalues, the pair of complex conjugate eigenvalues $\lambda_{1,2}$ is the nearest to the imaginary axis in the complex plane. Notice that, assuming $\lambda_0>0$ (resp. $\lambda_0<0$), the Shilnikov condition holds if, and only if, $\sigma>0$ (resp. $\sigma<0$).
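For a purely illustrative choice of eigenvalues (the numbers below are chosen only to fix ideas), say
\[
\lambda_0=1, \qquad \lambda_{1,2}=-0.3\pm 2i, \qquad \text{so that}\qquad \sigma=1+(-0.3)=0.7>0,
\]
the complex pair lies at distance $0.3$ from the imaginary axis while the real eigenvalue lies at distance $1$, so the Shilnikov condition {\bf (S)} holds, in agreement with the equivalence just stated.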
In \cite{S2}, Shilnikov showed that $\Gamma$ is isolated from periodic orbits provided that {\bf (S)} does not hold. In this case, at most one limit cycle bifurcates from $\Gamma$ when it is unfolded. Conversely, in \cite{S1}, assuming condition {\bf (S)}, Shilnikov proved the existence of countably infinitely many saddle periodic orbits in a neighborhood of $\Gamma$ and, in \cite{S3}, Shilnikov found chaotic dynamics near $\Gamma$ (see also \cite{T1,T2}). The interested reader is referred to \cite{survey}, where a very nice review of Shilnikov's contributions can be found.
In the theory of nonsmooth dynamical systems, the notion of solutions of a piecewise smooth differential equation expressed as
\begin{equation}\label{eq1intro}
x'=Z(x)=F_0(x)+\mathrm{sgn}(h_1(x))F_1(x)+\cdots+\mathrm{sgn}(h_k(x))F_k(x)
\end{equation}
is stated by the Filippov convention (see \cite{F}). In the above differential equation, $F_i,$ $i=0,1,\ldots,k,$ are smooth vector fields defined on an open subset $D\subset \mathbb{R}^3,$ and $h_i:D\rightarrow\mathbb{R},$ $i=1,2,\ldots,k,$ are smooth real functions having $0$ as a regular value and satisfying $h_i^{-1}(0)\cap h_j^{-1}(0)=\emptyset,$ for $i\neq j.$ As usual, $\Sigma=\cup_{i=1}^k h_i^{-1}(0)$ denotes the {\it switching surface}.
It is worthwhile to mention that Shilnikov homoclinic orbits have already been considered in the nonsmooth context. Indeed, in the earlier work of Tresser \cite{T2}, it is mentioned how to extend Shilnikov's Theorems to Lipschitz continuous piecewise smooth differential systems. In \cite{Med}, the Shilnikov homoclinic bifurcation was analytically studied in Chua's circuit model, which is a continuous piecewise linear differential system with three pieces. For this last system, the Shilnikov homoclinic connection and the associated strange attractors had already been numerically detected in \cite{gribov} and \cite{chua1,chua3,chua2}, respectively. Numerical arguments were also used in \cite{arneodo} to show the existence of Shilnikov homoclinic orbits in continuous piecewise linear differential systems with two pieces. Finally, in \cite{Carmona,LPT}, the existence of Shilnikov homoclinic orbits for continuous piecewise linear differential systems with two pieces was analytically proven. We emphasize that all the above-cited works deal with continuous piecewise smooth differential systems that admit a Shilnikov homoclinic orbit $\Gamma$ satisfying two main properties: {\bf (a)} $\Gamma$ is transversal to $\Sigma$ and {\bf (b)} the hyperbolic saddle-focus equilibrium is not contained in $\Sigma,$ that is, the vector field is smooth in a neighborhood of the equilibrium.
Under assumptions {\bf (a)} and {\bf (b)}, Shilnikov homoclinic orbits can also be considered for discontinuous piecewise smooth differential systems, and will be referred to as {\it crossing Shilnikov orbits}. In this case, the transversality between $\Gamma$ and $\Sigma$ implies that the dynamics in a neighborhood of $\Gamma,$ concerning the transition of the trajectories of \eqref{eq1intro} through the switching surface $\Sigma,$ is of crossing type. This means that the local trajectories of \eqref{eq1intro}, for points in $\Sigma,$ are trivially given by the concatenation of the trajectories defined on both sides of $\Sigma.$ So, in this neighborhood, the trajectories of \eqref{eq1intro} define a continuous flow. Moreover, since the vector field is smooth in a neighborhood of the equilibrium, one expects results similar to those for continuous vector fields, where the unfolding of $\Gamma$ depends on the Shilnikov condition {\bf (S)}.
In the Filippov context, special attention must be paid to some minimal sets contained in the switching surface $\Sigma$ which have no analogue in the smooth theory, the so-called {\it pseudo equilibria}. A pseudo equilibrium is a proper equilibrium of the well-known {\it Filippov sliding dynamics} defined on the switching surface (see Section \ref{sp} for a formal definition of the {\it sliding vector field} and {\it pseudo equilibrium}). The sliding dynamics gives rise to the definition of {\it sliding homoclinic orbit}, which is a trajectory, in the Filippov sense, sliding through the switching surface and connecting an equilibrium or a pseudo-equilibrium to itself. In \cite{Glendinning18}, Glendinning studied Shilnikov chaos emerging from sliding homoclinic orbits connecting a hyperbolic saddle-focus to itself. It was also shown that this kind of orbit bifurcates from some boundary equilibrium (see also \cite{Simpson18}). Here, we focus on the study of sliding homoclinic orbits connecting a hyperbolic {\it pseudo saddle-focus} to itself, which we call a {\it sliding Shilnikov orbit} (see Definition \ref{defshil}). The hyperbolic pseudo saddle-focus also has a two-dimensional invariant manifold $W^2 \subset\Sigma^s,$ associated with complex conjugate eigenvalues, and a one-dimensional stable invariant manifold $W^1.$ However, we shall see that the trajectories on $W^1$ reach the pseudo equilibrium in finite time (see Figure \ref{slidingshil}).
\begin{figure}[h]
\begin{center}
\begin{overpic}[width=10cm]{shil.pdf}
\put(66,42){$\Gamma$}
\put(51,-3){$\ell$}
\put(50.5,15){$p$}
\put(62.5,5){$q$}
\put(10,6){$W^2$}
\put(51,35){$W^1$}
\put(35,30){$\Sigma$}
\end{overpic}
\end{center}
\caption{Sliding Shilnikov homoclinic orbit $\Gamma$ connecting a hyperbolic pseudo saddle-focus $p$ to itself. Notice that $\Gamma$ has an entire segment of orbit contained in the switching surface $\Sigma$ and leaves it through the fold point $q.$ Generically, the existence of a fold point $q$ implies the existence of a whole curve $\ell\subset\Sigma$ of fold points.}\label{slidingshil}
\end{figure}
We emphasize that sliding Shilnikov orbits differ from Shilnikov homoclinic orbits, previously studied for nonsmooth differential systems, mainly in two aspects: first, for sliding Shilnikov orbits the pseudo equilibrium is contained in $\Sigma,$ whereas in all the previous cases the equilibrium is not contained in $\Sigma$; and second, analogous to the case addressed in \cite{Glendinning18}, sliding Shilnikov orbits have an entire segment of orbit contained in the switching surface $\Sigma$ and leave it through a quadratic contact point (fold point) between the vector field and $\Sigma,$ whereas the crossing Shilnikov orbits are transversal to $\Sigma.$
This paper has two main goals. The first consists in providing a version of Shilnikov's Theorem, regarding the existence of countably infinitely many periodic solutions, for Filippov systems admitting a sliding Shilnikov orbit $\Gamma.$ Second, we prove that sliding Shilnikov orbits exist in discontinuous piecewise linear differential systems by providing explicit examples.
The existence of countably infinitely many periodic solutions for Filippov systems admitting a sliding Shilnikov orbit will be obtained via topological mechanisms. More specifically, we shall apply the {\it Brouwer Fixed Point Theorem} to the first return map of $Z$ associated with $\Gamma,$ which is a one-dimensional map defined on a curve of fold points on the switching surface $\Sigma.$ We shall see that, in this case, no Shilnikov-like assumption is needed and that the hyperbolicity assumption on the pseudo-equilibrium can also be avoided (see Remark \ref{shilcond}). In addition, as far as we know, the examples of discontinuous piecewise linear differential systems provided in this paper are the first exhibiting such a sliding phenomenon.
This paper is organized as follows. Section \ref{sp} contains some basic notions and definitions on Filippov systems as well as the formal definition of a sliding Shilnikov orbit (see Definition \ref{defshil}). In Section \ref{mr}, we state our main results, Theorems \ref{t1} and \ref{t2}. In short, Theorem \ref{t1} claims that sliding Shilnikov orbits occur in generic one-parameter families of vector fields in $\Omega^r.$ In addition, any neighborhood of a Filippov system admitting a sliding Shilnikov orbit contains infinitely many topological equivalence classes of vector fields. Theorem \ref{t2} provides the existence of countably infinitely many sliding periodic orbits near a sliding Shilnikov orbit. Section \ref{proof} is devoted to the proofs of Theorems \ref{t1} and \ref{t2}. In Section \ref{lm}, we analyze some explicit examples of one-parameter families of piecewise linear differential systems, $Z_{\mu}\in\Omega^r.$ Theorem \ref{t3} shows that, for the critical value of the parameter $\mu=0,$ $Z_0$ exhibits a sliding Shilnikov orbit. Finally, Section \ref{cfd} contains some closing remarks.
\section{Filippov systems and sliding Shilnikov orbits}\label{sp}
In this section, the basic notions of Filippov systems and the definition of a sliding Shilnikov orbit are given. Let $U$ be an open bounded subset of $\mathbb{R}^3.$ We denote by $\mathcal{C}^r(K,\mathbb{R}^3),$ $K=\overline U,$ the set of all $\mathcal{C}^r$ vector fields $X:K\rightarrow \mathbb{R}^3$ endowed with the topology induced by the norm $\|X\|_r=\sup\{\|D^i X(\xi)\|:\,\xi\in K,\,i\in\{0,1,\ldots,r\}\}.$ Given a differentiable function $h:K\rightarrow\mathbb{R}$ having $0$ as a regular value, we denote by $\Omega_h^r(K,\mathbb{R}^3)$ the space of piecewise vector fields
\begin{equation}\label{omega}
Z(\xi)=\left\{\begin{array}{l}
X(\xi),\quad\textrm{if}\quad h(\xi)>0,\\
Y(\xi),\quad\textrm{if}\quad h(\xi)<0,
\end{array}\right.
\end{equation}
with $X,Y\in \mathcal{C}^r(K,\mathbb{R}^3).$ As usual, system \eqref{omega} is denoted by $Z=(X,Y)$ and the switching surface $h^{-1}(0)$ by $\Sigma.$ So, we are taking $\Omega_h^r(K,\mathbb{R}^3)=\mathcal{C}^r(K,\mathbb{R}^3)\times \mathcal{C}^r(K,\mathbb{R}^3)$ endowed with the product topology. When the context is clear, we shall refer to the sets $\Omega_h^r(K,\mathbb{R}^3)$ and $\mathcal{C}^r(K,\mathbb{R}^3)$ simply as $\Omega^r$ and $\mathcal{C}^r,$ respectively. We emphasize that \eqref{omega} is a local description of \eqref{eq1intro}.
Some regions on $\Sigma$ must be distinguished. The points on $\Sigma$ where both vector fields $X$ and $Y$ simultaneously point outward from or inward to $\Sigma$ define, respectively, the {\it escaping region} $\Sigma^e$ and the {\it sliding region} $\Sigma^s,$ and the interior of the complement of their union in $\Sigma$ defines the {\it crossing region} $\Sigma^c.$ The complement of the union of these regions consists of the {\it tangency} points of $X$ or $Y$ with $\Sigma.$ The points in $\Sigma^c$ satisfy $Xh(\xi)\cdot Yh(\xi) > 0,$ where $Xh$ denotes the derivative of the function $h$ in the direction of the vector field $X,$ that is, $Xh(\xi)=\langle \nabla h(\xi), X(\xi)\rangle.$ The points in $\Sigma^s$ (resp. $\Sigma^e$) satisfy $Xh(\xi)<0$ and $Yh(\xi) > 0$ (resp. $Xh(\xi)>0$ and $Yh(\xi) < 0$). Finally, the tangency points of $X$ (resp. $Y$) satisfy $Xh(\xi)=0$ (resp. $Yh(\xi)=0$).
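As a simple illustration, not related to the systems studied later, consider $h(x,y,z)=z$ and the constant vector fields $X(x,y,z)=(1,0,-1)$ and $Y(x,y,z)=(0,1,1).$ Then $Xh\equiv-1<0$ and $Yh\equiv 1>0,$ so every point of $\Sigma=\{z=0\}$ belongs to the sliding region $\Sigma^s,$ while $\Sigma^c=\Sigma^e=\emptyset$ and there are no tangency points.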
\begin{definition}\label{fold}
A tangency point $\xi\in\Sigma$ is called a {\bf visible fold} of
$X$ $($resp. $Y)$ if $X^2h(\xi)>0$ $($resp. $Y^2h(\xi)<0).$ Reversing the inequalities, the tangency point is called an {\bf invisible fold}. A visible/invisible fold point $\xi\in\Sigma$ of $X$ (resp. $Y$) is called a {\bf visible/invisible fold-regular point} if $Yh(\xi)>0$ (resp. $Xh(\xi)<0$).
\end{definition}
On the region $\Sigma^s\cup\Sigma^e,$ we define the {\it sliding vector field}
\begin{equation}\label{slisys}
\widetilde Z(\xi)=\dfrac{Y h(\xi) X(\xi)-X h(\xi) Y(\xi)}{Y h(\xi)- Xh(\xi)}.
\end{equation}
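For the constant toy example above, with $h(x,y,z)=z,$ $X=(1,0,-1),$ and $Y=(0,1,1),$ formula \eqref{slisys} gives, at every point of $\Sigma^s,$
\[
\widetilde Z=\dfrac{1\cdot(1,0,-1)-(-1)\cdot(0,1,1)}{1-(-1)}=\left(\tfrac{1}{2},\tfrac{1}{2},0\right),
\]
which is tangent to $\Sigma$: it is precisely the convex combination of $X$ and $Y$ whose third component vanishes. Again, this computation is only meant to illustrate \eqref{slisys} and is unrelated to the systems studied below.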
The local trajectory $\varphi_Z(t,\xi)$ of the discontinuous piecewise differential system $\dot \xi= Z(\xi)$ passing through a point $\xi\in\mathbb{R}^3$ is given by Filippov's convention (see \cite{F,GST}). Let $I\subset\mathbb{R}$ be a sufficiently small neighborhood of the origin. Denote by $\varphi_W(t,\xi)$ the trajectory of a vector field $W$ satisfying $\varphi_W(0,\xi)=\xi.$ Then, the Filippov convention is summarized as follows:
\begin{itemize}
\item[$(i)$] for $\xi\in U$ such that $h(\xi)>0$ (resp. $h(\xi)<0$), the local trajectory of $Z$ is defined as $\varphi_Z(t,\xi)=\varphi_X(t,\xi)$ (resp. $\varphi_Z(t,\xi)=\varphi_Y(t,\xi)$) for $t\in I$;
\item[$(ii)$] for $\xi\in\Sigma^c$ such that $(Xh)(\xi),(Yh)(\xi)>0,$ the local trajectory of $Z$ is defined as $\varphi_Z(t,\xi)=\varphi_Y(t,\xi)$ for $t\in I\cap\{t<0\}$ and $\varphi_Z(t,\xi)=\varphi_X(t,\xi)$ for $t\in I\cap\{t>0\}.$ For the case $(Xh)(\xi),(Yh)(\xi)<0,$ the definition is the same reversing time;
\item[$(iii)$] for $\xi\in\Sigma^s,$ the local trajectory is defined as $\varphi_Z(t,\xi)=\varphi_{\widetilde{Z}}(t,\xi)$ for $t\in I\cap\{t\geq 0\}$ and $\varphi_Z(t,\xi)$ is either $\varphi_X(t,\xi)$ or $\varphi_Y(t,\xi)$ or $\varphi_{\widetilde{Z}}(t,\xi)$ for $t\in I\cap\{t\leq 0\}.$ For the case $\xi\in\Sigma^e,$ the definition is the same reversing time.
\end{itemize}
For tangency points in $\partial\Sigma^c\cup\partial\Sigma^s\cup\partial\Sigma^e$ the definition of the local trajectory is more delicate. Here, we provide the definition for visible fold-regular points (see Definition \ref{fold}). Let $\xi\in\Sigma$ be a visible fold-regular point of $Z.$ Without loss of generality, assume that $\xi$ is a visible fold point of $X.$ In analogy with item $(iii)$ above, the local trajectory of $Z$ passing through $\xi$ is defined as $\varphi_Z(t,\xi)=\varphi_1(t,\xi)$ for $t\in I\cap\{t\leq 0\}$ and $\varphi_Z(t,\xi)=\varphi_2(t,\xi)$ for $t\in I\cap\{t\geq 0\},$ where $\varphi_1$ is either $\varphi_X$ or $\varphi_Y$ or $\varphi_{\widetilde{Z}}$ and $\varphi_2$ is $\varphi_X.$
An equilibrium $\xi^*\in\Sigma^{s,e}$ of the sliding vector field (that is, $\widetilde Z(\xi^*)=0$) is called a {\it pseudo equilibrium} of $Z.$ We say that $\xi^*$ is a {\it hyperbolic pseudo equilibrium} of $Z$ when it is a hyperbolic equilibrium of $\widetilde Z.$ In particular, if $\xi^*\in\Sigma^s$ (resp. $\xi^*\in\Sigma^e$) is an unstable (resp. stable) hyperbolic focus of $\widetilde Z,$
then we call $\xi^*$ a {\it hyperbolic saddle-focus pseudo equilibrium} or just a {\it hyperbolic pseudo saddle-focus}.
In order to study the orbits of the sliding vector field, it is convenient to define the normalized sliding vector field
\begin{equation}\label{norslisys}
\widehat Z(\xi)=(Y h(\xi)- Xh(\xi))\widetilde Z(\xi)=Y h(\xi) X(\xi)-X h(\xi) Y(\xi),
\end{equation}
which has the same phase portrait as $\widetilde Z,$ up to reversing the direction of the flow in the escaping region. Indeed, system \eqref{norslisys} is obtained by multiplying the sliding vector field \eqref{slisys} (a time rescaling) by the function $Y h(\xi)- Xh(\xi),$ which is positive (resp. negative) for $\xi\in\Sigma^s$ (resp. $\xi\in\Sigma^e$).
The next definition introduces the concept of a sliding Shilnikov orbit (see \cite{NovaesThesis15}).
\begin{definition}\label{defshil}
Let $Z=(X,Y)$ be a piecewise continuous vector field having a hyperbolic pseudo saddle-focus $p\in \Sigma^{s}$ $($resp. $p\in \Sigma^{e}),$ and let $q\in\partial\Sigma^s$ $($resp. $q\in\partial\Sigma^e)$ be a visible fold-regular point of $Z$ such that:
\begin{itemize}
\item[$(j)$] the backward (resp. forward) trajectory of $Z$ starting at $q$ follows the sliding vector field $\widetilde Z$ and converges to $p$ backward in time $($resp. forward in time$)$;
\item[$(jj)$] the forward (resp. backward) trajectory of $Z$ starting at $q$ intersects the switching surface only at crossing points and reaches $p$ in finite time $t_0>0$ $($resp. $t_0<0).$
\end{itemize}
Then, through $p$ and $q,$ a sliding loop $\Gamma$ is characterized. We call $\Gamma$ a {\it sliding Shilnikov orbit} $($see Figure \ref{slidingshil}$).$
\end{definition}
The next definition introduces the concept of $\Sigma$-equivalence of Filippov vector fields (see, for instance, \cite{T}). Of course, the notion of $\Sigma$-structural stability in $\Omega^r$ is naturally obtained.
\begin{definition}
Let $Z_1,Z_2\in\Omega^r.$ We say that $Z_1$ and $Z_2$ are $\Sigma$-equivalent if there exists a homeomorphism $h:U\rightarrow U$ satisfying $h(\Sigma)=\Sigma$ and sending orbits of $Z_1$ to orbits of $Z_2.$
\end{definition}
\begin{remark}\label{transv} Assume that $q\in\partial\Sigma^{e,s}$ is a visible fold-regular point of $Z.$ Then, the following properties hold (for more details, see \cite{T}):
\begin{itemize}
\item[$(i)$] there exists a neighborhood $U$ of $q$ such that $\ell=U\cap\partial\Sigma^{e,s}$ consists of visible fold-regular points;
\item[$(ii)$] the sliding vector field $\widetilde Z$ given in \eqref{slisys} is transverse to $\ell;$
\item[$(iii)$] there exists a neighborhood $V$ of $\ell$ such that $Z\big|_V$ is structurally stable.
\end{itemize}
\end{remark}
\section{Main results}\label{mr}
Our first main result shows that sliding Shilnikov orbits occur in generic one-parameter families of vector fields in $\Omega^r$ (see \cite{soto}). Furthermore, if $Z_0$ admits a sliding Shilnikov orbit, then any neighborhood $W\subset\Omega^r$ of $Z_0$ contains infinitely many topological equivalence classes of vector fields.
\begin{mtheorem}\label{t1}
Assume that $Z_0=(X_0,Y_0)\in\Omega^r$ $($with $r\geq 1)$ has a sliding Shilnikov orbit $\Gamma_0$ and let $W\subset \Omega^r$ be a small neighborhood of $Z_0.$ Then, there exists a $\mathcal{C}^1$ function $g:W\rightarrow\mathbb{R},$ having $0$ as a regular value, such that $Z\in W$ has a sliding Shilnikov orbit $\Gamma$ if, and only if, $g(Z)=0.$ Furthermore, any neighborhood $W\subset\Omega^r$ of $Z_0$ contains infinitely many $\Sigma$-equivalence classes of Filippov vector fields.
\end{mtheorem}
Theorem \ref{t1} is proved in Section 4.1.
\begin{remark}\label{rem}
As a consequence of Theorem \ref{t1}, $g^{-1}(0)$ is a codimension-one submanifold of $W.$ Hence, for each $Z^*\in g^{-1}(0),$ there exists a curve $Z^*_{\mu}\subset W,$ with $\mu\in\mathbb{R}$ taken in a small neighborhood of $0,$ which intersects $g^{-1}(0)$ transversally at $Z^*_{0}=Z^*.$ In particular, for $Z_0,$ we say that $Z_{\mu}$ is a splitting of the sliding Shilnikov orbit $\Gamma_0$ (see Figure \ref{unfold}).
\end{remark}
\begin{figure}[H]
\begin{center}
\begin{overpic}[width=14cm]{Bifurc.pdf}
\put(58,15){$\Gamma$}
\put(10,-3.5){$\mu<0$}
\put(40,-3.5){$\mu=0$}
\put(70,-3.5){$\mu>0$}
\end{overpic}
\end{center}
\caption{Generic one-parameter family $Z_{\mu}=(X_{\mu},Y_{\mu})\in\Omega^r$ for which $Z_0$ has a sliding Shilnikov orbit $\Gamma.$}\label{unfold}
\end{figure}
Our second main result is a version of Shilnikov's theorem regarding the existence, in a neighborhood of a sliding Shilnikov orbit, of sliding periodic orbits of $Z_{\mu}$ that intersect the fold-regular curve at exactly one point ($1$-periodic orbits).
\begin{mtheorem}\label{t2}
Assume that $Z_0=(X_0,Y_0)\in\Omega^r$ $($with $r\geq0)$ has a sliding Shilnikov orbit $\Gamma_0$ and let $Z_{\mu}=(X_{\mu},Y_{\mu})\in\Omega^r$ be a splitting of $\Gamma_0.$ Then, the following statements hold:
\begin{itemize}
\item[$(a)$] for $\mu=0,$ every neighborhood $G\subset\mathbb{R}^3$ of $\Gamma_0$ contains countably infinitely many sliding $1$-periodic orbits of $Z_0$;
\item[$(b)$] if $G\subset\mathbb{R}^3$ is a sufficiently small neighborhood of $\Gamma_0,$ then, for each $\mu\neq0$ sufficiently small, $G$ contains at least $N(\mu)$ sliding $1$-periodic orbits of $Z_{\mu},$ where $N(\mu)$ is a positive integer. Moreover, $N(\mu)\to\infty$ as $\mu\to 0.$
\end{itemize}
\end{mtheorem}
Theorem \ref{t2} is proved in Section 4.2.
\begin{remark}\label{shilcond}
In the smooth context, the hyperbolic saddle-focus has a two-dimensional invariant manifold $W^2,$ associated with the complex conjugate eigenvalues, and a one-dimensional invariant manifold $W^1,$ associated with the real eigenvalue. These two invariant manifolds have opposite stability. As mentioned before, the Shilnikov condition imposes that, among the eigenvalues, the pair of complex conjugate eigenvalues is the nearest to the imaginary axis in the complex plane. Roughly speaking, this means that the linear effect of $W^1$ is stronger than the spiral effect of $W^2$ on the solutions in a neighborhood of the equilibrium. For instance, if $W^1$ is a stable manifold, then the linear attraction to the equilibrium is stronger than the spiral repulsion from the equilibrium.
In Theorem \ref{t2}, it is worthwhile to notice that no Shilnikov-like condition is needed in order to guarantee the existence of countably infinitely many sliding periodic orbits. In our nonsmooth setting, the pseudo saddle-focus also has a two-dimensional unstable invariant manifold $W^2 \subset\Sigma^s,$ associated with the complex conjugate eigenvalues, and a one-dimensional stable invariant manifold $W^1$ (see Figure \ref{slidingshil}). However, the trajectories on $W^1$ reach the pseudo equilibrium in finite time, which implies that the attraction to the pseudo equilibrium in the $W^1$-direction is, in some sense, infinitely stronger than the spiral repulsion from the pseudo equilibrium. Hence, the balance between the attraction and repulsion effects of the invariant manifolds, required by the Shilnikov condition in the smooth context, is automatically satisfied for sliding Shilnikov orbits with no further assumptions.
Moreover, it will be clear in the proof of Theorem \ref{t2} that the hyperbolicity assumption on the pseudo saddle-focus is not necessary to obtain statement $(a),$ which still holds when $p_0$ is an unstable weak focus of the sliding vector field.
\end{remark}
\section{Proofs of the main results}\label{proof}
This section is devoted to the proofs of Theorem \ref{t1} (Section 4.1) and Theorem \ref{t2} (Section 4.2). We start by providing some notions and notation common to both proofs.
First, for the sake of simplicity, we take $h(x,y,z)=z,$ that is, $\Sigma=\{z=0\}.$ Suppose that $Z_0=(X_0,Y_0) \in \Omega^r$ admits a sliding Shilnikov orbit $\Gamma_0,$ which connects the hyperbolic pseudo saddle-focus $p_0=(0,0,0)\in\Sigma^s$ to itself and contains the fold-regular point $q_0.$ The case $p_0\in\Sigma^e$ follows similarly. Without loss of generality, we assume that $q_0$ is a visible fold point of $X_0$ and that the arc of orbit of $Z_0$ connecting $q_0$ to $p_0$ in this direction intersects the switching surface $\Sigma$ only at $p_0$ and $q_0.$
Now, denote $\gamma_{\varepsilon}=\overline{B_{\varepsilon}(q_0)\cap\partial\Sigma^s},$ where $B_{\varepsilon}(q_0)\subset \Sigma$ is the planar ball with center $q_0$ and radius $\varepsilon.$ Notice that, for $\varepsilon>0$ sufficiently small, $\gamma_{\varepsilon}$ is a curve of fold points and the sliding vector field $\widetilde Z_0$ (see \eqref{slisys}), defined on $\Sigma^s,$ is transversal to $\gamma_{\varepsilon}$ (see Remark \ref{transv}). From Definition \ref{defshil}, $p_0$ is a hyperbolic focus of the sliding vector field $\widetilde Z_0,$ the backward trajectory of $\widetilde Z_0$ starting at $q_0$ converges to $p_0,$ and the forward trajectory of $X_0$ starting at $q_0$ reaches the switching surface $\Sigma$ transversally at $p_0.$ Hence, the implicit function theorem can be used to show that, for $\varepsilon>0$ sufficiently small, the backward trajectories of $\widetilde Z_0$ starting at points of $\gamma_{\varepsilon}$ converge to $p_0,$ and the forward trajectories of $X_0$ starting at points of $\gamma_{\varepsilon}$ reach the switching surface $\Sigma$ transversally in a curve $\nu_{\varepsilon}.$ Notice that $p_0\in\nu_{\varepsilon}.$
Finally, let $W\subset \Omega^r$ be a small neighborhood of $Z_0.$ From the structural stability of fold-regular points (as discussed in Remark \ref{transv}) and the hyperbolicity of $p_0,$ each $Z=(X,Y) \in W$ admits a pseudo equilibrium $p_Z$ and a curve of fold-regular points $\gamma_{\varepsilon}^Z$ satisfying $p_Z\to p_0$ and $\gamma_{\varepsilon}^Z\to\gamma_{\varepsilon}$ as $Z\to Z_0.$ In addition, from the differentiable dependence of the trajectories of $X$ and $\widetilde Z$ on initial conditions and parameters (see, for instance, \cite[Chapter 6]{LS}; here $Z$ can be seen as a parameter in a Banach space), we conclude the following: the backward trajectories of $\widetilde Z$ starting at points of $\gamma_{\varepsilon}^Z$ converge to $p_Z;$ the forward trajectories of $X$ starting at points of $\gamma_{\varepsilon}^Z$ reach the switching surface $\Sigma$ transversally in a curve $\nu_{\varepsilon}^Z;$ and $\nu_{\varepsilon}^Z\to \nu_{\varepsilon}$ as $Z\to Z_0.$ Notice that, in this case, $Z$ has a sliding Shilnikov orbit if, and only if, $p_Z\in\nu_{\varepsilon}^Z.$ In the case that $Z_{\mu}$ is a splitting of the sliding Shilnikov connection $\Gamma_0$ (see Remark \ref{rem}) we shall denote $\gamma_{\varepsilon}^{\mu}=\gamma_{\varepsilon}^{Z_\mu},$ $S_{\varepsilon}^{\mu}=S_{\varepsilon}^{Z_\mu},$ $\nu_{\varepsilon}^{\mu}=\nu_{\varepsilon}^{Z_\mu},$ and $p_{\mu}=p_{Z_{\mu}}.$
Now we are ready to prove Theorems \ref{t1} and \ref{t2}.
\subsection{Proof of Theorem \ref{t1}}
We may assume that, in a suitable local coordinate system $(x,y)$ around $p_0 \in \Sigma^s,$ the curve $\nu_{\varepsilon}$ is given by $y=0,$ that is, $\nu_{\varepsilon}=\{(x,0,0):\,-\varepsilon\leq x\leq \varepsilon\}.$ So, for $Z \in W,$ the curve $\nu_{\varepsilon}^Z$ is also given as a graph $y=k_{Z}(x)= a_0^Z+a_1^Z x + \mathcal{O}(x^2),$ where $a_0^Z$ and $a_1^Z$ are small parameters satisfying $a_0^{Z_0}=a_1^{Z_0}=0.$
Denote $p_Z=(x_Z,y_Z,0)$ and define $g: W\rightarrow\mathbb{R}$ by $g(Z)=k_{Z}(x_Z) -y_Z.$ From the previous comments, $g$ is a $\mathcal{C}^1$ function and $g(Z_0)=0.$ We now prove that $0$ is a regular value of $g,$ that is, that the linear map $g'(Z^*):\Omega^r\rightarrow\mathbb{R}$ is surjective for every $Z^*\in g^{-1}(0).$ Let $Z^*\in W$ be such that $g(Z^*)=0$ and take $V\in\Omega^r.$ The derivative of $g$ at $Z^*$ in the direction $V,$ denoted $g'(Z^*)\cdot V,$ can be computed as
\[
g'(Z^*)\cdot V=\dfrac{d}{d v}g(Z(v))\Big|_{v=0}= \lim_{v\to 0}\dfrac{g(Z(v))-g(Z^*)}{v},
\]
where $Z(v)$ is any smooth curve in $\Omega^r$ such that $Z(0)=Z^*$ and $Z'(0)=V\in\Omega^r.$ So, taking $Z(v)$ in such a way that $p_{Z(v)}=(0,0)$ and $k_{Z(v)}(x)=v,$ we get $g(Z(v))=v$ and, therefore, $g'(Z^*)\cdot V=1$ for $V=Z'(0).$ This implies that $g'(Z^*)$ is surjective for every $Z^*\in g^{-1}(0).$
Finally, let $Z_{\mu}$ be a splitting of the sliding Shilnikov connection $\Gamma_0$ (see Remark \ref{rem}). Since the pseudo equilibrium $p_{\mu}$ of $Z_{\mu}$ is not contained in $\nu_{\varepsilon}^{\mu}$ for $\mu\neq0,$ the saturation of $\gamma_{\varepsilon}^{\mu}$ through the backward flow of $\widetilde Z_{\mu}$ intersects $\nu_{\varepsilon}^{\mu}$ in a finite number $N(\mu)$ of disjoint sets. Thus, one can find trajectories of $\widetilde Z_{\mu},$ starting at $\gamma_{\varepsilon}^{\mu},$ which intersect $\nu_{\varepsilon}^{\mu}$ in $N(\mu)$ points. In addition, the intersection between $\nu_{\varepsilon}^{\mu}$ and any trajectory of $\widetilde Z_{\mu},$ starting at $\gamma_{\varepsilon}^{\mu},$ has no more than $N(\mu)$ points. Now, if $Z_1,Z_2\in W$ are $\Sigma$-equivalent, then $\gamma_{\varepsilon}^{Z_1}$ and $\nu_{\varepsilon}^{Z_1}$ are sent to $\gamma_{\varepsilon}^{Z_2}$ and $\nu_{\varepsilon}^{Z_2},$ respectively. Hence, $N(\mu_1)\neq N(\mu_2)$ implies that $Z_{\mu_1}$ and $Z_{\mu_2}$ are not $\Sigma$-equivalent. Since $N(\mu)\to\infty$ as $\mu\to 0,$ we get the existence of infinitely many $\Sigma$-equivalence classes of Filippov vector fields in any neighborhood $W\subset\Omega^r$ of $Z_0.$ This concludes the proof.
\subsection{Proof of Theorem \ref{t2}}
To prove statement $(a),$ let $S_{\varepsilon}$ be the backward saturation of $\gamma_{\varepsilon}$ through the flow of the sliding vector field $\widetilde Z.$ Since $p_0$ is a focus of the sliding vector field, we get
\[
S_{\varepsilon}\cap\nu_{\varepsilon}=\bigcup_{i=1}^{\infty} I_i,
\]
where the sequence of compact sets $(I_i)_{i=1}^{\infty}$ satisfies: $I_i\cap I_j=\emptyset$ for $i\neq j$ and $I_i\to \{p_0\}$ (see Figure \ref{POShilnikov}).
\begin{figure}[H]
\begin{center}
\begin{overpic}[width=9.5cm]{POShilnikov2.pdf}
\put(70,35){$\Gamma$}
\put(83,24){$\Sigma^{s}$}
\put(51,15){$p_0$}
\put(66,6){$q_0$}
\put(70.5,10){$\gamma_{\varepsilon}$}
\put(24,10){$S_{\varepsilon}$}
\put(29,4){$\nu_{\varepsilon}$}
\put(67,26){$I_1$}
\put(38,6){$I_2$}
\put(57,23.5){$I_3$}
\put(39,12){$I_4 \cdots$}
\end{overpic}
\end{center}
\caption{Representation of the sequence of compact sets $(I_i)_{i=1}^{\infty},$ satisfying $I_i\to \{p_0\},$ which are characterized by the intersection between $\nu_{\varepsilon}$ and $S_{\varepsilon}.$
}\label{POShilnikov}
\end{figure}
Now, for each $i=1,2,\ldots,$ we define $J_i$ as the intersection of the backward saturation of $I_i$ through the flow of $X$ with the curve $\gamma_{\varepsilon}.$ Clearly, $J_i\cap J_j=\emptyset$ for $i\neq j$ and $J_i\to\{q_0\}.$ Moreover, a first return map $\pi$ is well defined on $\cup_{i=1}^{\infty} J_i.$ It is easy to see that $\pi$ is not injective. In what follows, we shall construct a sequence of maps $(\psi_i)_{i=1}^{\infty},$ $\psi_i:J_i\rightarrow J_i,$ satisfying the following property:
\begin{itemize}
\item[{\bf (P)}] {\it for each $i\in\mathbb{N},$ if $y \in J_i$ and $x=\psi_i(y),$ then $\pi(x)=y.$}
\end{itemize}
This property implies that a fixed point of $\psi_i$ is also a fixed point of $\pi.$ So, for $\xi\in\Sigma^s$ and $z\in\mathbb{R}^3,$ let $\varphi^s(t,\xi)$ and $\varphi^X(t,z)$ be the flows of $\widetilde Z$ and $X,$ respectively.
For $\xi\in J_i$ there exist $t^s_i(\xi)<0$ and $t^X_i(\xi)<0$ such that $\xi_i(\xi)=\varphi^s(t^s_i(\xi),\xi)\in I_i$ and $\varphi^X(t^X_i(\xi),\xi_i(\xi))\in J_i.$ So, define $\psi_i(\xi)=\varphi^X(t^X_i(\xi),\xi_i(\xi)).$ Notice that $\psi_i$ is a $\mathcal{C}^r$ function. From the above construction, property {\bf (P)} is satisfied for the sequence of functions $(\psi_i)_{i=1}^{\infty}$ and, consequently, fixed points of $\psi_i$ correspond to sliding periodic orbits of $Z$ (see Figure \ref{PPOShilnikov}).
\begin{figure}[H]
\begin{center}
\begin{overpic}[width=9.5cm]{PPOShilnikov.pdf}
\put(70,35){$\Gamma$}
\put(83,24){$\Sigma^{s}$}
\put(51,15){$p_0$}
\put(66,6){$q_0$}
\put(70.5,9.5){$q_1$}
\put(59,2){$q_2$}
\put(67,26){$I_1$}
\put(38,6){$I_2$}
\end{overpic}
\end{center}
\caption{Representation of sliding periodic orbits of $Z$ corresponding to fixed points of $\psi_i.$}\label{PPOShilnikov}
\end{figure}
Now, for each $i\in\mathbb{N},$ $\psi_i$ is a continuous function from the compact interval $J_i$ to itself. So, applying the {\it Brouwer fixed point theorem}, we obtain a sequence $(q_i)_{i=1}^{\infty}$ such that $q_i\in J_i$ and $\psi_i(q_i)=q_i.$ Hence, we conclude that there exists a sequence of sliding periodic orbits of $Z$ passing through the points $q_i.$ The proof of statement $(a)$ follows by observing that $q_i\to q_0.$
We now prove statement $(b).$ Let $Z_{\mu}$ be a splitting of the sliding Shilnikov connection $\Gamma_0$ (see Remark \ref{rem}). Since the pseudo equilibrium $p_{\mu}$ of $Z_{\mu}$ is not contained in $\nu_{\varepsilon}^{\mu},$ the intersection $S_{\varepsilon}^{\mu}\cap\nu_{\varepsilon}^{\mu}\cap G$ has only a finite number $N(\mu)$ of disjoint sets $I_i.$ Furthermore, the number $N(\mu)$ of disjoint sets in this intersection tends to infinity as $\mu$ goes to $0.$ From here, the proof of statement $(b)$ follows analogously to the proof of statement $(a).$
\section{A piecewise linear example}\label{lm}
In this section we present one-parameter families of discontinuous piecewise linear vector fields $Z_{\mu}\in\Omega^r$ for which $Z_0$ admits a sliding Shilnikov orbit.
For $\alpha>0,$ $\beta>0,$ and $\mu\in\mathbb{R},$ consider the following discontinuous piecewise linear vector field:
\begin{equation}\label{s1}
Z_{\mu}(x,y,z)=\left\{\begin{array}{ll}
X(x,y,z)=\left(
\begin{array}{c}
-\alpha\\
x-\beta\\
y-\dfrac{3\beta^2}{8\alpha}
\end{array}
\right)&\textrm{if}\quad z>0,
\\
Y_{\mu}(x,y,z)=\left(
\begin{array}{c}
\alpha\\
\dfrac{3\alpha}{\beta}y+\beta\\
\mu+\dfrac{3\beta^2}{8\alpha}
\end{array}
\right)&\textrm{if}\quad z<0.
\end{array}\right.
\end{equation}
Notice that $\Sigma=\{z=0\}$ is the switching surface of system \eqref{s1}, which can be decomposed as $\Sigma=\overline{\Sigma^c}\cup\overline{\Sigma^s}\cup\overline{\Sigma^e},$ where
\[
\begin{array}{llll}
\Sigma^c=\left\{(x,y,0):\,y>\dfrac{3\beta^2}{8\alpha}\right\}, &\Sigma^s=\left\{(x,y,0):\,y<\dfrac{3\beta^2}{8\alpha}\right\}&\textrm{and}&\Sigma^e=\emptyset.
\end{array}
\]
Moreover, $c=\left(\beta,3\beta^2/(8\alpha),0\right)\in\partial\Sigma^s$ is a cusp-regular point, $\{\left(x,3\beta^2/(8\alpha),0\right):\, x<\beta\}\subset\partial\Sigma^s$ is a curve of invisible fold-regular points, and $\{\left(x,3\beta^2/(8\alpha),0\right):\, x>\beta\}\subset\partial\Sigma^s$ is a curve of visible fold-regular points.
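These facts can be read off directly from the expressions $Xh(x,y,0)=y-3\beta^2/(8\alpha)$ and $Y_{\mu}h(x,y,0)=\mu+3\beta^2/(8\alpha),$ computed for $h(x,y,z)=z;$ here we assume $\mu$ close to $0,$ so that $Y_{\mu}h>0$ on $\Sigma.$ Indeed, $Xh\cdot Y_{\mu}h>0$ exactly when $y>3\beta^2/(8\alpha),$ and $Xh<0<Y_{\mu}h$ exactly when $y<3\beta^2/(8\alpha),$ which gives the decomposition above. Moreover, on the tangency line $\{y=3\beta^2/(8\alpha)\}$ we have
\[
X^2h(x,y,0)=X(Xh)(x,y,0)=x-\beta,
\]
so the fold points of $X$ are visible for $x>\beta$ and invisible for $x<\beta,$ with a cusp at $x=\beta.$ Since $Y_{\mu}h$ is a nonzero constant, these tangencies of $X$ are fold-regular points of $Z_{\mu}.$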
We shall see that, in the above families of Filippov systems, $\mu$ is a bifurcation parameter for which a sliding Shilnikov orbit exists only at the critical value $\mu=0.$ The parameters $\alpha$ and $\beta$ play no essential role; they just make the example more general. Finally, we point out that the fractions $3\beta^2/(8\alpha)$ and $3\alpha/\beta$ greatly simplify the expression of the normalized sliding vector field after a rescaling of variables.
Our main result on the above family is the following.
\begin{mtheorem}\label{t3}
For all positive real numbers $\alpha$ and $\beta$ the following statements hold:
\begin{itemize}
\item[$(a)$] For $\mu=0,$ the origin $p_0=(0,0,0)$ is a hyperbolic pseudo saddle-focus of $Z_0,$ which is an unstable hyperbolic focus of the sliding vector field $\widetilde Z_0.$ Moreover, $Z_0$ admits a sliding Shilnikov orbit, connecting $p_0$ to itself, passing through the visible fold-regular point $q_0=\big(3\beta/2,3\beta^2/(8\alpha),0\big)$ (see Figure \ref{simula}).
\item[$(b)$] For $\mu\neq0,$ $Z_{\mu}$ does not admit a sliding Shilnikov orbit.
\end{itemize}
\end{mtheorem}
\begin{proof}
We compute the sliding vector field \eqref{slisys} as
\[
\widetilde Z_{\mu}(x,y,0)=\left(\dfrac{4\alpha^2 (y+\mu)}{4\alpha (y-\mu)-3\beta^2}\,,\,\dfrac{3\beta^3x+\alpha\beta^2y-24\alpha^2y^2+8\alpha\beta\mu(x-\beta)}{6\beta^3-8\alpha\beta (y-\mu)}\,,\,0\right).
\]
Since the sliding vector field $\widetilde Z_{\mu}$ is defined only on the planar region $\Sigma^s\cup\Sigma^e\subset\Sigma,$ it can be identified with the following planar vector field:
\begin{equation}\label{slid}
\widetilde Z_{\mu}(x,y)=\left(\dfrac{4\alpha^2 (y+\mu)}{4\alpha (y-\mu)-3\beta^2}\,,\,\dfrac{3\beta^3x+\alpha\beta^2y-24\alpha^2y^2+8\alpha\beta\mu(x-\beta)}{6\beta^3-8\alpha\beta (y-\mu)}\right).
\end{equation}
Notice that $p_{\mu}=\left(\dfrac{3\alpha}{\beta}\mu,-\mu \right)$ is a singularity of $\widetilde Z_{\mu}$ for every $\mu\in\mathbb{R}.$
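This can be checked directly from \eqref{slid}: at $(x,y)=\big(\tfrac{3\alpha}{\beta}\mu,-\mu\big)$ the first numerator is $4\alpha^2(y+\mu)=0,$ while the second numerator equals
\[
3\beta^3\cdot\tfrac{3\alpha}{\beta}\mu+\alpha\beta^2(-\mu)-24\alpha^2\mu^2+8\alpha\beta\mu\big(\tfrac{3\alpha}{\beta}\mu-\beta\big)
=9\alpha\beta^2\mu-\alpha\beta^2\mu-24\alpha^2\mu^2+24\alpha^2\mu^2-8\alpha\beta^2\mu=0.
\]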
For $\mu=0,$ the normalized sliding vector field of \eqref{s1} reads
\begin{equation}\label{nslid}
\widehat Z_{0}(x,y)=\left(-\alpha y\,,\,\dfrac{3\beta^2}{8\alpha}x+\dfrac{\beta}{8}y-\dfrac{3\alpha}{\beta}y^2\right).
\end{equation}
Notice that the origin is a hyperbolic focus for $\widehat Z_0,$ and also for $\widetilde Z_0.$ Indeed, the eigenvalues of $\widetilde Z_0$ at the origin are given by
\begin{equation*}\label{eigenvalues}
\lambda^{\pm}=\dfrac{\alpha}{12\beta}\pm i\dfrac{\sqrt{95}\alpha}{12\beta}.
\end{equation*}
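To see this, note that the Jacobian matrix of \eqref{nslid} at the origin is
\[
D\widehat Z_0(0,0)=\begin{pmatrix}0 & -\alpha\\[1ex] \dfrac{3\beta^2}{8\alpha} & \dfrac{\beta}{8}\end{pmatrix},
\]
whose eigenvalues are $\tfrac{\beta}{16}\big(1\pm i\sqrt{95}\big).$ Since $\widehat Z_0=(Y_0h-Xh)\,\widetilde Z_0$ and $\widehat Z_0$ vanishes at the origin, the Jacobian of $\widetilde Z_0$ there is obtained by dividing $D\widehat Z_0(0,0)$ by the positive factor $(Y_0h-Xh)(0,0,0)=3\beta^2/(4\alpha),$ which yields the eigenvalues $\lambda^{\pm}$ displayed above.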
In particular, $p_0=(0,0,0)$ is a hyperbolic pseudo saddle-focus of $Z_0.$ Moreover, since $\mathrm{Re}(\lambda^{\pm})>0,$ the origin $(0,0)$ is an unstable hyperbolic focus of the (normalized) sliding vector field \eqref{nslid}. After a change of variables and a time rescaling, expressed by
\begin{equation}\label{change}
(x,y)=\left(\dfrac{3\beta}{2}u,\dfrac{3\beta^2}{8\alpha}v\right)\quad \text{and} \quad t=-\dfrac{4}{\beta}\tau,
\end{equation}
the normalized sliding vector field $\widehat Z_0$ becomes
\begin{equation}\label{trans}
\overline{Z}(u,v)=\left(v,-6u-\dfrac{1}{2}v+\dfrac{9}{2}v^2\right).
\end{equation}
Notice that the time rescaling in \eqref{change} reverses the direction of the flow of $\eqref{slid}.$ In the $(u,v)$ coordinates, the tangency line $\partial\Sigma^s$ is now given by $\ell=\{(u,1):u\in\mathbb{R}\}.$
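The derivation of \eqref{trans} can be verified by substituting \eqref{change} into \eqref{nslid}. Writing $u'=du/d\tau$ and $v'=dv/d\tau$ and using $d\tau/dt=-\beta/4,$ we get $\dot x=-\tfrac{3\beta^2}{8}u'$ and $\dot y=-\tfrac{3\beta^3}{32\alpha}v',$ so \eqref{nslid} becomes
\[
-\tfrac{3\beta^2}{8}u'=-\tfrac{3\beta^2}{8}v
\qquad\text{and}\qquad
-\tfrac{3\beta^3}{32\alpha}v'=\tfrac{9\beta^3}{16\alpha}u+\tfrac{3\beta^3}{64\alpha}v-\tfrac{27\beta^3}{64\alpha}v^2,
\]
that is, $u'=v$ and $v'=-6u-\tfrac{1}{2}v+\tfrac{9}{2}v^2,$ as claimed.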
We claim that the orbit of system \eqref{trans} starting at the point $(1,1)\in\ell$ converges to the focus equilibrium $(0,0)$ without touching the line $\ell.$ Clearly, going back through the transformation \eqref{change}, this implies that the orbit of system \eqref{slid} starting at the visible fold-regular point $q_0=\left(3\beta/2,3\beta^2/(8\alpha),0\right)\in\partial\Sigma^s$ is attracted, now backward in time, to the focus $(0,0,0)$ without touching the tangency line $\partial\Sigma^s.$ To prove this claim, we shall construct a compact region $\mathcal{R}$ in the $(u,v)$-plane which is positively invariant under the flow of the vector field \eqref{trans}. Accordingly, let $m(v)=-13/108+9v^2/13+54v^3/169,$ and define the curves
\[
\begin{array}{l}
\mathcal{C}_1=\{(u,1):\,m(1)\leq u\leq 1\},\\
\mathcal{C}_2=\{(u,-2u+3):\,1\leq u\leq3/2\},\\
\mathcal{C}_3=\{(3/2,v):\,-91/72<v<0\},\\
\mathcal{C}_4=\{(u,-91/72):\,m(-91/72)\leq u\leq 3/2\},\\
\mathcal{C}_5=\{(m(v),v):\,-91/72\leq v\leq 1\}.
\end{array}
\]
We define $\mathcal{R}$ as the compact region delimited by the curves $\mathcal{C}_i,$ $i=1,2,\ldots,5$ (see Figure \ref{sliding}). After some standard computations we conclude that $\mathcal{R}$ is positively invariant under the flow of \eqref{trans}. Furthermore, the vector field \eqref{trans} has at most one limit cycle (see Theorem A of \cite{CGL}), which is hyperbolic. So, from the positive invariance of $\mathcal{R},$ from the stability of the equilibrium $(0,0),$ and from the uniqueness of a possible limit cycle, we conclude that if this limit cycle exists, then it cannot be contained in $\mathcal{R}.$ Applying the Poincar\'{e}--Bendixson theorem, we conclude that the stable focus of \eqref{trans} attracts, forward in time, the orbits of all points in $\mathcal{R}$ without touching the line $\ell.$ The claim follows by noting that $(1,1)\in\mathcal{R}$ (see Figure \ref{sliding}).
\begin{figure}[H]
\begin{center}
\begin{overpic}[width=7cm]{Sliding}
\put(98,80){$\ell$}
\put(55,82){$\mathcal{C}_1$}
\put(71,65){$\mathcal{C}_2$}
\put(82,30){$\mathcal{C}_3$}
\put(51,7){$\mathcal{C}_4$}
\put(15,68){$\mathcal{C}_5$}
\end{overpic}
\end{center}
\caption{The dashed bold line represents the tangency line $\ell.$ The continuous bold lines represent the curves $\mathcal{C}_i,$ $i=1,2,\ldots,5,$ which delimit the compact region $\mathcal{R},$ positively invariant under the flow of \eqref{trans}. The red curve is the trajectory of \eqref{trans} starting at $(1,1)$ and being attracted to the focus $(0,0).$}\label{sliding}
\end{figure}
We remark that the function $m(v)$ defined above was obtained as an approximation of an invariant curve of \eqref{trans} expressed as $u=\overline{m}(v).$ Indeed, taking $\overline {m}(v)=p_3(v)+\mathcal{O}(v^4),$ with $p_3(v)=k_0+k_1 v+k_2 v^2+k_3 v^3,$ and imposing that
\[
\langle\nabla(u-\overline{m}(v)), \overline{Z}(u,v)\rangle\big|_{u=\overline{m}(v)}=0,
\]
we conclude that $p_3(v)=m(v).$ Furthermore, considering the curve $u=m(v)$ we see that
\[
\langle\nabla(u-m(v)), \overline{Z}(u,v)\rangle\big|_{u=m(v)}=\dfrac{729 v^4(91+72 v)}{28561},
\]
which does not change sign for $-91/72\leq v\leq 1.$
Finally, the vector field $X$ is linear. Thus, its trajectory starting at a point $q=(x_0,3\beta^2/(8\alpha),0)\in\partial\Sigma^s$ is easily computed as
\[
\varphi^+(t,q)=\left(x_0-\alpha\, t\,,\,\dfrac{3\beta^2+8\alpha(x_0-\beta)t-4\alpha^2 t^2}{8\alpha}\,,\,\dfrac{3(x_0-\beta)t^2-\alpha t^3}{6}\right).
\]
Notice that, for $q\in\partial\Sigma^s$ with $x_0>\beta$ and $t^+(q)=3(x_0-\beta)/\alpha>0,$ we have $\varphi^+(t^+(q),q)\in\Sigma^s.$ Moreover, $\varphi^+(t^+(q_0),q_0)=p_0;$ indeed, substituting $x_0=3\beta/2$ and $t^+(q_0)=3\beta/(2\alpha)$ into the formula above gives $(0,0,0).$ It follows that there exists a sliding Shilnikov orbit of $Z_0$ connecting $p_0$ to itself and passing through $q_0$ (see Figure \ref{simula}). This concludes the proof of statement $(a).$
To get statement $(b),$ we notice that the equation $\varphi^+(t^+(q),q)=p_{\mu}$ has no solution for $\mu\neq0.$
\end{proof}
\begin{figure}[H]
\begin{center}
\begin{overpic}[width=13cm]{SimulaShil3}
\put(75,23.5){$q_0$}
\put(12,17){$p_0$}
\put(90,4.5){$\Sigma^s$}
\put(55,40){$\Gamma_0$}
\put(5,21){$\partial\Sigma^s$}
\end{overpic}
\end{center}
\caption{Sliding Shilnikov orbit $\Gamma_0$ of the piecewise linear differential system \eqref{s1} (for $\alpha=1/2,$ $\beta=3/2,$ and $\mu=0$) connecting the hyperbolic pseudo saddle-focus $p_0=(0,0,0)$ to itself, passing through the visible fold-regular point $q_0=\big(3\beta/2,3\beta^2/(8\alpha),0\big).$}\label{simula}
\end{figure}
\section{Conclusions and further directions}\label{cfd}
In this paper we studied sliding homoclinic orbits to a pseudo saddle-focus of Filippov systems. Following the nomenclature of smooth differential systems, such a sliding homoclinic orbit is called a sliding Shilnikov orbit. A version of Shilnikov's theorem was given in this context. More specifically, Theorem \ref{t1} showed that sliding Shilnikov orbits occur in generic one-parameter families of vector fields in $\Omega^r$ (see \cite{soto}). In addition, if $Z_0$ admits a sliding Shilnikov orbit, then any neighborhood $W\subset\Omega^r$ of $Z_0$ contains infinitely many topological equivalence classes of vector fields. Theorem \ref{t2} provided the existence of countably infinitely many sliding periodic orbits near a sliding Shilnikov orbit. Regarding Theorem \ref{t2}, it is worth mentioning that no Shilnikov-like condition is needed in order to guarantee the existence of countably infinitely many sliding periodic orbits (see Remark \ref{shilcond}). Finally, Theorem \ref{t3} provided explicit one-parameter families $Z_{\mu}\in\Omega^r$ of piecewise linear vector fields for which $Z_0$ admits a sliding Shilnikov orbit.
Understanding how a sliding Shilnikov orbit behaves under a smoothing process (see \cite{ST}) is a major problem in this context. If $Z\in\Omega^r$ admits a sliding Shilnikov orbit, it seems possible to show the existence of one-parameter families $Z^{\delta}$ of smooth differential systems approaching $Z$ continuously such that, for each $\delta>0$ small enough, $Z^{\delta}$ admits a Shilnikov connection.
Also, higher dimensional vector fields allow the existence of many other kinds of sliding homoclinic connections. Thus, the study of typical sliding homoclinic connections in higher dimensions seems to be a very fertile research theme.
Another possible direction for further investigation is to apply techniques from ergodic theory to provide deeper results for this kind of connection. For instance, results on the existence of symbolic extensions, conjugation with Bernoulli shifts, and existence of Smale horseshoes would be very welcome.
Finally, preliminary studies indicate that sliding Shilnikov orbits may exist in piecewise smooth biological models, namely prey switching models (see \cite{Piltz}). Since the existence of Shilnikov homoclinic orbits is a usual route to chaos, it seems interesting to investigate the existence of sliding Shilnikov orbits in piecewise smooth models of real phenomena.
\section*{Acknowledgements}
We thank the referees for their comments and suggestions that helped us to greatly improve the presentation of this paper.
DDN is partially supported by FAPESP grant 2018/16430-8 and by CNPq grants 306649/2018-7 and 438975/2018-9. MAT is partially supported by a CNPq grant 301275/2017-3.
\end{document}
\begin{document}
\title{Non-trivial intersecting families of finite affine spaces}
\begin{abstract}
Guo and Xu determined the maximum size of intersecting families over finite affine spaces and showed that any family attaining this maximum size must be trivial. In this paper, we characterize the non-trivial intersecting families of maximum size.
\end{abstract}
\section{Introduction}
Let $[n]$ denote the set $\{1,\ldots,n\}$ and, for $0\leq k\leq n$, let $\binom{[n]}{k}$ denote the family of all $k$-subsets of $[n]$. A family $\,\mathcal{F}\subseteq\bn{[n]}{k}$ is called \emph{$t$-intersecting} if $|F_{1}\cap F_{2}|\geq t$ for all $F_{1},F_{2}\in\,\mathcal{F}$.
Erd\H{o}s, Ko and Rado \cite{KO} determined the maximum size of a $t$-intersecting family.
\begin{thm}{\em(\cite{KO})}\label{th1.1}
Let $k\geq t\geq 1$, $n>n_0(k,t)$ and $\,\mathcal{F}\subseteq\bn{[n]}{k}$ be a $t$-intersecting family. Then $|\,\mathcal{F}|\leq \bn{n-t}{k-t}$. Equality holds iff $\,\mathcal{F}$ consists of all $k$-subsets containing a fixed $t$-subset.
\end{thm}
The best value of $n_0(k,t)$ is $(t+1)(k-t+1)$. It was first proved by Frankl \cite{f1978} for $t\geq 15$, and completely determined by Wilson \cite{RMW} based on the eigenvalue method. In \cite{f1978}, Frankl also gave a conjecture about the maximum size of a $t$-intersecting subfamily of $\bn{[n]}{k}$ for any positive $n,k$ and $t$. This conjecture was partially proved by Frankl and F\"{u}redi \cite{PFZF}, and completely settled by Ahlswede and Khachatrian \cite{RH}.
A $t$-intersecting family is called \emph{trivial} if every element contains a fixed $t$-subset of $[n]$. Theorem \ref{th1.1} shows that a maximum $t$-intersecting subfamily of $\bn{[n]}{k}$ with $n>n_0(k,t)$ must be trivial. Hilton and Milner \cite{AE} determined the maximum size of a non-trivial $1$-intersecting family. The structure of a non-trivial $t$-intersecting subfamily of $\bn{[n]}{k}$ with maximum size was first considered by Frankl \cite{Peter78} and, again, completely determined in \cite{RH}.
\iffalse
Hilton and Milner \cite{AE} determined the maximum size of an intersecting family $\,\mathcal{F}$ with $\tau(\,\mathcal{F})\geq 2$.
\subsection{Basic definitions for finite sets}
Let $[n]$ denote the set $\{1,\ldots,n\}$ and, for $0\leq k\leq n$, let $\binom{[n]}{k}$ denote the family of all $k$-subsets of $[n]$. A family $\,\mathcal{F}\subseteq\bn{[n]}{k}$ is called intersecting if for all $F_{1},F_{2}\in\,\mathcal{F}$ we have $F_{1}\cap F_{2}\ne \emptyset$. For any family $\,\mathcal{F}\subseteq\bn{[n]}{k}$, the covering number $\tau(\,\mathcal{F})$ is the minimum size of a set that meets all $F\in\,\mathcal{F}$. Erd\H{o}s, Ko and Rado \cite{KO} determined the maximum size of an intersecting family. Their conclusion also showed that if an intersecting family $\,\mathcal{F}\subseteq \bn{[n]}{k}$ is of maximum size, then $\tau(\,\mathcal{F})=1$.
Hilton and Milner \cite{AE} determined the maximum size of an intersecting family $\,\mathcal{F}$ with $\tau(\,\mathcal{F})\geq 2$.
\begin{thm}{\em({\cite{AE}})}\label{1.0}
Let $\,\mathcal{F}\subseteq\bn{X}{k}$ be an intersecting family with $|X|=n$, $k\geq 2$, $n\geq 2k+1$ and $\tau(\,\mathcal{F})\geq 2$. Then $\,\mathcal{F}\leq \bn{n-1}{k-1}-\bn{n-k+1}{k-1}+1$. Equality holds only if
\noindent{\em{(i)}} $\,\mathcal{F}=\{G\in\bn{X}{k}:x\in G,\ F\cap G\ne\emptyset\}\cup\{F\}$ for some $k$-subset $F$ and $x\in X\backslash F$.
\noindent{\em{(ii)}} $\,\mathcal{F}=\{F\in\bn{X}{3}:|F\cap S|\geq 2\}$ for some $3$-subset $S$ if $k=3$.
\end{thm}
\subsection{Basic definitions for vector spaces}
\fi
Let $V$ denote an $n$-dimensional vector space over the finite field $\mathbb{F}_q$ and $\gn{V}{k}_q$ denote the family of all $k$-dimensional subspaces of $V$. For $n,k\in\mathbb{Z}^+$, define the \emph{Gaussian binomial coefficient} by
$$\gn{n}{k}_q:=\prod_{0\leq i<k}\frac{q^{n-i}-1}{q^{k-i}-1}.$$
Note that the size of $\gn{V}{k}_{q}$ is $\gn{n}{k}_q$. From now on, we will omit the subscript $q$.
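For instance, for $q=2$ we have $\gn{4}{2}=\frac{(2^4-1)(2^3-1)}{(2^2-1)(2-1)}=\frac{15\cdot 7}{3}=35$; that is, a $4$-dimensional vector space over $\mathbb{F}_2$ has exactly $35$ two-dimensional subspaces.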
A family $\,\mathcal{F}\subseteq\gn{V}{k}$ is called \emph{$t$-intersecting} if $\dim(F_1\cap F_2)\geq t$ holds for any $F_1,F_2\in\,\mathcal{F}$. A $t$-intersecting family is called \emph{trivial} if every element contains a fixed $t$-space of $V$. The structure of a $t$-intersecting subfamily of $\gn{V}{k}$ with maximum size was partially determined by Hsieh \cite{Hsieh} and by Frankl and Wilson \cite{PR}, and completely obtained by Tanaka \cite{TANAKA2006}. Their results show that a maximum $t$-intersecting subfamily of $\gn{V}{k}$ with $\dim(V)\geq 2k+1$ must be trivial.
Suppose $A$ and $B$ are subspaces of $V$. We say that $A$ \emph{intersects} $B$ if $\dim(A\cap B)\geq 1$. Let $\,\mathcal{F}\subseteq\gn{V}{k}$ be a $1$-intersecting family. The \emph{covering number} $\tau(\,\mathcal{F})$ is the minimum dimension of a subspace of $V$ that intersects all elements of $\,\mathcal{F}$. Note that $\,\mathcal{F}$ is non-trivial iff $\tau(\,\mathcal{F})\geq 2$. Blokhuis et al. \cite{AB} determined the maximum size of a $1$-intersecting family $\,\mathcal{F}$ with $\tau(\,\mathcal{F})\geq 2$.
\begin{thm}{\em{(\cite{AB})}}\label{1.1}
Let $k\geq 3$ and either $q\geq 3$ and $n\geq 2k+1$ or $q=2$ and $n\geq 2k+2$. Let $V$ be an n-dimensional vector space over $\mathbb{F}_q$, then for any intersecting family $\,\mathcal{F}\subseteq \gn{V}{k}$ with $\tau(\,\mathcal{F})\geq 2$, we have
$$|\,\mathcal{F}|\leq \gn{n-1}{k-1}-q^{k(k-1)}\gn{n-k-1}{k-1}+q^k.$$
Equality holds iff
\begin{itemize}
\item[\em (i)]$\,\mathcal{F}=\{F\in\gn{V}{k}:E\subseteq F,\dim(F\cap U)\geq 1\}\cup\gn{E+U}{k}$ for some $E\in\gn{V}{1}$ and $U\in\gn{V}{k}$ with $E\not\subseteq U$.
\item[\em (ii)]$\,\mathcal{F}=\{F\in\gn{V}{k}:\dim(F\cap S)\geq 2\}$ for some $S\in\gn{V}{3}$ if $k=3$.
\end{itemize}
\end{thm}
There are also results on maximum non-trivial $1$-intersecting subfamilies of $\gn{V}{k}$ with $\dim(V)=2k$; see \cite{AAT,Fi}.
Suppose that $P$ is a $k$-dimensional subspace of $\mathbb{F}_q^n$. A coset of $\mathbb{F}_q^n$ relative to $P$ is called a \emph{$k$-flat}. The \emph{dimension} of a $k$-flat $U+x$ is defined to be the dimension of the subspace $U$, denoted by $\dim(U+x)$. A flat $F_1$ is said to be \emph{incident} with a flat $F_2$ if $F_1$ contains or is contained in $F_2$. The point set $\mathbb{F}_q^n$ together with all the flats and the incidence relation defined above is called the $n$-dimensional \emph{affine space}, denoted by $AG(n,\mathbb{F}_q)$. Denote by $\,\mathcal{M}(k,n)$ the set of all $k$-flats in $AG(n,\mathbb{F}_q)$. Denote by $F_1\cap{F_2}$ the intersection of the flats $F_1$ and $F_2$, and by $F_1\cup{F_2}$ the minimum flat containing both $F_1$ and $F_2$. It is known that the intersection of two flats is either a flat or the empty set.
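For example, in $AG(2,\mathbb{F}_2)$ the $1$-flats are the cosets $U+x$ of the three $1$-dimensional subspaces $U$ of $\mathbb{F}_2^2$; each such $U$ has two cosets, so $|\,\mathcal{M}(1,2)|=6$, which agrees with the count $q^{n-k}\gn{n}{k}=2\cdot 3$ obtained from Lemma \ref{2.3} below applied with $m=n$.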
A family $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ is called \emph{$t$-intersecting} if $\dim(F_1\cap F_2)\geq t$ holds for any $F_1,F_2\in\,\mathcal{F}$. When $t=1$, we simply say that $\,\mathcal{F}$ is intersecting. A $t$-intersecting family is called \emph{trivial} if every element of $\,\mathcal{F}$ contains a fixed $t$-flat of $AG(n,\mathbb{F}_q)$. Guo and Xu \cite{GUO2017} determined the maximum size of a $t$-intersecting family and showed that a maximum $t$-intersecting family must be trivial.
\begin{thm}{\em(\cite[Theorem 1.3]{GUO2017})}
Let $n\geq 2k+1$ and $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be a $0$-intersecting family. Then $|\,\mathcal{F}|\leq\gn{n}{k}$. Equality holds iff $\,\mathcal{F}$ consists of all elements containing a fixed vector.
\end{thm}
\begin{thm}{\em(\cite[Theorem 1.4]{GUO2017})}
Let $t\geq 1$, $n\geq 2k+t+2$ and $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be a $t$-intersecting family. Then $|\,\mathcal{F}|\leq{\gn{n-t}{k-t}}$. Equality holds iff $\,\mathcal{F}$ consists of all elements containing a fixed $t$-flat.
\end{thm}
In 2019, Guo \cite{GUO2019} determined the maximum size of a non-trivial $0$-intersecting family.
\begin{thm}{\em(\cite[Theorem 1.6]{GUO2019})}
Let $n\geq 2k+1\geq 3$ and $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be a non-trivial $0$-intersecting family. Then
\[|\,\mathcal{F}|\leq 1+\gn{n-1}{k-1}+\sum_{i=0}^k q^{i(i+1)}(q^{k-i}-1)\gn{n-k-1}{i}\gn{k}{1}.\]
Equality holds iff there exist $U\in\,\mathcal{M}(k,n)$ and $x\in \mathbb{F}_q^n\setminus U$ such that
\[\,\mathcal{F}=\{F\in\,\mathcal{M}(k,n):x\in F, F\cap U\neq\emptyset\}\cup\{U\}.\]
\end{thm}
Let $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be an intersecting family. A flat $T$ of $AG(n,\mathbb{F}_q)$ is called a \emph{covering flat} of $\,\mathcal{F}$ if $\dim(T\cap F)\geq 1$ holds for any $F\in\,\mathcal{F}$. The \emph{covering number} $\tau(\,\mathcal{F})$ is the minimum dimension of a covering flat of $\,\mathcal{F}$. Note that $\,\mathcal{F}$ is trivial iff $\tau(\,\mathcal{F})=1$. In this paper, we determine the maximum size of an intersecting family $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ with $\tau(\,\mathcal{F})\geq 2$.
For any fixed $n,k\in\mathbb{N}$ and prime power $q$, denote
\[f(n,k,q)=\gn{n-1}{k-1}-q^{k(k-1)}\gn{n-k-1}{k-1}+q^k.\]
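For instance, for $(n,k,q)=(11,3,2)$, a case covered by Theorem \ref{1.2} below, one has $\gn{10}{2}=174251$, $\gn{7}{2}=2667$ and $q^{k(k-1)}=64$, so $f(11,3,2)=174251-64\cdot 2667+8=3571$.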
Suppose $A$ and $B$ are flats of $AG(n,\,\mathbb{F}_q)$. Let $A'$ denote the subspace associated with $A$ (so that $A=A'+x$ for some $x\in\mathbb{F}_q^n$) and
\[\,\mathcal{H}_{A,B}(\gamma)=\{F\in\,\mathcal{M}(k,n):A\subseteq F,~\dim(F\cap B)\geq \gamma\}.\]
\iffalse Suppose $\dim(A)=a$ and $\dim(A\cup B)=a+b$. It is known that
\[A'\in\gn{(A\cup B)'}{a}\]
and the size of
\[\,\mathscr{I}_{A,B}:=\left\{I\in\gn{(A\cup B)'}{b}: I\cap A'=0\right\}\]
is $q^{ab}$.
Assume that $I_1,\ldots,I_{q^{ab}}$ are the elements of $\,\mathscr{I}_{A,B}$ and $i_1,\ldots,i_{q^{ab}}$ be elements of $\,\mathbb{F}_q^n$, denote
\[\,\mathscr{I}_{A,B}(i_1,\ldots,i_{q^{ab}})=\left\{I_j+i_j:j\in\left\{1,\ldots,q^{ab}\right\}\right\}.\]
\fi
We say $\,\mathcal{F}$ is an HM-type family if there exist $E\in\,\mathcal{M}(1,n)$, $U\in\,\mathcal{M}(k,n)$ with $\dim(E\cap U)=0$ and $\alpha_1,\ldots,\alpha_{q^k}\in(E\cup U)$ such that
$$\,\mathcal{F}=\,\mathcal{H}_{E,U}(1)\cup\{A_i+\alpha_i:i\in[q^k]\},$$
where $A_1,\ldots, A_{q^k}$ are the $k$-subspaces of $(E\cup U)'$ not containing $E'$.
We say $\,\mathcal{F}$ is an $\,\mathcal{F}_3$-type family if there exist $D\in\,\mathcal{M}(3,n)$ with
\[\gn{D'}{2}=\{B_1,\ldots,B_{\gn{3}{2}}\}\]
and $\beta_1,\ldots,\beta_{\gn{3}{2}}\in D$ such that
\[\,\mathcal{F}=\bigcup_{R\in \,\mathcal{B}}\{F\in\,\mathcal{M}(k,n):R\subseteq F\},\]
where
\[\,\mathcal{B}=\{B_i+\beta_{i}:i\in[\textstyle\gn{3}{2}]\}.\]
The main result is as follows.
\iffalse
Let $E\in\,\mathcal{M}(1,n)$ and $U\in\,\mathcal{M}(k,n)$ with $\dim(E\cap U)=0$. Assume $E\cap U=\{x\}$, we have $E=E'+x, U=U'+x$ and $E\cup U=(E'+U')+x$, which shows that $E\cup U$ is a $(k+1)$-flat. Let $T_1, T_2,\ldots, T_{q^k}$ be all the $k$-subspaces of $E'+U'$ not containing $E'$ and $t_1,t_2,\ldots,t_{q^k}\in(E\cup U)$. We say that $\,\mathcal{F}$ is an HM-type family if
$$\,\mathcal{F}=\{F\in\,\mathcal{M}(k,n):E\subseteq F,\dim(F\cap U)\geq 1\}\cup\{T_i+t_i:1\leq i\leq q^k\}$$
and denote its size $f(n,k,q)$.
Now assume $k=3$ and $U\in\,\mathcal{M}(3,n)$ with $U=U'+x$ where $U'\in\gn{\mathbb{F}_q^n}{3}$. Let $S_1,\ldots,S_{\gn{3}{2}}$ be all $2$-subspaces of $U'$ and $s_1,\ldots,s_{\gn{3}{2}}\in U$. Let $\,\mathcal{T}=\{S_i+s_i:1\leq i\leq \gn{3}{2}\}$. We say that $\,\mathcal{F}$ is an $\,\mathcal{F}_3$-type family if $\,\mathcal{F}=\bigcup_{1\leq i\leq\gn{3}{2}}\{F\in\,\mathcal{M}(3,n): (S_i+s_i)\subseteq F\}$.
The main result is as follows.
\fi
\begin{thm}\label{1.2}
Suppose $k\geq 3$, $n\geq 2k+4$ and $(n,q)\neq(2k+4,2)$. Let $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})\geq 2$. Then $|\,\mathcal{F}|\leq f(n,k,q)$. Equality holds iff
\begin{itemize}
\item[\em(i)]$\,\mathcal{F}$ is an HM-type family.
\item[\em(ii)]$\,\mathcal{F}$ is an $\,\mathcal{F}_3$-type family if $k=3$.
\end{itemize}
\end{thm}
In Section $2$, we give some equalities and inequalities which are used to prove Theorem \ref{1.2}. In Section $3$, we prove Theorem \ref{1.2}.
\section{Some lemmas}
In this section, we shall give some lemmas which are used to prove Theorem \ref{1.2}. Let $V$ be a space of dimension $(n+l)$ over $\mathbb{F}_{q}$ and $W$ be a fixed $l$-subspace of $V$. A subspace $P\in\gn{V}{m}$ is called an \emph{$(m,k)$-type subspace} of $V$ if $\dim(P\cap W)=k$. Let $N'(m_{1},k_{1};m,k;n+l,n)$ be the number of subspaces of type $(m,k)$ in $V$ containing a given subspace of type $(m_{1},k_{1})$.
\begin{lem}{\emph{(\cite[Lemma 2.3]{KW})}}\label{2.1}
$N^{'}(m_{1},k_{1};m,k;n+l,n)\not= 0$ iff
\[0\leq k_{1}\leq k\leq l,~0\leq m_{1}-k_{1}\leq m-k\leq n.\]
Moreover, if $N'(m_1,k_1;m,k;n+l,n)>0$, then
\[N^{'}(m_{1},k_{1};m,k;n+l,n)=q^{(l-k)(m-k-m_{1}+k_{1})}\gn{n-(m_{1}-k_{1})}{(m-k)-(m_{1}-k_{1})}\gn{l-k_{1}}{k-k_{1}}.\]
\end{lem}
\iffalse
\begin{lem}\label{2.2}
Suppose $M\in\gn{V}{m+1}$ with $m\geq 2$ and $E\in \gn{M}{1}$. Then for any $X\in\gn{V}{1}$, there exists $F\in\gn{V}{m}$ such that
\[F\cap E=F\cap X=0.\]
\end{lem}
\proof
By Lemma \ref{2.1}, the number of $F\in\gn{V}{m}$ with $E\cap F$ is equal to
\[N'(0,0;m,0;m+1,m)=q^m.\]
We only need to consider the case when $X\in\gn{M}{1}$ and $X\neq E$. The results follow from the fact that the number of $F\in\gn{V}{m}$ containing $X$ and not intersecting $E$ is equal to
\[N'(1,0;m,0;m+1,m)=q^{m-1}\]
by Lemma \ref{2.1}.
$\qed$
\fi
\begin{lem}\label{2.3}{\em(\cite[Theorem 1.18]{WAN})}
The number of $k$-flats in $AG(n,\,\mathbb{F}_q)$ contained in a given $m$-flat, where $0\leq k\leq m\leq n$, is equal to $q^{m-k}\gn{m}{k}$.
\end{lem}
\begin{lem}\label{2.4}{\textup{(\cite[Theorem 1.19]{WAN})}}
The number of $m$-flats in $AG(n,\,\mathbb{F}_q)$ containing a given $k$-flat, where $0\leq k\leq m\leq n$, is equal to $\gn{n-k}{m-k}$.
\end{lem}
\begin{lem}\label{2.5}{\em(\cite[Theorem 1.20]{WAN})}
Let $F_1=F'_1+f_1$ and $F_2=F'_2+f_2$ be two flats of $AG(n,\mathbb{F}_q)$. The following hold.
\begin{itemize}
\item[\em (i)]$F_1\cap F_2\neq\emptyset$ iff $(f_1-f_2)\in (F'_1+F'_2)$.
\item[\em (ii)]If $F_1\cap F_2\neq\emptyset$, then $F_1\cap F_2=F'_1\cap F'_2+x$, where $x\in F_1\cap F_2$.
\item[\em (iii)]$F_1\cup F_2=(F'_1+F'_2)+\langle f_2-f_1\rangle+f_1$. In particular,
\[
\dim(F_1\cup F_2)=
\begin{cases}
\dim F_1+ \dim F_2-\dim(F_1\cap F_2), &\text{if}~F_1\cap F_2\neq\emptyset,\\
\dim F_1 + \dim F_2-\dim(F'_1\cap F'_2)+1, &\text{if}~F_1\cap F_2=\emptyset.
\end{cases}
\]
\end{itemize}
\end{lem}
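As a simple illustration of Lemma \ref{2.5}, let $F_1=U'$ and $F_2=U'+f$ be two distinct cosets of the same $1$-dimensional subspace $U'$ of $\mathbb{F}_q^n$ (so $f\notin U'$). Then $F_1\cap F_2=\emptyset$, and statement {\em(iii)} gives
\[\dim(F_1\cup F_2)=\dim F_1+\dim F_2-\dim(F'_1\cap F'_2)+1=1+1-1+1=2,\]
that is, two distinct parallel lines span a $2$-flat.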
For any $A, B\in AG(n,\mathbb{F}_q)$ and $\mathcal{F}\subseteq\mathcal{M}(k,n)$, let
\[\,\mathcal{F}'=\{F':F\in\,\mathcal{F}\},~\,\mathcal{F}_{A}=\{F\in\,\mathcal{F}:A\subseteq F\},~\,\mathcal{M}_A=\{F\in\,\mathcal{M}(k,n):A\subseteq F\}.\]
We say that $A$ intersects $B$ if $\dim(A\cap B)\geq 1$.
\begin{lem}\label{2.6}
Let $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be an intersecting family. Then $\,\mathcal{F}'$ is an intersecting family of $k$-subspaces with $|\,\mathcal{F}'|=|\,\mathcal{F}|$ and $\tau(\,\mathcal{F}')\leq \tau(\,\mathcal{F})$.
\end{lem}
\begin{proof}
Let $F_1$ and $F_2$ be two distinct flats. By Lemma \ref{2.5}, one gets that $F_1$ intersects $F_2$ only if
\[\dim(F_1'\cap F_2')\geq 1~\text{and}~F_1'\neq F_2'.\]
Let $T$ be a covering flat of $\,\mathcal{F}$ with $\dim(T)=\tau(\,\mathcal{F})$. It follows that $\,\mathcal{F}'$ is an intersecting family with $|\,\mathcal{F}'|=|\,\mathcal{F}|$ and that $T'$ is a covering subspace of $\,\mathcal{F}'$, which implies that
\[\tau(\,\mathcal{F}')\leq \dim(T')=\dim(T)=\tau(\,\mathcal{F}).\]
\end{proof}
\begin{lem}\label{2.7}
Suppose $m\geq 2$, $M\in\,\mathcal{M}(m+1,n)$ and $E\in\,\mathcal{M}(1,M)$. Let $A_1,\ldots,A_{\gn{m+1}{m}}$ be the $m$-subspaces of $M'$ and $A_1,\ldots,A_{q^m}$ be those not containing $E'$.
\begin{itemize}
\item[\em(i)] Let $F_1,F_2$ be two distinct elements of $\,\mathcal{M}(m,M)$. Then $F_1$ intersects $F_2$ iff $F_1'\neq F_2'$.
\item[\em(ii)] $\,\mathcal{F}\subseteq\,\mathcal{M}(m,M)$ is an intersecting family iff there exist $\alpha_1,\ldots,\alpha_{\gn{m+1}{m}}\in M$ such that
\[\,\mathcal{F}\subseteq\{A_i+\alpha_i:i\in[\textstyle\gn{m+1}{m}]\}.\]
\item[\em(iii)] $\,\mathcal{F}\subseteq\,\mathcal{M}(m,M)$ is an intersecting family such that $\dim(F\cap E)=0$ holds for any $F\in\,\mathcal{F}$ iff there exist $\beta_1,\ldots,\beta_{q^m}\in M$ such that
\[\,\mathcal{F}\subseteq\{A_i+\beta_i:i\in[q^m]\}.\]
\end{itemize}
\end{lem}
\begin{proof}
These are direct consequences of Lemma \ref{2.5}.
\end{proof}
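For orientation (this illustration is not used in the sequel), in the smallest admissible case $m=2$, part (i) says that two distinct planes of a $3$-flat $M$ intersect, that is, meet in a flat of dimension at least $1$, if and only if they are not parallel, i.e.\ $F_1'\neq F_2'$.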
\begin{lem}\label{2.8}
Suppose $a<b\leq k$. Let $A\in\,\mathcal{M}(a,n)$ and $B\in\,\mathcal{M}(b,n)$ be such that $A$ does not intersect $B$, and denote $M=A\cup B$.
\begin{itemize}
\item[\em(i)]
We have $b+1\leq \dim(M)\leq a+b+1$ and
\[\dim(M)=a+b+1\Leftrightarrow A\cap B=\emptyset,~A'\cap B'=0.\]
\item[\em(ii)]
One gets that $\,\mathcal{H}_{A,B}(1)\subseteq \,\mathcal{H}_{A,M}(a+1)$. If $\dim(M)=a+b+1$, then $\,\mathcal{H}_{A,B}(1)\subseteq \,\mathcal{H}_{A,M}(a+2)$.
\end{itemize}
\end{lem}
\begin{proof}
(i) Note that $0\leq \dim(A'\cap B')\leq a$. The desired results follow by Lemma \ref{2.5}.
\noindent(ii) Let $F\in\,\mathcal{H}_{A,B}(1)$. Note that $\dim(F\cap B)\geq 1$ and $A$ does not intersect $(F\cap B)$. One gets that
\begin{equation}\label{eq10}
A\subsetneqq \left(A\cup (F\cap B)\right)\subseteq (F\cap M),
\end{equation}
which implies that $F\in\,\mathcal{H}_{A,M}(a+1)$.
Now assume that $A\cap B=\emptyset$ and $A'\cap B'=0$. Let $F\in\,\mathcal{H}_{A,B}(1)$. One gets that $\dim(F\cap B)\geq 1$ and
\[(F\cap B)\cap A=\emptyset,~(F\cap B)'\cap A'=0.\]
By Lemma \ref{2.5} and (\ref{eq10}), it follows that
\[\begin{split}
\dim(F\cap M)\geq \dim\left(A\cup (F\cap B)\right)=\dim(A)+\dim(F\cap B)+1-\dim((F\cap B)'\cap A')\geq a+2,
\end{split}\]
which implies that $F\in\,\mathcal{H}_{A,M}(a+2)$.
\end{proof}
\begin{lem}\label{2.12}
Suppose $s<k<n/2$. Let $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ be an intersecting family, and let $U\in\,\mathcal{F}$ and $S\in\,\mathcal{M}(s,n)$ be such that $S$ does not intersect $U$. Denote $M=S\cup U$.
\begin{itemize}
\item[\em{(i)}]
If $\dim(M)\leq k+s$, then there exists $T\in\,\mathcal{M}(s+1,M)$ such that $S\subseteq T$ and $|\,\mathcal{F}_S|\leq\gn{k}{1}|\,\mathcal{F}_T|$.
\item[\em(ii)]
If $\dim(M)=k+s+1$, then there exists $R\in\,\mathcal{M}(s+2,M)$ such that $S\subseteq R$ and $|\,\mathcal{F}_S|\leq\gn{k}{1}^2|\,\mathcal{F}_R|$.
\item[\em(iii)]
$|\,\mathcal{F}_{S}|\leq \gn{k}{1}\gn{n-s-1}{k-s-1}$.
\end{itemize}
\end{lem}
\proof
Denote $m=\dim(M)$ and
\[\begin{split}
\,\mathcal{A}=\{A\in\,\mathcal{M}(s+1,M):S\subseteq A\},~\,\mathcal{B}=\{B\in\,\mathcal{M}(s+2,M):S\subseteq B\}.
\end{split}\]
By Lemmas \ref{2.4} and \ref{2.8}, one gets that $\,\mathcal{F}_S\subseteq \,\mathcal{H}_{S,U}(1)$ and
\[k+1\leq m\leq k+s+1,~|\,\mathcal{A}|=\gn{m-s}{1},~|\,\mathcal{B}|=\gn{m-s}{2}.\]
\noindent(i) Note that
\[\,\mathcal{F}_S\subseteq \,\mathcal{H}_{S,U}(1)\subseteq \,\mathcal{H}_{S,M}(s+1)=\bigcup_{A\in\,\mathcal{A}}\,\mathcal{M}_A\]
by Lemma \ref{2.8}. It follows that
\[\,\mathcal{F}_S=\bigcup_{A\in\,\mathcal{A}}\,\mathcal{F}_A.\]
There exists $T\in\,\mathcal{A}$ such that $|\,\mathcal{F}_T|$ is maximum. Thus,
\[\begin{split}
|\,\mathcal{F}_S|\leq\sum_{A\in\,\mathcal{A}}|\,\mathcal{F}_A|\leq |\,\mathcal{A}||\,\mathcal{F}_T|\leq \gn{k}{1}|\,\mathcal{F}_T|.
\end{split}\]
\noindent(ii) Similar to the proof of (i), we obtain that
\[\,\mathcal{F}_S=\bigcup_{B\in\,\mathcal{B}}\,\mathcal{F}_B\]
and there exists $R\in\,\mathcal{B}$ such that $|\,\mathcal{F}_R|$ is maximum. The fact that
\[|\,\mathcal{B}|=\gn{k+1}{2}<\gn{k}{1}^2\]
implies the desired result.
\noindent(iii) Observe that
\[|\,\mathcal{F}_T|\leq \gn{n-s-1}{k-s-1},~|\,\mathcal{F}_R|\leq \gn{n-s-2}{k-s-2},~\gn{k}{1}\gn{n-s-2}{k-s-2}<\gn{n-s-1}{k-s-1}\]
by Lemmas \ref{2.3} and \ref{2.10}. The desired bound then follows from (i) and (ii).
$\qed$
\begin{lem}\label{2.13}
Let $a$ and $b$ be positive integers with $a\leq b-2$. Suppose that $\{\alpha_n\}$ is an infinite sequence with $\alpha_n\in\{1,2\}$ for all $n\in\mathbb{N}^+$, and let $\beta_k=a+\sum_{i=1}^k\alpha_i$ for $k\in\mathbb{N}^+$. Then there exists $r$ such that $\beta_{r}\in\{b-1,b\}$.
\end{lem}
\begin{proof}
Let $\,\mathcal{A}=\{k\in\mathbb{N}^+: \beta_k>b\}$. Note that $(b-a+1)\in\,\mathcal{A}$ and $1\not\in\,\mathcal{A}$. By the least number principle, $\,\mathcal{A}$ has a minimum element $l$ with $l\geq 2$. Let $r=l-1$. It is routine to check that $\beta_{r}\in\{b-1,b\}$.
\end{proof}
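For orientation (this example is not used later), take $a=1$, $b=5$ and $\alpha=(2,1,2,2,\ldots)$: then $\beta_1=3$, $\beta_2=4$ and $\beta_3=6$, and $r=2$ is the index produced by the above argument, with $\beta_2=4\in\{b-1,b\}$.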
\begin{lem}\label{2.14}
Let $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=t$. Then for any $S\in\,\mathcal{M}(s,n)$ with $s\leq t$,
\[|\,\mathcal{F}_{S}|\leq{\gn{k}{1}}^{t-s}\gn{n-t}{k-t}.\]
\end{lem}
\proof
We only need to consider the case when $s<t$. Note that $S$ is not a covering flat of $\,\mathcal{F}$. If $s=t-1$, then the desired result follows by Lemma \ref{2.12}. Now assume that $s\leq t-2$.
By Lemmas \ref{2.12} and \ref{2.13}, there exists a sequence of flats
\[S=S_0\subseteq S_1\subseteq\cdots\subseteq S_r\]
such that $\dim(S_r)\in\{t-1,t\}$ and
\[\dim(S_{i})-\dim(S_{i-1})=\alpha_i\in\{1,2\},~|\,\mathcal{F}_{S_{i-1}}|\leq \gn{k}{1}^{\alpha_{i}}|\,\mathcal{F}_{S_i}|,~i\in[r].\]
Note that $S_r$ is not a covering flat of $\,\mathcal{F}$ if $\dim(S_r)=t-1$. By Lemma \ref{2.12}, one gets that
\[|\,\mathcal{F}_S|\leq
\begin{cases}
\gn{k}{1}^{t-s}|\,\mathcal{F}_{S_r}|\leq \gn{k}{1}^{t-s}\gn{n-t}{k-t},&\text{if}~\dim(S_r)=t,\\
\gn{k}{1}^{t-s-1}|\,\mathcal{F}_{S_r}|\leq \gn{k}{1}^{t-s}\gn{n-t}{k-t},&\text{if}~\dim(S_r)=t-1.
\end{cases}\]
$\qed$
\begin{lem}\label{2.10}{\em(\cite[Lemma 2.1]{AB})}
Let $a\geq 0$ and $n\geq k\geq a+1$ and $q\geq 2$. Then
\[\gn{k}{1}\gn{n-a-1}{k-a-1}<\frac{1}{(q-1)q^{n-2k}}\gn{n-a}{k-a}.\]
\end{lem}
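As a numerical illustration only, take $q=2$, $a=0$, $k=2$ and $n=4$: the left-hand side equals $\gn{2}{1}\gn{3}{1}=3\cdot 7=21$, while the right-hand side equals $\frac{1}{(q-1)q^{n-2k}}\gn{4}{2}=35$, in accordance with the lemma.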
\begin{lem}\label{2.11}
Suppose $k\geq 3$, $q\geq 2$, $r\geq 4$ and $n=2k+r$ with $(r,q)\neq(4,2)$. Then
\[
f(n,k,q)>\gn{k}{1}\gn{n-2}{k-2}-q\gn{k}{2}\gn{n-3}{k-3}>\left(1-\frac{1}{q^r(q^2-1)}\right)\gn{k}{1}\gn{n-2}{k-2}.
\]
\end{lem}
\proof
The first inequality is a direct consequence of \cite[Lemma 2.3]{AB}, and the second inequality follows from
\[q\gn{k}{2}=\frac{(\gn{k}{1}-1)\gn{k}{1}}{q+1}<\frac{\gn{k}{1}^2}{q+1}\]
and Lemma \ref{2.10}.
$\qed$
\section{Proof of Theorem \ref{1.2}}
In this section, we always assume that $k\geq 3$, $n\geq 2k+4$ and $(n,q)\neq (2k+4,2)$. Let $V$ be an $n$-space over $\mathbb{F}_{q}$ and denote by $\,\mathcal{M}(k,n)$ the set of all $k$-flats contained in $V$.
\begin{lem}\label{2.9}
Suppose $E\in\,\mathcal{M}(1,n)$ and $U\in\,\mathcal{M}(k,n)$ with $\dim(E\cup U)=k+1$. Denote $M=E\cup U$.
\begin{itemize}
\item[\em(i)] We have
\begin{equation}\label{k1}
|\,\mathcal{H}_{E,M}(2)|=\gn{n-1}{k-1}-q^{k(k-1)}\gn{n-k-1}{k-1}.
\end{equation}
\item[\em(ii)] If $E\cap U\neq\emptyset$, then $\,\mathcal{H}_{E,U}(1)=\,\mathcal{H}_{E,M}(2)$.
\item[\em(iii)] If $E\cap U=\emptyset$, then
\[|\,\mathcal{H}_{E,U}(1)|\leq |\,\mathcal{H}_{E,M}(2)|-q^{(k-1)(k-2)}\gn{n-k-1}{k-2}.\]
\end{itemize}
\end{lem}
\proof
\noindent(i) It is routine to check that $\,\mathcal{H}_{E,M}(2)$ is an intersecting family and
\[|\,\mathcal{H}'_{E,M}(2)|=\gn{n-1}{k-1}-N'(1,1;k,1;n,n-k-1)\]
by Lemma \ref{2.1}, which implies that (\ref{k1}) holds by Lemma \ref{2.6}.
\noindent(ii) Note that $\dim(M')=k+1$ and $U'\in\gn{M'}{k}$. It is routine to check that
\[F\cap U\neq\emptyset,~\dim(F'\cap M')=\dim(F\cap M)\geq 2\]
hold for any $F\in\,\mathcal{H}_{E,M}(2)$. One gets that
\[\dim(F\cap U)=\dim(F'\cap U')=\dim((F'\cap M')\cap U')\geq 1\]
by Lemma \ref{2.6}, which implies that $F\in\,\mathcal{H}_{E,U}(1)$. Together with $\,\mathcal{H}_{E,U}(1)\subseteq\,\mathcal{H}_{E,M}(2)$ by Lemma \ref{2.8}, the desired result follows.
\noindent(iii) Assume that $E=E'+e$ and $U=U'+u$. One gets that
\[E'\subseteq U'\subseteq M',~(e-u)\not\in U',~M=M'+e.\]
Pick $A\in\gn{U'}{2}$ with $E'\subseteq A$ and let $S=A+e$. It follows that
\[E\subseteq S\subseteq M,~S\cap U=\emptyset.\]
Denote
\[\,\mathcal{A}=\{F\in\,\mathcal{M}(k,n): F\cap M=S\}.\]
It is routine to check that
\[\,\mathcal{A}\subseteq \,\mathcal{H}_{E,M}(2)\setminus\,\mathcal{H}_{E,U}(1).\]
Note that $\,\mathcal{A}$ is an intersecting family. It follows that
\[\begin{split}
|\,\mathcal{A}|&=|\,\mathcal{A}'|=\left|\left\{K\in\gn{V}{k}:K\cap M'=A\right\}\right|\\
&=N'(2,2;k,2;n,n-(k+1))=q^{(k-1)(k-2)}\gn{n-k-1}{k-2}
\end{split}\]
by Lemmas \ref{2.1} and \ref{2.5}, which implies the desired result by (\ref{k1}).
$\qed$
\begin{prop}\label{3.1}
Let $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ be an HM-type family. Then $\,\mathcal{F}$ is an intersecting family with $\tau(\,\mathcal{F})=2$ and $|\,\mathcal{F}|=f(n,k,q)$.
\end{prop}
\begin{proof}
There exist $E\in\,\mathcal{M}(1,n)$, $U\in\,\mathcal{M}(k,n)$ with $\dim(E\cap U)=0$ and $\alpha_1,\ldots,\alpha_{q^k}\in(E\cup U)$ such that
\[\,\mathcal{F}=\,\mathcal{H}_{E,U}(1)\cup\{A_i+\alpha_i:i\in[q^k]\},\]
where $A_1,\ldots,A_{q^k}$ are the $k$-subspaces of $(E\cup U)'$ not containing $E'$. Denote $M=E\cup U$ and
\[\,\mathcal{A}=\{A_i+\alpha_i:i\in[q^k]\},~\,\mathcal{B}=\{S\in\,\mathcal{M}(2,M):E\subseteq S\}.\]
By Lemmas \ref{2.1}, \ref{2.7} and \ref{2.9}, it is routine to check that the following hold:
\begin{itemize}
\item[(i)] For any $F\in\,\mathcal{H}_{E,U}(1)$, there exists $S\in\,\mathcal{B}$ such that $S\subseteq F$.
\item[(ii)] Every element of $\,\mathcal{B}$ is a covering flat of $\,\mathcal{H}_{E,U}(1)$ and $\,\mathcal{A}$.
\item[(iii)] $\,\mathcal{H}_{E,U}(1)$ and $\,\mathcal{A}$ are intersecting families of $k$-flats with $|\,\mathcal{F}|=f(n,k,q)$ and $\tau(\,\mathcal{A}')=2$.
\end{itemize}
It follows that $\,\mathcal{F}$ is an intersecting family and every element of $\,\mathcal{B}$ is a covering flat of $\,\mathcal{F}$, which implies that
\[2\geq \tau(\,\mathcal{F})\geq \tau(\,\mathcal{A})\geq \tau(\,\mathcal{A}')=2\]
by Lemma \ref{2.6}.
\end{proof}
\begin{prop}\label{3.2}
Let $\,\mathcal{F}\subseteq \,\mathcal{M}(3,n)$ be an $\,\mathcal{F}_3$-type family. Then $\,\mathcal{F}$ is an intersecting family with $\tau(\,\mathcal{F})=2$ and $|\,\mathcal{F}|=f(n,3,q)$.
\end{prop}
\proof
There exist $D\in\,\mathcal{M}(3,n)$ and $\beta_1,\ldots,\beta_{\gn{3}{2}}\in D$ such that $\,\mathcal{F}=\bigcup_{A\in\,\mathcal{A}}\,\mathcal{M}_A$ with
\[\gn{D'}{2}=\{B_1,\ldots,B_{\gn{3}{2}}\},~\,\mathcal{A}=\{B_i+\beta_i:i\in[\textstyle\gn{3}{2}]\}.\]
Note that $\,\mathcal{A}$ is an intersecting family by Lemma \ref{2.7}. It follows that $\,\mathcal{F}$ is an intersecting family and every element of $\,\mathcal{A}$ is a covering flat of $\,\mathcal{F}$. One gets that
\[\,\mathcal{F}'=\left\{K\in\gn{V}{3}: \dim(K\cap D')\geq 2\right\}\]
which implies that $|\,\mathcal{F}|=|\,\mathcal{F}'|=f(n,3,q)$ and
\[2\geq \tau(\,\mathcal{F})\geq \tau(\,\mathcal{F}')=2\]
by Theorem \ref{1.2} and Lemma \ref{2.6}.
$\qed$
For $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ with $\tau(\,\mathcal{F})=t$, denote by $\,\mathcal{T}_{\,\mathcal{F}}$ the family of all covering flats of $\,\mathcal{F}$ of dimension $t$. We will omit the subscript $\,\mathcal{F}$ when no confusion can arise.
\begin{lem}\label{3.3}
Let $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=2$.
\begin{itemize}
\item[\em(i)] If there exist $A,B\in\,\mathcal{T}$ such that $A$ does not intersect $B$, then $\,\mathcal{F}$ is not maximal.
\item[\em(ii)] If $\,\mathcal{F}$ is maximal, then
\[\bigcup_{T\in\,\mathcal{T}}\,\mathcal{M}_T\subseteq\,\mathcal{F}.\]
\end{itemize}
\end{lem}
\proof
(i) Let $A=A'+a$ and $B=B'+b$. If $A'\cap B'=0$, then there exists $K\in\gn{V}{k}$ such that
\[A'\subseteq K,~K\cap B'=0,\]
which implies that $(K+a)$ belongs to $\,\mathcal{M}_A$ and does not intersect $B$. If $\dim(A'\cap B')\geq 1$, then $A\cap B=\emptyset$. Denote $E=\langle a-b\rangle$. It follows that
\[E\cap (A'+B')=0,~\dim(A'+B')\leq 3.\]
By Lemma \ref{2.1}, there exists $X\in\gn{V}{n-1}$ such that $(A'+B')\subseteq X$ and $X\cap E=0$. Pick $K\in\gn{X}{k}$ with $A'\subseteq K$. The fact that $(B'+K)\cap E=0$ implies that
\[(K+a)\in\,\mathcal{M}_A,~(K+a)\cap B=\emptyset.\]
Let $\,\mathcal{F}_1=\,\mathcal{F}\cup\,\mathcal{M}_A$. It is obvious that $\,\mathcal{F}_1$ is an intersecting family and $B$ is not a covering flat of $\,\mathcal{F}_1$, which implies that $\,\mathcal{F}\subsetneqq\,\mathcal{F}_1$.
\noindent(ii) By (i), one gets that $|\,\mathcal{T}|=1$ or $\,\mathcal{T}$ is an intersecting family. Let $\,\mathcal{F}_2=\,\mathcal{F}\cup\bigcup_{T\in\,\mathcal{T}}\,\mathcal{M}_T$. It is routine to check that $\,\mathcal{F}_2\subseteq\,\mathcal{M}(k,n)$ is an intersecting family with $\tau(\,\mathcal{F}_2)=\tau(\,\mathcal{F})$, which implies that $\,\mathcal{F}_2=\,\mathcal{F}$ by the maximality of $\,\mathcal{F}$.
$\qed$
By Lemma \ref{3.3}, maximal intersecting families with covering number no less than $2$ can be divided into four cases:
\begin{itemize}
\item $\tau(\,\mathcal{F})=2$ and $|\,\mathcal{T}|=1$.
\item $\tau(\,\mathcal{F})=2$ and $\,\mathcal{T}$ is an intersecting family with $\tau(\,\mathcal{T})=1$.
\item $\tau(\,\mathcal{F})=2$ and $\,\mathcal{T}$ is an intersecting family with $\tau(\,\mathcal{T})=2$.
\item $\tau(\,\mathcal{F})\geq 3$.
\end{itemize}
\begin{prop}\label{3.4}
Let $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=2$ and $\,\mathcal{T}=\{T\}$. Then
\begin{equation*}
|\,\mathcal{F}|\leq \gn{n-2}{k-2}+q(q+1)\left(\gn{k}{1}-1\right)\gn{k}{1}\gn{n-3}{k-3}.
\end{equation*}
\iffalse
Denote $\,\mathcal{M}(1,T)=\{E_1,\ldots,E_{q(q+1)}\}$.
\begin{itemize}
\item[\rm(i)] For any $i\in[q(q+1)]$, there exists $U_i\in\,\mathcal{F}$ such that $E_i$ does not intersect $U_i$ and
\[T\subseteq (E_i\cup U_i),~k+1\leq \dim(E_i\cup U_i)\leq k+2.\]
\item[\rm(ii)] Denote $M_i=(E_i\cup U_i)$, $i\in[q(q+1)]$. If $\dim(M_i)=k+1$, then
\[\,\mathcal{F}_{E_i}\setminus\,\mathcal{F}_T=\bigcup_{S\in\,\mathcal{A}_i}\,\mathcal{F}_S,\]
where $\,\mathcal{A}_i=\{S\in \,\mathcal{M}(2,M_i): E_i\subseteq S\}\setminus\{T\}$. If $\dim(M_j)=k+2$, then
\[\,\mathcal{F}_{E_j}\setminus\,\mathcal{F}_T=\bigcup_{R\in\,\mathcal{B}_j}\,\mathcal{F}_R,\]
where $\,\mathcal{B}_j=\{R\in\,\mathcal{M}(3,M_j): E_j\subseteq R,~T\not\subseteq R\}$.
\item[\rm(iii)] We have
\begin{equation*}
|\,\mathcal{F}|\leq \gn{n-2}{k-2}+q(q+1)\left(\gn{k}{1}-1\right)\gn{k}{1}\gn{n-3}{k-3}.
\end{equation*}
\end{itemize}
\fi
\end{prop}
\begin{proof}
Denote $\,\mathcal{M}(1,T)=\{E_1,\ldots,E_{q(q+1)}\}$. Note that $\tau(\,\mathcal{F})=2$. Then for any $i\in[q(q+1)]$, there exists $U_i\in\,\mathcal{F}$ such that $E_i$ does not intersect $U_i$. Let
\[\begin{split}
M_i=(E_i\cup U_i),~\,\mathcal{A}_i=\{S\in\,\mathcal{M}(2,M_i):E_i\subseteq S\}\setminus\{T\},~\,\mathcal{B}_i=\{R\in\,\mathcal{M}(3,M_i):E_i\subseteq R,~T\not\subseteq R\}.
\end{split}\]
It is routine to check that
\[\begin{split}
\dim(M_i)\in\{k+1,k+2\},~T=E_i\cup(U_i\cap T)\subseteq E_i\cup U_i
\end{split}\]
hold for any $i\in[q(q+1)]$. We claim that
\begin{align}
\,\mathcal{F}_{E_i}\setminus\,\mathcal{F}_T\subseteq
\begin{cases}
\displaystyle\bigcup_{S\in\,\mathcal{A}_i}\,\mathcal{F}_S&\text{if}~\dim(M_i)=k+1,\\
\displaystyle\bigcup_{R\in\,\mathcal{B}_i}\,\mathcal{F}_R&\text{if}~\dim(M_i)=k+2
\end{cases}
\end{align}
hold for any $i\in[q(q+1)]$. If $\dim(M_i)=k+1$, then by Lemma \ref{2.8}, one gets that
\[\left(\,\mathcal{F}_{E_i}\setminus\,\mathcal{F}_T\right)\subseteq \,\mathcal{F}_{E_i}\subseteq\,\mathcal{H}_{E_i,U_i}(1)\subseteq \,\mathcal{H}_{E_i,M_i}(2).\]
Therefore, for any $F\in\left(\,\mathcal{F}_{E_i}\setminus\,\mathcal{F}_T\right)$, there exists a $2$-flat $S$ such that $E_i\subseteq S\subseteq M_i$ and $S\subseteq F$. Note that $T\not\subseteq F$. It follows that $S\in\,\mathcal{A}_i$, which implies that
\[\,\mathcal{F}_{E_i}\setminus\,\mathcal{F}_T\subseteq\bigcup_{S\in\,\mathcal{A}_i}\,\mathcal{F}_S.\]
If $\dim(M_j)=k+2$, then by Lemma \ref{2.8}, one gets that
\[\left(\,\mathcal{F}_{E_j}\setminus\,\mathcal{F}_T\right)\subseteq \,\mathcal{F}_{E_j}\subseteq \,\mathcal{H}_{E_j,U_j}(1)\subseteq \,\mathcal{H}_{E_j,M_j}(3).\]
Therefore, for any $F\in\left(\,\mathcal{F}_{E_j}\setminus\,\mathcal{F}_T\right)$, there exists a $3$-flat $R$ with $E_j\subseteq R\subseteq M_j$ such that $R\subseteq F$. Note that $T\not\subseteq F$. It follows that $R\in\,\mathcal{B}_j$, which implies that
\[\,\mathcal{F}_{E_j}\setminus\,\mathcal{F}_T\subseteq\bigcup_{R\in\,\mathcal{B}_j}\,\mathcal{F}_R.\]
It is obvious that
\[\,\mathcal{F}=\bigcup_{i=1}^{q(q+1)}\,\mathcal{F}_{E_i}=\bigcup_{i=1}^{q(q+1)}\left(\,\mathcal{F}_{E_i}\setminus\,\mathcal{F}_T\right)\cup\,\mathcal{F}_T\]
and $\,\mathcal{A}_i\cap \,\mathcal{T}=\emptyset$ for any $i\in[q(q+1)]$. By Lemmas \ref{2.5} and \ref{2.10}, one gets that
\[|\,\mathcal{A}_i|=\gn{k}{1}-1,~|\,\mathcal{F}_S|\leq \gn{k}{1}\gn{n-3}{k-3}\]
for any $S\in\,\mathcal{A}_i$ if $\dim(M_i)=k+1$, and
\[|\,\mathcal{B}_j|=\gn{k+1}{2}-\gn{k}{1},~|\,\mathcal{F}_R|\leq \gn{n-3}{k-3}\]
for any $R\in\,\mathcal{B}_j$ if $\dim(M_j)=k+2$. The facts that
\[|\,\mathcal{F}_T|\leq \gn{n-2}{k-2},~\gn{k+1}{2}-\gn{k}{1}<\gn{k}{1}\left(\gn{k}{1}-1\right)\]
imply the desired result.
\end{proof}
\begin{prop}\label{3.5}
Let $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=2$ such that $\,\mathcal{T}$ is an intersecting family with $|\,\mathcal{T}|\geq 2$ and $\tau(\,\mathcal{T})=1$. Denote
\[m=\dim\left(\bigcup_{T\in\,\mathcal{T}}T\right)-1.\]
Then $2\leq m\leq k$ and the following hold.
\begin{itemize}
\item[\em(i)]If $3\leq m<k$, then
\[|\,\mathcal{F}|\leq\gn{m}{1}\gn{n-2}{k-2}+\left(\gn{k}{1}-\gn{m}{1}\right)\gn{k}{1}\gn{n-3}{k-3}+(q^{m+1}+q^m-1)\gn{n-m}{k-m};\]
if $m=2$, then
\[|\,\mathcal{F}|\leq(q+1)\gn{n-2}{k-2}+\left(\gn{k}{1}-q-1\right)\gn{k}{1}\gn{n-3}{k-3}+(q^3+q^2-1)\gn{k}{1}\gn{n-3}{k-3};\]
\item[\em(ii)]If $m=k$, then $|\,\mathcal{F}|\leq f(n,k,q)$ and equality holds iff $\,\mathcal{F}$ is an HM-type family.
\end{itemize}
\end{prop}
\proof
Let $\,\mathcal{T}=\{T_1,\ldots,T_l\}$ with $l\geq 2$, let $E\in\,\mathcal{M}(1,n)$ be the $1$-dimensional covering flat of $\,\mathcal{T}$, and denote $M=\bigcup_{i=1}^{l}T_i$, so that $\dim(M)=m+1$. The fact that
\[m+1=\dim(M)\geq \dim(T_1\cup T_2)=3\]
implies that $m\geq 2$. Since $\tau(\,\mathcal{F})=2$, we have $\,\mathcal{F}\setminus\,\mathcal{F}_E\neq\emptyset$. Pick $U\in\,\mathcal{F}\setminus\,\mathcal{F}_E$. Denote $E_i=U\cap T_i$ for any $i\in[l]$ and $X=\bigcup_{i=1}^{l}E_i$. It is routine to check that
\begin{equation}\label{eq1}
E_i\in\,\mathcal{M}(1,n),~T_i=E\cup E_i,~E_i'\subseteq X'\subseteq U'
\end{equation}
hold for any $i\in[l]$ and
\[X\subseteq (U\cap M)\subsetneqq M,~M=E\cup X\subseteq E\cup U,\]
which implies that
\begin{equation}\label{eq2}
\begin{split}
\dim(X)\leq \dim(U\cap M)<\dim(M)\leq \dim(E\cup U).
\end{split}
\end{equation}
If $E\cap U\neq\emptyset$, then by Lemma \ref{2.6}, (\ref{eq1}) and (\ref{eq2}), one gets that
\begin{equation}\label{eq3}
E'\cap E_1'=E'\cap X'=E'\cap U'=0,~E\cap E_1\neq \emptyset,~E\cap X\neq\emptyset.
\end{equation}
If $E\cap U=\emptyset$, then by Lemma \ref{2.6}, (\ref{eq1}) and (\ref{eq2}), one gets that
\begin{equation}\label{eq4}
E\cap E_1=E\cap X=\emptyset,~E'=E_1'\subseteq X'\subseteq U'.
\end{equation}
By Lemma \ref{2.6}, each of (\ref{eq3}) and (\ref{eq4}) implies that
\begin{equation}\label{k0}
\dim(E\cup U)=k+1,~m\leq k,~U\cap M=X\in\,\mathcal{M}(m,M).
\end{equation}
\noindent(i) Pick a fixed $U\in\,\mathcal{F}\setminus\,\mathcal{F}_E$. Denote $N=E\cup U$ and
\[\begin{split}
\,\mathcal{A}&=\{A\in\,\mathcal{M}(2,M):E\subseteq A\},\\
\,\mathcal{B}&=\{B\in\,\mathcal{M}(2,N):E\subseteq B\not\subseteq M\},\\
\,\mathcal{C}&=\{C\in\,\mathcal{M}(m,M):E\not\subseteq C\}.
\end{split}\]
By (\ref{k0}), Lemmas \ref{2.3} and \ref{2.4}, one gets that
\begin{equation}\label{eq5}
\begin{split}
&\,\mathcal{F}_E=\bigcup_{A\in\,\mathcal{A}}\,\mathcal{F}_A\cup\bigcup_{B\in\,\mathcal{B}}\,\mathcal{F}_B,~\,\mathcal{F}\setminus\,\mathcal{F}_E=\bigcup_{C\in\,\mathcal{C}}\,\mathcal{F}_C,\\
&|\,\mathcal{A}|=\gn{m}{1},~|\,\mathcal{B}|=\gn{k}{1}-\gn{m}{1},~|\,\mathcal{C}|=q^{m+1}+q^m-1.
\end{split}
\end{equation}
Note that
\[\,\mathcal{T}\cap\,\mathcal{B}=\,\mathcal{T}\cap\,\mathcal{C}=\emptyset.\]
The desired upper bounds follow by (\ref{eq5}), Lemmas \ref{2.4} and \ref{2.12}.
\noindent(ii) By (\ref{k0}), one gets that $\,\mathcal{F}\setminus\,\mathcal{F}_E\subseteq\,\mathcal{M}(k,M)$. Let $A_1,\ldots,A_{q^k}$ be the elements of $\gn{M'}{k}$ not containing $E'$. If $U\cap E\neq\emptyset$ for any $U\in\,\mathcal{F}\setminus\,\mathcal{F}_E$, then there exist $\alpha_1,\ldots,\alpha_{q^k}\in M$ such that
\[\,\mathcal{F}\setminus\,\mathcal{F}_E\subseteq \{A_i+\alpha_i:i\in[q^k]\}\]
by Lemma \ref{2.7}. Pick a fixed $U\in\,\mathcal{F}\setminus\,\mathcal{F}_E$. One gets that
\[M=E\cup U,~\,\mathcal{F}_E\subseteq \,\mathcal{H}_{E,U}(1),\]
which implies that $\,\mathcal{F}$ is an HM-type family by the maximality of $\,\mathcal{F}$ and Proposition \ref{3.1}.
Now assume that there exists a $U\in\,\mathcal{F}\setminus\,\mathcal{F}_E$ such that $E\cap U=\emptyset$. Observe that $k\geq 3$, $n\geq 2k+4$ and $\left(\,\mathcal{F}\setminus\,\mathcal{F}_E\right)\subseteq \,\mathcal{M}(k,E\cup U)$ is an intersecting family. One gets that
\[\begin{split}
|\,\mathcal{F}_E|&\leq |\,\mathcal{H}_{E,U}(1)|\leq f(n,k,q)-q^k-q^{(k-1)(k-2)}\gn{n-k-1}{k-2}<f(n,k,q)-2q^k,\\
|\,\mathcal{F}\setminus\,\mathcal{F}_E|&\leq\gn{k+1}{1}=q^k+\gn{k}{1}<2q^k
\end{split}\]
by Lemmas \ref{2.7} and \ref{2.9}, which implies that $|\,\mathcal{F}|<f(n,k,q)$.
$\qed$
\begin{prop}\label{3.6}
Let $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=2$ such that $\,\mathcal{T}$ is an intersecting family with $\tau(\,\mathcal{T})=2$.
\begin{itemize}
\item[\em(i)] There exists $D\in\,\mathcal{M}(3,n)$ such that $\,\mathcal{T}\subseteq \,\mathcal{M}(2,D)$ and
\[\dim(F\cap D)\geq 2,~F\in\,\mathcal{F}.\]
\item[\em(ii)] One gets that
\begin{equation}\label{eq6}
|\,\mathcal{F}|\leq \gn{3}{2}\left(\gn{n-2}{k-2}-\gn{n-3}{k-3}\right)+\gn{n-3}{k-3}.
\end{equation}
\end{itemize}
\end{prop}
\proof
Pick $A,B\in\,\mathcal{T}$. Let $E=A\cap B$ and $D=A\cup B$. One gets that $\dim(E)=1$, $\dim(D)=3$ and there exists $C\in\,\mathcal{T}$ such that $E$ does not intersect $C$. Denote $E_1=C\cap A$ and $E_2=C\cap B$. It is routine to check that
\[C=E_1\cup E_2,~A=E\cup E_1,~B=E\cup E_2,~D=E\cup C.\]
\noindent(i) It suffices to prove that if a flat $U$ intersects $A$, $B$ and $C$, then $\dim(U\cap D)\geq 2$. Assume that $E\subseteq U$. Note that $\dim(U\cap C)\geq 1$ and $E$ does not intersect $(U\cap C)$. It follows that
\[E\subsetneqq E\cup (U\cap C)\subseteq (U\cap D),\]
which implies that $\dim(U\cap D)\geq 2$. Assume that $E\not\subseteq U$. Let $A_1=U\cap A$ and $B_1=U\cap B$. It is routine to check that
\[A_1,B_1\in\,\mathcal{M}(1,D),~A_1\neq B_1,~(A_1\cup B_1)\subseteq U\cap D,\]
which implies that
\[\dim(U\cap D)\geq \dim(A_1\cup B_1)=2.\]
\noindent(ii) Note that $\,\mathcal{F}$ is an intersecting family. One gets that $|\,\mathcal{F}|=|\,\mathcal{F}'|$ and
\[\,\mathcal{F}'\subseteq \left\{K\in\gn{V}{k}: \dim(K\cap D)\geq 2\right\}\]
by Lemma \ref{2.6} and (i), which implies that (\ref{eq6}) holds.
$\qed$
\begin{prop}\label{3.7}
Let $\,\mathcal{F}\subseteq\,\mathcal{M}(3,n)$ be an intersecting family with $\tau(\,\mathcal{F})=2$ such that $\,\mathcal{T}$ is an intersecting family with $\tau(\,\mathcal{T})=2$. Then $|\,\mathcal{F}|\leq f(n,3,q)$ and equality holds iff $\,\mathcal{F}$ is an $\,\mathcal{F}_3$-type family.
\end{prop}
\proof
By Proposition \ref{3.6}, there exists $D\in\,\mathcal{M}(3,n)$ such that $\,\mathcal{T}\subseteq\,\mathcal{M}(2,D)$ and $\dim(F\cap D)\geq 2$ holds for any $F\in\,\mathcal{F}$. Let $A_1,\ldots,A_{\gn{3}{2}}$ be the $2$-subspaces of $D'$.
If $F\cap D\in\,\mathcal{T}$ holds for any $F\in\,\mathcal{F}\setminus\{D\}$, then there exist $\alpha_1,\ldots,\alpha_{\gn{3}{2}}\in D$ such that
\[\,\mathcal{T}\subseteq\{A_i+\alpha_i:i\in[\textstyle\gn{3}{2}]\}\]
by Lemma \ref{2.7}. Note that
\[\,\mathcal{F}\subseteq\bigcup_{T\in\,\mathcal{T}}\,\mathcal{F}_T\cup\{D\}\subseteq\bigcup_{T\in\,\mathcal{T}}\,\mathcal{M}_T.\]
One gets that $\,\mathcal{F}$ is an $\,\mathcal{F}_3$-type family by the maximality of $\,\mathcal{F}$.
Assume that there exists $U\in\,\mathcal{F}$ such that $U\neq D$ and $U\cap D\not\in\,\mathcal{T}$. Let
\[
\,\mathcal{F}_1=\{F\in\,\mathcal{F}: F\neq D,~(F\cap D)\not\in \,\mathcal{T}\},~\,\mathcal{T}_1=\{F\cap D:F\in\,\mathcal{F}_1\}
\]
and for any $S\in\,\mathcal{T}'_1$, denote
\[\,\mathcal{A}_S=\{F\in\,\mathcal{F}_1: (F\cap D)'=S\}.\]
First, we prove that
\begin{equation}\label{k2}
\,\mathcal{T}'\cap\,\mathcal{T}'_1=\emptyset
\end{equation}
by contradiction. Assume that $T\in\,\mathcal{T}'\cap\,\mathcal{T}_1'$. Then there exist $T_1,T_2\in\,\mathcal{M}(2,D)$ and $U\in\,\mathcal{F}_1$ such that
\[T_1'=T_2'=T,~U\cap D=T_1,~T_2\in\,\mathcal{T}.\]
The fact that $T_1\cap T_2=\emptyset$ implies that $U\cap T_2=\emptyset$, which leads to a contradiction.
Next, we claim that for any $S\in\,\mathcal{T}'_1$, there exists $X_S\in\,\mathcal{M}(4,n)$ such that
\begin{equation}\label{k3}
S\subseteq X_S',~\,\mathcal{A}_S\subseteq \,\mathcal{M}(3,X_S).
\end{equation}
Pick $S\in\,\mathcal{T}'_1$. There exist $S_1\in\,\mathcal{T}_1$ and $F_1\in\,\mathcal{F}_1$ such that
\[F_1\cap D=S_1,~S_1'=S.\]
Since $S_1\in\,\mathcal{T}_1$ is not a covering flat of $\,\mathcal{F}$, there exists $F_2\in\,\mathcal{F}$ such that $S_1$ does not intersect $F_2$. Let $S_2=F_2\cap D$. It is routine to check that
\[F_2\neq D,~S_2\in\,\mathcal{T}_1,\]
which implies that $F_2\in\,\mathcal{F}_1$. Note that $S_1$ does not intersect $S_2$. One gets that
\[S_2'=S_1'=S\]
by Lemma \ref{2.6}, which implies that
\[F_1'\cap F_2'=S.\]
Let $S_0=F_1\cap F_2$ and $X_S=F_1\cup F_2$. It is routine to check that $\dim(X_S)=4$, $S_0'=S$, $S\subseteq X_S'$ and
\[\begin{split}
&S_1\cap S_2=S_1\cap S_0=S_2\cap S_0=\emptyset,\\
&D=S_1\cup S_2,~F_1=S_0\cup S_1,~F_2=S_0\cup S_2,\\
&X_S=S_0\cup S_1\cup S_2=S_0\cup D=S_1\cup F_2=S_2\cup F_1,\\
&S_0\cap D=S_1\cap F_2=S_2\cap F_1=\emptyset.
\end{split}\]
Note that $\{F_1,F_2\}\subseteq\,\mathcal{M}(3,X_S)$. Let $U\in\,\mathcal{A}_S\setminus\{F_1,F_2\}$ and $S_3=U\cap D$. If $S_0\subseteq U$, then the fact that $S_0\cap S_3=\emptyset$ implies that
\[U=S_0\cup S_3\subseteq S_0\cup D=X_S.\]
If $S_0\not\subseteq U$, then $S_0\cap U=\emptyset$. Let $U_1=F_1\cap U$ and $U_2=F_2\cap U$. It is routine to check that
\[U_1'=U_2'=S,~U_1\neq U_2,\]
which implies that $U_1\cap U_2=\emptyset$ and
\[U=U_1\cup U_2\subseteq F_1\cup F_2=X_S.\]
Note that $|\,\mathcal{T}|=|\,\mathcal{T}'|$ by Lemma \ref{2.6}. It is routine to check that $\,\mathcal{T}'\cup\,\mathcal{T}'_1\subseteq \gn{D'}{2}$ and
\begin{equation}\label{eq9}
\,\mathcal{F}=\,\mathcal{F}_1\cup\{D\}\cup\left(\bigcup_{T\in\,\mathcal{T}}\,\mathcal{F}_T\setminus\{D\}\right)=\bigcup_{S\in\,\mathcal{T}'_1}\,\mathcal{A}_S\cup \left(\bigcup_{T\in\,\mathcal{T}}\,\mathcal{F}_T\setminus\{D\}\right)\cup \{D\}.
\end{equation}
One gets that $|\,\mathcal{T}|+|\,\mathcal{T}_1'|\leq \gn{3}{2}$ and
\[\,\mathcal{A}'_S\subseteq \left\{K\in\gn{X_S'}{3}: S\subseteq K\right\},~|\,\mathcal{F}_T\setminus\{D\}|\leq \gn{n-2}{1}-1\]
hold for any $T\in\,\mathcal{T}$ and $S\in\,\mathcal{T}'_1$ by (\ref{k2}), (\ref{k3}) and Lemma \ref{2.4}. The fact that $\,\mathcal{A}_S$ is an intersecting family implies that
\[|\,\mathcal{A}_S|=|\,\mathcal{A}'_S|\leq \gn{2}{1}=q+1.\]
Note that $n\geq 2k+4=10$. It follows that
\[\begin{split}
|\,\mathcal{F}|&\leq \sum_{S\in\,\mathcal{T}'_1}|\,\mathcal{A}_S|+\sum_{T\in\,\mathcal{T}}|\,\mathcal{F}_T\setminus\{D\}|+1\leq |\,\mathcal{T}'_1|(q+1)+|\,\mathcal{T}|\left(\gn{n-2}{1}-1\right)+1\\
&<(|\,\mathcal{T}'_1|+|\,\mathcal{T}|)\left(\gn{n-2}{1}-1\right)+1\leq f(n,3,q)
\end{split}\]
by (\ref{eq9}).
$\qed$
\begin{prop}\label{3.8}
Suppose $k\geq 3$ and $n\geq 2k+4$ with $(n,q)\neq (2k+4,2)$. Let $\,\mathcal{F}\subseteq \,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=2$. Then $|\,\mathcal{F}|\leq f(n,k,q)$. Equality holds iff one of the following holds:
\begin{itemize}
\item[\em(i)] $\,\mathcal{F}$ is an HM-type family;
\item[\em(ii)] $k=3$ and $\,\mathcal{F}$ is an $\,\mathcal{F}_3$-type family.
\end{itemize}
\end{prop}
\proof
It suffices to prove that the upper bounds in Proposition \ref{3.4}, in Proposition \ref{3.5}\,(i) and in Proposition \ref{3.6} (for $k\geq 4$) are less than $f(n,k,q)$. Let $r=n-2k$. By Lemma \ref{2.10}, it follows that
\[|\,\mathcal{F}|/\gn{n-2}{k-2}<1+\frac{q+1}{(q-1)q^{r-1}}\gn{k}{1}\]
in Proposition \ref{3.4}, and
\[|\,\mathcal{F}|/\gn{n-2}{k-2}<\left(\frac{1}{q}+\frac{1}{(q-1)q^{r}}\right)\gn{k}{1}+\frac{q^3+q^2-1}{(q-1)q^{r}}\]
in Proposition \ref{3.5} when $m<k$, and
\[|\,\mathcal{F}|\leq \gn{3}{1}\gn{n-2}{k-2}\]
in Proposition \ref{3.6} when $k\geq 4$. It is routine to check that these upper bounds are less than
\[\left(1-\frac{1}{(q^2-1)q^r}\right)\gn{k}{1}\gn{n-2}{k-2},\]
which implies the desired result by Lemma \ref{2.11}.
$\qed$
\begin{prop}\label{3.9}
Suppose $k\geq 3$ and $n\geq 2k+4$ with $(n,q)\neq (2k+4,2)$. Let $\,\mathcal{F}\subseteq\,\mathcal{M}(k,n)$ be an intersecting family with $\tau(\,\mathcal{F})=t>2$. Then $|\,\mathcal{F}|<f(n,k,q)$.
\end{prop}
\proof
Note that $t\leq k$. Let $r=n-2k$. Pick $T\in\,\mathcal{T}$. It follows that
\[\,\mathcal{F}=\bigcup_{E\in\,\mathcal{M}(1,T)}\,\mathcal{F}_E,\]
which implies that
\[\begin{split}
|\,\mathcal{F}|&\leq\sum_{E\in\,\mathcal{M}(1,T)}|\,\mathcal{F}_E|\leq\sum_{E\in\,\mathcal{M}(1,T)}\gn{k}{1}^{t-1}\gn{n-t}{k-t}\\
&= q^{t-1}\gn{t}{1}\gn{k}{1}^{t-1}\gn{n-t}{k-t}<\frac{q^{t-1}}{((q-1)q^r)^{t-2}}\gn{t}{1}\gn{k}{1}\gn{n-2}{k-2}
\end{split}\]
by Lemmas \ref{2.3}, \ref{2.14} and \ref{2.10}. Hence, one gets that
\[|\,\mathcal{F}|/\gn{k}{1}\gn{n-2}{k-2}<\frac{q^{t-1}(q^t-1)}{(q-1)^{t-1}q^{r(t-2)}}.\]
It is routine to check that
\[\frac{1}{(q^2-1)q^r}+ \frac{q^{t-1}(q^t-1)}{(q-1)^{t-1}q^{r(t-2)}}\leq 1\]
holds for any $t\geq 3$, $q\geq 2$ and $r\geq 4$ with $(q,r)\neq (2,4)$, which implies that $|\,\mathcal{F}|<f(n,k,q)$ by Lemma \ref{2.11}.
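For instance (a sanity check only), for $t=3$, $q=2$ and $r=5$ the left-hand side equals $\frac{1}{96}+\frac{7}{8}=\frac{85}{96}\leq 1$, whereas for the excluded pair $(q,r)=(2,4)$ and $t=3$ the second summand alone already equals $\frac{7}{4}>1$.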
$\qed$
Together with Proposition \ref{3.8}, this proves Theorem \ref{1.2}.
\end{document}
|
\begin{document}
\title
{Integral points on the modular curves $X_0(p)$}
\author{Yulin Cai}
\newcommand{\address}{{
\footnotesize
Y.~Cai,
\textsc{Institut de Math\'ematiques de Bordeaux, Universit\'e de Bordeaux
351, cours de la Lib\'eration 33405 Talence Cedex, France}\par\nopagebreak
\textit{E-mail address}: \texttt{[email protected]}
}}
\maketitle
\begin{abstract}
In this paper, we give an explicit bound for the height of integral points on $X_0(p)$ by using a very explicit version of the Chevalley-Weil principle. We improve the bound given by Sha in \cite{sha2014bounding1}.
\end{abstract}
{\footnotesize
\tableofcontents
}
\section{Introduction}
Let~$X$ be a smooth, connected projective curve defined over a number field~$K$, and let ${x\in K(X)}$ be a non-constant rational function on~$X$. If~$R$ is a subring of~$K$, we denote by $X(R,x)$ the set of $R$-integral $K$-rational points of~$X$ with respect to $x$:
$$
X(R,x)=\{P\in X(K):x(P)\in R\}.
$$
In particular, if~$S$ is a finite set of places of~$K$ (including all the infinite places), we consider the set of \textit{$S$-integral points} $X(\mathcal{O}_S,x)$, where ${\mathcal{O}_S=\mathcal{O}_{S,K}}$ is the ring of $S$-integers in~$K$.
According to the classical theorem of Siegel~\cite{siegle1929uber} (see also \cite[Part~D]{hindry2000diophantine} for a modern exposition), the set $X(\mathcal{O}_S,x)$ is finite if at least one of the following conditions is satisfied:
\begin{align}
&g(X)\geq 1;\\
\label{ethreepoles}
&\text{$x$ admits at least~$3$ poles in $X(\bar{\mathbb Q})$}.
\end{align}
The theorem of Faltings~\cite{faltings1983end} (see also \cite[Part~E]{hindry2000diophantine}) asserts that $X(K)$ is finite if ${g(X)\geq 2}$. Unfortunately, all known proofs of the theorems of Siegel and Faltings are non-effective, which means that they do not imply any explicit expression bounding the heights of integral or rational points.
Starting from the ground-breaking work of A.~Baker in the 1960s, effective proofs of Siegel's theorem were discovered, by Baker and others, for many pairs $(X,x)$, see \cite{bilu1995effctive,bilu2002baker} and the references therein.
One interesting case is when ${X=X_\Gamma}$ is the modular curve corresponding to a subgroup~$\Gamma$ of ${\Gamma(1)=\mathrm{SL}_2({\mathbb Z})}$, and ${x=j}$ is the rational function defined by the $j$-invariant.
Bilu~\cite[Section~5]{bilu1995effctive} (see also \cite[Section 4]{bilu2002baker}) made the following observation.
\begin{proposition}
\label{prthree}
Let $\Gamma$ be a congruence subgroup of $\mathrm{SL}_2({\mathbb Z})$ of level $N$ having at least $3$ cusps. Let $K$ be a number field such that $X_\Gamma$ admits a geometrically irreducible model over $K$ and such that ${j\in K(X_\Gamma)}$. Let~$S$ be a finite set of places of $K$ containing all the infinite places. Then there exists an effective constant ${c=c(N,K,S)}$ such that for any ${P\in X_\Gamma(\mathcal{O}_S,j)}$ we have ${\mathrm{h}(j(P))\leq c}$.
\end{proposition}
(Here ${\mathrm{h}(\cdot)}$ is the standard absolute logarithmic height defined on the set $\bar{\mathbb Q}$ of algebraic numbers.)
In other words, if condition~\eqref{ethreepoles} is satisfied for the couple $(X_\Gamma,j)$, then Siegel's theorem is effective for this couple.
Sha~\cite{sha2014bounding} made the bound in Proposition~\ref{prthree} totally explicit. The full statement of his result is quite involved, and we reproduce it only in Section~\ref{sproof}, see Theorem~\ref{thsha} therein. Here we only notice that Sha's bound is of the shape
${c(K,S)^{N\log N}}$, where $c(K,S)$ is an effective constant depending only on~$K$ and~$S$. Roughly speaking, the dependence on~$N$ is of exponential type.
Proposition~\ref{prthree} applies in many important cases: see \cite{bilu2002baker,bilu2011effective} for further details. In particular, it applies to the modular curve $X_0(N)$ of composite level~$N$. However, it does not directly apply to the curve $X_0(p)$ of prime level~$p$, because it has only~$2$ cusps.
Nevertheless, using a covering argument, Bilu~\cite[Theorem~10]{bilu2002baker} proved that Siegel's theorem is effective for $X_0(p)$ as well. Note that the curve $X_0(N)$ has a standard geometrically irreducible model over~${\mathbb Q}$.
\begin{theorem}[Bilu]
\label{thnonexp}
Let~$p$ be a prime number distinct from $2,3,5,7,13$. Let~$K$ be a number field and~$S$ be a finite set of places of~$K$ containing all the infinite places. Then there exists an effective constant ${c=c(p,K,S)}$ such that for any ${P\in X_0(p)(\mathcal{O}_S,j)}$ we have ${\mathrm{h}(j(P))\le c}$.
\label{bilu X_0(p)}
\end{theorem}
The main tool is the classical \textit{Chevalley-Weil Principle}, used in the following form.
\begin{proposition}[Chevalley-Weil Principle]
Let ${{\widetilde{X}}\stackrel\pi\to X}$ be a non-constant \'etale morphism of projective algebraic curves defined over a number field~$K$. Then there exists a finite set~$T$ of places of~$K$ such that the following holds.
Let ${P\in X(\bar K)}$ and let ${{\widetilde{P}} \in {\widetilde{X}}(\bar K)}$ be such that ${\pi({\widetilde{P}})=P}$. Let~$v$ be a finite place of the field $K(P)$ ramified in $K({\widetilde{P}})$. Then~$v$ extends a place from~$T$.
\end{proposition}
Bilu found a subgroup~${\widetilde{\Gamma}}$ of $\Gamma_0(p)$ such that the natural morphism ${X_{\widetilde{\Gamma}}\to X_0(p)}$ is \'{e}tale and~$X_{\widetilde{\Gamma}}$ has at least three cusps, see Proposition~\ref{pgamprime} for the details. The Chevalley-Weil principle now allows one to reduce the problem from $X_0(p)$ to $X_{\widetilde{\Gamma}}$, where Proposition~\ref{prthree} applies.
In \cite{sha2014bounding1} Sha gave an explicit version of Theorem \ref{thnonexp}. We again do not reproduce here Sha's full statement, which is very involved, and only focus on the dependence on the level~$p$. One would expect an exponential-type dependence on~$p$, but Sha obtains an upper bound of the form $c(K,S)^{\exp(p^6\log p)}$, which is doubly exponential in~$p$.
Sha's bound is so large because he uses a quantitative version of the Chevalley-Weil Principle from~\cite{bilu2013quantitative}, which provides extremely high upper bounds for the quantities involved.
In this paper, we will use Igusa's theorem, see \cite[Section 8.6]{diamond2005first}, combined with Proposition \ref{model} and Lemma \ref{uramified}, to obtain a very explicit version of the Chevalley-Weil principle in the special case we need. Then
we manage to improve the result of Sha by using it instead of the general quantitative Chevalley-Weil principle used in \cite{sha2014bounding1}. For a finite place~$v$ of a number field~$K$ we denote by $\mathcal{N}_{K/{\mathbb Q}}(v)$ the absolute norm of the prime ideal corresponding to~$v$. We will prove the following theorem.
\begin{theorem}
Keep the notation of Theorem \ref{bilu X_0(p)}. Then for any $P \in X_0(p)(\mathcal{O}_S,j)$ we have
$$\mathrm{h}(j(P)) \leq e^{9s^2p^4\log p}C(K,S)^{p^2},$$
where $C(K,S)$ can be effectively determined in terms of $K$ and $S$. More explicitly, $C(K,S)$ can be chosen as
$$C(K,S)=2^{31s}d^{9s}s^{2s}\ell^d|D|(\log{(|D|+1)})^{d}\prod\limits_{\substack{v \in S \\ v \nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v),$$
where $d=[K:\mathbb{Q}]$, $D$ is the absolute discriminant of $K$, $s = |S|$, and $\ell$ is the maximal prime such that there exists $v\in S$ with $v|\ell$.
\label{main}
\end{theorem}
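To get a feeling for the size of this constant, here is a hypothetical numerical illustration (our own, not taken from the text; we assume the natural logarithm and pick a concrete set $S$): for $K=\mathbb{Q}$ and $S$ consisting of the infinite place together with the places $2$ and $3$, we have $d=1$, $|D|=1$, $s=3$ and $\ell=3$, and $C(K,S)$ can be evaluated as follows.
\begin{verbatim}
import math

# Hypothetical example: K = Q, S = {infinite place, 2, 3}
d, s, D, ell = 1, 3, 1, 3
prod_log_norm = math.log(2) * math.log(3)   # product over the finite places of S

C = (2**(31*s) * d**(9*s) * s**(2*s) * ell**d
     * abs(D) * math.log(abs(D) + 1)**d * prod_log_norm)
print(f"C(K,S) = {C:.2e}")                  # roughly 1.1e+31
\end{verbatim}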
\section{Lemmas}
For the convenience of the reader, we state a result from \cite{liu1999models}.
\begin{proposition}[\cite{liu1999models} Corollary 4.10]
Let $K$ be a discrete valuation field with ring of integers $\mathcal{O}_K$, and $f: X \rightarrow Y$ be a finite morphism of smooth, connected projective curves over $K$.
Assume that $g(Y) \geq 1$, and that $X$ admits a smooth projective model $\mathcal{X}$. Then $Y$ admits a smooth projective model $\mathcal{Y}$, and $f$ extends to a finite morphism $\mathcal{X} \rightarrow \mathcal{Y}$.
\label{model}
\end{proposition}
\begin{lemma}
Let $f: X\rightarrow Y$ be a finite, \'{e}tale morphism of smooth, connected projective curves over a number field $K$ with $g(Y) \geq 1$, and let $\mathfrak{p} \subset \mathcal{O}_K$ be a non-zero prime with residue field $k(\mathfrak{p})$. Suppose that
\begin{enumerate}
\item[(1)] $X$ admits a smooth projective model at $\mathfrak{p}$;
\item[(2)] $[K(X):K(Y)] < \operatorname{char}(k(\mathfrak{p}))$ or $K(X)/K(Y)$ is Galois of degree prime to $\operatorname{char}(k(\mathfrak{p}))$.
\end{enumerate}
Then for every point $P \in Y(K)$ and $Q\in f^{-1}(P)$, we have that $\mathfrak{p}$ is unramified in the residue field $K(Q)$ of $Q$.
\label{uramified}
\end{lemma}
\begin{proof}
Suppose that $\mathcal{X}$ is the smooth model of $X$ over $\mbox{Spec}(\mathcal{O}_{K,\mathfrak{p}})$. Since $f$ is finite and $g(Y) \geq 1$, Proposition \ref{model} shows that $Y$ admits a smooth model $\mathcal{Y}$ and that $f$ extends to a finite morphism $\mathcal{X} \rightarrow \mathcal{Y}$. We still denote the extended morphism by $f$.
We endow the closure $\overline{\{P\}}$ of $\{P\}$ in $\mathcal{Y}$ with the structure of a reduced closed subscheme. It is a section of $\mathcal{Y}$ over $\mbox{Spec}(\mathcal{O}_{K,\mathfrak{p}})$: indeed, since $P \in Y(K)$, the scheme $\overline{\{P\}}$ is finite and birational over $\mbox{Spec}(\mathcal{O}_{K,\mathfrak{p}})$, hence isomorphic to it because $\mathcal{O}_{K,\mathfrak{p}}$ is normal. Consider $\mathcal{X}\times_{\mathcal{Y}}\overline{\{P\}}$. It is finite over $\overline{\{P\}} \simeq \mbox{Spec}(\mathcal{O}_{K,\mathfrak{p}})$, hence affine; we denote it by $\mbox{Spec}(A)$. Its underlying space is $f^{-1}(\overline{\{P\}})$. If $\mathcal{X} \rightarrow {\mathcal{Y}}$ is \'{e}tale, then after the base change $\overline{\{P\}} \rightarrow \mathcal{Y}$ the morphism $\mbox{Spec}(A) \rightarrow \mbox{Spec}(\mathcal{O}_{K,\mathfrak{p}})$ is also \'{e}tale. Since $\mathcal{O}_{K,\mathfrak{p}}$ is regular, $A$ is regular too. Write $A = \bigoplus\limits_{i=1}^{m}A_i$ with $\mbox{Spec}(A_i)$ connected for each $i$. Since $A$ is regular and finite over $\mathcal{O}_{K,\mathfrak{p}}$, each $A_i$ is normal and finite over $\mathcal{O}_{K,\mathfrak{p}}$. In particular, the affine ring corresponding to $\overline{\{Q\}}$ is the integral closure of $\mathcal{O}_{K,\mathfrak{p}}$ in $K(Q)$. Any closed point $x$ of $\overline{\{Q\}}$ is also a closed point of $\mbox{Spec}(A)$, and $\overline{\{Q\}}$ and $\mbox{Spec}(A)$ have the same local ring at $x$, so $\overline{\{Q\}} \rightarrow \overline{\{P\}}$ is \'{e}tale at $x$. Hence $\mathfrak{p}$ is unramified in $K(Q)$.
It remains to show that $\mathcal{X} \rightarrow {\mathcal{Y}}$ is \'{e}tale. Let $Z$ be the set of points of $\mathcal{X}$ at which $f$ is not \'{e}tale; it is closed in $\mathcal{X}$. Suppose that $Z \neq \emptyset$. Since $Z \neq \mathcal{X}$, the Zariski-Nagata purity theorem \cite[Th\'eor\`eme de puret\'e 3.1]{grothendieck2002rev} implies that $Z$ is pure of codimension $1$. Any irreducible component $W$ of $Z$ is vertical, because $X \rightarrow Y$ is \'etale. Let $\eta$ be the generic point of $W$; then $\eta$ is a generic point of the special fiber $\mathcal{X}_s$ of $\mathcal{X}$, and $\xi = f(\eta)$ is a generic point of the special fiber $\mathcal{Y}_s$ of $\mathcal{Y}$, since $f$ is dominant and finite. Consider $f^\#_\eta:\mathcal{O}_{\mathcal{Y},\xi} \rightarrow \mathcal{O}_{\mathcal{X},\eta}$. We claim that the maximal ideals of $\mathcal{O}_{\mathcal{Y},\xi}$ and $\mathcal{O}_{\mathcal{X},\eta}$ are $\mathfrak{p}\mathcal{O}_{\mathcal{Y},\xi}$ and $\mathfrak{p}\mathcal{O}_{\mathcal{X},\eta}$, respectively. Indeed, we have $\mathcal{O}_{\mathcal{X}_s,\eta} = \mathcal{O}_{\mathcal{X},\eta}/\mathfrak{p}\mathcal{O}_{\mathcal{X},\eta}$, and the special fiber $\mathcal{X}_s$ is smooth, so $\mathcal{O}_{\mathcal{X}_s,\eta}$ is an integral domain with only one prime ideal. Hence $\mathcal{O}_{\mathcal{X}_s,\eta}$ is a field, and $\mathfrak{p}\mathcal{O}_{\mathcal{X},\eta}$ is the maximal ideal of $\mathcal{O}_{\mathcal{X},\eta}$. The argument for $\mathcal{O}_{\mathcal{Y},\xi}$ is similar. On the other hand, $[k(\eta):k(\xi)] \leq [K(X):K(Y)]$, and $[k(\eta):k(\xi)]$ divides $[K(X):K(Y)]$ if $X \rightarrow Y$ is Galois. By assumption (2), either $[k(\eta): k(\xi)] < \operatorname{char}(k(\mathfrak{p}))$ or $[k(\eta): k(\xi)]$ is prime to $\operatorname{char}(k(\mathfrak{p}))$; in both cases $k(\eta)/k(\xi)$ is separable. Hence $\mathcal{O}_{\mathcal{Y},\xi} \rightarrow \mathcal{O}_{\mathcal{X},\eta}$ is unramified. It is also flat, since it is injective and $\mathcal{O}_{\mathcal{Y},\xi}$ is a Dedekind domain, hence \'{e}tale. This contradicts the choice of $\eta\in Z$, so $Z=\emptyset$.
\end{proof}
We will combine the lemma above with the following lemma in the sequel.
\begin{lemma}
\label{lded}
Let $L/K$ be a finite extension of number fields and~$T$ be a finite set of prime numbers such that every place of~$K$ ramified in~$L$ lies above a prime from~$T$. Then
$$
\bigl|\mathcal{N}_{K/{\mathbb Q}}(D_{L/K})\bigr|\le \left(\prod_{p\in T}p\right)^{[L:{\mathbb Q}]^2},
$$
where $D_{L/K}$ is the discriminant of $L$ over $K$.
\end{lemma}
\begin{proof}
The ``Dedekind Discriminant Formula'' \cite[Theorem B.2.12]{bombieri2007heights} implies that
$$
\nu_{\mathfrak{p}}(D_{L/K}) = \sum_{{\mathfrak{P}}\mid {\mathfrak{p}}}(e_{{\mathfrak{P}}/{\mathfrak{p}}}(1+\delta_{{\mathfrak{P}}})-1)f_{{\mathfrak{P}}/{\mathfrak{p}}} \leq [K:{\mathbb Q}]\sum_{{\mathfrak{P}}\mid {\mathfrak{p}}}e^2_{{\mathfrak{P}}/{\mathfrak{p}}}f_{{\mathfrak{P}}/{\mathfrak{p}}}\leq [L:K][L:{\mathbb Q}],
$$
where~${\mathfrak{p}}$ is a prime of~$K$ ramified in~$L$, the sum is over the primes of~$L$ above~${\mathfrak{p}}$, and $0 \leq \delta_{\mathfrak{P}} \leq v_{\mathfrak{p}}(e_{{\mathfrak{P}}/{\mathfrak{p}}}) < [K:{\mathbb Q}]e_{{\mathfrak{P}}/{\mathfrak{p}}}$. For every such~${\mathfrak{p}}$ we have ${\mathcal{N}_{K/{\mathbb Q}}({\mathfrak{p}}) = p^{f_{{\mathfrak{p}}/p}}}$, where~$p$ is the prime number below~${\mathfrak{p}}$. Hence
$$
\bigl|\mathcal{N}_{K/{\mathbb Q}}(D_{L/K})\bigr|\le \left(\prod_{p\in T}p^{\sum_{{\mathfrak{p}}\mid p}f_{{\mathfrak{p}}/p}}\right)^{[L:K][L:{\mathbb Q}]} \le \left(\prod_{p\in T}p^{[K:{\mathbb Q}]}\right)^{[L:K][L:{\mathbb Q}]}= \left(\prod_{p\in T}p\right)^{[L:{\mathbb Q}]^2}.
$$
\end{proof}
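For a quick illustration of the lemma (a sanity check, not used in the sequel): take $L=\mathbb{Q}(i)$, $K=\mathbb{Q}$ and $T=\{2\}$, so that the only ramified prime is $2$; then $D_{L/K}=(4)$ and indeed $4 \leq 2^{[L:\mathbb{Q}]^2}=2^4=16$.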
\section{Proof of Theorem \ref{main} \label{sproof}}
\subsection{An \'{e}tale covering \label{etale covering}}
\label{smorph}
Let~${\widetilde{\Gamma}}$ be the subgroup of $\Gamma_0(p)$ defined as follows: set $A = \{a\in \mathbb{F}_p^*: a^{12}=1\}$, and
\begin{equation}
\label{egtilde}
{\widetilde{\Gamma}}=\left\{\begin{bmatrix}a&b\\c&d\end{bmatrix}\in \Gamma_0(p): a \bmod p \in A\right\}.
\end{equation}
It is not hard to see that the curve $X_{{\widetilde{\Gamma}}}$ and the natural morphisms $X_1(p) \rightarrow {X_{{\widetilde{\Gamma}}}\stackrel\pi\to X_0(p)}$ are defined over~${\mathbb Q}$.
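Note that $\mathbb{F}_p^*$ is cyclic of order $p-1$, so $|A|=\gcd(12,p-1)$. In particular, $A=\mathbb{F}_p^*$ and ${\widetilde{\Gamma}}=\Gamma_0(p)$ precisely when $p-1$ divides $12$, that is, for $p\in\{2,3,5,7,13\}$; this is consistent with the exclusion of these primes in Theorem \ref{bilu X_0(p)}. For $p=11$, for instance, $A=\{\pm1\}$ and the morphism $\pi$ below has degree $5$.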
\begin{proposition}
\label{pgamprime}
\begin{enumerate}
\item
We have $\deg \pi \leq \dfrac{p-1}{2}$.
\item
When ${p\notin\{2,3,5,7,13\}}$, the curve $X_{{\widetilde{\Gamma}}}$ has at least~$3$ cusps.
\item
\label{ietale}
The morphism~$\pi$ is \'{e}tale.
\end{enumerate}
\end{proposition}
\begin{proof}
Let $\overline{{\widetilde{\Gamma}}}$ and $\overline{\Gamma_0(p)}$ be the images of ${\widetilde{\Gamma}}$ and $\Gamma_0(p)$ in $\mathrm{SL}_2(\mathbb{F}_p)$, respectively. Then
$$\deg\pi = [\Gamma_0(p):{\widetilde{\Gamma}}] = [\overline{\Gamma_0(p)}:\overline{{\widetilde{\Gamma}}}] = p(p-1)/(p|A|) \leq \dfrac{p-1}{2},$$
since $\pm 1\in A$.
The second assertion is proved in \cite[page~84]{bilu2002baker}.
As for the third assertion, it is only proved in~\cite{bilu2002baker} that~$\pi$ is \'{e}tale outside the cusps. In fact, $\pi$ is \'{e}tale at the cusps as well. Indeed, the $j$-map ${X(p)\stackrel j\to {\mathbb P}^1}$ has ramification index~$p$ at every cusp. Hence~$1$ and~$p$ are the only possible ramification indices for~$\pi$. Since ${\deg \pi \le (p-1)/2<p}$, the ramification indices at the cusps are all equal to~$1$.
\end{proof}
\begin{corollary}
\label{cchw}
Let $K$ be a number field, $P \in X_0(p)(K)$ and ${\widetilde{P}} \in \pi^{-1}(P)$. Then
\begin{align}
\label{ereldeg}
[{\widetilde K}:K] \leq \dfrac{p-1}{2},\\
\label{ereldis}
\bigl|\mathcal{N}_{K/{\mathbb Q}}(D_{{\widetilde K}/K})\bigr| \leq p^{d^2(p-1)^3/8},
\end{align}
where ${\widetilde K} = K({\widetilde{P}})$, the residue field of ${\widetilde{P}}$, and $d = [K:{\mathbb Q}]$.
\end{corollary}
\begin{proof}
It follows from Proposition \ref{pgamprime} and the formula
$$\mbox{deg} \pi = \sum\limits_{Q\in \pi^{-1}(P)}[K(Q):K]$$
that
$$
[{\widetilde K}:K]\le \deg \pi \le (p-1)/2.
$$
We know that the modular curve $X_1(p)$ has good reduction outside $p$ by Igusa's theorem, see \cite[Section 8.6]{diamond2005first}. Now by Proposition \ref{model}, $X_{\widetilde{\Gamma}}$ also admits good reduction outside $p$. Combining this with Proposition \ref{pgamprime}, Lemma \ref{uramified} and the fact that $[K(X_{\widetilde{\Gamma}}):K(X_0(p))] = \deg\pi \leq \frac{p-1}{2}$, we may apply Lemma~\ref{lded} with $T=\{q: q \leq (p-1)/2, \textrm{$q$ is prime} \} \cup\{p\}$ and obtain~\eqref{ereldis}.
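In more detail (spelling out the estimate): every element of $T$ is at most $p$ and, for the primes $p$ under consideration, $T$ has at most $(p-1)/2$ elements, so Lemma \ref{lded} together with \eqref{ereldeg} gives
$$
\bigl|\mathcal{N}_{K/{\mathbb Q}}(D_{{\widetilde K}/K})\bigr|
\le \Bigl(\prod_{q\in T}q\Bigr)^{[{\widetilde K}:{\mathbb Q}]^2}
\le \bigl(p^{(p-1)/2}\bigr)^{d^2(p-1)^2/4}
= p^{d^2(p-1)^3/8}.
$$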
\end{proof}
\subsection{Calculations}
For a number field $K$, and a finite subset $S \subseteq M_K$ containing all infinite places, we put $d=[K:\mathbb{Q}]$ and $s = |S|$. Let $\mathcal{O}_K$ be the ring of integers of $K$. We define the following quantity
$$\Delta_0(N) := \sqrt{N^{dN}|D|^{\varphi(N)}}(\log(N^{dN}|D|^{\varphi(N)}))^{d\varphi(N)}\times \left(\prod\limits_{\substack{v \in S\\ v\nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{\varphi(N)}$$
as a function of $N \in {\mathbb N}^+$, where $D$ is the absolute discriminant of $K$, $\varphi(N)$ is Euler's totient function, and the norm $\mathcal{N}_{K/\mathbb{Q}}(v)$ of a place $v$, by definition, is equal to $|\mathcal{O}_K/\mathfrak{p}_v|$ when $v$ is finite and $\mathfrak{p}_v$ is its corresponding prime ideal, and is set to be $1$ if $v$ is infinite.
With the notation above, the main tool is the following theorem, proved by M.~Sha in \cite{sha2014bounding}. The form of this theorem with an explicit constant will be used in our proof of Theorem \ref{main}.
\begin{theorem}[\cite{sha2014bounding} Theorem 1.2]
\label{thsha}
Let $\Gamma$ be a congruence subgroup of level $N$ and $X_\Gamma$ be the corresponding modular curve over a number field $K$ with $d=[K:\mathbb{Q}]$, and $S \subseteq M_K$ be a finite set containing all archimedean places. If $v_\infty(\Gamma) \geq 3$, then for any $ P \in X_\Gamma(\mathcal{O}_S,j)$, the following holds.
\begin{itemize}
\item[(1)] If $N$ is not a power of any prime, we have
$$\mathrm{h}(j(P))\leq (CdsN^2)^{2sN}(\log(dN))^{3sN}\ell^{dN}\Delta_0(N),$$
where $C$ is an absolute effective constant, and $\ell$ is the maximal prime such that there exists $v\in S$ with $v|\ell$.
\item[(2)] If $N$ is a power of some prime, we have
$$\mathrm{h}(j(P))\leq (CdsM^2)^{2sM}(\log(dM))^{3sM}\ell^{dM}\Delta_0(M),$$
where $C$ is an absolute effective constant, $\ell$ is the maximal prime such that there exists $v\in S$ with $v|\ell$, and $M$ is defined as follows: $M=3N$ if $N$ is a power of $2$, and $M=2N$ otherwise.
\end{itemize}
\label{bound}
\end{theorem}
\begin{remark}
By following the calculation carefully, see \cite{cai2019explicit}, one can show that $2^{15}$ is a suitable value for the constant $C$.
\end{remark}
\
We begin to prove Theorem \ref{main}. Recall the congruence subgroup ${\widetilde{\Gamma}} \subset \Gamma_0(p)$ defined in subsection \ref{etale covering}, i.e.
$${\widetilde{\Gamma}}=\{\left[
{\begin{array}{cc}
a&b\\
c&d
\end{array}}\right]\in \Gamma_0(p):a\ \mbox{mod} \ p \in A\},$$
where $A = \{a\in \mathbb{F}_p^*: a^{12}=1\}$, and the natural map $\pi: X_{{\widetilde{\Gamma}}} \rightarrow X_0(p)$. For any $ P \in X_0(p)(\mathcal{O}_S,j)$, there exist a finite extension ${\widetilde K}$ of $K$ and ${\widetilde{P}} \in X_{{\widetilde{\Gamma}}}({\widetilde K})$ such that $\pi({\widetilde{P}})=P$; indeed, a non-constant morphism between irreducible projective curves is always surjective, so $\pi(X_{{\widetilde{\Gamma}}})=X_0(p)$. Obviously, $\mathrm{h}(j({\widetilde{P}})) = \mathrm{h}(j(P))$, so it is sufficient to bound $\mathrm{h}(j({\widetilde{P}}))$. Hence we consider the points in $X_{{\widetilde{\Gamma}}}(\mathcal{O}_{{\widetilde S}},j)$, where
$${\widetilde S}=\{v\in M_{{\widetilde K}}: v|w\ \mbox{for some}\ w\in S\}.$$
By Proposition \ref{pgamprime}, we know that $X_{{\widetilde{\Gamma}}}$ has at least three cusps.
To apply Theorem \ref{bound} to $\mathrm{h}(j({\widetilde{P}}))$, we need to bound some invariants of ${\widetilde K}$ and ${\widetilde S}$.
We fix some notation before proceeding with the proof: we set
$$\tilde{\Delta}_0 := \sqrt{(2p)^{2{\tilde d} p}|{\widetilde D}|^{p-1}}(\log((2p)^{2{\tilde d} p}|{\widetilde D}|^{p-1}))^{{\tilde d}(p-1)}\times \left(\prod\limits_{\substack{v \in {\widetilde S}\\ v\nmid \infty}}\log\mathcal{N}_{{\widetilde K}/\mathbb{Q}}(v)\right)^{p-1},$$
$$D^*:=p^{d^2\frac{(p-1)^3}{8}}|D|^{\frac{p-1}{2}},$$
\begin{equation*}
\begin{aligned}
\Delta(p) := \sqrt{(2p)^{dp(p-1)}|D^*|^{p-1}}(\log((2p)^{dp(p-1)}|D^*|^{p-1}))^{d\frac{(p-1)^2}{2}} \times \left(\prod\limits_{\substack{v \in S \\ v \nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{\frac{(p-1)^2}{2}},
\end{aligned}
\end{equation*}
where ${\tilde d}:=[{\widetilde K}:\mathbb{Q}]$, and ${\widetilde D}$ is the absolute discriminant of ${\widetilde K}$.
We follow the idea of \cite{sha2014bounding}. Let $\tilde{s}= |{\widetilde S}|$; then $\tilde{s}\leq [{\widetilde K}:K]s\leq \dfrac{p-1}{2}s$ and ${\tilde d} \leq d\dfrac{p-1}{2}$. For the absolute discriminant ${\widetilde D}$ of ${\widetilde K}$, we have
\begin{equation*}
\begin{aligned}
|{\widetilde D}| &= |\mathcal{N}_{K/\mathbb{Q}}(D_{{\widetilde K}/K})||D|^{[{\widetilde K}:K]}\\
&\leq p^{d^2\frac{(p-1)^3}{8}}|D|^{\frac{p-1}{2}}\\
&= D^*.
\end{aligned}
\end{equation*}
Now let $w$ be a non-Archimedean place of $K$, and $v_1,\dots, v_m$ be all its extensions to ${\widetilde K}$ with residue degrees $f_1,\dots, f_m$ respectively over $K$. Then $f_1+ \dots + f_m \leq [{\widetilde K}:K] \leq \dfrac{p-1}{2}$, which implies $\log_2 f_1+ \dots +\log_2 f_m \leq f_1+ \dots + f_m \leq \dfrac{p-1}{2}$, i.e. $f_1\dots f_m \leq 2^{\frac{p-1}{2}}$. Since $\mathcal{N}_{{\widetilde K}/{\mathbb Q}}(v_k) = \mathcal{N}_{K/\mathbb{Q}}(w)^{f_k}$ for $1 \leq k \leq m$, we have
$$\prod\limits_{v | w}\log\mathcal{N}_{{\widetilde K}/\mathbb{Q}}(v) \leq 2^{\frac{p-1}{2}}(\log\mathcal{N}_{K/\mathbb{Q}}(w))^{\frac{p-1}{2}}.$$
Hence
$$\prod\limits_{\substack{v \in {\widetilde S}\\ v\nmid \infty}}\log\mathcal{N}_{{\widetilde K}/\mathbb{Q}}(v) \leq 2^{s\frac{p-1}{2}}\left(\prod\limits_{\substack{v \in S\\ v\nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{\frac{p-1}{2}},$$
and
\begin{equation*}
\begin{aligned}
\tilde{\Delta}_0 &= \sqrt{(2p)^{2{\tilde d} p}|{\widetilde D}|^{p-1}}(\log((2p)^{2{\tilde d} p}|{\widetilde D}|^{p-1}))^{{\tilde d}(p-1)}\times \left(\prod\limits_{\substack{v \in {\widetilde S}\\ v\nmid \infty}}\log\mathcal{N}_{{\widetilde K}/\mathbb{Q}}(v)\right)^{p-1}\\
&\leq \sqrt{(2p)^{dp(p-1)}|D^*|^{p-1}}(\log((2p)^{dp(p-1)}|D^*|^{p-1}))^{d\frac{(p-1)^2}{2}}\times 2^{s\frac{(p-1)^2}{2}}\\ &\ \ \times \left(\prod\limits_{\substack{v \in S\\ v\nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{\frac{(p-1)^2}{2}}\\
&= 2^{s\frac{(p-1)^2}{2}}\Delta(p).
\end{aligned}
\end{equation*}
By Theorem \ref{bound}, we have
\begin{equation*}
\begin{aligned}
\mathrm{h}(j(P)) &= \mathrm{h}(j({\widetilde{P}}))\\
&\leq (C{\tilde d}{\tilde s}(2p)^2)^{4{\tilde s} p}(\log(2{\tilde d} p))^{6{\tilde s} p}\ell^{2{\tilde d} p}\tilde{\Delta}_0\\
&\leq 2^{s\frac{(p-1)^2}{2}}(Cds(p-1)^2p^2)^{2sp(p-1)}(\log(dp(p-1)))^{3sp(p-1)}\ell^{dp(p-1)}\Delta(p)\\
\end{aligned}
\end{equation*}
where $\ell$ is the maximal prime such that there exists $v\in S$ with $v|\ell$.
This bound can be made more explicit. Indeed, we have the inequalities
$$D^* \leq e^{\frac{d^2p^3}{8}\log p}|D|^{p/2},$$
\begin{equation*}
\begin{aligned}
\Delta(p) & \leq e^{d^2p^4\log p}(2^d|D|)^{p^2} \cdot (d^2p^4\log p+p^2\log|D|)^{dp^2/2} \times \left(\prod\limits_{\substack{v \in S \\ v \nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{p^2}\\
& \leq e^{4s^2p^4\log p}(2^d|D|)^{p^2} \cdot (d^2p^5/2\log(|D|+1))^{dp^2} \times \left(\prod\limits_{\substack{v \in S \\ v \nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{p^2}\\
&\leq e^{7s^2p^4\log p}\left((d^2\log(|D|+1))^d |D|\prod\limits_{\substack{v \in S \\ v \nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{p^2}\\
&= e^{7s^2p^4\log p} C_1(K,S)^{p^2},
\end{aligned}
\end{equation*}
and
\begin{equation*}
\begin{aligned}
\mbox{h}(j(P)) &\leq 2^{sp^2}(Cdsp^4)^{2sp^2}(\log d + 2\log p)^{3sp^2}\ell^{dp^2}e^{7s^2p^4\log p}C_1(K,S)^{p^2}\\
& \leq e^{9s^2p^4\log p}\cdot 2^{sp^2}(Cds)^{2sp^2}(2d)^{3sp^2}\ell^{dp^2}C_1(K,S)^{p^2}\\
& \leq e^{9s^2p^4\log p}\left(2^{s} \cdot C^{2s}d^{9s}s^{2s}\ell^d|D|(\log{(|D|+1)})^{d}\prod\limits_{\substack{v \in S \\ v \nmid \infty}}\log\mathcal{N}_{K/\mathbb{Q}}(v)\right)^{p^2}\\
&= e^{9s^2p^4\log p}C(K,S)^{p^2}.
\end{aligned}
\end{equation*}
Hence we obtain Theorem \ref{main} by taking $C= 2^{15}$; see \cite{cai2019explicit}.
\address
\end{document}
\begin{document}
\title{Natural Deduction for Assertibility and Deniability\thanks{This paper is an outcome of the project Logical Structure of Information Channels, no. 21-23610M, supported by the Czech Science Foundation and realized at the Institute of Philosophy of the Czech Academy of Sciences.}}
\begin{abstract}
In this paper we split every basic propositional connective into two versions, one called extensional and the other intensional. The extensional connectives are semantically characterized by standard truth conditions that are relative to possible worlds. Assertibility and deniability of sentences built out of atomic sentences by extensional connectives are defined in terms of the notion of truth. The intensional connectives are characterized directly by assertibility and deniability conditions without the notion of truth. We pay special attention to the deniability condition for intensional implication. We characterize the logic of this mixed language by a system of natural deduction that sheds some light on the inferential behaviour of these two kinds of connectives and on the way they can interact.
\end{abstract}
\section{Introduction}
Christopher Gauker, in his book \cite{gauker05}, put forward an interesting theory of conditionals based on the notions of assertibility and deniability in a context. A simplification of Gauker's theory was proposed in \cite{puncochar16} where it was shown that the characteristic features of the logic determined by Gauker's semantics are preserved even if we replace Gauker's rather complex notion of context with a much simpler one, according to which contexts are represented by sets of possible worlds. Besides simplicity, the proposed modification has some further nice properties that Gauker's original theory lacks, concerning a simple form of compositionality, validity of some plausible argument forms, or simple treatment of conditionals embedded in antecedents of other conditionals (for more details, see \cite{puncochar16}).
A peculiar feature of Gauker's theory is that it reflects one rather surprising phenomenon: Logical operators are sensitive to the types of sentences they are applied to. For example, disjunction of two conditional sentences seems to behave differently than disjunction of two elementary sentences. The original semantics from \cite{gauker05} incorporates this ambiguity directly into the formal language. This approach was further elaborated, especially from the philosophical point of view, in \cite{puncochargauker20}. The semantics from \cite{puncochar16} is designed in a different way. It disambiguates the behaviour of logical connectives at the level of the formal language by replacing each connective with two operators, one ``extensional'' and one ``intensional''. It is argued in \cite{puncochar16} that while Gauker's original approach allows us to model some phenomena in a straightforward way, it is technically less elegant than the disambiguation strategy.
In this paper, we will focus on the semantics of extensional and intensional connectives proposed in \cite{puncochar16} and we will study the logic determined by this framework. Let us call it \textit{the Logic of Assertibility and Deniability}, or \textsf{LAD}, for short. \cite{puncochar16} provided a syntactic characterization of \textsf{LAD}, but only an indirect one, via a translation into a modal logic. The main contribution of this paper is a direct syntactic characterization of \textsf{LAD}. We will show that this logic can be characterized by an elegant system of natural deduction that clarifies the inferential behaviour of both extensional and intensional operators.
\section{Extensional and intensional connectives}\label{extint}
Consider the following scenario. A murder has been committed and we have four suspects. The murderer is not known but it is settled that it must be someone among these suspects. It is also clear that only one person has committed the crime. We have the following description of the suspects:
\begin{tabular}{ll}
tall man with dark hair and moustache & tall man with blond hair and without moustache \\
short man with blond hair and moustache & short man with dark hair and without moustache \\
\end{tabular}
Now it seems that in this context one is entitled to assert the premises but not the conclusion of the following argument that has a seemingly valid form:
\begin{tabular}{ll}
Premise 1 &\textit{The murderer is either tall or short.} \\
Premise 2 &\textit{If the murderer is tall then he has a moustache if he has dark hair.} \\
Premise 3 &\textit{If the murderer is short then he has a moustache if he has blond hair.} \\
\hline Conclusion &\textit{It either holds that the murderer has a moustache if he has dark hair} \\
& \textit{or that the murderer has a moustache if he has blond hair.} \\
\end{tabular}
The assertibility of the premises is clear from the description of the context. But the conclusion does not seem to be assertible. As a reason for this intuition one might say that neither of the two disjuncts in the conclusion is assertible in the context. But the same holds for the first premise: neither of its disjuncts is assertible in the context, and yet the whole disjunction clearly is. This puzzling phenomenon illustrates what we vaguely described in the introduction as the sensitivity of logical operators. The first premise says that in each possibility (``possible world''), one of the disjuncts is true. The disjunction connecting two conditionals in the conclusion says something different, namely that at least one of the two conditionals holds with respect to the whole context. We can describe these two cases as involving two different logical operators, one operating on the level of possible worlds and the other operating on the level of the whole context. One can easily formulate similar examples that involve negation of conditionals (see \cite{puncochar16}). For a discussion of similar phenomena, see, e.g. \cite{yalcin12} or \cite{bledin14}.
Such examples motivate splitting each of the basic logical connectives (implication, conjunction, disjunction, and negation) into two versions, one will be called ``extensional'' and the other one ``intensional'':
\begin{tabular}{lcccc}
Extensional connectives: & $\supset$ & $\cap$ & $\cup$ & $\sim$ \\
Intensional connectives: & $\rightarrow$ & $\wedge$ & $\vee$ & $\neg$ \\
\end{tabular}
Let $L$ be the language containing all atomic formulas and all formulas which can be constructed out of the atomic formulas using the extensional connectives. The language $L^*$ is the smallest set of formulas containing all $L$-formulas and closed under the application of the intensional connectives. The two languages can be concisely introduced as follows:
\begin{tabular}{lc}
$L$: & $\alpha= p \mid \alpha \supset \alpha \mid \alpha \cap \alpha \mid \alpha \cup \alpha \mid {\sim}\alpha$\\
$L^*$: & $\varphi= \alpha \mid \varphi \rightarrow \varphi \mid \varphi \wedge \varphi \mid \varphi \vee \varphi \mid \neg \varphi$
\end{tabular}
The argument above would be formalized in the language $L^*$ in the following way (note that the language forbids us from connecting two intensional implications by the extensional disjunction):
\begin{tabular}{l}
$p \cup q, p \rightarrow (r \rightarrow t), q \rightarrow (s \rightarrow t) / (r \rightarrow t) \vee (s \rightarrow t)$
\end{tabular}
The Greek letters $\alpha, \beta, \gamma$ will range over $L$-formulas and the letters $\varphi, \psi, \chi, \vartheta$ over $L^*$-formulas.
Let $A$ be a set of atomic formulas. A possible $A$-world is any function that assigns a truth value ($1$~representing \textit{truth} or $0$ representing \textit{falsity}) to every atomic formula from $A$. An $A$-context is a nonempty set of possible $A$-worlds. The specification of the set $A$ will be omitted if no confusion arises. Note that, by definition, there cannot be two different possible $A$-worlds in which exactly the same atomic formulas are true.
For the complex formulas of the language $L$, the truth and falsity conditions with respect to individual possible worlds are those of classical propositional logic. $\Vdash^{+}$ and $\Vdash^{-}$ will now respectively stand for the relations of assertibility and deniability between contexts and formulas of the language $L^*$. The assertibility and deniability conditions taken from \cite{puncochar16} (and motivated by \cite{gauker05}) are defined in the following way:
\begin{tabular}{l}
$C \Vdash^{+} \alpha$ iff for all $w \in C$, $\alpha$ is true in $w$.\\
$C \Vdash^{-} \alpha$ iff for all $w \in C$, $\alpha$ is false in $w$.\\
$C \Vdash^{+} \neg\varphi$ iff $C \Vdash^{-} \varphi$.\\
$C \Vdash^{-} \neg \varphi$ iff $C \Vdash^{+} \varphi$.\\
$C \Vdash^{+} \varphi \vee \psi$ iff $C \Vdash^{+} \varphi$ or $C \Vdash^{+} \psi$.\\
$C \Vdash^{-} \varphi \vee \psi$ iff $C \Vdash^{-} \varphi$ and $C \Vdash^{-} \psi$.\\
$C \Vdash^{+} \varphi \wedge \psi$ iff $C \Vdash^{+} \varphi$ and $C \Vdash^{+} \psi$.\\
$C \Vdash^{-} \varphi \wedge \psi$ iff $C \Vdash^{-} \varphi$ or $C \Vdash^{-} \psi$.\\
$C \Vdash^{+} \varphi \rightarrow \psi$ iff $D \Vdash^{+} \psi$ for all nonempty $D \subseteq C$, such that $D \Vdash^{+} \varphi$.\\
$C \Vdash^{-} \varphi \rightarrow \psi$ iff $D \Vdash^{-} \psi$ for some nonempty $D \subseteq C$, such that $D \Vdash^{+} \varphi$.\\
\end{tabular}
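Since a context over finitely many atomic formulas is a finite object, the clauses above can be evaluated mechanically. The following toy evaluator is our own sketch (it is not part of the formal development; the tuple encoding of formulas and the function names are our choices): a possible world is represented by the set of atomic formulas true in it, and a context by a nonempty collection of such worlds.
\begin{verbatim}
from itertools import chain, combinations

# Formulas as nested tuples:
#   ('atom', 'p')                            atomic formula
#   ('~', a), ('cap', a, b),                 extensional negation, conjunction,
#   ('cup', a, b), ('sup', a, b)             disjunction, implication
#   ('neg', f), ('and', f, g),               intensional negation, conjunction,
#   ('or', f, g), ('imp', f, g)              disjunction, implication

def is_L(f):
    """Formulas of the language L (extensional connectives only)."""
    if f[0] == 'atom':
        return True
    if f[0] == '~':
        return is_L(f[1])
    if f[0] in ('cap', 'cup', 'sup'):
        return is_L(f[1]) and is_L(f[2])
    return False

def truth(w, a):
    """Classical truth of an L-formula at a world w."""
    op = a[0]
    if op == 'atom':
        return a[1] in w
    if op == '~':
        return not truth(w, a[1])
    if op == 'cap':
        return truth(w, a[1]) and truth(w, a[2])
    if op == 'cup':
        return truth(w, a[1]) or truth(w, a[2])
    return (not truth(w, a[1])) or truth(w, a[2])   # 'sup'

def subcontexts(C):
    """All nonempty subsets of the context C (fine for toy examples)."""
    C = list(C)
    return chain.from_iterable(combinations(C, k)
                               for k in range(1, len(C) + 1))

def assertible(C, f):
    if is_L(f):
        return all(truth(w, f) for w in C)
    op = f[0]
    if op == 'neg':
        return deniable(C, f[1])
    if op == 'or':
        return assertible(C, f[1]) or assertible(C, f[2])
    if op == 'and':
        return assertible(C, f[1]) and assertible(C, f[2])
    if op == 'imp':
        return all(assertible(D, f[2]) for D in subcontexts(C)
                   if assertible(D, f[1]))
    raise ValueError(f)

def deniable(C, f):
    if is_L(f):
        return all(not truth(w, f) for w in C)
    op = f[0]
    if op == 'neg':
        return assertible(C, f[1])
    if op == 'or':
        return deniable(C, f[1]) and deniable(C, f[2])
    if op == 'and':
        return deniable(C, f[1]) or deniable(C, f[2])
    if op == 'imp':
        return any(assertible(D, f[1]) and deniable(D, f[2])
                   for D in subcontexts(C))
    raise ValueError(f)
\end{verbatim}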
The consequence relation is defined as assertibility preservation. That is, a set of $L^*$-formulas $\Delta$ entails an $L^*$-formula $\psi$ (symbolically $\Delta \vDash \psi$) if $\psi$ is assertible in every context in which all formulas from $\Delta$ are assertible. This consequence relation represents what we here call \textit{the Logic of Assertibility and Deniability} (\textsf{LAD}). We say that two $L^*$-formulas, $\varphi$ and $\psi$, are (logically) equivalent in this logic (symbolically $\varphi \equiv \psi$) if they are assertible in the same contexts, that is, if $\varphi \vDash \psi$ and $\psi \vDash \varphi$. They are strongly equivalent (symbolically $\varphi \rightleftharpoons \psi$) if they are not only assertible but also deniable in the same contexts, that is, if $\varphi \equiv \psi$ and $\neg \varphi \equiv \neg \psi$. It can be shown by induction that strongly equivalent formulas are universally interchangeable (this does not hold for mere equivalence). Note that Gauker's original semantics from \cite{gauker05} does not have this property.
\begin{proposition}
Assume that $\varphi \rightleftharpoons \psi$, and $\varphi$ occurs as a subformula in $\chi$. If $\chi'$ is the result of replacing an occurrence of the subformula $\varphi$ in $\chi$ with $\psi$ then $\chi \rightleftharpoons \chi'$.
\end{proposition}
Note that the assertibility clause for disjunction formally corresponds to the support condition for inquisitive disjunction used in inquisitive semantics (see \cite{ciardelliroelofsen11}). However, in contrast to inquisitive semantics we do not treat intensional disjunction as a question generating operator but as a statement generating operator (e.g., the formula $(r \rightarrow t) \vee (s \rightarrow t)$ in the conclusion of our example does not represent a question but a statement). The assertibility clauses for conjunction and implication also correspond to the support clauses for these connectives in inquisitive semantics. Technically speaking, if negation is omitted, \textsf{LAD} just corresponds to inquisitive logic (which is axiomatized for example in \cite{ciardelliroelofsen11}). What is different from the standard inquisitive logic is our treatment of negation via the deniability conditions. The deniability conditions for (intensional) negation, disjunction and conjunction are the standard ones, typically used in a bilateralist setting (see, e.g., \cite{odintsov13}). The most tricky case is the deniability condition for implication. There are two natural candidates for an alternative deniability clause:
\begin{tabular}{ll}
(A) & $C \Vdash^{-} \varphi \rightarrow \psi$ iff $C \Vdash^{+} \varphi$ and $C \Vdash^{-} \psi$.\\
(B) & $C \Vdash^{-} \varphi \rightarrow \psi$ iff $D \Vdash^{-} \psi$ for all nonempty $D \subseteq C$, such that $D \Vdash^{+} \varphi$.\\
\end{tabular}
The option (A) is known from Nelson logic (see \cite{odintsov13}). However, this option licenses the inference from $\neg (\varphi \rightarrow \psi)$ to $\varphi$ (and also to $\neg \psi$), which we found highly problematic from the natural language point of view (consider the following argument: \textit{It is not the case that if I die today, I will be living tomorrow. Therefore I will die today.}). The option (B) looks much more plausible. This option is known from connexive logic (see \cite{wansing21}). It makes $\neg (\varphi \rightarrow \psi)$ strongly equivalent to $\varphi \rightarrow \neg \psi$. This is indeed a reasonable way to read negations of conditionals. This clause was explored in more detail within the semantics of assertibility and deniability in \cite{puncochar14}.
In our current setting the clause (B) would allow us to completely eliminate intensional negations from any $L^*$-formula. Since we also have double negation and DeMorgan laws guaranteed by the other semantic clauses, we could push all the intensional negations occurring in a formula to the subformulas that are already in the language $L$. For example, in the formula $\neg (\neg (p \wedge {\sim}s) \rightarrow (p \cup {\sim}q))$ we could push the negations inside the formula in the following way: $(\neg p \vee \neg{\sim}s) \rightarrow \neg (p \cup {\sim}q)$. Moreover, observe that for any $L$-formula $\alpha$ we have $\neg \alpha \rightleftharpoons {\sim}\alpha$, so when pushed so that it applies to an $L$-formula, intensional negation can be replaced with extensional negation (we would obtain $({\sim}p \vee {\sim}{\sim}s) \rightarrow {\sim}(p \cup {\sim}q)$, in our example). Hence, if the deniability clause (B) for implication is employed, every occurrence of intensional negation in any $L^*$-formula can be dissolved and thus intensional negation does not add any extra expressive power.
Aside from making intensional negation redundant, the clause (B) has one other questionable consequence. If this clause is employed, there are formulas that are assertible as well as deniable in some contexts. There are even formulas that are assertible as well as deniable in every context, and thus there are formulas of the form $\varphi \wedge \neg \varphi$ that are regarded as logically valid. A concrete example is obtained when we substitute $(p \wedge \neg p) \rightarrow (p \vee \neg p)$ for $\varphi$. Hence, the resulting logic is not only paraconsistent but it is in this sense inconsistent (see \cite{wansing21} for a more detailed discussion of this phenomenon). From a certain point of view, this need not be regarded as a substantial problem. However, in this paper, we want to maintain consistency and thus we reject clause (B) in favour of the clause stated above, according to which a conditional is deniable in a context $C$ if and only if there is some subcontext of $C$ in which the antecedent is assertible and the consequent is deniable. This deniability clause corresponds to the one proposed in Gauker's book \cite{gauker05}. It can be viewed as a weakening of the strong clause (A) in the sense that $\neg (\varphi \rightarrow \psi)$ is not equivalent to $\varphi \wedge \neg \psi$ but rather to a mere possibility of $\varphi \wedge \neg \psi$. This way of denying conditionals seems to have strong support from natural language. For example, with respect to the context above, one can deny the claim \textit{if the murderer is tall, he has dark hair} because it is possible (from the perspective of the context) that the murderer is tall and does not have dark hair. In comparison with (B), we can observe that the clause we have adopted allows us to prove by induction the following fact.
\begin{proposition}
There is no context $C$ and no $L^*$-formula $\varphi$ such that $C \Vdash^+ \varphi$ and $C \Vdash^- \varphi$.
\end{proposition}
Another feature that distinguishes our deniability clause for implication from (B) is that it leads to the classical behaviour of intensional connectives in the special case where the context contains only one world. Thus in singleton contexts the assertibility and deniability clauses for intensional connectives coincide with truth and falsity clauses for the extensional connectives. To express this fact more formally, we will use the following notation. For any $L^*$-formula $\varphi$, $\varphi^{e}$ denotes the $L$-formula which is the result of replacing all intensional connectives in $\varphi$ with their extensional counterparts. Then the following fact can be easily established by induction.
\begin{proposition}\label{p3}
$\left\{w\right\} \Vdash^{+} \varphi$ iff $\varphi^{e}$ is true in $w$, and $\left\{w\right\} \Vdash^{-} \varphi$ iff $\varphi^{e}$ is false in $w$.
\end{proposition}
Since the truth conditions for the extensional connectives are the standard ones, the consequence relation restricted to the language $L$ is classical.
\begin{proposition}
For the formulas of the language $L$ the consequence relation coincides with the consequence relation of classical logic.
\end{proposition}
Of course, in general intensional and extensional connectives behave differently. Let us illustrate this with the murder scenario described above. Let $A$ be the set of atomic formulas $\{p, q, r, s, t \}$. Consider the following formalization and an $A$-context consisting of four possible $A$-worlds that differ from each other on who among the suspects committed the murder.
\begin{tabular}{llcccc}
&& $w_1$ & $w_2$ & $w_3$ & $w_4$ \\
\textit{The murderer is tall.} &$p$ & 1 & 1 & 0 & 0 \\
\textit{The murderer is short.} &$q$ & 0 & 0 & 1 & 1 \\
\textit{The murderer has dark hair.} &$r$ & 1 & 0 & 0 & 1\\
\textit{The murderer has blond hair.} &$s$ & 0 & 1 & 1 & 0 \\
\textit{The murderer has a moustache.} &$t$ & 1 & 0 & 1 & 0 \\
\end{tabular}
Now one can check that in this context all the premises of the argument, namely the formulas $p \cup q$, $p \rightarrow (r \rightarrow t)$, and $q \rightarrow (s \rightarrow t)$, are assertible but the conclusion $(r \rightarrow t) \vee (s \rightarrow t)$ is not.
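This can also be verified mechanically with the toy evaluator sketched above (again our own illustration, not part of the paper):
\begin{verbatim}
w1, w2, w3, w4 = map(frozenset,
                     [{'p','r','t'}, {'p','s'}, {'q','s','t'}, {'q','r'}])
C = (w1, w2, w3, w4)

p, q, r, s, t = (('atom', x) for x in 'pqrst')
premises = [('cup', p, q),                    # p u q
            ('imp', p, ('imp', r, t)),        # p -> (r -> t)
            ('imp', q, ('imp', s, t))]        # q -> (s -> t)
conclusion = ('or', ('imp', r, t), ('imp', s, t))

print([assertible(C, f) for f in premises])   # [True, True, True]
print(assertible(C, conclusion))              # False
\end{verbatim}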
Our deniability condition for conditionals is an important ingredient of the semantics because it increases the expressive power of the language and introduces a specific kind of non-persistent formulas. We say that a formula is \textit{persistent} if it holds that whenever it is assertible in a context, it is assertible in every subcontext (i.e. in every nonempty subset) of the context. It can be proved by induction that every $L^*$-formula that does not contain any intensional implication in the scope of an intensional negation is persistent.
This claim does not hold generally. For example, for the above context $C=\{ w_1, w_2, w_3, w_4 \}$ we have $C \Vdash^+ \neg (p \rightarrow q)$ because there is a subcontext $D=\{w_1, w_2 \}$ such that $D \Vdash^+ p$ and $D \Vdash^- q$. However, there is a subcontext $E \subseteq C$, e.g. $E = \{w_3, w_4 \}$, such that $E \nVdash^+ \neg (p \rightarrow q)$.
Note that every formula of the form $\varphi \rightarrow \psi$ is also persistent, even if it contains an occurrence of intensional implication in the scope of intensional negation. We would like to say that persistent formulas play a substantially different role in inferences than non-persistent formulas. However, the notion of a persistent formula was defined \textit{semantically} and thus we cannot use it directly in the formulation of a syntactic deductive system. Nevertheless, we just specified \textit{syntactically} a large class of formulas that have this semantic property. We will call them safe. We say that a formula is \textit{safe} if either it does not contain any $\rightarrow$ in the scope of $\neg$, or it is of the form $\varphi \rightarrow \psi$.
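In the toy encoding used above, this syntactic safety check becomes a simple recursive predicate (our own sketch):
\begin{verbatim}
def imp_under_neg(f, inside_neg=False):
    """Does f contain an intensional implication in the scope of neg?"""
    op = f[0]
    if op == 'atom':
        return False
    if op == 'imp' and inside_neg:
        return True
    if op == 'neg':
        return imp_under_neg(f[1], True)
    if op == '~':
        return imp_under_neg(f[1], inside_neg)
    # remaining binary connectives: cap, cup, sup, and, or, imp
    return (imp_under_neg(f[1], inside_neg)
            or imp_under_neg(f[2], inside_neg))

def is_safe(f):
    return f[0] == 'imp' or not imp_under_neg(f)
\end{verbatim}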
\begin{proposition}\label{l: persistence of safe formulas}
Every safe formula is persistent.
\end{proposition}
Safe formulas will play an important role in the formulation of our system of natural deduction. The subsequent completeness proof will indicate that this syntactic notion sufficiently approximates the notion of a persistent formula.
Now we will show that by the extra expressive power gained from the deniability clause for implication we obtain functional completeness. To describe this fact more precisely, we will use the following defined symbols that will also be used later in the system of natural deduction.
\begin{tabular}{ll}
(a) & $\bot =_{def} p \cap {\sim} p$, for some selected fixed atomic formula $p$,\\
(b) & $\lozenge \varphi =_{def} \neg (\varphi\rightarrow \bot)$,\\
(c) & $\alpha_1 \oplus \ldots \oplus \alpha_n =_{def} (\alpha_1 \cup \ldots \cup \alpha_n) \wedge (\lozenge \alpha_1 \wedge \ldots \wedge \lozenge \alpha_n)$.
\end{tabular}
The symbol $\bot$ represents a \textit{contradiction}. It can be easily observed that $C \Vdash^- \bot$, for every context $C$. The symbol $\lozenge$ expresses a contextual possibility. It holds that $C \Vdash^+ \lozenge \varphi$ iff there is a nonempty $D \subseteq C$ such that $D \Vdash^+ \varphi$. Finally, $\oplus$ represents a ``pragmatic disjunction'', i.e. an extensional disjunction but such that all its disjuncts are possible (which is usually regarded as a pragmatic feature of disjunction).
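In detail, the claim about $\lozenge$ is obtained just by unfolding the definitions: $C \Vdash^+ \lozenge \varphi$ iff $C \Vdash^+ \neg(\varphi \rightarrow \bot)$ iff $C \Vdash^- \varphi \rightarrow \bot$ iff there is a nonempty $D \subseteq C$ such that $D \Vdash^+ \varphi$ and $D \Vdash^- \bot$; since $D \Vdash^- \bot$ holds for every context $D$, this amounts to the existence of a nonempty $D \subseteq C$ with $D \Vdash^+ \varphi$.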
If a finite set $A$ of atomic formulas is fixed, any $A$-world $w$ can be described by an $L$-formula $\sigma_w$ in the usual way. For example, in the murder scenario above, the formula $\sigma_{w_1}$ would be $p \cap {\sim}q \cap r \cap {\sim}s \cap t$. Now any $A$-context $C=\{w_1, \ldots, w_n \}$ can be described by the formula $\mu_C=\sigma_{w_1} \oplus \ldots \oplus \sigma_{w_n}$. Moreover, any set of $A$-contexts $X=\{ C_1, \ldots, C_k \}$ can be described by the formula $\xi_X=\mu_{C_1} \vee \ldots \vee \mu_{C_k}$. Now we can state the functional completeness result.
\begin{proposition}
Let $A$ be a finite set of atomic formulas. Then it holds for any $A$-contexts $C, D$ that $C \Vdash^+ \mu_D$ iff $D=C$. Moreover, it holds for any set of $A$-contexts $X$, and any $A$-context $C$ that $C \Vdash^+ \xi_X$ iff $C \in X$.
\end{proposition}
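Continuing our toy illustration (our own sketch, reusing the evaluator and the murder-scenario context $C=\{w_1,w_2,w_3,w_4\}$ from above; \texttt{BOT}, \texttt{sigma} and \texttt{mu} are our names for $\bot$, $\sigma_w$ and $\mu_C$), the first part of the proposition can be checked for that context:
\begin{verbatim}
BOT = ('cap', ('atom', 'p'), ('~', ('atom', 'p')))   # the fixed contradiction

def diamond(f):                                      # <>f := neg(f -> bot)
    return ('neg', ('imp', f, BOT))

def sigma(w, atoms):                                 # classical description of w
    f = None
    for a in atoms:
        lit = ('atom', a) if a in w else ('~', ('atom', a))
        f = lit if f is None else ('cap', f, lit)
    return f

def mu(C, atoms):                                    # sigma_{w1} (+) ... (+) sigma_{wn}
    descs = [sigma(w, atoms) for w in C]
    disj, poss = descs[0], diamond(descs[0])
    for d in descs[1:]:
        disj = ('cup', disj, d)
        poss = ('and', poss, diamond(d))
    return ('and', disj, poss)

atoms = ['p', 'q', 'r', 's', 't']
print(assertible(C, mu(C, atoms)))                   # True
print(assertible((w1, w2), mu(C, atoms)))            # False
print(assertible(C, mu((w1, w2), atoms)))            # False
\end{verbatim}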
A similar result was observed in \cite{puncochar15} for inquisitive logic with an operation called ``weak negation''. In fact, the crucial part of the proof of our main result will be a syntactic reconstruction of weak negation within our system of natural deduction.
\section{Deductive calculus}
In this section we define a Fitch style system of natural deduction for the semantics of assertibility and deniability. We will use the following economic notation. We use brackets and the colon to refer to subproofs. For example, $[\varphi: \psi]$ stands for a subproof in which $\varphi$ is the hypothetical assumption and $\psi$ is the last line of the subproof. We use two kinds of brackets, square and round. The difference is in the formulas from the outer proof that can be used in the subproof. By using square brackets in $[\varphi: \psi]$ we indicate that all formulas that are available in the step before the hypothetical assumption $\varphi$ is made are also available under this assumption in the derivation of $\psi$ from $\varphi$. This is just as in the standard natural deduction systems for classical and intuitionistic logic. The round brackets indicate that there is a restriction concerning the formulas from the outer proof that can be used under the assumption. By writing $(\varphi: \psi)$, we indicate that only \textit{safe} formulas from the outer proof can be used in the derivation of $\psi$ from $\varphi$. (Safe formulas are defined in the previous section.)
The distinction between square and round brackets reflects the semantic distinction between two kinds of hypothetical assumption. Square brackets, i.e. $[\varphi: \psi]$, indicate that by making the hypothetical assumption $\varphi$ in a context $C$ we do not change the given context, we only assume that the assumption $\varphi$ is assertible in $C$. In contrast, round brackets, i.e. $(\varphi: \psi)$, indicate that by making the hypothetical assumption $\varphi$ in a context $C$ we move from $C$ to an arbitrary subcontext $D$ of $C$ in which $\varphi$ is assertible. If we have already proved that a formula is assertible in $C$, we can use it under the assumption $\varphi$ (in $D$) only if it is guaranteed that it is persistent. Proposition \ref{l: persistence of safe formulas} guarantees that all safe formulas are persistent.
We split the rules of the calculus into three groups. The first group contains the ``classical'' introduction and elimination rules for the extensional connectives.
\begin{tabular}{llll}
(i$\cap$) & $\alpha, \beta / \alpha \cap \beta$ & (e$\cap_1$) & $\alpha \cap \beta / \alpha$ \\
&& (e$\cap_2$) & $\alpha \cap \beta / \beta$ \\[0,3cm]
(i$\cup_1$) & $\alpha / \alpha \cup \beta$ & (e$\cup$) & $\alpha \cup \beta, (\alpha:\gamma), (\beta:\gamma) / \gamma$\\
(i$\cup_2$) & $\beta / \alpha \cup \beta$ && \\[0,3cm]
(i$\supset$) & $(\alpha:\beta)/\alpha \supset \beta$ & (e$\supset$) & $\alpha, \alpha \supset \beta / \beta$\\[0,3cm]
(i$\sim$) & $(\alpha:\bot)/{\sim}\alpha$ & (e$\sim_1$) & $\alpha, {\sim}\alpha/\bot$\\
&& (e$\sim_2$) & ${\sim}{\sim}\alpha/\alpha$\\
\end{tabular}
\noindent The second group contains the following ``intuitionistic'' rules concerning the intensional connectives (but notice the restrictions indicated by round brackets and the fact that (i$\neg$) is restricted to $L$-formulas):
\begin{tabular}{llll}
(i$\wedge$) & $\varphi,\psi/\varphi \wedge \psi$ & (e$\wedge_1$) & $\varphi \wedge \psi/ \varphi$ \\
&& (e$\wedge_2$) & $\varphi \wedge \psi/ \psi$ \\[0,3cm]
(i$\vee_1$) & $\varphi/ \varphi \vee \psi$ & (e$\vee$) & $\varphi \vee \psi, [\varphi:\chi], [\psi:\chi] / \chi$\\
(i$\vee_2$) & $\psi/ \varphi \vee \psi$ && \\[0,3cm]
(i$\rightarrow$) & $(\varphi:\psi)/\varphi \rightarrow \psi$ & (e$\rightarrow$) & $\varphi, \varphi \rightarrow \psi/ \psi$\\[0,3cm]
(i$\neg$) & $(\alpha:\bot)/\neg \alpha$ & (e$\neg$) & $\varphi, \neg \varphi / \bot$\\
&& (EFQ) & $\bot /\varphi$
\end{tabular}
The third group consists of the rules that characterize the interaction of intensional negation with all intensional operators, plus two extra rules, (CEM), i.e. ``contextual excluded middle'', and ($\lozenge\oplus$):
\begin{tabular}{llll}
($\neg\neg_1$) & $\neg \neg \varphi / \varphi$ & ($\neg\neg_2$) & $\varphi/ \neg \neg \varphi$ \\[0,1cm]
($\neg\wedge_1$) & $\neg (\varphi \wedge \psi) / \neg \varphi \vee \neg \psi$ & ($\neg\wedge_2$) & $\neg \varphi \vee \neg \psi / \neg (\varphi \wedge \psi)$ \\[0,1cm]
($\neg\vee_1$) & $\neg (\varphi \vee \psi) / \neg \varphi \wedge \neg \psi$ & ($\neg\vee_2$) & $\neg \varphi \wedge \neg \psi / \neg (\varphi \vee \psi)$ \\[0,1cm]
($\neg{\rightarrow}_1$) & $\neg (\varphi \rightarrow \psi) / \lozenge (\varphi \wedge \neg \psi)$ & ($\neg{\rightarrow}_2$) & $\lozenge (\varphi \wedge \neg \psi)/ \neg (\varphi \rightarrow \psi)$ \\[0,1cm]
(CEM) & $/ (\varphi \rightarrow \bot) \vee \lozenge \varphi$ & ($\lozenge\oplus$) & $\lozenge \alpha_{1} \wedge \ldots \wedge \lozenge \alpha_{n}/\lozenge (\alpha_{1} \oplus \ldots \oplus \alpha_{n})$ \\[0,1cm]
\end{tabular}
We will write $\varphi_1, \ldots, \varphi_n \vdash \psi$ if $\psi$ is derivable in this system from the assumptions $\varphi_1, \ldots, \varphi_n$.
As we already mentioned, our semantics has some connection to inquisitive semantics. Note, however, that our deductive system is very different from the standard system of natural deduction for inquisitive logic (see, e.g., \cite{ciardelli16}), though it has some similarities with the system for inquisitive logic with weak negation developed in \cite{puncochar15}.
We can illustrate the role of restrictions related to round brackets with the following example. If the restriction given by the round brackets were not present we could derive, for example, the contradiction $\bot$ from the premise $\lozenge p \wedge \lozenge {\sim} p$ in the following way:
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\lozenge p \wedge \lozenge {\sim} p = \neg (p \rightarrow \bot) \wedge \neg ({\sim} p \rightarrow \bot) & premise \\
p \cup {\sim} p & the standard derivation of excluded middle \\
\fh p & hypothetical assumption \\
\vline\hspace*{\fitchindent} \fh {\sim} p & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \bot & 3,4, (e${\sim}_1$) \\
\vline\hspace*{\fitchindent} {\sim} p \rightarrow \bot & 4-5, (i$\rightarrow$) \\
\vline\hspace*{\fitchindent} \neg ({\sim} p \rightarrow \bot) & 1, (e$\wedge_2$) \\
\vline\hspace*{\fitchindent} \bot & 6,7, (e$\neg$) \\
\fh {\sim} p & hypothetical assumption \\
\vline\hspace*{\fitchindent} \fh p & hypothetical assumption \\%10
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \bot & 9,10, (e${\sim}_1$) \\
\vline\hspace*{\fitchindent} p \rightarrow \bot & 10-11, (i$\rightarrow$) \\
\vline\hspace*{\fitchindent} \neg (p \rightarrow \bot) & 1, (e$\wedge_1$) \\
\vline\hspace*{\fitchindent} \bot & 12,13, (e$\neg$) \\
\bot & 2,3-8,9-14, (e$\cup$) \\
\end{fitch}
\end{itemize}}
This would be an undesirable result. In our semantics, $\lozenge p \wedge \lozenge {\sim} p$ does not entail $\bot$ which accords with the intuition that $p$ and ${\sim} p$ can consistently be both possible. As it stands, the derivation is incorrect in the system, because in the steps 7 and 13 the restriction on the rule (e$\cup$) was not respected and the unsafe formula $\lozenge p \wedge \lozenge {\sim} p$ occurring in the outer proof was used under the hypothetical assumptions.
We can observe that all rules of the system are sound with respect to the semantics. To illustrate how the soundness proof goes let us consider the rules (e$\cup$) and (e$\vee$). Let $\Delta^s$ denote the set of safe formulas from $\Delta$. Soundness of the two rules corresponds respectively to the following two semantic facts:
\begin{itemize}
\item[(a)] If $\Delta^s, \alpha \vDash \gamma$ and $\Delta^s, \beta \vDash \gamma$ then $\Delta, \alpha \cup \beta \vDash \gamma$.
\item[(b)] If $\Delta, \varphi \vDash \chi$ and $\Delta, \psi \vDash \chi$ then $\Delta, \varphi \vee \psi \vDash \chi$.
\end{itemize}
In order to prove (a), assume that $\Delta^s, \alpha \vDash \gamma$ and $\Delta^s, \beta \vDash \gamma$. Let $C$ be a context in which all formulas from $\Delta$ and the formula $\alpha \cup \beta$ are assertible. Take an arbitrary possible world $w \in C$. All formulas of $\Delta^s$ are assertible in $\{w\}$ (due to persistence of safe formulas). Moreover, $\alpha$ or $\beta$ is assertible in $\{w\}$. It follows from our assumption that $\gamma$ is assertible in $\{w\}$. Since this holds for every $w \in C$, and $\gamma$ is an $L$-formula, we obtain $C \Vdash^+ \gamma$, as required.
In order to prove (b), assume that $\Delta, \varphi \vDash \chi$ and $\Delta, \psi \vDash \chi$. Let $C$ be a context in which all formulas from $\Delta$ and the formula $\varphi \vee \psi$ are assertible. Then $\varphi$ or $\psi$ is assertible in $C$. It follows from our assumption that $\chi$ is assertible in $C$, as required.
\section{Completeness}
The proof of completeness proceeds in the following steps. First a contextual weak negation $-\varphi$ is defined recursively. This negation is a denial of assertibility, that is, $-\varphi$ is assertible in a given context iff $\varphi$ is not assertible in that context. It has to be shown that this negation behaves properly also on the syntactic side. That means that the following holds: $\Delta \nvdash \varphi$ if and only if $\Delta \cup \{-\varphi\}$ is consistent, i.e. $\Delta, -\varphi \nvdash \bot$. The proof of this fact will be the main task of this section. The reconstruction of weak negation allows us to reduce completeness to the claim that every consistent set has a model. We will only sketch the proof of this claim because the technique is basically the same as the one used in the completeness proof for inquisitive logic with weak negation from \cite{puncochar15}. The ``contextual weak negation'' is defined by the following five recursive clauses. The first clause states the definition for $L$-formulas and their intensional negations:
\begin{tabular}{lll}
1. & $-\alpha=\lozenge \neg \alpha$, & $-\neg \alpha = \lozenge \alpha$.\\
\end{tabular}
In the clauses 2.-5. we assume that $\varphi, \psi$ are arbitrary $L^*$-formulas for which $-\varphi, -\neg \varphi, -\psi, -\neg\psi$ are already defined, and we further define:
\begin{tabular}{lll}
2. & $-\neg \neg \varphi = - \varphi$.\\
3. & (a) $-(\varphi \rightarrow \psi)=\lozenge (\varphi \wedge -\psi)$, & (b) $-\neg(\varphi \rightarrow \psi)=\varphi \rightarrow -\neg \psi$.\\
4. & (a) $-(\varphi \wedge \psi)=-\varphi \vee -\psi$, & (b) $-\neg (\varphi \wedge \psi)=-\neg \varphi \wedge -\neg \psi$. \\
5. & (a) $-(\varphi \vee \psi)=-\varphi \wedge -\psi$, & (b) $-\neg (\varphi \vee \psi)=-\neg \varphi \vee -\neg \psi$.
\end{tabular}
Note that by these clauses $-\varphi$ is indeed defined for every $L^*$-formula $\varphi$. By induction on $\varphi$, we obtain the following observation.
\begin{lemma}\label{l: ass of minus is the lack of ass}
For any $L^*$-formula $\varphi$ and any context $C$, it holds that
\begin{itemize}
\item[] $C \Vdash^{+} -\varphi$ iff $C \nVdash^{+} \varphi$.
\end{itemize}
\end{lemma}
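For concreteness, the recursion and the lemma can be checked in the toy encoding from Section \ref{extint} (our own sketch, reusing the evaluator and the helper \texttt{diamond} introduced there):
\begin{verbatim}
def weak_neg(f):
    """Contextual weak negation -f, following clauses 1-5."""
    if is_L(f):                                # clause 1: -alpha
        return diamond(('neg', f))
    op = f[0]
    if op == 'neg':
        g = f[1]
        if is_L(g):                            # clause 1: -neg alpha
            return diamond(g)
        if g[0] == 'neg':                      # clause 2
            return weak_neg(g[1])
        if g[0] == 'imp':                      # clause 3(b)
            return ('imp', g[1], weak_neg(('neg', g[2])))
        if g[0] == 'and':                      # clause 4(b)
            return ('and', weak_neg(('neg', g[1])), weak_neg(('neg', g[2])))
        if g[0] == 'or':                       # clause 5(b)
            return ('or', weak_neg(('neg', g[1])), weak_neg(('neg', g[2])))
    if op == 'imp':                            # clause 3(a)
        return diamond(('and', f[1], weak_neg(f[2])))
    if op == 'and':                            # clause 4(a)
        return ('or', weak_neg(f[1]), weak_neg(f[2]))
    if op == 'or':                             # clause 5(a)
        return ('and', weak_neg(f[1]), weak_neg(f[2]))

# The conclusion of the murder-scenario argument is not assertible in C,
# so by the lemma above its weak negation is:
print(assertible(C, weak_neg(conclusion)))     # True
\end{verbatim}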
We say that a context $C$ is a model of a set of formulas $\Delta$ if and only if every formula from $\Delta$ is assertible in $C$. The following lemma follows directly from Lemma \ref{l: ass of minus is the lack of ass}.
\begin{lemma}\label{l: entailment and models}
$\Delta \nvDash \varphi$ iff $\Delta \cup \{-\varphi\}$ has a model.
\end{lemma}
The next lemma is a cornerstone of the completeness proof and the main technical issue of this paper.
\begin{lemma}\label{l: excluded middle}
For any formula $\varphi$, the following holds:
\begin{itemize}
\item[(a)] $\vdash \varphi \vee - \varphi$,
\item[(b)] $\varphi, -\varphi \vdash \bot$.
\end{itemize}
\end{lemma}
\begin{proof}
(a) We will proceed by simultaneous induction on $\varphi$ and $\neg \varphi$. In the derivations below, whenever we use a safe formula in a context in which it is required to use only safe formulas, we indicate this in the corresponding annotation.
1. Assume that $\alpha$ is from $L$. We will derive $\alpha \vee -\alpha$, i.e. $\alpha \vee \neg (\neg \alpha \rightarrow \bot)$. The derivation of $\neg \alpha \vee -\neg \alpha$ is similar.
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
(\neg \alpha \rightarrow \bot) \vee \neg (\neg \alpha \rightarrow \bot) & (CEM) \\
\fh \neg \alpha \rightarrow \bot & hypothetical assumption \\
\vline\hspace*{\fitchindent} \fh {\sim}\alpha & hypothetical assumption\\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \fh \alpha & hypothetical assumption\\%4
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \bot & 3,4, (e$\sim_1$)\\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \neg \alpha & 4-5, (i$\neg$) \\%6
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \bot & 2-safe,6, (e$\rightarrow$) \\
\vline\hspace*{\fitchindent} {\sim}{\sim}\alpha & 3-7, (i$\sim$) \\
\vline\hspace*{\fitchindent} \alpha & 8, (e$\sim_2$)\\
\vline\hspace*{\fitchindent} \alpha \vee \neg (\neg \alpha \rightarrow \bot) & 9, (i$\vee_1$) \\
\fh \neg (\neg \alpha \rightarrow \bot) & hypothetical assumption \\
\vline\hspace*{\fitchindent} \alpha \vee \neg (\neg \alpha \rightarrow \bot) & 11, (i$\vee_2$)\\
\alpha \vee \neg (\neg \alpha \rightarrow \bot) & 1,2-10,11-12, (e$\vee$) \\
\end{fitch}
\end{itemize}}
In the cases 2.-5. we assume as an inductive hypothesis that our claim holds for $\varphi, \neg \varphi, \psi, \neg \psi$.
2. It is easy to derive $\neg \neg \varphi \vee -\neg \neg \varphi$ from $\varphi \vee -\varphi$, by (e$\vee$), (i$\vee_1$), (i$\vee_2$), and ($\neg\neg_2$).
3. We show that our claim holds for $\varphi \rightarrow \psi$ and $\neg (\varphi \rightarrow \psi)$. First, we prove $\vdash (\varphi \rightarrow \psi) \vee -(\varphi \rightarrow \psi)$, i.e. $\vdash (\varphi \rightarrow \psi) \vee \lozenge (\varphi \wedge -\psi)$, which can be done in the following way:
{\footnotesize\begin{itemize}
\item[]
\begin{fitch}
((\varphi \wedge -\psi) \rightarrow \bot ) \vee \lozenge (\varphi \wedge -\psi) & (CEM) \\
\fh (\varphi \wedge -\psi) \rightarrow \bot & hypothetical assumption \\
\vline\hspace*{\fitchindent} \fh \varphi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \psi \vee - \psi & induction hypothesis \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \fh \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \fh - \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \varphi \wedge - \psi & 3,6, (i$\wedge$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \bot & 2-safe,7, (e$\rightarrow$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \psi & 8, (EFQ) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \psi & 4,5,6-9, (e$\vee$) \\
\vline\hspace*{\fitchindent} \varphi \rightarrow \psi & 3-10, (i$\rightarrow$) \\
\vline\hspace*{\fitchindent} (\varphi \rightarrow \psi) \vee \lozenge (\varphi \wedge -\psi) & 11, (i$\vee_1$) \\
\fh \lozenge (\varphi \wedge -\psi) & hypothetical assumption \\
\vline\hspace*{\fitchindent} (\varphi \rightarrow \psi) \vee \lozenge (\varphi \wedge -\psi) & 13, (i$\vee_2$) \\
(\varphi \rightarrow \psi) \vee \lozenge (\varphi \wedge -\psi) & 1,2-12,13-14, (e$\vee$) \\
\end{fitch}
\end{itemize}}
Now we prove $\vdash \neg (\varphi \rightarrow \psi) \vee - \neg (\varphi \rightarrow \psi)$, i.e. $\neg (\varphi \rightarrow \psi) \vee (\varphi \rightarrow - \neg \psi)$:
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
((\varphi \wedge \neg \psi) \rightarrow \bot ) \vee \lozenge (\varphi \wedge \neg \psi) & (CEM) \\
\fh (\varphi \wedge \neg \psi) \rightarrow \bot & hypothetical assumption \\
\vline\hspace*{\fitchindent} \fh \varphi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \neg \psi \vee -\neg \psi & induction hypothesis \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \fh \neg \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \varphi \wedge \neg \psi & 3,5, (i$\wedge$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \bot & 2-safe,6, (e$\rightarrow$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} - \neg \psi & 7, (EFQ) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \fh - \neg \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} - \neg \psi & 4,5-8,9, (e$\vee$) \\
\vline\hspace*{\fitchindent} \varphi \rightarrow -\neg \psi & 3-10, (i$\rightarrow$) \\
\vline\hspace*{\fitchindent} \neg (\varphi \rightarrow \psi) \vee (\varphi \rightarrow - \neg \psi) & 11, (i$\vee_2$) \\
\fh \lozenge (\varphi \wedge \neg \psi) & hypothetical assumption \\
\vline\hspace*{\fitchindent} \neg (\varphi \rightarrow \psi) & 13, ($\neg{\rightarrow}_2$) \\
\vline\hspace*{\fitchindent} \neg (\varphi \rightarrow \psi) \vee (\varphi \rightarrow - \neg \psi) & 14, (i$\vee_1$) \\
\neg (\varphi \rightarrow \psi) \vee (\varphi \rightarrow - \neg \psi) & 1,2-12,13-15, (e$\vee$) \\
\end{fitch}
\end{itemize}}
4. We prove that our claim holds for $\varphi \wedge \psi$ and $\neg (\varphi \wedge \psi)$. First, we prove the former, i.e. $\vdash (\varphi \wedge \psi) \vee (- \varphi \vee - \psi)$, which can be done by the following derivation:
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\varphi \vee - \varphi & induction hypothesis \\
\fh \varphi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \psi \vee - \psi & induction hypothesis \\
\vline\hspace*{\fitchindent} \fh \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \varphi \wedge \psi & 2,4, (i$\wedge$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} (\varphi \wedge \psi) \vee (-\varphi \vee -\psi) & 5, (i$\vee_1$) \\
\vline\hspace*{\fitchindent} \fh -\psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} (\varphi \wedge \psi) \vee (-\varphi \vee -\psi) & 7, (i$\vee_2$) (twice) \\%8
\vline\hspace*{\fitchindent} (\varphi \wedge \psi) \vee (-\varphi \vee -\psi) & 3, 4-6,7-8, (e$\vee$) \\
\fh - \varphi & hypothetical assumption \\%10
\vline\hspace*{\fitchindent} (\varphi \wedge \psi) \vee (-\varphi \vee -\psi) & 10, (i$\vee_1$), (i$\vee_2$) \\
(\varphi \wedge \psi) \vee (-\varphi \vee -\psi) & 1,2-9,10-11, (e$\vee$) \\
\end{fitch}
\end{itemize}}
Now we prove that $\vdash \neg (\varphi \wedge \psi) \vee (-\neg \varphi \wedge -\neg \psi)$, which can be done by the following derivation:
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\neg \varphi \vee - \neg \varphi & induction hypothesis \\
\fh \neg \varphi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \neg \varphi \vee \neg \psi & 2, (i$\vee_1$) \\
\vline\hspace*{\fitchindent} \neg (\varphi \wedge \psi) & 3, ($\neg\wedge_2$) \\
\vline\hspace*{\fitchindent} \neg (\varphi \wedge \psi) \vee (-\neg \varphi \wedge -\neg \psi) & 4, (i$\vee_1$) \\
\fh -\neg \varphi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \neg \psi \vee - \neg \psi & induction hypothesis \\
\vline\hspace*{\fitchindent} \fh \neg \psi & hypothetical assumption \\%8
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \neg \varphi \vee \neg \psi & 8, (i$\vee_2$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \neg (\varphi \wedge \psi) & 9, ($\neg\wedge_2$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \neg (\varphi \wedge \psi) \vee (-\neg \varphi \wedge -\neg \psi) & 10, (i$\vee_1$) \\
\vline\hspace*{\fitchindent} \fh - \neg \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} (-\neg \varphi \wedge -\neg \psi) & 6,12, (i$\wedge$) \\
\vline\hspace*{\fitchindent} \vline\hspace*{\fitchindent} \neg (\varphi \wedge \psi) \vee (-\neg \varphi \wedge -\neg \psi) & 13, (i$\vee_2$) \\%14
\vline\hspace*{\fitchindent} \neg (\varphi \wedge \psi) \vee (-\neg \varphi \wedge -\neg \psi) & 7,8-11,12-14, (e$\vee$) \\
\neg (\varphi \wedge \psi) \vee (-\neg \varphi \wedge -\neg \psi) & 1,2-5,6-15, (e$\vee$) \\
\end{fitch}
\end{itemize}}
5. The case of $\vee$ is analogous to the case of $\wedge$. This finishes the proof of (a).
(b) We will proceed again by simultaneous induction on $\varphi$ and $\neg \varphi$. 1. Assume that $\alpha$ is any $L$-formula. First, we will derive $\bot$ from $\alpha$ and $-\alpha = \neg (\neg \alpha \rightarrow \bot)$. The derivation of $\bot$ from $\neg \alpha$ and $-\neg \alpha$ is similar.
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\alpha & premise \\
\neg (\neg \alpha \rightarrow \bot) & premise \\
\fh \neg \alpha & hypothetical assumption \\
\vline\hspace*{\fitchindent} \bot & 1-safe,3, (e$\neg$) \\
\neg \alpha \rightarrow \bot & 3-4, (i$\rightarrow$) \\
\bot & 2,5, (e$\neg$) \\
\end{fitch}
\end{itemize}}
2. It is easy to see that if we assume $\varphi, -\varphi \vdash \bot$, then also $\neg \neg \varphi, -\neg \neg \varphi \vdash \bot$, by ($\neg\neg_1$). For the steps 3.-5., assume as an induction hypothesis that our claim holds for some arbitrary $\varphi, \neg \varphi, \psi, \neg \psi$.
3. We prove that the claim holds for $\varphi \rightarrow \psi$, i.e. $\varphi \rightarrow \psi, -(\varphi \rightarrow \psi) \vdash \bot$.
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\varphi \rightarrow \psi & premise \\
-(\varphi \rightarrow \psi)=\neg ((\varphi \wedge - \psi ) \rightarrow \bot) & premise \\
\fh \varphi \wedge - \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \varphi & 3, (e$\wedge_1$) \\
\vline\hspace*{\fitchindent} \psi & 1-safe,4, (e$\rightarrow$) \\
\vline\hspace*{\fitchindent} - \psi & 3, (e$\wedge_2$) \\
\vline\hspace*{\fitchindent} \bot & 5,6, induction hypothesis\\
(\varphi \wedge - \psi) \rightarrow \bot & 3-7, (i$\rightarrow$) \\
\bot & 2,8, (e$\neg$) \\
\end{fitch}
\end{itemize}}
Now we prove that $\neg (\varphi \rightarrow \psi), -\neg (\varphi \rightarrow \psi) \vdash \bot$.
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\neg (\varphi \rightarrow \psi) & premise \\
- \neg (\varphi \rightarrow \psi)= \varphi \rightarrow - \neg \psi & premise \\
\fh \varphi \wedge \neg \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \varphi & 3, (e$\wedge_1$) \\
\vline\hspace*{\fitchindent} - \neg \psi & 2-safe,4, (e$\rightarrow$) \\
\vline\hspace*{\fitchindent} \neg \psi & 3, (e$\wedge_2$) \\
\vline\hspace*{\fitchindent} \bot & 5,6, induction hypothesis\\
(\varphi\wedge \neg \psi) \rightarrow \bot & 3-7, (i$\rightarrow$) \\
\neg ((\varphi \wedge \neg \psi) \rightarrow \bot) & 1, ($\neg{\rightarrow}_1$) \\
\bot & 8,9, (e$\neg$) \\
\end{fitch}
\end{itemize}}
4. We prove that our claim holds for $\varphi \wedge \psi$ and $\neg (\varphi \wedge \psi)$. First, we prove that $\varphi \wedge \psi, -(\varphi \wedge \psi) \vdash \bot$, which can be done by the following derivation:
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\varphi \wedge \psi & premise \\
-\varphi \vee -\psi & premise \\
\fh -\varphi & hypothetical assumption\\
\vline\hspace*{\fitchindent} \varphi & 1, (e$\wedge_1$) \\
\vline\hspace*{\fitchindent} \bot & 3,4, induction hypothesis \\
\fh -\psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} \psi & 1, (e$\wedge_2$) \\
\vline\hspace*{\fitchindent} \bot & 6,7, induction hypothesis \\%8
\bot & 2,3-5,6-8, (e$\vee$) \\
\end{fitch}
\end{itemize}}
Now we prove that $\neg(\varphi \wedge \psi), -\neg (\varphi \wedge \psi) \vdash \bot$, which can be done by the following derivation:
{\footnotesize \begin{itemize}
\item[]
\begin{fitch}
\neg (\varphi \wedge \psi) & premise \\
-\neg \varphi \wedge -\neg \psi & premise \\
\neg \varphi \vee \neg \psi & 1, ($\neg\wedge_1$) \\
\fh \neg \varphi & hypothetical assumption \\
\vline\hspace*{\fitchindent} -\neg \varphi & 2, (e$\wedge_1$) \\
\vline\hspace*{\fitchindent} \bot & 4,5, induction hypothesis \\
\fh \neg \psi & hypothetical assumption \\
\vline\hspace*{\fitchindent} -\neg \psi & 2, (e$\wedge_2$) \\
\vline\hspace*{\fitchindent} \bot & 7,8, induction hypothesis \\%9
\bot & 3,4-6,7-9, (e$\vee$) \\
\end{fitch}
\end{itemize}}
5. The case of $\vee$ is analogous to the case of $\wedge$. This finishes the proof of (b).
\end{proof}
\begin{lemma}\label{l: defined negation and consistency}
$\Delta \nvdash \varphi$ iff $\Delta \cup \{-\varphi\}$ is consistent.
\end{lemma}
\begin{proof}
The left-to-right implication is obtained from Lemma \ref{l: excluded middle}-a, using the rules (e$\vee$) and (EFQ), and the right-to-left implication follows from Lemma \ref{l: excluded middle}-b.
\end{proof}
\begin{lemma}\label{l: consistent sets have model}
Every consistent finite set of $L^*$-formulas has a model.
\end{lemma}
\begin{proof}
We can give only a sketch of the proof here. The strategy is the same as in the analogous proofs in \cite{puncochar15} and \cite{puncochargauker20}. Let $A$ be a finite set of atomic formulas. Recall that for every $A$-context $C$ there is an $L^*$-formula $\mu_C$ characterizing $C$ in the following sense: For every $A$-context $D$, $D \Vdash^{+} \mu_{C}$ iff $D=C$. We can further prove that if $\Delta$ is a consistent set of $L^*$-formulas built out of the atomic formulas from $A$, then there is an $A$-context $C$ such that $\Delta \cup \left\{\mu_{C}\right\}$ is consistent. Moreover, we can prove by induction, crucially using the rule ($\lozenge\oplus$), that for every $L^*$-formula $\varphi$ built out of the atomic formulas from $A$, either $\mu_{C} \vdash \varphi$, or $\mu_{C} \vdash -\varphi$.
We can further reason as follows. Let $\Delta$ be a consistent finite set of $L^*$-formulas and let $A$ be the set of atomic formulas occurring in $\Delta$. Then there is an $A$-context $C$ such that $\Delta \cup \left\{\mu_{C}\right\}$ is consistent. It follows that $\mu_{C} \vdash \psi$, for every $\psi \in \Delta$. Since $C \Vdash^{+} \mu_{C}$ it follows from soundness of the deductive rules that $C \Vdash^{+} \psi$, for every $\psi \in \Delta$. Hence, $\Delta$ has a model.
\end{proof}
\begin{theorem}
$\varphi_1, \ldots, \varphi_n \vdash \psi$ \, iff\, $\varphi_1, \ldots, \varphi_n \vDash \psi$.
\end{theorem}
\begin{proof}
Soundness was already discussed and completeness is obtained from Lemmas \ref{l: entailment and models}, \ref{l: defined negation and consistency}, and \ref{l: consistent sets have model}.
\end{proof}
\end{document}
\begin{document}
\renewcommand{\sectionmark}[1]{\markright{\spacedlowsmallcaps{#1}}}
\lehead{\mbox{\llap{\small\thepage\kern1em\color{halfgray} \vline}\color{halfgray}\hspace{0.5em}\rightmark\hfil}}
\pagestyle{scrheadings}
\title{\normalfont\spacedlowsmallcaps{An inertial extrapolation method}}
\setcounter{tocdepth}{2}
\section*{Abstract}
\noindent We consider the minimization of a scalar objective over the solution set of another optimization problem. This problem is known as a \emph{simple bilevel optimization problem} and has drawn significant attention in the last few years. Our inner problem consists of minimizing the sum of a smooth and a nonsmooth function, while the outer one is the minimization of a smooth convex function. We propose a fixed-point iterative method with inertial extrapolation to solve the problem and establish its convergence. Our numerical experiments show that the method proposed in this paper outperforms the currently best known algorithm for the class of problems considered.
\let\thefootnote\relax\footnotetext{$\dag$ \textit{Department of Mathematics, University of Nigeria, Nsukka, Nigeria; e-mail: \url{[email protected]}. Current address (May 2016 -- April 2019): Institute of Mathematics, University of W\"urzburg, Emil-Fischer-Str.\ 30,
97074 W\"urzburg, Germany. The research of this author is supported by the
Alexander von Humboldt-Foundation.}}
\let\thefootnote\relax\footnotetext{$\ddag$ \textit{Faculty of Mathematics, University of Vienna, Oskar-Morgenstern-Platz 1, 1090 Vienna, Austria; e-mail:
\url{[email protected]}. The work of this author is funded by the FWF Grant M 2499 Meitner-Programm.}}
\let\thefootnote\relax\footnotetext{$\natural$ \textit{School of Mathematics, University of Southampton
SO17 1BJ Southampton, UK; e-mail: \url{[email protected]}. The work of this author is funded by the EPSRC Grant EP/P022553/1.}}
\section{Introduction}
Our main aim in this paper is to solve a scalar objective minimization problem over the solution set of another optimization problem; i.e., precisely, the problem
\begin{eqnarray}\label{bilevel1}
\min~h(x) \; \mbox{ s.t. } \; x \in X^*\subseteq \mathbb{R}^n,
\end{eqnarray}
where $h : \mathbb{R}^n \rightarrow \mathbb{R}$ is assumed to be strongly convex and differentiable, while $X^*$ is the nonempty set of minimizers of the classical convex composite optimization problem
\begin{eqnarray}\label{bilevel2}
\min~\varphi(x) := f(x) + g(x),
\end{eqnarray}
where $f : \mathbb{R}^n \rightarrow \mathbb{R}$ is continuously differentiable and $g$ is an extended real-valued function on $\mathbb{R}^n$, which can be nonsmooth. Problem \eqref{bilevel1}--\eqref{bilevel2} was labeled in \cite{DempeDinhDutta2010} as a \emph{simple bilevel optimization problem}, as opposed to the more general version of the problem (see, e.g., \cite{DempeFoundations}), where the follower's problem \eqref{bilevel2} is parametric, with the parameter representing the variable controlled by the leader, which is in turn different from the variable under the control of the follower. For more details on the vocabulary and on the connections of problem \eqref{bilevel1}--\eqref{bilevel2} to the standard bilevel optimization problem, see Subsection \ref{Standard bilevel optimization} below. \\
\noindent A common approach to solve problem \eqref{bilevel1}--\eqref{bilevel2} consists of the
Tikhonov-type regularization \cite{Tikhonov} (indirect method), based on solving the following
regularized problem
\begin{eqnarray}\label{bilevel3}
\min~\varphi_\lambda(x) := \varphi(x) + \lambda h(x)
\end{eqnarray}
for some $\lambda >0$. Note that problem \eqref{bilevel1}--\eqref{bilevel2} can be traced back to the work by Mangasarian and Meyer \cite{MangasarianMeyer1979} in the process of developing efficient algorithms for large-scale linear programs. The model emerged in turn as a refinement of the regularization technique introduced by Tikhonov \cite{Tikhonov}. The underlying idea in the related papers by Mangasarian and his co-authors is the so-called \emph{finite-perturbation property}, which consists of finding a parameter $\bar \lambda$ (the \emph{Tikhonov perturbation parameter}) such that for all $\lambda\in [0, \; \bar \lambda]$,
\begin{eqnarray}\label{Finite-perturbation}
\arg\underset{x \in X^*}\min~h(x) = \arg\underset{x \in \mathbb{R}^n}\min~\varphi_\lambda(x):=\varphi(x) + \lambda h(x).
\end{eqnarray}
This property, initially proven in \cite{MangasarianMeyer1979} when the lower-level problem is a linear program, was later extended in \cite{FerrisMangasarian} to the case where it is a general convex optimization problem. \\
\noindent To the best of our knowledge, the development of solution algorithms specifically tailored to optimization problems of the form \eqref{bilevel1}--\eqref{bilevel2} can be traced back to the work by Cabot \cite{Cabot2005}, where a \emph{proximal point method} is proposed to solve the problem and its extension to a simple hierarchical optimization problem with finitely many levels. In contrast to the latter paper, where the approximation scheme is only implicit, thus making the method difficult to implement numerically, Solodov \cite{Solodov2} proposed an explicit and more tractable proximal point method for problem \eqref{bilevel1}--\eqref{bilevel2}. Since then, various proximal point algorithms have been developed to solve the problem within different frameworks; see, e.g., \cite{BotNguyen2018, malitsky2017chambolle, SabachShtern} and references therein.\\
\noindent Motivated by the results in \cite{BeckSabach}, Sabach and Shtern \cite{SabachShtern} recently proposed
the following scheme (with $x_0 \in \mathbb{R}^n$ as starting point), called \emph{Bilevel Gradient Sequential Averaging Method} (abbreviated as \emph{BiG-SAM}), to solve problem \eqref{bilevel1}--\eqref{bilevel2}:
\begin{equation}\label{e1}
\left\{\begin{array}{l}
s_n={\rm prox}_{\lambda g}(x_{n-1}-\lambda \nabla f(x_{n-1}))\\
z_n=x_{n-1}-\gamma \nabla h(x_{n-1})\\
x_n=\alpha_n z_n+(1-\alpha_n)s_n,~~n \geq 1
\end{array}\right.
\end{equation}
with $\lambda \in \left(0,\frac{1}{L_f}\right]$, $\gamma \in \left(0,\frac{2}{L_h +\sigma}\right]$, and $\{\alpha_n\}$ satisfying the conditions assumed in \cite{XuViscosity}. Sabach and Shtern \cite{SabachShtern} obtained a nonasymptotic $O(1/n)$ global rate of convergence in terms
of the inner objective function values and showed that BiG-SAM \eqref{e1} appears simpler and cheaper than the method proposed in \cite{BeckSabach}.
The numerical example in \cite{SabachShtern} also showed that BiG-SAM \eqref{e1} outperforms the method in \cite{BeckSabach} for solving problem \eqref{bilevel1}--\eqref{bilevel2}. The algorithm in \cite{SabachShtern} seems to be the most efficient method developed so far for convex simple bilevel optimization problems.\\
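\noindent For concreteness, the scheme \eqref{e1} can be sketched in Python as follows; this is our own illustration (not code from \cite{SabachShtern}), where \texttt{prox\_g}, \texttt{grad\_f}, and \texttt{grad\_h} are user-supplied callables, \texttt{prox\_g} computes ${\rm prox}_{\lambda g}$, \texttt{alpha(n)} returns $\alpha_n$, and the parameters are assumed to satisfy the conditions stated above.
\begin{verbatim}
def big_sam(x0, prox_g, grad_f, grad_h, lam, gamma, alpha, num_iter=1000):
    # Sketch of BiG-SAM: averaging of a prox-gradient step on the inner
    # problem and a gradient step on the outer objective.
    x = x0
    for n in range(1, num_iter + 1):
        s = prox_g(x - lam * grad_f(x))   # prox-grad step on varphi = f + g
        z = x - gamma * grad_h(x)         # gradient step on h
        x = alpha(n) * z + (1 - alpha(n)) * s
    return x
\end{verbatim}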
\noindent Inspired by recent results on \emph{inertial extrapolation} type algorithms for solving optimization problems
(see, e.g., \cite{Attouch3,Beck,Bot2,Ochs} and references therein),
our aim in this paper is to solve problem \eqref{bilevel1}--\eqref{bilevel2} by introducing an inertial extrapolation step to BiG-SAM \eqref{e1}
(which we shall call \emph{iBiG-SAM}).
We then establish the global convergence of our method under reasonable assumptions. Numerical experiments show that the proposed method outperforms the BiG-SAM \eqref{e1} introduced in \cite{SabachShtern}.\\
\noindent For the remainder of the paper, first note that there is a striking similarity between the exact penalization model \eqref{bilevel3} and a corresponding partial penalization approach based on the \emph{partial calmness concept} \cite{YeZhuOptCondForBilevel1995} often used to solve the general bilevel optimization problem. Both approaches seem to have originated from completely different sources and their development also seems to be occurring independently from each other till now. In Subsection \ref{Standard bilevel optimization}, we clarify this similarity and discuss some strong relationships between the two problem classes. In Subsection \ref{Sec:Prelims}, we recall some basic definitions and results that will play an important role in the paper. The proposed method and its convergence analysis are presented in Section~\ref{Sec:Method}. Some numerical experiments are given in Section~\ref{Sec:Numerics}. We conclude the paper with some final remarks in Section~\ref{Sec:Final}.
\section{General context and mathematical tools}
\subsection{Standard bilevel optimization.} \label{Standard bilevel optimization}
In this subsection, we provide a discussion to place the simple bilevel optimization problem introduced above in the general context of bilevel optimization. To proceed, we consider a simple optimistic version of the latter class of problems, which aligns suitably with problem \eqref{bilevel1}--\eqref{bilevel2}, i.e.,
\begin{equation}\label{Standard}
\underset{x,y}\min~h(x, y) \;\, \mbox{ s.t. }\;\, y\in S(x)
\end{equation}
where $h : \mathbb{R}^n\times \mathbb{R}^m \rightarrow \mathbb{R}$ represents the upper level objective function and the set-valued mapping $S$ defines the set of optimal solutions of the lower level problem
\begin{equation}\label{lower-level}
\underset{y}\min~\varphi(x,y)
\end{equation}
($\varphi : \mathbb{R}^n\times \mathbb{R}^m \rightarrow \mathbb{R}$) for any fixed upper level variable $x$. Obviously, problem \eqref{bilevel1}--\eqref{bilevel2} is a special case of problem \eqref{Standard}--\eqref{lower-level}, where the optimal solution of the leader is simply picked among the optimal solutions of the lower level problem, which in turn are obtained without any influence from the leader, as is the case in the latter problem.\\
\noindent On the other hand, problem \eqref{Standard}--\eqref{lower-level} can be equivalently written as the following optimization problem over an efficient set
$$
\underset{x,y}\min~h(x,y) \; \mbox{ s.t. }\; (x,y)\in E\left(\mathbb{R}^n\times \mathbb{R}^m, \; \bar{\varphi},\; \preccurlyeq\right),
$$
where $E\left(\mathbb{R}^n\times \mathbb{R}^m, \; \bar{\varphi},\; \preccurlyeq\right)$ denotes the efficient set (i.e., optimal solution set) of the problem of minimizing a multiobjective function $\bar \varphi$ (based on $\varphi$ \eqref{lower-level}) over $\mathbb{R}^n\times \mathbb{R}^m$ w.r.t. a certain order relation $\preccurlyeq$; for examples of choices of the latter function and corresponding order relations, see the papers \cite{Eichfelder, Fulop}. Obviously, an optimization problem over an efficient set is a generalization of the simple bilevel optimization problem \eqref{bilevel1}--\eqref{bilevel2}, and has been extensively investigated since the seminal work by Philip \cite{Phillip1972}; see \cite{Yamamoto2002} for a literature review on the topic.\\
\noindent One common approach to transform problem \eqref{Standard}--\eqref{lower-level} into a single-level optimization problem is the so-called lower-level optimal value function (LLVF) reformulation
\begin{equation}\label{LLVF}
\underset{x, y}\min~h(x, y) \;\; \mbox{ s.t. }\;\; \varphi(x,y)\leq \varphi^*(x),
\end{equation}
where the function $\varphi^*(x)=\underset{y}\min~\varphi(x,y)$ represents the optimal value function of the lower level problem \eqref{lower-level}.
Recall that this reformulation is an underlying feature in the development of the link \eqref{Finite-perturbation} between the simple bilevel optimization problem \eqref{bilevel1}--\eqref{bilevel2} and the penalized problem \eqref{bilevel3}, as outlined in the corresponding publications; see, e.g., \cite{FerrisMangasarian}. However, we instead want to point out here an interesting similarity between the finite-perturbation property \eqref{Finite-perturbation} and the \emph{partial calmness} concept \cite{YeZhuOptCondForBilevel1995} commonly used in the context of standard bilevel optimization. To highlight this, let $(\bar x, \bar y)$ be a local optimal solution of problem \eqref{LLVF}. This problem is said to be partially calm at $(\bar x, \bar y)$ if and only if there exists $\lambda >0$ such that $(\bar x, \bar y)$ is also a local optimal solution of the penalized problem
\begin{equation}\label{Penalized LLVF}
\underset{x, y}\min~h(x, y) + \lambda\left(\varphi(x,y)-\varphi^*(x)\right).
\end{equation}
The partial calmness concept does not automatically hold for the simple bilevel optimization problem \eqref{bilevel1}--\eqref{bilevel2}. To see this, consider the convex simple bilevel optimization problem of minimizing $(x-1)^2$ subject to $x\in \arg\min~y^2$.
It is clear that $0$ is the only optimal solution of this problem. But for the corresponding penalized problem \eqref{Penalized LLVF}, which consists of minimizing $(x-1)^2 + \lambda x^2$,
setting the derivative $2(x-1)+2\lambda x$ to zero shows that the optimal solution is the number $x(\lambda):= \frac{1}{1+\lambda}$ for all $\lambda >0$. Clearly, $x(\lambda)\neq 0$ for all $\lambda >0$. \\
\noindent It is also important to note that, possibly unlike the finite-perturbation property \eqref{Finite-perturbation}, the partial calmness concept was introduced as a qualification condition to derive necessary optimality conditions for problem \eqref{LLVF}; see \cite{DempeZemkohoGenMFCQ,YeZhuOptCondForBilevel1995} for some papers where this concept is used, and also the papers \cite{DempeDinhDuttaPandit2018, FrankeMehlitzPilecka2018} for new results on simple bilevel optimization problems from the perspective of standard bilevel optimization.
\subsection{Basic mathematical tools}\label{Sec:Prelims}
We state the following well-known lemmas which will be used in our convergence analysis in the sequel.
\begin{lem}\label{lm2}
The following well-known results hold in $\mathbb{R}^n$:
\begin{itemize}
\item[(i)] $||x+y||^2=||x||^2+2\langle x,y\rangle+||y||^2, \;\;\forall x, y \in \mathbb{R}^n;$
\item[(ii)] $||x+y||^2 \leq ||x||^2+ 2\langle y, x+y \rangle, \;\;\forall x, y \in \mathbb{R}^n;$
\item[(iii)] $\|tx+sy\|^2=t(t+s)\|x\|^2+s(t+s)\|y\|^2-st\|x-y\|^2,\;\; \forall x, y \in \mathbb{R}^n, \;\; s, t \in \mathbb{R}.$
\end{itemize}
\end{lem}
\begin{lem}(see, e.g., \cite{xu2})\label{lm23}
Let $\{a_n\}$ and $\{\gamma_n\}$ be sequences of nonnegative real numbers, $\{\alpha_n\}$ a sequence in (0,1) and $\{\sigma_n\}$ a real sequence satisfying the following relation:
$$a_{n+1}\leq(1-\alpha_n)a_n+\sigma_n+\gamma_n,~~n \geq 1.$$
\noindent Assume $\sum\gamma_n<\infty.$
Then the following results hold:
\begin{itemize}
\item[(i)] If $\sigma_n \leq \alpha_n M$ for some $M\geq 0$, then $\{a_n\}$ is a bounded sequence.
\item[(ii)] If $\sum\alpha_n=\infty$ and $\limsup \frac{\sigma_n}{\alpha_n}\leq 0$, then $\lim a_n=0$.\\
\end{itemize}
\end{lem}
\noindent We state the formal definition of some classes of operators that
play an essential role in our analysis in the sequel.
\begin{dfn}\label{Def:LipMon}
An operator $ T: \mathbb{R}^n\rightarrow \mathbb{R}^n $
is called
\begin{itemize}
\item[(a)] {\em nonexpansive} if and only if $ \| Tx - Ty \| \leq \| x - y \| $ for all $ x, y \in \mathbb{R}^n $;
\item[(b)] {\em averaged} if and only if it can be written as
the average of the identity mapping $I$ and a nonexpansive operator, i.e.,
$T: = (1-\beta)I + \beta S$ with $\beta\in (0,1)$ and $S:\mathbb{R}^n\rightarrow \mathbb{R}^n$ being a nonexpansive operator. More precisely, we say that $T$ is $\beta$-averaged;
\item[(c)] {\em firmly nonexpansive} if and only if $2T-I$ is nonexpansive, or equivalently,
$$
\langle Tx-Ty, x-y\rangle \geq \|Tx-Ty\|^2, \;\; \forall x, y \in \mathbb{R}^n.
$$
\noindent Alternatively, $T$ is said to be firmly nonexpansive if and only if it can be expressed as
$T:=\frac{1}{2}(I+S)$, where $S:\mathbb{R}^n\rightarrow \mathbb{R}^n$ is nonexpansive.
\end{itemize}
\end{dfn}
\noindent
We can see from above that firmly nonexpansive operators (in particular,
projections) are $\frac{1}{2}$-averaged.
\begin{lem} (\cite{Goebelkirk}) \label{lm25}
Let $T:\mathbb{R}^n\rightarrow \mathbb{R}^n$ be a nonexpansive operator. Let $\{x_n\}$ be a sequence in $\mathbb{R}^n$ and $x$ be a point in $\mathbb{R}^n$.
Suppose that $x_n\rightarrow x$ as $n\rightarrow \infty$ and that
$x_n-Tx_n\rightarrow 0$ as $n\rightarrow \infty$. Then, $x\in F(T)$, where $F(T)$ is the set of fixed points of $T$.\\
\end{lem}
\noindent Next, we provide some relevant properties of averaged operators.
\begin{prop}(see, e.g., \cite{cby})\label{need1}
For given operators $S$, $T$, and $V$ defined from $\mathbb{R}^n$ to $\mathbb{R}^n$, the following statements are satisfied:
\begin{itemize}
\item[(a)] If $T = (1-\alpha)S + \alpha V$ for some $\alpha \in (0, 1)$ and if $S$ is averaged and $V$ is nonexpansive, then the operator $T$ is averaged.
\item[(b)] The operator $T$ is firmly nonexpansive if and only if the complement $I- T$ is also firmly nonexpansive.
\item[(c)] If $T = (1-\alpha)S + \alpha V$ for some $\alpha \in (0, 1)$ and if $S$ is firmly nonexpansive and $V$ is nonexpansive, then $T$ is averaged.
\item[(d)] The composite of finitely many averaged operators is averaged. That is, if for each $i=1, \ldots, N$, the operator $T_i$ is averaged, then so is the composite operator $T_1\ldots T_N$. In particular,
if $T_1$ is $\alpha_1$-averaged and $T_2$ is $\alpha_2$-averaged, where $\alpha_1,\alpha_2 \in (0, 1)$, then
the composite $T_1T_2$ is $\alpha$-averaged, where $\alpha = \alpha_1 +\alpha_2-\alpha_1\alpha_2.$\\
\end{itemize}
\end{prop}
\noindent Finally, for the last proposition of this section, we recall the definition of monotonicity of nonlinear operators.
\begin{dfn}\label{Def:Monotone}
Let $A$ be a nonlinear operator with domain $D(A)$ in $\mathbb{R}^n$, and let $\beta$ and $\nu$ be positive constants. Then $A$ is called
\begin{itemize}
\item[(a)] {\em monotone} on $D(A)$ if $\langle Ax - Ay, x-y \rangle\geq 0$ for all $x,y \in D(A)$;
\item[(b)] {\em $\beta$-strongly monotone} if $\langle Ax - Ay, x-y \rangle \geq \beta\|x-y\|^2$ for all $x,y \in D(A)$;
\item[(c)] {\em $\nu$-inverse strongly monotone } ($\nu$-ism, for short) if $ \langle Ax - Ay, x-y \rangle \geq \nu\|Ax-Ay\|^2$ for all $x,y \in D(A)$. \\
\end{itemize}
\end{dfn}
\noindent The following proposition gathers some useful results on the relationship between averaged
operators and inverse strongly monotone operators.
\begin{prop}(\cite{cby})\label{need2}
If $T: \mathbb{R}^n \rightarrow \mathbb{R}^n$ is an operator, then the following statements hold:
\begin{itemize}
\item[(a)] $T$ is nonexpansive if and only if the complement $I-T$ is $\frac{1}{2}$-ism;
\item[(b)] If $T$ is $\nu$-ism, then for $\gamma > 0,~~\gamma T$ is $\frac{\nu}{\gamma}$-ism;
\item[(c)] $T$ is averaged if and only if the complement $I-T$ is $\nu$-ism for some $\nu > 1/2$. Indeed, for $\alpha \in (0, 1),~~ T$ is $\alpha$-averaged if and only if $I-T$ is $\frac{1}{2\alpha}$-ism.
\end{itemize}
\end{prop}
\section{The algorithm and convergence analysis}\label{Sec:Method}
In this section, we give a precise statement of our method and its convergence
analysis. We first state the assumptions that will be needed throughout the rest of this paper.
\begin{asm}\label{Ass:VI} Considering problem \eqref{bilevel1}--\eqref{bilevel2}, let the following hold:
\begin{itemize}
\item[(a)] $f:\mathbb{R}^n\rightarrow \mathbb{R}$ is convex and continuously differentiable such that its gradient is Lipschitz continuous with constant $L_f$.
\item[(b)] $g:\mathbb{R}^n\rightarrow \left(-\infty,\infty\right]$ is proper, lower semicontinuous, and convex.
\item[(c)] $h:\mathbb{R}^n\rightarrow \mathbb{R}$ is strongly convex with parameter $\sigma>0$ and continuously differentiable such that its gradient is Lipschitz continuous with constant $L_h$.
\item[(d)] The set $X^*$ of all optimal solutions of problem \eqref{bilevel2} is nonempty.
\end{itemize}
\end{asm}
\begin{asm}\label{Ass:Parameters}
Suppose $ \{ \alpha_n \}_{n=1}^\infty$ is a sequence in (0,1) and $ \{ \epsilon_n \}_{n=1}^\infty$ is a positive sequence satisfying the following conditions:
\begin{itemize}
\item[(a)] $\lim_{n \to \infty} \alpha_n =0$ and $\sum_{n = 1}^{\infty}\alpha_n =\infty$.
\item[(b)] $\epsilon_n=o(\alpha_n)$, i.e., $\lim_{n \to \infty} \frac{\epsilon_n}{\alpha_n} =0$ (e.g., $\epsilon_n=\frac{1}{(n+1)^2}, \alpha_n=\frac{1}{n+1}$).
\item[(c)] $\lambda \in \left(0,\frac{2}{L_f}\right)$ and $\gamma \in \left(0,\frac{2}{L_h+\sigma}\right]$.
\end{itemize}
\end{asm}
\begin{rem}
\noindent Note that the stepsize $\lambda$ in Assumption~\ref{Ass:Parameters}(c) is chosen from a larger interval than that of \cite{SabachShtern}. Also, our Assumption~\ref{Ass:Parameters}(a) is weaker than Assumption C of \cite{SabachShtern}, since $\{\alpha_n\}$ is not required there to satisfy $\lim_{n \to \infty} \frac{\alpha_{n+1}}{\alpha_n}=1$ as assumed in Assumption C of \cite{SabachShtern}. Take, for example,
$\alpha_n=\frac{1}{\sqrt{n}}$ when $n$ is odd and $\alpha_n=\frac{1}{n}$ when $n$ is even.
We see that $\{\alpha_n\}$ satisfies Assumption~\ref{Ass:Parameters}(a), but $\frac{\alpha_{n+1}}{\alpha_n} \not\to 1$.
\end{rem}
\noindent
We next give a precise statement of our \emph{inertial Bilevel Gradient Sequential Averaging Method} (iBiG-SAM) as follows.
\begin{algorithm}[H]
\caption{iBiG-SAM}
\label{Alg:AlgL}
\begin{algorithmic}
\STATE \textbf{Step 0}: Choose sequences $ \{ \alpha_n \}_{n=1}^\infty$ and $ \{ \epsilon_n \}_{n=1}^\infty$
such that the conditions in Assumption~\ref{Ass:Parameters} hold.
Select arbitrary points $x_0, x_1 \in \mathbb{R}^n$ and $\alpha\geq 3$. Set $n:=1$.
\STATE \textbf{Step 1}: Given the iterates $x_{n-1}$ and $x_n$ (with $n \geq 1$), choose $\theta_n$ such that we have
$0\leq \theta_n \leq \bar{\theta}_n$ with $\bar{\theta}_n$ defined by
\begin{equation}\label{thetaDefine}
\bar{\theta}_n :=
\left\{\begin{array}{ll}
\min\left\{\frac{n-1}{n+\alpha-1},\;\, \frac{\epsilon_n}{\|x_n-x_{n-1}\|}\right\} & \mbox{if } \;\; x_n\neq x_{n-1},\\[1ex]
\frac{n-1}{n+\alpha-1} & {\rm otherwise}.
\end{array}
\right.
\end{equation}
\STATE \textbf{Step 2}: Proceed with the following computations:
\begin{equation}\label{e31}
\left\{\begin{array}{l}
y_n=x_n+\theta_n(x_n-x_{n-1}),\\
s_n={\rm prox}_{\lambda g}(y_n-\lambda \nabla f(y_n)),\\
z_n=y_n-\gamma \nabla h(y_n),\\
x_{n+1}=\alpha_n z_n+(1-\alpha_n)s_n,~~n \geq 1.
\end{array}
\right.
\end{equation}
\end{algorithmic}
\end{algorithm}
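\noindent To illustrate the steps, a minimal Python sketch of Algorithm~\ref{Alg:AlgL} (our assumption of a possible implementation, not the code used for the experiments below) reads as follows; \texttt{prox\_g}, \texttt{grad\_f}, and \texttt{grad\_h} are user-supplied callables, with \texttt{prox\_g} computing ${\rm prox}_{\lambda g}$, and \texttt{alpha\_seq(n)}, \texttt{eps\_seq(n)} return $\alpha_n$ and $\epsilon_n$.
\begin{verbatim}
import numpy as np

def ibig_sam(x0, x1, prox_g, grad_f, grad_h, lam, gamma,
             alpha_seq, eps_seq, a=3.0, num_iter=1000):
    # Sketch of iBiG-SAM; a plays the role of the parameter alpha >= 3.
    x_prev, x = x0, x1
    for n in range(1, num_iter + 1):
        diff = np.linalg.norm(x - x_prev)
        theta_bar = (n - 1.0) / (n + a - 1.0)
        if diff > 0:
            theta_bar = min(theta_bar, eps_seq(n) / diff)
        theta = theta_bar                  # any theta in [0, theta_bar] is admissible
        y = x + theta * (x - x_prev)       # inertial extrapolation step
        s = prox_g(y - lam * grad_f(y))    # prox-grad step on the inner problem
        z = y - gamma * grad_h(y)          # gradient step on the outer objective
        x_prev, x = x, alpha_seq(n) * z + (1 - alpha_seq(n)) * s
    return x
\end{verbatim}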
\begin{rem}\label{mami}
Observe that from Assumption~\ref{Ass:Parameters} and Algorithm~\ref{Alg:AlgL} we have that
$$
\lim_{n \to \infty} \theta_n \|x_n-x_{n-1}\|=0 \;\;\mbox{ and } \;\;
\lim_{n \to \infty} \frac{\theta_n }{\alpha_n}\|x_n-x_{n-1}\| =0.
$$
\end{rem}
\noindent
Also note that Step 1 in our Algorithm \ref{Alg:AlgL} is easy to implement in numerical computations,
since the value of $\|x_n-x_{n-1}\|$ is known before $\theta_n$ is chosen.
\noindent
We are now in a position to discuss the convergence of iBiG-SAM. Let us define
\begin{eqnarray}\label{proxgrad}
T_\lambda:={\rm prox}_{\lambda g}(I-\lambda \nabla f).
\end{eqnarray}
\noindent The next lemma shows that the prox-grad mapping $T_\lambda$ is averaged. This is an improvement over Lemma 1(i) of \cite{SabachShtern}.
\begin{lem}\label{use1}
The prox-grad mapping $T_\lambda$ \eqref{proxgrad} is $\frac{2+\lambda L_f}{4}$-averaged for all
$\lambda \in \left(0,\frac{2}{L_f}\right)$.
\end{lem}
\begin{proof}
Observe that the Lipschitz condition on $\nabla f$ implies that $\nabla f$ is $\frac{1}{L_f}$-ism
(see \cite{baillon}), which then implies that $\lambda \nabla f$ is $\frac{1}{\lambda L_f}$-ism.
Hence, by Proposition \ref{need2}(c), $I-\lambda \nabla f$ is $(\frac{\lambda L_f}{2})$-averaged.
Since ${\rm prox}_{\lambda g}$ is firmly nonexpansive and hence $\frac{1}{2}$-averaged, we
see from Proposition \ref{need1}(d) that the composite ${\rm prox}_{\lambda g}(I-\lambda \nabla f)$
is $\frac{2+\lambda L_f}{4}$-averaged for $\lambda \in (0,\frac{2}{L_f})$.
Hence we have that, $T_\lambda={\rm prox}_{\lambda g}(I-\lambda \nabla f)$ is
$\frac{2+\lambda L_f}{4}$-averaged.
Therefore, we can write
\begin{eqnarray}\label{prox}
T_\lambda&=&{\rm prox}_{\lambda g}(I-\lambda \nabla f)= \left( \frac{2-\lambda L_f}{4} \right)I+\left(\frac{2+\lambda L_f}{4} \right )T\\
&=&(1-\beta)I+\beta T\label{prox2},
\end{eqnarray}
where $\beta:=\frac{2+\lambda L_f}{4} \in \left(\frac{1}{2},1\right)$ and $T$ is a nonexpansive mapping.
\end{proof}
\noindent Lemma 1(ii) of \cite{SabachShtern} showed the equivalence between the fixed points of prox-grad mapping $T_\lambda$ \eqref{proxgrad} and optimal solutions of problem \eqref{bilevel2}. That is, $x\in X^*$ if and only if $x \, = \, T_\lambda x$.
This equivalence will be needed in our convergence analysis in this paper.
\begin{lem}(\cite{SabachShtern})\label{use2}
Suppose that Assumption \ref{Ass:VI} (c) holds. Then, the mapping $S_\gamma$, defined by
$
S_\gamma:=I-\gamma \nabla h,
$
is a contraction for all $\gamma \in \left(0,\frac{2}{L_h+\sigma }\right]$. That is,
$$
\|S_\gamma (x)-S_\gamma (y) \| \leq \eta\|x-y\|,\;\; \forall x,y \in \mathbb{R}^n.
$$
Here, $I$ represents the identity operator and $\eta:=\sqrt{1-\frac{2\gamma\sigma L_h}{\sigma+L_h}}$.
\end{lem}
\noindent By the statements of Lemma \ref{use1} and Lemma \ref{use2}, we can re-write \eqref{e31} as
\begin{equation}\label{e32}
\left\{\begin{array}{ll}
y_n=x_n+\theta_n(x_n-x_{n-1}),\\
x_{n+1}=\alpha_n S_{\gamma}(y_n)+(1-\alpha_n)(1-\beta)y_n+\beta(1-\alpha_n)Ty_n,~~n \geq 1,
\end{array}
\right.
\end{equation}
where $T$ is a nonexpansive mapping, $S_{\gamma}$ is a contraction mapping and $\beta:=\frac{2+\lambda L_f}{4}$.\\
\noindent
Before we proceed with the main result of this section, we first show that the iterative sequence generated by our algorithm is bounded.
\begin{lem}\label{c31}
Let Assumptions~\ref{Ass:VI} and \ref{Ass:Parameters} be satisfied.
Then the sequence $\{x_n\}$ generated by Algorithm~\ref{Alg:AlgL} is bounded.
\end{lem}
\begin{proof}
From \eqref{e31}, for any $z \in X^*$, we have $z \in F(T_{\lambda})=F(T)$. Therefore,
\begin{eqnarray}\label{chi1}
\|x_{n+1}-z\| &\leq& \alpha_n\|S_{\gamma}(y_n)-z\|+(1-\alpha_n)(1-\beta)\|y_n-z\| +\beta(1-\alpha_n)\|Ty_n-z\|\nonumber\\
&\leq&\alpha_n\left(\|S_{\gamma}(y_n)-S_{\gamma}(z)\|+\|S_{\gamma}(z)-z\| \right )+(1-\alpha_n)\|y_n-z\|\nonumber \\
&\leq& \alpha_n\|S_{\gamma}(z)-z\|+(1-\alpha_n(1-\eta))\|y_n-z\|\nonumber \\
&\leq&\alpha_n\|S_{\gamma}(z)-z\|+(1-\alpha_n(1-\eta))(\|x_n-z\|+\theta_n\|x_n-x_{n-1}\|) \nonumber \\
&=&(1-\alpha_n(1-\eta))\|x_n-z\|+(1-\alpha_n(1-\eta))\theta_n\|x_n-x_{n-1}\|\nonumber \\
&& \qquad \qquad+\alpha_n\|S_{\gamma}(z)-z\|\nonumber \\
&=& (1-\alpha_n(1-\eta))\|x_n-z\|+\alpha_n(1-\eta)\frac{\|S_{\gamma}(z)-z\|}{1-\eta}\nonumber \\
&& \qquad \qquad+(1-\alpha_n(1-\eta))\theta_n\|x_n-x_{n-1}\|\nonumber \\
&=& (1-\alpha_n(1-\eta))\|x_n-z\|+\alpha_n(1-\eta)\Big(\frac{\|S_{\gamma}(z)-z\|}{1-\eta}\nonumber \\
&& \qquad \qquad+(1-\alpha_n(1-\eta))\frac{\theta_n}{\alpha_n(1-\eta)}\|x_n-x_{n-1}\|\Big).
\end{eqnarray}
Observe that $\sup_{n\geq 1} (1-\alpha_n(1-\eta))\frac{\theta_n}{\alpha_n(1-\eta)}\|x_n-x_{n-1}\|$ is finite by Remark \ref{mami} and take
$$
M:=\frac{\|S_{\gamma}(z)-z\|}{1-\eta}+\sup_{n\geq 1} (1-\alpha_n(1-\eta))\frac{\theta_n}{\alpha_n(1-\eta)}\|x_n-x_{n-1}\|.
$$
\noindent Then \eqref{chi1} becomes
$$
\|x_{n+1}-z\| \leq (1-\alpha_n(1-\eta))\|x_n-z\|+\alpha_n(1-\eta)M.
$$
\noindent By Lemma \ref{lm23}(i), applied with $\alpha_n(1-\eta)$ in place of $\alpha_n$, $\sigma_n:=\alpha_n(1-\eta)M$, and $\gamma_n:=0$, we get that $\{x_n\}$ is bounded. As a consequence, $\{y_n\}$ is also bounded.
\end{proof}
\begin{thm}\label{t31}
Let Assumptions~\ref{Ass:VI} and \ref{Ass:Parameters} hold.
Then the sequence $\{x_n\} $ generated by Algorithm~\ref{Alg:AlgL}
converges to a point $z \in X^*$ satisfying
\begin{eqnarray}\label{e34}
\langle \nabla h(z),x-z\rangle \geq 0 \quad \forall x \in X^*
\end{eqnarray}
and therefore, $z$ is the optimal solution of problem \eqref{bilevel1}--\eqref{bilevel2}.
\end{thm}
\begin{proof}
Start by observing that
\begin{eqnarray}\label{e6}
\|y_n-z\|^2
&=& \|x_n-z\|^2+2\theta_n\langle x_n-x_{n-1},x_n-z\rangle+\theta_n^2\|x_n-x_{n-1}\|^2.
\end{eqnarray}
From Lemma \ref{lm2} (i) it holds
\begin{eqnarray}\label{e7}
2 \langle x_n-x_{n-1}, x_n-z\rangle= -\|x_{n-1}-z\|^2+\|x_n-z\|^2 + \| x_n-x_{n-1}\|^2.
\end{eqnarray}
Substituting \eqref{e7} into \eqref{e6}, we obtain
\begin{eqnarray}\label{lara}
\|y_n-z\|^2
&=& \|x_n-z\|^2+\theta_n(-\|x_{n-1}-z\|^2+\|x_n-z\|^2+\| x_n-x_{n-1}\|^2) \nonumber\\
&& \quad +\theta_n^2\|x_n-x_{n-1}\|^2\nonumber \\
&=& \|x_n-z\|^2+\theta_n(\|x_n-z\|^2-\|x_{n-1}-z\|^2)\nonumber\\
&& \quad +\theta_n(1+\theta_n)\|x_n-x_{n-1}\|^2 \nonumber\\
&\leq & \|x_n-z\|^2+\theta_n(\|x_n-z\|^2-\|x_{n-1}-z\|^2)\nonumber\\
&& \quad +\;\; 2 \theta_n \|x_n-x_{n-1}\|^2,
\end{eqnarray}
where the last inequality follows from the fact that $\theta_n \in [0,1)$.
Using Lemma \ref{lm2} (ii) and (iii), we obtain from \eqref{e31} that
\begin{eqnarray}\label{laraa}
\|x_{n+1}-z\|^2&=&\|\alpha_n(S_{\gamma}(y_n)-z)+(1-\alpha_n)(1-\beta)(y_n-z)+\beta(1-\alpha_n)(Ty_n-z)\|^2\nonumber\\
&\leq & \|(1-\alpha_n)(1-\beta)(y_n-z)+\beta(1-\alpha_n)(Ty_n-z)\|^2 \nonumber \\
&& \quad +\;\; 2\langle \alpha_n(S_{\gamma}(y_n)-z), x_{n+1}-z \rangle \nonumber \\
&=&(1-\alpha_n)^2(1-\beta)\|y_n-z\|^2+\beta(1-\alpha_n)^2\|Ty_n-z\|^2 \nonumber\\
&& \quad -\;\;\beta(1-\beta)(1-\alpha_n)^2\|y_n-Ty_n\|^2+2\alpha_n\langle S_{\gamma}(y_n)-z, x_{n+1}-z \rangle\nonumber \\
&\leq&(1-\alpha_n)^2\|y_n-z\|^2-\beta(1-\beta)(1-\alpha_n)^2\|y_n-Ty_n\|^2\nonumber \\
&& \quad +\;\;2\alpha_n\langle S_{\gamma}(y_n)-z, x_{n+1}-z \rangle.
\end{eqnarray}
Combining \eqref{lara} and \eqref{laraa}, we get
\begin{eqnarray}\label{lara2}
\|x_{n+1}-z\|^2
&\leq& (1-\alpha_n)^2 \|x_n-z\|^2-\beta(1-\beta)(1-\alpha_n)^2\|y_n-Ty_n\|^2\nonumber \\
&& \quad +\;\;\theta_n(1-\alpha_n)^2(\|x_n-z\|^2-\|x_{n-1}-z\|^2)\nonumber\\
&&\quad +\;\;2\theta_n(1-\alpha_n)^2\|x_n-x_{n-1}\|^2 \nonumber\\
&& \quad +\;\; 2\alpha_n\left\langle S_{\gamma}(y_n)-z, x_{n+1}-z \right\rangle.
\end{eqnarray}
Setting
$
\Gamma_n:=\|x_n-z\|^2
$
for all $n \geq 1$, it follows from \eqref{lara2} that
\begin{eqnarray}\label{dubem}
\Gamma_{n+1}
&\leq&(1-\alpha_n)^2\Gamma_n-\beta(1-\beta)(1-\alpha_n)^2\|y_n-Ty_n\|^2
+\theta_n(1-\alpha_n)^2(\Gamma_n-\Gamma_{n-1})\nonumber\\
&& \quad +\;\;2\theta_n(1-\alpha_n)^2\|x_n-x_{n-1}\|^2+2\alpha_n\langle S_{\gamma}(y_n)-z, x_{n+1}-z \rangle.
\end{eqnarray}
\noindent We consider two cases for the rest of the proof.\\
\noindent {\bf Case 1:}
Suppose there exists a natural number $n_0$ such that $\Gamma_{n+1}\leq\Gamma_n$ for all
$n \geq n_0$. Therefore, $\lim_{n \to \infty} \Gamma_n$ exists.
From \eqref{dubem}, we have
\begin{eqnarray}\label{eq6}
&& \beta(1-\beta)(1-\alpha_n)^2\|y_n-Ty_n\|^2\qquad \qquad \nonumber\\
&\leq& (\Gamma_n-\Gamma_{n+1})+\theta_n(1-\alpha_n)^2 (\Gamma_n-\Gamma_{n-1})\nonumber\\
&& + \;\; 2\theta_n(1-\alpha_n)^2\|x_n-x_{n-1}\|^2+2\alpha_n\langle S_{\gamma}(y_n)-z, x_{n+1}-z \rangle.
\end{eqnarray}
Using Assumption~\ref{Ass:Parameters} (noting that $\lim_{n \to \infty} \theta_n \|x_n-x_{n-1}\|=0$ and
$\{x_n\}$, $\{y_n\}$ are bounded), we have
$$
\lim_{n \to \infty}\beta(1-\beta)(1-\alpha_n)^2\|y_n-Ty_n\|^2=0.
$$
\noindent Observe that $ \underset{n\rightarrow \infty}\liminf\beta(1-\beta)(1-\alpha_n)^2=\underset{n\rightarrow \infty}\lim\beta(1-\beta)(1-\alpha_n)^2=\beta(1-\beta) >0$ and this immediately implies that
$$
\lim_{n \to \infty}\|Ty_n-y_n\|=0.
$$
\noindent Since $\{x_n\}$ is bounded, we can take a subsequence $\{x_{n_k}\}$ of $\{x_n\}$ along which the limit superior below is attained and such that
$x_{n_k}\rightarrow p \in \mathbb{R}^n$. Using the definition of the mapping $S_{\gamma}$
in Lemma \ref{use2}, we have
\begin{eqnarray}\label{eq7}
\limsup_{n \to \infty} \langle S_{\gamma}(z)-z, x_n-z \rangle &=&\lim_{k \to \infty} \langle S_{\gamma}(z)-z, x_{n_k}-z \rangle \nonumber \\
&=& \langle S_{\gamma}(z)-z, p-z \rangle=\gamma\langle \nabla h(z),z-p\rangle.
\end{eqnarray}
From $y_n=x_n+\theta_n(x_n-x_{n-1})$, we get
$$
\|y_n-x_n\|=\theta_n\|x_n-x_{n-1}\|\rightarrow 0.
$$
\noindent
Since $x_{n_k}\rightarrow p $, we have $y_{n_k}\rightarrow p $.
Lemma \ref{lm25} then guarantees that $p \in F(T)=X^*$.
Furthermore, we have from \eqref{e34} and \eqref{eq7} that
\begin{eqnarray}\label{ayi}
\limsup_{n \to \infty}~\langle S_{\gamma}(z)-z, x_n-z \rangle \leq 0.
\end{eqnarray}
\noindent
From the contraction of $S_{\gamma}$ and \eqref{lara}, we can write
\begin{eqnarray*}
2\alpha_n\langle S_{\gamma}(y_n)-z, x_{n+1}-z \rangle
&=& 2\alpha_n\langle S_{\gamma}(y_n)-S_{\gamma}(z)+S_{\gamma}(z)-z, x_{n+1}-z \rangle \nonumber\\
&\leq& 2\alpha_n\eta\|y_n-z\|\|x_{n+1}-z\|+2\alpha_n\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle\nonumber\\
&\leq& \alpha_n\eta(\|y_n-z\|^2+\|x_{n+1}-z\|^2)+2\alpha_n\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle\nonumber\\
&\leq& \alpha_n\eta(\Gamma_n+\theta_n(\Gamma_n-\Gamma_{n-1})+2\theta_n\|x_n-x_{n-1}\|^2)\\
&&+\;\;2\alpha_n\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle+\alpha_n\eta\|x_{n+1}-z\|^2.
\end{eqnarray*}
Therefore from \eqref{dubem} it holds
\begin{eqnarray}
\Gamma_{n+1}&\leq&(1-\alpha_n)^2\Gamma_n+\theta_n(1-\alpha_n)^2(\Gamma_n-\Gamma_{n-1})\nonumber\\
&& \quad +\;\;2\theta_n(1-\alpha_n)^2\|x_n-x_{n-1}\|^2+2\alpha_n\langle S_{\gamma}(y_n)-z, x_{n+1}-z \rangle \nonumber\\
&\leq& ((1-\alpha_n)^2+\alpha_n\eta)\Gamma_n +\theta_n((1-\alpha_n)^2+\alpha_n\eta)(\Gamma_n-\Gamma_{n-1})\nonumber\\
&&\quad +\;\;2\theta_n((1-\alpha_n)^2+\alpha_n\eta)\|x_n-x_{n-1}\|^2+2\alpha_n\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle
\nonumber\\
&&\quad +\;\;\alpha_n\eta\|x_{n+1}-z\|^2\nonumber\\
&\leq& ((1-\alpha_n)^2+\alpha_n\eta)\Gamma_n+ \theta_n((1-\alpha_n)^2+\alpha_n\eta)\|x_n-x_{n-1}\|(\sqrt{\Gamma_n}+\sqrt{\Gamma_{n-1}})\nonumber\\
&& \quad +\;\;2\theta_n((1-\alpha_n)^2+\alpha_n\eta)\|x_n-x_{n-1}\|^2+2\alpha_n\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle \nonumber\\
&&\quad +\;\;\alpha_n\eta\|x_{n+1}-z\|^2\nonumber\\
&\leq&((1-\alpha_n)^2+\alpha_n\eta)\Gamma_n+ \theta_n\|x_n-x_{n-1}\|M_2+\alpha_n\eta\|x_{n+1}-z\|^2\nonumber\\
&& \quad +\;\;2\alpha_n\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle,
\end{eqnarray}
where
$$
M_2:=\sup_{n \geq 1} \left\{\left((1-\alpha_n)^2+\alpha_n\eta\right)\left(\sqrt{\Gamma_n}+\sqrt{\Gamma_{n-1}}\right)+2\left((1-\alpha_n)^2+\alpha_n\eta\right)\|x_n-x_{n-1}\|\right\}.
$$
Therefore
\begin{eqnarray}\label{eq9}
\Gamma_{n+1}&\leq& \frac{(1-\alpha_n)^2+\alpha_n\eta}{1-\alpha_n \eta}\Gamma_n+ \frac{\theta_n\|x_n-x_{n-1}\|M_2}{1-\alpha_n \eta}\nonumber \\
&& \quad +\;\;2\frac{\alpha_n}{1-\alpha_n \eta}\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle \nonumber \\
&\leq& \left(1-\frac{2(1-\eta)\alpha_n}{1-\alpha_n \eta} \right)\Gamma_n+\frac{\theta_n\|x_n-x_{n-1}\|M_2}{1-\alpha_n \eta}\nonumber \\
&& \quad +\;\;2\frac{\alpha_n}{1-\alpha_n \eta}\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle +\frac{\alpha_n^2}{1-\alpha_n \eta}\Gamma_n\nonumber \\
&\leq& \left(1-\frac{2(1-\eta)\alpha_n}{1-\alpha_n \eta} \right)\Gamma_n \nonumber \\
&& +\;\;\frac{2(1-\eta)\alpha_n}{1-\alpha_n \eta}\Big\{
\frac{\theta_n\|x_n-x_{n-1}\|M_2}{2(1-\eta)\alpha_n} +\frac{\alpha_n \Gamma_{n_0}}{2(1-\eta)}+\frac{1}{1-\eta}\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle \Big\}\nonumber \\
&=&(1-\delta_n)\Gamma_n+\delta_n \sigma_n,
\end{eqnarray}
where $\delta_n:=\frac{2(1-\eta)\alpha_n}{1-\alpha_n \eta} \,$ and
$$
\sigma_n:=\frac{\theta_n\|x_n-x_{n-1}\|M_2}{2(1-\eta)\alpha_n}+\frac{\alpha_n \Gamma_{n_0}}{2(1-\eta)}+\frac{1}{1-\eta}\langle S_{\gamma}(z)-z, x_{n+1}-z \rangle.
$$
Using Lemma \ref{lm23}(ii) and Assumption~\ref{Ass:Parameters} in \eqref{eq9}, we get
$\Gamma_n=\|x_n-z\|^2\rightarrow 0$ and thus $x_n\rightarrow z$ as $n\rightarrow \infty$.\\
\noindent {\bf Case 2:} Assume that there is no $ n_0 \in \mathbb{N} $ such that
$\{\Gamma_{n}\}_{n=n_0}^\infty$ is monotonically decreasing.
Let
$\tau :\mathbb{N}\rightarrow \mathbb{N}$ be a mapping defined for all
$n\geq n_{0}$ (for some $n_{0}$ large enough) by
$$
\tau(n):=\max~\left\{k\in \mathbb{N}:k\leq n,\Gamma_{k}\leq \Gamma_{k+1}\right\},
$$
i.e.\ $ \tau (n) $ is the largest number $ k $ in $ \{ 1, \ldots, n \} $ such
that $ \Gamma_k $ increases at $ k = \tau (n) $; note that, in view of
Case 2, this $ \tau (n) $ is well-defined for all sufficiently large $ n $.
Clearly, $\tau$ is a non-decreasing sequence \cite{MaingeSIAM} such that $\tau(n)\rightarrow
\infty$ as $n \rightarrow \infty$ and
$$
0\leq\Gamma_{\tau(n)}\leq\Gamma_{\tau(n)+1}, \quad \forall n\geq n_{0}.
$$
Using similar techniques as in \eqref{eq6}, it is easy to
show that
$$
\lim_{n \to \infty}\|Ty_{\tau(n)}-y_{\tau(n)}\|
=\lim_{n \to \infty}\|y_{\tau(n)}-x_{\tau(n)}\|
=\lim_{n \to \infty}\|Ty_{\tau(n)}-x_{\tau(n)}\|=0.
$$
Furthermore, using the boundedness of $ \{ x_n \} $, $ \{ y_n \} $ and
Assumption \ref{Ass:Parameters}, we get
\begin{eqnarray}\label{s7}
\|x_{\tau(n)+1}-x_{\tau(n)}\| &\leq&\alpha_{\tau(n)}\|S_{\gamma}\Big(y_{\tau(n)}\Big)-x_{\tau(n)}\|+
(1-\alpha_{\tau(n)})\|y_{\tau(n)}-x_{\tau(n)}\| \nonumber \\
& & +\;\;(1-\alpha_{\tau(n)})\|Ty_{\tau(n)}-x_{\tau(n)}\|\longrightarrow 0 \, \mbox{ as } \, n \rightarrow \infty.
\end{eqnarray}
Since $\{x_{\tau(n)}\}$ is bounded, there exists a subsequence of
$\{x_{\tau(n)}\}$, still denoted by $\{x_{\tau(n)}\}$, which converges
to some $p\in F(T)$. Similarly, as in Case 1 above, we can show that we have
$\underset{n\rightarrow\infty}\limsup~\left\langle S_\gamma(z)-z, x_{\tau(n)+1}-z\right\rangle
\leq 0.$
Following \eqref{eq9}, we obtain
\begin{eqnarray}\label{flasche}
\Gamma_{\tau(n)+1}&\leq&(1-\delta_{\tau(n)})\Gamma_{\tau(n)}+\delta_{\tau(n)} \sigma_{\tau(n)},
\end{eqnarray}
which implies that
$
\|x_{\tau(n)}-z\|^2 \leq \sigma_{\tau(n)}
$
while noting that $\Gamma_{\tau(n)} \leq \Gamma_{\tau(n)+1}$ and $\alpha_{\tau(n)}>0$ hold. This leads to
$
\underset{n\rightarrow\infty}\limsup~\|x_{\tau(n)}-z\|^2 \leq 0.
$
Thus, we have
$$
\lim_{n \to \infty}\|x_{\tau(n)}-z\|=\lim_{n \to \infty} \Gamma_{\tau(n)}=0,
$$
which in turn implies $\underset{n\rightarrow\infty}\lim \|x_{\tau(n)+1}-z\|=0$.
Furthermore, for $n\geq n_{0}$, it is easy to see that $\Gamma_n \leq
\Gamma_{\tau(n)+1}$ (observe that $\tau(n)\leq n$ for $n\geq n_{0}$ and consider the three cases:
$\tau(n)=n$, $\tau(n)=n-1$ and $\tau(n)<n-1$. For the first and second cases, it is obvious that $\Gamma_n \leq
\Gamma_{\tau(n)+1}$ for $n\geq n_{0}$. For the third case $\tau(n)\leq n-2$, we have from the definition of $\tau(n)$ and
for any integer $n\geq n_{0}$ that $\Gamma_{j} \geq \Gamma_{j+1}$ for $\tau(n)+1\leq j\leq n-1.$ Thus,
$\Gamma_{\tau(n)+1} \geq \Gamma_{\tau(n)+2}\geq \cdots\geq \Gamma_{n-1}\geq \Gamma_n$). As a consequence,
we obtain for all sufficiently large $ n $ that
$ 0\leq \Gamma_n \leq \Gamma_{\tau(n)+1} $.
Hence $\underset{n\rightarrow\infty} \lim\Gamma_n=0$. Therefore, $\{x_n\}$
converges to $z$.
\end{proof}
\begin{rem}
Suppose that Assumption \ref{Ass:VI}(c)
is replaced with the following milder condition:
``$h:\mathbb{R}^n \rightarrow \mathbb{R}$ is strongly convex with parameter $\sigma>0$ and $L_h$-Lipschitz continuous''. Then the step involving $z_n$ in Algorithm \ref{Alg:AlgL} can be replaced by
\begin{eqnarray*}
z_n&=&y_n-\gamma \nabla M_{\gamma h}(y_n)\\
&=&y_n-\gamma\frac{1}{\gamma}(y_n-{\rm prox}_{\gamma h}(y_n))\\
&=& {\rm prox}_{\gamma h}(y_n),
\end{eqnarray*}
where $M_{\gamma h}$ is the Moreau envelope of $h$, defined by
$$
M_{\gamma h}(x):=\min_{u \in \mathbb{R}^n}\Big\{h(u)+\frac{1}{2\gamma}\|u-x\|^2 \Big\},
$$
which is continuously differentiable (see \cite{Bauschkebook}) with $\nabla M_{\gamma h}(x)=\frac{1}{\gamma}(x-{\rm prox}_{\gamma h}(x))$ and global convergence is still obtained as in Theorem \ref{t31} using Lemma 6 of \cite{SabachShtern}.
\end{rem}
\noindent
We give some brief comments on the nonasymptotic $O(1/n^2)$ convergence rate of some estimates obtained in Theorem \ref{t31}.
\begin{rem}
Observe that for Algorithm \ref{Alg:AlgL}, we have
$\theta_n\|x_n-x_{n-1}\| \leq \epsilon_n$ for all $n \geq 1$.
\noindent
If we choose $\epsilon_n:=\frac{c}{n^2}$, where $c>0$, then
$\theta_n\|x_n-x_{n-1}\| \leq \frac{c}{n^2}$ for all $n \geq 1$.
\noindent
Thus,
$ \theta_n\|x_n-x_{n-1}\| = O(1/n^2)$
\noindent
and consequently
$$ \|y_n-x_n\| =\theta_n\|x_n-x_{n-1}\| = O(1/n^2).$$
\noindent
Full details on the convergence rate of the result in Theorem \ref{t31} are left for further careful investigation in a separate work.
\end{rem}
\section{Numerical Results}\label{Sec:Numerics}
For the numerical implementation of our proposed method in Section \ref{Sec:Method} we consider the inverse problems
tested in \cite{SabachShtern} and give a numerical comparison between the proposed Algorithm \ref{Alg:AlgL} (iBiG-SAM)
and the BiG-SAM method of \cite{SabachShtern}. The codes are implemented in Matlab.
We perform all computations on a Windows desktop with an Intel(R) Core(TM) i7-2600 CPU at 3.4GHz and 8.00 GB of memory.
We take $\alpha_n=\frac{2\kappa}{n(1-\beta)}$ with $\kappa=0.1$, which is the best choice for BiG-SAM reported in \cite{SabachShtern}, and $\beta$ defined as in \eqref{prox2}; for iBiG-SAM we take $\theta_n=\bar{\theta}_n$ as in \eqref{thetaDefine} with $\alpha = 3$ and $\epsilon_n=\alpha_n/n^{0.01}$.
\begin{table}
\caption{Averaged over 100 runs for each problem in Example \ref{example1}}\label{Table1}
\centering
\renewcommand{\arraystretch}{1.25}
\begin{tabular}{c | c c | c c }
\hline
~&iBiG-SAM&&BiG-SAM&~\\
Problem &Number of iterations&time (sec.)&Number of iterations&time (sec.)\\
\hline
Baart &119.15&1.7253&145.67&2.1089\\
\hline
Foxgood &122.04&1.7861&149.78&2.1885\\
\hline
Phillips&120.77&1.7463&148.18&2.1397\\
\hline
\end{tabular}
\end{table}
\begin{exm}\label{example1}
\noindent Following \cite{SabachShtern}, the inner objective function is taken as
$$
\varphi(x):=\frac{1}{2} \|Ax-b\|^2 +\delta_X(x),
$$
\noindent where $\delta_X$ is the indicator function over the nonnegative orthant $X:=\{x \in \mathbb{R}^n: x \geq 0\}$. Furthermore, we take the outer objective function as
\begin{equation}\label{Donali}
h(x) :=\frac{1}{2}x^TQx,
\end{equation}
\noindent where $Q$ is a positive definite matrix.
It is clear that $L_f=\|A^TA\|$ and $L_h=\|Q\|$. We choose $\lambda=\frac{1}{L_f}$ and $\gamma=\frac{2}{L_h+\sigma}$. \\
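\noindent As an illustration (our own sketch, not the code used for the experiments), these constants and step sizes could be computed in Python as follows, with $\sigma$ taken as the smallest eigenvalue of the symmetric positive definite matrix $Q$:
\begin{verbatim}
import numpy as np

def example1_stepsizes(A, Q):
    # Lipschitz constant of grad f for f(x) = 0.5*||Ax - b||^2
    L_f = np.linalg.norm(A.T @ A, 2)
    # Lipschitz constant and strong convexity parameter of h(x) = 0.5*x'Qx
    L_h = np.linalg.norm(Q, 2)
    sigma = np.linalg.eigvalsh(Q).min()
    return 1.0 / L_f, 2.0 / (L_h + sigma)
\end{verbatim}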
\noindent
Following \cite{Beck}, we consider three inverse problems, i.e., \emph{Baart}, \emph{Foxgood}, and \emph{Phillips} \cite{SabachShtern}. For each of these problems, we generated the corresponding $1,000$ by $1,000$
exact linear system of the form $Ax = b$, by applying the relevant function (baart, foxgood, and phillips). We then performed the simulation by adding normally
distributed noise with zero mean to the right-hand-side vector $b$, with deviation $\rho =0.01$.
The matrix $Q$ is defined by $Q =LL'+I$, where $L$ is generated by the function \texttt{get\_l(1000,1)} from the \emph{Regularization
Tools} (see \url{http://www.imm.dtu.dk/~pcha/Regutools/}) and approximates the first-derivative operator.
\begin{figure}
\caption{Distance to optimal solution v.s. CPU time for problems Baart (left) and Foxgood (right) with $n=100$.}
\label{Figure0}
\end{figure}
\noindent Following \cite{SabachShtern}, we use the stopping condition $(\varphi(x_n)-\varphi^* )/\varphi^* \leq 10^{-2}$ for both methods, where $\varphi^*$ is the optimal value of the inner problem computed in advance by BiG-SAM with $1000$ iterations.
In Table \ref{Table1} we present the average number of iterations and time (over 100 runs) until the algorithms reach the stopping criterion.
It can be seen that iBiG-SAM outperforms BiG-SAM (by about 20\% on average) in all problems tested.\\
\noindent In Figure \ref{Figure0}, we compare the behavior of iBiG-SAM with BiG-SAM on the Baart and Foxgood problems when $n=100$. \qed
\end{exm}
\begin{table}[htp]
\caption{Results averaged over 100 runs for each problem in Example \ref{example2}}\label{Table2}
\centering
\renewcommand{\arraystretch}{1.25}
\begin{tabular}{c | c c | c c }
\hline
~&iBiG-SAM&&BiG-SAM&~\\
Parameters &Iterations&time (sec.)&Iterations&time (sec.)\\
\hline
$\alpha=3, m = 100, n = 500$ &43.32&0.0498&60.43&0.0697\\
\hline
$\alpha=4, m = 200, n = 500$&12.25&0.017&18.65&0.0252\\
\hline
$\alpha=5, m = 500, n = 1000$&12.31&0.124&18.07&0.1793\\
\hline
\end{tabular}
\end{table}
\begin{exm}\label{example2}
We now look at the case when $g$ is not an indicator function. In this case, the methods proposed in \cite{Beck,FerrisMangasarian,Solodov2} cannot be applied. We still give a comparison of our method with BiG-SAM \eqref{e1}. The inner objective function is taken here as
$$
\varphi(x):=\frac{1}{2}\|Ax-b\|_{2}^{2}+\mu\|x\|_{1},
$$
where $A\in \mathbb{R}^{m\times n}$ is a given matrix, $b$ is a given vector, and $\mu$ is a positive scalar. This is the LASSO (Least Absolute Shrinkage and Selection Operator) problem \cite{Tibshirami} from compressed sensing. The proximal map of $g(x)=\mu\|x\|_{1}$ is given by ${\rm prox}_{g}(x)=\arg\min_{u}\big\{\mu\|u\|_{1}+\dfrac{1}{2}\|u-x\|_{2}^{2}\big\},$ which is separable in the indices. Thus, for $x\in \mathbb{R}^n$,
\begin{eqnarray*}
{\rm prox}_{g}(x)\,=\,\left({\rm prox}_{\mu|\cdot|}(x_1),\ldots,{\rm prox}_{\mu|\cdot|}(x_n) \right) = \left(\beta_1,\ldots,\beta_n\right),
\end{eqnarray*}
where $\beta_k={\rm sgn}(x_k)\max\{|x_k|-\mu,0\}$ for $k=1,2,\ldots,n$.
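A minimal sketch of this componentwise soft-thresholding operation (our own illustration in Python) is:
\begin{verbatim}
import numpy as np

def prox_l1(x, mu):
    """Proximal map of g(x) = mu*||x||_1 (componentwise soft-thresholding)."""
    return np.sign(x) * np.maximum(np.abs(x) - mu, 0.0)

print(prox_l1(np.array([1.2, -0.8, 0.4]), 0.5))  # -> [ 0.7 -0.3  0. ]
\end{verbatim}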
As in Example \ref{example1}, we take the outer objective function as in \eqref{Donali}
with $Q$ similarly being a positive definite matrix.
\begin{figure}
\caption{Distance to the optimal solution vs. CPU time when $m=100,n=500$ (left) and $m=500,n=1000$ (right)}
\label{Figure1}
\end{figure}
We take $\mu=0.5$, and the data $b$ is generated as $Ax + \delta e$, where $A$ is a random matrix and $e$ a random vector, both with entries normally distributed with zero mean and variance $1$, $\delta = 0.01$, and $x$ is a generated sparse vector. The stopping condition is $\|x_n-x^*\|\leq \epsilon$ with $\epsilon=10^{-3}$ and $x^*$ computed in advance by
BiG-SAM with $1000$ iterations. In Table \ref{Table2} we present the average number of iterations and time (over 100 runs) until the algorithms reach the stopping criterion for different choices of $\alpha \geq 3$ in spaces of different dimensions. Again, iBiG-SAM outperforms BiG-SAM in all simulations.\\
\noindent In Figure \ref{Figure1}, we compare the behavior of BiG-SAM with iBiG-SAM for different values of the parameter $\alpha$. It appears that iBiG-SAM with $\alpha=3$ has an advantage over the other values tested. \qed
\end{exm}
\noindent Interested readers can download the codes used for the experiments above via the following link (under \emph{iBIG-SAM}) in order to carry out their own tests on other scenarios of Examples \ref{example1} and \ref{example2}, or to adapt them for calculations on new examples: \url{http://www.southampton.ac.uk/~abz1e14/solvers.html}
\section{Concluding Remarks}\label{Sec:Final}
We have introduced an inertial extrapolation-type method for solving simple convex bilevel optimization problems in finite-dimensional Euclidean spaces and proved its global convergence. One can easily check that the results developed here remain valid in infinite-dimensional Hilbert spaces.
Based on the numerical experiments conducted, we have illustrated that our method outperforms the best known algorithm recently proposed in \cite{SabachShtern} for solving problems of the form \eqref{bilevel1}--\eqref{bilevel2}. Our next project in this subject area is to derive the convergence rate of the method proposed in this paper.
\renewcommand{\baselinestretch}{0.95}
{\small{
}}
\end{document}
\begin{document}
\title[A posteriori error estimate for linear elasticity problems]
{ Residual-Based A Posteriori Error Estimates for Symmetric Conforming Mixed Finite Elements for Linear Elasticity Problems }
\author{Long Chen}
\address{Department of Mathematics, University of California at Irvine, Irvine, CA 92697, USA}
\email{[email protected]}
\author{Jun Hu}
\address{LMAM and School of Mathematical Sciences, Peking University, Beijing 100871, China}
\email{[email protected]}
\author{Xuehai Huang}
\address{College of Mathematics and Information Science, Wenzhou University, Wenzhou 325035, China}
\email{[email protected]}
\author {Hongying Man}
\address{School of Mathematics and Statistics, Beijing Institute of Technology, Beijing 100081, China}
\email{[email protected]}
\thanks{The first author was supported by NSF Grant DMS-1418934. This work was finished when L. Chen visited Peking University in the fall of 2015. He would like to thank Peking University for the support and hospitality, as well as for their exciting research atmosphere.}
\thanks{The second author was supported by the NSFC Projects 11625101, 91430213 and 11421101.}
\thanks{The third author was supported by the NSFC Projects 11301396 and 11671304, Zhejiang Provincial
Natural Science Foundation of China Projects LY17A010010, LY15A010015 and LY15A010016, and Wenzhou Science and Technology Plan Project G20160019.}
\thanks{The last author was supported by the NSFC Project 11401026. She would like to thank the China Scholarship Council and the University of California, Irvine for their support during her visit to UC Irvine from 2014 to 2015.}
\begin{abstract}
A posteriori error estimators for the symmetric mixed finite element methods for linear
elasticity problems with Dirichlet and mixed boundary conditions are proposed. Reliability and efficiency of the estimators are proved. Finally, we provide numerical examples
to verify the theoretical results.
\vskip 15pt
\noindent{\bf Keywords.}{
symmetric mixed finite element, linear elasticity problems, {\it a posteriori} error estimator, adaptive method.
}
\vskip 15pt
\noindent{\bf AMS subject classifications.}
{ 65N30, 73C02.}
\end{abstract}
\maketitle
\section{Introduction}
In this paper, we are concerned with the development of residual-based {\it a posteriori} error estimators for symmetric mixed finite element methods for planar linear elasticity problems. Let $\Omega\subset\mathbb{R}^2$ be a bounded polygonal domain with boundary $\Gamma:=\partial\Omega$. Based on the Hellinger-Reissner principle, the linear elasticity
problem with homogeneous Dirichlet boundary condition in stress-displacement form reads:
Find $(\sigma,u)\in\Sigma\times V :=H({\rm div},\Omega;\mathbb
{S})
\times L^2(\Omega;\mathbb{R}^2)$, such that
\begin{equation}\label{eqn1}
\left\{ \ad{
(A\sigma,\tau)+({\rm div}\tau,u)&= 0 && \hbox{for all \ } \tau\in\Sigma,\\
({\rm div}\sigma,v)&= (f,v) &\qquad& \hbox{for all \ } v\in V, }
\right.
\end{equation}
where $\mathbb S\subset\mathbb R^{2\times2}$ is the space of symmetric matrices, and the symmetric tensor space for stress and the
space for vector displacement are, respectively,
\an{
H({\rm div},\Omega;\mathbb {S})
&:= \Big\{ \p{\tau_{ij} }_{2 \times 2} \in H(\d, \Omega)
\ \Big| \ \tau_{12} = \tau_{21} \Big\}, \\
L^2(\Omega;\mathbb{R}^2) &:=
\Big\{ \p{u_1, u_2}^T
\ \Big| \ u_1,u_2 \in L^2(\Omega) \Big\} .}
Throughout the paper, the compliance tensor
$A:\mathbb{S}~\rightarrow~\mathbb{S}$, characterizing the
properties of the material, is bounded and symmetric positive
definite. In the homogeneous isotropic case, the compliance tensor is given by $A\tau=(\tau-\lambda/(2\mu+2\lambda){\rm tr}\tau~ {\rm I})/(2\mu)$, where $\mu>0, \lambda\geq 0$ are the Lam\'{e} constants, ${\rm I}$ is the identity matrix, ${\rm tr}\tau=\tau_{11}+\tau_{22}$ is the trace of the matrix $\tau$. For simplicity, we assume $A$ is a constant matrix in this paper and comment on the generalization to the piecewise constant matrix case.
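For later reference we also record the inverse (stiffness) tensor: a direct calculation from the formula for $A$ above shows that in the isotropic case $\mathcal C:=A^{-1}$ acts as
$$
\mathcal C\,\varepsilon=2\mu\,\varepsilon+\lambda\,({\rm tr}\,\varepsilon)\,{\rm I},
$$
which is the usual stress-strain relation; the notation $\mathcal C$ is used below in the Helmholtz-type decomposition.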
Because of the symmetry constraint on the stress tensor, it is extremely difficult to construct
stable conforming finite elements for \eqref{eqn1} even for 2D problems, as stated in the plenary presentation
to the 2002 International Congress of Mathematicians by Arnold~\cite{Arnold2002}. Important progress
in this direction is the work of Arnold and Winther~\cite{ArnoldWinther2002} and of Arnold, Awanou, and Winther~\cite{ArnoldAwanouWinther2008}. In particular, a sufficient condition for a stable discrete method is proposed in these two papers,
which states that a discrete exact sequence guarantees the stability of the mixed method. Based
on such a condition, conforming mixed finite elements on simplicial and rectangular meshes have been developed for both 2D and 3D~\cite{AdamsCockburn2005, ArnoldAwanou2005, ArnoldWinther2003a, ChenWang2011, HuShi2007a}. Recently, based on a crucial structure of the symmetric matrix-valued
piecewise polynomial $H(\operatorname{div})$ space and two basic algebraic results, Hu and Zhang developed a
new framework to design and analyze mixed finite elements for elasticity problems. As a result, on both
simplicial and tensor product grids, several families of symmetric and optimal mixed
elements with polynomial shape functions in any space dimension have been constructed; see~\cite{Hu2015a, Hu2015, HuZhang2014, HuZhang2015, HuZhang2016} for more details. Theoretical and numerical analyses show that symmetric mixed finite element methods are a popular choice for robust stress approximation~\cite{CarstensenEigelGedicke2011, CarstensenGuntherReininghausThiele2008}.
Computation with adaptive grid refinement has proved to be a useful and efficient tool
in scientific computing over the last several decades. When the domain contains a re-entrant corner, the stress has a singularity at that corner, and
a non-uniform mesh is necessary to capture this singularity. Adaptive finite element methods based on local mesh refinement can recover the optimal rate
of convergence. The key behind this technique is to
design a good {\it a posteriori} error estimator that provides guidance on how and where the grid
should be refined. Residual-based {\it a posteriori} error estimators provide indicators for refining and coarsening the mesh and allow one to control whether the error is below a given
threshold. Various error estimators for mixed finite element discretizations of the Poisson equation have been obtained
in~\cite{Alonso1996,Carstensen1997,ChenHolstXu2009,GaticaMaischak2005,HoppeWohlmuth1997,LarsonMaalqvist2008,LovadinaStenberg2006}. The extension to mixed finite elements for
linear elasticity is, however, very limited. In~\cite{CarstensenDolzmann1998, Kim2012c, LonsingVerfurth2004}, the authors gave {\it a posteriori} error estimators for nonsymmetric mixed finite elements only.
The symmetry of the stress tensor brings an essential difficulty to the {\it a posteriori} error analysis. Since only the symmetric part of the gradient is approximated and not the full gradient, the approach of the {\it a posteriori} error analysis developed in~\cite{CarstensenDolzmann1998,CarstensenHu2007,Kim2012c, LonsingVerfurth2004} cannot be applied directly. In order to overcome this difficulty, Carstensen and Gedicke proposed to generalize the framework of the {\it a posteriori} analysis for nonsymmetric mixed finite elements to the case of symmetric elements by decomposing the stress into the gradient and the asymmetric part of the gradient. A robust residual-based {\it a posteriori} error estimator for Arnold-Winther's symmetric element was proposed in \cite{CarstensenGedicke2016}, but an arbitrary asymmetric approximation $\gamma_h$ of the asymmetric part of the gradient ${\rm skew}(Du)=(Du-D^Tu)/2$ was involved in this estimator. Furthermore, $\gamma_h$ was chosen as the asymmetric gradient of a post-processed displacement to ensure the efficiency of the estimator. More details can be found below.
The goal of this paper is to present an {\it a posteriori} error estimator, together with theoretical upper and lower bounds, for the conforming
and symmetric mixed finite element solutions developed in~\cite{ArnoldWinther2002,HuZhang2014}. We shall follow the guiding principle of \cite{ArnoldWinther2002}: use the continuous and discrete linear elasticity complexes, cf. \eqref{deRham} and \eqref{deRhamdiscrete}.
Given an approximation $\sigma_h$ on the triangulation $\mathcal T_h$ consisting of triangles,
we construct the following {\it a posteriori} error estimator, denoted by $\eta$,
$$
\eta^2(\sigma_{h}, {\mathcal{T}_h}):=\sum\limits_{K\in\mathcal{T}_h}\eta_K^2(\sigma_h)+\sum\limits_{e\in\mathcal{E}_h}\eta_e^2(\sigma_h)
$$
where
$$
\eta_K^2(\sigma_h) :=h_K^4\|{\rm curl \, curl \,}(A\sigma_h)\|_{0,K}^2,~~~
\eta_e^2(\sigma_h):=h_e\|\mathcal{J}_{e,1}\|_{0,e}^2+h_e^3\|\mathcal{J}_{e,2}\|_{0,e}^2,
$$
$$
\begin{array}{ll}
\displaystyle \mathcal{J}_{e,1}:=&\left\{\begin{array}{lr}
\displaystyle\big[(A\sigma_h)t_e\cdot t_e\big]_e&\quad\quad\quad\quad\quad\quad\quad\ \ {\rm if~} e\in\mathcal{E}_h(\Omega),\\
\displaystyle\big((A\sigma_h)t_e\cdot t_e\big)|_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Gamma),
\end{array}\right. \\
\\
\displaystyle \mathcal{J}_{e,2}:=&\left\{\begin{array}{lr}
\displaystyle \big[{\rm curl}(A\sigma_h)\cdot t_e\big]_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Omega),\\
\displaystyle \big({\rm curl}(A\sigma_h)\cdot t_e-\partial _{t_e}\big((A\sigma_h)t_e\cdot \nu_e\big)\big)|_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Gamma),
\end{array}\right.
\end{array}
$$
with $\mathcal E_h$ being the collection of all edges of $\mathcal T_h$. We write $\mathcal E_h=\mathcal E_{h}(\Omega)\bigcup\mathcal {E}_h(\Gamma)$, where $\mathcal E_{h}(\Omega)$ is the collection of interior edges and $\mathcal E_{h}(\Gamma)$ is the collection of all element edges on the boundary. For any edge $e\in\mathcal{E}_h$, let $t_e=(-n_2,n_1)^T$ be the unit tangential vector along edge $e$ for the unit outward normal $\nu_e=(n_1,n_2)^T$. Let $h_K$ be the diameter of the element $K$ and $h_e$ be the length of edge $e$. The data oscillation is defined as
$${\rm osc}^2(f,\mathcal{T}_h):=\sum\limits_{K\in\mathcal{T}_h}h_K^2\|f-Q_h f\|_{0,K}^2,$$
where $Q_h$ is the $L^2$ orthogonal projection operator onto the discrete displacement space.
Using the Helmholtz decomposition induced from the linear elasticity complex \cite{ArnoldWinther2002, CarstensenDolzmann1998}, we establish the following reliability
$$
\|\sigma-\sigma_h\|_A\leq C_1(\eta(\sigma_{h}, {\mathcal{T}_h})+{\rm osc}(f,\mathcal{T}_h)).
$$
In addition, we will prove the following efficiency estimate
$$C_2\eta(\sigma_{h}, {\mathcal{T}_h})\leq \|\sigma-\sigma_h\|_A$$
by following the approach of~\cite{Alonso1996}.
We also generalize the above results to the mixed boundary problems, for which the error estimator is modified on the Dirichlet boundary edges. Reliability and efficiency of the modified error estimator can be proved similarly.
In \cite{ChenHuHuang2016}, a superconvergent approximate displacement $u_h^*$ was constructed by postprocessing $(\sigma_h,u_h)$. Using this result and the {\it a posteriori} error estimate for the stress, we also give an {\it a posteriori} error estimate for the displacement error $\|u-u_h^*\|_{1,h}$ in a mesh-dependent norm.
In order to compare with the {\it a posteriori} error estimator in \cite{CarstensenGedicke2016}, we present their estimator as follows:
\begin{align*}
\tilde{\eta}^2(\sigma_h,\mathcal{T}_h)&:={\rm osc}^2(f,\mathcal{T}_h)+{\rm osc}^2(g,\mathcal{E}_h(\Gamma_N))\\
&+\sum\limits_{K\in\mathcal{T}}h_K^2\|{\rm curl}(A\sigma_h+\gamma_h)\|_{0,K}^2\\
&+\sum\limits_{e\in\mathcal{E}_h(\Omega)}h_e\|[A\sigma_h+\gamma_h]_e\tau_e\|_{0,e}^2\\
&+\sum\limits_{e\in\mathcal{E}_h(\Gamma_D)}h_e\|(A\sigma_h+\gamma_h-\nabla u_D)\tau_e\|_{0,e}^2.
\end{align*}
(The estimator is rewritten in our notation and the details of the standard notation can be found below.) To ensure the efficiency of the estimator, a sufficiently accurate polynomial asymmetric approximation $\gamma_h$ of the asymmetric gradient ${\rm skew}(Du):=(Du-D^Tu)/2$ is involved in the above estimator.
Since the global approximation or even minimization may be too costly, Carstensen and Gedicke compute the sufficiently accurate approximation $\gamma_h={\rm skew}(Du_h^*)$ from the post-processed displacement $u_h^*$ in the spirit of Stenberg \cite{Stenberg1988}. As we can see, this estimator is quite different from ours. The estimators we propose use the symmetric stress directly and do not need any approximation of the asymmetric part; therefore they are computationally cheaper.
The rest of the paper is organized as follows. Section 2 presents the notation and the discrete finite element problems. Section 3 proposes an {\it a posteriori} error estimator for the stress and proves the reliability and efficiency of the estimator. Section 4 generalizes the results of Section 3 to mixed boundary problems. Section 5 gives an {\it a posteriori} error estimate for the displacement. Section 6 presents numerical experiments showing the effectiveness of the estimator.
Throughout this paper, we use ``$\lesssim\cdots $" to mean that ``$\leq C\cdots$", where $C$ is a generic positive constant independent of $h$ and the Lam\'{e} constant $\lambda$, which may take different values at different appearances.
\section{Notations and preliminaries}
Standard notations on Sobolev spaces and norms are adopted throughout this paper and, for brevity, $\|\cdot\|:=\|\cdot\|_{L^2(\Omega)}$ denotes the $L^2$ norm. $(\cdot,\cdot)_K$ represents, as usual, the $L^2$ inner product on the domain $K$, the subscript $K$ is omitted when $K=\Omega$. $\langle\cdot,\cdot\rangle_{\Gamma}$ represents the $L^2$ inner product on the boundary $\Gamma$. For brevity, let $\displaystyle\partial_{x_i}:=\partial/\partial x_i$ and $\partial^2_{x_ix_j}:=\partial^2/\partial x_i\partial x_j, j=1,2,$ $\partial_{\nu}:=\partial/\partial \nu$, $\partial_{t}:=\partial/\partial t$. For $\phi\in H^1(\Omega;\mathbb{R})$, $v=(v_1,v_2)^{T}\in H^1(\Omega;\mathbb{R}^2)$, set
$$
{\rm \bf{Curl}}\phi:=\left(-\partial \phi/ \partial x_2,\ \partial \phi/ \partial x_1\right),\quad\quad\quad
{\rm \bf{Curl}}v:=\left(\begin{array}{cc}
-\partial v_1/ \partial x_2&\partial v_1/ \partial x_1\\
-\partial v_2/ \partial x_2&\partial v_2/ \partial x_1
\end{array}\right).$$
For $\tau=(\tau_{i,j})_{2\times2}\in H^1(\Omega; \mathbb{R}^{2\times 2})$, set
$$
{\rm curl}\tau:=\left(\begin{array}{c}
\partial\tau_{12}/ \partial x_1-\partial\tau_{11}/ \partial x_2\\
\partial\tau_{22}/ \partial x_1-\partial\tau_{21}/ \partial x_2
\end{array}\right),\qquad
{\rm div}\tau:=\left(\begin{array}{c}
\partial\tau_{11}/ \partial x_1+\partial\tau_{12}/ \partial x_2\\
\partial\tau_{21}/ \partial x_1+\partial\tau_{22}/ \partial x_2
\end{array}\right).
$$
Namely the differential operators $\operatorname{curl}$ and $\operatorname{div}$ are applied rowwise for tensors.
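In particular, composing the two definitions (a direct computation, recorded here for the reader's convenience) gives, for a sufficiently smooth scalar $\phi$,
$$
{\rm \bf{Curl} \, \bf{Curl} \,}\phi=\left(\begin{array}{cc}
\partial^2 \phi/\partial x_2^2&-\partial^2 \phi/\partial x_1\partial x_2\\
-\partial^2 \phi/\partial x_1\partial x_2&\partial^2 \phi/\partial x_1^2
\end{array}\right),
$$
which is symmetric and rowwise divergence-free; this is the operator entering the elasticity complexes and the Helmholtz-type decomposition below.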
Let $\mathcal{T}_h$ be a shape-regular triangulation of $\bar{\Omega}$ into triangles with the set of edges $\mathcal{E}_h$. Denote by $\mathcal{E}_h(\Omega)$ the collection of all interior element edges in $\mathcal T_h$ and $\mathcal E_h(\Gamma)$ the collection of all element edges on the boundary. For any triangle $K\in\mathcal{T}_h$, let $\mathcal{E}(K)$ be the set of its edges. For any edge $e\in\mathcal{E}(K)$, let $t_e=(-n_2,n_1)^T$ be the unit tangential vector along edge $e$ for the unit outward normal vector $\nu_e=(n_1,n_2)^T$, $h_K$ be the diameter of the element $K$ and $h_e$ be the length of the edge $e$, $h=\max\limits_{K\in\mathcal{T}_h}\{h_K\}$ be the diameter of the partition $\mathcal{T}_h$. The jump $[w]_e$ of $w$ across edge $e=\bar{K}_+\cap\bar{K}_-$ reads
$$[w]_e:=(w|_{K_+})_e-(w|_{K_-})_e.$$
Particularly, if $e\in \mathcal{E}_h(\Gamma),\ [w]_e:=w|_e$.
Let $\Sigma_h\times V_h\subseteq \Sigma \times V $ be a symmetric conforming mixed element pair defined on the mesh $\mathcal{T}_h$. Then the discrete mixed formulation of problem \eqref{eqn1} reads: find $(\sigma_h, u_h)\in \Sigma_h\times V_h$ such that
\begin{equation}\label{mfem}
\left\{
\begin{array}{rcl}(A\sigma_h,\tau_h)+({\rm div}\tau_h, u_h)&& =0\quad\quad\quad\quad\;{\rm for ~all~}\tau_h\in \Sigma_h,\\
({\rm div}\sigma_h,v_h)&& =(f,v_h)\quad\quad{\rm for~all~}v_h\in V_h.
\end{array}
\right.
\end{equation}
In the sequel, we briefly introduce the Hu-Zhang element~\cite{Hu2015a, HuZhang2014, HuZhang2016}.
For each $K\in\mathcal{T}_h$, let $P_k(K)$ be the space of polynomials of total degree at most $k$ on $K$, and set
$$P_{k}(K; \mathbb{S}):=\{\tau\in L^2(K;\mathbb{R}^{2\times 2})\,|\,\tau_{ij}\in P_k(K),\ \tau_{ij}=\tau_{ji},\ 1\leq i,j\leq 2\}, $$
$$P_{k}(K; \mathbb{R}^2):=\{v\in L^2(K;\mathbb{R}^2)\,|\,v_i\in P_k(K),\ i=1,2\}. $$
Define the $H(\operatorname{div}, K; \mathbb{S})$ bubble function space
\[
B_{K,k}:=\left\{\tau\in P_{k}(K; \mathbb{S}): \tau\nu|_{\partial K}=0\right\}.
\]
The Hu-Zhang element space
is given by
\begin{align*}
\Sigma_{h}&:=\widetilde{\Sigma}_{k,h} + B_{k,h},\\
V_{h}&:=\left\{v\in L^2(\Omega; \mathbb{R}^2): v|_K\in P_{k-1}(K; \mathbb{R}^2)\quad \forall\,K\in\mathcal
{T}_h\right\},
\end{align*}
with integer $ k\geq 3$, where
\begin{align*}
B_{k,h}&:=\left\{\tau\in H(\operatorname{div}, \Omega; \mathbb{S}):
\tau|_K\in B_{K,k} \quad \forall\,K\in\mathcal{T}_h \right\}, \\
\widetilde{\Sigma}_{k,h}&:=\left\{\tau\in H^1(\Omega; \mathbb{S}):
\tau|_K\in P_{k}(K; \mathbb{S}) \quad \forall\,K\in\mathcal{T}_h \right\}.
\end{align*}
For the above elements, the following {\it a priori } error estimate holds.
\begin{theorem}[A priori error estimate \cite{Hu2015a, HuZhang2014, HuZhang2016}]
The exact solution $(\sigma,u)$ of problem \eqref{eqn1} and the approximate solution $(\sigma_h, u_h)$ of problem \eqref{mfem} satisfy
\begin{align*}
\|\sigma-\sigma_h\|_0&\lesssim h^m\|\sigma\|_m, \quad\quad\;\,\text{for}~1\leq m\leq k+1,\\
\|{\rm div}(\sigma-\sigma_h)\|_0&\lesssim h^m\|{\rm div}\sigma\|_m,\quad\text{for}~0\leq m\leq k,\\
\|u-u_h\|_0&\lesssim h^m\|u\|_{m+1}, \quad\;\,\text{for}~1\leq m\leq k.
\end{align*}
\end{theorem}
In the continuous case, the following exact sequence
\begin{equation}\label{deRham}
P_1(\Omega)
\longrightarrow
H^2(\Omega)
\stackrel{\rm \bf{Curl} \, \bf{Curl}}{\longrightarrow}
H(\operatorname{div},\Omega;\mathbb S)
\stackrel{\operatorname{div}}{\longrightarrow}
L^2(\Omega,\mathbb R^2)
\end{equation}
holds for linear elasticity~\cite{ArnoldWinther2002}. In the discrete case, a similar exact sequence holds:
\begin{equation}\label{deRhamdiscrete}
P_1(\Omega)
\longrightarrow
\Phi_h
\stackrel{\rm \bf{Curl} \, \bf{Curl}}{\longrightarrow}
\Sigma_h
\stackrel{\operatorname{div}}{\longrightarrow}
V_h.
\end{equation}
As stated in \cite{ArnoldWinther2002}, the space $\Phi_h$ for the Arnold-Winther element is precisely the space of $C^1$
piecewise polynomials which are $C^2$ at the vertices, that is, the well-known high-order Hermite
or Argyris finite element. The Hu-Zhang element is an enrichment of the Arnold-Winther element, adding all the piecewise polynomial matrices of degree $k$ which are not divergence-free on each element and belong to $ H(\operatorname{div},\Omega;\mathbb S)$ globally. So the space $\Phi_h$ for the Hu-Zhang element is the same as the one for the Arnold-Winther element.
\begin{lemma}[Helmholtz-type decomposition \cite{ArnoldWinther2002, CarstensenDolzmann1998}]
For any $\tau \in L^2(\Omega; \mathbb S)$, there exist $v\in H_0^1(\Omega;\mathbb{R}^2)$ and $\phi \in H^2(\Omega)/P_1(\Omega)$ such that
\begin{equation}\label{symdec}
\tau = \mathcal C\varepsilon (v) + {\rm \bf{Curl}\, \bf{Curl}} \phi,
\end{equation}
and the decomposition is orthogonal in the weighted $L^2$-inner product $(\mathcal C^{-1} \cdot,\cdot): = (A~ \cdot,\cdot)$, i.e.,
\begin{equation}
\|\tau\|_A^2 = \|\varepsilon (v)\|_{A^{-1}}^2 + \| {\rm \bf{Curl} \, \bf{Curl}} \phi \|_A^2,
\end{equation}
where $P_1(\Omega)$ is the space of linear polynomials on $\Omega$ and the norm is given by $\|\cdot\|_A^2=(A\,\cdot,\cdot)$. \end{lemma}
Since $$
(A^{-1}A\tau,\tau)=(\tau,\tau)=(A(A^{-1}\tau),\tau),
$$
by the boundedness and coerciveness of the operator $A$, we obtain the following relationship of the norms: for any $\tau\in\Sigma$, there exist positive constants $C_1$ and $C_2$, which are independent of the Lam\'e constant $\lambda$, such that
\begin{align}\label{normequivalence}
C_2\|\tau\|_A^2= C_2(A\tau,\tau)\leq\|\tau\|_0^2\leq C_1(A^{-1}\tau,\tau)=C_1\|\tau\|_{A^{-1}}^2.
\end{align}
It is the goal of this paper to present an {\it a posteriori} error estimate of $\sigma - \sigma_h$ for the Hu-Zhang element method.
It is worth mentioning that the {\it a posteriori} error estimator designed in this paper can be easily extended to the Arnold-Winther element~\cite{ArnoldWinther2002}.
\section{A posteriori Error Estimation for Stress}
In this section, we shall prove the reliability and efficiency of the error estimator. The main observation is that, although it is a saddle point problem, the stress error $\sigma - \sigma _h$ is orthogonal to the divergence-free subspace, while the part of the error that is not divergence-free can be bounded by the data oscillation using the stability of the discretization.
For any $\tau_h\in\Sigma_h$, the error estimator is defined as
\begin{equation}\label{estimator-1}
\eta^2(\tau_{h}, {\mathcal{T}_h}):=\sum\limits_{K\in\mathcal{T}_h}\eta_K^2(\tau_h)+\sum\limits_{e\in\mathcal{E}_h}\eta_e^2(\tau_h),
\end{equation}
where
$$
\eta_K^2(\tau_h) :=h_K^4\|{\rm curl \, curl \,}(A\tau_h)\|_{0,K}^2,~~~
\eta_e^2(\tau_h):=h_e\|\mathcal{J}_{e,1}\|_{0,e}^2+h_e^3\|\mathcal{J}_{e,2}\|_{0,e}^2,
$$
$$
\begin{array}{ll}
\displaystyle \mathcal{J}_{e,1}:=&\left\{\begin{array}{lr}
\displaystyle\big[(A\tau_h)t_e \cdot t_e\big]_e&\quad\quad\quad\quad\quad\quad\quad\ \ {\rm if~} e\in\mathcal{E}_h(\Omega),\\
\displaystyle\big((A\tau_h)t_e\cdot t_e\big)|_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Gamma),
\end{array}\right. \\
\\
\displaystyle \mathcal{J}_{e,2}:=&\left\{\begin{array}{lr}
\displaystyle \big[{\rm curl}(A\tau_h)\cdot t_e\big]_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Omega),\\
\displaystyle \big({\rm curl}(A\tau_h)\cdot t_e-\partial _{t_e}\big((A\sigma_h)t_e\cdot \nu_e\big)\big)|_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Gamma).
\end{array}\right.
\end{array}
$$
The data oscillation is defined as
$${\rm osc}^2(f,\mathcal{T}_h):=\sum\limits_{K\in\mathcal{T}_h}h_K^2\|f-Q_h f\|_{0,K}^2,$$
where $Q_h$ is the $L^2$ orthogonal projection operator onto the discrete displacement space $V_h$.
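The element residual ${\rm curl \, curl \,}(A\sigma_h)$ has a natural interpretation: for the exact stress one has $A\sigma=\varepsilon(u)$, and a symmetric gradient satisfies the two-dimensional Saint-Venant compatibility condition ${\rm curl \, curl \,}\varepsilon(u)=0$, so $\eta_K$ measures the failure of $A\sigma_h$ to be a symmetric gradient. The following short symbolic sketch (our own illustration, not part of the analysis; the scalar curl of a planar vector field $w$ is taken as $\partial w_2/\partial x_1-\partial w_1/\partial x_2$, and the sign convention does not affect the vanishing) verifies this identity:
\begin{verbatim}
import sympy as sp

x1, x2 = sp.symbols('x1 x2')
u1 = sp.Function('u1')(x1, x2)   # generic smooth displacement components
u2 = sp.Function('u2')(x1, x2)

# symmetric gradient eps(u)
e11, e22 = sp.diff(u1, x1), sp.diff(u2, x2)
e12 = (sp.diff(u1, x2) + sp.diff(u2, x1)) / 2

# rowwise curl of the tensor eps(u): w_i = d(e_{i2})/dx1 - d(e_{i1})/dx2
w1 = sp.diff(e12, x1) - sp.diff(e11, x2)
w2 = sp.diff(e22, x1) - sp.diff(e12, x2)

# scalar curl of w; simplifies to 0 (Saint-Venant compatibility in 2D)
print(sp.simplify(sp.diff(w2, x1) - sp.diff(w1, x2)))
\end{verbatim}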
\subsection{Stability result}
For ease of exposition, we write the mixed formulation for linear elasticity as $\mathcal L(\sigma, u) = f$. The natural stability of the operator is $\|\sigma\|_{H(\operatorname{div})}+\|u\|\lesssim \|f\|.$ However, a stronger stability result can be proved for a special perturbation of the data.
\begin{lemma}
Let $f_h$ be the $L^2$ projection of $f$ onto $V_h$ and let $(\sigma, u) = \mathcal L^{-1}f$ and $(\tilde \sigma, \tilde u)= \mathcal L^{-1}f_h$.
Then we have
\begin{equation}\label{oscf}
\|\sigma - \tilde \sigma\|_A \lesssim {\rm osc}(f,\mathcal T_h).
\end{equation}
\end{lemma}
\begin{proof}
Using the first equation of \eqref{eqn1} and letting $v = u - \tilde u$, we obtain
\begin{align*}
(A(\sigma - \tilde\sigma), \sigma - \tilde\sigma)&=-({\rm div}(\sigma-\tilde\sigma), u - \tilde u)=- (f- Q_h f, u - \tilde u)\\
&=(f-Q_h f, Q_h v - v)\\
&\leq \sum\limits_{K\in\mathcal{T}_h}\|f-Q_h f\|_{0,K}\|v-Q_hv\|_{0,K}\\
&\lesssim\sum\limits_{K\in\mathcal{T}_h}\|f-Q_h f\|_{0,K}h_K |v |_{1,K}\\
&\lesssim\left (\sum\limits_{K\in\mathcal{T}_h}h_K^2\|f-Q_h f\|_{0,K}^2\right )^{\frac{1}{2}}\|\varepsilon(v)\|_0,
\end{align*}
where Korn's inequality is used and $\varepsilon(v)=\frac{1}{2}(\nabla v+(\nabla v)^T)$ denotes the symmetric gradient. Since $\varepsilon(v)=A(\sigma-\tilde{\sigma})$, by \eqref{normequivalence} we have $\|\varepsilon(v)\|_0 \lesssim \|\sigma - \tilde \sigma\|_A$, which yields the desired stability result.
\end{proof}
The oscillation ${\rm osc}(f,\mathcal T_h)$ is an upper bound of $\|f-f_h\|_{-1}$ and is of higher order compared with the error estimator.
\subsection{Orthogonality}
For any $\phi\in H^2(\Omega)$, we have ${\rm \bf{Curl} \, \bf{Curl} \, }\phi\in H({\rm div},\Omega;\mathbb{S})$, and we can use the exact sequence property $\operatorname{div} {\rm \bf{Curl} \, \bf{Curl} \, } = 0$ to get
\begin{equation}\label{Asigma}
(A\tilde \sigma, {\rm \bf{Curl} \, \bf{Curl} \, } \phi) =- (\tilde u, \operatorname{div} {\rm \bf{Curl} \, \bf{Curl} \,}\phi ) = 0.
\end{equation}
Similarly $$(A \sigma_h, {\rm\bf{Curl} \, \bf{Curl} \, } \phi_h) = -(u_h, \operatorname{div} {\rm \bf{Curl} \, \bf{Curl} \,}\phi_h ) = 0$$
for any $\phi_h\in \Phi_h$.
Therefore we have a partial orthogonality
\begin{equation}\label{partial-orthog}
(A(\tilde \sigma - \sigma _h), {\rm \bf{Curl} \, \bf{Curl} \,} \phi_h ) = 0 \quad \forall~\phi_h \in \Phi_h.
\end{equation}
\subsection{Upper bound}
Let $S_h^5$ denote the Argyris finite element space, which consists of $C^1$ piecewise polynomials of degree less than or equal to $5$:
\begin{align*}
S_h^5:=\big\{v\in L^2(\bar{\Omega}):~&v|_K\in P_5(K)~~\forall K\in\mathcal{T}_h, ~v \text{ and all its first and second}\\
&\text{derivatives are continuous at the vertices, and the normal}\\
&\text{derivative of } v \text{ is continuous at the edge midpoints}\big\}.
\end{align*}
Following \cite{ShiWang2013, GiraultScott2002}, we can define a quasi-interpolation operator $I_h:~H^2(\Omega)~\rightarrow S_h^5$ which preserves the values of the function at all vertices of $\mathcal{T}_h$. On each element $K\in\mathcal{T}_h$, for any $v\in H^2(\Omega)$, $I_hv|_K\in P_5(K)$ and it satisfies
\begin{itemize}
\item $I_hv|_K(a_{i,K})=v(a_{i,K}),\quad\quad 1\leq i\leq 3;$\\
\item $\partial_{x_j}(I_hv|_K)(a_{i,K})=\frac{1}{\mathcal{N}_h(a_{i,K})}\sum\limits_{K'\in S(a_{i,K})}\partial_{x_j}(P_hv|_{K'})(a_{i,K}),\ 1\leq i\leq 3,\\ \ j=1,2;$\\
\item $\partial^2_{x_jx_l}(I_hv|_K)(a_{i,K})=\frac{1}{\mathcal{N}_h(a_{i,K})}\sum\limits_{K'\in S(a_{i,K})}\partial^2_{x_jx_l}(P_hv|_{K'})(a_{i,K}),\ 1\leq i\leq 3,\ 1\leq j\leq l\leq 2;$\\
\item $\partial_{\nu}(I_hv|_K)(a_{3+i,K})=\frac{1}{\mathcal{N}_h(a_{3+i,K})}\sum\limits_{K'\in S(a_{3+i,K})}\partial_{\nu}(P_hv|_{K'})(a_{3+i,K}),\ 1\leq i\leq 3;$
\end{itemize}
where $a_{i,K}$, $1\leq i\leq 3$, are the vertices of $K$, $a_{3+i,K}$, $1\leq i\leq 3$, are the edge midpoints of $K$, $\nu$ is the outer normal of the element $K$ at the corresponding edge midpoint, $S(a_{i,K})=\bigcup\{K'\in\mathcal{T}_h: a_{i,K}\in K'\}$, $\mathcal{N}_h(a_{i,K})=\text{card}\{K':K'\in S(a_{i,K})\}$, and $P_h$ is the projection operator from $L^2(\Omega)$ onto the piecewise linear finite element space on $\mathcal{T}_h$. It is obvious that the interpolation operator $I_h$ is uniquely determined by the above degrees of freedom. Furthermore, $I_h$ is a projection, i.e.
\begin{align}
I_hv=v\quad\quad \forall v\in S_h^5,
\end{align}
and it preserves the values of the function at the vertices for any $v\in H^2(\Omega)$, i.e.
\begin{align}\label{preserve-vertex value}
I_hv(a_{i,K})=v(a_{i,K})\quad\quad \forall K\in\mathcal{T}_h,\ \ 1\leq i\leq 3.
\end{align}
A scaling argument similar to that in \cite{ShiWang2013, GiraultScott2002} gives the following interpolation error estimates
\begin{equation}\label{inter-error-K}
|v-I_hv|_{m,K} \lesssim h_K^{2-m}|v|_{2,S_K},~~0\leq m\leq 1,\ \ \forall K\in\mathcal{T}_h,
\end{equation}
\begin{equation}\label{inter-error-e}
|v-I_hv|_{m,e} \lesssim h_e^{2-m-\frac{1}{2}}|v|_{2,S_e},~~0\leq m\leq 1, \ \ \forall\ e\in\mathcal{E}_h,
\end{equation}
where $S_K=\bigcup\{K_i\in\mathcal{T}_h: K_i\bigcap\bar{K}\neq\varnothing\}$, $S_e=\bigcup\{K_i\in\mathcal{T}_h:K_i\bigcap e\neq\varnothing \}.$
Applying the Helmholtz decomposition to the error $\tilde \sigma-\sigma_h$, we have
\begin{equation}\label{HD-error}{
\tilde \sigma-\sigma_h=\mathcal{C}\varepsilon(v)+{\rm \bf{Curl} \, \bf{Curl} \,}\phi
}
\end{equation}
and
\begin{equation}\label{HD-error-estimate}
\|{\rm \bf{Curl} \, \bf{Curl} \,}\phi\|_A\leq\|\tilde{\sigma}-\sigma_h\|_A,
\end{equation}
where $v\in H_0^1(\Omega;\mathbb{R}^2)$ and $\phi\in H^2(\Omega)/P_1(\Omega)$.
By this orthogonal decomposition and the fact $\operatorname{div} (\tilde \sigma-\sigma_h) = 0$,
\begin{align*}
\| \tilde \sigma-\sigma_h\| _A^2&= (A( \tilde \sigma-\sigma_h),\mathcal{C}\varepsilon(v)+{\rm \bf{Curl} \, \bf{Curl} \,}\phi)\\
&= - (\operatorname{div}(\tilde \sigma-\sigma_h),v)+(A(\tilde \sigma-\sigma_h),{\rm \bf{Curl} \, \bf{Curl} \,}\phi)\\
&= (A(\tilde \sigma-\sigma_h),{\rm \bf{Curl} \, \bf{Curl} \,}\phi).
\end{align*}
Since ${\rm \bf{Curl} \, \bf{Curl} \,} (I_h\phi)\in \Sigma_h$, by the orthogonality \eqref{partial-orthog} and the equation \eqref{Asigma},
\begin{align*}
(A(\tilde \sigma-\sigma_h),{\rm \bf{Curl} \, \bf{Curl} \,}\phi) &= (A(\tilde \sigma-\sigma_h),{\rm \bf{Curl} \, \bf{Curl} \,}(\phi-I_h\phi))\\
&= -(A\sigma_h,{\rm \bf{Curl} \, \bf{Curl} \,}(\phi-I_h\phi)).
\end{align*}
An integration by parts gives
\begin{align}
& (A\sigma_h,{\rm \bf{Curl} \, \bf{Curl} \,}(\phi-I_h\phi))\nonumber \\
&= -\sum\limits_{K\in\mathcal{T}_h}({\rm curl}(A\sigma_h),{\rm \bf{Curl}}(\phi-I_h\phi))_K+\sum\limits_{K\in\mathcal{T}_h}\langle(A\sigma_h)t,{\rm \bf{Curl}}(\phi-I_h\phi)\rangle_{\partial K}\nonumber\\
&=\sum\limits_{K\in\mathcal{T}_h}({\rm curl \, curl \,}(A\sigma_h),\phi-I_h\phi)_K+\sum\limits_{K\in\mathcal{T}_h}\langle(A\sigma_h)t,{\rm \bf{Curl}}(\phi-I_h\phi)\rangle_{\partial K}\label{byparts}\\&-\sum\limits_{K\in\mathcal{T}_h}\langle{\rm curl}(A\sigma_h)\cdot t,\phi-I_h\phi\rangle_{\partial K}\nonumber.
\end{align}
The second term on the right-hand side can be rewritten as
\begin{align*}
\sum\limits_{K\in\mathcal{T}_h}\langle A\sigma_h t,{\rm \bf{Curl}}(\phi-I_h\phi)\rangle_{\partial K}
=&\sum\limits_{K\in\mathcal{T}_h}\langle(A\sigma_h)t\cdot t,{\rm \bf{Curl}}(\phi-I_h\phi)\cdot t\rangle_{\partial K}\\
+&\sum\limits_{K\in\mathcal{T}_h}\langle(A\sigma_h t)\cdot \nu,{\rm \bf{Curl}}(\phi-I_h\phi)\cdot \nu\rangle_{\partial K}
\end{align*}
Since the compliance tensor
$A$ is symmetric and continuous, we have $(A\sigma_h t)\cdot \nu=(A\sigma_h \nu)\cdot t$, and $(A\sigma_h t)\cdot \nu$ is continuous across interior element edges. This implies
\begin{align*}
\sum\limits_{K\in\mathcal{T}_h}\langle(A\sigma_h t)\cdot \nu,{\rm \bf{Curl}}(\phi-I_h\phi)\cdot \nu\rangle_{\partial K}
=&-\sum\limits_{e\in\mathcal{E}_h(\Gamma)}\langle (A\sigma_h t_e)\cdot \nu_e,\partial_{t_e}(\phi-I_h\phi)\rangle_{e}\\
=&\sum\limits_{e\in\mathcal{E}_h(\Gamma)}\langle \partial_{t_e}((A\sigma_h t_e)\cdot \nu_e),\phi-I_h\phi\rangle_{e}
\end{align*}
where we used the fact that $\phi-I_h\phi$ vanishes at the boundary vertices; see \eqref{preserve-vertex value}. So
\begin{align*}
\sum\limits_{K\in\mathcal{T}_h}\langle A\sigma_h t,{\rm \bf{Curl}}(\phi-I_h\phi)\rangle _{\partial K}&=\sum\limits_{e\in\mathcal{E}_h(\Omega)}\langle \big[(A\sigma_h t_e)\cdot t_e\big]_e,\partial_{\nu_e}(\phi-I_h\phi)\rangle_{e}\\
&+\sum\limits_{e\in\mathcal{E}_h(\Gamma)}\langle (A\sigma_h t_e)\cdot t_e,\partial_{\nu_e}(\phi-I_h\phi)\rangle_{e}\\
&+\sum\limits_{e\in\mathcal{E}_h(\Gamma)}\langle \partial_{t_e}((A\sigma_h t_e)\cdot \nu_e),\phi-I_h\phi\rangle_{e}.
\end{align*}
Substituting it into \eqref{byparts}, we get
\begin{align*}
(A\sigma_h,{\rm \bf{Curl} \, \bf{Curl} \,}(\phi-I_h\phi))
=&\sum\limits_{K\in\mathcal{T}_h}({\rm curl \, curl \,}(A\sigma_h),\phi-I_h\phi)_K\\
+&\sum\limits_{e\in\mathcal{E}_h(\Omega)}\langle \big[(A\sigma_h t_e)\cdot t_e\big]_e,\partial_{\nu_e}(\phi-I_h\phi)\rangle_{e} \\
-&\sum\limits_{e\in\mathcal{E}_h(\Omega)}\langle \big[{\rm curl}(A\sigma_h)\cdot t_e\big]_e,\phi-I_h\phi \rangle_{e}\\
+&\sum\limits_{e\in\mathcal{E}_h(\Gamma)}\langle (A\sigma_h t_e)\cdot t_e,\partial_{\nu_e}(\phi-I_h\phi)\rangle_{e} \\
+&\sum\limits_{e\in\mathcal{E}_h(\Gamma)}\langle \partial_{t_e}((A\sigma_h t_e)\cdot \nu_e)-{\rm curl}(A\sigma_h)\cdot t_e,\phi-I_h\phi\rangle_{e}.
\end{align*}
Then, applying the Cauchy-Schwarz inequality and the error estimates \eqref{inter-error-K} and \eqref{inter-error-e} of the quasi-interpolation, we have
\begin{align}\label{error-tilde-sigma}
&\|\tilde{\sigma}-\sigma_h\|_A^2=(A(\tilde \sigma-\sigma_h),{\rm \bf{Curl} \, \bf{Curl} \,}\phi)\nonumber\\
&\lesssim \left [\sum\limits_{K\in\mathcal{T}_h}h_K^4\|{\rm curl~curl}(A\sigma_h)\|_{0,K}^2+\sum\limits_{e\in\mathcal{E}_h}\left(h_e\|\mathcal{J}_{e,1}\|_{0,e}^2+h_e^3\|\mathcal{J}_{e,2}\|_{0,e}^2\right)\right ]^{\frac{1}{2}}|\phi|_2\\
&\lesssim \left [\sum\limits_{K\in\mathcal{T}_h}\eta_K^2(\sigma_h)+\sum\limits_{e\in\mathcal{E}_h}\eta_e^2(\sigma_h)\right ]^{\frac{1}{2}}\| {\rm \bf{Curl}\, \bf{Curl}\,} \phi\|_0.\nonumber
\end{align}
By \cite{CarstensenDolzmann1998}, the function $\phi$ defined in \eqref{HD-error} satisfies
${\rm div}(\bf{Curl}~\bf{Curl}~\phi)=0$ and
$$
\int_{\Omega}{\rm tr}(\bf{Curl}~\bf{Curl}~\phi){\rm d}x=\int_{\Omega}{\rm tr}(\tilde \sigma-\sigma_h-\mathcal{C}\varepsilon(v)){\rm d}x=-\int_{\Omega}{\rm tr}(\mathcal{C}\varepsilon(v)){\rm d}x=0.
$$
Using Proposition~9.1.1 in \cite{BoffiBrezziFortin2013}, we get
$$
\| {\rm \bf{Curl}\, \bf{Curl}\,} \phi\|_0 \leq C \| {\rm \bf{Curl}\, \bf{Curl}\,} \phi\|_A,
$$
where the constant $C$ is independent of the Lam\'{e} constant $\lambda$. Combining this with
\eqref{HD-error-estimate} and \eqref{error-tilde-sigma}, we obtain
$$
\|\tilde\sigma-\sigma_h\|_A\lesssim \left [\sum\limits_{K\in\mathcal{T}_h}\eta_K^2(\sigma_h)+\sum\limits_{e\in\mathcal{E}_h}\eta_e^2(\sigma_h)\right ]^{\frac{1}{2}}.
$$
Together with the triangle inequality and the perturbation result \eqref{oscf}, we get the desired error bound
\begin{align*}
\|\sigma-\sigma_h\|_A&\leq \|\sigma-\tilde \sigma\|_A + \|\tilde \sigma-\sigma_h\|_A\\
&\lesssim \left [\sum\limits_{K\in\mathcal{T}_h}\eta_K^2(\sigma_h)+\sum\limits_{e\in\mathcal{E}_h}\eta_e^2(\sigma_h)\right ]^{\frac{1}{2}} + {\rm osc}(f,\mathcal{T}_h).
\end{align*}
In summary, we obtain the following upper bound estimation.
\begin{theorem}[Reliability of the error estimator]
Let $(\sigma, u)$ be the solution of the mixed formulation \eqref{eqn1} and $(\sigma_h, u_h)$ be the solution of the mixed finite element method \eqref{mfem}. If the compliance tensor
$A$ is continuous, then there exists a positive constant $C_1$ depending only on the shape-regularity of the triangulation and the polynomial degree $k$ such that
\begin{equation}\label{eq:posterioriestimateupperbound}
\|\sigma-\sigma_h\|_{A}\leq C_1(\eta(\sigma_{h}, {\mathcal{T}_h}) + {\rm osc}(f,\mathcal{T}_h)).
\end{equation}
\end{theorem}
\begin{remark}\label{rm:disA}
When $A$ is discontinuous, we can modify $\eta(\sigma_{h}, \mathcal{T}_h)$ as follows:
\begin{align*}
\eta^2(\sigma_h, \mathcal{T}_h):& =\sum_{K\in\mathcal{T}_h}h_K^4\|{\operatorname{curl}\operatorname{curl}}(A\sigma_h)\|_{0,K}^2 + \sum_{e\in\mathcal{E}_h}h_e\|[(A\sigma_h)t_e \cdot t_e]\|_{0,e}^2 \\
&+ \sum_{e\in\mathcal{E}_h}h_e^3\|\big[{\operatorname{curl}}(A\sigma_h)\cdot t_e-\partial_ {t_e}((A\sigma_h)t_e \cdot \nu_e)\big] \|_{0, e}^2.
\end{align*}
Compared to the case of continuous coefficient $A$, this estimator includes an additional term, the jump of $\partial_ {t_e}((A\sigma_h)t_e \cdot \nu_e)$ on all interior edges, owing to the discontinuity of the matrix $A$. Similarly, we can prove the reliability of the estimator
\[
\|\sigma-\sigma_h\|_{A}\lesssim \eta(\sigma_{h}, {\mathcal{T}_h}) + {\rm osc}(f,\mathcal{T}_h).
\]
\end{remark}
\begin{remark}
\rm
By Proposition~9.1.1 in \cite{BoffiBrezziFortin2013}, it holds
\begin{equation*}
\|\tau\|_0\lesssim \|\tau\|_A + \|\operatorname{div}\tau\|_{-1} \quad \forall~\tau\in\hat{\Sigma}
\end{equation*}
where $\hat\Sigma:=\{\tau\in \Sigma: (\mathrm{tr}\tau, 1)=0\}$ with $\mathrm{tr}$ being the trace operator of matrix.
Then, from \eqref{eq:posterioriestimateupperbound} and the fact that $\|f-f_h\|_{-1}\lesssim {\rm osc}(f,\mathcal T_h)$, we also have
\begin{align*}
\|\sigma-\sigma_h\|_{0}\lesssim &\|\sigma-\sigma_h\|_{A}+ \|\operatorname{div}(\sigma-\sigma_h)\|_{-1}\lesssim \eta(\sigma_{h}, {\mathcal{T}_h}) + {\rm osc}(f,\mathcal{T}_h).
\end{align*}
That is, we can control the $L^2$ norm of the stress with a constant independent of the Lam\'e constant $\lambda$. \hfill$\Box$
\end{remark}
\subsection{Lower bound}
We shall follow Alonso \cite{Alonso1996} to prove the efficiency of the error estimator defined in \eqref{estimator-1}. Similarly to \cite{Alonso1996}, we need the following lemma.
\begin{lemma}\label{Morgan-inter}
For any $K\in\mathcal{T}_h$, given $p_K\in L^2(K)$ and $q_e\in L^2(e),~r_e\in L^2(e)$ for $e\in\partial K$, there exists a unique $\psi_K\in P_{k+4}(K)$ satisfying
\begin{equation}\label{psi}
\left\{\begin{array}{rll}
\displaystyle(\psi_K,v)_K=&(p_K,v)_K&\text{ for any } v\in P_{k-2}(K),\\
\displaystyle\langle\psi_K, s\rangle_e=& \langle q_e,s\rangle_e& \text{ for any } s\in P_{k-1}(e),\\
\displaystyle \langle \partial_{\nu}\psi_K,s \rangle_e=&\langle r_e,s\rangle_e&\text{ for any } s\in P_k(e),\\
\displaystyle\partial^{\alpha}\psi_K(P)=&0& |\alpha|\leq 2,~~\text{ for any vertex } P \text{ of } K,
\end{array}\right.
\end{equation}
where $P_k(e)$ denotes the space of polynomials of degree less than or equal to $k$ on the edge $e$. Moreover, it holds that
\begin{align}\label{psi-estimate-1}
\|\psi_K\|_{0,K}^2\lesssim \|p_K\|_{0,K}^2+
\sum\limits_{e\in\partial K}\left (
h_e\|q_e\|_{0,e}^2+h_e^{3}\|r_e\|_{0,e}^2
\right ).
\end{align}
\end{lemma}
\begin{proof}
As in \cite{MorganScott1975}, such a function $\psi_K$ is uniquely determined by the above degrees of freedom. A standard homogeneity argument gives \eqref{psi-estimate-1}.
\end{proof}
\begin{theorem}[Efficiency of the error estimator]
Let $(\sigma, u)$ be the solution of the mixed formulation \eqref{eqn1} and $(\sigma_h, u_h)$ be the solution of the mixed finite element method \eqref{mfem}. If the compliance tensor
$A$ is continuous, then there exists a positive constant $C_2$ depending only on the shape-regularity of the triangulation and the polynomial degree $k$ such that
\begin{equation}\label{eq:posterioriestimatelowerbound}
C_2\eta(\sigma_{h}, \mathcal{T}_h)\leq \|\sigma-\sigma_{h}\|_{A}.
\end{equation}
\end{theorem}
\begin{proof}
The estimator $\eta^2(\sigma_{h}, {\mathcal{T}_h})$ can be rewritten as
\begin{align*}
\eta^2(\sigma_{h}, {\mathcal{T}_h})& =
\displaystyle\sum\limits_{K\in\mathcal{T}_h}\left({\rm curl \, curl \,}(A\sigma_h), h_K^4{\rm curl \, curl \,}(A\sigma_h)\right)_K\\
&+\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle (A\sigma_h) t_e\cdot t_e,\, h_e\mathcal{J}_{e,1}\rangle_e\\
&+\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K\bigcap\mathcal{E}_h(\Omega)}\langle {\rm curl\,} (A\sigma_h) \cdot t_e, \, h_e^3\mathcal{J}_{e,2}\rangle_e\\
&+\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K\bigcap\mathcal{E}_h(\Gamma)}\langle {\rm curl\,} (A\sigma_h) \cdot t_e-\partial_{t_e}
\left((A\sigma_h) t_e\cdot \nu_e\right),\, h_e^3\mathcal{J}_{e,2}\rangle_e.
\end{align*}
On each element $K\in\mathcal{T}_h$, we apply Lemma \ref{Morgan-inter} with $p_K=h_K^4{\rm curl \, curl \,}(A\sigma_h)|_K$, $q_e=-h_e^3\mathcal{J}_{e,2}$, and $r_e=h_e\mathcal{J}_{e,1}$ for each edge $e\in\partial K$. Let $\psi|_K=\psi_K$; the function $\psi$ defined in this way belongs to the high-order Argyris finite element space of degree $k+4$, hence $\psi\in H^2(\Omega)$. Using \eqref{psi-estimate-1}, it follows that
\begin{align}\label{psi-estimate-2}
\|\psi\|_{0,K}^2&\lesssim h_K^8\|{\rm curl \, curl \,}(A\sigma_h)\|_{0,K}^2+\sum\limits_{e\in\partial K}\left (\,
h_e^7\|\mathcal{J}_{e,2}\|_{0,e}^2+h_e^5\|\mathcal{J}_{e,1}\|_{0,e}^2
\right ).
\end{align}
This, in conjunction with \eqref{psi}, yields
\begin{align}
\eta^2(\sigma_{h}, {\mathcal{T}_h})&
=\displaystyle\sum\limits_{K\in\mathcal{T}_h}\left({\rm curl \, curl \,}(A\sigma_h), \psi_K\right)_K\nonumber\\
&-\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle {\rm curl\,} (A\sigma_h) \cdot t_e,\, \psi_K\rangle_e\nonumber\\
&+\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle (A\sigma_h) t_e\cdot t_e,\, \partial_{\nu_e}\psi_K\rangle_e\label{eta-lowerbound-1}\\
&+\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K\bigcap\mathcal{E}_h(\Gamma)}\langle \partial_{t_e}
\left((A\sigma_h) t_e\cdot \nu_e\right),\, \psi_K\rangle_e.\nonumber
\end{align}
Since $(A\sigma_h) t_e\cdot \nu_e$ is continuous across each element edge $e$, we have $[(A\sigma_h) t_e\cdot \nu_e]_e=0$ on interior edges. Noting that $\psi\in H^2(\Omega)$ and vanishes at the mesh vertices, we obtain
\begin{align}
&\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K\bigcap\mathcal{E}_h(\Gamma)}\langle \partial_{t_e}
\left((A\sigma_h) t_e\cdot \nu_e\right),\, \psi_K\rangle_e\nonumber\\
&=-\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K\bigcap\mathcal{E}_h(\Gamma)}\langle
(A\sigma_h) t_e\cdot \nu_e,\, \partial_{t_e}\psi_K\rangle_e\label{boundary-normal-1}\\
&=-\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle
(A\sigma_h) t_e\cdot \nu_e,\, \partial_{t_e}\psi_K\rangle_e.\nonumber
\end{align}
Hence the last two terms of \eqref{eta-lowerbound-1} become
\begin{align}
&\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle (A\sigma_h) t_e\cdot t_e,\, \partial_{\nu_e}\psi_K\rangle_e\nonumber\\
+&\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K\bigcap\mathcal{E}_h(\Gamma)}\langle \partial_{t_e}
\left((A\sigma_h) t_e\cdot \nu_e\right),\, \psi_K\rangle_e\nonumber\\
=&\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle (A\sigma_h) t_e\cdot t_e,\, {\rm \bf{Curl}\,}\psi_K\cdot t_e\rangle_e\label{boundary-normal-2}\\
-&\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle
(A\sigma_h) t_e\cdot \nu_e,\, -{\rm\bf{Curl}\,}\psi_K\cdot \nu_e\rangle_e\nonumber\\
=&\sum\limits_{K\in\mathcal{T}_h}\sum\limits_{e\in\partial K}\langle (A\sigma_h) t_e,\, {\rm \bf{Curl}\,}\psi_K\rangle_e.\nonumber
\end{align}
Substituting \eqref{boundary-normal-2} into \eqref{eta-lowerbound-1} leads to
\begin{align*}
\eta^2(\sigma_{h}, {\mathcal{T}_h})=\displaystyle\sum\limits_{K\in\mathcal{T}_h}\Big(\,\big({\rm curl \, curl \,}(A\sigma_h), \psi_K\big)_K
&-\sum\limits_{e\in\partial K}\langle {\rm curl\,} (A\sigma_h) \cdot t_e,\, \psi_K\rangle_e\\
& +\sum\limits_{e\in\partial K}\langle (A\sigma_h) t_e,\, {\rm \bf{Curl}\,}\psi_K\rangle_e\,\Big).
\end{align*}
Integrating the first term by parts twice,
\begin{align*}
\eta^2(\sigma_{h}, {\mathcal{T}_h})&=\sum\limits_{K\in\mathcal{T}_h}\left( A\sigma_h,\, {\rm \bf{Curl} \, \bf{Curl} \,}\psi_K\right )_K\\
&=\sum\limits_{K\in\mathcal{T}_h}\left (A(\sigma_h-\sigma),{\rm \bf{Curl} \, \bf{Curl} \,}\psi_K \right )_K\\
&\lesssim\|\sigma-\sigma_h\|_A\left(\sum\limits_{K\in\mathcal{T}_h}h_K^{-4}\|\psi\|_{0,K}^2\right)^{\frac{1}{2}},
\end{align*}
where ${\rm \bf{Curl}\, \bf{Curl}}~\psi\in\Sigma$ and the inverse inequality are used. By \eqref{psi-estimate-2},
\begin{align*}
\sum\limits_{K\in\mathcal{T}_h}h_K^{-4}\|\psi\|_{0,K}^2&\lesssim\sum\limits_{K\in\mathcal{T}_h}h_K^4\|{\rm curl \, curl \,}(A\sigma_h)\|_{0,K}^2+\sum\limits_{e\in\mathcal{E}_h}\left( h_e\|\mathcal{J}_{e,1}\|_{0,e}^2+h_e^3\|\mathcal{J}_{e,2}\|_{0,e}^2\right)\\
&\widehat{=}\eta^2(\sigma_h,\mathcal{T}_h).
\end{align*}
Combining the above two inequalities, we have that
\begin{align*}
\eta(\sigma_{h}, {\mathcal{T}_h})\lesssim\|\sigma-\sigma_h\|_A.
\end{align*}
\end{proof}
\begin{remark}\rm
For discontinuous $A$ and the modified error estimator in Remark \ref{rm:disA}, the efficiency can also be proved by a similar argument.
\end{remark}
\section{A posteriori error estimation for mixed boundary problems}
The {\it a posteriori} error estimation for linear elasticity problems with the homogeneous Dirichlet boundary condition can be generalized to problems with mixed boundary conditions. In this section, we discuss the following linear elasticity problem with mixed boundary conditions. Let $\Omega\subset\mathbb{R}^2$ be a bounded polygonal domain with boundary $\Gamma:=\partial\Omega=\Gamma_D\cup\Gamma_N$, where $\Gamma_D\cap\Gamma_N=\emptyset$ and $\Gamma_N\neq\emptyset$. Given data $f\in L^2(\Omega; \mathbb{R}^2)$, $u_D\in H^1(\Omega; \mathbb{R}^2)$, and $g\in L^2(\Gamma_N; \mathbb{R}^2)$, seek the solution $(\sigma,u)\in\Sigma_g\times V$ such that
\begin{equation}\label{eqn1-MB}
\left\{ \ad{
(A\sigma,\tau)+({\rm div}\tau,u)&= \langle u_D, \tau\nu\rangle_{\Gamma_D} && \hbox{for all \ } \tau\in\Sigma_0,\\
({\rm div}\sigma,v)&= (f,v) &\qquad& \hbox{for all \ } v\in V, }
\right.
\end{equation}
where
\begin{align*}
\Sigma_0&:=\big\{\sigma\in H({\rm div},\Omega;\mathbb{S})|\int_{\Gamma_N}\psi\cdot(\sigma\nu)ds=0,~{\rm for~all}~\psi\in\mathcal{D}(\Gamma_N;\mathbb{R}^2)\big\},\\
\Sigma_g&:=\big\{\sigma\in H({\rm div},\Omega;\mathbb{S})|\int_{\Gamma_N}\psi\cdot(\sigma\nu)ds=\int_{\Gamma_N}\psi\cdot gds,~{\rm for~all}~\psi\in\mathcal{D}(\Gamma_N;\mathbb{R}^2)\big\},
\end{align*}
where $\mathcal{D}$ denotes the space of test functions. Let $\Sigma_{0,h}:=\Sigma_0\cap\Sigma_h$ and $\Sigma_{g,h}:=\Sigma_g\cap \Sigma_h$. The mixed finite element method seeks $(\sigma_h,u_h)\in\Sigma_{g,h}\times V_h$ such that
\begin{equation}\label{Meqn1-MB}
\left\{ \ad{
(A\sigma_h,\tau_h)+({\rm div}\tau_h,u_h)&= \langle u_D, \tau_h\nu\rangle_{\Gamma_D} && \hbox{for all \ } \tau_h\in\Sigma_{0,h},\\
({\rm div}\sigma_h,v_h)&= (f,v_h) &\qquad& \hbox{for all \ } v_h\in V_h. }
\right.
\end{equation}
We modify the {\it a posteriori} error estimator defined in Section 3 as follows:
$$
\eta^2(\sigma_{h}, {\mathcal{T}_h}):=\sum\limits_{K\in\mathcal{T}_h}\eta_K^2(\sigma_h)+\sum\limits_{e\in\mathcal{E}_h}\eta_e^2(\sigma_h)
$$
where
$$
\eta_K^2(\sigma_h) :=h_K^4\|{\rm curl \, curl \,}(A\sigma_h)\|_{0,K}^2,~~~
\eta_e^2(\sigma_h):=h_e\|\mathcal{J}_{e,1}\|_{0,e}^2+h_e^3\|\mathcal{J}_{e,2}\|_{0,e}^2,
$$
$$
\begin{array}{ll}
\displaystyle \mathcal{J}_{e,1}:=&\left\{\begin{array}{ll}
\displaystyle\Big[(A\sigma_h)t_e\cdot t_e\Big]_e&{\rm if~} e\in\mathcal{E}_h(\Omega)\\
\displaystyle\Big((A\sigma_h)t_e\cdot t_e - \partial_{t_e}(u_D\cdot t_e)\Big)|_e\quad\quad\quad\quad\quad\ & {\rm if~} e\in\mathcal{E}_h(\Gamma_D)\\
\displaystyle\Big((A\sigma_h)t_e\cdot t_e\Big)|_e&{\rm if~} e\in\mathcal{E}_h(\Gamma_N)
\end{array}\right. \\
\\
\displaystyle \mathcal{J}_{e,2}:=&\left\{\begin{array}{lr}
\displaystyle \Big[{\rm curl}(A\sigma_h)\cdot t_e\Big]_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Omega)\\
\displaystyle \Big({\rm curl}(A\sigma_h)\cdot t_e+\partial_{t_et_e}(u_D\cdot\nu)-\partial _{t_e}\big((A\sigma_h)t_e\cdot \nu_e\big)\Big)|_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Gamma_D)\\
\displaystyle \Big({\rm curl}(A\sigma_h)\cdot t_e-\partial _{t_e}\big((A\sigma_h)t_e\cdot \nu_e\big)\Big)|_e&~~~~ {\rm if~} e\in\mathcal{E}_h(\Gamma_N)
\end{array}\right.
\end{array}
$$
where $\mathcal E_{h}(\Gamma_D)$ and $\mathcal E_{h}(\Gamma_N)$ are the collections of element edges on the Dirichlet and Neumann boundaries, respectively.
Similar to Section 3, we can prove the reliability and efficiency of this {\it a posteriori} error estimator.
\begin{theorem}[Reliability and efficiency of the error estimator]
Let $(\sigma, u)$ be the solution of the mixed formulation \eqref{eqn1-MB} and $(\sigma_h, u_h)$ be the solution of the mixed finite element method \eqref{Meqn1-MB}. If the compliance tensor
$A$ is continuous, then there exist positive constants $C_3$ and $C_4$ depending only on the shape-regularity of the triangulation and the polynomial degree $k$ such that
\begin{equation}\label{eq:posterioriestimateupperbound-MB}
\|\sigma-\sigma_h\|_{A}\leq C_3\Big(\eta(\sigma_{h}, {\mathcal{T}_h}) + {\rm osc}(f,\mathcal{T}_h)+{\rm osc}(g,\mathcal{E}_h(\Gamma_N))\Big),
\end{equation}
and
\begin{equation}\label{eq:posterioriestimatelowerbound-MB}
C_4\eta(\sigma_{h}, \mathcal{T}_h)\leq \|\sigma-\sigma_{h}\|_{A}+{\rm osc}(u_D,\mathcal{E}_h(\Gamma_D)),
\end{equation}
where the data oscillations for the Dirichlet boundary $u_D$ and the Neumann boundary condition $g$ are defined as
\begin{align*}
{\rm osc}(g,\mathcal{E}_h(\Gamma_N))^2:=&\sum\limits_{e\in\mathcal{E}_h(\Gamma_N)}h_e\|g-g_h\|_{0,e}^2\\
{\rm osc}(u_D,\mathcal{E}_h(\Gamma_D))^2:=&\sum\limits_{e\in\mathcal{E}_h(\Gamma_D)}h_e\|\partial_{t_e}(u_D\cdot t_e)-\partial_{t_e}(u_{D,h}\cdot t_e)\|_{0,e}^2\\
+&\sum\limits_{e\in\mathcal{E}_h(\Gamma_D)}h_e^3\|\partial_{t_et_e}(u_D\cdot \nu_e)-\partial_{t_et_e}(u_{D,h}\cdot \nu_e)\|_{0,e}^2,
\end{align*}
Here $g_h$ is the piecewise $L^2$ projection of $g$ onto $P_k(\mathcal{E}_h(\Gamma_N),\mathbb{R}^2)$ and
$u_{D,h}$ is the piecewise $L^2$ projection of $u_D$ onto $P_k(\mathcal{E}_h(\Gamma_D),\mathbb{R}^2)$.
\end{theorem}
\section{A Posteriori Error Estimation for Displacement}
In this section, we shall discuss the {\it a posteriori} error estimate for a superconvergent postprocessed displacement recently constructed in \cite{ChenHuHuang2016}. The key ingredients of the theoretical analysis are the discrete inf-sup condition and the norm equivalence on $H^1(\mathcal{T}_h; \mathbb{R}^2)$ developed in \cite{ChenHuHuang2016}, together with the {\it a posteriori} error estimates \eqref{eq:posterioriestimateupperbound} and \eqref{eq:posterioriestimatelowerbound}.
Here the broken space
\[
H^1(\mathcal{T}_h; \mathbb{R}^2):=\left\{v\in L^2(\Omega; \mathbb{R}^2): v|_K\in H^1(K; \mathbb{R}^2)\quad \forall\,K\in\mathcal
{T}_h\right\}.
\]
For any $v\in H^1(\mathcal{T}_h; \mathbb{R}^2)$, define the mesh-dependent norm
\begin{align*}
|v|_{1,h}^2 &:= \|\varepsilon_h (v)\|_0^2 + \sum_{e\in \mathcal E_h} h_e^{-1}\|[v]\|_{0,e}^2.
\end{align*}
We first recall the superconvergent postprocessed displacement from $(\sigma_h, u_h)$ developed in \cite{ChenHuHuang2016}.
To this end, let
\[
V_{h}^{\ast}:=\left\{v\in L^2(\Omega; \mathbb{R}^2): v|_K\in P_{k+1}(K; \mathbb{R}^2)\quad \forall\,K\in\mathcal
{T}_h\right\}.
\]
Then a postprocessed displacement is defined as follows \cite{ChenHuHuang2016, LovadinaStenberg2006, BrambleXu1989}:
Find $ u_h^{\ast}\in V_{h}^{\ast}$ such that
\begin{equation}\label{postprocess1}
(u_h^{\ast}, v)_K= (u_h, v)_K \quad \forall~ v\in P_{k-1}(K; \mathbb{R}^2),
\end{equation}
\begin{equation}\label{postprocess2}
(\varepsilon(u_h^{\ast}), \varepsilon(w))_K=(A\sigma_h, \varepsilon(w))_K \quad \forall~ w\in (I- Q_h) V_{h}^{\ast}|_K,
\end{equation}
for any $K\in\mathcal{T}_h$.
We recall the following two useful results \cite{ChenHuHuang2016}:
the discrete inf-sup condition
\begin{equation}\label{eq:infsup22}
|v_h|_{1,h}\lesssim \sup_{0\neq\tau_h \in \Sigma_{h}} \frac{(\operatorname{div}\tau_h, v_h)}{\|\tau_h\|_{0}} \quad \forall~v_h\in V_{h},
\end{equation}
and norm equivalence
\begin{equation}\label{eq:temp4post}
| v- Q_h v|_{1,h} \eqsim \|\varepsilon_h ( v- Q_h v)\|_0 \quad \forall~v\in H^1(\mathcal{T}_h; \mathbb{R}^2).
\end{equation}
\begin{theorem}
Let $(\sigma, u)$ be the solution of the mixed formulation \eqref{eqn1}, $(\sigma_h, u_h)$ be the solution of the mixed finite element method \eqref{mfem}, and $u_h^{\ast}$ be the postprocessed displacement defined by \eqref{postprocess1}-\eqref{postprocess2}.
Then we have
\begin{equation}\label{eq:postdispalcementupperbound}
\|\sigma-\sigma_h\|_{A} + |u-u_h^{\ast}|_{1,h}\lesssim \eta(\sigma_{h}, {\mathcal{T}_h}) + \|A\sigma_h-\varepsilon_h(u_h^{\ast})\|_{0} + {\rm osc}(f,\mathcal{T}_h).
\end{equation}
\begin{equation}\label{eq:postdispalcementlowerbound}
\eta(\sigma_{h}, \mathcal{T}_h) + \|A\sigma_h-\varepsilon_h(u_h^{\ast})\|_{0} \lesssim \|\sigma-\sigma_h\|_{A} + |u-u_h^{\ast}|_{1,h}.
\end{equation}
\end{theorem}
\begin{proof}
Using the discrete inf-sup condition~\eqref{eq:infsup22} with $v_h=Q_h(u-u_h^{\ast})$, \eqref{postprocess1}, and the first equations of \eqref{eqn1} and \eqref{mfem}, we get
\begin{align*}
|Q_h(u-u_h^{\ast})|_{1,h}\lesssim & \sup_{0\neq\tau_h \in \Sigma_{h}} \frac{(\operatorname{div}\tau_h, Q_h(u-u_h^{\ast}))}{\|\tau_h\|_{0}}=\sup_{0\neq\tau_h \in \Sigma_{h}} \frac{(\operatorname{div}\tau_h, u-u_h)}{\|\tau_h\|_{0}} \\
=&\sup_{0\neq\tau_h \in \Sigma_{h}} \frac{(A(\sigma-\sigma_h), \tau_h)}{\|\tau_h\|_{0}} \leq \|A(\sigma-\sigma_h)\|_0.
\end{align*}
Choosing $v=u-u_h^{\ast}$ in \eqref{eq:temp4post}, we obtain
\begin{align*}
| v- Q_h v|_{1,h} \eqsim &\|\varepsilon_h ( v- Q_h v)\|_0 \leq \|\varepsilon_h (u-u_h^{\ast})\|_0 + |Q_h(u-u_h^{\ast})|_{1,h} \\
=& \|A\sigma - \varepsilon_h (u_h^{\ast})\|_0 + |Q_h(u-u_h^{\ast})|_{1,h} \\
\lesssim & \|A\sigma_h - \varepsilon_h (u_h^{\ast})\|_0 + \|A(\sigma-\sigma_h)\|_0.
\end{align*}
Then it follows from the last two inequalities that
\[
|u-u_h^{\ast}|_{1,h}\lesssim \|A\sigma_h - \varepsilon_h (u_h^{\ast})\|_0 + \|A(\sigma-\sigma_h)\|_0,
\]
which combined with \eqref{eq:posterioriestimateupperbound} implies \eqref{eq:postdispalcementupperbound}.
Next we prove the efficiency \eqref{eq:postdispalcementlowerbound}. By the triangle inequality,
\begin{align*}
\|A\sigma_h - \varepsilon_h (u_h^{\ast})\|_0\leq & \|A(\sigma-\sigma_h)\|_0+ \|A\sigma - \varepsilon_h (u_h^{\ast})\|_0 \\
=& \|A(\sigma-\sigma_h)\|_0 + \|\varepsilon_h (u-u_h^{\ast})\|_0 \\
\lesssim & \|\sigma-\sigma_h\|_A + |u-u_h^{\ast}|_{1,h}.
\end{align*}
Therefore the proof is completed by using \eqref{eq:posterioriestimatelowerbound}.
\end{proof}
\section{Numerical experiments}
In this section, we test the {\it a posteriori} error estimator with some numerical examples.
In the first example, let $\Omega=(0, 1)^2$, $k=3$, $\mu=1$,
the right-hand side
\[
f(x,y)=\pi^3\left(
\begin{array}{c}
-\sin(2\pi y)(2\cos(2\pi x)-1) \\
\sin(2\pi x)(2\cos(2\pi y)-1)
\end{array}
\right),
\]
and the exact solution \cite[Section~5.2]{CarstensenGedicke2016}
\[
u(x,y)=\frac{\pi}{2}\left(
\begin{array}{c}
\sin^2(\pi x)\sin(2\pi y) \\
-\sin^2(\pi y)\sin(2\pi x)
\end{array}
\right).
\]
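As a quick sanity check (ours, not part of the original experiments), one can verify symbolically that this pair $(u,f)$ is consistent with the elasticity operator $-\operatorname{div}\big(2\mu\varepsilon(u)+\lambda(\operatorname{div}u)I\big)$ for $\mu=1$ and any $\lambda$, since $u$ is divergence-free; a minimal sympy sketch (the script and variable names are ours):
\begin{verbatim}
# Hedged sanity check (not from the paper): verify -div(sigma(u)) = f
# for sigma(u) = 2*mu*eps(u) + lam*div(u)*I with the u and f given above.
import sympy as sp

x, y, lam, mu = sp.symbols('x y lam mu')
u = sp.Matrix([ sp.pi/2*sp.sin(sp.pi*x)**2*sp.sin(2*sp.pi*y),
               -sp.pi/2*sp.sin(sp.pi*y)**2*sp.sin(2*sp.pi*x)])
G = u.jacobian([x, y])
eps = (G + G.T)/2
sigma = 2*mu*eps + lam*G.trace()*sp.eye(2)
div_sigma = sp.Matrix([sigma[0, 0].diff(x) + sigma[0, 1].diff(y),
                       sigma[1, 0].diff(x) + sigma[1, 1].diff(y)])
f = sp.pi**3*sp.Matrix([-sp.sin(2*sp.pi*y)*(2*sp.cos(2*sp.pi*x) - 1),
                         sp.sin(2*sp.pi*x)*(2*sp.cos(2*sp.pi*y) - 1)])
print(sp.simplify(G.trace()))                   # expected: 0, u is divergence-free
print(sp.simplify(-div_sigma.subs(mu, 1) - f))  # expected: zero vector
\end{verbatim}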
We subdivide $\Omega$ by a uniform triangular mesh.
The {\it a priori} and {\it a posteriori} error estimates for $\lambda=10$ and $\lambda=10000$ are listed in Tables~\ref{table:lambda10}-\ref{table:lambda10000},
from which we can see that the convergence rates of $\|\sigma-\sigma_h\|_A$, $\|\nabla_h(u-u_h^{\ast})\|_{0}$, $\eta(\sigma_h, \mathcal{T}_h)$ and $\|A\sigma_h - \varepsilon_h(u_h^{\ast})\|_0$ are all $O(h^{4})$.
Hence the {\it a posteriori} error estimators $\eta(\sigma_h, \mathcal{T}_h)$ and $\eta(\sigma_h, \mathcal{T}_h)+\|A\sigma_h - \varepsilon_h(u_h^{\ast})\|_0$ are both uniformly reliable and efficient
with respect to the mesh size $h$ and $\lambda$ for smooth solutions.
\begin{table}[htbp]
\centering
\caption{Numerical errors for the first example when $\lambda=10$}\label{table:lambda10}
\resizebox{\textwidth}{!}{
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
$h$ & $\|\sigma-\sigma_h\|_A$ & order & $\|\nabla_h(u-u_h^{\ast})\|_{0}$ & order & $\eta(\sigma_h, \mathcal{T}_h)$ & order & $\|A\sigma_h - \varepsilon_h(u_h^{\ast})\|_0$ & order \\
\hline $2^{-1}$ & 6.6998E-01 & $-$ & 7.9544E-01 & $-$ & 1.6615E+01 & $-$ & 4.0073E-02 & $-$ \\
\hline $2^{-2}$ & 5.2451E-02 & 3.68 & 6.0585E-02 & 3.71 & 1.3585E+00 & 3.61 & 9.3899E-03 & 2.09 \\
\hline $2^{-3}$ & 3.6139E-03 & 3.86 & 4.5839E-03 & 3.72 & 1.0918E-01 & 3.64 & 7.1387E-04 & 3.72 \\
\hline $2^{-4}$ & 2.2714E-04 & 3.99 & 3.0676E-04 & 3.90 & 7.4510E-03 & 3.87 & 4.5925E-05 & 3.96 \\
\hline $2^{-5}$ & 1.4193E-05 & 4.00 & 1.9600E-05 & 3.97 & 4.7919E-04 & 3.96 & 2.8824E-06 & 3.99 \\
\hline $2^{-6}$ & 8.8742E-07 & 4.00 & 1.2347E-06 & 3.99 & 3.0263E-05 & 3.99 & 1.8040E-07 & 4.00 \\
\hline $2^{-7}$ & 5.5567E-08 & 4.00 & 7.7435E-08 & 3.99 & 1.8992E-06 & 3.99 & 1.1306E-08 & 4.00 \\
\hline
\end{tabular}
}
\end{table}
\begin{table}[htbp]
\centering
\caption{Numerical errors for the first example when $\lambda=10000$}\label{table:lambda10000}
\resizebox{\textwidth}{!}{
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
$h$ & $\|\sigma-\sigma_h\|_A$ & order & $\|\nabla_h(u-u_h^{\ast})\|_{0}$ & order & $\eta(\sigma_h, \mathcal{T}_h)$ & order & $\|A\sigma_h - \varepsilon_h(u_h^{\ast})\|_0$ & order \\
\hline $2^{-1}$ & 6.6096E-01 & $-$ & 7.7905E-01 & $-$ & 1.6050E+01 & $-$ & 4.3292E-02 & $-$ \\
\hline $2^{-2}$ & 5.1630E-02 & 3.68 & 5.8762E-02 & 3.73 & 1.3066E+00 & 3.62 & 9.0182E-03 & 2.26 \\
\hline $2^{-3}$ & 3.5430E-03 & 3.87 & 4.3977E-03 & 3.74 & 1.0508E-01 & 3.64 & 6.8780E-04 & 3.71 \\
\hline $2^{-4}$ & 2.2220E-04 & 4.00 & 2.9277E-04 & 3.91 & 7.1542E-03 & 3.88 & 4.4330E-05 & 3.96 \\
\hline $2^{-5}$ & 1.3873E-05 & 4.00 & 1.8668E-05 & 3.97 & 4.5947E-04 & 3.96 & 2.7853E-06 & 3.99 \\
\hline $2^{-6}$ & 8.6708E-07 & 4.00 & 1.1751E-06 & 3.99 & 2.8998E-05 & 3.99 & 1.7442E-07 & 4.00 \\
\hline $2^{-7}$ & 5.4210E-08 & 4.00 & 7.3695E-08 & 4.00 & 1.8195E-06 & 3.99 & 1.0922E-08 & 4.00 \\
\hline
\end{tabular}
}
\end{table}
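The ``order'' columns in the tables follow the usual convention on uniformly refined meshes: between two consecutive mesh sizes $h$ and $h/2$, the observed order is $\log_2(e_h/e_{h/2})$. A small illustrative script (values copied from the second column of Table~\ref{table:lambda10}; the script itself is ours):
\begin{verbatim}
# Reproduce the "order" column of Table 1 for ||sigma - sigma_h||_A:
# on uniform refinement h -> h/2, the observed order is log2(e_h / e_{h/2}).
import math

errors = [6.6998e-01, 5.2451e-02, 3.6139e-03, 2.2714e-04,
          1.4193e-05, 8.8742e-07, 5.5567e-08]
orders = [math.log2(errors[i]/errors[i+1]) for i in range(len(errors)-1)]
print([round(p, 2) for p in orders])  # approx [3.68, 3.86, 3.99, 4.0, 4.0, 4.0]
\end{verbatim}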
Next we use the {\it a posteriori} error estimator $\eta(\sigma_h, \mathcal{T}_h)$ to design an adaptive mixed finite element method, namely Algorithm~\ref{alg:amfem}.
The approximate block factorization preconditioner with GMRES \cite{ChenHuHuang2016} is adopted in the SOLVE part of Algorithm~\ref{alg:amfem}; our numerical examples verify that it is highly efficient and robust even on adaptive meshes.
\begin{algorithm}
\label{alg:amfem}
\caption{Adaptive algorithm for the mixed finite element method~\eqref{mfem}.}
Given a parameter $0<\vartheta<1$ and an initial mesh
$\mathcal{T}_{0}$. Set $m:=0$.
\begin{enumerate}[1.]
\item \textbf{SOLVE}: Solve the mixed finite element method \eqref{mfem} on $\mathcal{T}_{m}$ for
the discrete solution $(\sigma_{m},u_{m})\in\Sigma_{m}\times
V_{m}$.
\item \textbf{ESTIMATE}: Compute the error indicator $\eta^2(\sigma_{m}, {\mathcal{T}_m})$ piecewise.
\item \textbf{MARK}: Mark a set $\mathcal{S}_{m}\subset\mathcal{T}_{m}$ with minimal cardinality by D\"{o}rfler marking such that
\[
\eta^2(\sigma_{m}, {\mathcal{S}_m})\geq\vartheta \eta^2(\sigma_{m}, {\mathcal{T}_m}).
\]
\item \textbf{REFINE}: Refine each triangle $K$ with at least one edge in $\mathcal{S}_{m}$ by the newest vertex bisection to get
$\mathcal{T}_{m+1}$.
\item Set $m:=m+1$ and go to Step 1.
\end{enumerate}
\end{algorithm}
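The MARK step above admits a simple greedy implementation: sort the elementwise indicators in decreasing order and take the shortest prefix whose sum reaches the prescribed fraction of the total. A hedged numpy sketch (not the authors' code; the function name and toy data are ours):
\begin{verbatim}
# Hedged sketch of Doerfler marking with minimal cardinality:
# pick the fewest elements whose squared indicators sum to >= theta * total.
import numpy as np

def doerfler_mark(eta2, theta):
    order = np.argsort(eta2)[::-1]              # largest indicators first
    cumulative = np.cumsum(eta2[order])
    m = np.searchsorted(cumulative, theta*eta2.sum()) + 1
    return order[:m]                            # indices of marked elements

eta2 = np.array([0.50, 0.10, 0.30, 0.05, 0.05]) # toy elementwise eta^2 values
print(doerfler_mark(eta2, theta=0.3))           # [0]: one element suffices
\end{verbatim}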
Now we construct a problem with a singular solution to test Algorithm~\ref{alg:amfem}.
Consider the L-shaped domain $\Omega=(-1,1)\times(-1,1)\backslash
\big([0,1)\times(-1,0]\big)$.
Let
\[
\Phi_1(\theta)=\left(
\begin{array}{c}
\big((z+2)(\lambda+\mu)+4\mu\big)\sin(z\theta) - z(\lambda+\mu)\sin((z-2)\theta) \\
z(\lambda+\mu)\big(\cos(z\theta)-\cos((z-2)\theta)\big)
\end{array}
\right),
\]
\[
\Phi_2(\theta)=\left(
\begin{array}{c}
z(\lambda+\mu)\big(\cos((z-2)\theta)-\cos(z\theta)\big) \\
-\big((2-z)(\lambda+\mu)+4\mu\big)\sin(z\theta) - z(\lambda+\mu)\sin((z-2)\theta)
\end{array}
\right),
\]
\begin{align*}
\Phi(\theta) = &\Big(z(\lambda+\mu)\sin((z-2)\omega) + \big((2-z)(\lambda+\mu)+4\mu\big)\sin(z\omega)\Big)\Phi_1(\theta) \\
&- z(\lambda+\mu)\big(\cos((z-2)\omega)-\cos(z\omega)\big)\Phi_2(\theta),
\end{align*}
where $z\in(0, 1)$ is a real root of $(\lambda+3\mu)^2\sin^2(z\omega)=(\lambda+\mu)^2z^2\sin^2\omega$ with $\omega=3\pi/2$.
The exact singular solution in polar coordinates is taken as \cite[Section~4.6]{Grisvard1992}
\[
u(r,\theta)=\frac{1}{(\lambda+\mu)^2}(r^2\cos^2\theta-1)(r^2\sin^2\theta-1)r^{z}\Phi(\theta).
\]
It can be computed that $z=0.561586549334359$ for $\lambda=10$, and $z=0.544505718203590$ for $\lambda=10000$.
We also take $k=3$ and $\mu=1$.
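The quoted values of $z$ can be cross-checked by a standard one-dimensional root search; a hedged scipy sketch (ours; the bracket $[0.5,0.7]$ was chosen by inspecting the sign change of the residual, assuming the smallest root in $(0,1)$ is the relevant one):
\begin{verbatim}
# Hedged cross-check of the singular exponent z: root in (0,1) of
# (lam+3*mu)^2*sin(z*omega)^2 = (lam+mu)^2*z^2*sin(omega)^2, omega = 3*pi/2.
import numpy as np
from scipy.optimize import brentq

def singular_exponent(lam, mu=1.0, omega=1.5*np.pi):
    g = lambda z: ((lam + 3*mu)**2*np.sin(z*omega)**2
                   - (lam + mu)**2*z**2*np.sin(omega)**2)
    return brentq(g, 0.5, 0.7)   # sign-change bracket found by inspection

print(singular_exponent(10.0))     # ~0.561586549...
print(singular_exponent(10000.0))  # ~0.544505718...
\end{verbatim}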
Some meshes generated by Algorithm~\ref{alg:amfem} for different bulk parameters $\vartheta$ and Lam\'e constants $\lambda$
are shown in Figure~\ref{fig:ldomainIter}, where $\#\textrm{dofs}$ is the number of degrees of freedom.
The adaptive Algorithm~\ref{alg:amfem} captures the singularity of the exact solution at the corner $(0, 0)$ very well.
The histories of the adaptive Algorithm~\ref{alg:amfem} for $\vartheta=0.1, 0.2$ and $\lambda=10, 10000$ are presented
in Figures~\ref{fig:errorex2lambda10}-\ref{fig:errorex2lambda10000}.
We can see from Figures~\ref{fig:errorex2lambda10}-\ref{fig:errorex2lambda10000} that
the convergence rates of the errors $\|\sigma-\sigma_h\|_A$ and $\eta(\sigma_h, \mathcal{T}_h)$ are both $O((\#\textrm{dofs})^{-2})$
for both $\lambda=10$ and $\lambda=10000$, which confirms the theoretical results. Since $(\#\textrm{dofs})^{-2}\cong h^4$ on a uniform grid, the errors $\|\sigma-\sigma_h\|_A$ and $\eta(\sigma_h, \mathcal{T}_h)$ converge at the optimal rate.
\begin{figure}
\caption{Meshes generated by Algorithm~\ref{alg:amfem} for different bulk parameters $\vartheta$ and Lam\'e constants $\lambda$.}
\label{fig:ldomainIter}
\end{figure}
\begin{figure}
\caption{Errors $\|\sigma-\sigma_h\|_A$ and $\eta(\sigma_h, \mathcal{T}_h)$ vs $\#$dofs for $\lambda=10$.}
\label{fig:errorex2lambda10}
\end{figure}
\begin{figure}
\caption{Errors $\|\sigma-\sigma_h\|_A$ and $\eta(\sigma_h, \mathcal{T}_h)$ vs $\#$dofs for $\lambda=10000$.}
\label{fig:errorex2lambda10000}
\end{figure}
The third example considers the L-shape benchmark problem with general boundary conditions tested in \cite[Section~5.3]{CarstensenGedicke2016} on the rotated L-shaped domain with the initial mesh as depicted in Figure~\ref{fig:initialdomainex3}. We impose the Neumann boundary
condition on the part of the boundary where $x^2=y^2$ and the Dirichlet boundary condition on the remaining part of the boundary of $\Omega$. The exact solution in polar coordinates is given as follows
\[
\begin{pmatrix}u_r(r,\theta) \\ u_{\theta}(r,\theta)\end{pmatrix}=\frac{r^{\alpha}}{2\mu}\begin{pmatrix}-(\alpha+1)\cos((\alpha+1)\theta) + (C_2-\alpha-1)C_1\cos((\alpha-1)\theta) \\ (\alpha+1)\sin((\alpha+1)\theta) + (C_2+\alpha-1)C_1\sin((\alpha-1)\theta) \end{pmatrix}.
\]
The constants are $C_1:=-\cos((\alpha+1)\omega)/\cos((\alpha-1)\omega)$ and $C_2:=-2(\lambda+2\mu)/(\lambda+\mu)$, where $\alpha=0.544483736782$ is the positive solution of $\alpha \sin(2\omega)+\sin(2\omega\alpha)=0$ for $\omega=3\pi/4$.
The Lam\'{e} parameters
\[
\lambda=\frac{E\nu}{(1+\nu)(1-2\nu)},\quad \mu=\frac{E}{2(1+\nu)}
\]
with the elasticity modulus $E=10^5$ and the Poisson ratio $\nu=0.4999$.
The volume force $f(x,y)$ and the Neumann boundary data vanish, and the Dirichlet boundary condition is taken from the exact solution.
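For orientation (our own check, not taken from \cite{CarstensenGedicke2016}), the resulting Lam\'e parameters and the residual of the characteristic equation for $\alpha$ can be evaluated directly; a small sketch:
\begin{verbatim}
# Hedged check: Lame parameters for E = 1e5, nu = 0.4999 (nearly
# incompressible), and the residual of alpha*sin(2w) + sin(2w*alpha) = 0.
import numpy as np

E, nu = 1.0e5, 0.4999
lam = E*nu/((1 + nu)*(1 - 2*nu))   # approx 1.67e8
mu  = E/(2*(1 + nu))               # approx 3.33e4
omega, alpha = 3*np.pi/4, 0.544483736782
print(lam, mu)
print(alpha*np.sin(2*omega) + np.sin(2*omega*alpha))  # close to zero
\end{verbatim}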
The histories of Algorithm~\ref{alg:amfem} for $k=3, 4, 5$ and $\vartheta=0.1$ are presented
in Figures~\ref{fig:errorex3priori}-\ref{fig:errorex3posteriori}, which indicate that the convergence rates of errors $\|\sigma-\sigma_h\|_A$ and $\eta(\sigma_h, \mathcal{T}_h)$ are both $O((\#\textrm{dofs})^{-(k+1)/2})$.
\begin{figure}
\caption{The rotated L-shaped domain with the initial mesh.}
\label{fig:initialdomainex3}
\end{figure}
\begin{figure}
\caption{Errors $\|\sigma-\sigma_h\|_A$ vs $\#$dofs in
$\log_{10}$-$\log_{10}$ scale for the third example.}
\label{fig:errorex3priori}
\end{figure}
\begin{figure}
\caption{Errors $\eta(\sigma_h, \mathcal{T}_h)$ vs $\#$dofs in $\log_{10}$-$\log_{10}$ scale for the third example.}
\label{fig:errorex3posteriori}
\end{figure}
\end{document}
\begin{document}
\baselineskip=14.pt plus 2pt
\title[
]{On some quotient groups of hyperbolic groups}
\author[]{O.V.~Kulikova}
\address{\newline O.V.Kulikova \newline
Lomonosov Moscow State University, \newline Moscow Center for Fundamental and Applied Mathematics \newline (Moscow, Russian Federation) }
\email{[email protected]}
\thanks{2010 \it{Mathematics Subject Classification.} 20F05,20F65, 20F67 \newline The work was supported by the Russian Science
Foundation, project no. 22-11-00075.}
\keywords{}
\maketitle
\begin{abstract}
This paper describes some generalizations of the results presented in the book ``Geometry of Defining Relations in Groups'' by A.Yu.~Ol'shanskii to the case of non-cyclic torsion-free hyperbolic groups. In particular, it is proved that for every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian torsion-free quotient group in which all proper subgroups are cyclic, and the intersection of any two of them is not trivial.
\end{abstract}
\setcounter{tocdepth}{2}
\def{\sf st}{{\sf st}}
\section*{ Introduction.}
Similar to free groups, hyperbolic groups have many different homomorphic images.
In the article \cite{olsh91}, A.Yu.Ol'shanskii noted that the constructions in \cite{olsh91} open up the possibility to generalize many of the results presented in his book \cite{olsh}
to the case of torsion-free hyperbolic groups. This paper describes some such generalizations of Theorems 31.1-31.4, 31.7, 31.8
from \cite{olsh}.
For example, in Theorem 2 of \cite{ashmanovOlsh} (= Theorem 31.4 of \cite{olsh}), I.S.Ashmanov and A.Yu.Ol'shanskii strengthened the example of S.I. Adyan \cite{adyan71, adyan75} and
proved the existence of a non-Abelian torsion-free group, in which all proper subgroups are cyclic, and the intersection of any two of them is not trivial.
In this paper, using the limit group constructed in \cite{semenov}, we prove that
for every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian torsion-free quotient group, in which all proper subgroups are cyclic, and the intersection of any two of them is not trivial. Using this quotient, we get the generalization of Theorem 31.7 from \cite{olsh}:
for every prime $p$, for every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian quotient group $K$ of finite period, in which all proper subgroups are cyclic, and the Sylow $p$-subgroup $P$ is central, but $P$ is not a direct multiplier of $K$.
In \cite{olsh93}, for every non-cyclic torsion-free hyperbolic group, A.Yu.Ol'shanskii constructed a non-Abelian quotient group, all of whose proper subgroups are cyclic, and the intersection of any two different maximal proper subgroups is trivial.
In this paper, using this quotient group, we obtain that for every non-cyclic torsion-free hyperbolic group and for every $k\in\mathbb{N}$, there exists a non-Abelian torsion-free quotient group $\mathbf{A}$ in which all maximal proper subgroups are free Abelian groups of rank $k$ and the intersection of any two different maximal proper subgroups coincides with the center of $\mathbf{A}$, which is a free Abelian group of rank $k-1$, and no nontrivial subgroup of the center is a direct multiplier of $\mathbf{A}$.
The proofs in this paper rely heavily on the proofs and results from \cite{olsh, ashmanovOlsh,semenov,olsh91,olsh93}.
The author thanks A.Yu. Ol'shanskii for the problem statement, the proposed method of solution and useful discussions during the writing of this article.
\section{Designations}\label{S:df}
Let $G$ be a group with a generating set $\mathcal{A}$ and let $\mathcal{O}$ be the set of all relators of $G$.
Let $\mathcal{R}=\bigcup_{i=1}^{\infty}\mathcal{R}_i$, where $\mathcal{R}_i$ is a symmetrized set of words in the alphabet $\mathcal{A}^{\pm 1}$, $\mathcal{R}_i\cap\mathcal{R}_j=\emptyset$ for $i\neq j$, some $\mathcal{R}_i$ may be empty, and $\mathcal{R}\cap\mathcal{O}=\emptyset$.
For every $k=1,2,\ldots$, define a group $G_k=\langle \mathcal{A}\|\mathcal{O}\cup \bigcup_{i=1}^{k}\mathcal{R}_i\rangle$.
The presentation
\begin{equation} \label{infPresent}
G(\infty) = \langle \mathcal{A}\|\mathcal{O}\cup \bigcup_{i=1}^{\infty}\mathcal{R}_i\rangle
\end{equation}
of a group $G(\infty)$ with such partition of the defining relation set will be called {\it $G$-graded}.
By $\Phi$, we denote the canonical homomorphism from the free group $F=F(\mathcal{A})$ to the group $G=\langle \mathcal{A} \| \mathcal{O} \rangle$ with kernel $N$. Then $$G(\infty) = G/\bar N_{\mathcal{R}},$$ where
$\bar N_{\mathcal{R}}$ is the image of the normal closure $N_{\mathcal{R}}$ of $\mathcal{R}$ in $F$
under the homomorphism $\Phi$.
We will say that $\mathcal{R}_i$ satisfies the condition $(*)$, if no element $R\in \mathcal{R}_i$ is conjugated to $R^{-1}$ in $G_{i-1}$.
\section{Asphericity and atoricity}\label{S:aspher}
We will consider diagrams over $G(\infty) = \langle \mathcal{A}\|\mathcal{O}\cup \bigcup_{i=1}^{\infty}\mathcal{R}_i\rangle$ with a gradation similar to \cite{olsh91} (see the definition of a diagram and related notions in \cite{olsh93}). The faces of a diagram are divided into $0$-faces (with labels from $\mathcal{O}$) and $\mathcal{R}$-faces (with labels from $\mathcal{R}=\bigcup_{i=1}^{\infty}\mathcal{R}_i$). The $\mathcal{R}$-faces are subdivided into $\mathcal{R}_i$-faces (with labels from $\mathcal{R}_i$). The rank of a $\mathcal{O}$-face is equal to $0$, the rank of a $\mathcal{R}_i$-face is equal to $i$. Since any diagram contains a finite number of faces, the maximum of the ranks of its faces is well-defined, and will be called {\it the rank of the diagram}. If there are no faces in the diagram, then we assume that its rank is zero.
Following the definition from \cite{olsh93}, a pair of two different $\mathcal{R}_i$-faces $\Pi_1$ and $\Pi_2$ in a diagram over a $G$-graded presentation (\ref{infPresent}) will be called {\it opposite $\mathcal{R}_i$-faces}, if for their boundary labels $R_1$ and $R_2$ reading in a clockwise direction, starting from vertices $o_1$ and $o_2$, one can find (after a series of elementary transformations) a simple path $s$ in the diagram such that $s_{-}=o_1, s_{+}=o_2$ and
$$\varphi(s)^{-1}R_1\varphi(s)R_2=1\,\,\,\text{ in}\,\,\,G_{i-1}.$$
If all $\mathcal{R}_i$ satisfy the condition $(*)$, and for any words $R,R'\in \mathcal{R}_i$ that are not cyclic permutations of each other, $R'$ is not conjugated to $R$ in $G_{i-1}$, then the word $R_1$ is a cyclic permutation of $R_2^{-1}$.
A $G$-graded presentation (\ref{infPresent}) will be called {\it $G$-aspherical} ({\it $G$-atorical}), if every diagram on a sphere (on a torus) of positive rank over the presentation (\ref{infPresent}) contains opposite $\mathcal{R}_i$-faces for some $i>0$.
Note that \cite{olsh} uses {\it $i$-pairs of faces} that satisfy stricter requirements than opposite $\mathcal{R}_i$-faces (in the notation above):
1) $R_1$ and $R_2$ are mutually inverse words in the free group;
2) $\varphi(s)=1$ in $G_{i-1}$.
Similar to \cite{olsh}, a $G$-graded presentation (\ref{infPresent}) will be called {\it aspherical} ({\it atorical}), if any diagram on a sphere (on a torus) of positive rank over the presentation (\ref{infPresent}) contains an $i$-pair of faces for some $i>0$.
Note that if a $G$-graded presentation is aspherical, then all $\mathcal{R}_i$ satisfy the condition $(*)$, and for any words $R,R'\in \mathcal{R}_i$ that are not cyclic permutations of each other, $R'$ is not conjugated to $R$ in $G_{i-1}$.
It is obvious that asphericity implies $G$-asphericity, and atoricity implies $G$-atoricity.
As in \cite{olsh91}, let's call $\tau(\Delta)=(\tau_1,\tau_2,\ldots)$ {\it the type} of a diagram $\Delta$, where $\tau_i$ is the number of faces of rank $i$ in $\Delta$, that is, unlike \cite{olsh}, the type does not take into account the number of $0$-faces in $\Delta$. The types are lexicographically ordered as in \cite{olsh}.
\section{Relations from $[N_{\mathcal{R}}, F]N$}\label{S:commutant2}
In this section, let's consider a group $G(\infty)$ with a presentation (\ref{infPresent}) assuming that all $\mathcal{R}_i$ satisfy the condition $(*)$.
The group $H\cong G/[\bar N_{\mathcal{R}}, G]$ is a central extension of $G(\infty)$. Let's investigate the structure of the central subgroup $\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]$ of the group $H$.
The symmetrized set $\mathcal{R}_i\subset\mathcal{R}$ is divided into disjoint subsets of words conjugated to each other in the group $G_{i-1}$:
$$\mathcal{R}_i = \bigsqcup_{j} (\mathcal{R}_{i,j}^+\sqcup \mathcal{R}_{i,j}^-),$$ where $\mathcal{R}_{i,j}^- = (\mathcal{R}_{i,j}^+)^{-1}$. Note that $\mathcal{R}_{i,j}^-\neq \mathcal{R}_{i,j}^+$ by the condition $(*)$.
In each of the sets $\mathcal{R}_{i,j}^+$, fix a representative
${r_{i,j}^+}$.
Let $\mathcal{R}_i^{+}$ denote the set of all representatives $r_{i,j}^+$, and $\mathcal{R}^{+}=\bigcup_{i=1}^{\infty}\mathcal{R}_i^+$.
The group $G_k$, $k=1,2,\ldots$, has the presentation $\langle \mathcal{A}\|\mathcal{O}\cup \bigcup_{i=1}^{k}\mathcal{R}_i^+\rangle$.
The presentation
\begin{equation} \label{infPresent+}
G(\infty) = \langle \mathcal{A}\|\mathcal{O}\cup \bigcup_{i=1}^{\infty}\mathcal{R}_i^+\rangle
\end{equation}
of the group $G(\infty)$ will be called {\it reduced}.
Consider an arbitrary diagram $\Delta$ over a reduced presentation (\ref{infPresent+}) (which is the same as a diagram over the presentation $\langle \mathcal{A}\|\mathcal{O}\cup \bigcup_{i=1}^{\infty}\mathcal{R}_i^*\rangle$, where $\mathcal{R}_i^*$ is the set containing all elements from $\mathcal{R}_i^{+}$, their inverses and all their cyclic permutations). For each $r_{i,j}^+\in \mathcal{R}_i^{+}$, let $\sigma_{+}(r_{i,j}^+)$ denote the number of the faces in $\Delta$ with labels from $\mathcal{R}_{i,j}^+$, and let $\sigma_{-}(r_{i,j}^+)$ denote the number of the faces with labels from $\mathcal{R}_{i,j}^-$, and $\sigma_{\Delta}(r_{i,j}^+) = \sigma_{+}(r_{i,j}^+) -
\sigma_{-}(r_{i,j}^+)$.
In the diagram $\Delta$, the number $\sigma_{+}(r_{i,j}^+)$ coincides with the number of faces whose labels are equal to cyclic permutations of the word $r_{i,j}^+$, and $\sigma_{-}(r_{i,j}^+)$ coincides with the number of faces whose labels are equal to cyclic permutations of $(r_{i,j}^+)^{-1}$.
Further, the course of reasoning in Lemma \ref{lemma31.1}, Lemma \ref{lemma31.2} and Theorem \ref{th31.1} coincides with Lemmas 31.1, 31.2 and Theorem 31.1 from \cite{olsh}
(see also \cite{ok3}).
The main difference from \cite{olsh} is the presence of $\mathcal{O}$-faces in diagrams and multipliers from $N$ in words.
\begin{lem}\label{lemma31.1}
If in a disk diagram $\Delta$ over a $G$-graded reduced presentation (\ref{infPresent+}) of a group $G(\infty)$, $\sigma_{\Delta}(R) = 0$
for any $R\in \mathcal{R}^+$, then the boundary label of the diagram $\Delta$ belongs to $[N_{\mathcal{R}}, F]N$.
\end{lem}
\noindent{\it Proof.} In a standard way, making cuts in the diagram $\Delta$, we get a bouquet of diagrams, each of which contains at most one face. Since the cuts do not change the boundary label as an element of the free group $F$, the boundary label of the diagram $\Delta$ is equal to
$$(S_1R^{\pm 1}_1S_1^{-1})\ldots (S_mR^{\pm 1}_mS_m^{-1})$$
in $F$, where
$R_k \in \mathcal{O}\cup\mathcal{R}^+$, $S_k\in F$.
Each of
these factors belongs to $N_{\mathcal{R}}$ or $N$, therefore, they can
be rearranged modulo $[N_{\mathcal{R}},F]N$. In addition, an element $SRS^{-1}$ is trivial for $R\in \mathcal{O}$ and coincides with $R$ for $R\in \mathcal{R}^+$ modulo $[N_{\mathcal{R}},F]N$. So, by the condition of the lemma, the boundary label is trivial modulo $[N_{\mathcal{R}},F]N$.
\rule {6pt}{6pt}
In order to use induction in the case of $G$-asphericity and $G$-atoricity and prove an analogue of Lemma 31.2 from \cite{olsh}, we need an additional lemma.
\begin{lem}\label{addtolemma31.2} Let a $G$-graded reduced presentation (\ref{infPresent+}) of a group $G(\infty) = G/\bar N_{\mathcal{R}}$ be $G$-atorical.
Then
\begin{itemize}
\item[$1$)] for an arbitrary diagram $\Delta$ of rank $i$ over
(\ref{infPresent+}) on a torus, we have $\sigma_{\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$;
\item[$2$)] if $X^{-1}Y^{-1}XY=1$ in $G_i$ for some words $X, Y$,
then there exists a disk diagram $\Delta$ of rank $\leqslant i$ over the presentation (\ref{infPresent+}) with the boundary label equal to $X^{-1}Y^{-1}XY$,
such that $\sigma_{\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$.
\end{itemize}
\end{lem}
\noindent{\it Proof.} Statements 1) and 2) are proved by common induction on the rank $i$.
1) Let $\Delta$ be a diagram of rank $i$ on a torus. The proof will be by induction on the type $\tau(\Delta)$. The statement is obvious if there are no
$\mathcal{R}$-faces in $\Delta$. Otherwise, as follows from the definition of $G$-atoricity, in $\Delta$, one can find
a pair of two different $\mathcal{R}_j$-faces $\Pi_1$ and $\Pi_2$ ($j\leqslant i$) having boundary labels $R_1$ and $R_2$ reading in a clockwise direction, starting from vertices $o_1$ and $o_2$, and (after a series of elementary transformations) a simple path $s$ such that $s_{-}=o_1, s_{+}=o_2$ and
$\varphi(s)^{-1}R_1\varphi(s)R_2=1\,\,\,\text{ in}\,\,\,G_{j-1},$
moreover, due to the condition $(*)$ and the reducibility of presentation, it can be assumed that $R_1$ and $R_2$ are mutually inverse words in the free group.
For the disk subdiagram $\Gamma$ with boundary label $\varphi(s)^{-1}R_1\varphi(s)R_2$ containing only $\Pi_1$ and $\Pi_2$, it is obvious that $\sigma_{\Gamma}(R) = 0$ for any $R\in\mathcal{R}^+$. By induction assumption, one can apply statement 2) to the word $\varphi(s)^{-1}R_1\varphi(s)R_2$ and find a disk diagram $\Gamma'$ of rank $\leqslant j-1$ over the presentation (\ref{infPresent+}) with boundary label $\varphi(s)^{-1}R_1\varphi(s)R_2$, for which $\sigma_{\Gamma'}(R) = 0$ for any $R\in\mathcal{R}^+$.
After removing $\Gamma$ from $\Delta$ and pasting $\Gamma'$, we get a diagram $\Delta'$, for which $\tau(\Delta')<\tau(\Delta)$. By induction assumption, $\sigma_{\Delta'}(R) = 0$ for any $R\in\mathcal{R}^+$. Hence, $\sigma_{\Delta}(R) = \sigma_{\Delta'}(R) - \sigma_{\Gamma'}(R) + \sigma_{\Gamma}(R) = 0$ for any $R\in\mathcal{R}^+$.
2) If $X^{-1}Y^{-1}XY=1$ in $G_i,$
then by the van Kampen lemma, there exists a disk diagram $\Delta$ of rank $j\leqslant i$ over the presentation (\ref{infPresent+}) with boundary label equal to $X^{-1}Y^{-1}XY$.
If $j=0$, then it is obvious that $\sigma_{\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$.
If $j>0$, then using the disk diagram $\Delta$, we construct the diagram $\bar{\Delta}$ on a torus in a standard way, gluing the boundary subpath labeled by $X$ with the boundary subpath labeled by $X^{-1}$, and the boundary subpath labeled by $Y$ with the boundary subpath labeled by $Y^{-1}$.
By statement 1)
$\sigma_{\bar\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$.
Hence, $\sigma_{\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$.
\rule {6pt}{6pt}
\begin{lem}\label{lemma31.2} Let a $G$-graded reduced presentation (\ref{infPresent+}) of a group $G(\infty) = G/\bar N_{\mathcal{R}}$ be $G$-aspherical and $G$-atorical.
Then
\begin{itemize}
\item[$1$)] for an arbitrary spherical diagram $\Delta$ over the presentation
(\ref{infPresent+}), we have $\sigma_{\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$;
\item[$2$)] if a word $X$ represents in $F$ an element of $[N_{\mathcal{R}}, F]N$,
then for any disk diagram $\Delta$ with boundary label equal to $X$,
$\sigma_{\Delta}(R) = 0$ for any $R\in\mathcal{R}^+$.
\end{itemize}
\end{lem}
\noindent{\it Proof.}
1) Let $\Delta$ be a diagram of rank $i$ on a sphere. The proof will be by induction on the type $\tau(\Delta)$. The statement is obvious if there are no
$\mathcal{R}$-faces in $\Delta$. Otherwise, as follows from the definition of $G$-asphericity,
in $\Delta$, one can find
a pair of two different $\mathcal{R}_j$-faces $\Pi_1$ and $\Pi_2$ ($j\leqslant i$) having boundary labels $R_1$ and $R_2$ reading in a clockwise direction, starting from vertices $o_1$ and $o_2$, and (after a series of elementary transformations) a simple path $s$ such that $s_{-}=o_1, s_{+}=o_2$ and
$\varphi(s)^{-1}R_1\varphi(s)R_2=1\,\,\,\text{ in}\,\,\,G_{j-1},$
moreover, due to the condition $(*)$ and the reducibility of presentation, it can be assumed that $R_1$ and $R_2$ are mutually inverse words in the free group.
For the disk subdiagram $\Gamma$ with boundary label $\varphi(s)^{-1}R_1\varphi(s)R_2$ containing only $\Pi_1$ and $\Pi_2$, it is obvious that $\sigma_{\Gamma}(R) = 0$ for any $R\in\mathcal{R}^+$. By statement 2) of Lemma \ref{addtolemma31.2}, there is a disk diagram $\Gamma'$ of rank $\leqslant j-1$ over the presentation (\ref{infPresent+}) with boundary label $\varphi(s)^{-1}R_1\varphi(s)R_2$, for which $\sigma_{\Gamma'}(R) = 0$ for any $R\in\mathcal{R}^+$.
After removing $\Gamma$ from $\Delta$ and pasting $\Gamma'$, we get a spherical diagram $\Delta'$, for which $\tau(\Delta')<\tau(\Delta)$. By induction assumption, $\sigma_{\Delta'}(R) = 0$ for any $R\in\mathcal{R}^+$. Hence, $\sigma_{\Delta}(R) = \sigma_{\Delta'}(R) - \sigma_{\Gamma'}(R) + \sigma_{\Gamma}(R) = 0$ for any $R\in\mathcal{R}^+$.
2) The word $X$ can be written in $F$ in the form
$(\prod[Y_k,{S_k}R^{\pm 1}_{k}S_k^{-1}])Z,$
where $S_k,Y_k\in F, R_{k}\in \mathcal{R}^+, Z\in N$, since
the relation $[w,uv]=[w,u][uwu^{-1},uvu^{-1}]$ is identical in $F$. With a geometric interpretation of the equality $X=1$ in $G(\infty)$, we obtain a disk diagram $\Delta_0$ with $\sigma_{\Delta_0}(R)=0$ for any $R\in\mathcal{R}^+$. If $\Delta$ is another disk diagram with boundary label equal to
$X$, then $\Delta$ together with the mirror copy $\Delta^0$ of the diagram $\Delta_0$ gives a spherical diagram $\bar \Delta$. By statement 1),
$\sigma_{\bar \Delta}(R) = 0$ for any $R\in\mathcal{R}^+$.
Hence, $\sigma_{\Delta}(R) = \sigma_{\bar \Delta}(R) - \sigma_{\Delta^0}(R) = 0$ for any $R\in\mathcal{R}^+$. \rule {6pt}{6pt}
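The commutator identity invoked in the proof above, $[w,uv]=[w,u][uwu^{-1},uvu^{-1}]$, can also be checked mechanically in a free group; a small illustrative sketch using sympy's free groups (ours, with the convention $[a,b]=aba^{-1}b^{-1}$, under which the identity holds verbatim):
\begin{verbatim}
# Mechanical check (illustration only) of [w,uv] = [w,u][uwu^-1, uvu^-1]
# in a free group, with the convention [a,b] = a*b*a^-1*b^-1.
from sympy.combinatorics.free_groups import free_group

F, w, u, v = free_group("w, u, v")
comm = lambda a, b: a*b*a**-1*b**-1
lhs = comm(w, u*v)
rhs = comm(w, u) * comm(u*w*u**-1, u*v*u**-1)
print(lhs == rhs)   # True
\end{verbatim}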
\begin{thm}\label{th31.1} Let a $G$-graded reduced presentation (\ref{infPresent+}) of a group $G(\infty) = G/\bar N_\mathcal{R}$ be $G$-aspherical and $G$-atorical.
Then
\begin{itemize}
\item[$1$)] the following conditions for a word $X\in N_{\mathcal{R}}N$ are equivalent:
a) $X\in [N_{\mathcal{R}}, F]N$;
b) in the writing $Z\prod_k {S_k}R^{\pm 1}_k{S_k^{-1}}$ representing
the word $X$ in $F$, where $Z\in N, R_k\in \mathcal{R}^+, S_k\in F$, the sum of
the exponents of $R$ is equal to zero for each $R\in\mathcal{R}^+$;
\item[$2$)] $\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]\simeq N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$
is a free Abelian group with basis $\{\bar R\}_{R\in \mathcal{R}^+}$, where $\bar{R}=R[N_{\mathcal{R}}, F]N$.
\end{itemize}
\end{thm}
\noindent{\it Proof.} 1) The condition b) is equivalent to the fact that for a disk diagram $\Delta$ constructed from the writing $Z\prod_k {S_k}R^{\pm 1}_k{S_k^{-1}}$, for any $R\in\mathcal{R}^+$, $\sigma_{\Delta}(R) = 0$. Therefore, the condition a) follows from b) by statement 2) of Lemma \ref{lemma31.2}, and the condition b) follows from a) by Lemma \ref{lemma31.1}.
2) The group $N_{\mathcal{R}}N$ is generated by all $R\in \mathcal{R}^+$ together with the words conjugated to them in $F$ and their inverses, and by all $Z\in N$. But the words from $N_{\mathcal{R}}$, conjugated in $F$, have the same images in $N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$, and the images of the words $Z\in N$ are equal to the identity in $N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$. Hence, the Abelian group $N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$
is generated by the elements $\{\bar R\}_{R\in \mathcal{R}^+}$.
Let's prove that $\{\bar R\}_{R\in \mathcal{R}^+}$ freely generates $N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$. It is enough to make sure that for different elements $R_1,...,R_k\in \mathcal{R}^+$, an equality
$\bar{R}_1^{m_1}...\bar{R}_k^{m_k} = 1$ in $N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$ implies the equalities $m_1 = ... = m_k =0$. Indeed, we have the equality
$R_1^{m_1}...R_k^{m_k} = X\in [N_{\mathcal{R}}, F]N$ in the free group. Hence,
$W \equiv R_1^{m_1}...R_k^{m_k}X^{-1}=1$ in $F$.
Applying statement 1) of the theorem to $X$ and to $W$, we get $m_1 =
... = m_k =0$.
\rule {6pt}{6pt}
Note that, modulo $N$, the full analogue of Lemma 31.2 from \cite{olsh}, in which only asphericity of a $G$-graded reduced presentation is required, can be proved word for word as in \cite{olsh}. So in Theorem \ref{th31.1}, it is sufficient for a $G$-graded reduced presentation to be only aspherical.
\section{Use of the limit groups from \cite{olsh91} and \cite{semenov}.}\label{S:commutant1}
Using the limit group constructed in \cite{olsh91}, we obtain analogues of Theorems 31.2, 31.3
from \cite{olsh} for non-cyclic torsion-free hyperbolic groups.
As in \cite{olsh91}, let $G^n$ denote the subgroup generated by all $n$-th powers of the elements of a group $G$.
\begin{thm}\label{thm31.2_ex1} For every non-cyclic torsion-free hyperbolic group $G$, there exists an integer $n_0(G)$ such that for every odd $n>n_0(G)$,
the group $H=G/[G^n, G]$ has no torsion, and the subgroup $H^n$ is a free Abelian group.
\end{thm}
\noindent{\it Proof.} Consider the group $G(\infty)=G/G^n$ constructed in \cite{olsh91}.
Then $H^n\cong G^n/[G^n, G]$, and $H/H^n$ is the group $G/G^n$. As proved in \cite{olsh91} (p. 565), for $G(\infty)$, the analogue of Lemma 18.2 from \cite{olsh} holds, that is, asphericity and atoricity hold, so one can apply Theorem \ref{th31.1}, i.e. the analogue of Theorem 31.1 from \cite{olsh}. In addition, as proved in \cite{olsh91} (p. 565),
for $G(\infty)$, the analogue of Lemma 18.3 from \cite{olsh} holds, that is, any word is conjugated to a power of a period of some rank in $H/H^n$.
Taking into account the presence of these analogues, the proof of Theorem \ref{thm31.2_ex1} repeats the proof of Theorem 31.2 from \cite{olsh}.
\rule {6pt}{6pt}
\begin{thm}\label{thm31.3_ex1}
For every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian torsion-free quotient group in which any two non-trivial subgroups have a non-trivial intersection.
\end{thm}
\noindent{\it Proof.} For the group $H$ from Theorem \ref{thm31.2_ex1}, the reasoning from the proof of Theorem 31.3 \cite{olsh} is repeated, only the reference to Theorem 31.2 in \cite{olsh} should be replaced with the reference to its analogue -- to Theorem \ref{thm31.2_ex1}.
\rule {6pt}{6pt}
Using the limit group constructed in \cite{semenov, semenov_phd}, we obtain generalizations of Theorems 31.4, 31.7 and 31.8 from \cite{olsh}.
\begin{thm}\label{thm31.4_ex2} For every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian torsion-free quotient group in which
any two non-trivial subgroups have a non-trivial intersection and all proper subgroups are cyclic.
\end{thm}
\noindent{\it Proof.} Consider an arbitrary non-cyclic torsion-free hyperbolic group $G_0$. Since the group $G_0$ is not elementary, its commutant $G_0'$ is not elementary. Since $G_0$ is a non-cyclic torsion-free hyperbolic group, by Proposition 1 and Theorems 1, 2 of \cite{olsh93}, there exists a homomorphism from the group $G_0$ to a non-cyclic torsion-free hyperbolic group $G$ which is surjective on $G_0'$.
By Theorem 2.6 \cite{semenov_phd} (= Theorem 1 \cite{semenov}), for the group $G$ and for any odd $n$ greater than some sufficiently large number $n_0=n_0(G)$, there exists an infinite quotient group
$G(\infty)=G/\bar N_{\mathcal{R}}$, all proper subgroups of which are cyclic groups of order dividing $n$.
By Lemma 54 and Theorem 2.4 in \cite{semenov_phd}, the analogues of Lemma 25.1 and Theorem 26.4 from \cite{olsh} are true for $G(\infty)$. Therefore, we can continue as in the proof of Theorem 31.3 \cite{olsh}.
Consider the group $H=G/[\bar N_{\mathcal{R}}, G]\simeq F/[N_{\mathcal{R}}, F]N$ (hereinafter we use the designations from Sections \ref{S:df} and \ref{S:commutant2}). Since asphericity and atoricity hold (Lemma 54 \cite{semenov_phd}), Theorem \ref{th31.1} can be applied. Hence, the Abelian group
$\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]\simeq N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$ is freely generated by the elements $\{\bar{R}\}_{R\in \mathcal{R}^+}$. In $H$, consider the subgroup $L$ consisting of the products $\prod_k \bar{R}_k^{s_k}$, where $\sum_ks_k=0$. Since $\bar{R}$ lies in the center of $H$, the subgroup $L$ is normal in $H$. In the quotient group $\mathbb{A}=H/L$, the coset $\bar{R}_1L=\bar{R}_2L=\ldots=C$ has infinite order. The group $\mathbb{A}$ is an extension of the group $G(\infty)$ by the infinite cyclic central subgroup $\langle C\rangle$. Since $G(\infty)$ is non-Abelian and infinite, $\mathbb{A}$ is also non-Abelian and infinite.
To prove that there are no elements of finite order in $\mathbb{A}$, assume the opposite. For every nontrivial element $X$ of finite order in $\mathbb{A}$, we have
$X\notin \langle C\rangle$, i.e. $X\neq 1$ in $G(\infty)$, and there exists $j\geqslant 1$ such that $X$ has finite order in $G_j$. By Theorem 2.4 \cite{semenov_phd}, $X$ is conjugated in $G_j$ to a power of a period $A$ of the first type, i.e. $A^n\in\mathcal{R}$. Hence, replacing $X$ with a conjugate, we can assume that $X=A^kC^l$ in the group $\mathbb{A}$.
Therefore, $X^n=A^{kn}C^{ln}=C^kC^{ln}$. Since the element $C$ has infinite order in $\mathbb{A}$, we get that $n$ divides $k$, that contradicts the condition $X\notin \langle C\rangle$.
Since there is no nontrivial element of finite order in $\mathbb{A}$, and all elements in $\mathbb{A}/\langle C\rangle$ have finite orders, any element of the group $\mathbb{A}$ in some power lies in $\langle C\rangle$. So the intersection of any two nontrivial subgroups of the group $\mathbb{A}$ is nontrivial.
Consider an arbitrary proper subgroup $K$ of the group $\mathbb{A}$. It is mapped to a subgroup $\hat{K}$ of the group $G(\infty)$. The subgroup $\hat{K}$
is either finite, or coincides with $G(\infty)$. In the first case, $K$ is elementary, and therefore it is cyclic. In the second case, $\mathbb{A}=K\langle C\rangle$, hence, $K$ is a normal subgroup of the group $\mathbb{A}$.
Since $\mathbb{A}/K$ is an Abelian group, the commutant of the group $\mathbb{A}$ belongs to $K$.
Since the commutant of the group $G$ coincides with $G$, the commutant of $\mathbb{A}$ coincides with $\mathbb{A}$. Hence, $K=\mathbb{A}$, i.e. all proper subgroups of the group $\mathbb{A}$ are cyclic.
\rule {6pt}{6pt}
If the orders of all elements of a group are uniformly bounded, the least common multiple of the orders of all its elements will be called the {\it period} of the group.
A periodic group will be called a {\it $p$-group}, if the order of each of its elements is some power of a prime number $p$. A $p$-subgroup of an arbitrary group will be called a {\it Sylow $p$-subgroup} if it is not contained in a larger $p$-subgroup of this group.
\begin{thm}\label{thm31.7_ex1} For every prime $p$, for every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian quotient group $K$ of finite period, in which all proper subgroups are cyclic, and the Sylow $p$-subgroup $P$ is central, but it is not a direct multiplier of the group $K$.
\end{thm}
\noindent{\it Proof.} Consider the group $\mathbb{A}$ from Theorem \ref{thm31.4_ex2} for a sufficiently large odd number $n$ not divisible by $p$. Since the infinite cyclic subgroup $\langle C\rangle$ is central, the subgroup $\langle C^p\rangle$ is normal in $\mathbb{A}$. Put $K=\mathbb{A}/\langle C^p\rangle$. Since the group $G(\infty)$ is simple (Remark 2 to Theorem 2.6 of \cite{semenov_phd}), the center of the group $K$ coincides with the subgroup $P=\langle C\rangle/\langle C^p\rangle$. Since the orders of the elements of the group $G(\infty)=K/P$ are finite and divide $n$, $P$ is a Sylow $p$-subgroup in $K$, and the orders of the elements of the group $K$ are finite and divide $np$. Since the complete preimage of any subgroup of the group $K$ is a subgroup in $\mathbb{A}$, all proper subgroups of the group $K$ are cyclic. In particular, it follows that $P$ is not a direct multiplier of the group $K$.
\rule {6pt}{6pt}
Note that in Theorem \ref{thm31.7_ex1}, any proper subgroup of the group $K$ either trivially intersects with $P$, or coincides with $P$, or contains $P$ as a direct multiplier. Additionally (compared to Theorem 31.7 of \cite{olsh}), all proper subgroups of $K$ are cyclic.
\begin{thm}\label{thm31.8_ex1} For every torsion-free non-cyclic hyperbolic group, for every sufficiently large prime number $p$ and any integer $k\geqslant 0$, there exists a non-Abelian quotient group $K$ of period $p^{k+1}$, which contains a central cyclic subgroup $D$ of order $p^k$ such that any subgroup of the group $K$ (except $K$ itself) is cyclic and either contains $D$, or is contained in $D$.
\end{thm}
\noindent{\it Proof} is similar to the proof of Theorem 31.8 \cite{olsh}. As the group $S$ from the proof of Theorem 31.8 \cite{olsh}, we take the group $\mathbb{A}$ from Theorem \ref{thm31.4_ex2} for $n=p$. Instead of referring to Theorem 26.4 from \cite{olsh}, we refer to its analogue -- Theorem 2.4 from \cite{semenov_phd}.
\rule {6pt}{6pt}
\section{Use of a limit group from \cite{olsh93}.}\label{S:ex2}
In \cite{olsh93} (and subsequently in another way in \cite{semenov}), for any non-cyclic torsion-free hyperbolic group, A.Yu. Ol'shanskii constructed a non-Abelian torsion-free quotient group $\overline{G}$, all of whose proper subgroups are cyclic, and the intersection of any two different maximal proper subgroups is trivial. (The latter follows from Lemma 10 \cite{olsh91} and the fact that $\overline{G}$ is a limit of torsion-free hyperbolic groups.)
To obtain some examples of central extensions of $\overline{G}$, we need the following theorem. For completeness, we formulate and prove it not only for the group $\overline{G}$ from Corollary 1 \cite{olsh93}, but also for $\hat{G}$ from Corollary 4 \cite{olsh93} and $\tilde{G}$ from Corollary 2 \cite{olsh93}.
\begin{thm}\label{thm31.2} Let $G$ be an arbitrary non-cyclic torsion-free hyperbolic group, $G/\bar{N}_{\mathcal{R}}$ be the group $\hat{G}$ constructed in Corollary 4 \cite{olsh93}, or $\overline{G}$ from Corollary 1 \cite{olsh93}, or $\tilde{G}$ from Corollary 2 \cite{olsh93} (we assume that
$\overline{G}$ and $\tilde{G}$ are constructed using Theorem 4 \cite{olsh93} instead of Theorem 2 \cite{olsh93} or Theorem 3 \cite{olsh93}). Then $H=G/[\bar N_{\mathcal{R}}, G]$ has no torsion, and
$\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]$ is a free Abelian group with countable basis of elements $\{\bar R\}_{R\in \mathcal{R}^+}$, where $\bar{R}=R[N_{\mathcal{R}}, F]N$.
\end{thm}
\noindent{\it Proof.} Consider the group $\hat{G}$
(for $\overline{G}$ and $\tilde{G}$, everything is similar, even simpler).
Recall that in Corollary 4 \cite{olsh93}, $\hat{G}$ is an infinite non-Abelian group such that all its proper subgroups are finite. In the construction of $\hat{G}= G(\infty)= {\rm lim}_{\rightarrow}\,\, G_i$, one considers all elements $g_1,g_2,\ldots$ of $G$ and all its finitely generated non-elementary subgroups $H_1, H_2,\ldots$, and by Theorem 4 \cite{olsh93}, one finds a sequence of epimorphisms $G_0=G\rightarrow G_1\rightarrow G_2\rightarrow\ldots$ such that every element $g_i$ has finite order in $G_{2i-1}$ (as in Corollary 2 \cite{olsh93}) and the image of $H_i$ is elementary in $G_{2i}$ or coincides with $G_{2i}$ (as in Corollary 1 \cite{olsh93}).
For sufficiently large numbers $m_{i,0}$, the numbers $m_i$ are chosen arbitrarily so that $m_i\geqslant m_{i,0}$, and the numbers $m_{2i-1}$ are odd.
The sets $\mathcal{R}_{2i}$ are the sets $\mathcal{R}_{k_{2i},l_{2i},m_{2i}}$ from the proof of Theorem 2 \cite{olsh93} or empty sets, if $H_i$ is elementary in $G_{2i-1}$.
If $g_i$ has finite order in $G_{2i-2}$, then $G_{2i-2}\rightarrow G_{2i-1}$ is the identity homomorphism and $\mathcal{R}_{2i-1}=\emptyset$.
Otherwise, one considers the maximal elementary subgroup $E_{i,0}$ containing $g_i$, and its maximal
infinite cyclic normal subgroup $\langle \bar{g}_i\rangle$, and $G_{2i-1}=G_{2i-2}/\langle \overline{g}_i^{m_{2i-1}} \rangle^{G_{2i-2}}$, and the set $\mathcal{R}_{2i-1}$ is given as the set of all cyclic permutations of the word $W_i^{\pm m_{2i-1}}$, where $W_i$ is a word representing the element $\bar{g}_i$.
Since $G$ is a non-cyclic torsion-free hyperbolic group, as shown in the proof of Corollary 4 \cite{olsh93}, $G$ is non-elementary and satisfies
the quasiidentical relation
$$(\forall x,y\in G) x^2y=yx^2\Rightarrow xy=yx.$$
In addition, by construction (Theorem 4 \cite{olsh93}), all $G_i$ are non-elementary hyperbolic groups satisfying the quasiidentical relation
(for this, $m_{2i-1}$ are chosen odd).
Since $xyx^{-1}=y^{-1}$ implies $x^2yx^{-2}=y$, i.e. $x^2y=yx^2$, the quasiidentical relation gives $xy=yx$; hence $y^{-1}=xyx^{-1}=y$ and $y^2=1$, which is impossible for an element of infinite order.
Since all elements of $\mathcal{R}_i$ have infinite order in $G_{i-1}$ (by Lemmas 4.1(1), 4.2, 7.2 \cite{olsh93}), for any $R\in \mathcal{R}_i$, $R^{-1}$ is not conjugated to $R$ in $G_{i-1}$. Thus, $\mathcal{R}_i$ satisfy the $C_2(\epsilon_i,\mu_i,\lambda_i,c_i,\rho_i)$-condition by Lemma 4.1 (2) \cite{olsh93} for odd $i$ and the $C_1(\epsilon_i,\mu_i,\lambda_i,c_i,\rho_i)$-condition by Lemma 4.2 \cite{olsh93} for even $i$ (there exist appropriate $\lambda_i>0, (l_i>0), c_i\geqslant 0$ such that for any $\mu_i>0, \epsilon_i\geqslant 0, \rho_i>0$, there exists $m_{i,0}>0$).
Since the $C_1(\epsilon_i,\mu_i,\lambda_i,c_i,\rho_i)$-condition and the $C_2(\epsilon_i,\mu_i,\lambda_i,c_i,\rho_i)$-condition hold, by Lemma 6.6 of \cite{olsh93}, the presentation (\ref{infPresent}) of the group $\hat{G}=G(\infty)$ is $G$-aspherical, and also, as noted in the proof of Theorem 4 in \cite{olsh93}, it is $G$-atorical.
Consider the group $H=G/[\bar N_{\mathcal{R}}, G]\simeq F/[N_{\mathcal{R}}, F]N$ (the designations from Sections \ref{S:df} and \ref{S:commutant2}). By Theorem \ref{th31.1}, the Abelian group
$M=\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]\simeq N_{\mathcal{R}}N/[N_{\mathcal{R}}, F]N$ is freely generated by the elements of $\{\bar{R}\}_{R\in \mathcal{R}^+}$, where $\bar{R}=R[N_{\mathcal{R}}, F]N$.
Since $\hat{G}$ is not hyperbolic, $\mathcal{R}^+\neq\cup_{i=1}^{k}\mathcal{R}_i^+$ for every $k$ and the set $\mathcal{R}^+=\cup_{i=1}^{\infty}\mathcal{R}_i^+$ is infinite. The group $H$ is an extension of the group $\hat{G}$ by the central subgroup $M$.
Let's prove that there is no torsion in $H$. Assume the opposite. For every nontrivial element $X$ of finite order in $H$, we have that
$X\notin M$, i.e. $X\neq 1$ in $\hat{G}$, and there exists the smallest number $j$ such that $X$ has finite order in $G_j$, and $j\geqslant 1$.
Among all such elements, we choose an element $X\in H$ with the smallest number $j$. By Lemma 7.2 \cite{olsh93}, in $G_j$, $X$ is conjugated to some element represented by a word $A$, which represents an element from the centralizer $C_{G_{i-1}}(R_i)$ for some $R_i\in \mathcal{R}_i^+$, $i\leqslant j$.
Therefore, since $G_{i-1}$ is a hyperbolic group, there exists a natural number $s$ such that $A^s=R_i^l$ in $G_{i-1}$,
so, $A^s=R_i^l\prod S_kR_{i_k}^{\delta_k}S_k^{-1}$ in $G$, where $\delta_k=\pm 1, R_{i_k}\in \bigcup_{\nu=1}^{i-1}\mathcal{R}_{\nu}^+$.
In $H$, we have
$$A^s=\bar{R}_i^l\prod_{\nu=1}^{i-1}\prod_{k_\nu}\bar{R}_{k_{\nu}}^{l_{k_{\nu}}},$$
where $R_{k_{\nu}}\in \mathcal{R}_{\nu}^+$ are different for different ${k_{\nu}}$, $l_{k_{\nu}}$ are integers. Replacing $X$ with a conjugate, we can assume that $X=AZ$ in the group $H$, where $Z\in M$.
Since $Z$ belongs to the center, in the group $H$, we have
$$X^s=A^sZ^{s}=\bar{R}_i^l\prod_{\nu=1}^{i-1}\prod_{k_\nu}\bar{R}_{k_{\nu}}^{l_{k_{\nu}}}Z^{s}.$$
Since $\bar{R}_i$, $\bar{R}_{k_{\nu}}$ (for all $k_{\nu}$ for all $\nu=1,\ldots,i-1$) are different basis elements of $M$, and there are no elements of finite order in $M$, then $X^s=1$ in $H$ and $l=sd, l_{k_{\nu}}=sd_{k_{\nu}}$ for all $k_{\nu}$.
Hence, $(A(\bar{R}_i^{d}\prod_{\nu=1}^{i-1}\prod_{k_{\nu}}\bar{R}_{k_{\nu}}^{d_{k_{\nu}}})^{-1})^s=1$ in $H$, and $A(\bar{R}_i^{d}\prod_{\nu=1}^{i-1}\prod_{k_{\nu}}\bar{R}_{k_{\nu}}^{d_{k_{\nu}}})^{-1}\neq 1$ in $H$ (otherwise it would be $A\in M$, which means $X\in M$). Since $A\in C_{G_{i-1}}(R_i)$, then we have $(A(\bar{R}_i^{d}\prod_{\nu=1}^{i-1}\prod_{k_{\nu}}\bar{R}_{k_{\nu}}^{d_{k_{\nu}}})^{-1})^s=1$ in $G_{i-1}$. Since $i-1<j$, we have a contradiction with the choice of $X$.
\rule {6pt}{6pt}
The proof of the absence of torsion in $H$ in Theorem \ref{thm31.2} is slightly different from the one for $H$ in Theorem 31.2 of \cite{olsh}, since $l$ does not have to be equal to $1$ here.
\begin{thm}\label{thm31.4_ex3} For every non-cyclic torsion-free hyperbolic group and every $k\in \mathbb{N}$, there exists a non-Abelian torsion-free quotient group $\mathbf{A}$, in which all maximal proper subgroups are isomorphic to $\mathbb{Z}^k$ and the intersection of any two different maximal proper subgroups coincides with the center of $\mathbf{A}$, which is isomorphic to $\mathbb{Z}^{k-1}$, and no nontrivial subgroup of the center is a direct multiplier of $\mathbf{A}$.
\end{thm}
\noindent{\it Proof.} Consider an arbitrary non-cyclic torsion-free hyperbolic group. As in the proof of Theorem \ref{thm31.4_ex2}, it is surjectively mapped to a non-cyclic torsion-free hyperbolic group $G$, whose commutant coincides with $G$.
By Corollary 1 of \cite{olsh93}, for the group $G$, there exists a non-Abelian torsion-free quotient group $G(\infty)=\overline{G}=G/\bar N_{\mathcal{R}}$ such that all its proper subgroups are cyclic, the intersection of any two different maximal subgroups is trivial, and the center is trivial. This group proves the statement for $k=1$. Let $k>1$.
Consider the group $H=G/[\bar N_{\mathcal{R}}, G]$. By Theorem \ref{thm31.2}, the Abelian group
$\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]$ has a countable basis of free generators $\{\bar{R}\}_{R\in \mathcal{R}^+}$. Enumerate the elements of $\{\bar{R}\}_{R\in \mathcal{R}^+}$ by the natural numbers.
Since $\bar{R}$ lies in the center of $H$, the subgroup $L$ consisting of the products $\prod_m \bar{R}_{i_m}^{s_m}$, where $\sum_ms_m=0$, $i_m\geqslant k-1$, is normal in $H$. The group $\mathbf{A}=H/L$ is an extension of the group $\overline{G}$ by the free Abelian central subgroup $\mathbf{C}=\langle \bar{R}_1L\rangle\times\ldots\times\langle \bar{R}_{k-1}L\rangle$. Since $\overline{G}$ is non-Abelian and infinite, $\mathbf{A}$ is also non-Abelian and infinite. Since the group $\overline{G}$ has no torsion, the group $\mathbf{A}$ also has no torsion.
Since the center of $\overline{G}$ is trivial, the center $Z(\mathbf{A})$ of $\mathbf{A}$ coincides with $\mathbf{C}$.
Consider an arbitrary proper subgroup $K$ of $\mathbf{A}$. It is mapped to a subgroup $\hat{K}$ of $\overline{G}$. Either the subgroup $\hat{K}$ coincides with $\overline{G}$, or it is a cyclic subgroup generated by some element $\hat{g}$. In the first case, $\mathbf{A}=KZ(\mathbf{A})$, and hence, $K$ is a normal subgroup of $\mathbf{A}$. Since $\mathbf{A}/K$ is an Abelian group, the commutant of $\mathbf{A}$ belongs to $K$, and hence, $K=\mathbf{A}$ (it follows similarly that no nontrivial subgroup of the center is a direct multiplier of $\mathbf{A}$). In the second case, if $\hat{g}$ is not trivial, then $K=\langle g\rangle\times (K\cap Z(\mathbf{A}))\leqslant\langle g\rangle\times Z(\mathbf{A})\neq \mathbf{A}$, where $g\in K$ is a preimage of $\hat{g}$, otherwise $K\leqslant Z(\mathbf{A})\leqslant\langle g\rangle\times Z(\mathbf{A})\neq \mathbf{A}$ for an arbitrary element $g\notin Z(\mathbf{A})$.
\rule {6pt}{6pt}
\begin{thm}\label{thm31.4_ex4} For every non-cyclic torsion-free hyperbolic group, there exists a non-Abelian torsion-free quotient group $\mathbf{A}$ such that all its maximal proper subgroups are free Abelian of countable rank and the intersection of any two different maximal proper subgroups coincides with the center of $\mathbf{A}$, which is a free Abelian group with basis $\{e_i|i\in \mathbb{N}\}$, and a basis of any maximal proper subgroup of $\mathbf{A}$ can be obtained by adding one element to $\{e_i|i\in \mathbb{N}\}$, and no nontrivial subgroup of the center is a direct multiplier of $\mathbf{A}$.
\end{thm}
\noindent{\it Proof} is similar to the proof of Theorem \ref{thm31.4_ex3} for trivial $L$ and $\mathbf{C}=\bar N_{\mathcal{R}}/[\bar N_{\mathcal{R}}, G]$. \rule {6pt}{6pt}
Note that the center of $\mathbf{A}$ from Theorem \ref{thm31.4_ex3} (respectively, from Theorem \ref{thm31.4_ex4}) is the Frattini subgroup.
\begin{thm}\label{thm31.4_ex5}
For every non-cyclic torsion-free hyperbolic group, there exists an infinite non-Abelian quotient group $K$ such that all its elements of finite order form a subgroup $P$, which is the center, and $P$ is not a direct multiplier of $K$.
\end{thm}
\noindent{\it Proof.} Consider the group $\mathbf{A}$ from Theorem \ref{thm31.4_ex3}, for example, for $k=2$.
In this case, its center $\mathbf{C}$ is an infinite cyclic subgroup $\langle C\rangle$. For an arbitrary integer $s>1$, its subgroup $\langle C^s\rangle$ is normal in $\mathbf{A}$. Put $K=\mathbf{A}/\langle C^s\rangle$.
Since the center of $\overline{G}=K/P$ is trivial, the center of $K$ coincides with the subgroup $P=\langle C\rangle/\langle C^s\rangle$.
Since $\overline{G}$ has no torsion, all elements of finite order in $K$ lie in $P$. The subgroup $P$ is not a direct multiplier of $K$, since the complete preimage of any proper subgroup of $K$ is a proper subgroup of $\mathbf{A}$, and $K$ is non-Abelian.
\rule {6pt}{6pt}
{\it Keywords:} Torsion-free hyperbolic groups, quotients
\end{document}
\begin{document}
\title{Optimal control on distributions}
\setcounter{page}{1}
\textheight=19cm \textwidth=13cm
\oddsidemargin=16mm \evensidemargin=16mm
\date{}
{\footnotesize\it This paper studies (single-time and multitime) optimal control problems
on a nonholonomic manifold, described either by the kernel of a Gibbs--Pfaff form or by
the span of appropriate vector fields. For both descriptions we analyse infinitesimal deformations
and adjointness, single-time optimal control problems, the multitime optimal control
problem of maximizing a multiple integral functional, the multitime optimal control problem of
maximizing a curvilinear integral functional, curvilinear functionals depending on curves,
and the optimization of mechanical work on Riemannian manifolds.
We also prove that a nonholonomic system can always be controlled by
uni-temporal or bi-temporal bang-bang controls.}

{\bf Mathematics Subject Classification 2010}: 49J15, 49J20, 93C15, 93C20.

{\bf Keywords:} nonholonomic manifold, single-time optimal control, multitime optimal control, bang-bang controls.
\section{Optimal control on a distribution\\ described by a Pfaff equation}

A {\it generalized distribution} $\Delta: x \to \Delta_x$, or Stefan--Sussmann distribution, is similar to a distribution,
but the subspaces are not required to be all of the same dimension. The definition
requires that the subspaces $\Delta_x$ are locally spanned by a set of vector fields,
but these need no longer be everywhere linearly independent. It is not hard to see that the dimension of the distribution $\Delta$ is
lower semicontinuous, so that at special points the dimension is lower than at nearby points.
One class of examples is provided by a non-free action of a Lie group on a manifold,
the vector fields in question being the infinitesimal generators of the group
action (a free action gives rise to a genuine distribution).
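For illustration (a sketch of ours, not part of the source), consider the rotation action of $SO(2)$ on $R^2$: its infinitesimal generator $-y\,\partial_x + x\,\partial_y$ vanishes at the origin, where the action fails to be free, so the dimension of the spanned subspace drops from $1$ to $0$ there, in agreement with the lower semicontinuity mentioned above. The following sympy sketch records this.
\begin{verbatim}
import sympy as sp

x, y = sp.symbols('x y')
# infinitesimal generator of the (non-free) rotation action of SO(2) on R^2
X = sp.Matrix([-y, x])

def dim_span_at(px, py):
    # dimension of Delta_(px,py) = span{X(px,py)}
    v = X.subs({x: px, y: py})
    return 0 if v == sp.zeros(2, 1) else 1

print(dim_span_at(0, 0), dim_span_at(1, 0))  # 0 at the fixed point, 1 elsewhere
\end{verbatim}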
Other examples arise in dynamical systems, where the set of vector fields in
the definition is the set of vector fields that commute with a given one.
There are also examples and applications in control theory, where the
generalized distribution represents infinitesimal constraints of the system
(see [ControlJakubczyk], p.~146).

{\bf Lemma} {\it If the variational system (treated as a linear system without constraints on the control) is controllable, then
the original system is strongly accessible.}
Nonholonomic path planning represents a fusion of some of the
newest ideas in control theory, classical mechanics, and differential geometry
with some of the most challenging practical problems in robot
motion planning. Furthermore, the class of systems to which the theory
is relevant is broad: mobile robots, space-based robots, multifingered
hands, and even such systems as a one-legged hopping robot. The
techniques presented here indicate one possible method for generating
efficient and computable trajectories for some of these nonholonomic
systems in the absence of obstacles.

Delay differential equations (DDEs) are differential equations
in which the derivative of the unknown function at a certain time is given in terms of the values of the function at previous times,
for example
$$\frac{d}{dt}x(t) = f(x(t), x(t-\tau)).$$
A delay Pfaff equation means
$$a_i(x(t),x(t - \tau))dx^i(t) = 0.$$
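As a hedged numerical aside (our sketch, not taken from the source), a scalar DDE of the above form can be integrated by the method of steps; the code below uses a plain explicit Euler scheme with a stored history for the assumed test case $f(x,x_\tau) = -x_\tau$ and constant history $x \equiv 1$ for $t \le 0$.
\begin{verbatim}
import numpy as np

# Explicit Euler with the method of steps for a scalar DDE
#   x'(t) = f(x(t), x(t - tau)),
# assumed test case: f(x, xd) = -xd with constant history x(t) = 1 for t <= 0.
def solve_dde(f, history, tau, t_end, dt=1e-3):
    n_delay = int(round(tau / dt))
    n_steps = int(round(t_end / dt))
    x = np.empty(n_steps + 1)
    x[0] = history(0.0)
    for k in range(n_steps):
        t = k * dt
        x_delayed = history(t - tau) if k < n_delay else x[k - n_delay]
        x[k + 1] = x[k] + dt * f(x[k], x_delayed)
    return np.linspace(0.0, t_end, n_steps + 1), x

t, x = solve_dde(lambda x, xd: -xd, lambda t: 1.0, tau=1.0, t_end=10.0)
print(x[-1])  # the solution oscillates and decays towards 0
\end{verbatim}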
\subsection{Infinitesimal deformations and\\ adjointness on distributions}

Let $D$ be a nonholonomic distribution on $R^n$ described by a Pfaff equation
$$a_i(x)dx^i = 0.\leqno(1)$$
Let $x(t),\,\,t\in I = [t_0, t_1] \subset R$, be an integral curve of the distribution $D$.
Let $x(t;\epsilon),\,\epsilon \in [0,\delta)$ be a differentiable variation of $x(t)$, i.e.,
$$A_i(x(t;\epsilon); \epsilon)dx^i(t;\epsilon) = 0,\, A_i(x;0) = a_i(x), \,x(t;0) = x(t).$$
The variation $(t,\epsilon) \to x(t;\epsilon),\, t \in I,\,\epsilon \in [0,\delta)$ is a surface. It is an integral surface
only if the distribution $D$ admits integral surfaces.

Taking the partial derivative with respect to $\epsilon$
and denoting $y^i(t) =\displaystyle \frac{\partial x^i}{\partial \epsilon}(t;0)$, we find the {\it single-time (Pfaff) infinitesimal deformation equation}
$$\left(\frac{\partial a_i}{\partial x^j}(x)y^j(t) + b_i(x)\right)dx^i + a_i(x) dy^i = 0,\quad b_i (x)= \frac{\partial A_i}{\partial \epsilon}(x;0)\leqno(2)$$
around a solution $x(t)$ of the Pfaff equation (1).
The {\it single-time adjoint Pfaff system} is
$$d(p(t)a_j(x(t))) =\frac{\partial (p(t) a_i(x)dx^i)}{\partial x^j},\leqno(3)$$
whose solution $p(t)$ is called the {\it costate function}. The foregoing Pfaff equations (2) and (3) are {\it adjoint} ({\it dual})
in the following sense: if $y$ is a solution of the infinitesimal deformation (Pfaff) equation (2), then the function
$p(t)a_i(x)y^i(t)$ verifies the Pfaff equation $d(p(t)\,a_i(x) y^i(t)) + p(t)\,b_i(x)dx^i = 0$.

Let $x(t),\,\, t \in \Omega_{t_0t_1}\subset R^m_+$ be a maximal, $m$-dimensional, $m \leq n-2$,
integral submanifold of the distribution $D$.
We fix $k = 1,...,n- m -1$. Let $\epsilon = (\epsilon^A)\in [0,\delta)^k, \, A = 1,...,k,$
and let $x(t;\epsilon)$ be a differentiable variation of $x(t)$, i.e.,
$$A_i(x(t;\epsilon);\epsilon)dx^i(t;\epsilon) = 0,\, A_i(x,0) = a_i(x),\, x(t;0) = x(t).$$
The variation $(t,\epsilon) \to x(t;\epsilon),\, t \in \Omega_{t_0t_1},\,\epsilon \in [0,\delta)^k$ is an $(m+k)$-dimensional manifold,
but not an integral submanifold.

Taking the partial derivative with respect to $\epsilon^A$
and denoting $y^i_A(t) = \frac{\partial x^i}{\partial \epsilon^A}(t;0)$, we find the {\it multitime infinitesimal deformation (Pfaff) system}
$$\left(\frac{\partial a_i}{\partial x^j}(x)y^j_A(t) + b_{iA}(x)\right)dx^i + a_i(x) dy^i_A = 0,\quad b_{iA} (x)= \frac{\partial A_i}{\partial \epsilon^A}(x;0)\leqno(4)$$
around a solution $x(t)$ of the Pfaff equation (1). The {\it multitime adjoint Pfaff system} is
$$d(p^A(t)a_j(x)) = \frac{\partial (p^A(t)a_i(x)dx^i)}{\partial x^j},\leqno(5)$$
whose solution $p(t) = (p^A(t))$ is called the {\it costate vector}. The foregoing Pfaff equations (4) and (5) are {\it adjoint} ({\it dual})
in the following sense: if $y^i_A$ is a solution of the infinitesimal deformation (Pfaff) system (4), then the function
$a_i(x)p^A(t)\,y^i_A(t)$ verifies the Pfaff equation $d(a_i(x) p^A(t)\,y^i_A(t)) + p^A(t)\,b_{iA}(x)dx^i= 0$.
\subsection{Evolution of a distribution}

Let $D$ be a nonholonomic distribution on $R^n$ described by the Pfaff equation (1).
Let $x(t),\,\,t\in I = [t_0, t_1] \subset R$, be an integral curve of the distribution $D$.
Let $x(t;\epsilon),\, \epsilon \in [0,\delta)$ be a differentiable variation of $x(t)$. Suppose that $(x(t;\epsilon);\epsilon)$
is an integral surface of a Pfaff equation in $R^{n+1}$, i.e.,
$$A_i(x(t;\epsilon), \epsilon)dx^i(t;\epsilon) = B(x(t;\epsilon), \epsilon)d\epsilon,$$
$$A_i(x,0) = a_i(x), \,B(x,\epsilon) = A_i(x,\epsilon)\frac{\partial x^i}{\partial \epsilon},\, B(x,0) = 0,\,x(t;0) = x(t).$$
Taking the partial derivative with respect to $\epsilon$, we find
$$\left(\frac{\partial A_i}{\partial x^j} \frac{\partial x^j}{\partial \epsilon} + \frac{\partial A_i}{\partial \epsilon}\right) dx^i + A_i\, d\, \frac{\partial x^i}{\partial \epsilon}
= \left(\frac{\partial B}{\partial x^j} \frac{\partial x^j}{\partial \epsilon} + \frac{\partial B}{\partial \epsilon}\right) d\epsilon.$$
If we accept an evolution along the direction of a vector field $X^j$, i.e.,
$\displaystyle\frac{\partial x^j}{\partial \epsilon} = \alpha\, X^j$, then we find the PDE system
$$\left(\frac{\partial A_i}{\partial x^j} X^j + \frac{\partial A_i}{\partial \epsilon}\right) dx^i + A_i\, dX^i
= \left(\frac{\partial B}{\partial x^j} X^j + \frac{\partial B}{\partial \epsilon}\right) d\epsilon$$
with unknowns $A_i$, fixed by initial conditions. For $\epsilon = 0$, we rediscover the system in variations,
with the condition $a_i(x)y^i(t)=0, \, y^i(t) = \frac{\partial x^i}{\partial \epsilon}(t,0)$.
\subsection{Single-time optimal control problems on\\ a distribution}

Let $D$ be a distribution on $R^n$ described by a controlled Pfaff equation
$$a_i(x, u)dx^i = 0,\, x = (x^i)\in R^n,\, u = (u^a)\in R^k$$
and let $x(t),\,\,t\in I = [t_0, t_1],$ be an integral curve of the distribution $D$.

{\it A single-time optimal control problem consists of maximizing the functional
$$I(u(\cdot)) = \int_{t_0}^{t_1}L(t,x(t),u(t)) dt +g(x(t_1))\leqno(6)$$
subject to
$$a_i(x(t), u(t))dx^i(t) = 0,\,\hbox{a.e.}\,\,\, t\in I = [t_0, t_1],\, x(t_0) = x_0.\leqno(7)$$}
It is supposed that $L: I \times A\times U \to R$ is a $C^2$ function, $a_i: A\times U \to R, \, i = 1,..., n,$
are $C^2$ functions and $g$ is a $C^1$ function.
Ingredients: $A$ is a bounded and closed subset of $R^n$ which contains each trajectory $x(t),\, t\in I,$
of the controlled system, and $x_0$ and $x_1$ are the initial and final states of the trajectory $x(t)$.
The values of the control functions belong to a bounded and closed set $U\subset R^k$.

Let us find the first order necessary conditions for an optimal pair $(x,u)$. We fix the control $u(t)$ and we vary
the state $x(t)$ into $x(t,\epsilon)$. We obtain the {\it single-time infinitesimal deformation (Pfaff) equation}
$$\frac{\partial a_i}{\partial x^j}(x,u)y^j dx^i + a_i(x,u) dy^i = 0$$
of the nonholonomic constraint $a_i(x(t), u(t))dx^i(t) = 0$.
This yields the {\it single-time adjoint Pfaff equation}
$$d(p(t) a_j(x(t),u(t))) = \frac{\partial (p(t) a_i(x,u)dx^i)}{\partial x^j},$$
whose solution $p(t)$ is called the {\it costate function}.
Here, the symbol $d$ in the left-hand member of the adjoint equation
means the differentiation with respect to $p$ and $x$.

Using the Lagrangian $1$-form
$${\cal L} = L(t,x(t),u(t)) dt,$$
we build the {\it Hamiltonian $1$-form}
$${\cal H} = {\cal L}(t,x(t),u(t)) + p(t) a_i(x(t), u(t))dx^i(t).$$
{\bf Theorem} ({\bf Single-time maximum principle})
{\it Suppose that the problem of maximizing the functional (6) constrained by (7) has
an interior optimal solution $\hat u(t)$, which determines
the optimal evolution $x(t)$. Then there exists a
costate function $p(t)$ such that
$$
\frac{\partial {\mathcal H}}{\partial p}= a_i(x(t), u(t))dx^i(t) = 0, \leqno(8)
$$
the function $p(t)$ is the unique solution of the following Pfaff system (adjoint system)
$$
d(p(t)a_j(x(t),u(t))) = \frac{\partial {\cal H}}{\partial x^j}\leqno(9)
$$
and satisfies the critical point conditions
$$
{\mathcal H}_{u^a}\left( t,x(t),u(t),p(t)\right) =0,\quad a = \overline{1,k}.\leqno(10)
$$}
{\bf Proof} We use the Hamiltonian $1$-form ${\cal H}$.
The solutions of the foregoing problem are among the solutions of the free maximization problem of the curvilinear integral functional
$$J(u(\cdot)) = \int_{\tilde\Gamma}{\cal H}(t,x(t),u(t),p(t)) + g(x(t_1)),$$
where $\tilde \Gamma = ([t_0,t_1], x([t_0,t_1]))=\{(t,x(t))\,|\,t \in [t_0,t_1]\}\subset R_+\times R^n$.

Suppose that there exists a continuous control $\hat u(t),\, t \in I = [t_0,t_1]$, with $\hat u(t)\in \hbox{Int}\,{U}$,
and an integral curve $x(t)$ which are optimal in the previous problem. Now consider a control variation
$u(t,\epsilon)=\hat u(t) + \epsilon h(t),\, t \in I = [t_0,t_1]$, where $h$ is an arbitrary continuous vector function,
and a state variation $x(t,\epsilon),\, t \in I = [t_0,t_1]$, related by
$$a_i(x(t;\epsilon), u(t; \epsilon))dx^i(t;\epsilon) = 0,\, x(t;0) = x(t),\, x(t_0,0)=x_0.$$
Since $\hat u(t)\in \hbox{Int}\,{U}$ and any continuous function over a compact set
$I$ is bounded, there exists $\epsilon_h>0$ such
that $u(t,\epsilon)=\hat u(t) + \epsilon h(t)\in \hbox{Int}\,{U}$ for all $|\epsilon|<\epsilon_h$.
This $\epsilon$ is used in our variational arguments.

For $|\epsilon|<\epsilon_h$, we define the function
$$J(\epsilon)= \int_{\tilde\Gamma(\epsilon)}{\cal H}(t,x(t,\epsilon),u(t,\epsilon),p(t)) + g(x(t_1,\epsilon))$$
$$= \int_{\tilde\Gamma(\epsilon)}{\cal L}(t,x(t,\epsilon),u(t,\epsilon)) + p(t) a_i(x(t,\epsilon), u(t,\epsilon)) dx^i(t,\epsilon)+ g(x(t_1,\epsilon)).$$
Differentiating with respect to $\epsilon$, it follows that
$$
J^{\prime}(\epsilon)= \int_{\tilde\Gamma(\epsilon)}{\cal L}_{x^j}(t,x(t,\epsilon),u(t,\epsilon))x^j_{\epsilon}(t,\epsilon) + \frac{\partial g}{\partial x^j}(x(t_1,\epsilon))x^j_\epsilon(t_1,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)} p(t) \frac{\partial a_i}{\partial x^j}(x(t,\epsilon), u(t,\epsilon))x^j_{\epsilon}(t,\epsilon) dx^i(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)} p(t) a_i(x(t,\epsilon), u(t,\epsilon)) dx^i_{\epsilon}(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)}{\cal L}_{u^a}(t,x(t,\epsilon),u(t,\epsilon))h^a(t) $$
$$+ \int_{\tilde\Gamma(\epsilon)} p(t) \frac{\partial a_i}{\partial u^a}(x(t,\epsilon), u(t,\epsilon))h^a(t) dx^i(t,\epsilon).$$
Evaluating at $\epsilon = 0$, we find $\tilde\Gamma(0) = \tilde\Gamma$ and
$$J^{\prime}(0)= \int_{\tilde\Gamma}\left({\cal L}_{x^j}(t,x(t),\hat u(t))+ p(t) \frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t)\right)x^j_{\epsilon}(t,0)$$
$$ + \int_{\tilde\Gamma} p(t) a_i(x(t), \hat u(t)) dx^i_{\epsilon}(t,0) + \frac{\partial g}{\partial x^j}(x(t_1,0))x^j_\epsilon(t_1,0)$$
$$ + \int_{\tilde\Gamma}\left({\cal L}_{u^a}(t,x(t),\hat u(t)) + p(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\right) h^a(t),$$
where $x(t)$ is the curve of the state variable corresponding to the optimal control $\hat u(t)$. The integral in the middle can be written
$$\int_{\tilde\Gamma} p(t) a_j(x(t), \hat u(t)) dx^j_{\epsilon}(t,0) = p(t) a_jx^j_{\epsilon}\big|_{\partial{\tilde\Gamma}} - \int_{\tilde\Gamma}d(p(t)a_j(x(t), \hat u(t)))x^j_{\epsilon}(t,0),$$
where the symbol $d$ in the last integral means the differentiation with respect to $p$ and $x$.
We find $J^{\prime}(0)$ as
$$\int_{\tilde\Gamma}\left({\cal L}_{x^j}(t,x(t),\hat u(t)) + p(t)\frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t) - d(p(t)a_j(x(t), \hat u(t)))\right)x^j_{\epsilon}(t,0)$$
$$+ p(t) a_j(x(t), \hat u(t))x^j_{\epsilon}(t,0)\big|_{t_0}^{t_1} + \frac{\partial g}{\partial x^j}(x(t_1,0))x^j_\epsilon(t_1,0)$$
$$ + \int_{\tilde\Gamma}\left({\cal L}_{u^a}(t,x(t),\hat u(t)) + p(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\right) h^a(t).$$
Since $x(t_0)=x_0$, we have $x^j_{\epsilon}(t_0,0) = 0$.
We select the costate function $p(t)$ as the solution of the adjoint Pfaff equation
$${\cal L}_{x^j}(t,x(t),\hat u(t)) + p(t)\frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t) - d(p(t)a_j(x(t), \hat u(t))) = 0,$$
with the terminal condition $p(t_1) a_j(x(t_1), \hat u(t_1)) + \frac{\partial g}{\partial x^j}(x(t_1)) =0$.
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a(t))$. Since
the variation $h$ is arbitrary, we get the following {\it critical point condition}:
$$\frac{\partial {\cal L}}{\partial {u^a}}(t,x(t),\hat u(t)) + p(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t) = 0.$$
The foregoing equations (9) and (10) can be written
$$d(p(t)a_j(x(t),u(t))) = \frac{\partial {\cal H}}{\partial x^j}\left( t,x(t),u(t),p(t)\right),$$
$$\frac{\partial {\cal H}}{\partial {u^a}}\left( t,x(t),u(t),p(t)\right) = 0.$$
{\bf Example} Let us solve the problem
$$\max J(u(\cdot)) = - \frac{1}{2}\int_{t_0}^{t_1}(u^2(t) + z^2(t))dt$$
subject to (controlled Martinet distribution)
$$dz = \frac{1}{2}(y^2 + u)dx,\quad x(t_0) = x_0,\ y(t_0) = y_0,\ z(t_0) = z_0.$$
Denote $\omega = \frac{1}{2}(y^2 + u)dx -dz$. Since
$$\omega \wedge d\omega = \frac{1}{2}dx \wedge d(y^2+u) \wedge dz,$$
the distribution admits only integral curves with the parameter $t$. The Pfaff equation
$$dz = \frac{1}{2}(y^2 + u)dx$$
is equivalent to the differential equation
$$\dot z(t) = \frac{1}{2}(y(t)^2 + u(t))\dot x(t)$$
or to the ODE system
$$\dot x(t) = \varphi(t),\quad \dot y(t) = \psi(t), \quad \dot z(t)= \frac{1}{2}(y(t)^2 + u(t)) \varphi(t).$$
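As a quick numerical sanity check (our own sketch, not part of the source), any choice of $\varphi$, $\psi$ and $u$ produces an admissible curve of the controlled Martinet distribution by forward integration of the ODE system above; the assumed test inputs below are $\varphi(t)=\cos t$, $\psi(t)=\sin t$, $u(t)=0.1$.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

# Assumed test inputs; any choice of phi, psi, u yields an admissible curve.
phi = lambda t: np.cos(t)
psi = lambda t: np.sin(t)
u   = lambda t: 0.1

def rhs(t, s):
    x, y, z = s
    # dot x = phi, dot y = psi, dot z = (y^2 + u) phi / 2
    return [phi(t), psi(t), 0.5 * (y**2 + u(t)) * phi(t)]

sol = solve_ivp(rhs, (0.0, 2.0), [0.0, 0.0, 0.0], rtol=1e-9, atol=1e-12)
print(sol.y[:, -1])  # endpoint of a curve satisfying dz = (y^2 + u) dx / 2
\end{verbatim}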
{\bf First variant} Using the Hamiltonian
$$H = - \frac{1}{2}(u^2(t) + z^2(t)) + p_1(t) \varphi(t) + p_2(t) \psi(t) + p_3(t) \left(\frac{1}{2}(y(t)^2 + u(t))\varphi(t)\right)\!,$$
we find the adjoint ODEs
$$\dot p_1(t) = - \frac{\partial H}{\partial x} = 0,\quad \dot p_2(t) = - \frac{\partial H}{\partial y}= - p_3 y \varphi,\quad \dot p_3(t) = - \frac{\partial H}{\partial z} = z(t)$$
and the critical point condition
$$\frac{\partial H}{\partial u} = - u + \frac{1}{2}\,p_3 \varphi = 0.$$
We find the control $u = \frac{1}{2}\,p_3\varphi$ and $p_1(t) = c_1$. We need to find solutions of the system
$$\dot p_2(t) = - p_3(t) y(t) \dot x(t),\quad \dot p_3(t) = z(t),\quad \dot z(t) = \frac{1}{2}\left(y(t)^2 + \frac{1}{2}p_3(t)\dot x(t)\right)\dot x(t).$$

{\bf Second variant} The Hamiltonian $1$-form is
$${\mathcal H} = - \frac{1}{2} (u^2(t) + z^2(t))dt + p(t) \left(\frac{1}{2}(y^2 + u)dx - dz\right).$$
The critical point condition
$$\frac{\partial {\mathcal H}}{\partial u} = - u\, dt+ \frac{1}{2}\,p\,dx = 0$$
gives the control $u = \frac{1}{2}\,p\,\dot x$.
Since $a_1 = \frac{1}{2}(y^2 + u)$, $a_2 = 0$, $a_3 = -1$, we find the adjoint Pfaff equations
$$d\Big(\frac{1}{2}p(y^2 + u)\Big) = \frac{\partial {\cal H}}{\partial x} = 0,\quad 0 = \frac{\partial {\cal H}}{\partial y} = py\,dx, \quad dp = - \frac{\partial {\cal H}}{\partial z} = z\,dt.$$
We need to solve the system
$$p(y^2 + u)= c,\quad py\,dx = 0,\quad dp = z\,dt,$$
$$u\,dt = \frac{1}{2}\,p\, dx,\quad dz = \frac{1}{2}(y^2 + u)dx.$$
On the other hand, the second variant offers two explicit extremals:
(1) $y=0,\ pu=c_1,\ dp = z\,dt,\ u\,dt = \frac{1}{2}p\,dx,\ dz = u\,dx$, which does not satisfy the general
initial conditions; (2) $x=c_1,\ u = 0,\ z = c_3,\ p =c_3 t + c_4,\ y = \pm\sqrt{\frac{c}{c_3t+c_4}}$, depending upon four
arbitrary constants, which are determined from the initial conditions and the terminal condition.
The first variant can be identified with the second variant via $p_1 = pa_1$, $p_2 = pa_2$, $p_3 = - pa_3$.
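The explicit extremal family (2) can also be checked symbolically. The short sketch below (ours, not from the source) verifies that $x=c_1$, $u=0$, $z=c_3$, $p=c_3 t+c_4$, $y=\pm\sqrt{c/(c_3 t+c_4)}$ satisfies each equation of the system above.
\begin{verbatim}
import sympy as sp

t, c, c1, c3, c4 = sp.symbols('t c c1 c3 c4', positive=True)
x, u, z = c1, sp.Integer(0), c3          # extremal (2)
p = c3*t + c4
y = sp.sqrt(c / (c3*t + c4))

checks = [
    sp.simplify(p*(y**2 + u) - c),                                   # p(y^2+u) = c
    sp.simplify(p*y*sp.diff(x, t)),                                  # p y dx = 0
    sp.simplify(sp.diff(p, t) - z),                                  # dp = z dt
    sp.simplify(u - sp.Rational(1, 2)*p*sp.diff(x, t)),              # u dt = p dx / 2
    sp.simplify(sp.diff(z, t) - sp.Rational(1, 2)*(y**2 + u)*sp.diff(x, t)),  # dz = (y^2+u) dx / 2
]
print(checks)   # expected: [0, 0, 0, 0, 0]
\end{verbatim}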
{\bf Third variant (Ionel Tevy)} We introduce two auxiliary controls $u_1, u_2$, changing the Pfaff equation into the controlled ODE system
$$\dot x(t) = u_1(t),\quad \dot y(t) = u_2(t),\quad \dot z (t)= \frac{1}{2}(y(t)^2 + u(t))u_1(t).$$
Then
$$H = -\frac{1}{2}(z^2 + u^2) + p_1u_1 + p_2u_2 + \frac{1}{2}p(y^2 + u)u_1.$$
We find the adjoint ODEs and the critical point conditions
$$\dot p_1 = - \frac{\partial H}{\partial x}=0,\quad \dot p_2 = - \frac{\partial H}{\partial y} = -pyu_1,\quad \dot p = z, $$
$$\frac{\partial H}{\partial u_1} = p_1 + \frac{1}{2}p(y^2 + u)=0,\quad \frac{\partial H}{\partial u_2} = p_2 =0,\quad \frac{\partial H}{\partial u} = - u +\frac{1}{2}pu_1=0.$$
Two extremals follow:
$$p_1(t) = c,\, p_2(t)=0,\, p(t) = -c_3 t - a,$$
$$u_1(t) = 0, \,u_2(t) = \dot y(t),\, u(t)=0,$$
$$x(t) = c_1, \,y(t) = \pm \sqrt{\frac{2c}{c_3t + a}}, \,z = - c_3;$$
$$p_1(t) = c, \,p_2(t) = 0, \,p(t) = \sqrt{\frac{(\alpha t +\beta)^2 + 4c^2}{\alpha}},$$
$$u_1 = \frac{4\alpha c}{(\alpha t +\beta)^2 + 4c^2}, \,u_2(t) =0, \,u(t) = - 2c \sqrt{\frac{\alpha}{(\alpha t +\beta)^2 + 4c^2}},$$
$$x(t) = \int u_1(t)dt = 2 \arctan\frac{\alpha t + \beta}{2c} + c_1, \,y(t) = 0,\, z(t) = \frac{\alpha t + \beta}{\sqrt{(\alpha t +\beta)^2 + 4c^2}}.$$
A variant of Tevy's solution can be found in Michele Pavon, Optimal control of nonholonomic systems, Proceedings of the 17th
International Symposium on Mathematical Theory of Networks and Systems, Kyoto, July 24--28, 2006.
\subsection{Multitime optimal control problems on\\ a distribution}

Let $D$ be a distribution on $R^n$ described by a controlled Pfaff equation
$$a_i(x, u)dx^i = 0,\, x = (x^i)\in R^n,\, u = (u^a)\in R^k$$
and let $x(t),\,\, t \in \Omega_{t_0t_1}\subset R^m$ be an $m$-dimensional
($m < n$, $m$ maximal) integral submanifold of the distribution $D$.
Let us start with the following problem.

\subsubsection{Multitime optimal control problem of\\ maximizing a multiple integral functional}

{\it Find}
$$\max_{u(\cdot)}\,I(u(\cdot)) = \int_{\Omega_{t_0t_1}}L(t,x(t),u(t))\, \omega + \int_{\partial{\Omega_{t_0t_1}}}g(x(t))d\sigma \leqno(11)$$
{\it subject to}
$$a_i(x(t), u(t))dx^i(t) = 0,\,\hbox{a.e.}\,\,\, t\in \Omega_{t_0t_1},\, x(t_0) = x_0.\leqno(12)$$
It is supposed that $L: \Omega_{t_0t_1} \times A\times U \to R$ is a $C^2$ function and $a_i: A\times U \to R, \, i = 1,..., n,$
are $C^2$ functions. Ingredients: $\omega = dt^1\cdots dt^m$ is the volume element, $A$ is a bounded and closed subset of $R^n$
containing the images of the $m$-sheets $x(t),\, t\in \Omega_{t_0t_1},$ of the controlled system,
and $x_0$ and $x_1$ are the initial and final states of an $m$-sheet $x(t)$.
The control functions take their values in a set $U$, a bounded and closed subset of $R^k$.
Let us find the first order necessary conditions for an optimal pair $(x,u)$.
We fix the control $u(t)$ and vary the state $x(t)$ into $x(t,\epsilon),\, \epsilon = (\epsilon^1,...,\epsilon^m)$.
We find the {\it multitime infinitesimal deformation (Pfaff) system}
$$\frac{\partial a_i}{\partial x^j}(x,u)y^j_\alpha(t) dx^i + a_i(x,u) dy^i_\alpha = 0$$
of the nonholonomic constraint $a_i(x(t), u(t))dx^i(t) = 0$. This yields the {\it multitime adjoint Pfaff system}
$$d(p^\alpha(t) a_j(x,u)) = \frac{\partial (p^\alpha(t)a_i(x,u)dx^i)}{\partial x^j},$$
whose solution $p(t) = (p^\alpha(t))$ is called the {\it costate vector}. Here,
the symbol $d$ in the left-hand member of the adjoint equation
means the differentiation with respect to $p$ and $x$.

We use the Lagrangian $m$-form
$${\cal L} = L(t,x(t),u(t))\, \omega.$$
Introducing the $(m-1)$-forms
$$\omega_\lambda =\displaystyle \frac{\partial}{\partial t^\lambda}\rfloor\omega,$$
a {\it costate variable vector or Lagrange multiplier vector} $p=p^\alpha(t)\displaystyle\frac{\partial}{\partial t^\alpha}$ is identified with
the $(m-1)$-form $p = p^\lambda(t) \omega_\lambda$. We build a {\it Hamiltonian $m$-form}
$${\cal H} = {\cal L}(t,x(t),u(t)) + p^\lambda(t) a_i(x(t), u(t))dx^i(t)\wedge \omega_\lambda.$$
{\bf Theorem} ({\bf Multitime maximum principle})
{\it Suppose that the problem of maximizing the functional (11) constrained by (12) has
an interior optimal solution $\hat u(t)$, which determines the optimal evolution $x(t)$. Then there exists a
costate function $p(t)$ such that
$$
\frac{\partial {\mathcal H}}{\partial p^\lambda}= a_i(x(t), u(t))dx^i(t)\wedge \omega_\lambda = 0, \leqno(13)
$$
the function $p(t)$ is the unique solution of the following Pfaff system (adjoint system)
$$
d(p^\lambda a_j(x,u))\wedge \omega_\lambda = \frac{\partial {\cal H}}{\partial x^j} \leqno(14)
$$
and satisfies the critical point conditions
$$
{\mathcal H}_{u^a}\left( t,x(t),u(t),p(t)\right) =0,\quad a = \overline{1,k}.\leqno(15)
$$}
{\bf Proof} We use the Hamiltonian $m$-form ${\cal H}$.
The solutions of the foregoing problem are among the solutions of the free maximization problem of the functional
$$J(u(\cdot)) = \int_{\tilde\Omega}{\cal H}(t,x(t),u(t),p(t)) + \int_{\partial {\tilde\Omega}} g(x(t))d\sigma,$$
where $\tilde \Omega = (\Omega_{t_0t_1}, x(\Omega_{t_0t_1}))\subset R^m_+\times R^n$.

Suppose that there exists a continuous control $\hat u(t)$ defined over the interval
$\Omega_{t_0t_1}$ with $\hat u(t)\in \hbox{Int}\,{U}$
which is an optimum point in the previous problem. Now we consider a control variation
$u(t,\epsilon)=\hat u(t) + \epsilon h(t)$, where $h$ is an arbitrary continuous vector function,
and a state variation $x(t,\epsilon),\, t \in \Omega_{t_0t_1}$, connected by
$$a_i(x(t;\epsilon), u(t; \epsilon))dx^i(t;\epsilon) = 0,\, x(t;0) = x(t),\, x(t_0,0)=x_0.$$
Since $\hat u(t)\in \hbox{Int}\,{U}$ and any continuous function over a compact set
$\Omega_{t_0t_1}$ is bounded, there exists $\epsilon_h>0$ such
that $u(t,\epsilon)=\hat u(t) + \epsilon h(t)\in \hbox{Int}\,{U}$ for all $|\epsilon|<\epsilon_h$.
This $\epsilon$ is used in our variational arguments.

For $|\epsilon|<\epsilon_h$, we define the function
$$J(\epsilon)= \int_{\tilde\Omega(\epsilon)}{\cal H}(t,x(t,\epsilon),u(t,\epsilon),p(t)) + \int_{\partial {\tilde\Omega(\epsilon)}}g(x(t,\epsilon))d\sigma$$
$$= \int_{\tilde\Omega(\epsilon)}{\cal L}(t,x(t,\epsilon),u(t,\epsilon)) + p^\lambda(t) a_i(x(t,\epsilon), u(t,\epsilon)) dx^i(t,\epsilon)\wedge \omega_\lambda +
\int_{\partial {\tilde\Omega(\epsilon)}}g(x(t,\epsilon))d\sigma.$$
Differentiating with respect to $\epsilon$, it follows that
$$J^{\prime}(\epsilon)= \int_{\tilde\Omega(\epsilon)}{\cal L}_{x^j}(t,x(t,\epsilon),u(t,\epsilon))x^j_{\epsilon}(t,\epsilon) +
\int_{\partial {\tilde\Omega(\epsilon)}}g_{x^j}(x(t,\epsilon))x^j_{\epsilon}(t,\epsilon) d\sigma$$
$$ + \int_{\tilde\Omega(\epsilon)} p^\lambda(t) \frac{\partial a_i}{\partial x^j}(x(t,\epsilon), u(t,\epsilon))x^j_{\epsilon}(t,\epsilon) dx^i(t,\epsilon)\wedge \omega_\lambda$$
$$ + \int_{\tilde\Omega(\epsilon)} p^\lambda(t) a_i(x(t,\epsilon), u(t,\epsilon)) dx^i_{\epsilon}(t,\epsilon)\wedge \omega_\lambda$$
$$ + \int_{\tilde\Omega(\epsilon)}{\cal L}_{u^a}(t,x(t,\epsilon),u(t,\epsilon))h^a(t)$$
$$+ \int_{\tilde\Omega(\epsilon)} p^\lambda(t) \frac{\partial a_i}{\partial u^a}(x(t,\epsilon), u(t,\epsilon))h^a(t) dx^i(t,\epsilon)\wedge \omega_\lambda.$$
Evaluating at $\epsilon = 0$, we find $\tilde\Omega(0) = \tilde\Omega$ and
$$J^{\prime}(0)= \int_{\tilde\Omega}\left({\cal L}_{x^j}(t,x(t),\hat u(t))+ p^\lambda(t) \frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t)\wedge \omega_\lambda\right)x^j_{\epsilon}(t,0)$$
$$ + \int_{\tilde\Omega} p^\lambda(t) a_i(x(t), \hat u(t)) dx^i_{\epsilon}(t,0)\wedge \omega_\lambda +\int_{\partial {\tilde\Omega}}g_{x^j}(x(t))x^j_{\epsilon}(t,0) d\sigma$$
$$ + \int_{\tilde\Omega}\left({\cal L}_{u^a}(t,x(t),\hat u(t)) + p^\lambda(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\wedge \omega_\lambda\right) h^a(t),$$
where $x(t)$ is the $m$-sheet of the state variable corresponding to the optimal control $\hat u(t)$.

To evaluate the multiple integral
$$\int_{\tilde\Omega} p^\lambda(t) a_i(x(t), \hat u(t)) dx^i_{\epsilon}(t,0)\wedge \omega_\lambda,$$
we integrate by parts, via the formula
$$d(p^\lambda a_i x^i_{\epsilon} \omega_\lambda)=(x^i_\epsilon d(p^\lambda a_i) + p^\lambda a_i dx^i_\epsilon)\wedge\omega_\lambda,$$
obtaining
$$\displaystyle\int_{\tilde\Omega}p^\lambda a_idx^i_\epsilon\wedge \omega_\lambda=\displaystyle\int_{\tilde\Omega}d(p^\lambda a_ix^i_\epsilon\omega_\lambda)
- \displaystyle\int_{\tilde\Omega}d(p^\lambda a_i) x^i_\epsilon\wedge \omega_\lambda,$$
where the symbol $d$ in the last integral means the differentiation with respect to $p$ and $x$.
Now we apply the Stokes integral formula
$$\displaystyle\int_{\tilde\Omega}d(p^\lambda a_ix^i_\epsilon\omega_\lambda) = \displaystyle\int_{\partial{\tilde\Omega}}\delta_{\alpha\beta}p^\alpha a_ix^i_\epsilon n^\beta d\sigma,$$
where $(n^\beta(t))$ is the unit normal vector to the boundary ${\partial{\tilde\Omega}}$.
Since the integral in the middle can be written
$$\displaystyle\int_{\tilde\Omega}p^\lambda a_idx^i_\epsilon\wedge \omega_\lambda=\displaystyle\int_{\partial{\tilde\Omega}}\delta_{\alpha\beta}p^\alpha a_ix^i_\epsilon n^\beta d\sigma
- \displaystyle\int_{\tilde\Omega}d(p^\lambda a_i) x^i_\epsilon\wedge \omega_\lambda,$$
we find $J^{\prime}(0)$ as
$$\int_{\tilde\Omega}{\cal L}_{x^j}(t,x(t),\hat u(t))x^j_{\epsilon}(t,0)$$
$$+ \int_{\tilde\Omega} \left(p^\lambda(t) \frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t)
- d(p^\lambda a_j(x(t), \hat u(t)))\right)\wedge \omega_\lambda\, x^j_{\epsilon}(t,0)$$
$$+ \displaystyle\int_{\partial{\tilde\Omega}}\delta_{\alpha\beta}p^\alpha a_ix^i_\epsilon(t,0) n^\beta(t) d\sigma +\int_{\partial {\tilde\Omega}}g_{x^j}(x(t))x^j_{\epsilon}(t,0) d\sigma$$
$$ + \int_{\tilde\Omega}\left({\cal L}_{u^a}(t,x(t),\hat u(t)) + p^\lambda(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\wedge \omega_\lambda\right) h^a(t).$$
We select the costate function $p(t)$ as the solution of the adjoint Pfaff equation (boundary value problem)
$${\cal L}_{x^j}(t,x(t),\hat u(t)) + \left(p^\lambda(t) \frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t)
- d(p^\lambda a_j(x(t), \hat u(t)))\right)\wedge \omega_\lambda = 0,$$
$$\left(\delta_{\alpha\beta}p^\alpha(t) n^\beta(t) a_i(x(t), \hat u(t)) + g_{x^i}(x(t))\right)\Big|_{\partial \Omega} = 0.$$
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a(t))$. Since
the variation $h$ is arbitrary, we get the critical point condition
$$\frac{\partial {\cal L}}{\partial {u^a}}(t,x(t),\hat u(t)) + p^\lambda(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\wedge \omega_\lambda = 0.$$
The foregoing equations (14) and (15) can be written
$$\frac{\partial {\cal H}}{\partial {x^j}} - d(p^\lambda a_j) \wedge \omega_\lambda = 0,\,\,\frac{\partial {\cal H}}{\partial {u^a}} = 0.$$
We continue with the following problem.

\subsubsection{Multitime optimal control problem of\\ maximizing a curvilinear integral functional}

{\it Find}
$$\max_{u(\cdot)}\,I(u(\cdot)) = \int_{\Gamma_{t_0t_1}}L_\alpha(t,x(t),u(t)) dt^\alpha + g(x(t_1))\leqno(16)$$
{\it subject to}
$$a_i(x(t), u(t))dx^i(t) = 0,\,\hbox{a.e.}\,\,\, t\in \Omega_{t_0t_1},\, x(t_0) = x_0.\leqno(17)$$
It is supposed that $L_\alpha: \Omega_{t_0t_1} \times A\times U \to R$ and $a_i: A\times U \to R, \, i = 1,..., n,$
are $C^2$ functions. Ingredients: ${\cal L} = L_\alpha(t,x(t),u(t)) dt^\alpha$ is a $1$-form, $A$ is a bounded and closed subset of $R^n$
containing the images of the $m$-sheets $x(t), \,t\in \Omega_{t_0t_1},$ of the controlled system,
and $x_0$ and $x_1$ are the initial and final states of the $m$-sheet $x(t)$ in the controlled system.
The control functions $u^a$ take their values in a set $U$, a bounded and closed subset of $R^k$.
Let us find the first order necessary conditions for an optimal pair $(x,u)$.
We fix the control $u(t)$ and vary the state $x(t)$ into $x(t,\epsilon),\, \epsilon = (\epsilon^1,...,\epsilon^m)$.
We find the {\it multitime infinitesimal deformation (Pfaff) system}
$$\frac{\partial a_i}{\partial x^j}(x,u)y^j_\alpha(t) dx^i + a_i(x,u) dy^i_\alpha = 0$$
of the nonholonomic constraint $a_i(x(t), u(t))dx^i(t) = 0$.
This yields the {\it multitime adjoint Pfaff system}
$$d(pa_j(x,u)) = \frac{\partial (pa_idx^i)}{\partial x^j},$$
whose solution $p(t)$ is called the {\it costate function}. Here, the symbol $d$ in the left-hand member of the adjoint equation
means the differentiation with respect to $p$ and $x$.

We use the Lagrangian $1$-form
$${\cal L} = L_\alpha(t,x(t),u(t)) dt^\alpha.$$
Introducing a {\it costate variable or Lagrange multiplier} $p$, we build a {\it Hamiltonian $1$-form}
$${\cal H} = {\cal L}(t,x(t),u(t)) + p(t) a_i(x(t), u(t))dx^i(t).$$
{\bf Theorem} ({\bf Multitime maximum principle})
{\it Suppose that the problem of maximizing the functional (16) constrained by (17) has
an interior optimal solution $\hat u(t)$, which determines the optimal evolution $x(t)$. Then there exists a
costate function $p(t)$ such that
$$
\frac{\partial {\mathcal H}}{\partial p}= a_i(x(t), u(t))dx^i(t) = 0, \leqno(18)
$$
the function $p(t)$ is the unique solution of the following Pfaff system (adjoint system)
$$
d(pa_j(x,u)) = \frac{\partial {\cal H}}{\partial x^j}\leqno(19)
$$
and the critical point conditions
$$
{\mathcal H}_{u^a}\left( t,x(t),u(t),p(t)\right) =0,\quad a = \overline{1,k}\leqno(20)
$$
hold.}
{\bf Proof} We use the Hamiltonian $1$-form ${\cal H}$.
The solutions of the foregoing problem are among the solutions of the free maximization problem of the curvilinear integral functional
$$J(u(\cdot)) = \int_{\tilde\Gamma}{\cal H}(t,x(t),u(t)) + g(x(t_1)),$$
where $\tilde \Gamma = (\Gamma_{t_0t_1}, x(\Gamma_{t_0t_1}))\subset R^m_+\times R^n$.

Suppose that there exists a continuous control $\hat u(t)$ defined over the interval $\Omega_{t_0t_1}$ with $\hat u(t)\in \hbox{Int}\,{U}$
which is an optimum point in the previous problem. We consider a control variation
$u(t,\epsilon)=\hat u(t) + \epsilon h(t)$, where $h$ is an arbitrary continuous vector function,
and a state variation $x(t,\epsilon),\, t \in \Omega_{t_0t_1}$, connected by
$$a_i(x(t;\epsilon), u(t; \epsilon))dx^i(t;\epsilon) = 0,\, x(t;0) = x(t),\, x(t_0,0)=x_0.$$
Since $\hat u(t)\in \hbox{Int}\,{U}$ and a continuous function over a compact set
$\Omega_{t_0t_1}$ is bounded, there exists $\epsilon_h>0$ such
that $u(t,\epsilon)=\hat u(t) + \epsilon h(t)\in \hbox{Int}\,{U}$ for all $|\epsilon|<\epsilon_h$.
This $\epsilon$ is used in our variational arguments.

For $|\epsilon|<\epsilon_h$, we define the function
$$J(\epsilon)= \int_{\tilde\Gamma(\epsilon)}{\cal H}(t,x(t,\epsilon),u(t,\epsilon),p(t)) + g(x(t_1,\epsilon))$$
$$= \int_{\tilde\Gamma(\epsilon)}{\cal L}(t,x(t,\epsilon),u(t,\epsilon)) + p(t) a_i(x(t,\epsilon), u(t,\epsilon)) dx^i(t,\epsilon)+ g(x(t_1,\epsilon)).$$
Differentiating with respect to $\epsilon$, it follows that
$$
J^{\prime}(\epsilon)= \int_{\tilde\Gamma(\epsilon)}{\cal L}_{x^j}(t,x(t,\epsilon),u(t,\epsilon))x^j_{\epsilon}(t,\epsilon)+ g_{x^j}(x(t_1,\epsilon))x^j_{\epsilon}(t_1,\epsilon)
$$
$$ + \int_{\tilde\Gamma(\epsilon)} p(t) \frac{\partial a_i}{\partial x^j}(x(t,\epsilon), u(t,\epsilon))x^j_{\epsilon}(t,\epsilon) dx^i(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)} p(t) a_i(x(t,\epsilon), u(t,\epsilon)) dx^i_{\epsilon}(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)}{\cal L}_{u^a}(t,x(t,\epsilon),u(t,\epsilon))h^a(t)$$
$$+ \int_{\tilde\Gamma(\epsilon)} p(t) \frac{\partial a_i}{\partial u^a}(x(t,\epsilon), u(t,\epsilon))h^a(t) dx^i(t,\epsilon).$$
Evaluating at $\epsilon = 0$, we find $\tilde\Gamma(0) = \tilde\Gamma$ and
$$J^{\prime}(0)= \int_{\tilde\Gamma}\left({\cal L}_{x^j}(t,x(t),\hat u(t))+ p(t) \frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t)\right)x^j_{\epsilon}(t,0)$$
$$
+ \int_{\tilde\Gamma} p(t) a_i(x(t), \hat u(t)) dx^i_{\epsilon}(t,0) + g_{x^j}(x(t_1))x^j_{\epsilon}(t_1,0)
$$
$$ + \int_{\tilde\Gamma}\left({\cal L}_{u^a}(t,x(t),\hat u(t)) + p(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\right) h^a(t),$$
where $x(t)$ is the $m$-sheet of the state variable corresponding to the optimal control $\hat u(t)$.

To evaluate the curvilinear integral
$$\int_{\tilde\Gamma} p(t) a_i(x(t), \hat u(t)) dx^i_{\epsilon}(t,0),$$
we integrate by parts, via the formula
$$d(p a_i x^i_{\epsilon})= x^i_\epsilon d(p a_i) + p a_i dx^i_\epsilon,$$
obtaining
$$\displaystyle\int_{\tilde\Gamma}p a_idx^i_\epsilon = \displaystyle\int_{\tilde\Gamma}d(p a_ix^i_\epsilon)
- \displaystyle\int_{\tilde\Gamma}d(p a_i) x^i_\epsilon$$
$$ = (p(t) a_i(x(t), \hat u(t)) x^i_{\epsilon}(t,0))\big|_{t_0}^{t_1} - \displaystyle\int_{\tilde\Gamma}d(p(t) a_i(x(t), \hat u(t))) x^i_\epsilon,$$
where the symbol $d$ in the last integral means the differentiation with respect to $p$ and $x$. We find
$$J^{\prime}(0) = \int_{\tilde\Gamma}{\cal L}_{x^j}(t,x(t),\hat u(t))x^j_{\epsilon}(t,0)$$
$$+ \int_{\tilde\Gamma} \left(p(t)\frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t) - d(p(t)a_j(x(t), \hat u(t)))\right) x^j_{\epsilon}(t,0)$$
$$+ (p(t) a_i(x(t),\hat u(t))x^i_{\epsilon}(t,0))\big|_{t_0}^{t_1} + g_{x^j}(x(t_1))x^j_{\epsilon}(t_1,0)$$
$$ + \int_{\tilde\Gamma}\left({\cal L}_{u^a}(t,x(t),\hat u(t)) + p(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t)\right) h^a(t).$$
We select the costate function $p(t)$ as the solution of the adjoint Pfaff equation (terminal value problem)
$${\cal L}_{x^j}(t,x(t),\hat u(t)) + p(t)\frac{\partial a_i}{\partial x^j}(x(t), \hat u(t))dx^i(t) - d(p(t)a_j(x(t), \hat u(t))) = 0,$$
subject to $p(t_1) a_i(x(t_1),\hat u(t_1)) + g_{x^i}(x(t_1)) =0$.
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a(t))$. Since
the variation $h$ is arbitrary, we get the critical point condition
$$\frac{\partial {\cal L}}{\partial {u^a}}(t,x(t),\hat u(t)) + p(t) \frac{\partial a_i}{\partial u^a}(x(t), \hat u(t))dx^i(t) = 0.$$
The foregoing equations (19) and (20) can be written
$$\frac{\partial {\cal H}}{\partial {x^j}} = d(p\, a_j),\,\,\frac{\partial {\cal H}}{\partial {u^a}} = 0.$$
\section{Optimal control on distributions\\ described by vector fields}

\subsection{Infinitesimal deformations and adjointness on\\ distributions}

The same distribution $D$ can be described in terms of smooth vector fields (or generators),
$$D = \hbox{span}\{X_a(x)\,|\, a_i(x)X^i_a = 0,\, a = 1,...,n-1\},\leqno (16)$$
if and only if $n\geq 3$. Any vector field $Y$ in $D$ can be written in the form $Y(x) = u^a(x)X_a(x)$.

Let $x(t)$ be a solution curve of the differential system
$$\dot x(t) = u^a(x(t))X_a(x(t)).$$
Let $x(t;\epsilon)$ be a differentiable variation of $x(t)$, i.e.,
$$\dot x(t;\epsilon) = u^a(x(t;\epsilon))X_a(x(t;\epsilon)),\, x(t;0) = x(t).$$ Denoting $y^i(t) = \displaystyle\frac{\partial x^i}{\partial \epsilon}(t;0)$,
we find the {\it single-time infinitesimal deformation system}
$$\dot y^j(t) = \left(\frac{\partial u^a}{\partial x^i}(x(t)) X^j_a(x(t)) + u^a(x(t)) \frac{\partial X^j_a}{\partial x^i}(x(t))\right)y^i(t).\leqno(17)$$
The {\it single-time adjoint (dual) system} is
$$\dot p_k (t)= - \left(\frac{\partial u^a}{\partial x^k}(x(t)) X^j_a(x(t)) + u^a(x(t)) \frac{\partial X^j_a}{\partial x^k}(x(t))\right)p_j(t),\leqno(18)$$
whose solution $p=(p_k)$ is called the {\it costate vector}.
The foregoing ODE systems (17) and (18) are {\it adjoint} ({\it dual}) in the sense of
{\it constant interior product of solutions}, i.e., the scalar product $p_k\,y^k$ is a first integral.
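This conservation property is easy to test numerically. The sketch below (our own, with an assumed Heisenberg-type distribution on $R^3$ and assumed feedback controls $u^1=\cos x^3$, $u^2=\sin x^3$) integrates the state equation together with the systems (17) and (18) and checks that $p_k y^k$ remains constant along the flow.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

# Assumed Heisenberg-type distribution on R^3 with feedback controls
# u^1 = cos(x3), u^2 = sin(x3);  F(x) = u^a(x) X_a(x).
def F(x):
    x1, x2, x3 = x
    u1, u2 = np.cos(x3), np.sin(x3)
    return np.array([u1, u2, 0.5 * (x1 * u2 - x2 * u1)])

def jac(x, h=1e-6):
    # central-difference approximation of A^j_i = d(u^a X^j_a)/dx^i
    A = np.zeros((3, 3))
    for i in range(3):
        e = np.zeros(3); e[i] = h
        A[:, i] = (F(x + e) - F(x - e)) / (2.0 * h)
    return A

def rhs(t, s):
    x, y, p = s[:3], s[3:6], s[6:]
    A = jac(x)
    # state equation, deformation system (17), adjoint system (18)
    return np.concatenate([F(x), A @ y, -A.T @ p])

s0 = np.concatenate([[0.1, -0.2, 0.3],   # x(0)
                     [1.0, 0.0, 0.5],    # y(0)
                     [0.2, 1.0, -1.0]])  # p(0)
sol = solve_ivp(rhs, (0.0, 5.0), s0, rtol=1e-10, atol=1e-12)
first_integral = np.einsum('it,it->t', sol.y[6:], sol.y[3:6])  # p_k y^k
print(first_integral.max() - first_integral.min())  # constant up to solver error
\end{verbatim}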
Let $x(t)$ be an $m$-sheet integral submanifold of the distribution $D$, i.e., a solution of the multitime
partial differential system $$\frac{\partial x}{\partial t^\alpha}(t) = u^a_\alpha(x(t))X_a(x(t)),\, \alpha = 1,...,m < n-1.$$
Let $\epsilon = (\epsilon^\alpha),\,\alpha = 1,...,m,$ and let $x(t;\epsilon)$ be a differentiable variation of $x(t)$, i.e.,
$$\frac{\partial x}{\partial t^\alpha}(t;\epsilon) = u^a_\alpha(x(t;\epsilon))X_a(x(t;\epsilon)),\,x(t;0) = x(t).$$
Introducing the vector fields $y^i_\alpha(t) = \displaystyle\frac{\partial x^i}{\partial \epsilon^\alpha}(t;0)$,
we find the {\it multitime infinitesimal deformation system}
$$\frac{\partial y^j_\alpha}{\partial t^\beta}(t) = \left(\frac{\partial u^a_\beta}{\partial x^i}(x(t)) X^j_a(x(t)) + u^a_\beta(x(t)) \frac{\partial X^j_a}{\partial x^i}(x(t))\right)y^i_\alpha(t).\leqno(19)$$
The {\it multitime adjoint (dual) system} is
$$\frac{\partial p_k^\alpha}{\partial t^\beta} (t)= - \left(\frac{\partial u^a_\beta}{\partial x^k}(x(t)) X^j_a(x(t)) + u^a_\beta(x(t)) \frac{\partial X^j_a}{\partial x^k}(x(t))\right)p_j^\alpha(t),\leqno(20)$$
whose solution $p=(p^\alpha_k)$ is called the {\it costate matrix}.
The foregoing PDE systems (19) and (20) are {\it adjoint} ({\it dual}) in the sense of
{\it constant interior product of solutions}, i.e., the scalar product $p^\alpha_k\,y^k_\alpha$ is a first integral.

Of course, taking the trace, we can define the {\it costate matrix} $p: \Omega_{0T} \to R^{mn},\,\,p=(p^\alpha_k),$
as the solution of the {\it divergence adjoint PDE system} (trace)
$$\frac{\partial p_k^\alpha}{\partial t^\alpha} (t)= - \left(\frac{\partial u^a_\alpha}{\partial x^k}(x(t)) X^j_a(x(t)) + u^a_\alpha(x(t)) \frac{\partial X^j_a}{\partial x^k}(x(t))\right)p_j^\alpha(t).\leqno(21)$$
But then the PDE systems (19) and (21) are {\it adjoint (dual)} in the sense of {\it zero total divergence}
of the tensor field $Q^\alpha_\beta = p^\alpha_k\,y^k_\beta$ produced by their solutions.
The divergence dual PDE system (21) has solutions since it contains $n$ PDEs with $nm$ unknown functions $p^\alpha_i$.
We can select a solution of the gradient form $p^\alpha_k(t) = \displaystyle\frac{\partial v^\alpha}{\partial x^k}(t,x(t))$.

{\bf Remark} The {\it multitime adjoint Pfaff system} can be defined independently of the dimension
of the parameter $\epsilon$. In particular, the {\it multitime adjoint Pfaff system} can be
$$\frac{\partial p_k}{\partial t^\beta} (t)= - \left(\frac{\partial u^a_\beta}{\partial x^k}(x(t)) X^j_a(x(t)) + u^a_\beta(x(t)) \frac{\partial X^j_a}{\partial x^k}(x(t))\right)p_j(t).\leqno(*)$$
\subsection{Single-time optimal control problems on\\ a distribution}

Let
$$D = \hbox{span}\{X_a(x)\,|\, a_i(x)X^i_a = 0,\, a = 1,...,n-1\}$$
be a distribution on $R^n$ and $x(t),\,\,t\in I = [t_0, t_1],$ be an integral curve of the driftless control system
$$dx(t) = u^a(x(t))X_a(x(t))dt.$$
{\it A single-time optimal control problem consists of maximizing the functional
$$I(u(\cdot)) = \int_{t_0}^{t_1}L(t,x(t),u(x(t))) dt\leqno(22)$$
subject to
$$dx(t) = u^a(x(t))X_a(x(t))dt,\,\hbox{a.e.}\,\,\, t\in I = [t_0, t_1],\, x(t_0) = x_0,\, x(t_1)= x_1.\leqno(23)$$}
It is supposed that $L: I \times A\times U \to R$ is a $C^2$ function and $X_a: A \to R^n, \,a = 1,..., n-1,$
are $C^2$ functions. Ingredients: $A$ is a bounded and closed subset of $R^n$ in which the trajectory of the controlled system is constrained to stay
for $t\in I$, and $x_0$ and $x_1$ are the initial and final states of the trajectory $x(t)$ in the controlled system.
The control functions $u^a$ take their values in a set $U$, a bounded and
closed subset of $R^{n-1}$. The map $u$ is assumed to be piecewise smooth or piecewise analytic. Such maps are
called {\it admissible} and the space ${\cal U}$ of all such maps is called the {\it set of admissible controls}.

Let us find the first order necessary conditions for an optimal pair $(x,u)$. Firstly, the {\it single-time infinitesimal deformation (Pfaff) equation}
of the constraint $dx(t) = u^a(x(t))X_a(x(t))dt$ is the system (17), and the {\it single-time adjoint Pfaff equation} is the system (18).
The control variables may be {\it open-loop} $u^a(t)$, depending directly on the time variable $t$, or {\it closed-loop}
(or {\it feedback}) $u^a(x(t))$, depending on the state $x(t)$.

{\bf Open-loop control variables}
To simplify, we accept an open-loop control $u^a(t)$. Using the Lagrangian $1$-form
$${\cal L}(t,x(t),u(t),p(t)) = L(t,x(t),u(t))dt+p_i(t)[u^a(t)X^i_a(x(t))dt - dx^i(t)],$$
we build the {\it Hamiltonian $1$-form}
$${\cal H} = L(t,x(t),u(t)) dt+ p_i(t) u^a(t) X^i_a(x(t))dt.$$
{\bf Theorem} ({\bf Single-time maximum principle})
{\it Suppose that the problem of maximizing the functional (22) constrained by (23) has
an interior optimal solution $\hat u(t)$, which determines
the optimal evolution $x(t)$. Then there exists a
costate vector $p(t) = (p_i(t))$ such that
$$
dx^i = \frac{\partial {\mathcal H}}{\partial p_i},\leqno(24)
$$
the function $p(t)$ is the unique solution of the following Pfaff system (adjoint system)
$$
dp_i = - \frac{\partial {\cal H}}{\partial x^i}\leqno(25)
$$
and the critical point conditions
$$
{\mathcal H}_{u^a}\left( t,x(t),u(t),p(t)\right) =0,\quad a = \overline{1,n-1}\leqno(26)
$$
hold.}
{\bf Proof} We use the Lagrangian $1$-form ${\cal L}$.
The solutions of the foregoing problem are among the solutions of the free maximization problem of the curvilinear integral functional
$$J(u(\cdot)) = \int_{\tilde\Gamma}{\cal L}(t,x(t),u(t),p(t)),$$
where $\tilde \Gamma = ([t_0,t_1], x([t_0,t_1]))\subset R_+\times R^n$.

Suppose that there exists a continuous control $\hat u(t)$ defined over the interval
$I$ with $\hat u(t)\in \hbox{Int}\,{U}$
which is an optimum point in the previous problem. Now consider a variation
$u(t,\epsilon)=\hat u(t) + \epsilon h(t)$, where $h$ is an arbitrary continuous vector function.
Since $\hat u(t)\in \hbox{Int}\,{U}$ and a continuous function over a compact set
$I$ is bounded, there exists $\epsilon_h>0$ such
that $u(t,\epsilon)=\hat u(t) + \epsilon h(t)\in \hbox{Int}\,{U}$ for all $|\epsilon|<\epsilon_h$.
This $\epsilon$ is used in our variational arguments.

Define $x(t,\epsilon)$ as the $1$-sheet of the state variable corresponding to the control variable $u(t,\epsilon)$, i.e.,
$$dx^i(t;\epsilon) = u^a(t,\epsilon)X^i_a(x(t;\epsilon))dt,\, x(t;0) = x(t),$$
and $x(t_0,0)=x_0$, $x(t_1,0)=x_1$. For $|\epsilon|<\epsilon_h$, we define the function
$$J(\epsilon)= \int_{\tilde\Gamma(\epsilon)}{\cal L}(t,x(t,\epsilon),u(t,\epsilon),p(t))$$
$$= \int_{\tilde\Gamma(\epsilon)} L(t,x(t,\epsilon),u(t,\epsilon))dt+p_i(t)[u^a(t,\epsilon)X^i_a(x(t,\epsilon))dt - dx^i(t,\epsilon)].$$
Differentiating with respect to $\epsilon$, it follows that
$$J^{\prime}(\epsilon)= \int_{\tilde\Gamma(\epsilon)}\left(L_{x^j}(t,x(t,\epsilon),u(t,\epsilon)) + p_i(t)u^a(t,\epsilon) X^i_{ax^j}\right)x^j_{\epsilon}(t,\epsilon)dt$$
$$ - \int_{\tilde\Gamma(\epsilon)} p_i(t) dx^i_{\epsilon}(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)}\left({L}_{u^a}(t,x(t,\epsilon),u(t,\epsilon)) + p_i(t) X^i_a(x(t,\epsilon))\right)h^a(t) dt.$$
Evaluating at $\epsilon = 0$, we find $\tilde\Gamma(0) = \tilde\Gamma$ and
$$J^{\prime}(0)= \int_{\tilde\Gamma}\left(L_{x^j}(t,x(t),\hat u(t)) + p_i(t)\hat{u}^a(t) X^i_{ax^j}\right)x^j_{\epsilon}(t,0)dt$$
$$ - \int_{\tilde\Gamma} p_i(t) dx^i_{\epsilon}(t,0)$$
$$ + \int_{\tilde\Gamma}\left({L}_{u^a}(t,x(t),\hat u(t)) + p_i(t) X^i_a(x(t))\right)h^a(t) dt,$$
where $x(t)$ is the curve of the state variable corresponding to the optimal control $\hat u(t)$. Since the integral in the middle can be written
$$\int_{\tilde\Gamma} p_i(t) dx^i_{\epsilon}(t,0) = p_i(t) x^i_{\epsilon}\big|_{\partial\tilde\Gamma} - \int_{\tilde\Gamma}x^i_{\epsilon}(t,0)dp_i(t),$$
we find $J^{\prime}(0)$ as
$$J^{\prime}(0)= \int_{\tilde\Gamma}\left(\left(L_{x^j}(t,x(t),\hat u(t)) + p_i(t)\hat{u}^a(t) X^i_{ax^j}\right)dt + dp_j\right)x^j_{\epsilon}(t,0)$$
$$ - p_i(t) x^i_{\epsilon}(t,0)\big|_{t_0}^{t_1}$$
$$ + \int_{\tilde\Gamma}\left({L}_{u^a}(t,x(t),\hat u(t)) + p_i(t) X^i_a(x(t))\right)h^a(t) dt.$$
We select the costate function $p(t)$ as the solution of the adjoint Pfaff equation (boundary value problem)
$$\left(L_{x^j}(t,x(t),\hat u(t)) + p_i(t)\hat{u}^a(t) X^i_{ax^j}\right)dt + dp_j = 0,\,\,p(t_0) = 0,\ p(t_1) = 0.$$
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a(t))$. Since
the variation $h$ is arbitrary, we get the critical point condition
$${L}_{u^a}(t,x(t),\hat u(t)) + p_i(t) X^i_a(x(t)) = 0.$$
{\bf Example} Consider the ODE system $\dot x^1(t) = {x^2}^2(t),\ \dot x^2(t) = u(t)$,
generated by the vector fields $X = {x^2}^2 \frac{\partial}{\partial x^1}$, $Y = \frac{\partial}{\partial x^2}$.
We compute the Lie brackets
$$[X,Y] = - 2 x^2 \frac{\partial}{\partial x^1},\quad [[X,Y],Y] = 2 \frac{\partial}{\partial x^1}.$$
The vector fields $Y$ and $[[X,Y],Y]$ are linearly independent.
On the other hand, the $x^1$-coordinate is nondecreasing since ${x^2}^2 \geq 0$.
Consequently, the system is not controllable.
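These bracket computations can be reproduced mechanically; the sympy sketch below (ours, not from the source) computes $[X,Y]$ and $[[X,Y],Y]$ in coordinates.
\begin{verbatim}
import sympy as sp

x1, x2 = sp.symbols('x1 x2')
coords = sp.Matrix([x1, x2])
X = sp.Matrix([x2**2, 0])   # X = (x^2)^2 d/dx^1
Y = sp.Matrix([0, 1])       # Y = d/dx^2

def bracket(V, W):
    # [V, W]^i = V^j dW^i/dx^j - W^j dV^i/dx^j
    return W.jacobian(coords) * V - V.jacobian(coords) * W

XY  = bracket(X, Y)    # -> Matrix([-2*x2, 0]),  i.e. -2 x^2 d/dx^1
XYY = bracket(XY, Y)   # -> Matrix([2, 0]),      i.e.  2 d/dx^1
print(XY.T, XYY.T)
\end{verbatim}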
{\bf Closed-loop control variables}
Now, we accept a closed-loop control $u^a(x(t))$. Using the Lagrangian $1$-form
$${\cal L} = L(t,x(t),u(x(t)))dt+p_i(t)[u^a(x(t))X^i_a(x(t))dt - dx^i(t)],$$
we build the {\it Hamiltonian $1$-form}
$${\cal H} = L(t,x(t),u(x(t)))dt + p_i(t) u^a(x(t)) X^i_a(x(t))dt.$$

{\bf Theorem} ({\bf Single-time maximum principle})
{\it Suppose that the problem of maximizing the functional (22) constrained by (23) has
an interior optimal solution $\hat u(x(t))$, which determines
the optimal evolution $x(t)$. Then there exists a
costate vector $p(t) = (p_i(t))$ such that
$$
dx^i = \frac{\partial {\mathcal H}}{\partial p_i},
$$
the function $p(t)$ is the unique solution of the following Pfaff system (adjoint system)
$$
dp_i = - \frac{\partial {\cal H}}{\partial x^i}
$$
and the critical point conditions
$$
{\mathcal H}_{u^a}\left( t,x(t),u(x(t)),p(t)\right) =0,\quad a = \overline{1,n-1}
$$
hold.}

{\bf Proof} The new functional is
$$J(x(\cdot), u(x(\cdot))) = \int_{\tilde\Gamma}{\cal L}(t,x(t),u(x(t)),p(t)).$$
A variation $x(t,\epsilon)$ induces a variation $u(x(t,\epsilon)) =\hat u(x(t)) + \epsilon h(t)$. Then
$$J(\epsilon) = \int_{\tilde\Gamma(\epsilon)}{\cal L}(t,x(t,\epsilon), u(x(t,\epsilon)), p(t)).$$
It follows that
$$
J^{\prime}(\epsilon)= \int_{\tilde\Gamma(\epsilon)}L_{x^j}(t,x(t,\epsilon),u(x(t,\epsilon))) x^j_{\epsilon}(t,\epsilon)dt
$$
$$
+ \int_{\tilde\Gamma(\epsilon)}\left(p_i(t)u^a_{x^j}X^i_a+ p_i(t)u^a(x(t,\epsilon)) X^i_{ax^j}\right)x^j_{\epsilon}(t,\epsilon)dt
$$
$$ - \int_{\tilde\Gamma(\epsilon)} p_i(t) dx^i_{\epsilon}(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)}\left({L}_{u^a}(t,x(t,\epsilon),u(x(t,\epsilon))) + p_i(t) X^i_a(x(t,\epsilon))\right)h^a(t) dt.$$
Evaluating at $\epsilon = 0$, we find $\tilde\Gamma(0) = \tilde\Gamma$ and
$$
J^{\prime}(0)= \int_{\tilde\Gamma}L_{x^j}(t,x(t),\hat u(x(t))) x^j_{\epsilon}(t,0)dt
$$
$$
+ \int_{\tilde\Gamma}\left(p_i(t)\hat u^a_{x^j}X^i_a+ p_i(t)\hat u^a(x(t)) X^i_{ax^j}\right)x^j_{\epsilon}(t,0)dt
$$
$$ - \int_{\tilde\Gamma} p_i(t) dx^i_{\epsilon}(t,0)$$
$$ + \int_{\tilde\Gamma}\left({L}_{u^a}(t,x(t),\hat u(x(t))) + p_i(t) X^i_a(x(t))\right)h^a(t) dt.$$
Since the integral in the middle can be written
$$\int_{\tilde\Gamma} p_i(t) dx^i_{\epsilon}(t,0) = p_i(t) x^i_{\epsilon}\big|_{\partial\tilde\Gamma} - \int_{\tilde\Gamma}x^i_{\epsilon}(t,0)dp_i(t),$$
we find $J^{\prime}(0)$ as
$$
J^{\prime}(0)= \int_{\tilde\Gamma}\left(\left(L_{x^j}(t,x(t),\hat u(x(t))) + p_i(t)\hat u^a_{x^j}X^i_a +p_i(t)\hat{u}^a(x(t)) X^i_{ax^j}\right)dt + dp_j\right)x^j_{\epsilon}(t,0)
$$
$$ - p_i(t) x^i_{\epsilon}(t,0)\big|_{t_0}^{t_1}$$
$$ + \int_{\tilde\Gamma}\left({L}_{u^a}(t,x(t),\hat u(x(t))) + p_i(t) X^i_a(x(t))\right)h^a(t) dt.$$
We select the costate function $p(t)$ as the solution of the adjoint Pfaff equation (boundary value problem)
$$\left(L_{x^j}(t,x(t),\hat u(x(t))) + p_i(t)\hat u^a_{x^j}X^i_a + p_i(t)\hat{u}^a(x(t)) X^i_{ax^j}\right)dt + dp_j = 0,\,\,p(t_0) = 0,\ p(t_1) = 0.$$
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a(t))$. Since
the variation $h$ is arbitrary, we get the critical point condition
$${L}_{u^a}(t,x(t),\hat u(x(t))) + p_i(t) X^i_a(x(t)) = 0.$$
\subsection{Multitime optimal control problems on\\ a distribution}

Let
$$D = \hbox{span}\{X_a(x)\,|\, a_i(x)X^i_a = 0,\, a = 1,...,n-1\},$$
$n\geq 3$, be a distribution on $R^n$ and $x(t),\,\,t\in \Omega_{t_0t_1}$, be an $m$-sheet of the driftless control system
$$dx(t) = u^a_\alpha(x(t))X_a(x(t))dt^\alpha.$$
Let us start with the following problem.

\subsubsection{Multitime optimal control problem of\\ maximizing a multiple integral functional}

{\it Find}
$$\max_{u(\cdot)}\,I(u(\cdot)) = \int_{\Omega_{t_0t_1}}L(t,x(t),u(x(t)))\, \omega \leqno(27)$$
{\it subject to}
$$dx(t) = u^a_\alpha(x(t))X_a(x(t))dt^\alpha,\,\hbox{a.e.}\,\,\, t\in \Omega_{t_0t_1},\, x(t_0) = x_0,\, x(t_1)= x_1.\leqno(28)$$
It is supposed that $L: \Omega_{t_0t_1} \times A\times U \to R$ is a $C^2$ function and
$u^a: A \to R^{n-1}, \, a = 1,..., n-1$, $X^i_a: A \to R^{(n-1)n}, \, a = 1,..., n-1,\ i= 1,...,n,$
are $C^2$ functions. Ingredients: $\omega = dt^1\cdots dt^m$ is the volume element, $A$ is a bounded and closed subset of $R^n$
in which the $m$-sheet of the controlled system is constrained to stay
for $t\in \Omega_{t_0t_1}$, and $x_0$ and $x_1$ are the initial and final states of the $m$-sheet $x(t)$ in the controlled system.
The control functions $u^a(t)$ take their values in a set $U$, a bounded and
closed subset of $R^{n-1}$.

Let us find the first order necessary conditions for an optimal pair $(x,u)$. Firstly, the {\it multitime infinitesimal deformation (Pfaff) system}
of the constraint (28) is (19), and the {\it multitime adjoint Pfaff system} is (20).
The control variables may be {\it open-loop} $u^a(t)$, depending directly on the multitime variable $t$, or {\it closed-loop}
(or {\it feedback}) $u^a(x(t))$, depending on the state $x(t)$.
To simplify, we accept an open-loop control. Introducing the $(m-1)$-forms
$$\omega_\lambda =\displaystyle \frac{\partial}{\partial t^\lambda}\rfloor\omega,$$
a {\it costate variable matrix or Lagrange multiplier matrix} $p=p^\alpha_i(t)\displaystyle\frac{\partial}{\partial t^\alpha}\otimes dx^i$ is identified with
the $(m-1)$-forms $p_i = p^\lambda_i(t) \omega_\lambda$.
We use the Lagrangian $m$-form
$${\cal L}(t,x(t),u(t),p(t)) = L(t,x(t),u(t))\,\omega+p_i^\lambda(t)[u^a_\alpha(t)X^i_a(x(t))dt^\alpha - dx^i(t)]\wedge \omega_\lambda$$
and the {\it Hamiltonian $m$-form}
$${\cal H} = L(t,x(t),u(t))\,\omega+p_i^\lambda(t)u^a_\alpha(t)X^i_a(x(t))dt^\alpha \wedge \omega_\lambda.$$
{\bf Theorem} ({\bf Multitime maximum principle})
{\it Suppose that the problem of maximizing the functional (27) constrained by (28) has
an interior optimal solution $\hat u(t)$, which determines the optimal evolution $x(t)$. Then there exists a
costate matrix $(p(t))$ such that
$$
dx\wedge \Omega_\lambda = \frac{\partial {\mathcal H}}{\partial p^\lambda}, \leqno(29)
$$
the function $(p(t))$ is the unique solution of the following Pfaff system (adjoint system)
$$
dp^\lambda_j\wedge \Omega_\lambda = - \frac{\partial {\cal H}}{\partial x^j},\quad \delta_{\alpha\beta}\,p^\alpha_i(t)\, n^\beta(t) = 0\leqno(30)
$$
and the critical point conditions
$$
{\mathcal H}_{u^a}\left( t,x(t),u(t),p(t)\right) =0,\quad a = \overline{1,n-1}\leqno(31)
$$
hold.}
{\bf Proof} We use the Lagrangian $m$-form ${\cal L}$.
The solutions of the foregoing problem are among the solutions of the free maximization problem of the functional
$$J(u(\cdot)) = \int_{\tilde\Omega}{\cal L}(t,x(t),u(t),p(t)),$$
where $\tilde \Omega = (\Omega_{t_0t_1}, x(\Omega_{t_0t_1}))\subset R^m_+\times R^n$.
Suppose that there exists a continuous control $\hat u(t)$ defined over the interval
$\Omega_{t_0t_1}$ with $\hat u(t)\in \hbox{Int}\,{U}$
which is an optimum point in the previous problem. Now consider a variation
$u(t,\epsilon)=\hat u(t) + \epsilon h(t)$, where $h$ is an arbitrary continuous vector function.
Since $\hat u(t)\in \hbox{Int}\,{U}$ and a continuous function over a compact set
$\Omega_{t_0t_1}$ is bounded, there exists $\epsilon_h>0$ such
that $u(t,\epsilon)=\hat u(t) + \epsilon h(t)\in \hbox{Int}\,{U}$ for all $|\epsilon|<\epsilon_h$.
This $\epsilon$ is used in our variational arguments.
Define $x(t,\epsilon)$ as the $m$-sheet of the state variable corresponding to the control variable $u(t,\epsilon)$, i.e.,
$$dx^i(t;\epsilon) = u^a_\alpha(x(t;\epsilon))X^i_a(x(t;\epsilon))\,dt^\alpha,\quad x(t;0) = x(t),$$
and $x(t_0,0)=x_0$, $x(t_1,0)=x_1$. For $|\epsilon|<\epsilon_h$, we define the function
$$J(\epsilon)= \int_{\tilde\Omega(\epsilon)}{\cal L}(t,x(t,\epsilon),u(t,\epsilon),p(t))$$
$$= \int_{\tilde\Omega(\epsilon)}L(t,x(t,\epsilon),u(t,\epsilon))\,\omega+p_i^\lambda(t)\big[u^a_\alpha(t,\epsilon)X^i_a(x(t,\epsilon))\,dt^\alpha - dx^i(t,\epsilon)\big]\wedge \Omega_\lambda.$$
Differentiating with respect to $\epsilon$, we obtain
$$J^{\prime}(\epsilon)= \int_{\tilde\Omega(\epsilon)}\left(L_{x^j}(t,x(t,\epsilon),u(t,\epsilon)) + p_i^\alpha(t)u^a_\alpha(t,\epsilon)\, X^i_{ax^j}(x(t,\epsilon))\right)x^j_{\epsilon}(t,\epsilon)\,\omega$$
$$ - \int_{\tilde\Omega(\epsilon)} p_i^\lambda(t)\, dx^i_{\epsilon}(t,\epsilon)\wedge \Omega_\lambda$$
$$ + \int_{\tilde\Omega(\epsilon)}\left({L}_{u^a_\lambda}(t,x(t,\epsilon),u(t,\epsilon)) + p_i^\lambda(t) X^i_a(x(t,\epsilon))\right)h^a_\lambda(t)\, \omega.$$
Evaluating at $\epsilon = 0$, we find
$$J^{\prime}(0)= \int_{\tilde\Omega}\left(L_{x^j}(t,x(t),\hat u(t)) + p_i^\alpha(t)\hat{u}^a_\alpha(t)\, X^i_{ax^j}(x(t))\right)x^j_{\epsilon}(t,0)\,\omega$$
$$ - \int_{\tilde\Omega} p_i^\lambda(t)\, dx^i_{\epsilon}(t,0)\wedge \Omega_\lambda$$
$$ + \int_{\tilde\Omega}\left({L}_{u^a_\lambda}(t,x(t),\hat u(t)) + p_i^\lambda(t) X^i_a (x(t))\right)h^a_\lambda(t)\, \omega,$$
where $x(t)$ is the $m$-sheet of the state variable corresponding to the optimal control $\hat u(t)$.
To evaluate the multiple integral
$$\int_{\tilde\Omega} p^\lambda_i(t)\, dx^i_{\epsilon}(t,0)\wedge \Omega_\lambda,$$
we integrate by parts, via the formula
$$d(p^\lambda_i x^i_{\epsilon}\, \Omega_\lambda)=(x^i_\epsilon\, dp^\lambda_i + p^\lambda_i\, dx^i_\epsilon)\wedge\Omega_\lambda,$$
obtaining
$$\displaystyle\int_{\tilde\Omega}p^\lambda_i\,dx^i_\epsilon\wedge \Omega_\lambda=\displaystyle\int_{\tilde\Omega}d(p^\lambda_i x^i_\epsilon\,\Omega_\lambda)
- \displaystyle\int_{\tilde\Omega} x^i_\epsilon\, dp^\lambda_i\wedge \Omega_\lambda.$$
Now we apply the Stokes integral formula
$$\displaystyle\int_{\tilde\Omega}d(p^\lambda_i x^i_\epsilon\,\Omega_\lambda) = \displaystyle\int_{\partial{\tilde\Omega}}\delta_{\alpha\beta}\,p^\alpha_i x^i_\epsilon\, n^\beta\, d\sigma,$$
where $(n^\beta(t))$ is the unit normal vector to the boundary ${\partial{\tilde\Omega}}$.
Since the middle integral can be written as
$$\displaystyle\int_{\tilde\Omega}p^\lambda_i\,dx^i_\epsilon\wedge \Omega_\lambda=\displaystyle\int_{\partial{\tilde\Omega}}\delta_{\alpha\beta}\,p^\alpha_i x^i_\epsilon\, n^\beta\, d\sigma
- \displaystyle\int_{\tilde\Omega} x^i_\epsilon\, dp^\lambda_i\wedge \Omega_\lambda,$$
we find $J^{\prime}(0)$ as
$$J^{\prime}(0)= \int_{\tilde\Omega}\left(L_{x^j}(t,x(t),\hat u(t)) + p_i^\alpha(t)\hat{u}^a_\alpha(t)\, X^i_{ax^j}(x(t))\right)x^j_{\epsilon}(t,0)\,\omega$$
$$- \displaystyle\int_{\partial{\tilde\Omega}}\delta_{\alpha\beta}\,p^\alpha_i x^i_\epsilon(t,0)\, n^\beta\, d\sigma + \displaystyle\int_{\tilde\Omega} dp^\lambda_i\wedge \Omega_\lambda\, x^i_\epsilon(t,0)$$
$$ + \int_{\tilde\Omega}\left({L}_{u^a_\lambda}(t,x(t),\hat u(t)) + p_i^\lambda(t) X^i_a (x(t))\right)h^a_\lambda(t)\, \omega.$$
We select the costate function $p(t)$ as a solution of the adjoint Pfaff equation (boundary value problem)
$$\left(L_{x^j}(t,x(t),\hat u(t)) + p_i^\alpha(t)\hat{u}^a_\alpha(t)\, X^i_{ax^j}(x(t))\right)\omega + dp^\lambda_j\wedge \Omega_\lambda = 0,\quad \delta_{\alpha\beta}\,p^\alpha_i(t)\, n^\beta(t)\big|_{\partial\tilde\Omega} = 0.$$
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a(t))$. Since
the variation $h$ is arbitrary, we obtain the critical point condition
$${L}_{u^a_\lambda}(t,x(t),\hat u(t)) + p_i^\lambda(t) X^i_a (x(t)) = 0.$$
Next, let us consider the following problem.
\subsubsection{Multitime optimal control problem of maximizing a curvilinear integral functional}
{\it Find}
$$\max_{u(\cdot)}\,I(u(\cdot)) = \int_{\Gamma_{t_0t_1}}L_\alpha(t,x(t),u(x(t)))\, dt^\alpha \leqno(32)$$
{\it subject to}
$$dx(t) = u^a_\alpha(x(t))X_a(x(t))\,dt^\alpha,\ \hbox{a.e.}\ t\in \Omega_{t_0t_1},\quad x(t_0) = x_0,\ x(t_1)= x_1.\leqno(33)$$
It is supposed that $L_\alpha: \Omega_{t_0t_1} \times A\times U \to R$ and $u^a: A \to R^{n-1}$, $a = 1,\ldots, n-1$, $X^i_a: A \to R^{(n-1)n}$, $a = 1,\ldots, n-1$, $i= 1,\ldots,n$,
are $C^2$ functions. Ingredients: ${\cal L} = L_\alpha(t,x(t),u(t))\, dt^\alpha$ is a $1$-form; $A$ is a bounded and closed subset of $R^n$
in which the $m$-sheet of the controlled system is constrained to stay
for $t\in \Omega_{t_0t_1}$; $x_0$ and $x_1$ are the initial and final states of the $m$-sheet $x(t)$ of the controlled system;
and $U$, the set in which the control functions $u^a$ take their values, is a bounded and
closed subset of $R^{n-1}$.
Let us find the first order necessary conditions for an optimal pair $(x,u)$. Firstly, the {\it multitime infinitesimal deformation (Pfaff) system}
of the constraint (33) is (19), and the {\it multitime adjoint Pfaff system} is (*).
The control variables may be {\it open-loop} $u^a(t)$, depending directly on the multitime variable $t$, or {\it closed loop}
(or {\it feedback}) $u^a(x(t))$, depending on the state $x(t)$.
To simplify, we accept an open-loop control. Introducing a {\it costate variable vector or
Lagrange multiplier} $p = (p_i(t))$, we build the Lagrangian $1$-form
$${\cal L}(t,x(t),u(t),p(t)) = L_\alpha(t,x(t),u(t))\, dt^\alpha + p_i(t)\big[u^a_\alpha(t)X^i_a(x(t))\,dt^\alpha - dx^i(t)\big]$$
and the {\it Hamiltonian $1$-form}
$${\cal H} = L_\alpha(t,x(t),u(t))\, dt^\alpha + p_i(t)u^a_\alpha(t)X^i_a(x(t))\,dt^\alpha.$$
{\bf Theorem} ({\bf Multitime maximum principle})
{\it Suppose that the problem of maximizing the functional (32) constrained by (33) has
an interior optimal solution $\hat u(t)$, which determines the optimal evolution $x(t)$. Then there exists a
costate function $(p(t))$ such that
$$
dx^i = \frac{\partial {\mathcal H}}{\partial p_i}, \leqno(34)
$$
the function $(p(t))$ is the unique solution of the following Pfaff system (adjoint system)
$$
dp_i = - \frac{\partial {\cal H}}{\partial x^i}\leqno(35)
$$
and the critical point conditions
$$
{\mathcal H}_{u^a_\alpha}\left( t,x(t),u(t),p(t)\right) =0,\quad a = \overline{1,n-1},\ \alpha = 1,\ldots,m\leqno(36)
$$
hold.}
{\bf Proof} We use the Lagrangian $1$-form ${\cal L}$.
The solutions of the foregoing problem are among the solutions of the free maximization problem of the curvilinear integral functional
$$J(u(\cdot)) = \int_{\tilde\Gamma}{\cal L}(t,x(t),u(t),p(t)),$$
where $\tilde \Gamma = (\Gamma_{t_0t_1}, x(\Gamma_{t_0t_1}))\subset R^m_+\times R^n$.
Suppose that there exists a continuous control $\hat u(t)$ defined over the interval $\Omega_{t_0t_1}$ with $\hat u(t)\in \hbox{Int}\,{U}$
which is an optimum point in the previous problem. Now consider a variation
$u(t,\epsilon)=\hat u(t) + \epsilon h(t)$, where $h$ is an arbitrary continuous vector function.
Since $\hat u(t)\in \hbox{Int}\,{U}$ and a continuous function over a compact set
$\Omega_{t_0t_1}$ is bounded, there exists $\epsilon_h>0$ such
that $u(t,\epsilon)=\hat u(t) + \epsilon h(t)\in \hbox{Int}\,{U}$ for all $|\epsilon|<\epsilon_h$.
This $\epsilon$ is used in our variational arguments.
Define $x(t,\epsilon)$ as the $m$-sheet of the state variable corresponding to the control variable $u(t,\epsilon)$, i.e.,
$$dx^i(t;\epsilon) = u^a_\alpha(x(t;\epsilon))X^i_a(x(t;\epsilon))\,dt^\alpha,\quad x(t;0) = x(t)$$
and $x(t_0,0)=x_0$, $x(t_1,0)=x_1$. For $|\epsilon|<\epsilon_h$, we define the function
$$J(\epsilon)= \int_{\tilde\Gamma(\epsilon)}{\cal L}(t,x(t,\epsilon),u(t,\epsilon),p(t))$$
$$= \int_{\tilde\Gamma(\epsilon)}L_\alpha(t,x(t,\epsilon),u(t,\epsilon))\, dt^\alpha + p_i(t)\big[u^a_\alpha(t,\epsilon)X^i_a(x(t,\epsilon))\,dt^\alpha - dx^i(t,\epsilon)\big].$$
Differentiating with respect to $\epsilon$, we obtain
$$J^{\prime}(\epsilon)= \int_{\tilde\Gamma(\epsilon)}\left({L}_{\alpha x^j}(t,x(t,\epsilon),u(t,\epsilon))\, dt^\alpha+ p_i(t)u^a_\alpha(t,\epsilon)X^i_{ax^j}(x(t,\epsilon))\,dt^\alpha\right)x^j_{\epsilon}(t,\epsilon)$$
$$ - \int_{\tilde\Gamma(\epsilon)} p_i(t)\, dx^i_{\epsilon}(t,\epsilon)$$
$$ + \int_{\tilde\Gamma(\epsilon)}\left({L}_{\beta u^a_\alpha}(t,x(t,\epsilon),u(t,\epsilon))\,dt^\beta + p_i(t)X^i_a(x(t,\epsilon))\,dt^\alpha\right) h^a_\alpha(t).$$
Evaluating at $\epsilon = 0$, we find $\tilde\Gamma(0) = \tilde\Gamma$ and
$$J^{\prime}(0)= \int_{\tilde\Gamma}\left({L}_{\alpha x^j}(t,x(t),\hat u(t))\, dt^\alpha+ p_i(t)\hat {u}^a_\alpha(t)X^i_{ax^j}(x(t))\,dt^\alpha\right)x^j_{\epsilon}(t,0)$$
$$ - \int_{\tilde\Gamma} p_i(t)\, dx^i_{\epsilon}(t,0)$$
$$ + \int_{\tilde\Gamma}\left({L}_{\beta u^a_\alpha}(t,x(t),\hat u(t))\,dt^\beta + p_i(t)X^i_a(x(t))\,dt^\alpha\right) h^a_\alpha(t),$$
where $x(t)$ is the $m$-sheet of the state variable corresponding to the optimal control $\hat u(t)$.
To evaluate the curvilinear integral
$$\int_{\tilde\Gamma} p_i(t)\, dx^i_{\epsilon}(t,0),$$
we integrate by parts, via the formula
$$d(p_i x^i_{\epsilon})= x^i_\epsilon\, dp_i + p_i\, dx^i_\epsilon,$$
obtaining
$$\displaystyle\int_{\tilde\Gamma}p_i\,dx^i_\epsilon = (p_i(t)\, x^i_{\epsilon}(t,0))\big|_{t_0}^{t_1} - \displaystyle\int_{\tilde\Gamma}(dp_i)\, x^i_\epsilon.$$
We find
$$J^{\prime}(0)= \int_{\tilde\Gamma}\left({L}_{\alpha x^j}(t,x(t),\hat u(t))\, dt^\alpha+ p_i(t)\hat {u}^a_\alpha(t)X^i_{ax^j}(x(t))\,dt^\alpha+ dp_j\right)x^j_{\epsilon}(t,0)$$
$$ - (p_i(t)\, x^i_{\epsilon}(t,0))\big|_{t_0}^{t_1} $$
$$ + \int_{\tilde\Gamma}\left({L}_{\beta u^a_\alpha}(t,x(t),\hat u(t))\,dt^\beta + p_i(t)X^i_a(x(t))\,dt^\alpha\right) h^a_\alpha(t).$$
We select the costate function $p(t)$ as a solution of the adjoint Pfaff equation (boundary value problem)
$${L}_{\alpha x^j}(t,x(t),\hat u(t))\, dt^\alpha+ p_i(t)\hat {u}^a_\alpha(t)X^i_{ax^j}(x(t))\,dt^\alpha+ dp_j = 0.$$
On the other hand, we need $J^{\prime}(0)=0$ for all $h(t)=(h^a_\alpha(t))$. Since
the variation $h$ is arbitrary, we obtain the critical point condition
$${L}_{\beta u^a_\alpha}(t,x(t),\hat u(t))\,dt^\beta + p_i(t)X^i_a(x(t))\,dt^\alpha = 0.$$
{\bf Example: Nonholonomic control of torsion of a cylinder or prism}
Suppose the torsion of a cylinder or prism is described by the controlled Pfaff equation
$$dz = (y + u(x,y))dx + (-x + v(x,y))dy,\,\,z(0,0) = 0, z(x_0,y_0) = z_0,$$
where the control $(u,v)$ is not subject to constraints. If the complete integrability condition $\displaystyle{\partial v\over \partial x}-\displaystyle{\partial u\over \partial y} = 2$
is verified identically, then we have a holonomic evolution. Otherwise, we have a nonholonomic evolution. Using
the controlled Pfaff equation as constraint, we want to minimize the functional
$$\int_\Gamma (z(x,y)+u(x,y)^2)c_1dx+(z(x,y)+v(x,y)^2) c_2dy,$$
where $\Gamma$ is a $C^1$ curve joining the points $(0,0)$ and $(x_0,y_0)$, and $c_1, c_2$ are constants.
The minimization of the previous integral is equivalent to the maximization of the cost functional
$$P(u(\cdot),v(\cdot))= - \int_\Gamma (z(x,y)+u(x,y)^2)c_1dx+(z(x,y)+v(x,y)^2) c_2dy$$
subject to the controlled Pfaff equation.
Let us find the optimal manifold (surface or curve) of evolution, using the two-variable maximum principle theory.
For that we introduce the 1-forms:
$$\begin{array}{c}\Omega = (y+u)dx+(-x+v)dy - dz\ \ (\hbox{evolution 1-form})\\
{\Omega}^0 = - (z+u^2)c_1 dx - (z+v^2)c_2 dy\ \ (\hbox{running cost 1-form})\\
\eta = p_0\, \Omega^0 + p\, \Omega\ \ (\hbox{control 1-form}).
\end{array}$$
Taking $p_0=1$, we obtain
$$\eta = -(z+u^2)c_1 dx - (z+v^2)c_2 dy + p\big((y+u)dx+(-x+v)dy - dz\big).$$
The adjoint equation $dp = -\displaystyle{\partial \over \partial z}\eta =c_1 dx+c_2 dy$, $p(0,0)=0$, has the solution $p(x,y)=c_1 x+c_2 y$.
The maximization condition
$$H_1(x,y,z(x,y),p(x,y),u(x,y)) =\max_{u}\{-(z(x,y)+u^2)c_1 + p(x,y)(y+u)\},$$
$$H_2(x,y,z(x,y),p(x,y),v(x,y)) =\max_{v}\{-(z(x,y)+v^2)c_2 + p(x,y)(-x+v)\}$$
gives the optimal law
$$u(x,y) = {p(x,y)\over 2c_1},\,v(x,y) = {p(x,y)\over 2c_2}.$$
Replacing in the evolution Pfaff equation, we obtain
$$dz = \left(y + {c_1 x+c_2 y\over 2c_1}\right)dx + \left(-x + {c_1 x+c_2 y\over 2c_2}\right)dy.$$
1) If the complete integrability condition
$${\partial\over \partial y}\left(y + {c_1 x+c_2 y\over 2c_1}\right)={\partial\over \partial x}\left(-x + {c_1 x+c_2 y\over 2c_2}\right),$$
i.e., $4 c_1c_2 + c_2^2 -c_1^2 = 0$, is satisfied, then the evolution surface is
$$z = xy + \frac{x^2}{4} +\frac{y^2}{4}+\frac{c_2}{2c_1}xy.$$
2) If $4 c_1c_2 + c_2^2 -c_1^2 \neq 0$, then the Pfaff evolution equation admits only solutions
which are curves (a nonholonomic surface in $R^3$):
$x = x(t),\ y = y(t),\ z = z(t),\ t\in I,$ with
$$\begin{array}{c}x = x(t),\ y = y(t)\ (\hbox{given arbitrarily}),\\
\displaystyle{dz\over dt} =y(t){x}'(t) - x(t) {y}'(t) + (c_1 x(t) +c_2 y(t))\left(\frac{{x}'(t)}{2c_1} +\frac{{y}'(t)}{2c_2}\right).
\end{array}$$
In this case, for determining $z = z(t)$, we must take $x = x(t),\ y = y(t)$, $t\in I$, as a parametrization of the curve $\Gamma$
from the cost functional. In fact, the problem is reduced to the optimization of a simple integral constrained by
a differential equation.
\section{Curvilinear integral functionals depending on curves}
Let $\Omega_{t_0t_1}\subset R^m$, let ${\Gamma_{t_0t_1}}\subset \Omega_{t_0t_1}$ be a $C^1$ curve and let
$$J({\Gamma_{t_0t_1}}; x(\cdot)) = \int_{\Gamma_{t_0t_1}}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha$$
be a curvilinear integral functional depending on the curve ${\Gamma_{t_0t_1}}$. Consider a variation
${\Gamma_{t_0t_1}(\epsilon)}: t = (t^\alpha(\tau,\epsilon))$ of the curve ${\Gamma_{t_0t_1}}: t = (t^\alpha(\tau))$, with
the same endpoints. Suppose $L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha$ is stationary with respect to $\epsilon$.
Then $$J(\epsilon) = \int_{\Gamma_{t_0t_1}(\epsilon)}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha.$$
The closed curve $C = \Gamma_{t_0t_1}(\epsilon)\cup {\Gamma_{t_1t_0}(0)}$ is the boundary of a surface $S$.
We evaluate $J(\epsilon) - J(0)$, using Stokes formula,
$$J(\epsilon) - J(0) = \int_{\Gamma_{t_0t_1}(\epsilon)}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha - \int_{\Gamma_{t_0t_1}(0)}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha$$
$$ = \int_{\Gamma_{t_0t_1}(\epsilon)}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha + \int_{\Gamma_{t_1t_0}(0)}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha$$
$$ = \int_C \,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha = \int_S \, d(L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha) = \int_S \, D_\beta L_\alpha\, dt^\beta\wedge dt^\alpha$$
$$= \frac{1}{2}\int_S (D_\alpha L_\beta - D_\beta L_\alpha)\,dt^\alpha\wedge dt^\beta.$$
Now we use the variation vector field $\frac{\partial t^\alpha}{\partial\epsilon}(\tau, \epsilon)\big|_{\epsilon = 0} = \xi^\alpha(\tau)$. Replacing $dt^\alpha =\epsilon\, \xi^\alpha$, the surface integral
is transformed into a curvilinear integral
$$= \frac{\epsilon}{2}\int_{\Gamma_{t_0t_1}(0)}\, (D_\alpha L_\beta - D_\beta L_\alpha)(\xi^\alpha dt^\beta - \xi^\beta dt^\alpha) =
\epsilon\,\int_{\Gamma_{t_0t_1}(0)}\, (D_\alpha L_\beta - D_\beta L_\alpha)\xi^\alpha dt^\beta.$$
It follows that
$$J^{\prime}(0) = \int_{\Gamma_{t_0t_1}(0)}\, (D_\alpha L_\beta - D_\beta L_\alpha)\xi^\alpha dt^\beta.$$
Suppose ${\Gamma_{t_0t_1}}$ is a critical point of the functional, hence $J^{\prime}(0) = 0$ for all $\xi$.
Consequently
$$(D_\alpha L_\beta - D_\beta L_\alpha)(t(\tau))\, \frac{dt^\beta}{d\tau}(\tau) = 0.$$
If the curvilinear integral is path independent, then this relation is identically satisfied.
If the curvilinear integral is path dependent, then the discussion depends on $m$
since $a_{\alpha\beta} = D_\alpha L_\beta - D_\beta L_\alpha$ is an antisymmetric matrix, and consequently its determinant $d = \det(a_{\alpha\beta})$
is either $0$, for $m$ odd, or $\geq 0$, for $m$ even. For $m$ odd we have solutions, i.e., critical curves;
for $m$ even, we have either no solution, for $d > 0$, or solutions, for $d = 0$.
Since the differential system is of order one, the curve solution is determined only by a single condition
(the general bilocal problems have no solution). The extremum problems make sense only if we add supplementary conditions
(an initial condition + an isoperimetric condition).
{\bf Variant} Let $\Omega_{t_0t_1}\subset R^m$, let ${\Gamma_{t_0t_1}}\subset \Omega_{t_0t_1}$ be a $C^1$ curve and let
$$J({\Gamma_{t_0t_1}}; x(\cdot)) = \int_{\Gamma_{t_0t_1}}\,L_\alpha(t,x(t),x_\gamma(t))\, dt^\alpha$$
be a curvilinear integral functional depending on the curve ${\Gamma_{t_0t_1}}$. Consider a variation
${\Gamma_{t_0t_1}(\epsilon)}: t = (t^\alpha(\tau,\epsilon))$ of the curve ${\Gamma_{t_0t_1}}: t = (t^\alpha(\tau))$, with
the same endpoints. Denote $M_\alpha(t) = L_\alpha(t,x(t),x_\gamma(t))$.
Then
$$J(\epsilon) = \int_{\Gamma_{t_0t_1}(\epsilon)}\,M_\alpha(t(\tau,\epsilon))\, dt^\alpha(\tau, \epsilon).$$
To compute $J^{\prime}(0)$, we use the variation vector field $\frac{\partial t^\alpha}{\partial \epsilon}(\tau, \epsilon)\big|_{\epsilon = 0} = \xi^\alpha(\tau)$.
From
$$J^{\prime}(\epsilon) = \int_{\Gamma_{t_0t_1}(\epsilon)}\frac{\partial M_\alpha}{\partial t^\beta}(t(\tau, \epsilon))\frac{\partial t^\beta}{\partial\epsilon}(\tau,\epsilon)\,dt^\alpha(\tau, \epsilon) +
M_\alpha(t(\tau, \epsilon))\,d\,\frac{\partial t^\alpha}{\partial \epsilon}(\tau, \epsilon),$$
we obtain
$$J^{\prime}(0) = \int_{\Gamma_{t_0t_1}(0)}\frac{\partial M_\alpha}{\partial t^\beta}(t(\tau))\xi^\beta(\tau)\,dt^\alpha(\tau) + M_\beta(t(\tau))\,d\xi^\beta(\tau).$$
Integrating by parts, we find
$$J^{\prime}(0) = M_\beta(t(\tau))\xi^\beta(\tau)\big|^{\tau_1}_{\tau_0} +\int_{\Gamma_{t_0t_1}(0)}\left(\frac{\partial M_\alpha}{\partial t^\beta}- \frac{\partial M_\beta}{\partial t^\alpha}\right)(t(\tau))\xi^\beta(\tau)\,dt^\alpha(\tau).$$
{\bf Remark} The variation of the function $x(t)$ has nothing to do with the variation of the curve.
\section{Optimization of mechanical work on Riemannian manifolds}
Let $(M,g)$ be a Riemannian manifold and $X$ a $C^{2}$ vector field on M.
Let $x=(x^1,...,x^n)$ denote the local coordinates relative to a fixed local map $(V,h)$.
Since $h:V\rightarrow R^n$ is a diffeomorphism, we denote by $\Omega_{x_{0}x_{1}}$
a subset of $V$ diffeomorphic through $h$ with the
hyper-parallelepiped in $R^n$ having $h(x_{0})$ and $h(x_1)$ as diagonal points.
Let ${\Gamma_{x_0x_1}}: x^i = x^i(t), t\in [t_0,t_1]$ be an arbitrary $C^1$ curve on $M$ which joins the points $x(t_0) = x_0, x(t_1) = x_1.$
The functional
$$J(\Gamma_{x_0x_1}) = \int_{\Gamma_{x_0x_1}}\, g_{ij}(x)X^i(x) dx^j$$
is generated by the {\it mechanical work} produced by the force $\omega_j = g_{ij}(x)X^i(x)$ along the curve $\Gamma_{x_0x_1}$.
Let $X$ be a nowhere zero vector field. $X$ is called a {\it geodesic vector field} iff $\nabla_X X = 0$. Thus $X$ is geodesic iff
each of its integral curves is a geodesic.
{\bf Theorem} {\it If $X$ is a unit geodesic vector field and $\gamma_{x_0x_1}$ is a field line,
then the curve $\gamma_{x_0x_1}$ is a maximum point of the functional $J(\Gamma_{x_0x_1})$
and the maximum value is the length of $\gamma_{x_0x_1}$.}
{\bf Proof} Let us find
$$\max_{\Gamma_{x_0x_1}} J(\Gamma_{x_0x_1}) = \int_{\Gamma_{x_0x_1}}\, g_{ij}(x)X^i(x) dx^j,$$
where
$$g_{ij}(x)X^i(x)X^j(x) = 1.$$
The critical point condition, with respect to the curve ${\Gamma_{x_0x_1}}$, is
$$\left(g_{ij} \nabla_kX^i - g_{ik}\nabla_j X^i\right)(x(t))\, \frac{dx^j}{dt}(t) = 0.$$
It is identically satisfied because ${\gamma_{x_0x_1}}$ is a field line,
the geodesic condition implies $\nabla_X X^i = 0$, and the condition of unit vector field gives
$g_{ij}X^j \nabla_k X^i = 0.$
On the other hand, the inequality
$$|g_{ij}(x)X^i(x)\, dx^j| \leq ||X||\, ds,\quad ds = ||dx|| = \sqrt{g_{ij}dx^i dx^j}$$
becomes an equality if $dx^j = X^j(x(t))\,dt$, i.e., if $x(t)$ is a field line of $X(x)$.
Under the condition $||X|| =1$, the maximum value of the foregoing functional is the length of ${\gamma_{x_0x_1}}$.
\section{Bang-bang control on distributions}
The same distribution $D$ can be described in terms of vector fields,
$$D = \hbox{span}\{X_a(x)|\, a_i(x)X^i_a = 0,\, a = 1,...,n-1\}.\leqno (22)$$
Bang-bang control is an optimal or suboptimal piecewise constant control whose values
are defined by bounds imposed on the amplitude of control components.
The control changes its values according to the switching function which may be found using the maximum principle.
The discontinuity of the bang-bang control leads to discontinuity of a value function for the considered optimal control problem.
Typical problems with bang-bang optimal control include time and terminal cost optimal control for linear control systems.
Bang-bang optimization offers a direct explanation for an otherwise perplexing observation and indicates
that evolution is operating according to principles that every engineer knows.
The black hole applications covered in this section refer to the controllability of the ODE or PDE system by bang-bang controls.
\subsection{Single-time bang-bang optimal control}
Let $x(t)$, $t\in I = [0, \tau] \subset R$, be an integral curve of the distribution $D$.
Any curve in the distribution $\Delta = \hbox{span}\{X_a,\ a = 1,\ldots,n-1\}$ is a solution of the controlled ODE system
$$\dot x(t) = u^a (t)X_a (x(t)),\quad u(t) = (u^a(t)), \quad t\in [0, \tau],\eqno(ODE)$$
called a {\it driftless control system}.
{\bf (1) Time minimum problem} Let $U = [-1, 1]^{n-1} \subset R^{n-1}$ be the control set. Given the starting point $x_0 \in
R^n$, find an optimal control $u^*(\cdot)$ such that
$$
I(u^* (\cdot)) = \min_{u(\cdot)} \int_{0}^{\tau}\, dt,
$$
using the (ODE) evolution as constraint. Since
$\tau^{*} = I(u^*(\cdot))$, the optimal point $\tau^*$ ensures the minimum time
needed to steer to the origin. This time optimum problem is equivalent to a controllability problem.
{\bf Solution} To prove the existence of a bang-bang control, we use the single-time Pontryagin maximum principle.
The Hamiltonian $H(x,p,u) = - 1+ p_i X^i_a(x) u^a$ gives the adjoint ODE system $\dot p_j(t) = - p_i(t) \frac{\partial X^i_a}{\partial x^j}(x(t))\, u^a(t)$.
The extremum of the linear function $u\to H$ exists since each control variable belongs to
the interval $[- 1, 1]$; for an optimum, the control must be at a vertex of $\partial U$ (see linear optimization, the simplex method). If
$Q_a(t) = p_i(t) X^i_a(x(t))$, then the optimal control $u^{*a}$ must be the function (bang-bang control)
$$
u^{*a} = -\, \hbox{sign}\, (Q_{a}(t)) =\left\{\begin{array}{ll} 1 & \hbox{for}\ Q_a(t) < 0,\\
\hbox{undetermined} & \hbox{for}\ Q_a (t)= 0 \ (\hbox{singular control}), \\
- 1 & \hbox{for}\ Q_a(t) > 0.\end{array}\right.
$$
Suppose the Lebesgue measure of each set $\{t \in [0, \tau] : Q_{a}(t) = 0\}$ vanishes.
Then the singular control is ruled out and the remaining possibilities are bang-bang controls.
This optimal control is discontinuous since each component
jumps from a minimum to a maximum and vice versa in response to each change
in the sign of each $Q_a(t)$. The functions $Q_a(t)$ are called {\it switching functions}.
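As an illustration (added here, with an assumed example system that is not taken from the text), the switching law $u^{*a}(t) = -\,\hbox{sign}(Q_a(t))$ can be simulated directly: at each step one evaluates the switching functions $Q_a = p_i X^i_a(x)$, picks the bang-bang vertex, and advances state and costate.
\begin{verbatim}
import numpy as np

def X(x):
    # columns are X_1(x), X_2(x); the Heisenberg-type fields
    # X_1 = (1, 0, -y), X_2 = (0, 1, x) are an assumed toy example
    return np.array([[1.0, 0.0],
                     [0.0, 1.0],
                     [-x[1], x[0]]])

def dX_dx(x):
    # d[i, a, j] = partial X^i_a / partial x^j
    d = np.zeros((3, 2, 3))
    d[2, 0, 1] = -1.0
    d[2, 1, 0] = 1.0
    return d

def simulate(x0, p0, T=1.0, dt=1e-3):
    x, p = np.array(x0, float), np.array(p0, float)
    for _ in range(int(T / dt)):
        Q = X(x).T @ p            # switching functions Q_a = p_i X^i_a
        u = -np.sign(Q)           # bang-bang control (0 on the singular set)
        x = x + dt * X(x) @ u     # dot x = u^a X_a(x)
        p = p - dt * np.einsum('i,iaj,a->j', p, dX_dx(x), u)  # adjoint ODE
    return x, p

print(simulate([1.0, 0.0, 0.0], [0.2, -0.3, 0.5]))
\end{verbatim}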
{\bf (2) Optimal terminal value} Let $U = [-1, 1]^{n-1} \subset R^{n-1}$ be the control set.
Suppose we have to
{\it Minimize the terminal cost functional
$$Q(u(\cdot)) = x^n(t_1)$$
subject to the driftless control system
$$
\dot x(t) = u^a(t)X_a (x(t)),\quad u(t)\in {\cal U}, \quad t\in [t_0, t_1];\quad x(t_0)=x_0.
$$}
{\bf Solution} Since the control Hamiltonian $H(x,p,u) = p_i X^i_a(x) u^a$ is linear in the control, the optimal control is bang-bang.
Automatically we find the optimal costate function and the optimal evolution.
\subsection{Multitime bang-bang optimal control}
Let $\Omega_{0\tau}$ be the hyperparallelepiped determined by two opposite diagonal points $0 =
(0,\ldots, 0)$ and $\tau = (\tau^1, \ldots,\tau^m)$ in $R^m_+$, endowed with the product order.
Let $x(t)$, $t\in \Omega_{0\tau} \subset R^m_+$, be an integral $m$-sheet of the distribution $D$, i.e.,
a solution of a multitime piecewise completely integrable PDE system
$$\frac{\partial x}{\partial t^\alpha}(t) = u^a_\alpha (t)X_a (x(t)),\quad t = (t^\alpha)\in \Omega_{0\tau}, \quad a = \overline{1,n-1}, \quad \alpha = 1,\ldots,m.\eqno(PDE)$$
This sort of controlled PDE is called a {\it driftless control system}. Of course, the piecewise complete integrability conditions
$$\left(\frac{\partial u^a_\alpha}{\partial t^\beta} - \frac{\partial u^a_\beta}{\partial t^\alpha}\right) X_a = u^a_\alpha u^b_\beta [X_a, X_b]\eqno(CIC)$$
restrict the controls, except in the case when they are identically satisfied.
To show that the driftless control system is multitime controllable by bang-bang controls (see also [10]), we use the next
{\it multitime minimum problems}.
{\bf Case of multiple integral functional}
Let $U = [-1, 1]^{m(n-1)} \subset R^{m(n-1)}$ be the control set. Given the starting point $x_0 \in
R^n$, find an optimal control $u^*(\cdot)$ such that
$$
I(u^* (\cdot)) = \min_{u(\cdot)} \int_{\Omega_{0\tau}}\, dt^1\cdots dt^m,
$$
using a completely integrable multitime evolution (PDE) as constraint and supposing that the conditions (CIC) are identically satisfied.
Since $\tau^{*1}\cdots \tau^{*m} = I(u^*(\cdot))$, the optimal point $\tau^* = (\tau^{*1},\ldots, \tau^{*m})$ ensures the minimum multitime
``volume'' needed to steer to the origin. This multitime optimum problem consists in devising a control which transfers a given initial state
to a specified target (controllability problem).
{\bf Solution} We apply the multitime maximum principle which proves the existence of a bang-bang control.
The Hamiltonian $H(x,p,u) = - 1+ p_i^\alpha X^i_a(x) u^a_\alpha$ gives the adjoint PDE system
$\frac{\partial p^\alpha_j}{\partial t^\alpha}(t) = - p_i^\alpha(t) \frac{\partial X^i_a}{\partial x^j}(x(t))\, u^a_\alpha(t)$.
The extremum of the linear function $u\to H$ exists since the set $U$ is compact; for an optimum, the control vectors
$u_\alpha = (u^1_\alpha, \ldots, u^{n-1}_\alpha)$ must be vertices of $\partial U$. If
$Q^\alpha_a(t) = p^\alpha_i(t) X^i_a(x(t))$ are the switching functions, then each optimal control $u^{*a}_\alpha$ is of the form
$$
u^{*a}_\alpha = - \,\hbox{sign}\, (Q^\alpha_{a}(t)) =\left\{\begin{array}{ll} 1 & \hbox{for}\ Q^\alpha_a(t) < 0: \ \hbox{bang-bang control},\\
\hbox{undetermined} & \hbox{for}\ Q^\alpha_a (t)= 0: \ \hbox{singular control}, \\
- 1 & \hbox{for}\ Q^\alpha_a(t) > 0: \ \hbox{bang-bang control}.\end{array}\right.
$$
Suppose the Lebesgue measure of each set $\{t \in \Omega_{0\tau} : Q^\alpha_{a}(t) = 0\}$ vanishes.
Then the singular control is ruled out and the remaining possibilities are bang-bang controls.
This optimal control is discontinuous since each component
jumps from a minimum to a maximum and vice versa in response to each change
in the sign of each $Q^\alpha_a(t)$. The piecewise complete integrability identities keep only
the control vectors (vertices of $\partial U$) $u_\alpha$ which satisfy $u_\alpha = \pm\, u_1$.
Each optimal $m$-sheet $x(t)$ is a {\it soliton} solution.
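For a concrete feel of this restriction, the following sketch (an assumed illustration, not from the text) enumerates the vertex combinations of $U$ that survive the compatibility requirement $u_\alpha = \pm\, u_1$.
\begin{verbatim}
import itertools

def admissible_vertices(n_minus_1, m):
    """Vertex combinations (u_1, ..., u_m), u_alpha in {-1,+1}^(n-1),
    that satisfy u_alpha = +/- u_1 for every alpha."""
    verts = []
    for u1 in itertools.product((-1, 1), repeat=n_minus_1):
        for signs in itertools.product((-1, 1), repeat=m - 1):
            verts.append([u1] + [tuple(s*c for c in u1) for s in signs])
    return verts

# n-1 = 2 control components, m = 2 times: 8 admissible combinations
# out of the 16 vertex combinations of U = [-1,1]^{m(n-1)}
print(len(admissible_vertices(2, 2)))   # -> 8
\end{verbatim}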
{\bf Case of curvilinear integral functional}
{\bf Optimal terminal value} Let $U = [-1, 1]^{m(n-1)} \subset R^{m(n-1)}$ be the control set.
Suppose we have to
{\it Minimize the terminal cost functional
$$Q(u(\cdot)) = x^n(\tau)$$
subject to the driftless control system}
$$\frac{\partial x}{\partial t^\alpha}(t) = u^a_\alpha (t)X_a (x(t)),\quad t = (t^\alpha)\in \Omega_{0\tau},\ x(0) = x_0, \quad a = \overline{1,n-1}, \quad \alpha = 1,\ldots,m.\eqno(PDE)$$
{\bf Solution} Since the control Hamiltonian $H(x,p,u) = p_i X^i_a(x) u^a$ is linear in the control, the optimal control is bang-bang.
Automatically we find the optimal costate function and the optimal evolution.
\section{Optimal control problems on Tzitzeica surfaces}
Let $R^2_+$ be endowed with the product order. Let $\Omega\subset R^2_+$ be the bi-dimensional interval
determined by the opposite diagonal points $(0,0)$ and $(u^1, v^1)$.
Problem: {\it find
$$\max_h \int_{\Omega}L(u,v,\vec r(u,v), h(u,v)) du dv$$
constrained by (non-ruled Tzitzeica surfaces)
$$\vec r_{uu} = \frac{h_u}{h}\vec r_u + \frac{1}{h}\vec r_v,\,\,\vec r_{uv} = h \vec r,\,\, \vec r_{vv} = \frac{1}{h}\vec r_u +\frac{h_v}{h}\vec r_v,\,\,
(\ln h)_{uv} = h - \frac{1}{h^2},$$
$$\vec r(0,0) = \vec r_0,\, \vec r(u^1,v^1) = \vec r_1,$$
where $h(u,v)$ is the control.}
To solve the problem we use
$${\cal L} = L + \langle \vec a, \frac{h_u}{h}\vec r_u + \frac{1}{h}\vec r_v - \vec r_{uu}\rangle + \langle \vec b, h \vec r - \vec r_{uv}\rangle$$
$$+ \langle \vec c, \frac{1}{h}\vec r_u +\frac{h_v}{h}\vec r_v - \vec r_{vv}\rangle + d\left(h - \frac{1}{h^2} - (\ln h)_{uv}\right).$$
Explicitly, find
$$\max_h \int_{\Omega}(h^2(u,v) + ||\vec r(u,v)||^2)du dv.$$
\section{Phytoplankton growth model}
{\bf Open problem} {\it Transform the next ODE systems into Pfaff systems and study
their stochastic perturbations}.
Alessandro Abate, Ashish Tiwari, Shankar Sastry, Box Invariance for biologically-inspired dynamical systems
(i) O. Bernard and J.-L. Gouze, "Global qualitative description of a class
of nonlinear dynamical systems," Artificial Intelligence, vol. 136, pp.
29-59, 2002:
Consider the following Phytoplankton Growth Model:
$$\dot x^1 = 1- x^1- \frac{1}{4}\,x^1x^2,\quad \dot x^2 = 2x^2x^3 - x^2,\quad \dot x^3 = \frac{1}{4}\,x^1 - 2{x^3}^2,$$
where $x^1$ denotes the substrate, $x^2$ the phytoplankton
biomass, and $x^3$ the intracellular nutrient per biomass.
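As a quick illustration (added here; the initial condition is an arbitrary assumption, and the simulation is not part of the stated open problem), the model can be integrated numerically before any Pfaff reformulation or stochastic perturbation is attempted:
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

def phytoplankton(t, x):
    x1, x2, x3 = x
    return [1 - x1 - 0.25 * x1 * x2,    # substrate
            2 * x2 * x3 - x2,           # phytoplankton biomass
            0.25 * x1 - 2 * x3 ** 2]    # intracellular nutrient per biomass

sol = solve_ivp(phytoplankton, (0.0, 50.0), [0.5, 0.2, 0.3], max_step=0.1)
print(sol.y[:, -1])                     # approximate state at t = 50
\end{verbatim}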
(ii) A. Julius, A. Halasz, V. Kumar, and G. Pappas, “Controlling biological
systems: the lactose regulation system of Escherichia Coli,” in
American Control Conference 2007:
The dynamics of tetracycline antibiotic in a
bacteria which develops resistance to this drug (by turning on
genes $TetA$ and $TetR$) can be described by the following hybrid
system:
$$\dot x^1 = f - \frac{1}{3}\,x^1 x^3 + \frac{1}{8000}\, x^2,\quad \dot x^2 = \frac{3}{200}\,u_0 - \frac{7}{2}\,x^3x^4,$$
$$\dot x^3 = \frac{1}{3}\,x^1x^3 - \frac{1}{2500}\,x^2,\quad \dot x^4 = f - \frac{11}{40000}\,x^4,$$
where $f = \frac{1}{2000}$ if $TetR > \frac{1}{50000}$ and $f = \frac{1}{40}$
otherwise ($f$ is the transcription rate of genes, which are inhibited by $TetR$),
and $x^1$, $x^2$, $x^3$, $x^4$ are the cytoplasmic concentrations
of $TetR$ protein, the $TetR-Tc$ complex, Tetracycline,
and $TetA$ protein, and $u_0$ is the extracellular concentration
of Tetracycline.
{\bf Acknowledgments}
Partially supported by University Politehnica of Bucharest, by UNESCO Chair in Geodynamics, "Sabba S. \c Stef\u anescu"
Institute of Geodynamics, Romanian Academy and by Academy of Romanian Scientists.
\betagin{thebibliography}{30}
\bibitem [2]. O. C\u alin, D.-C. Chang, {\it Sub-Riemannian geometry: general theory and examples},
Encyclopedia of Mathematics and Its Applications, 126, Cambridge University Press, New York, 2009.
\bibitem [3]. A. V. Dmitruk, {\it Quadratic sufficient conditions for strong minimality of abnormal sub-Riemannian geodesics},
Russian Journal of Mathematical Physics, Vol. 6, No. 3, 1999, pp. 363--372.
\bibitem [6]. R. Montgomery, {\it A Tour of Subriemannian Geometries, Their Geodesics and Applications}, Mathematical Surveys and Monographs, 91,
American Mathematical Society, 2002.
\bibitem [8]. S. Shankar Sastry, R. Montgomery, {\it The structure of optimal controls for steering problem},
NOLCOS, 1992, Conf. Proc., Bordeaux, France.
\bibitem [13]. K. -H. Tan, Convex functions on sub-Riemannian manifolds, arXiv: math/0701273v1, [math.DG], 10 January 2007.
\bibitem [14]. C. Udri\c{s}te, {\it Geometric Dynamics}, Kluwer Academic Publishers, 2000.
\bibitem [15]. C. Udri\c ste, O. Dogaru, I. \c Tevy, {\it Extrema with Nonholonomic Constraints}, Geometry Balkan Press, Bucharest, 2002.
\bibitem [16]. C. Udri\c ste, M. Ferrara, D. Opri\c s, {\it Economic Geometric Dynamics}, Monographs and Textbooks 6, Geometry Balkan Press, 2004.
\bibitem[21]. C. Udri\c ste, {\it Multitime controllability, observability and bang-bang principle}, Journal of Optimization Theory and Applications,
139, 1, 141-157 (2008).
\bibitem [22]. Udri\c ste, C.: Simplified multitime maximum principle.
Balkan J. Geom. Appl., 14, 1, 102-119 (2009)
\bibitem [23]. Udri\c ste, C.: Nonholonomic approach of multitime maximum principle.
Balkan J. Geom. Appl., 14, 2, 111-126 (2009)
\bibitem [24]. Udri\c ste, C., \c Tevy, I.: Multitime linear-quadratic regulator problem based on curvilinear integral.
Balkan J. Geom. Appl., 14, 2, 127-137 (2009)
\bibitem [25]. Udri\c ste, C., \c Tevy, I.: Multitime dynamic programming for curvilinear integral actions.
Journal of Optimization Theory and Applications, 146, 1, 189-207 (2010)
\bibitem [26]. Udri\c ste, C.: Equivalence of multitime optimal control problems. Balkan J. Geom. Appl., 15, 1, 155-162 (2010)
\bibitem[27]. Udri\c ste, C.: Multitime maximum principle for curvilinear integral cost.
Balkan J. Geom. Appl., 16, 1, 128-149 (2011)
\bibitem[28]. Udri\c ste, C., Bejenaru, A., Multitime optimal control with area integral costs on boundary. Balkan J. Geom. Appl., 16, 2, 138-154 (2011)
\bibitem[29]. Boothby, W., Frobenius's Theorem in An Introduction to Differentiable Manifolds and Riemannian Geometry, Academic Press, San Diego, California, 2003.
\bibitem[29]. Stefan, P., Accessible sets, orbits and foliations with singularities, Proc. London Math. Soc. 29 (1974), 699-713.
\bibitem[30]. Sussmann, H., Orbits of families of vector fields and integrability of distributions, Trans. Amer. Math. Soc. 180 (1973), 171-188.
\bibitem[31]. Jakubczyk, B., Introduction to Geometric Nonlinear Control; Controllability and Lie Bracket, 2001.
\bibitem[32]. Pukhlikov, A., Optimal Control of Distributions, Computational Mathematics and Modeling, 15, 3 (2011), 223-256.
\bibitem[33]. Kendall, W., Contours of Brownian processes with several-dimensional times, Probability Theory and Related Fields,
52, 3 (1980), 267-276.
\bibitem[34]. Xiao, Y., Local times and related properties of multidimensional iterated Brownian motion,
Journal of Theoretical Probability, 11, 2 (1998), 383-408.
\bibitem[35]. Udri\c ste, C., Damian, V., Two-time stochastic Lagrangian dynamics,
ISTASC'09 Proceedings of the 9th WSEAS International Conference on Systems Theory and Scientific Computation,
Stevens Point, Wisconsin, USA, 2009, ISBN: 978-960-474-109-0.
\end{thebibliography}
Prof. Dr. Constantin Udri\c ste, University Politehnica of Bucharest, Faculty of Applied Sciences, Departament of Mathematics-Informatics,
Splaiul Independentei 313, 060042 Bucharest, Romania, \\E-mail: [email protected], [email protected]
\end{document}
\begin{document}
\markboth{ }{}
\title{\bf Equitable Colorings of $l$-Corona Products \\of Cubic Graphs\footnote{Project has been
partially supported by Narodowe Centrum Nauki under contract
DEC-2011/02/A/ST6/00201}}
\date{}
\author{Hanna Furma\'nczyk\footnote{Institute of Informatics,\ University of Gda\'nsk,\ Wita Stwosza 57, \ 80-308 Gda\'nsk, \ Poland. \ e-mail: [email protected]},
\ Marek Kubale \footnote{Department of Algorithms and System Modelling,\ Gda\'nsk University of Technology,\ Narutowicza 11/12, \ 80-233 Gda\'nsk, \ Poland. \ e-mail: [email protected]},
}
\markboth{H. Furma\'nczyk, M. Kubale}{Equitable Colorings of $l$-Corona Products of Cubic Graphs}
\maketitle
\begin{abstract}
A graph $G$ is equitably $k$-colorable if its vertices can be partitioned into $k$ independent sets in such a way that the number of vertices in any two sets differ by at most one. The
smallest integer $k$ for which such a coloring exists is known as the \emph{equitable chromatic number} of $G$ and it is denoted by $\chi_{=}(G)$.
In this paper the problem of determining the value of the equitable chromatic number for multicoronas of cubic graphs $G \circ^l H$ is studied. The problem of ordinary coloring of multicoronas of
cubic graphs is solvable in polynomial time.
The complexity of equitable coloring problem is an open question for these graphs. We provide some polynomially solvable cases of cubical multicoronas and give simple linear time
algorithms for equitable coloring of such graphs which use at most $\chi_=(G \circ ^l H) + 1$ colors in the remaining cases.
\end{abstract}
{\bf Keywords:} {corona graph, $l$-corona products, cubic graph, equitable chromatic number, polynomial algorithm, 1-absolute approximation algorithm.}
\section{Introduction}\label{intro}
All graphs considered in this paper are connected, finite and simple, i.e. undirected, loopless and without multiple edges.
The paper concerns one of popular graph coloring models, namely equitable coloring. If the set of vertices of a graph $G$ can be partitioned into $k$ (possibly empty) classes
$V_1, V_2, \ldots,V_k$ such that each $V_i$ is an independent set and the condition $||V_i|-|V_j||\leq 1$ holds for every pair ($i, j$), then $G$ is said to be {\it equitably k-colorable}.
The smallest integer $k$ for which $G$ is equitably $k$-colorable is known as the {\it equitable chromatic number} of $G$ and denoted by $\chi_{=}(G)$ \cite{meyer}. When the condition
$||V_i|-|V_j||=0$ holds for every pair ($i, j$), graph $G$ is said to be \emph{strong equitably $k$-colorable}. Given a $k$-coloring of $G$, a vertex with color $i$ is called an
$i$-\emph{vertex}.
It is interesting to note that if a graph $G$ is equitably $k$-colorable,
it does not imply that it is equitably $(k + 1)$-colorable. A counterexample is the complete bipartite graph (also cubic graph) $K_{3,3}$, which can be equitably colored with two colors, but not with three. The smallest integer $k$, for which $G$ is equitably $k'$-colorable for all $k' \geq k$, is called the \emph{equitable chromatic threshold} of $G$ and denoted by $\chi^*_=(G)$.
We use also the concept of semi-equitable coloring. Graph $G$ has a \emph{semi-equitable $k$-coloring},
if there exists a partition of its vertices into independent sets $V_1, \ldots , V_k \subset V$ such that
one of these subsets, say $V_i$, is of size $\not\in \{\lfloor n/k \rfloor, \lceil n/k \rceil \}$, and the remaining subgraph $G - V_i$ is equitably $(k - 1)$-colorable. Note that not all
graphs have such a coloring; for example, $K_4$ does not.
In the following we will say that graph $G$ has $(V_1, \ldots, V_k)$-coloring to express explicitly a partition of $V$ into $k$ independent sets.
If, however, only cardinalities of color classes are important, we will use the notation $[|V_1|, \ldots, |V_k|]$.
The model of equitable graph coloring has many practical applications. Every time when we have to divide a system with binary conflict relations into equal or almost equal conflict-free
subsystems we can model this situation by means of equitable graph coloring. Furma\'nczyk \cite{furm} mentions a specific application of this type of scheduling problem, namely, assigning
university courses to time slots in a way that avoids scheduling incompatible courses at the same time and spreads the courses evenly among the available time slots. Also, the application of
equitable coloring in scheduling of jobs on uniform machines was considered in \cite{3masz} and \cite{4masz}.
In this paper we consider the problem of equitable vertex-coloring for one of known graph products, namely for corona products of cubic graphs. Graph products are interesting and
useful in many situations. The complexity of many problems, also equitable coloring, that deal with very large and complicated graphs is reduced greatly if one is able to fully characterize
the properties of less complicated prime factors. Moreover, corona graphs lie often close to the boundary between easy and hard coloring problems \cite{harder}. More formally,
the \emph{corona} of two graphs, $n_G$-vertex graph $G$ and
$n_H$-vertex graph $H$, is a graph $G \circ H$ formed from one copy of $G$, called the \emph{center graph}, and $n_G$ copies of $H$, named the
\emph{outer graph}, where the $i$-th vertex of $G$ is adjacent to every vertex in the $i$-th copy of $H$. Such type of graph product was introduced by Frucht and Harary \cite{frucht}. In this
paper we extend this concept to $l$-corona products as follows. For any integer $l \geq 2$, we define the graph $G \circ ^l H$ as
$G \circ ^l H = (G \circ ^{l-1} H ) \circ H$, where $G \circ ^1 H =G \circ H$. Graph $G \circ ^l H$ is also called the \emph{$l$-corona product} of $G$ and $H$.
The problem of equitable
coloring of corona products of cubic graphs was considered in \cite{harder}. The authors showed that although the problem of ordinary coloring of coronas of cubic graphs is solvable in polynomial time, the problem of equitable coloring becomes
NP-hard for such graphs. Moreover, they provided polynomially solvable instances of cubical coronas in some cases and 1-absolute approximation algorithms in the remaining cases.
In this paper we extend the results from \cite{harder} for cubical multicoronas.
Now, let us recall some facts concerning cubic graphs. In 1994, Chen et al. \cite{clcub} proved that every connected cubic graph with chromatic number 3
also has equitable chromatic number 3. Moreover, since a connected cubic graph $G$ with $\chi(G)=2$ is a bipartite graph $G(A,B)$ with $|A|=|B|$, we have:
$$\chi(G)=\chi_=(G)$$
and, due to Brooks' theorem \cite{brooks},
$$2 \leq \chi_=(G) \leq 4,$$
for any cubic graph $G$.
Let
\begin{itemize}
\item $Q_2$ denote the class of $2$-chromatic cubic graphs,
\item $Q_3$ denote the class of $3$-chromatic cubic graphs,
\item $Q_4$ denote the class of $4$-chromatic cubic graphs.
Clearly, $Q_4=\{K_4\}$.
\end{itemize}
Next, let $Q_2(t) \subset Q_2$ ($Q_3(t) \subset Q_3$) denote the class of 2-chromatic (3-chromatic) cubic graphs with partition sets of cardinality $t$, and let $Q_3(u,v,w) \subset Q_3$ denote the class of
3-chromatic graphs with color classes of cardinalities $u$, $v$ and $w$, respectively, where $u \geq v \geq w \geq u-1$.
Hajnal and Szemer\'edi \cite{hfs:haj} proved
\begin{theorem}
If $G$ is a graph satisfying $\Delta(G) \leq k$ then $G$ has an equitable $(k + 1)$-coloring.
\end{theorem}
This theorem implies that every subcubic graph has an equitable $k$-coloring for every $k \geq 4$. In other words,
\begin{equation}
\chi_=^*(G) \leq 4.\label{prog}
\end{equation}
This result was extended in \cite{sharp} into a semi-equitable coloring of cubic graphs.
\begin{theorem}[\cite{sharp}]
Let $G$ be an $n$-vertex subcubic graph, $K_4 \neq G \neq K_{3,3}$, let $k \geq 4$ be a constant, and let $s=s(n)$ be an integer function. If $s \leq \lceil n/3 \rceil$, then there is a semi-equitable $k$-coloring of $G$ of type
$[s,\lceil \frac{n-s}{k-1} \rceil, \ldots,\lfloor \frac{n-s}{k-1}\rfloor]$.
\label{semi}
\end{theorem}
The remainder of the paper is organized as follows. In Section \ref{aux} we provide some auxiliary tools, while in Section \ref{main} we give our main results. Namely, we give in some cases
polynomial algorithms for optimal equitable coloring of cubical coronas $G \circ^l H$, $l \geq 1$, while in the remaining
cases we give sharp bounds on the equitable chromatic number of $l$-corona products of such
graphs.
Section \ref{sum} summarizes our results in a tabular form and remains as an open question the complexity status of equitable coloring of graphs under consideration.
\section{Some auxiliaries} \label{aux}
In this section we prove lemmas that are used frequently in the remainder of the paper.
\begin{lemma}
Let $G$, $H$ be cubic graphs and $l \geq 1$. If $G$ has a strong equitable $k$-coloring, then so does $G \circ^l H$ for any $k \geq 5$.
\label{lma}
\end{lemma}
\begin{proof}
First, notice that every cubic graph $H$ can be seen as a $(k-1)$-partite graph $H(X_1,X_2,\ldots, X_{k-1})$, $k \geq 5$ (due to inequality (\ref{prog})). Next, let
$V(G)=V_1 \cup V_2 \cup \cdots \cup V_k$, where $V_1, \ldots, V_k$ are independent sets each of size $n_G/k$ (due to strong equitable $k$-coloring of $G$).
Now, we determine an equitable $k$-coloring of $G \circ^1 H$, starting from the strong equitable $k$-coloring of $G$: $c:V(G) \rightarrow \{1,2, \ldots, k\}$.
We extend it to the copies of $H$ in $G \circ H$ in the following way:
\begin{itemize}
\item color vertices of each copy of $H$ linked to an $i$-vertex of $G$ using color $(i+j) \bmod k$ for vertices in $X_j$ (we use color $k$ instead of 0), for $i=1,\ldots,k$ and $j=1,\ldots,k-1$.
\end{itemize}
Let us notice that this $k$-coloring of $G \circ^1 H$ is strong equitable. Indeed, every color is used $n_G/k+n_G/k(|X_1|+|X_2|+\cdots +|X_{k-1}|)=n_G(n_H+1)/k$ times.
The thesis follows from induction on $l$.
\end{proof}
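The color-shifting rule used in this proof is easy to state operationally. The following sketch (an added illustration, not the authors' code) extends a coloring of the current center graph to the attached copies of $H$ by giving the part $X_j$ of the copy at an $i$-vertex the color $(i+j) \bmod k$ (with $k$ in place of $0$); since $1 \leq j \leq k-1$, no vertex of a copy ever receives the color of its center neighbour.
\begin{verbatim}
def extend_coloring(center_colors, H_partition, k):
    """center_colors: colors 1..k of the current center graph's vertices.
    H_partition: the k-1 independent sets of H (lists of H-vertices).
    Returns, for each center vertex, a dict H-vertex -> color of its copy."""
    copies = []
    for i in center_colors:
        copy_coloring = {}
        for j, X_j in enumerate(H_partition, start=1):
            color = (i + j - 1) % k + 1   # (i + j) mod k, with k instead of 0
            for v in X_j:
                copy_coloring[v] = color
        copies.append(copy_coloring)
    return copies

# toy usage with k = 5 and H = K_{3,3} viewed as a 4-partite graph
# with parts {0,1,2}, {3,4,5} and two empty parts (an assumed example)
print(extend_coloring([1, 2, 3, 4, 5], [[0, 1, 2], [3, 4, 5], [], []], 5)[0])
\end{verbatim}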
\begin{lemma}
Let $G$, $H$ be cubic graphs, $H \in Q_2 \cup Q_3$, and $l \geq 1$. If $G$ has a strong equitable $4$-coloring, then so does $G \circ^l H$.
\label{lma4}
\end{lemma}
\begin{proof}
Let $V(G)=V_1 \cup V_2 \cup V_3 \cup V_4$, where $V_1, \ldots, V_4$ are independent sets of size $n_G/4$ each.
Now, we determine an equitable $4$-coloring of $G \circ^1 H$, starting from the strong equitable $4$-coloring of $G$: $c:V(G) \rightarrow \{1,2, 3, 4\}$.
We extend it to the copies of $H$ in $G \circ H$ in the following way:
\begin{description}
\item[\textnormal{\emph{Case} 1:}] $H \in Q_2$
Let $H=H(X_1,X_2)$. We color the vertices of each copy of $H$ linked to an $i$-vertex of $G$ using color $(i+j) \bmod 4$ for vertices in $X_j$ (we use color 4 instead of 0), for $i=1,\ldots,4$
and $j=1,2$.
\item[\textnormal{\emph{Case} 2:}] $H \in Q_3$
Let $H=H(X_1,X_2,X_3)$. We color vertices of each copy of $H$ linked to an $i$-vertex of $G$ using color $(i+j) \bmod 4$ for vertices in $X_j$ (we use color 4 instead of 0), for $i=1,\ldots,4$
and $j=1,2,3$.
\end{description}
Notice that the $4$-coloring of $G \circ^1 H$ is strong equitable. Indeed, every color is used $n_G(n_H+1)/4$ times.
The thesis follows from induction on $l$.
\end{proof}
Actually, we have proved
\begin{corollary}
Let $G$ and $H \neq K_4$ be cubic graphs and $l \geq 1$. If $G$ has a strong equitable $k$-coloring, then
$$\chi_=(G \circ ^l H) \leq k,$$
for any $k \geq 4$.\label{lma_cor}
\end{corollary}
\section{Main results}\label{main}
\subsection{Case $H \in Q_2$}
In this subsection we obtain exact values of $\chi_=(G \circ ^l H)$, where $H \in Q_2$. We give also polynomial-time algorithms for the corresponding colorings.
First, let us recall a known result.
\begin{proposition}[\cite{harder}]
If $G$ is any cubic graph and $H \in Q_2$, then
$$\chi_=(G \circ H)=
\left \{
\begin{array}{ll}
3 & \mbox{if } G \neq K_{3,3} \mbox{ and } 6\,|\,n_G, \\
4 & \mbox{otherwise.}
\end{array}
\right.
$$\label{3-4}
\end{proposition}
\begin{theorem}
If $G$ and $H$ are cubic graphs, then
$\chi_=(G \circ ^l H) =3$ if and only if $G$ has a strong equitable $3$-coloring and $H \in Q_2$.\label{3col}
\end{theorem}
\begin{proof}
\noindent $(\Leftarrow)$ The case $l=1$ follows from Proposition \ref{3-4}. For $l >1$, since $n_G$
is divisible by 6 by assumption and $|V(G \circ ^ l H)|=n_G (n_H + 1)^l$, the number of vertices in the multicorona $G \circ ^l H$ is divisible by 6 for all $l \geq 1$.
Hence, we get the thesis inductively.
\noindent $(\Rightarrow)$ Assume that $\chi_=(G \circ^l H) =3$. This implies:
\begin{itemize}
\item $H$ must be 2-chromatic,
\item $G$ must be 3-colorable (not necessarily equitably), i.e. $\chi(G) \leq 3$, which implies $G \in Q_2 \cup Q_3$.
\end{itemize}
Otherwise, we would have $\chi(G \circ^l H) \geq 4$, which is a contradiction.
We begin with $l=1$. Since $H\in Q_2$ is connected, its bipartition is determined. Let $H\in Q_2(t)$, $t\geq 3$. Observe that every 3-coloring of $G$ determines a 3-partition of
$G \circ H$. Let us consider any 3-coloring of $G$ with color classes of cardinality $n_1, n_2$ and $n_3$, respectively, where $n_G=n_1+n_2+n_3$ and $n_1 \geq n_2 \geq n_3$. Then the cardinalities of
color classes in the 3-coloring of $G \circ H$ form a sequence $(n_1^1,n_2^1,n_3^1)=(n_1+(n_2+n_3)t, n_2+ (n_1+n_3)t, n_3+(n_1+n_2)t)$. Such a 3-coloring of $G \circ H$ is equitable if and
only if $n_1=n_2=n_3$. This means that $G$ must have a strong equitable 3-coloring.
For greater $l$ the cardinalities of color classes in the determined 3-coloring of $G \circ ^l H$, $(n_1^l, n_2^l, n_3^l)$, can be computed from the recursion:
$$
\left\{
\begin{array}{lcl}
n_1^0 &= &n_1,\\
n_2^0& = & n_2,\\
n_3^0 & = & n_3.
\end{array}\right.
$$
For $l \geq 1$:
$$
\left\{
\begin{array}{lcl}
n_1^l & = & n_1^{l-1} + (n_2^{l-1}+n_3^{l-1})t,\\
n_2^l & = & n_2^{l-1} + (n_1^{l-1}+n_3^{l-1})t,\\
n_3^l & = & n_3^{l-1} + (n_1^{l-1}+n_2^{l-1})t.
\end{array}\right.
$$
One can observe that in the determined 3-coloring of $G \circ ^l H$ the following statements are true:
\begin{itemize}
\item the color classes with the biggest difference between their cardinalities are the classes of colors 1 and 3; in fact, the recursion gives $n_1^l - n_3^l = (n_1-n_3)(1-t)^l$,
\item the order relation between the cardinalities of color classes 1 and 3 changes alternately, namely $n_1^l \leq n_3^l$ for odd $l$, while $n_1^l \geq n_3^l$ for even $l$,
\item the absolute value of the difference between the cardinalities of color classes 1 and 3 does not decrease as $l$ goes to infinity.
\end{itemize}
Due to the above, the 3-coloring of $G \circ^l H$ is equitable only if $n_1=n_2=n_3$, which completes the proof.
\end{proof}
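A small numerical check of the recursion above (an added illustration, not part of the proof) makes the imbalance visible: for class sizes $(n_1,n_2,n_3)$ of a 3-coloring of $G$ and $H \in Q_2(t)$, the gap $|n_1^l-n_3^l| = (n_1-n_3)(t-1)^l$ never shrinks, so the induced 3-coloring is equitable only when $n_1=n_2=n_3$.
\begin{verbatim}
def class_sizes(n1, n2, n3, t, l):
    a, b, c = n1, n2, n3
    for _ in range(l):
        a, b, c = a + (b + c)*t, b + (a + c)*t, c + (a + b)*t
    return a, b, c

# e.g. a cubic G with color classes 4, 3, 3 and H = K_{3,3}, i.e. t = 3
for l in range(1, 5):
    a, b, c = class_sizes(4, 3, 3, 3, l)
    print(l, (a, b, c), abs(a - c))   # gap = (4-3)*(3-1)**l = 2**l
\end{verbatim}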
\begin{theorem}
If $G$ is an arbitrary cubic graph and $H \in Q_2$, $l \geq 1$, then
$$\chi_=(G \circ^l H)\leq 4.$$
\label{H_2leq4}
\end{theorem}
\begin{proof} Let $H \in Q_2(y)$.
\begin{description}
\item[\textnormal{\emph{Case} 1:}] $G \in Q_2(x)$
We start from an equitable 4-coloring of graph $G$ such that each of colors 1 and 3 is used $\lceil x/2\rceil$ times, while each of colors 2 and 4 is used $\lfloor x/2\rfloor$ times. Next, we color each copy of graph $H$ in $G \circ H$ with two colors in the following way:
\begin{itemize}
\item copies linked to a 1-vertex or 2-vertex in $G$ are colored with 3 and 4,
\item copies linked to a 3-vertex or 4-vertex in $G$ are colored with 1 and 2.
\end{itemize}
In this way we get an equitable 4-coloring of $G \circ^1 H$ with cardinalities of color classes 1 and 3 equal to $\lceil x/2 \rceil +xy$, and cardinalities of color classes 2 and 4 equal
to $\lfloor x/2 \rfloor +xy$.
Since $G \circ ^l H = (G \circ^{l-1} H) \circ H$, we can inductively extend the equitable 4-coloring of $G \circ^{l-1} H$ into an equitable 4-coloring of $G\circ ^l H$ by coloring each of
the uncolored copies of $H$ in $G \circ^l H$ with two colors:
\begin{itemize}
\item copies linked to a 1-vertex or 2-vertex in $G\circ ^{l-1} H$ are colored with 3 and 4,
\item copies linked to a 3-vertex or 4-vertex in $G\circ ^{l-1} H$ are colored with 1 and 2.
\end{itemize}
\item[\textnormal{\emph{Case} 2:}] $G \in Q_3$
Since the number of vertices in graph $G$ is even, we have to consider two subcases:
\begin{description}
\item[\textnormal{\emph{Subcase} 2.1:}] $n_G=4s$ \emph{for some} $s\geq 2$.
The thesis follows from Lemma \ref{lma4}.
\item[\textnormal{\emph{Subcase} 2.2:}] $n_G=4s+2$ \emph{for some} $s\geq 2$.
We start from an equitable 4-coloring of $G$ - this is possible due to inequality (\ref{prog}). Without loss of generality, we may assume that in this coloring the sets of 1- and
2-vertices contain one more vertex than the sets of 3- and 4-vertices. Let $v_1$ and $v_2$ be two vertices in $G$ with colors $1$ and $2$, respectively.
It is easy to see that $(G-\{v_1, v_2\})\circ H$ has a strong equitable $4$-coloring.
Now, we show that $G[\{v_1, v_2\}]\circ H$ is equitably $4$-colorable, where $G[\{v_1, v_2\}]$ is the subgraph of $G$ induced by vertex set $\{v_1,v_2\}$. For $i=1,2$, let $c(v_i)=i$ and let $H_i$
be a copy of $H$ linked to $v_i$. Furthermore, for $i=1,2$ let $X_i$ and $Y_i$ be the partition sets of $H_i$. Color the vertices of $X_1$ with color $2$, vertices of $Y_1$ with color $3$, vertices of $X_2$ with color $4$, and vertices of $Y_2$ with color $1$, respectively. One can easily check that this results in an equitable $4$-coloring of $G \circ H$.
An equitable 4-coloring of $G \circ ^l H$, in this subcase, follows from induction on $l$.
\end{description}
\item[\textnormal{\emph{Case} 3:}] $G \in Q_4$
The thesis follows immediately from Lemma \ref{lma4}.
\end{description}
\end{proof}
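The Case 1 rule above can be stated as a one-line assignment. The following sketch (an added illustration, not the authors' algorithm) returns, for a center vertex colored $1$ or $2$, the pair $\{3,4\}$ for the two parts of its attached copy of $H(X_1,X_2)$, and the pair $\{1,2\}$ otherwise, which is exactly what keeps the extended 4-coloring equitable.
\begin{verbatim}
def colors_for_copy(center_color):
    """Colors for the two partition sets of the copy of H attached
    to a vertex with the given color in an equitable 4-coloring."""
    return (3, 4) if center_color in (1, 2) else (1, 2)

for c in (1, 2, 3, 4):
    print(c, '->', colors_for_copy(c))
\end{verbatim}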
\subsection{Case $H \in Q_3$}
In this subsection we obtain some polynomially solvable cases concerning optimal equitable coloring of multicoronas $G \circ^l H$, where $H \in Q_3$. In the remaining cases we give
1-absolute approximation algorithms.
\begin{theorem}
Let $G$ be any cubic graph and let $H \in Q_3$. If $G$ has a strong equitable 4-coloring, then $$\chi_=(G \circ ^l H)=4$$ for any $l \geq 1$. \label{4_cub3}
\end{theorem}
\begin{proof}
It is clear that $\chi_=(G \circ^l H) \geq 4$ for $G$ and $H$ satisfying the assumptions. On the other hand, $\chi_=(G \circ^l H) \leq 4$ by Lemma \ref{lma4}, and the thesis follows.
\end{proof}
\begin{proposition}
If $G$ is a subgraph of a cubic graph on $n_G \geq 4$ vertices, where $4 \mid n_G$, and $H \in Q_3$, then there is an equitable $5$-coloring of $G \circ^l H$. \label{n4_cub3}
\end{proposition}
\begin{proof}
Let $n_G = 4x$ for some integer $x \geq 1$. First, let us notice that there is a strong equitable 4-coloring of $G \circ^l H$ due to inequality (\ref{prog}) and Corollary \ref{lma_cor}.
We color $G \circ^l H$ equitably with 4 colors in the way described in the proof of
Lemma \ref{lma4}. In such a coloring every color is used exactly $x(n_H+1)^l$ times. Now, we have to choose vertices in each of the four color classes that should be recolored to 5 so
that the resulting 5-coloring of $G \circ^l H$ is equitable.
It turns out that we can choose a proper number of $i$-vertices, $i=1,2,3,4$, to be recolored to 5 from the partition sets $X_1$ of the copies of $H(X_1,X_2,X_3)$ linked to vertices of
$G \circ^{l-1} H$ during the creation of the $l$-corona product $G \circ^l H$ from $G \circ^{l-1} H$. Moreover, we need only those copies of $H$ from this $l$-th step that are linked to vertices of $G$. Since $n_G=4x$,
we have exactly $x$ $i$-vertices in $G$ for each $i \leq 4$. We will see that at most $x|X_1|$ $i$-vertices need to be recolored to 5; we choose them from the sets $X_1$ linked
to $(i-1)$-vertices in $G$, $i=1,2,3,4$ (we use color 4 instead of color 0). To prove this, let us consider three cases.
\begin{description}
\item[\textnormal{\emph{Case} 1:}] $H(X_1,X_2,X_3) \in Q_3(t+1,t,t)$ \emph{for some odd} $t \geq 3$.
In the 4-coloring of $G \circ^l H$ each of the four colors is used $x(3t+2)$ times, while in every equitable 5-coloring of the corona each of the five colors must be used $\lceil (12xt+8x)/5\rceil=
2xt+x+\lceil (2xt+3x)/5\rceil$ or $2xt+x+\lfloor (2xt+3x)/5\rfloor$ times. This means that the number of vertices that should be recolored to 5 in each of the four color classes is
at most $$3xt+2x-2xt-x-\lfloor (2xt+3x)/5\rfloor = x(t+1) - \lfloor (2xt+3x)/5\rfloor <x(t+1) =x|X_1|.$$
\item[\textnormal{\emph{Case} 2:}] $H(X_1,X_2,X_3) \in Q_3(t+1,t+1,t)$ \emph{for some even} $t \geq 2$.
In the 4-coloring of $G \circ H$ each of the four colors is used $x(3t+3)$ times, while in every equitable 5-coloring of the corona each of the five colors must be used $\lceil (12xt+12x)/5\rceil=2xt+2x+
\lceil (2xt+2x)/5\rceil$ or $2xt+2x+\lfloor (2xt+2x)/5\rfloor$ times. This means that the number of vertices that should be recolored to 5 in each of the four color classes is
at most $$3xt+3x-2xt-2x-\lfloor (2xt+2x)/5\rfloor = x(t+1) - \lfloor (2xt+2x)/5\rfloor <x(t+1) =x|X_1|.$$
\item[\textnormal{\emph{Case} 3:}] $H(X_1,X_2,X_3) \in Q_3(t,t,t)$ \emph{for some even} $t \geq 2$.
In the 4-coloring of $G \circ H$ each of the four colors is used $x(3t+1)$ times, while in every equitable 5-coloring of the corona each of the five colors must be used $\lceil (12xt+4x)/5\rceil=2xt+
\lceil (2xt+4x)/5\rceil$ or $2xt+\lfloor (2xt+4x)/5\rfloor$ times. This means that the number of vertices that should be recolored to 5 in each of the four color classes is
at most $$3xt+x-2xt-\lfloor (2xt+4x)/5\rfloor =x(t+1) - \lfloor (2xt+4x)/5\rfloor \leq xt =x|X_1|.$$
\end{description}
This completes the proof.
\end{proof}
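The counting in the three cases above can also be verified numerically. The following snippet is only a sanity check of the displayed inequalities and is not part of the argument; for each case it confirms that the per-class number of vertices to be recolored to 5 never exceeds the available capacity $x|X_1|$.
\begin{verbatim}
# Sanity check of the three counting cases (illustration only).
def recolor_demand(x, t, case):
    """Per-class number of vertices to recolor to 5 and the capacity x*|X_1|."""
    if case == 1:                                  # H in Q_3(t+1,t,t), t odd
        return x*(t+1) - (2*x*t + 3*x)//5, x*(t+1)
    if case == 2:                                  # H in Q_3(t+1,t+1,t), t even
        return x*(t+1) - (2*x*t + 2*x)//5, x*(t+1)
    return x*(t+1) - (2*x*t + 4*x)//5, x*t         # H in Q_3(t,t,t), t even

for x in range(1, 30):
    for t in range(2, 50):
        for case in ([1] if t % 2 else [2, 3]):
            demand, capacity = recolor_demand(x, t, case)
            assert 0 <= demand <= capacity
\end{verbatim}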
\begin{theorem}
If $G$ is a cubic graph on $n_G \geq 8$ vertices and $H \in Q_3$, then $$\chi_=(G \circ ^l H) \leq 5.$$ \label{5_cub3}
\end{theorem}
\begin{proof}
If $5 \mid n_G$, then $G$ has a strong equitable 5-coloring (due to inequality (\ref{prog})) and the thesis follows from Lemma \ref{lma} for $k=5$. It remains to consider the cases where
$n_G \bmod 5 \neq 0$.
\begin{description}
\item[\textnormal{\emph{Case} 1:}] $n_G \bmod 5 = 1$ \emph{and} $n_G \geq 16$.
We start from a semi-equitable 5-coloring of the cubic graph $G$ of type $[(n_G+4)/5,(n_G+4)/5,(n_G+4)/5,(n_G+4)/5,(n_G-16)/5]$; this is possible due to Theorem \ref{semi} for $k=5$. Next, we choose
four 1-vertices, four 2-vertices, four 3-vertices, and four 4-vertices from the center graph $G$. They form a set $V^{16}(G)$. We consider the subgraph of $G$ induced by this vertex set, the subcubic graph $G[V^{16}]$, and the
corona graph $G[V^{16}]\circ^l H$, which is a subgraph of $G \circ^l H$. The graph $G[V^{16}]\circ^l H$ has an equitable 5-coloring due to Proposition \ref{n4_cub3}. Note that this equitable 5-coloring of the
$l$-corona, described in the proof of Proposition \ref{n4_cub3}, starts from a strong equitable 4-coloring of the center graph. Thus it is possible to extend the strong 4-coloring of $G[V^{16}]$ into an equitable 5-coloring of
$G[V^{16}]\circ^l H$. Next, we consider the subgraph $G[V\backslash V^{16}]$, which is strongly equitably 5-colored, as the center graph of the $l$-corona $G[V\backslash V^{16}] \circ^l H$. Due to Lemma \ref{lma} for $k=5$,
$G[V\backslash V^{16}] \circ^l H$ has an equitable 5-coloring, and this coloring is strong equitable. Furthermore, this coloring is based on the strong equitable 5-coloring of
$G[V\backslash V^{16}]$. This means that the equitable 5-colorings of $G[V^{16}]\circ^l H$ and $G[V\backslash V^{16}] \circ^l H$ may be combined into one proper equitable 5-coloring
of $G \circ^l H$.
\item[\textnormal{\emph{Case} 2:}] $n_G \bmod 5 = 2$.
The idea is similar to that presented in the previous case. This time we start from a semi-equitable 5-coloring of $G$ of type $[(n_G+3)/5,(n_G+3)/5,(n_G+3)/5,(n_G+3)/5,(n_G-12)/5]$;
this is possible due to Theorem \ref{semi}.
Analogously, we choose three 1-vertices, three 2-vertices, three 3-vertices, and three 4-vertices from the graph $G$. They form a set $V^{12}(G)$. First we extend the coloring of $G$ into
$G[V^{12}] \circ^l H$, and then into $G[V \backslash V^{12}] \circ^l H$. Finally, we obtain an equitable 5-coloring of $G \circ^l H$.
\item[\textnormal{\emph{Case} 3:}] $n_G \bmod 5 = 3$.
This time we start from a semi-equitable 5-coloring of $G$ of type $[(n_G+2)/5,(n_G+2)/5,(n_G+2)/5,(n_G+2)/5,(n_G-8)/5]$ (possible due to Theorem \ref{semi}). Next, we choose, analogously
to the previous case, 8 vertices of $G$, forming the set $V^8(G)$. We extend the coloring of $G$ into
$G[V^{8}] \circ^l H$, and then into $G[V \backslash V^{8}] \circ^l H$. Finally, we obtain an equitable 5-coloring of $G \circ^l H$.
\item[\textnormal{\emph{Case} 4:}] $n_G \bmod 5 = 4$.
In the last case we start from a semi-equitable coloring of $G$ of type $[(n_G+1)/5,(n_G+1)/5,(n_G+1)/5,(n_G+1)/5,(n_G-4)/5]$. We choose one vertex of each color $i$, $1 \leq i \leq 4$, from the graph $G$.
These vertices form the set $V^4$. First, we extend the coloring of $G[V^4]$ into an equitable 5-coloring of $G[V^4] \circ^l H$, in the way described in the proof of Proposition \ref{n4_cub3}.
Next, we extend the strong equitable 5-coloring of $G[V \backslash V^4]$ into a strong equitable 5-coloring of $G[V \backslash V^4] \circ^l H$ (using the method described in the proof of Lemma
\ref{lma}). Finally, we obtain an equitable 5-coloring of
$G \circ^l H$.
\end{description}
\end{proof}
\subsection{Case $H=K_4$}
\begin{proposition}[\cite{hf}]
If $G$ is a graph with $\chi\left(G\right)\leq m+1$, then $\chi_{=}(G \circ K_m)= m+1$. \label{complete}
\end{proposition}
\begin{theorem}
If $G$ is cubic and $l \geq 1$, then $$\chi_=(G\circ^l K_4)=5.$$ \label{k4}
\end{theorem}
\begin{proof}
By Brooks' theorem \cite{brooks}, the following inequalities hold for every cubic graph $G$:
$$ 2 \leq \chi(G) \leq 4.$$
Hence a cubic graph $G$ fulfills the assumption of Proposition \ref{complete} for $m=4$ and we have $\chi_=(G \circ K_4)=5$. As $G \circ^l K_4 = (G \circ^{l-1} K_4) \circ K_4$ and $\chi_=(G \circ^2 K_4)=\chi_=(G \circ^3 K_4)=\cdots = \chi_=(G \circ^{l-1} K_4)=5$, we immediately get the thesis.
\end{proof}
\section{Conclusion}\label{sum}
In the paper we have given some results concerning the equitable coloring of $l$-corona products $G \circ^l H$, where $G$ and $H$ are cubic graphs. Our main results are summarized in
Table \ref{tabela1}. In the table the entry '$3$ or $4$' means that we have identified all the cases for which $\chi_=(G \circ^l H)=3$ and/or $\chi_=(G \circ^l H)=4$. The entry '$\leq 5$'
means merely that $\chi_=(G \circ^l H) \leq 5$.
\begin{table}[htb]
\begin{center}
\begin{tabular}{|c|*{3}{c|}}\hline
\backslashbox[20mm]{$G$}{$H$} & $Q_2$ & $Q_3$ & $Q_4$\\ \hline
\multirow{2}{*}{$Q_2(t)$} & \multirow{2}{*}{3 or 4 \scriptsize{[Thm. \ref{3col}, \ref{H_2leq4}]}} & 4 for $t$ even \scriptsize{[Thm. \ref{4_cub3}]} &\multirow{2}{*}{5 \scriptsize{[Thm. \ref{k4}]}} \\
& & $\leq 5$ for $t$ odd \scriptsize{[Thm. \ref{5_cub3}]} & \\\hline
$Q_3$ & 3 or 4 \scriptsize{[Thm. \ref{3col}, \ref{H_2leq4}]} & $\leq 5^*$ \scriptsize{[Thm. \ref{5_cub3}]} &5 \scriptsize{[Thm. \ref{k4}]} \\ \hline
$Q_4$ & 4 \scriptsize{[Thm. \ref{H_2leq4}]} & 4 \scriptsize{[Thm. \ref{4_cub3}]}& 5 \scriptsize{[Thm. \ref{k4}]} \\\hline
\end{tabular}
\small{*: we remind the reader of the case $n_G=6$. One should check that the bound also holds for such center graphs $G$ (there are only two cubic graphs on 6 vertices).}
\caption{Possible values of $\chi_=(G \circ ^l H)$ for cubical multicoronas.}\label{tabela1}
\end{center}
\end{table}
Note that our results confirm the Equitable Coloring Conjecture, posed by Meyer \cite{meyer} in 1973, for the graphs under consideration.
What about the complexity of equitable coloring of cubical multicoronas? From \cite{harder} we know that this problem is NP-hard for coronas $G \circ^l H$ with $l=1$. We leave as an open question
whether this result can be extended to arbitrary cubical multicoronas $G \circ^l H$, $l \geq 2$.
We know that the ordinary coloring of cubical multicoronas can be determined in polynomial time. The exact values
of the ordinary chromatic number of the $l$-corona products under consideration are given in Table \ref{tabela2}. The appropriate coloring of $G \circ H$ is obtained by coloring $G$ with
$\chi(G)$ colors and extending this coloring to the copies of the $r$-partite cubic graph $H$: the copy linked to an $i$-vertex of $G$ has its $r$ partition sets colored with $(i+1) \bmod \chi(G\circ^1 H),
\ldots, (i+r) \bmod \chi(G\circ^1 H)$, respectively (we use color $\chi(G \circ^1 H)$ instead of color 0). Such a coloring can be extended to the copies of $H$ for larger $l$ in a
similar way.
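The extension rule just described can be phrased as a short procedure. The sketch below is only an illustration (the function name and the toy data are chosen for demonstration, not part of the construction); it assumes that the number of colors $m=\chi(G\circ^1 H)$ satisfies $m\geq r+1$, which holds for all entries of Table \ref{tabela2}.
\begin{verbatim}
# Sketch of the chromatic coloring of the copies of H (illustration only).
def extend_coloring(center_colors, partition_sizes, m):
    """center_colors: proper coloring (1..m) of the center graph's vertices;
    partition_sizes: sizes of the r partition sets of H;
    m: number of colors, m = chi(G o H).
    Returns, for every center vertex, the colors used on its copy of H."""
    copies = []
    for i in center_colors:
        copy_colors = []
        for j, size in enumerate(partition_sizes, start=1):
            c = (i + j) % m
            if c == 0:
                c = m                     # use color m instead of color 0
            copy_colors.extend([c] * size)
        copies.append(copy_colors)
    return copies

# toy example: center graph K_4 colored 1,2,3,4 and H = K_{3,3}
# (a 2-partite cubic graph); here m = chi(G o H) = 4
print(extend_coloring([1, 2, 3, 4], [3, 3], 4))
\end{verbatim}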
\begin{table}[htb]
\begin{center}
\begin{tabular}{|c|*{3}{c|}}\hline
\backslashbox[20mm]{$G$}{$H$} & $Q_2$ & $Q_3$ & $Q_4$\\ \hline
$Q_2$ & 3 & 4 & 5 \\ \hline
$Q_3$ & 3 & 4 & 5 \\ \hline
$Q_4$ & 4 & 4 & 5 \\ \hline
\end{tabular}
\caption{The exact values of $\chi(G \circ ^l H)$ for cubical multicoronas.}\label{tabela2}
\end{center}
\end{table}
A simple comparison of Tables \ref{tabela1} and \ref{tabela2} leads to the conclusion
that our results miss the exact values by at most one color.
\end{document}
\begin{document}
\maketitle
\begin{abstract}Let $X$ be the intersection in $\mathbb{R}P^n$ of $k$ quadrics, i.e. the zero locus of the homogeneous, degree two polynomials $q_1, \ldots, q_k$. Let also $W$ be the span of these polynomials in the space of all homogeneous degree two polynomials and for every $r\geq 0$ let $$\Sigma_W^{(r)}=\{q\in W\backslash\{ 0\}\,|\, \dim\ker (q)\geq r\}.$$
Notice that $\Sigma_W^{(1)}$ equals the (spherical) intersection of $W$ with the discriminant hypersurface in the space of quadratic polynomials; moreover for $r\geq 2$ and $W$ generic $\Sigma_W^{(r)}=\textrm{Sing}\big(\Sigma_W^{(r-1)}\big).$\\
We prove that for a generic choice of $q_1, \ldots, q_k$ the following formula holds for the total Betti number of $X$:
\begin{equation}\label{eqabstract}b(X)\leq b(\mathbb{R}P^n)+\frac{1}{2}\sum_{r\geq 1}b\big(\Sigma_W^{(r)}\big)\end{equation}
In the case we remove the nondegeneracy hypothesis the previous formula remains valid upon substitution of $\Sigma_W$ with a perturbation of it obtained by translating $W$ in the direction of a small negative definite quadratic form. The previous sum (both in the generic and the general case) contains at most $O(k)^{1/2}$ summands, as the sets $\Sigma_W^{(r)}$ turn out to be empty for $\binom{r+1}{2}\geq k.$ We study the topology of symmetric determinantal varieties, like the above $\Sigma_W^{(r)}$, and bound their homological complexities, with particular interest in those defined on a sphere. Using formula (\ref{eqabstract}) and the results on the complexity of determinantal varieties, we prove the sharp bound:
$$b(X)\leq O(n)^{k-1}$$
thus improving Barvinok-style bounds (recall that the best previously known bound, due to Basu, has the shape $O(n)^{2k+2}$).
\end{abstract}
\section{Introduction}
This paper addresses the question of bounding the topology of the set:
$$X=\textrm{intersection of $k$ quadrics in $\mathbb{R}P^n$}$$
More specifically we are interested in finding a bound for its \emph{homological complexity} $b(X)$, namely the sum of its Betti numbers\footnote{From now on, unless differently specified, all homology and cohomology groups are assumed to be with coefficients in $\mathbb{Z}_2.$}.\\
The problem of bounding the topology of semialgebraic sets belonging to some specified family dates back to the work of J. Milnor, who proved that a semialgebraic set $S$ defined in $\mathbb{R}^n$ by $s$ polynomial inequalities of degree at most $d$ has complexity bounded by $b(S)\leq O(sd)^n$.
What is special about sets defined by \emph{quadratic inequalities} is that the two numbers $s$ and $n$ can be exchanged in the previous bound, giving A. Barvinok's bound $b(S)\leq n^{O(s)}.$\\
This kind of \emph{duality} between the variables and the equations in the quadratic case is the leading theme of this paper, as we will discuss in a while.\\
Barvinok's bound, and its subsequent improvements made by M-F. Roy, S. Basu and D. Pasechnik in \cite{BaPaRo} and by M. Kettner and S. Basu in \cite{BaKe}, concern sets defined by $s$ quadratic \emph{inequalities}. The most refined estimate for the complexity of such sets is polynomial in $n$ of degree $s$, but since we need two inequalities to produce an equality, this bound when applied to the set $X$ of our interest produces:
$$\textrm{Basu's bound:}\quad b(X)\leq O(n)^{2k+2}$$
In this paper we focus on the algebraic case rather than the semialgebraic one. From the viewpoint of classical algebraic geometry our problem can be stated as follows. We are given a linear system $W$ of real quadrics, i.e. the span of $k$ quadratic forms in the space of all homogeneous degree two polynomials, and we consider the base locus $X=X_W$ of $W$, i.e. the set of points in $\mathbb{R}P^n$ where all these forms vanish. What we consider to be the \emph{dual} object to the previous one is the set $\Sigma_W$ of critical points of $W$, i.e. the set consisting of those nonzero elements in $W$ that are degenerate. More generally we can consider the set $\Sigma_W^{(r)}$ of those nonzero quadratic forms in $W$ whose kernel has dimension at least $r$. Using this notation one of the main results of this paper is the following formula, which holds in the case $W$ is generic:
\begin{equation}\label{introboundgeneric}b(X_W)\leq b(\mathbb{R}P^n)+\frac{1}{2}\sum_{r\geq1} b\big(\Sigma_W^{(r)}\big).\end{equation}
The previous sum is finite, since for a generic $W$ the set $\Sigma_W^{(r)}$ is empty for $\binom{r+1}{2}\geq k$. In fact we notice that for every natural $r$ and a generic $W$ the set $\Sigma_{W}^{(r+1)}$ coincides with the set of singular points of $\Sigma_W^{(r)}$; the codimension of this singular locus is exactly $\binom{r+1}{2}$, thus it is empty for $r>\frac{1}{2}(-1+\sqrt{8k-7}).$ If we remove the genericity assumption a similar formula can be proved, but a \emph{perturbation} of $\Sigma_W$ is introduced. More specifically we have to translate $W$ in the direction of a small negative definite quadratic form $-\epsilon q$, getting in this way an \emph{affine} space $W_\epsilon=W-\epsilon q$. We consider then the unit sphere in $W_\epsilon$ and the set $\Sigma_\epsilon^{(r)}$ of quadratic forms on this sphere where the kernel has dimension at least $r.$ The following formula holds (now for \emph{any} $X$):
\begin{equation}\label{introbound}b(X)\leq b(\mathbb{R}P^n)+\frac{1}{2}\sum_{r\geq1}b\big(\Sigma_\epsilon^{(r)}\big)\end{equation}
The same remark on codimensions as above applies here and this sum is actually finite, containing no more than $O(k)^{1/2}$ summands. For a generic choice of $W$ these two constructions coincide, since the set $\Sigma_W$ deformation retracts onto its intersection with any unit sphere in $W$. We will adopt this notation in the sequel and think of $\Sigma_W$ as its homotopy equivalent intersection with a sphere.\\
Notice that from the point of view of \emph{complex} algebraic geometry the bound of (\ref{introboundgeneric}), even translated to the complex setting, does not say anything interesting since the generic case coincides with the complete intersection one, whose topology is determined. On the contrary in the real setting the topology of $X$, even a smooth one, can vary dramatically and our bound becomes more effective.
\begin{center}
\scalebox{0.6}
{
\begin{pspicture}(0,-6.05)(21.391895,6.05)
\pspolygon[linewidth=0.04,fillstyle=solid](7.166867,-4.3596473)(1.0786866,-4.340118)(4.0778604,-0.0593187)
\pspolygon[linewidth=0.04,fillstyle=solid](0.0,-4.7502356)(7.26,-4.77)(7.8798966,4.9949408)(0.6033332,4.9949408)
\pspolygon[linewidth=0.04,fillstyle=solid](0.93242395,4.2528234)(7.067329,4.28489)(4.0778604,-0.0593187)
\psbezier[linewidth=0.04,fillstyle=solid](0.90721154,4.47054)(0.8072115,3.0053408)(7.1126237,3.0596006)(7.1126237,4.5448003)(7.1126237,6.03)(1.0072116,5.9357395)(0.90721154,4.47054)
\psline[linewidth=0.04cm](1.2066664,4.9949408)(6.8926244,4.9949408)
\psline[linewidth=0.03cm,linestyle=dashed,dash=0.16cm 0.16cm](1.243232,5.014471)(4.0770698,-0.04364715)
\psline[linewidth=0.03cm,linestyle=dashed,dash=0.16cm 0.16cm](6.8560586,4.9949408)(4.0770698,-0.04364715)
\psline[linewidth=0.04cm](4.0770698,-0.06317658)(6.691513,-4.7502356)
\psline[linewidth=0.03cm,linestyle=dashed,dash=0.16cm 0.16cm](4.0770698,-0.06317658)(0.8592927,-4.711177)
\psline[linewidth=0.03cm,linestyle=dashed,dash=0.16cm 0.16cm](0.98,4.17)(7.22,-4.4300003)
\psline[linewidth=0.04cm](4.0770698,-0.06317658)(1.4626259,-4.7502356)
\psbezier[linewidth=0.04,fillstyle=solid](1.46,-4.73)(1.5,-6.03)(6.68,-5.9700003)(6.68,-4.7183723)
\psline[linewidth=0.04cm,arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.4]{->}(4.08,-0.09000007)(7.74,2.6699998)
\psline[linewidth=0.04cm,arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.4]{->}(4.12,-0.11000008)(7.54,-0.51000005)
\usefont{T1}{ptm}{m}{n}
\rput(8.29291,2.8149998){$q_1$}
\usefont{T1}{ptm}{m}{n}
\rput(8.17291,-0.4650001){$q_2$}
\usefont{T1}{ptm}{m}{n}
\rput(0.73291016,-3.265){$W$}
\usefont{T1}{ptm}{m}{n}
\rput(2.77291,-3.825){$\Sigma_W$}
\usefont{T1}{ptm}{m}{n}
\rput(4.81291,3.0549998){$\Sigma$}
\pscircle[linewidth=0.04,dimen=outer](15.42,-0.1300001){2.44}
\psline[linewidth=0.04cm,linestyle=dashed,dash=0.16cm 0.16cm](13.06,3.85)(17.74,-4.07)
\psline[linewidth=0.04cm,linestyle=dashed,dash=0.16cm 0.16cm](17.74,3.8899999)(13.18,-3.99)
\psdots[dotsize=0.12](14.2,1.9499999)
\psdots[dotsize=0.12](16.62,1.9499999)
\psdots[dotsize=0.12](16.66,-2.23)
\psdots[dotsize=0.12](14.22,-2.23)
\usefont{T1}{ptm}{m}{n}
\rput(18.99291,3.8149998){$W$}
\usefont{T1}{ptm}{m}{n}
\rput(19.481455,-3.2450001){$W\cap\{\ker(q)\neq 0\}$}
\usefont{T1}{ptm}{m}{n}
\rput(13.57291,-2.305){$\Sigma_W$}
\psline[linewidth=0.04cm,linestyle=dashed,dash=0.16cm 0.16cm](1.46,-4.75)(6.66,-4.79)
\end{pspicture}
}
\end{center}
The previous bounds (\ref{introboundgeneric}) and (\ref{introbound}) are just the mirror of the mentioned duality between the equations and the variables; the real tool encoding this duality is a spectral sequence introduced by A. A. Agrachev in \cite{Agrachev1} and developed by him and the author in \cite{AgLe}.\\
The power of these bounds is that they are intrinsic and different $X$ might produce different ones: for example a set $W$ whose nonzero forms have constant rank has base locus with complexity bounded by $b(\mathbb{R}P^n)$. On the other hand these bounds are sufficiently general to produce sharp numerical estimates. Indeed using them we can get the following bound, which improves Basu's one:
\begin{equation}\label{intronum}b(X)\leq O(n)^{k-1}\end{equation}
This estimate is sharp in the following sense: if we let $B(k,n)$ be the maximum of the complexities over \emph{all} intersections of $k$ quadrics in $\mathbb{R}P^n$, then:
\begin{equation}\label{B}B(k,n)=O(n)^{k-1}\end{equation}
The upper bound for $B(k,n)$ is provided by (\ref{intronum}) and the lower one by the existence of a maximal real complete intersection, i.e. a complete intersection $M$ of $k$ real quadrics in $\mathbb{C}P^n$ satisfying $b(M_\mathbb{R})=b(M).$ Such a complete intersection for $k\geq 2$ has the property:
$$b(M_\mathbb{R})=c_kn^{k-1}+O(n)^{k-2}$$
(the smooth nonsingular quadric in $\mathbb{C}P^n$ has total Betti number equal to $n+\frac{1}{2}(1+(-1)^{n+1})).$
The problem of determining the leading coefficient $c_k$ is far from trivial; we list as an example the first small values of $c_k$ starting from $k=2:$
$$2, 1, \frac{2}{3}, \frac{1}{3}, \frac{2}{15}, \ldots$$
For small values of $k$ the leading coefficient we get by expanding the r.h.s. of (\ref{introboundgeneric}) in $n$ is the same as the complete intersection one. This provides:
$$B(1,n)=n,\quad B(2,n)=2n,\quad B(3,n)=n^2+O(n)$$
We conjecture that in general for $k\geq 2$ we have $B(k,n)=c_kn^{k-1}+O(n^{k-2}).$ The way this conjecture can be tackled, and indeed the way we produced the numerical bound (\ref{intronum}), is the study of the topology of symmetric determinantal varieties. In fact each set $\Sigma_\epsilon^{(r)}$ is defined by the vanishing of some minors of a symmetric matrix depending on parameters (in our case the parameter space is the unit sphere in $W_\epsilon$). The geometry of symmetric determinantal varieties over the complex numbers was studied in \cite{HarrisTu}, where the degrees of such varieties were explicitly computed. Here we do not need this degree computation; though we use the fact that determinantal varieties are defined by (possibly many) polynomials of small degree. Such a property, combined with Milnor's classical bound, produces the general estimate:
\begin{equation}\label{sigmabound}b\big(\Sigma_\epsilon^{(r)}\big)\leq (2n)^{k-1}+O(n)^{k-2}\end{equation}
Notice that if we plug this into (\ref{introboundgeneric}) we immediately get $B(k,n)\leq O(n)^{k-1}$ (this follows at once using the fact that there are less than $O(k)^{1/2}$ terms in the sum we consider). On the other hand such algebraic sets, among those defined by polynomials of degree less than $n$, are very special. For example they have unavoidable singularities; that is the reason for the appearance of higher order terms in (\ref{introboundgeneric}). This is why we expect the leading coefficient of the bound (\ref{sigmabound}) not to be optimal. In fact for $k=1,2,3$ we bounded the complexities of these varieties with a direct argument, getting the optimal coefficient.
\\As an example of these ideas we compute the cohomology of the set $\Sigma$ of nonzero symmetric matrices with zero determinant. This set coincides with the discriminant hypersurface of homogeneous polynomials of degree two. The degree of this hypersurface is $n$ and Milnor's bound gives $b(\Sigma)\leq O(n)^{\binom{n+1}{2}}.$ However $\Sigma$ happens to be Spanier-Whitehead dual to a disjoint union of Grassmannians and:
\begin{equation}\label{discriminant}H^*(\Sigma)\simeq \bigoplus_{j=0}^nH_*(\textrm{Gr}(j,n))\end{equation}
In particular the complexity of $\Sigma$ is exactly $2^n,$ a number which is much smaller than Milnor's prediction.\\
The paper is organized as follows. Section 2 gives an account of the known numerical bounds present in the literature. Section 3 introduces the spectral sequence approach, from which it is shown to be possible to recover Barvinok's bound. Section 4 deals with symmetric determinantal varieties and contains the proof of (\ref{discriminant}). Section 5 is the technical bulk of the paper and deals with the transversality arguments needed in order to prove (\ref{introbound}). Section 6 contains the proof of (\ref{introboundgeneric}) and (\ref{introbound}). Section 7 contains the proof of the numerical translation (\ref{intronum}) of the previous bounds as well as the discussion on its sharpness. Section 8 discusses some examples.\\
From now on all algebraic sets are assumed to be \emph{real} (in particular projective spaces and Grassmannians are the real ones) unless differently specified. Similarly all homology and cohomology groups are with coefficients in $\mathbb{Z}_2$.
\section{Complexity of intersections of real quadrics}
The aim of this section is to review the numerical bounds that can be derived from the literature for the homological complexity of $X$.\\
The first result is due to J. Milnor, who proved that if $Y$ is an algebraic set defined by homogeneous polynomials of degree at most $d$ in $\mathbb{R}P^n$, then $b(Y)\leq n d(2d-1)^{n-1}$ (this is the content of Corollary 3 of \cite{Milnor}). If we apply this bound to the set $X$ we immediately get the following estimate
$$\textrm{Milnor's bound:}\quad b(X)\leq 2n 3^n$$
What is interesting about this bound is that it does not depend on the \emph{number} of equations defining $Y$ (respectively $X$), but only on their degrees.\\
From a different perspective, A. Barvinok studied the complexity of basic semialgebraic subsets of $\mathbb{R}^n$ defined by a fixed number of inequalities of degree at most two. Using the main result from \cite{Ba} we can derive another bound, whose shape is different from the previous one\footnote{According to \cite{BPR} in this context the notation $f(l)=O(l)$ means that there exists a natural number $b$ such that the inequality $f(l)\leq bl$ holds for every $l\in \mathbb{N}$.}
$$\textrm{Barvinok's bound:}\quad b(X)\leq n^{O(k)}$$
\begin{proof}Theorem (1.1) of \cite{Ba} states the following: for every $k\in \mathbb{N}$ there exists a polynomial $P_k$ of degree $O(k)$ such that for any semialgebraic set $Y$ defined by $k$ inequalities of degree at most two in $\mathbb{R}^n$ we have $b(Y)\leq P_k(n)=n^{O(k)}.$\\
We decompose our $X$ into its affine part $A$ and its part at infinity $B$ and we use a Mayer-Vietoris argument. More specifically we let $A=X\cap \{x_0\neq 0\}$ and $B=X\cap \{x_0^2\leq \epsilon\}$. Now $A$ is defined by $2k$ quadratic inequalities in $\mathbb{R}^n$ (each equation is equivalent to a pair of inequalities) and $B$ by $k$ quadratic equations in $\mathbb{R}P^n$ (in fact this set for small $\epsilon$ deformation retracts to $X\cap\{x_0=0\}$). The intersection $A\cap B$ is defined in $\mathbb{R}^n$ by $2k+1$ quadratic inequalities: those defining $A$ plus the one defining a big ball. We apply now Theorem (1.1) of \cite{Ba} to $A$ and $A\cap B$ to get a bound of the form $n^{O(k)}$ for their total Betti numbers. Induction on $n$ and the Mayer-Vietoris long exact sequence of the semialgebraic pair $(A,B)$ finally give:
$$b(X)\leq b(A)+b(B)+b(A\cap B)\leq n^{O(k)}$$ \end{proof}
The subtlety about the previous bound is the implied constant in its definition: indeed in Theorem (1.1) of \cite{Ba} this implied constant is at least two. This provides an implied constant of at least \emph{four} in Barvinok's bound. The work \cite{BaKe} of S. Basu and M. Kettner provides a better estimate for this constant:
$$\textrm{Basu's bound}:\quad b(X)\leq O(n)^{2k+2}$$
\begin{proof}Corollary 1.7 of \cite{BaKe} states the following: let $S$ be a semialgebraic subset of $\mathbb{R}^n$ defined by $k$ quadratic inequalities. Then
$$b(S)\leq \frac{n}{2}\sum_{j=0}^k \binom{k}{j} \binom{n+1}{j}2^j=s(k,n)$$
Let us show first that $s(k,n)$ behaves asymptotically as $O(n)^{k+1}$; indeed let us prove that $\lim_n \frac{\log s(k,n)}{\log n}=k+1$.
Notice first that for every $k$ there exists $C_k>0$ such that for every $n$
$$\binom{n+1}{k}\leq \sum_{j=0}^k\binom{k}{j} \binom{n+1}{j}2^j\leq C_k \binom{n+1}{k}$$
The existence of such a $C_k$ is due to the fact that the number of terms we are adding and the numbers $\binom{k}{j}$ do not depend on $n$ but only on $k$.
Using Stirling's asymptotic at infinity $n!\sim \sqrt{2\pi n}(\frac{n}{e})^n$ we can write:
$$\binom{n+1}{k}\sim \frac{1}{k! e^{k}}\frac{(n+1)^{n+1}}{(n+1-k)^{n+1-k}}\sqrt{\frac{n+1}{n+1-k}}\sim A_k n^{k}$$
for some constant $A_k>0$. The inequalities $\frac{n}{2}\binom{n+1}{k}\leq s(k,n)\leq \frac{C_k n}{2}\binom{n+1}{k}$ and the previous asymptotic immediately give the limit.\\
Proceeding now as in the proof of Barvinok's estimate, i.e. decomposing $X$ into its affine and infinity part and using Mayer-Vietoris bounds, provides the result.
\end{proof}
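As a quick numerical illustration of the asymptotics above (not part of the proof), the ratio $\log s(k,n)/\log n$ indeed approaches $k+1$:
\begin{verbatim}
# Numerical check that log s(k,n) / log n tends to k+1 (illustration only).
from math import comb, log

def s(k, n):
    # s(k,n) = (n/2) * sum_j C(k,j) C(n+1,j) 2^j, as in the proof above
    return n * sum(comb(k, j) * comb(n + 1, j) * 2**j for j in range(k + 1)) / 2

for k in (2, 3, 4):
    print(k, [round(log(s(k, n)) / log(n), 2) for n in (10**2, 10**4, 10**6)])
\end{verbatim}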
The bound provided in \cite{BaKe} is the best known for semialgebraic sets defined by quadratic \emph{inequalities}. Surprisingly enough it turns out that in the special case of our interest, i.e. algebraic sets, the exponent of Basu's bound can be lowered to $k-1$. This will be a straightforward consequence of a deeper approach for bounding the topology of $X$ with the complexity of some determinantal varieties associated (and in a certain sense \emph{dual}) to it. This approach is based on a spectral sequence argument and has strong consequences beyond the framework of bounding the topology of $X$.
\section{The spectral sequence approach}
In this section we will discuss a different approach for the study of the intersection of real quadrics. This was first introduced by A. A. Agrachev in \cite{Agrachev1} and \cite{Agrachev2} for the nonsingular case and then extended in \cite{AgLe} for the general case.\\
Let $\mathcal{Q}_{n}$ denote the vector space of homogeneous polynomials of degree two in $n$ variables, i.e. the space of quadratic forms over $\mathbb{R}^n$. Then $X$ is the zero locus in the projective space of the elements $q_1, \ldots, q_k \in\mathcal{Q}_{n+1}$ and we consider the \emph{linear system}\footnote{In classical algebraic geometry $X$ is referred to as the base locus of $W$.} defined by these elements:
$$W=\textrm{span}\{q_1, \ldots, q_k\} \subset \mathcal{Q}_{n+1}$$
For a given quadratic form $p\in \mathcal{Q}_{n+1}$ we denote by $\mathrm{i}^{+}(p)$ its positive inertia index, namely the maximal dimension of a subspace of $\mathbb{R}^{n+1}$ such that the restriction of $p$ to it is positive definite. The idea of the spectral sequence approach is to replace the geometry of $X$ with the one of the restriction of the function $\mathrm{i}^+$ to $W$. More precisely let us consider the sets:
$$W^j=\{q\in W \,|\, \mathrm{i}^+ (q)\geq j\},\quad j\geq1$$
Notice that none of these sets contains zero and all of them are invariant under multiplication by a positive number, hence they deformation retract onto their intersections with a unit sphere in $W$ (with respect to any scalar product):
$$\Omega^j=W^j \cap \{\textrm{any unit sphere in $W$}\},\quad j\geq1$$
Even if it is not canonical (it depends on the choice of a scalar product on $W$), sometimes it is more convenient to use this family rather than the previous one; notice though that different scalar products will produce homeomorphic families. The spirit of this approach is to exploit the relation between $X$ and the filtration
$$\Omega^{n+1}\subseteq \Omega^n \subseteq \cdots\subseteq \Omega^2\subseteq \Omega^1$$
This is made precise by a Leray spectral sequence argument, and produces the following
\begin{teo}$$\emph{\textrm{Agrachev's bound}}:\quad b(X)\leq n+1+\sum_{j\geq0}b(\Omega^{j+1})$$
\end{teo}
\begin{proof}Let $S_W$ be any unit sphere in $W$ and consider the topological space $B=\{(\omega,[x])\in S_W\times \mathbb{R}P^{n}\,|\, (\omega q)(x)>0\}$ together with its two projections $p_{1}:B\to S_W$ and $p_{2}:B\to \mathbb{R}P^{n}.$ The image of $p_{2}$ is easily seen to be $\mathbb{R}P^{n}\backslash X$ and the fibers of this map are contractible sets, hence $p_{2}$ gives a homotopy equivalence $B\sim\mathbb{R}P^{n}\backslash X.$ Consider now the projection $p_{1};$ for a point $\omega \in S_W$ the fiber $p_{1}^{-1}(\omega)$ has the homotopy type of a projective space of dimension $\mathrm{i}^{+}(\omega q)-1$, thus the Leray spectral sequence for $p_{1}$ converges to $H^{*}(\mathbb{R}P^{n}\backslash X)$ and has the second term $E_{2}^{i,j}$ isomorphic to $H^{i}(\Omega^{j+1})$. A detailed proof of the previous statements can be found in \cite{AgLe}. Since $\textrm{rk}(E_{\infty})\leq \textrm{rk}(E_{2})$ then $b(\mathbb{R}P^{n}\backslash X)\leq \sum_{j\geq0} b(\Omega^{j+1})$. Recalling that by Alexander-Pontryagin duality $H_{n-*}(X)\simeq H^{*}(\mathbb{R}P^{n},\mathbb{R}P^{n}\backslash X),$ then the exactness of the long cohomology exact sequence of the pair $(\mathbb{R}P^{n},\mathbb{R}P^{n}\backslash X)$ gives the desired inequality. \end{proof}
It is interesting to notice that Agrachev's bound implies Barvinok's one. In fact let us fix a scalar product on $\mathbb{R}^{n+1}$; then the rule $\{\forall x\in \mathbb{R}^{n+1}\,:\,\langle x, Q x\rangle =q(x)\}$ defines a symmetric matrix $Q$ whose number of positive eigenvalues equals $\mathrm{i}^{+}(q).$ Consider the polynomial
$$f(t,Q)=\det (Q-tI)=a_{0}(Q)+\cdots+a_{n}(Q)t^{n}\pm t^{n+1}$$ defined over $\mathbb{R} \times W=\mathbb{R}\times \textrm{span}\{Q_1, \ldots, Q_k\}$. Since all the roots of $f(\cdot,Q)$ are real, by Descartes' rule of signs the positive inertia index of $Q$ is given by the number of sign variations in the sequence of coefficients $(a_{0}(Q), \ldots, a_{n}(Q),\pm 1).$ Thus the sets $\Omega^{j+1}$ are defined on the unit sphere in $W$ by sign conditions (quantifier-free formulas) whose atoms belong to a set of $n+1$ polynomials in $k$ variables and of degree less than $n+1$. For such sets we have the estimate, proved in \cite{BPR}: $b(\Omega^{j+1})\leq n^{O(k)}$. Putting all this together we get:
$$b(X)\leq n+1+\sum_{j\geq 0}b(\Omega^{j+1})\leq n^{O(k)}$$
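For concreteness, the inertia computation just described is easy to carry out numerically. The sketch below is only an illustration and is not used anywhere in the argument; it compares the count of positive eigenvalues with the Descartes count on the coefficients of the characteristic polynomial.
\begin{verbatim}
# Positive inertia index via the spectrum and via Descartes' rule (illustration).
import numpy as np

def inertia_plus_eig(Q, tol=1e-9):
    """i^+(Q): number of positive eigenvalues of the symmetric matrix Q."""
    return int((np.linalg.eigvalsh(Q) > tol).sum())

def inertia_plus_descartes(Q, tol=1e-9):
    """Same index as the number of sign changes in the (nonzero) coefficients
    of the characteristic polynomial, which has only real roots."""
    coeffs = np.poly(Q)                      # det(tI - Q), highest degree first
    signs = np.sign(coeffs[np.abs(coeffs) > tol])
    return int((signs[:-1] != signs[1:]).sum())

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
Q = (A + A.T) / 2                            # a random symmetric matrix
assert inertia_plus_eig(Q) == inertia_plus_descartes(Q)
\end{verbatim}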
\begin{example}\label{toy}Before going on we give an idea of the direction in which this spectral sequence approach will lead us; this is just a motivation for the next sections, the detailed theory being developed in the final part of the paper.
Let $S_W$ be the unit sphere in $W$ and let us assume that the set $$\Sigma_W=\{q\in S_W \,|\, \ker (q)\neq 0\}$$ is a \emph{smooth} manifold and each time we cross it the index function changes exactly by $\pm 1$. Then the components of this manifold are exactly the boundaries of the sets $\Omega^j$ and $b(\Sigma_W)=\sum_{\Omega^j \neq S_W}b(\partial \Omega^{j})$. On the other hand each $\partial \Omega^j$ is a submanifold of the sphere and it is not difficult to show that $b(\partial \Omega^j)=2 b(\Omega^j)$ (we will give an argument for this in Lemma \ref{boundary}). Combining all this together into Agrachev's bound we get:
\begin{equation}\label{toyeq}b(X)\leq n+1+\frac{1}{2}b(\Sigma_W)\end{equation}
which relates the topology of $X$ (the base locus of $W$) to the topology of $\Sigma_W$ (the singular locus of $W$). In the general case $\Sigma_W$ will not be smooth, nor the index function well behaved, and a more refined approach is needed. This approach is based on the study of the topology of $\Sigma_W$ and its singularities. These two objects are very particular algebraic sets: they are defined by the set of points where a family of matrices has some rank degeneracy, i.e. they are determinantal varieties.
\end{example}
\section{Symmetric determinantal varieties}
The aim of this section is to bound the topology and describe some geometry of symmetric determinantal varieties. In a broad sense these will be defined by rank degeneracy conditions of (algebraic) families of symmetric matrices. Recall that our interest comes from families of quadratic forms; the way to switch to symmetric matrices is simply by establishing a linear isomorphism between $\mathcal{Q}_n$ and $\textrm{Sym}_{n}(\mathbb{R})$. This can be done, once a scalar product on $\mathbb{R}^n$ has been fixed, by associating to each quadratic form $q$ the matrix $Q$ defined by:
$$q(x)=\langle x, Qx \rangle \quad\forall x\in \mathbb{R}^n$$
Notice that the dimension of the vector space $\textrm{Sym}_{n}(\mathbb{R})$ is $\binom{n+1}{2}.$\\
Suppose now that $Y$ is an algebraic subset of $\textrm{Sym}_n(\mathbb{R})$; for every natural number $r$ we define its rank degeneracy locus
$$Y^{(r)}=\{Q\in Y\, |\,\textrm{dim} \ker(Q)\geq r\}$$
Using the bound of \cite{Milnor} we can immediately prove the following proposition, which exploits the idea that symmetric determinantal varieties have relatively simple topology.
\begin{propo}\label{bounddet}Let $Y$ be defined by polynomials of degree less than $d$ in $\textrm{Sym}_n(\mathbb{R})$ and $\mathbb{R}^k$ be a subspace; let also $\delta=\max \{d, n-r+1\}$. Then:
$$b\big(Y^{(r)}\cap \mathbb{R}^k\big)\leq \delta (2\delta-1)^{k-1}$$
\begin{proof} The set $Y^{(r)}$ is defined in $\textrm{Sym}_n(\mathbb{R})$ by the same equations defining $Y$ plus all the equations for the vanishing of the minors of order $n-r+1$; these last equations have degree $n-r+1$. Once we intersect $Y^{(r)}$ with a linear space of dimension $k$, we get a set defined by equations of degree at most $\delta$ in $k$ variables and Milnor's estimate applies.
\end{proof}
\end{propo}
Let us fix now a scalar product also on the space $\textrm{Sym}_n(\mathbb{R})$, e.g. we can take $\langle A, B\rangle =\frac{1}{2} \textrm{tr}(AB)$. We consider the set of singular matrices of norm one:
$$\Sigma=\{\|Q\|^2=1 \,,\, \det (Q)=0\}$$
This set is a deformation retract of the set of nonzero matrices with determinant zero and for $n>1$ is defined by equations of degree at most $n$ in $\textrm{Sym}_n(\mathbb{R})$. The previous proposition would produce a bound of the form $an^{\binom{n+1}{2}}$ for its topological complexity; in fact a much better bound holds, as shown in the next theorem.
\begin{teo}
$$H^*(\Sigma)\simeq \bigoplus_{j=0}^n H^*(\emph{\textrm{Gr}}(j,n))\quad \textrm{and} \quad b(\Sigma)=2^n$$
\begin{proof}In the space of all symmetric matrices let us consider the open set $A$ where the determinant does not vanish; this set deformation retracts to $S^N\backslash \Sigma$ and by Alexander-Pontryagin duality it follows that:
\begin{equation}\label{sigma}H^*(\Sigma)\simeq H_*(A)\end{equation}
On the other hand $A$ is the \emph{disjoint union} of the open sets:
$$G_{j,n}=\{\det(Q)\neq 0, \, \mathrm{i}^+(Q)=j\},\quad j=0,\ldots,n$$
We prove that each of these sets is homotopy equivalent to a Grassmannian; this, together with equation (\ref{sigma}), will give the desired result. More specifically we show that the semialgebraic map
$$p_j:G_{j,n}\to \textrm{Gr}(j,n)$$
which sends each matrix $Q$ to its positive eigenspace, is a homotopy equivalence. In fact let $\{e_1,\ldots, e_n\}$ be the standard basis of $\mathbb{R}^n$ and $E_j$ be the span of the first $j$ basis elements. The preimage of $E_j$ under $p_j$ equals the set of all symmetric block matrices of the form $$Q=\left( \begin{matrix} P& 0\\0&Q'\end{matrix}\right)$$
with $P$ positive definite and $Q'$ invertible and negative definite, i.e. $P\in G_{j,j}$ and $Q'\in G_{0, n-j}$. In particular, since both $G_{j,j}$ and $G_{0, n-j}$ are open convex cones, they are contractible and:
$$p_j^{-1}(E_j)\simeq G_{j,j}\times G_{0, n-j}\quad\textrm{is contractible.}$$
For $W\in \textrm{Gr}(j,n)$ let $M$ be any orthogonal matrix such that $MW=E_j$; then clearly $p_j^{-1}(W)=M^{-1}p_j^{-1}(E_j) M$ and all the fibers of $p_j$ are homeomorphic. Thus $p_j$ is a semialgebraic map with contractible fibers, hence a homotopy equivalence. The last part of the theorem follows from the well known fact that $b(\textrm{Gr}(j,n))=\binom{n}{j}$ and the formula $\sum_{j=0}^n \binom{n}{j}=2^n.$
\end{proof}\end{teo}
Let $Z$ be the algebraic set of all singular matrices in $\textrm{Sym}_n(\mathbb{R})$; we will be interested in greater generality in the filtration:
\begin{equation}\label{strataZ}\{0\}=Z^{(n)}\subset Z^{(n-1)}\subset \cdots \subset Z^{(2)}\subset Z^{(1)}=Z\end{equation}
We recall that each $Z^{(r)}$ is a real algebraic subset of $\textrm{Sym}_n(\mathbb{R})$ of codimension
$\binom{r+1}{2}$ and that the singular loci of these varieties are related by:
\begin{equation}\label{sing}\textrm{Sing}( Z^{(j)})=Z^{(j+1)}\end{equation}
References for these statements are \cite{Agrachev1} and \cite{AgLe}; in particular Proposition 9 of \cite{AgLe} shows that $Z$ is Nash stratified by the smooth semialgebraic sets $N_r=Z^{(r)}\backslash Z^{(r+1)}$ (see \cite{BCR} for the definition and properties of Nash stratifications).
Notice also that using the above notation we have the equalities $Y^{(r)}=Y\cap Z^{(r)}$ and $\Sigma=\{\|Q\|^2=1\}\cap Z^{(1)}$.\\
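For the reader's convenience we also recall how the codimension can be recovered by a standard dimension count (sketched here; we do not quote it verbatim from \cite{AgLe}): a matrix in $N_r=Z^{(r)}\backslash Z^{(r+1)}$ is determined by its $r$-dimensional kernel, a point of $\textrm{Gr}(r,n)$, and by a nondegenerate symmetric form on a complement of the kernel, hence
$$\dim Z^{(r)}=r(n-r)+\binom{n-r+1}{2}\quad\textrm{and}\quad \textrm{codim}\, Z^{(r)}=\binom{n+1}{2}-r(n-r)-\binom{n-r+1}{2}=\binom{r+1}{2}.$$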
The degrees of the complexifications $Z_\mathbb{C}^{(r)}$ of these varieties are computed in \cite{HarrisTu}:
$$\deg Z_\mathbb{C}^{(r)}=\prod_{\alpha=0}^{r-1}\frac{\binom{n+\alpha}{r-\alpha}}{\binom{2\alpha+1}{\alpha}}=O(n)^{\frac{r(r+1)}{2}} $$
Notice that they (and their hyperplane sections) have big degree but small topological complexity; this is due to the fact that they can be defined by (many) equations of low degree.\\
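As a cross-check of the displayed formula (an illustration only, not taken from \cite{HarrisTu}), one can evaluate the product for small values of the parameters; for instance $r=1$ gives the degree $n$ of the discriminant hypersurface and $r=2$ gives $\binom{n+1}{3}$.
\begin{verbatim}
# Evaluation of the Harris-Tu degree formula (illustration only).
from fractions import Fraction
from math import comb

def deg_Z(n, r):
    """Degree of Z_C^(r), evaluated from the displayed product formula."""
    d = Fraction(1)
    for a in range(r):
        d *= Fraction(comb(n + a, r - a), comb(2 * a + 1, a))
    return d

assert all(deg_Z(n, 1) == n for n in range(2, 10))   # the hypersurface case
assert deg_Z(5, 2) == comb(6, 3)                     # binom(n+1,3) for r = 2, n = 5
\end{verbatim}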
Let us denote by $\mathrm{i}^-(Q)$ the number of \emph{negative} eigenvalues of a symmetric matrix $Q$ and recall that
$$P_j=\{Q\in \textrm{Sym}_n(\mathbb{R})\,|\, \mathrm{i}^-(Q)\leq j\}, \quad0\leq j\leq n-1$$
is a (noncompact) topological submanifold of $\textrm{Sym}_n(\mathbb{R})\simeq \mathcal{Q}_n$ with boundary (see \cite{Agrachev1}); let us set $$A_j=\partial P_j,\quad 0\leq j\leq n-1.$$
Here we show the picture for $ \textrm{Sym}_{2}(\mathbb{R})\simeq \mathbb{R}^3.$ The set $P_0$ is the closed cone of positive semidefinite matrices and its boundary is a topological manifold; $P_1$ is the set of matrices with at most one negative eigenvalue; it contains $P_0$ and its boundary is again a topological manifold. The union $\partial P_0\cup \partial P_1$ is the set of singular matrices and this set is not a manifold: its singular locus (the zero matrix) is given by $\partial P_0\cap \partial P_1.$
\begin{center}
\fontsize{15}{0}
\scalebox{0.5}
{
\begin{pspicture}(0,-5.89)(16.201895,5.87)
\pspolygon[linewidth=0.04,fillstyle=solid](0.08,4.63)(6.187329,4.6248903)(3.1978602,0.2806814)
\psellipse[linewidth=0.04,dimen=outer,fillstyle=solid](3.12,4.84)(3.12,1.03)
\pspolygon[linewidth=0.04,fillstyle=solid](15.28,-4.0993185)(9.16,-4.19)(12.16214,0.25)
\usefont{T1}{ptm}{m}{n}
\rput(4.261455,5.115){$P_0$}
\usefont{T1}{ptm}{m}{n}
\rput(3.1414552,2.315){$\partial P_0$}
\usefont{T1}{ptm}{m}{n}
\rput(14.871455,0.515){$P_1\supset P_0$}
\usefont{T1}{ptm}{m}{n}
\rput(12.161455,-2.045){$\partial P_1$}
\pspolygon[linewidth=0.04,linestyle=dashed,dash=0.16cm 0.16cm,fillstyle=solid](6.28,-4.0793185)(0.17267115,-4.074209)(3.16214,0.27)
\rput{-180.0}(6.44,-8.578637){\psellipse[linewidth=0.04,linestyle=dashed,dash=0.16cm 0.16cm,dimen=outer,fillstyle=solid](3.22,-4.2893186)(3.1,1.03)}
\pspolygon[linewidth=0.04,linestyle=dashed,dash=0.16cm 0.16cm,fillstyle=solid](9.08,4.61)(15.187329,4.6048903)(12.19786,0.2606814)
\psellipse[linewidth=0.04,linestyle=dashed,dash=0.16cm 0.16cm,dimen=outer,fillstyle=solid](12.12,4.82)(3.12,1.03)
\rput{-180.0}(24.48,-8.658637){\psellipse[linewidth=0.04,linestyle=dashed,dash=0.16cm 0.16cm,dimen=outer,fillstyle=solid](12.24,-4.3293185)(3.1,1.03)}
\psbezier[linewidth=0.04,fillstyle=solid](9.14,-4.19)(9.0,-5.87)(15.32,-5.790941)(15.32,-4.319339)
\end{pspicture}
}
\end{center}
The following proposition describes in more detail the structure of the sets $Z^{(r)}$, using the combinatorics of the $A_j$'s.
\begin{propo}\label{zetar}For every $r\geq 0$ let $I_r$ be the set of all the subsets $\alpha$ of $\{0, \ldots, n-1\}$ consisting of $r$ consecutive integers. Then
$$Z^{(r)}=\bigcup_{\alpha \in I_r} \bigcap_{j\in \alpha} A_j.$$
\end{propo}
\begin{proof}
For $l\geq 0$ let us say that a matrix $Q$ has the property $s(l)$ if there exists a sequence $\{Q_n\}_{n\geq0}$ converging to $Q$ such that $\mathrm{i}^-(Q_n)\geq l.$ Using this notation we have that $A_j=\{\mathrm{i}^-(Q)\leq j\textrm{ and $Q$ has the property $s(j+1)$}\}$.
From this it follows that for every $r\geq 0$
$$A_i\cap A_{i+r-1}=\{\mathrm{i}^-(Q)\leq i\textrm{ and $Q$ has the property $s(i+r)$}\}$$
which also says that $A_i\cap A_{i+1}\cap \cdots \cap A_{i+r-1}=A_i\cap A_{i+r-1}$. Let now $Q\in Z^{(r)}$ and $M$ be an orthogonal matrix such that:
$$M^{-1}Q M=\textrm{diag}(-\lambda_1^2,\ldots, -\lambda_{\mathrm{i}^-(Q)}^2, \mu_{\mathrm{i}^-(Q)+1}, \ldots, \mu_{n-r}, 0, \dots, 0)$$
with the $\lambda_i$'s greater than zero. Let now $D_n$ be defined by changing each zero on the diagonal of the previous matrix to $-\frac{1}{n}.$
Then if we set $Q_n=M D_n M^{-1}$ we have that $Q$ satisfies property $s(\mathrm{i}^-(Q)+r)$ and thus it belongs to $\bigcup_{\alpha \in I_r} \bigcap_{j\in \alpha} A_j.$ Viceversa let $Q$ be in $\bigcup_{\alpha \in I_r} \bigcap_{j\in \alpha} A_j$; then $Q$ satisfies $s(\mathrm{i}^-(Q)+r)$ and there exist $\{Q_n\}_{n\geq 0}$ such that:
$$\dim \ker Q=n-\mathrm{i}^+(Q)-\mathrm{i}^-(Q)\geq n-\mathrm{i}^+(Q_n)-\mathrm{i}^-(Q_n)+r\geq r$$
(for the inequality $\mathrm{i}^+(Q_n)\geq \mathrm{i}^+(Q)$ we have used that $\{\mathrm{i}^+(Q)\geq j\}$ is an open set), i.e. $Q$ is in $Z^{(r)}.$
\end{proof}
\section{Transversality arguments}
In this section we will discuss the following idea. Suppose we are given $X$ by the vanishing of some quadratic polynomials in $\mathbb{R}P^n$ and let $W$ be the span of these polynomials as in the previous sections. The homological complexity of $X$ (up to an $n+1$ term) can be bounded, using Agrachev's bound, by the sum of the complexities of the sets $\Omega^j$. To have an alternate description of these sets let us introduce the following notation. Let $q_1,\ldots, q_k\in \mathcal{Q}_{n+1}$ be the quadratic forms defining $X$ and $q: S^{k-1} \to \mathcal{Q}_{n+1}$ the map defined by:
$$\omega=(\omega_1, \ldots, \omega_k) \mapsto \omega q= \omega_1 q_1+\ldots+\omega_k q_k$$
The map $q$ is the restriction to the sphere of the linear map sending the standard basis of $\mathbb{R}^k$ to $\{q_1, \ldots, q_k\}.$ We redefine now:
$$\Omega^j=\{\omega \in S^{k-1}\, | \, \mathrm{i}^+(\omega q)\geq j\},\quad j\geq 1$$ If $q_1,\ldots, q_k$ are linearly independent, then this definition agrees with the previous one; if they are not linearly independent the map $q$ is no longer an embedding, though a look at the proof of Agrachev's bound shows that it still holds:
$$b(X)\leq n+1+\sum_{j\geq 0}b(\Omega^{j+1})$$
(it is sufficient to use the set $B'=\{(\omega, [x])\in S^{k-1}\times \mathbb{R}P^n\, |\, (\omega q)(x)\geq 0\}$ instead of $B$ and the proof works the same; actually it can also be proved that these new sets deformation retract onto the previously defined ones). The question we address is now the following: what happens if we perturb the map $q$?\\
The perturbations we will be interested in are those of the form:
$$q_\epsilon:\omega\mapsto \omega q-\epsilon p$$
where $p$ is a positive definite quadratic form; in other words we will be interested in small \emph{affine} translations $q-\epsilon p$ of the map $q$.
It turns out that if $p$ is a positive definite quadratic form and $\epsilon>0$ is small enough then each set $\Omega^j$ is homotopy equivalent to the set:
$$\Omega_{n-j}(\epsilon)=\{\omega \in S^{k-1}\, |\, \mathrm{i}^{-}(\omega q-\epsilon p)\leq n-j\}$$
where $\mathrm{i}^{-}$ denotes the negative inertia index, i.e. $\mathrm{i}^{-}(\omega q-\epsilon p)=\mathrm{i}^{+}(\epsilon p-\omega q).$ In particular the Betti numbers of $\Omega^{j+1}$ and of its perturbation $\Omega_{n-j}(\epsilon)$ are the same, as proved in the following lemma from \cite{Le3}.
\begin{lemma}
\label{union}For every positive definite form $p\in \mathcal{Q}_{n+1}$ and for every $\epsilon>0$ sufficiently small
$$b(\Omega^{j+1})=b(\Omega_{n-j}(\epsilon)).$$
\begin{proof}
Let us first prove that $\Omega^{j+1}=\bigcup_{\epsilon>0}\Omega_{n-j}(\epsilon).$\\
Let $\omega \in \bigcup_{\epsilon>0}\Omega_{n-j}(\epsilon);$ then there exists $\overline{\epsilon}$ such that $\omega\in \Omega_{n-j}(\epsilon)$ for every $\epsilon<\overline{\epsilon}.$ Since for $\epsilon$ small enough $$\mathrm{i}^{-}(\omega q-\epsilon p)=\mathrm{i}^{-}(\omega q)+\dim (\ker (\omega q))$$ then it follows that $$\mathrm{i}^{+}(\omega q)=n+1-\mathrm{i}^{-}(\omega q)-\dim (\ker \omega q)\geq j+1.$$ Viceversa if $\omega \in \Omega^{j+1}$ the previous inequality proves $\omega\in \Omega_{n-j}(\epsilon)$ for $\epsilon$ small enough, i.e. $\omega \in \bigcup_{\epsilon>0}\Omega_{n-j}(\epsilon).$\\
Notice now that if $\omega\in \Omega_{n-j}(\epsilon)$ then, possibly choosing a smaller $\epsilon$, we may assume that $\epsilon$ properly separates the spectrum of $\omega q$ and thus, by continuity of the map $q$, there exists an open neighborhood $U$ of $\omega$ such that $\epsilon$ properly separates the spectrum of $\eta q$ for every $\eta \in U$ as well. Hence every $\eta \in U$ also belongs to $\Omega_{n-j}(\epsilon)$. From this consideration it easily follows that each compact set in $\Omega^{j+1}$ is contained in some $\Omega_{n-j}(\epsilon)$ and thus $$\varinjlim_{\epsilon}\{H_{*}(\Omega_{n-j}(\epsilon))\}=H_{*}(\Omega^{j+1}).$$ It remains to prove that the topology of $\Omega_{n-j}(\epsilon)$ is eventually stable as $\epsilon$ goes to zero. Consider the semialgebraic compact set $S_{n-j}=\{(\omega, \epsilon)\in S^{k-1}\times [0, \infty)\, |\, \mathrm{i}^{-}(\omega q-\epsilon p)\leq n-j\}$. By Hardt's triviality theorem (see \cite{BCR}) we have that the projection $(\omega, \epsilon)\mapsto \epsilon$ is a locally trivial fibration over an interval $(0,\epsilon_0)$ for $\epsilon_0$ small enough; from this the conclusion follows.
\end{proof}
\end{lemma}
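The identity $\mathrm{i}^{-}(\omega q-\epsilon p)=\mathrm{i}^{-}(\omega q)+\dim\ker(\omega q)$ used in the proof above is also easy to observe numerically; here is a small illustration (with an arbitrary diagonal example, included only for concreteness).
\begin{verbatim}
# Numerical illustration of i^-(Q - eps*P) = i^-(Q) + dim ker Q for small eps.
import numpy as np

def negative_index(Q, tol=1e-9):
    """i^-(Q): number of negative eigenvalues of the symmetric matrix Q."""
    return int((np.linalg.eigvalsh(Q) < -tol).sum())

Q = np.diag([2.0, 0.0, 0.0, -1.0])     # i^-(Q) = 1, dim ker Q = 2
P = np.eye(4)                          # a positive definite form
for eps in (1e-1, 1e-3, 1e-6):
    assert negative_index(Q - eps * P) == negative_index(Q) + 2
\end{verbatim}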
The following is a variation of Lemma 4 of \cite{Le3} and describes the structure of the sets of degenerate quadratic forms on the 'perturbed sphere'.
We recall that the space $Z$ of all degenerate forms in $\mathcal{Q}_{n+1}$ admits the semialgebraic Nash stratification $Z=\coprod N_r $ where $N_r=Z^{(r)}\backslash Z^{(r+1)}$ (as above we use the linear identification between quadratic forms and symmetric matrices).
\begin{lemma}\label{perturb}There exists a positive definite form $p\in \mathcal{Q}_{n+1}$ such that for every $\epsilon>0$ small enough the map $q_\epsilon:S^{k-1}\to \mathcal{Q}_{n+1}$ defined by:
$$\omega \mapsto \omega q-\epsilon p$$
is transversal to all strata of $Z=\coprod N_r.$ In particular $q_\epsilon^{-1}(Z)=\coprod q_\epsilon^{-1}(N_r)$ is a Nash stratification, the closure of $q_\epsilon^{-1}(N_r)$ equals $q_\epsilon^{-1}(Z^{(r)})$ and
$$\emph{\textrm{Sing}}\big(q_\epsilon^{-1}\big(Z^{(r)}\big)\big)= q_\epsilon^{-1}\big(Z^{(r+1)}\big)$$
\end{lemma}
\begin{proof}Let $\mathcal{Q}^{+}$ be the set of positive definite quadratic forms in $\mathcal{Q}_{n+1}$ and consider the map $F:S^{k-1}\times \mathcal{Q}^{+}\to \mathcal{Q}_{n+1}$ defined by
$$(\omega, p)\mapsto \omega q-p.$$
Since $\mathcal{Q}^{+}$ is open in $\mathcal{Q},$ then $F$ is a submersion and $F^{-1}(Z)$ is Nash-stratified by $\coprod F^{-1}(N_{i}).$ Then for $p\in \mathcal{Q}^{+}$ the evaluation map $\omega \mapsto \omega q-p$ is transversal to all strata of $Z$ if and only if $p$ is a regular value for the restriction of the second factor projection $\pi:S^{k-1}\times \mathcal{Q}^{+}\to \mathcal{Q}^{+}$ to each stratum of $F^{-1}(Z)=\coprod F^{-1}(N_{i}).$
Thus let $\pi_{i}=\pi|_{F^{-1}(N_{i})}:F^{-1}(N_{i})\to \mathcal{Q}^{+};$ since all the data are smooth and semialgebraic, then by the semialgebraic Sard's Lemma (see \cite{BCR}), the set $\Sigma_{i}=\{\hat{q}\in \mathcal{Q}^{+}\, | \, \hat{q}\textrm{ is a critical value of $\pi_{i}$}\}$ is a semialgebraic subset of $\mathcal{Q}^{+}$ of dimension strictly less than $\dim (\mathcal{Q}^{+}).$ Hence $\Sigma=\cup_{i}\Sigma_{i}$ also is a semialgebraic subset of $\mathcal{Q}^{+}$ of dimension $\dim (\Sigma)<\dim (\mathcal{Q}^{+})$ and for every $p\in \mathcal{Q}^{+}\backslash \Sigma$ the map $\omega\mapsto \omega q-p$ is transversal to each $N_{i}.$ Since $\Sigma$ is semialgebraic of codimension at least one, then there exists $p\in \mathcal{Q}^{+}\backslash \Sigma$ such that $\{t p\}_{t>0}$ intersects $\Sigma$ in a finite number of points, i.e. for every $\epsilon>0$ sufficiently small $\epsilon p\in \mathcal{Q}^{+}\backslash \Sigma$. This concludes the proof.\end{proof}
\begin{center}
\fontsize{12}{0}
\scalebox{0.7}
{
\begin{pspicture}(0,-4.07)(15.72291,4.07)
\pscircle[linewidth=0.04,dimen=outer,fillstyle=solid](3.2610157,0.03){2.22}
\pscircle[linewidth=0.04,dimen=outer,fillstyle=solid](11.641016,0.01){2.22}
\psline[linewidth=0.04cm](3.2610157,3.93)(3.2810156,-4.05)
\psbezier[linewidth=0.04](11.021015,4.05)(11.038329,-0.83)(12.163702,-0.7251527)(12.181016,4.05)
\psdots[dotsize=0.12](3.2610157,2.23)
\psdots[dotsize=0.12](3.2810156,-2.17)
\psdots[dotsize=0.12](11.101016,2.13)
\psdots[dotsize=0.12](12.101016,2.15)
\usefont{T1}{ptm}{m}{n}
\rput(0.5,-3.0){$W$}
\usefont{T1}{ptm}{m}{n}
\rput(4.6,3.2){$\det(\omega q)=0$}
\usefont{T1}{ptm}{m}{n}
\rput(15,-3.0){$W-\epsilon p$}
\usefont{T1}{ptm}{m}{n}
\rput(14,3.2){$\det( \omega q-\epsilon p)=0$}
\end{pspicture}
}
\end{center}
Since the codimension of $Z^{(r)}$ is $\binom{r+1}{2}$ we can immediately derive the following.
\begin{coro}\label{codim}Assume $r>\frac{1}{2}(-1+\sqrt{8k-7})$; then there exists a positive definite form $p$ such that for $\epsilon>0$ small enough:
$$\{\omega \in S^{k-1}\,|\,\dim \ker (\omega q-\epsilon p)\geq r\}=\emptyset$$
\end{coro}
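For completeness we also record the elementary manipulation behind the threshold: for integers $r,k\geq 1$,
$$r>\tfrac{1}{2}\big(-1+\sqrt{8k-7}\big)\iff (2r+1)^{2}>8k-7\iff \tfrac{r(r+1)}{2}>k-1\iff \binom{r+1}{2}\geq k,$$
i.e. exactly when the codimension of $Z^{(r)}$ exceeds $\dim S^{k-1}$, so that a map transversal to all the strata of $Z^{(r)}$ must miss it.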
We recall the following result describing the local topology of the space of quadratic forms (see \cite{AgLe}, Proposition 9).
\begin{propo}\label{topquad}Let $q_{0}\in \mathcal{Q}$ be a quadratic form and let $V$ be its kernel. Then there exists a neighborhood $U_{q_{0}}$ of $q_{0}$ and a smooth semialgebraic map $\phi:U_{q_{0}}\to \mathcal{Q}(V)$ such that: 1) $\phi(q_{0})=0$; 2) $\mathrm{i}^{-}(q)=\mathrm{i}^{-}(q_{0})+\mathrm{i}^{-}(\phi(q));$ 3) $\dim\ker (q)=\dim\ker(\phi(q));$ 4) for every $p\in \mathcal{Q}$ we have $ d\phi_{q_{0}}(p)=p|_{V}.$
\end{propo}
Combining Lemma \ref{perturb} and the previous proposition we can prove the following corollary, which shows that after the perturbation the sets $\Omega_{n-j}(\epsilon)$ have a very nice structure.
\begin{coro}\label{boundary}Let $p$ be the positive definite form provided by Lemma \ref{perturb}. Then for every $\epsilon>0$ small enough:
$$\Omega_{n-j}(\epsilon)\textrm{ is a \emph{topological} submanifold of $S^{k-1}$ with boundary}.$$
\end{coro}
\begin{proof}
Let $p$ be the quadratic form given by Lemma \ref{perturb} and $f=q_\epsilon:S^{k-1}\to \mathcal{Q}_{n+1}$ the map consequently defined. Let us consider a point $\omega$ in $\Omega_{n-j}(\epsilon)$ and the map $\phi:U_{f(\omega)}\to \mathcal{Q}(\ker f(\omega))$ given by Proposition \ref{topquad}. Since $d\phi_{f(\omega)}p=p|_{\ker f(\omega)}$ then $d\phi_{f(\omega)}$ is surjective. On the other hand by transversality of $f$ to each stratum $N_{r}$ we have:
$$ \textrm{im} (df_{\omega})+T_{f(\omega)}N_{r}=\mathcal{Q}_{n+1}$$
Since $\phi(N_{r})=\{0\}$ (notice that this condition implies $(d\phi_{f(\omega)})|_{T_{f(\omega)}N_{r}}=0$) then
$$\mathcal{Q}(\ker f(\omega))=\textrm{im}(d\phi_{f(\omega)})=\textrm{im}(d(\phi\circ f)_{\omega})$$
which tells us that $\phi \circ f$ is a submersion at $\omega.$ Thus by the Rank Theorem there exist an open neighborhood $U_{\omega}$ of $\omega$ and a diffeomorphism $\psi$ onto its image such that the following diagram is commutative:
$$\begin{tikzpicture}[xscale=1.5, yscale=1.5]
\node (A2_0) at (2, 0) {$\mathcal{Q}(\ker f(\omega))$};
\node (A1_1) at (1, 1) {$U_\omega$};
\node (A3_1) at (3, 1) {$\mathcal{Q}(\ker f(\omega))\times \mathbb{R}^{l}$};
\path (A1_1) edge [->] node [auto] {$\psi$} (A3_1);
\path (A1_1) edge [->] node [auto,swap] {$\phi \circ f$} (A2_0);
\path (A3_1) edge [->] node [auto] {$p_{1}$} (A2_0);
\end{tikzpicture}
$$
(in particular $\psi(U_{\omega})$ is an open subset of $\mathcal{Q}(\ker f(\omega))\times \mathbb{R}^{l}$). Let us pick an open neighborhood of $\psi(\omega)$ of the form $A\times B$, with $A\subset \mathcal{Q}(\ker f(\omega))$ and $B\subset \mathbb{R}^{l}$ contractible, and consider the open set $U'=U_\omega\cap \psi^{-1}(A\times B)$ and the commutative diagram:
$$\begin{tikzpicture}[xscale=1.5, yscale=1.5]
\node (A2_0) at (2, 0) {$A$};
\node (A1_1) at (1, 1) {$U'$};
\node (A3_1) at (3, 1) {$A\times B$};
\path (A1_1) edge [->] node [auto] {$\psi$} (A3_1);
\path (A1_1) edge [->] node [auto,swap] {$\phi \circ f$} (A2_0);
\path (A3_1) edge [->] node [auto] {$p_{1}$} (A2_0);
\end{tikzpicture}
$$
Notice now that for every $\eta$ in $U'$ the second point of Proposition \ref{topquad} implies that $\mathrm{i}^{-}(f(\eta))=\mathrm{i}^{-}(f(\omega))+\mathrm{i}^{-}(\phi(f(\eta))).$ In particular we see that $U'\cap \Omega_{n-j}(\epsilon)$ is homeomorphic, through $\psi$, to the set
$$\big(A\cap \{q\in \mathcal{Q}(\ker f(\omega))\textrm{ such that }\mathrm{i}^-(q)\leq n-j-\mathrm{i}^-(f(\omega))\} \big)\times B.$$
The first factor is the intersection of $A$ with the set of quadratic forms in $\mathcal{Q}(\ker f(\omega))$ whose \emph{negative} inertia index is less than or equal to $n-j-\mathrm{i}^- (f(\omega));$ since this set is a topological submanifold with boundary of $\mathcal{Q}(\ker f(\omega))$, the set $U'\cap \Omega_{n-j}(\epsilon)$ is homeomorphic to an open subset of a topological manifold with boundary. This proves that for every point $\omega\in \Omega_{n-j}(\epsilon)$ there is an open neighborhood $U'_{\omega}$ such that $U'_{\omega}\cap \Omega_{n-j}(\epsilon)$ is homeomorphic to an open subset of a topological manifold with boundary; thus $\Omega_{n-j}(\epsilon)$ itself is a topological manifold with boundary (the boundary being possibly empty).
\end{proof}
\section{A topological bound}
The aim of this section is to provide a formula which generalizes (\ref{toyeq}) from Example \ref{toy}. The idea is to use Lemma \ref{union} and Corollary \ref{boundary} in Agrachev's bound: the first says that we can perturb each set $\Omega^j$ to a set $\Omega_{n-j}(\epsilon)$ without changing its Betti numbers, and the second says that we can do that \emph{and} make the new sets topological manifolds with boundary. As we will see, we can use the topological manifold structure of these sets to get more information out of Agrachev's bound. We start by proving the following lemma from algebraic topology.
\begin{lemma}\label{boundary2} Let $M$ be a semialgebraic topological submanifold of the sphere $S^n$ with nonempty boundary and nonempty interior. Then:
$$b(M)=\frac{1}{2}b(\partial M)$$
\end{lemma}
\begin{proof}
By assumption also $N=S^n\backslash \textrm{int}(M)$ is a semialgebraic topological manifold with boundary $\partial N=\partial M$. Let us consider collar neighborhoods $A$ of $M$ and $B$ of $N$ such that $A\cap B$ deformation retracts to $\partial M$ (such collar neighborhoods certainly exist by semialgebraicity and the Collaring Theorem). From the reduced Mayer-Vietoris sequence for the pair $(A, B)$ we get: $\tilde b_i(A)+ \tilde b_i(B)=\tilde b_i(A\cap B)$ for $i\neq n-1$, and $\tilde b_{n-1}(A)+ \tilde b_{n-1}(B)= \tilde b_{n-1}(A\cap B)-1$. Summing up all these equations we obtain:
$$\tilde b(A)+ \tilde b(B)=\tilde b(A\cap B) -1$$
(here we are using the notation $\tilde b(Y)$ for the sum of the \emph{reduced} Betti numbers of a semialgebraic set $Y$).
Alexander-Pontryagin duality implies that $\tilde b(N)=\tilde b(M)$; on the other hand $A$ and $B$ deformation retract onto $M$ and $N$ respectively, which means $\tilde b(A)=\tilde b(M)=\tilde b(N)=\tilde b(B)$. Plugging this equality into the previous formula immediately gives the statement.
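In detail (a short verification): since $A\cap B$ deformation retracts onto $\partial M$, the displayed identity together with $\tilde b(A)=\tilde b(B)=\tilde b(M)$ gives $2\tilde b(M)=\tilde b(\partial M)-1$; as $M$ and $\partial M$ are nonempty, $b(Y)=\tilde b(Y)+1$ for $Y\in\{M,\partial M\}$, hence
$$2b(M)=2\tilde b(M)+2=\tilde b(\partial M)+1=b(\partial M).$$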
\end{proof}
We now prove the main technical theorem of the paper. A toy model proof for the case in which $\Sigma_\epsilon$ is smooth was provided in Example \ref{toy}; another proof for the case in which $\Sigma_\epsilon$ has only isolated singularities is given in Example \ref{ex:four}; the reader uncomfortable with technical details is advised to take a look at these first.
\begin{teo}\label{main}Let $X$ be defined by the vanishing of the quadratic forms $q_1,\ldots, q_k$ in $\mathbb{R}P^n.$ For every quadratic form $p$ and real number $\epsilon$ let us define
$$\Sigma_\epsilon=\{\omega \in S^{k-1}\,|\, \det (\omega q-\epsilon p)=0\}$$
where $q=(q_1,\ldots, q_k)$. There exists a positive definite form $p\in \mathcal{Q}_{n+1}$ such that for every $\epsilon>0$ small enough:
\begin{itemize}
\item[i)] the map $q_\epsilon:S^{k-1}\to \mathcal{Q}_{n+1}$ given by $\omega \mapsto \omega q-\epsilon p$ is transversal to all strata of $Z$, stratified as in equation (\ref{strataZ}); in particular
$$\Sigma_\epsilon^{(r)}=\{\omega \in S^{k-1}\,|\, \dim \ker (\omega q-\epsilon p)\geq r\}$$
is an algebraic subset of $S^{k-1}$ of codimension $\binom{r+1}{2}$.
\item[ii)] if we let $\mu$ and $\nu$ be respectively the maximum and the minimum of the negative inertia index on the image of $q_\epsilon$, then
\begin{equation}\label{boundstrong}b(X)\leq n+1-2(\mu-\nu)+\frac{1}{2}\sum_{r\geq1} b\big(\Sigma_\epsilon^{(r)}\big)\end{equation}
\end{itemize}
The last sum is indeed finite since for $\binom{r+1}{2}\geq k$ part \emph{i)} implies $\Sigma_\epsilon^{(r)}=\emptyset$.
\end{teo}
\begin{proof}
The first part of the statement follows directly from Lemma \ref{perturb}. For the second part, in order to get the $-2(\mu-\nu)$ term in equation (\ref{boundstrong}), we will use a refined version of Agrachev's bound. The refined bound is obtained by using, in the proof of Agrachev's bound, a spectral sequence converging directly to $H_*(X)$ and whose second term is isomorphic to $E_2\simeq\oplus_j H^*(B^{k}, \Omega^{j+1})$, where $B^{k}$ is the unit ball in $\mathbb{R}^{k}$ such that $\partial B^{k}=S^{k-1}.$ The existence of such a spectral sequence is the content of Theorem A of \cite{AgLe}; repeating verbatim the above argument we get $b(X)\leq \sum_{j\geq 0} b(B, \Omega^{j+1})$. Let now $p$ be given by Lemma \ref{perturb}; since $p$ is positive definite, Lemma \ref{union} implies $b(\Omega^{j+1})=b(\Omega_{n-j}(\epsilon))$ for $\epsilon>0$ sufficiently small and for every $j\geq 0.$ In particular we can rewrite the refined Agrachev bound as:
\begin{equation}\label{refined}b(X)\leq n+1-2(\mu-\nu)+\sum_{\nu\leq j\leq \mu-1}b(\Omega_j(\epsilon))\end{equation}
The rest of the proof is devoted to bounding the last term $\sum b(\Omega_j(\epsilon)).$ First notice that Corollary \ref{boundary} says that each nonempty $\Omega_{j}(\epsilon)$ is a topological submanifold of $S^{k-1}$ with boundary and nonempty interior; thus applying Lemma \ref{boundary2} we get for such a manifold $$b(\Omega_j(\epsilon))=\frac{1}{2}b(\partial \Omega_j(\epsilon)),\quad \nu\leq j\leq \mu-1.$$ For convenience of notation let us rename these boundaries as follows:
$$C_j=\partial \Omega_{\nu+j-1}(\epsilon),\quad j=1, \ldots, l=\mu-\nu$$ Thus (\ref{refined}) can be rewritten as:
\begin{equation}\label{refined2}b(X)\leq n+1-2(\mu-\nu)+\frac{1}{2}\sum_{j=1}^{l}b(C_j).\end{equation}
Let us now analyze the structure of $\Sigma_\epsilon=\Sigma_\epsilon^{(1)}.$ By construction this set equals the union of all the $C_j$'s, but this union is not disjoint since $\Sigma_\epsilon$ might have singularities, and these singularities occur precisely where two sets $C_j$ and $C_{j+1}$ intersect (this immediately follows from the fact that $q_\epsilon$ is transversal to all the strata of $Z$ and that $\Sigma_\epsilon$ is stratified by the preimages of the strata of $Z$ as described in Lemma \ref{perturb}). For convenience of notation, let us write $S(\omega, j)$ if the point $\omega$ has the property that there exists a sequence $\{\omega_n\}_{n\geq0}$ converging to $\omega$ such that $\mathrm{i}^-(q_\epsilon(\omega_n))\geq j$. Corollary \ref{boundary} implies now that $C_j=\Omega_{\nu+j-1}(\epsilon)\cap \textrm{Cl}(\Omega_{\nu+j-1}(\epsilon)^c)$, i.e.
\begin{equation}\label{cj}C_j=\{\omega \,|\, \mathrm{i}^-(q_\epsilon(\omega))\leq j\quad\textrm{and} \quad S(\omega, j+1)\}\end{equation}
Let us denote by $I_r$ the set of all subsets $\alpha$ of $\{1,\ldots, l\}$ consisting of $r$ consecutive integers; if $\alpha=\{\alpha_1,\ldots, \alpha_r\}\in I_r$ we assume its elements are listed in increasing order, $\alpha_1<\cdots <\alpha_r$. Let now $r\in\{1,\ldots, l\}$, $\alpha \in I_r$ and for $i\in\{1,\ldots, l-r\}$ consider the sets:
$$E_{i,r}=\bigcup_{\alpha_1\leq i}\bigcap_{j\in\alpha}C_j,\quad F_{i+1,r}=\bigcap_{j=i+1}^{i+r}C_j$$
For example if $r=1$ we have $E_{i,1}=C_1\cup\cdots \cup C_i$ and $F_{i+1,1}=C_{i+1}$; if $r=2$ then $E_{i,2}=(C_{1}\cap C_2)\cup\cdots\cup (C_{i-1}\cap C_{i})$ and $F_{i+1,2}=C_{i+1}\cap C_{i+2}.$ We have the following combinatorial properties: $$E_{i,r}\cup F_{i+1,r} =E_{i+1,r}\quad \textrm{and}\quad E_{i,r}\cap F_{i+1,r}=\bigcap_{j=i}^{i+r}C_j.$$
The first equality is clear from the definition; for the second one notice that equation (\ref{cj}) implies: \begin{align*} E_{i,r}\cap F_{i+1,r}&=\bigcup_{\alpha_1\leq i}\{\omega\,|\,\mathrm{i}^-(q_\epsilon(\omega))\leq \alpha_1\quad \textrm{and}\quad S(\omega, i+r+1)\}\\
&= \{\omega\,|\,\mathrm{i}^-(q_\epsilon(\omega))\leq i\quad \textrm{and}\quad S(\omega, i+r+1)\}\\
&=\bigcap_{j=i}^{i+r}C_j.\end{align*}
Plugging these equalities into the semialgebraic Mayer-Vietoris exact sequence of the pair $(E_{i,r}, F_{i+1,r})$ we get: \begin{equation}\label{sum}b\bigg( \bigcap_{j=i+1}^{i+r}C_j\bigg)\leq b\bigg(\bigcup_{\alpha_1\leq i+1}\bigcap_{j\in\alpha}C_j \bigg)+b\bigg(\bigcap_{j=i}^{i+r}C_j\bigg)-b\bigg(\bigcup_{\alpha_1\leq i}\bigcap_{j\in\alpha}C_j\bigg)\end{equation}
If we take now the sum of all these inequalities we obtain:
\begin{equation}\label{eq}\sum_{i=0}^{l-r} b\bigg( \bigcap_{j=i+1}^{i+r}C_j\bigg)\leq b\bigg(\bigcup_{\alpha_1\leq l-r+1}\bigcap_{j\in\alpha}C_j \bigg)+\sum_{i=0}^{l-r-1} b\bigg(\bigcap_{j=i}^{i+r}C_j\bigg) \end{equation}
In fact, when we take the sum of all inequalities (\ref{sum}), all the first and the last terms on the r.h.s. cancel (since they appear with opposite signs), except for the last inequality, which gives the contribution $b\big(\bigcup_{\alpha_1\leq l-r+1}\bigcap_{j\in\alpha}C_j \big).$ Moreover, since $q_\epsilon$ is transversal to all strata of $Z$, Proposition \ref{zetar} implies:
$$\bigcup_{\alpha_1\leq l-r+1}\bigcap_{j\in\alpha}C_j =\bigcup_{\alpha\in I_r}\bigcap_{j\in \alpha}C_j=\Sigma_\epsilon^{(r)}$$
Substituting this formula into equation (\ref{eq}) we finally get:
\begin{equation}\label{interm}\sum_{i=0}^{l-r} b\bigg( \bigcap_{j=i+1}^{i+r}C_j\bigg)\leq b\big(\Sigma_\epsilon^{(r)}\big)+\sum_{i=0}^{l-r-1} b\bigg(\bigcap_{j=i}^{i+r}C_j\bigg) \end{equation}
In particular we have the following chain of inequalities (we keep on substituting at each step what we get from (\ref{interm})):
\begin{align*}\sum_{i=1}^{l}b(C_i)&=\sum_{i=0}^{l-1} b\bigg( \bigcap_{j=i+1}^{i+1}C_j\bigg)\leq b\big(\Sigma_\epsilon^{(1)}\big)+\sum_{i=0}^{l-2} b\bigg(\bigcap_{j=i}^{i+1}C_j\bigg)\\
&\leq b\big(\Sigma_\epsilon^{(1)}\big)+b\big(\Sigma_\epsilon^{(2)}\big)+\sum_{i=0}^{l-3} b\bigg(\bigcap_{j=i}^{i+2}C_j\bigg)\leq \cdots\\
&\leq \sum_{r\geq 1}b\big(\Sigma_\epsilon^{(r)}\big)\end{align*}
Substituting this into equation (\ref{refined2}) and recalling that $\Sigma_\epsilon^{(r)}$ is empty for $\binom{r+1}{2}\geq k$ the result follows.
\end{proof}
As a corollary we immediately get the following theorem.
\begin{teo}\label{topological}Let $\sigma_k=\lfloor \frac{1}{2}(-1+\sqrt{8k-7})\rfloor$; then
$$\emph{\textrm{Topological bound:}}\quad b(X)\leq b(\mathbb{R}P^n)+\frac{1}{2}\sum_{r=1}^{\sigma_k} b\big(\Sigma_\epsilon^{(r)}\big)$$
\end{teo}
\begin{proof}This is simply a reformulation of the previous theorem in a nicer form. In fact $\mu-\nu\geq 0$, $n+1=b(\mathbb{R}P^n)$ and $\sigma_k$ is given by Corollary \ref{codim}.
\end{proof}
In the particular case where we also assume nondegeneracy of the linear system $W$, we get the following theorem.
\begin{teo}\label{nondegeneracy}For a generic choice of $W=\textrm{\emph{span}}\{q_1, \ldots, q_k\}$ and $r\geq 1$
$$\Sigma_W^{(r)}=\{q \in W\backslash\{0\}\,|\, \dim \ker (q)\geq r\}=\textrm{\emph{Sing}}\big(\Sigma_W^{(r-1)}\big)$$
and the following formula holds:
$$b(X)\leq b(\mathbb{R}P^n)+\frac{1}{2}\sum_{r\geq 1}b\big(\Sigma_W^{(r)}\big)$$
\end{teo}
\begin{proof}
Let us fix a scalar product on $\mathcal{Q}_{n+1}$; then for a generic choice of $q_1, \ldots, q_k$ the unit sphere $S^{k-1}$ in $W$ is transversal to all strata of $Z=\coprod N_r$ and the first part of the statement follows from equation (\ref{sing}).\\
Notice that the set of linear affine embeddings $f:\mathbb{R}^k\to \mathcal{Q}_{n+1}$ whose restriction to $S^{k-1}$ is transversal to all the strata of $Z$ is an open dense set; moreover, if two such embeddings $f_0$ and $f_1$ are joined by a nondegenerate homotopy, then by the Thom Isotopy Lemma the two sets $f_0^{-1}(Z^{(r)})$ and $f_1^{-1}(Z^{(r)})$ are homotopy equivalent. In particular for $\epsilon>0$ small enough the map $q_\epsilon$ given by Theorem \ref{main} is nondegenerately homotopic to $S^{k-1}\hookrightarrow \mathcal{Q}_{n+1}$ and thus for every $r\geq 0$ we can substitute:
$$b\big( \Sigma_\epsilon^{(r)}\big)=b\big(\Sigma_W^{(r)}\big)$$
in equation (\ref{boundstrong}), which gives the result.
\end{proof}
\begin{remark}
From the point of view of classical algebraic geometry, it is natural to consider the projectivization $\mathbb{P}W$ rather than $W$ itself; similarly we can consider $\mathbb{P}\Sigma$ and by the Gysin exact sequence we get, for a generic $W$:
$$b(X)\leq b(\mathbb{R}P^n)+\sum_{r\geq 1}b\big( \mathbb{P}\Sigma_W^{(r)}\big)$$
Unfortunately there is no such formula for the general case: this is due to the fact that the perturbation $\Sigma_\epsilon$ is not invariant under the antipodal map.\\
Moreover, the way we got our formula shows that if we are interested in the topology of the complement of $X$, then we can remove the $b(\mathbb{R}P^n)$ from the sum and get again for a generic choice of $W$:
$$b(\mathbb{R}P^n\backslash X)\leq\sum_{r\geq 1} b\big( \mathbb{P}\Sigma_W^{(r)}\big)$$
\end{remark}
\section{A numerical bound}
From the previous discussion we can derive quantitative bounds on the homological complexity of the intersection of real quadrics. We start by proving the following proposition, which essentially refines Corollary 2.3 of \cite{BaBa}.
\begin{propo}\label{sphericalbound}
Let $Y\subset S^{k-1}$ be defined by polynomial equations of degree at most $d$. Then:
$$b(Y)\leq (2d)^{k-1}+\frac{1}{8}\binom{k+1}{3}(6d)^{k-2}$$
\end{propo}
\begin{proof}
Using Alexander-Pontryagin duality, our problem is equivalent to that of bounding $b(S^{k-1}\backslash Y)=b(Y).$ Let $Y$ be defined on the sphere by the polynomials $f_1, \ldots, f_R$ and consider the new polynomial $F=f_1^2+\cdots+f_R^2$; then clearly $Y$ is also defined by $\{F=0\}$ on the sphere, and since $F\geq 0$ we have $S^{k-1}\backslash Y=\{F|_{S^{k-1}}>0\};$ notice that the degree of $F$ is $\delta=2d.$ By semialgebraic triviality for $\epsilon>0$ small enough we have the homotopy equivalences:
$$S^{k-1}\backslash Y\sim\{F|_{S^{k-1}}>\epsilon\}\sim \{F|_{S^{k-1}}\geq \epsilon\}$$
Let now $\epsilon>0$ be a small enough regular value of $F|_{S^{k-1}}$; then $\{F|_{S^{k-1}}\geq \epsilon\}$ is a submanifold of the sphere with smooth boundary $\{F|_{S^{k-1}}=\epsilon\}$ and by Lemma \ref{boundary2} we obtain:
$$b\big(S^{k-1}\backslash Y\big)=\frac{1}{2}b(\{F|_{S^{k-1}}=\epsilon\}).$$
Thus we have reduced the problem to studying the topology of $\{F|_{S^{k-1}}=\epsilon\}$: this set is given in $\mathbb{R}^k$ by the two equations $F-\epsilon=0$ and $\|\omega\|^2-1=0$. Equivalently we can consider their homogenizations $g_1={}^hF-\epsilon \omega_0^{2d}=0$ and $g_2=\|\omega\|^2-\omega_0^2=0$ and their common zero locus in $\mathbb{R}P^k$: since there are no common solutions on $\{\omega_0=0\}$ (the hyperplane at infinity) these two equations still define $\{F|_{S^{k-1}}=\epsilon\}$. By Fact 1 in \cite{Le3} it follows that we can perturb the coefficients of $g_1$ and $g_2$ \emph{over the reals} and make their common zero set in $\mathbb{C}P^k$ a smooth complete intersection. This perturbation of the coefficients does not change the topology of the zero locus in $\mathbb{R}P^k$, since before the perturbation it was a smooth manifold; the fact that the perturbation is \emph{real} allows us to use Smith's theory. Thus let $\tilde g_1$ and $\tilde g_2$ be the perturbed polynomials; we have:
$$b(\{F|_{S^{k-1}}=\epsilon\})=b(Z_{\mathbb{R}P^k}(\tilde g_1, \tilde g_2))\leq b(Z_{\mathbb{C}P^k}(\tilde g_1, \tilde g_2))$$
where in the last step we have used Smith's inequalities. Eventually we end up with the problem of bounding the homological complexity of the complete intersection $C$ of multidegree $(2,\delta)$ in $\mathbb{C}P^k.$ Let us compute first the Euler characteristic of $C.$ By Hirzebruch's formula this is given by the $(k-2)$th coefficient in the series expansion around zero of the function:
$$H(x)=\frac{2\delta(1+x)^{k+1}}{(1+2x)(1+\delta x)}$$
In other words we have:
$$\chi(C)=\frac{H^{(k-2)}(0)}{{(k-2)!}}$$
To compute this number let us write $H(x)= F(x)G(x)$ with $F(x)=\frac{2\delta(1+x)^{k+1}}{1+2x}$ and $G(x)=\frac{1}{1+\delta x}.$ In this way we have:
$$H^{(k-2)}(0)=\sum_{j=0}^{k-2}\binom{k-2}{j}F^{(j)}(0)G^{(k-2-j)}(0).$$
To compute the derivatives of $F$ we do the same trick as for $H:$ we write $F(x)=A(x)B(x)$ where $A(x)=2\delta(1+x)^{k+1}$ and $B(x)=\frac{1}{1+2x}$. In this way, using the series expansion $B(x)=\sum_{i=0}^{\infty}(-1)^{i}2^{i}x^i$, we get:
$$F^{(j)}(0)=j!2(-2)^{j}\delta \sum_{i=0}^{j}\binom{k+1}{i}\bigg(-\frac{1}{2}\bigg)^i$$
We also have $G^{(i)}(0)=(-1)^{i}\delta^{i}i!$ (this last equality follows from the series expansion around zero $G(x)=\sum_{i=0}^{\infty}(-1)^{i}\delta^{i}x^i$). Plugging these equalities into the above one we get:
$$\chi(C)=(-1)^k\sum_{j=0}^{k-2}\bigg(\sum_{i=0}^j \binom{k+1}{i}\bigg(-\frac{1}{2}\bigg)^i\bigg)2^{j+1}\delta^{k-j-1}$$
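As a quick consistency check of this formula (not needed in what follows), take $k=3$: the sum reduces to
$$\chi(C)=-\Big(\binom{4}{0}2\delta^{2}+\Big(\binom{4}{0}-\tfrac{1}{2}\binom{4}{1}\Big)4\delta\Big)=-\big(2\delta^{2}-4\delta\big)=2\delta(2-\delta),$$
which agrees with the classical count for a smooth complete intersection curve of multidegree $(2,\delta)$ in $\mathbb{C}P^{3}$: by adjunction $2g-2=2\delta(\delta-2)$, so that $\chi(C)=2-2g=2\delta(2-\delta)$.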
Recalling now the formula $b(C)=(k-1)(1+(-1)^{k+1})+(-1)^k \chi(C)$, we get:
\begin{align*}b(C)&=(k-1)(1+(-1)^{k+1})+\sum_{j=0}^{k-2}\bigg(\sum_{i=0}^j \binom{k+1}{i}\bigg(-\frac{1}{2}\bigg)^i\bigg)2^{j+1}\delta^{k-j-1}\\
&= (k-1)(1+(-1)^{k+1})+2\delta^{k-1}+\sum_{j=1}^{k-2}\bigg(\sum_{i=0}^j \binom{k+1}{i}\bigg(-\frac{1}{2}\bigg)^i\bigg)2^{j+1}\delta^{k-j-1}\end{align*}
Since $|\sum_{i=0}^j \binom{k+1}{i}(-\frac{1}{2})^i|\leq \binom{k+1}{3}(\frac{3}{2})^{k-2}$, from the above equality we can deduce:
$$b(C)\leq 2(k-1)+2\delta^{k-1}+2\delta^{k-1}\bigg(\frac{3}{2}\bigg)^{k-2}\binom{k+1}{3}\sum_{j=1}^{k-2}\bigg(\frac{2}{\delta}\bigg)^j$$
Since now $\delta^{k-1}\sum_{j=1}^{k-2}(\frac{2}{\delta})^j=2(\delta^{k-2}+2\delta^{k-3}+\cdots+2^{k-3}\delta+2^{k-2})$ and $2^{k-2}\geq (k-1)$, we can bound $2(k-1)+ 2\delta^{k-1}\sum_{j=1}^{k-2}(\frac{2}{\delta})^j$ by $2^{k}\delta^{k-2}$ and finally write:
\begin{equation}\label{completedelta}b(C)\leq 2\delta^{k-1}+\frac{1}{4}\binom{k+1}{3}(3\delta)^{k-2}\end{equation}
The previous inequality, together with $b(Y)\leq \frac{1}{2}b(C)$ and $\delta=2d$ gives the result.\end{proof}
\begin{remark}
We notice that as long as $d$ is large enough with respect to $k$, the previous bound improves Milnor's bound, which gives $b(Y)\leq d(2d-1)^{k-1}$; here it is essential that $Y$ lies on the sphere, as we used Alexander-Pontryagin duality.
\end{remark}
As a corollary we get the following theorem.
\begin{teo}\label{numerical} Let $X$ be the intersection of $k$ quadrics in $\mathbb{R}P^n$. Then:
$$b(X)\leq O(n)^{k-1}$$
\end{teo}
\begin{proof}
We use the bound given in Theorem \ref{topological}: the proof essentially amounts to collecting the estimates given by the previous proposition for each summand $b\big(\Sigma_{\epsilon}^{(r)}\big)$. By construction $\Sigma_\epsilon^{(r)}$ is a determinantal variety and it is defined by polynomials $f_1,\ldots, f_R$ of degree $d=n-r+2$ on the sphere $S^{k-1}$. The result now follows by plugging the bounds given in Proposition \ref{sphericalbound} into the summands of Theorem \ref{topological} (there are only $\sigma_k$ such summands). \end{proof}
\begin{remark}
As suggested to the author by S. Basu, there are two other possible ways to get such numerical estimates. The first one is using a general position argument similar to \cite{BaBa} and combinatorial Mayer-Vietoris bounds as in \cite{BPR}; the second one is using again a general position argument and stratified Morse theory (which in the semialgebraic case is very well controlled, as noticed in \cite{BasuCell}). Both these approaches also work in the \emph{affine} case, producing a bound of the same shape; in the projective case it seems that also the leading \emph{coefficient} is the same. As for numerical uniform bounds, the advantage of the first one is that it is applicable to more general cases, i.e. beyond the quadratic one. To the author's knowledge nothing has been published on the subject: together with S. Basu he plans to give an account of these different techniques in a forthcoming paper. \end{remark}
We introduce the following notation:
$$B(k,n)=\max\{b(X)\,|\,\textrm{$X$ is the intersection of $k$ quadrics in $\mathbb{R}P^n$}\}.$$
We discuss now the sharpness of the previous bound, showing that:
$$B(k,n)=O(n)^{k-1}$$
Theorem \ref{numerical} gives the inequality $B(k,n)\leq O(n)^{k-1}$; for the opposite inequality we need to produce for every $k$ and $n$ an intersection $M_\mathbb{R}$ of $k$ quadrics in $\mathbb{R}P^n$ with $b(M_\mathbb{R})=O(n)^{k-1}.$
Let us first notice that, repeating the same argument as in Proposition \ref{sphericalbound}, we can deduce that the complete intersection $M$ of $k$ quadrics in $\mathbb{C}P^n$ has:
$$b(M)=b(M;\mathbb{Z})=O(n)^{k-1}$$
(this computation is already performed in \cite{BaBa}). It is a known result that there exists a real maximal $M$, i.e. a complete intersection of $k$ \emph{real} quadrics in $\mathbb{C}P^n$ whose real part $M_\mathbb{R}$ satisfies:
$$b(M_\mathbb{R})=b(M)$$
Such an existence result holds in general for any complete intersection of multidegree $(d_1, \ldots, d_k)$. An asymptotic construction is provided in \cite{ItVi1}; the proof for the general case has not been published yet but the author has been informed that it will be the subject of a forthcoming paper of the same authors as \cite{ItVi1}.
\section{Examples}
\begin{example}[$k=2$]
In the case $X$ is the intersection of \emph{two} quadrics in $\mathbb{R}P^n$, the ideas previously discussed produce the sharp bound $b(X)\leq 2n$: in fact by inequality (\ref{refined2}) we have:
$$b(X)\leq n+1-2(\mu-\nu) +\frac{1}{2}b(\Sigma_\epsilon)$$
(every $\Sigma_\epsilon^{(r)}$ with $r>1$ is empty). On the other hand $\Sigma_\epsilon$ is defined by an equation of degree $n+1$ on the circle $S^1$, and thus (for instance by B\'ezout's theorem, intersecting $S^1$ with the curve $\{\det(\omega q-\epsilon p)=0\}$ of degree $n+1$) it consists of at most $2(n+1)$ points. This gives:
$$b(X)\leq n+1-2(\mu-\nu)+n+1\leq 2n$$
(in the case $\mu=\nu$ we have $b(X)\leq n+1$ ). Moreover for every $n$ there exist two quadrics in $\mathbb{R}P^n$ whose intersection $X$ satisfies $b(X)=2n$ (see Example 2 of \cite{Le2}). Notice that the example provided there in the case $n$ \emph{odd} gives a \emph{singular} $X.$ Using the notation introduced above, this reads:
$$B(2,n)=2n$$
What is interesting now is that for \emph{odd} $n$ this number $B(2,n)$ is attained only by a singular intersection of quadrics: a nonsingular one satisfies $b(X)\leq 2n-2$ (this follows from Smith's inequality and Hirzebruch's formula for the complete intersection of two quadrics in $\mathbb{C}P^n$). For a more detailed discussion the reader is referred to Section 6 of \cite{Le2}.
\end{example}
\begin{example}[$k=3$]
In the case $X$ is the intersection of three quadrics, inequality (\ref{refined2}) gives:
$$b(X)\leq n+1-2(\mu-\nu) +\frac{1}{2}b(\Sigma_\epsilon)$$
Again, since the codimension of $\Sigma_\epsilon^{(r)}$ is at least three for $r\geq 2$, in this case all these sets except $\Sigma_\epsilon$ are empty (since $k=3$ these sets are subsets of the sphere $S^2$). This also says that $\Sigma_\epsilon$ is a smooth curve on $S^2$; let $f=\|\omega\|^2-1 $ and $g=\det(\omega q-\epsilon p)$ be the polynomials defining this curve and $F,G$ their homogenizations. Then there exists a \emph{real} perturbation $\tilde G$ of $G$ that makes the common zero locus $C$ of $\tilde G$ and $F$ a smooth complete intersection in $\mathbb{C}P^3$. Since $\Sigma_\epsilon$ is smooth, the real part $C_\mathbb{R}$ of this complete intersection has the same topology as $\Sigma_\epsilon$, and by Theorem \ref{topological}:
$$b(X)\leq \frac{1}{2}b(C)+O(n)$$
Recall that equation (\ref{completedelta}) was obtained for a complete intersection of multidegree $(2,\delta)$ in $\mathbb{C}P^k$; specializing it to the present case $k=3$, $\delta=n+1$ we get $b(C)\leq 2n^2+O(n)$, which plugged into the previous inequality gives:
$$b(X)\leq n^2+O(n).$$
(indeed Theorem 1 of \cite{Le3} gives the refined bound $b(X)\leq n^2+n$).
We notice now that in this case:
$$B(3,n)=n^2+O(n).$$
In fact the previous inequality provides the upper bound, and the lower bound is given by the existence of almost maximal real complete intersections of three quadrics (see \cite{Krasnov2} for an explicit construction of such maximal varieties).\\
In the case $X$ is smooth, using the spectral sequence approach the authors of \cite{DeItKh} have proved that the maximum value $B_0(3,n)$ of $b_0(X)$ satisfies:
$$\frac{1}{4}(n-1)(n+5)-2<B_0(3,n)\leq \frac{3}{2}l(l-1)+2$$
where $l=\lfloor \frac{1}{2}n\rfloor+1$. Notice in particular that $\frac{1}{4}\leq\liminf \frac{B_0(3,n)}{n^2}\leq \frac{3}{8}$ as $n$ goes to infinity. \end{example}
\begin{example}[$k=4$]\label{ex:four} This is the first case where we need to take into account the complexity of the singular points of $\Sigma_\epsilon$. As promised, we give a simplified proof of part ii) of Theorem \ref{main} for this case, with the aim of getting the reader acquainted with the idea of that proof. Let $p\in \mathcal{Q}_{n+1}$ be the positive definite form given by Lemma \ref{perturb}; then by Lemma \ref{union} and Agrachev's bound we get:
$$b(X)\leq \sum_{\nu\leq j\leq \mu-1}b( \Omega_j(\epsilon))+O(n)$$
where the $\Omega_{j}(\epsilon)$ are now subsets of the sphere $S^3.$
Corollary \ref{boundary} says that each of them is a manifold with boundary; let us rename such boundaries as above: $C_j=\partial \Omega_{\nu+j-1}(\epsilon)$ for $j=1,\ldots, l=\mu-\nu.$ Lemma \ref{boundary2} now allows us to use $\frac{1}{2}b(C_j)$ instead of $b(\Omega_{\nu+j-1}(\epsilon))$ in the previous bound, getting:
$$b(X)\leq \frac{1}{2}\sum_{j=1}^lb(C_j)+O(n)$$
Now we have that $\Sigma_\epsilon$ is a surface on $S^3$ given by $C_1\cup\cdots\cup C_l$, but this union is not disjoint since singular points may occur. These singular points are isolated, since their union (if nonempty) has codimension $3$ on the sphere $S^3$. The set $\Sigma_\epsilon^{(2)}=\textrm{Sing}(\Sigma_\epsilon)$ equals exactly the set of points where two different $C_j$ intersect. On the other hand if $|i-j|\geq2$ then $C_j\cap C_i=\emptyset$, since any point on this intersection would have a kernel of dimension at least three. Thus $\Sigma_\epsilon$ is obtained by taking the abstract disjoint union of the sets $C_1, \ldots, C_l$ and identifying the points on $C_j\cap C_{j+1}$ for $j=1, \ldots, l-1.$ This identification procedure can increase the number of generators of the fundamental group; the number of connected components, instead, can decrease by at most $b(\textrm{Sing}(\Sigma_\epsilon)),$ which is to say:
$$b(\Sigma_\epsilon)\geq b\bigg(\coprod_{j=1}^lC_j\bigg)-b(\textrm{Sing}(\Sigma_\epsilon))$$
The following picture shows an example of how this identification procedure works: $\Sigma_\epsilon$ has two singular points and is obtained by gluing the disjoint union of $C_1$ and $C_2$ along two copies of these singular points (one copy is on $C_1$ and one on $C_2$).
\begin{center}
\fontsize{15}{0}
\scalebox{0.55}
{
\begin{pspicture}(0,-2.148847)(22.819946,2.1288471)
\psbezier[linewidth=0.04](5.1113057,1.0390087)(4.85164,1.3660076)(4.392474,1.6463987)(3.717313,1.7790587)(3.0421524,1.9117188)(0.68332964,1.899532)(0.34166482,0.84969366)(0.0,-0.20014468)(1.0796608,-1.4909295)(2.6239858,-1.4909295)(4.1683106,-1.4909295)(4.564642,-1.2155621)(5.0429726,-0.50993305)
\psbezier[linewidth=0.04](6.429306,-0.50993305)(6.866637,-0.68203765)(7.2133727,-1.0719202)(7.8642983,-1.1639307)(8.5152235,-1.2559413)(11.097085,-1.2170762)(11.168515,-0.36126745)(11.239946,0.49454126)(10.829948,2.0888472)(8.886195,1.9793557)(6.9424405,1.8698643)(6.770971,1.2283239)(6.494541,1.0155697)
\psbezier[linewidth=0.04](2.2686543,-0.2552181)(2.2686543,-0.7600582)(3.4989479,-0.7853002)(3.4989479,-0.2804601)
\psbezier[linewidth=0.04](2.326893,-0.4192911)(2.5311434,-0.08149369)(3.229179,-0.028040027)(3.4261494,-0.45715412)
\psbezier[linewidth=0.04](0.95666146,0.58631426)(0.95666146,0.05860586)(2.0536885,0.30267102)(2.0636556,0.8445869)
\psbezier[linewidth=0.04](0.9953732,0.41480902)(1.1311399,0.76790804)(1.8943169,1.0734297)(2.0237641,0.6157441)
\psbezier[linewidth=0.04](8.506628,0.674147)(8.506628,0.16930687)(9.736921,0.14406487)(9.736921,0.648905)
\psbezier[linewidth=0.04](8.564867,0.51007396)(8.769117,0.8478714)(9.467153,0.90132505)(9.664124,0.47221094)
\psbezier[linewidth=0.04](5.097639,1.0217983)(4.85164,0.79806226)(4.2776437,0.5226948)(4.236644,0.19569601)
\psbezier[linewidth=0.04](5.0429726,-0.50993305)(4.6466413,-0.3894598)(4.2093105,-0.18293421)(4.1819773,0.50548434)
\psbezier[linewidth=0.04](6.497639,1.0190088)(7.0169697,0.5715367)(7.3859677,0.64037853)(7.4953003,0.36501113)
\psbezier[linewidth=0.04](6.4429727,-0.52993304)(6.97597,0.17569602)(7.5226336,-0.16851328)(7.508967,0.5199053)
\psbezier[linewidth=0.04](18.071306,1.0590087)(17.81164,1.3860075)(17.352474,1.6663986)(16.677313,1.7990588)(16.002151,1.9317188)(13.64333,1.919532)(13.301664,0.86969364)(12.96,-0.18014467)(14.03966,-1.4709295)(15.583985,-1.4709295)(17.128311,-1.4709295)(17.524641,-1.1955621)(18.002974,-0.48993304)
\psbezier[linewidth=0.04](17.989305,-0.48993304)(18.426638,-0.6620377)(18.773373,-1.05192)(19.424297,-1.1439307)(20.075224,-1.2359413)(22.657085,-1.1970762)(22.728516,-0.34126747)(22.799946,0.51454127)(22.389948,2.1088471)(20.446196,1.9993557)(18.502441,1.8898643)(18.33097,1.2483239)(18.05454,1.0355697)
\psbezier[linewidth=0.04](15.228654,-0.23521808)(15.228654,-0.7400582)(16.458948,-0.7653002)(16.458948,-0.26046008)
\psbezier[linewidth=0.04](15.286893,-0.39929113)(15.491143,-0.061493687)(16.189178,-0.008040028)(16.386148,-0.43715414)
\psbezier[linewidth=0.04](13.916661,0.60631424)(13.916661,0.07860586)(15.013688,0.322671)(15.023656,0.8645869)
\psbezier[linewidth=0.04](13.955373,0.43480903)(14.09114,0.787908)(14.854317,1.0934297)(14.983764,0.63574415)
\psbezier[linewidth=0.04](20.066628,0.694147)(20.066628,0.18930689)(21.29692,0.16406487)(21.29692,0.668905)
\psbezier[linewidth=0.04](20.124866,0.53007394)(20.329117,0.8678714)(21.027153,0.921325)(21.224123,0.49221095)
\psbezier[linewidth=0.04](18.057638,1.0417984)(17.81164,0.81806225)(17.237644,0.5426948)(17.196644,0.215696)
\psbezier[linewidth=0.04](18.002974,-0.48993304)(17.606642,-0.36945978)(17.16931,-0.16293421)(17.141977,0.5254844)
\psbezier[linewidth=0.04](18.057638,1.0590087)(18.57697,0.6115367)(18.945967,0.68037856)(19.0553,0.40501112)
\psbezier[linewidth=0.04](18.002974,-0.48993304)(18.53597,0.215696)(19.082634,-0.12851328)(19.068966,0.5599053)
\psdots[dotsize=0.12](5.099947,1.0488482)
\psdots[dotsize=0.12](5.019947,-0.5111518)
\psdots[dotsize=0.12](6.439947,-0.5111518)
\psdots[dotsize=0.12](6.499947,1.0288482)
\psline[linewidth=0.027999999cm,linestyle=dashed,dash=0.16cm 0.16cm](5.099947,1.0488482)(6.479947,1.0088482)
\psline[linewidth=0.027999999cm,linestyle=dashed,dash=0.16cm 0.16cm](5.019947,-0.5111518)(6.419947,-0.5111518)
\usefont{T1}{ptm}{m}{n}
\rput(2.5014021,-1.9261518){$C_1$}
\usefont{T1}{ptm}{m}{n}
\rput(8.761402,-1.9261518){$C_2$}
\usefont{T1}{ptm}{m}{n}
\rput(18.091402,-1.9261518){$\Sigma_\epsilon$}
\usefont{T1}{ptm}{m}{n}
\rput(12.131402,0.41384822){$\longrightarrow$}
\end{pspicture}
}
\end{center}
Plugging this into the above inequality for $b(X)$ we get:
$$b(X)\leq \frac{1}{2}\big\{b(\Sigma_\epsilon)+b(\textrm{Sing}(\Sigma_\epsilon))\big\}+O(n)$$
Proposition \ref{sphericalbound} says that both $\Sigma_\epsilon$ and $\textrm{Sing}(\Sigma_\epsilon)$ have complexity bounded by $16n^3$ and thus:
\begin{equation}\label{four}b(X)\leq 16n^3+O(n).\end{equation}
On the other hand we notice that if $X$ is the real part of a smooth complete intersection of four real quadrics in $\mathbb{C}P^n$, then $b(X)\leq \frac{2}{3}n^3+O(n)$; thus the above bound is sharp only in its shape. By the above discussion on the topology of determinantal varieties, we expect (\ref{four}) can be improved.\end{example}
\end{document}
\begin{document}
\begin{abstract}
Hardy's inequality for Laguerre expansions of Hermite type with the index $\al\in(\{-1/2\}\cup[1/2,\infty))^d$ is proved in the multi-dimensional setting with the exponent $3d/4$. We also obtain the sharp analogue of Hardy's inequality with $L^1$ norm replacing $H^1$ norm at the expense of increasing the exponent by an arbitrarily small value.
\end{abstract}
\maketitle
\footnotetext{
\emph{2010 Mathematics Subject Classification:} Primary: 42C10; Secondary: 42B30, 33C45\\
\emph{Key words and phrases:} Hardy's inequality, Hardy's space, Laguerre expansions of Hermite type\\
Research partially supported by funds of Faculty of Pure and Applied Mathematics, Wrocław University of Science and Technology, \#0401/0121/17.\\
The paper is a part of the author's doctoral thesis written under the supervision of Professor Krzysztof Stempak.}
\section{Introduction}
The well-known Hardy inequality states that
\begin{equation*}
\sum_{k\in\mathbb{Z}}\frac{\vert \hat{f}(k)\vert}{\vert k\vert+1}\lesssim \Vert f\Vert_{\mathrm{ Re} H^1},
\end{equation*}
where $\hat{f}(k)$ is the $k$-th Fourier coefficient of $f$. Here $\mathrm{ Re} H^1$ is the real Hardy space composed of the boundary values of the real parts of functions in the Hardy space $H^1(\mathbb{D})$, where $\mathbb{D}$ is the unit disk in the plane.
Kanjin \cite{Kanjin1} established an analogue of Hardy's inequality in the context of Hermite functions $\{h_k\}_{k\in\mathbb{N}}$ and the standard Laguerre functions $\{\mathcal{L}_k^{\al}\}_{k\in\mathbb{N}}$, $\al\geq 0$, namely
\begin{equation*}
\sum_{k\in\mathbb{N}}\frac{\vert \langle f,h_k\rangle_{L^2(\mr)}\vert}{ (k+1)^{29/36}}\lesssim \Vert f\Vert_{H^1(\mr)},\qquad \sum_{k\in\mathbb{N}}\frac{\vert \langle f,\mathcal{L}_k^{\al}\rangle_{L^2(\mr_+)}\vert}{k+1}\lesssim \Vert f\Vert_{H^1(\mr_+)},
\end{equation*}
where $H^1(\mr)$ and $H^1(\mr_+)$ denote the real Hardy spaces on $\mr$ and $\mr_+$, respectively.
Hardy's inequality in the context of Hermite functions was further intensively studied by many authors. Radha \cite{Radha} proved a similar inequality in an arbitrary dimension. In \cite{RadhaThangavelu} an improved version of Hardy's inequality was introduced in the multi-dimensional case, $d\geq 2$, by Radha and Thangavelu. The exponent in the denominator was $3d/4$. This led to the hypothesis that in the one-dimensional case the exponent should be equal to $3/4$. It was indeed proved in \cite{LiYuShi} by Z. Li, Y. Yu and Y. Shi. A generalization of Kanjin's results, with the spaces $H^p(\mr)$ and $H^p(\mr_+)$, $p\in(0,1]$, instead of $H^1(\mr)$ and $H^1(\mr_+)$, was also considered in the context of Hermite functions (see \cite{BalasRadha, RadhaThangavelu}) and in the context of Laguerre functions (see \cite{RadhaThangavelu, Satake}).
In this paper we study multi-dimensional Hardy's inequality in the context of Laguerre functions of Hermite type $\{\lfun\}_{n\in\mathbb{N}^d}$. In view of the uniform boundedness of the derivatives of functions $\lfun$ and \cite[Lemma]{Kanjin1} we have the one-dimensional inequality
$$\sum_{k=0}^{\infty}\frac{\vert\langle f, \lfunk\rangle\vert}{(k+1)^{29/36}}\lesssim \Vert f\Vert_{H^1(\mr_+)}.$$
Our aim is to obtain the analogue of this inequality with the power $3d/4$, which does not depend on $\al$, and in dimension $d\geq1$.
The proof of one of the main results, Theorem \ref{main_thm}, is based on the atomic decomposition of functions from $H^1(\mr_+^d)$ and relies on a uniform estimate for atoms and an additional argument concerning the ``weak'' continuity of certain operators. Without this argument, which was often omitted in papers concerning this topic, the proof would have a gap. We remark that the uniform estimate for atoms does not imply continuity of operators that appear in analysis involving the atomic decomposition of $H^1(\mr^d)$ (see \cite{Bownik}).
The range of the Laguerre type multi-index $\al$ that is considered in Theorem \ref{main_thm} is the set $(\{-1/2\}\cup[1/2,\infty))^d$. This kind of restriction has appeared before (see for example \cite{NowakStempak}). Note that the one-dimensional Laguerre functions of Hermite type with the Laguerre type index equal to $-1/2$ or $1/2$ are, up to a multiplicative constant, the Hermite functions of even or odd degree, respectively. Therefore, it was fair to assume that these values of $\al$ should be included. Technically, the restriction emerges from the range of $\al$'s for which the derivatives of the Laguerre functions of Hermite type are uniformly bounded. It may also be related to the fact that the associated heat semi-group is a semi-group of $L^p$ contractions precisely for this set of $\al$'s (see \cite{NowakStempak2}).
In \cite{Kanjin2} Kanjin proved that if the exponent in one-dimensional Hardy's inequality in the context of Hermite functions is strictly greater than $3/4$, then one can replace $H^1(\mr)$ norm by $L^1(\mr)$ norm and, moreover, the exponent $3/4$ is sharp. In Theorem \ref{L1_thm} we shall prove that this is also the case in the context of Laguerre functions of Hermite type and extend this result to an arbitrary dimension.
We shall frequently use two basic estimates: for $a,\,A>0$ we have $\sup_{x>0} x^a e^{-Ax}< \infty$ and $(n_1+\ldots+n_d+1)^d\geq (n_1+1)\cdot\ldots\cdot(n_d+1)$, where $n_i\in\mathbb{N}$, $i=1,\ldots,d$.
\subsection*{Notation}
Throughout this paper we write $n=(n_1,\ldots,n_d)\in\mathbb{N}^d$ for a multi-index and $\vert n\vert=n_1+\ldots+n_d$ for its length, where $\mathbb{N}=\{0,1,\ldots\}$ and $d\geq 1$. The Laguerre type multi-index $\al=(\al_1,\ldots,\al_d)$, unless stated otherwise, is considered in the full range, i.e. $\al\in(-1,\infty)^d$. We shall also use the notation $\mr^d_+=(0,\infty)^d$ and $\mathbb{N}_+=\{1,2,\ldots\}$. For functions $f,\,g\in L^2(\mr_+^d,dx)$ we denote the inner product by $\langle f,g\rangle$. Sometimes we shall use this notation for functions that are not in $L^2(\mr_+^d,dx)$ but for which the underlying integral makes sense. We shall use the symbol $\lesssim$ to denote an inequality with a constant that does not depend on relevant parameters. Also, the symbol $\simeq$ means that $\lesssim$ and $\gtrsim$ hold simultaneously. Moreover, we will denote asymptotic equality by $\approx$.
\section{Preliminaries}
The {\it Laguerre functions of Hermite type} of order $\al$ on $\mr^d_+$ are the functions
$$\lfun(x)=\prod_{i=1}^d \lfuni (x_i),\qquad x=(x_1,\ldots,x_d)\in\mr^d_+, $$
where $\lfuni(x_i)$ is the one-dimensional Laguerre function of Hermite type defined by
$$\lfuni(x_i)=\Big(\frac{2\Gamma(n_i+1)}{\Gamma(n_i+\al_i+1)} \Big)^{1/2}L_{n_i}^{\al_i}(x_i^2)x_i^{\al_i+1/2}e^{-x_i^2/2},\qquad x_i>0.$$
The functions $\{\lfun\colon n\in\mathbb{N}^d\}$ form an orthonormal basis in $L^2(\mr_+^d,dx)$.
The one-dimensional {\it standard Laguerre functions} $\{\mathcal{L}_k^{\alpha}\}_{k\in\mathbb{N}}$ of order $\al$ are
$$\mathcal{L}_{k}^{\al}(u)=\Big(\frac{\Gamma(k+1)}{\Gamma(k+\al+1)} \Big)^{1/2}L_{k}^{\al}(u)u^{\al/2}e^{-u/2},\qquad u>0.$$
Note that
\begin{equation*}
\lfunk(u)=(2u)^{1/2}\mathcal{L}_{k}^{\al}(u^2).
\end{equation*}
We shall use the pointwise asymptotic estimates (see \cite[p.~435]{Muckenhoupt} and \cite[p.~699]{AskeyWainger})
\begin{equation*}
\vert \mathcal{L}_{k}^{\al}(u)\vert\lesssim\left\{ \begin{array}{ll}
(u\nu)^{\al/2}, & 0<u\leq 1/\nu,\\
(u\nu)^{-1/4}, & 1/\nu<u\leq\nu/2,\\
(\nu(\nu^{1/3}+\vert u- \nu\vert))^{-1/4}, & \nu/2<u\leq 3\nu/2,\\
\exp(-\gamma u), & 3\nu/2<u<\infty,
\end{array}\right.
\end{equation*}
where $\nu=\nu(\al,k)=\max(4k+2\al+2,2)$ and with $\gamma>0$ depending only on $\alpha$. Hence,
\begin{equation}\label{pointwise_estimates_lfun}
\vert\lfunk(u)\vert\lesssim\left\{ \begin{array}{ll}
u^{\al+1/2}\nu^{\al/2}, & 0<u\leq 1/\sqrt{\nu},\\
\nu^{-1/4}, & 1/\sqrt{\nu}<u\leq\sqrt{\nu/2},\\
u^{1/2}(\nu(\nu^{1/3}+\vert u^2- \nu\vert))^{-1/4}, & \sqrt{\nu/2}<u\leq \sqrt{3\nu/2},\\
u^{1/2}\exp(-\gamma u^2), & \sqrt{3\nu/2}<u<\infty.
\end{array}\right.
\end{equation}
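Let us briefly indicate how \eqref{pointwise_estimates_lfun} follows from the estimates for $\mathcal{L}_{k}^{\al}$ via the relation $\lfunk(u)=(2u)^{1/2}\mathcal{L}_{k}^{\al}(u^2)$. For instance, if $1/\sqrt{\nu}<u\leq\sqrt{\nu/2}$, then $1/\nu<u^2\leq\nu/2$ and
$$\vert\lfunk(u)\vert\lesssim u^{1/2}(u^2\nu)^{-1/4}=\nu^{-1/4};$$
the remaining three regimes are obtained in the same way by substituting $u^2$ into the corresponding estimate for $\mathcal{L}_{k}^{\al}$.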
There is a known formula for the derivatives of the functions $\lfunk$, namely
\begin{equation}\label{derivative_formula}
\frac{d}{d u}\lfunk(u)=-2\sqrt{k}\varphi_{k-1}^{\al+1}(u)+\left(\frac{2\al+1}{2u}-u\right)\lfunk(u),
\end{equation}
where $\varphi_{-1}^{\al}\equiv 0$.
From \eqref{pointwise_estimates_lfun} it follows that for $\al\geq -1/2$ we have
\begin{equation}\label{Linfinity_boundedness}
\Vert \lfunk\Vert_{L^{\infty}(\mr_+)}\lesssim (k+1)^{-1/12},
\end{equation}
and also by \eqref{derivative_formula} for $\al\in\{-1/2\}\cup[1/2,\infty)$,
\begin{equation}\label{Linfinity_bound_lfun}
\left\Vert \frac{d}{d \boldsymbol{\cdot}}\lfunk(\boldsymbol{\cdot})\right\Vert_{L^{\infty}(\mr_+)}\lesssim (k+1)^{5/12}.
\end{equation}
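Let us also briefly indicate where these exponents come from (a sketch; we only track the dominant regime). In the third regime of \eqref{pointwise_estimates_lfun} one has $u\simeq\nu^{1/2}$ and $\nu^{1/3}+\vert u^2-\nu\vert\geq\nu^{1/3}$, hence
$$\vert\lfunk(u)\vert\lesssim \nu^{1/4}\big(\nu\cdot\nu^{1/3}\big)^{-1/4}=\nu^{-1/12}\simeq(k+1)^{-1/12},$$
while for $\al\geq-1/2$ the remaining regimes give at least the bound $\nu^{-1/4}$ (up to an exponentially small term). As for \eqref{Linfinity_bound_lfun}, the dominant contribution in \eqref{derivative_formula} is the term $2\sqrt{k}\,\varphi_{k-1}^{\al+1}$, which by \eqref{Linfinity_boundedness} is of order $k^{1/2}(k+1)^{-1/12}\lesssim(k+1)^{5/12}$; the remaining term is controlled by means of \eqref{pointwise_estimates_lfun}, and it is precisely there that the restriction $\al\in\{-1/2\}\cup[1/2,\infty)$ is used.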
We introduce the family of operators $\{\Rop\}_{r\in(0,1)}$, defined spectrally for $f\in L^2(\mr^d_+)$, by
\begin{equation*}
\Rop f=\sum_{n\in\mathbb{N}^d} r^{\vert n\vert} \langle f,\lfun\rangle\lfun.
\end{equation*}
It is easily seen by means of Parseval's identity that for every $r\in(0,1)$, the operator $\Rop$ is a contraction on $L^2(\mr_+^d)$.
The kernel associated with $\Rop$ is defined by
\begin{equation*}
\Rop(x,y)=\sum_{n\in\mathbb{N}^d} r^{\vert n\vert} \lfun(x)\lfun(y),\qquad x,y\in\mr^d_+.
\end{equation*}
Note that
$$\Rop(x,y)=\prod_{i=1}^d \Ropi(x_i,y_i) $$
and there is also the explicit formula (compare \cite[p.~102]{Szego})
\begin{equation}\label{def_R_ker_explic}
\Ropi(x_i,y_i)=\frac{2 (x_i y_i)^{1/2}}{(1-r)r^{\al_i/2}}\exp\left(-\frac{1}{2}\frac{1+r}{1-r}(x_i^2+y_i^2)\right)I_{\al_i }\left(\frac{2r^{1/2}}{1-r}x_i y_i\right),
\end{equation}
where $I_{\alpha_i}$ denotes the modified Bessel function of the first kind, which is smooth and positive on $(0,\infty)$. Notice that with $r=e^{-4t}$, $t>0$, $r^{(\vert\al\vert+d)/2}\Rop(x,y)$ is just the kernel $G_t^{\al}(x,y)$, see \cite[(2.3)]{NowakStempak}, for a differential operator $L^{\al}$ associated with $\{\lfun\}$-expansions.
Let $H^1(\mr^d)$ be the real Hardy space on $\mr^d$ (see, for example, \cite[III]{Stein}). A measurable function $a(x)$ supported in a Euclidean ball $B$ is an $H^1(\mr^d)$ atom if $\Vert a\Vert_{L^{\infty}(\mr^d)}\leq \vert B\vert^{-1}$, where $\vert B\vert$ denotes the Lebesgue measure of $B$, and $\int_B a(x)\,dx=0$. Every function $f\in H^1(\mr^d)$ has an atomic decomposition, namely there exist a sequence of complex coefficients $\{\lambda_i\}_{i=0}^{\infty}$ and a sequence of $H^1(\mr^d)$ atoms $\{a_i\}_{i=0}^{\infty}$ such that
\begin{equation}\label{atomic_decomposition}
f=\sum_{i=0}^{\infty}\lambda_i a_i, \qquad \sum_{i=0}^{\infty} \vert \lambda_i\vert\lesssim \Vert f\Vert_{H^1(\mr^d)},
\end{equation}
where the convergence of the first series is in $H^1(\mr^d)$.
The Hardy space on $\mr_+^d$ is defined by
$$H^1(\mr_+^d)=\{f\in L^1(\mr^d_+)\colon \exists \tilde{f}\in H^1(\mr^d),\ \mathrm{supp}(\tilde{f})\subset [0,\infty)^d \text{ and } \tilde{f}\big\vert_{\mr_+^d}=f\}, $$
with the norm $\Vert f\Vert_{H^1(\mr_+^d)}=\Vert \tilde{f}\Vert_{H^1(\mr^d)}$.
The properties of $H^1(\mr_+^d)$ given below follow from \cite[Lemma 7.40]{GarciaFrancia}, which is stated therein in the one-dimensional case but easily generalizes to an arbitrary dimension. Every $f\in H^1(\mr_+^d)$ has an atomic decomposition as in \eqref{atomic_decomposition} with the supports of $a_i$ contained in $[0,\infty)^d$; we shall call them $H^1(\mr_+^d)$ atoms. Note that a ball in $\mr_+^d$ is a ball in $\mr^d$ restricted to $\mr_+^d$. We may assume that every ball associated with an $H^1(\mr_+^d)$ atom has its center in $\mr_+^d$.
For $f\in L^1(\mr_+^d)$ we define the multi-even extension $f_e$ of $f$ by
$$f_e(x_1,\ldots,x_d)= f(\vert x_1\vert,\ldots,\vert x_d\vert),\qquad x=(x_1,\ldots,x_d)\in\mr^d.$$
We remark (again see \cite[Lemma 7.40]{GarciaFrancia}) that $f_e\in H^1(\mr^d)$ if and only if $f\in H^1(\mr_+^d)$, and $\Vert f\Vert_{H^1(\mr_+^d)}\simeq \Vert f_e\Vert_{H^1(\mr^d)}$, thus we have
\begin{equation}\label{L1&H1_inequality}
\Vert f\Vert_{L^1(\mr_+^d)}\lesssim \Vert f\Vert_{H^1(\mr_+^d)}.
\end{equation}
\section{One-dimensional kernel estimates}\label{section_one_dimension}
We shall estimate the kernels $\Ropi(x_i,y_i)$. For the sake of convenience we will write $x,y,\al$ instead of $x_i,y_i,\al_i$.
Recall the known asymptotic estimates (see \cite[p.~136]{Lebedev})
\begin{align*}
&I_{\al}(u) \lesssim u^{\al}, \quad 0<u<1,\\
&I_{\al}(u) \lesssim u^{-1/2}e^{u}, \quad u\geq 1.
\end{align*}
Hence,
\begin{equation}\label{R_ker_estim}
\Rop(x,y)\lesssim\left\{\begin{array}{ll}
(1-r)^{-\al-1}(xy)^{\al+1/2}\exp\left(-\frac{1}{2}\frac{1+r}{1-r}(x^2+y^2) \right), & y\leq \frac{1-r}{2\sqrt{r}x},\\
(1-r)^{-1/2}r^{-\al/2-1/4}\exp\left(-\frac{1}{2}\frac{1+r}{1-r}(y-x)^2-\frac{xy(1-r)}{(1+\sqrt{r})^2} \right), & y\geq \frac{1-r}{2\sqrt{r}x}.
\end{array}\right.
\end{equation}
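Let us also record the elementary identity behind the second branch above (a short computation; there the asymptotics $I_{\al}(u)\lesssim u^{-1/2}e^{u}$ is applied with $u=\frac{2\sqrt{r}xy}{1-r}\geq 1$):
$$-\frac{1}{2}\frac{1+r}{1-r}(x^2+y^2)+\frac{2\sqrt{r}xy}{1-r}=-\frac{1}{2}\frac{1+r}{1-r}(y-x)^2-\frac{(1-\sqrt{r})^2}{1-r}xy=-\frac{1}{2}\frac{1+r}{1-r}(y-x)^2-\frac{xy(1-r)}{(1+\sqrt{r})^2},$$
since $(1-\sqrt{r})^2/(1-r)=(1-\sqrt{r})/(1+\sqrt{r})=(1-r)/(1+\sqrt{r})^2$.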
{\lm\label{lemma_R} For $\al\geq -1/2$ we have
$$\sup_{x\in\mr_+}\Vert \Rop(x,\boldsymbol{\cdot})\Vert_{L^2(\mr_+)}\lesssim (1-r)^{-1/4},\qquad r\in(0,1). $$}
\begin{proof}
For $0<r\leq 1/2$ we use Parseval's identity and \eqref{Linfinity_boundedness} obtaining
$$\sup_{x\in\mr_+}\Vert \Rop(x,\boldsymbol{\cdot})\Vert_{L^2(\mr_+)}\leq \sup_{x\in\mr_+}\Big\Vert \sum_{k\in\mathbb{N}}2^{-k} \lfunk(x)\lfunk \Big\Vert_{L^2(\mr_+)}\leq \Big(\sum_{k\in\mathbb{N}}2^{- 2k}\Vert \lfunk\Vert_{L^{\infty}(\mr_+)}^2\Big)^{1/2}\lesssim 1.$$
For $1/2<r<1$ we denote $y_0=(1-r)/(2\sqrt{r}x)$ and estimate the integrals over $(0,y_0]$ and $(y_0,\infty)$. Thus, using the substitution $u=(y\sqrt{1+r})/\sqrt{1-r}$ we obtain
\begin{align*}
&\int_0^{y_0} \Rop(x,y)^2\,dy\\
&\lesssim \Big(\frac{x^2}{1-r}\Big)^{\al+1/2}\exp\Big(-\frac{1+r}{1-r}x^2\Big) (1-r)^{-\al-3/2}\int_0^{y_0}y^{2\al+1}\exp\Big(-\frac{1+r}{1-r}y^2\Big)\,dy\\
&\lesssim (1-r)^{-\al-3/2}\int_0^{\frac{y_0\sqrt{1+r}}{\sqrt{1-r}}}(1-r)^{\al+1/2}u^{2\al+1}e^{-u^2}(1-r)^{1/2}\,du\\
&\lesssim (1-r)^{-1/2},
\end{align*}
uniformly in $x\in\mr_+$ and $r\in(0,1)$. Similarly,
\begin{align*}
\int_{y_0}^{\infty} \Rop(x,y)^2\,dy&\lesssim (1-r)^{-1}\int_{y_0}^{\infty}\exp\Big(-\frac{1+r}{1-r}(y-x)^2\Big)\,dy\\
&\lesssim (1-r)^{-1}\int_{-\infty}^{\infty}\exp\Big(-\frac{1+r}{1-r}y^2\Big)\,dy\\
&\lesssim (1-r)^{-1/2},
\end{align*}
uniformly in $x\in\mr_+$ and $r\in(0,1)$. Combining the above gives the claim.
\end{proof}
{\lm\label{deri_R_ker} For $\al>0$ we have
\begin{align*}
\partial_x\Rop(x,y)&=\Rop(x,y)\left(-\frac{2\al-1}{2x}-\frac{(1+r)x}{1-r}+\frac{2\sqrt{r}y}{1-r}\frac{I_{\al-1}(\frac{2\sqrt{r}xy}{1-r})}{I_{\al}(\frac{2\sqrt{r}xy}{1-r})} \right)\\
&=\frac{2y}{1-r}R^{\alpha-1}_r(x,y)-\left(\frac{2\al-1}{2x}+\frac{(1+r)x}{1-r}\right)\Rop(x,y).
\end{align*}
}
\begin{proof}
It suffices to use the formula
$$\frac{d}{du}I_{\al}(u)=-\frac{\al}{u}I_{\alpha}(u)+I_{\al-1}(u) $$
that holds for $\al>0$ (see \cite[p.~110]{Lebedev}), and differentiate.
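More precisely, writing $z=\frac{2\sqrt{r}xy}{1-r}$ and using \eqref{def_R_ker_explic} we get
$$\partial_x\log\Rop(x,y)=\frac{1}{2x}-\frac{(1+r)x}{1-r}+\frac{2\sqrt{r}y}{1-r}\,\frac{I_{\al}'(z)}{I_{\al}(z)};$$
substituting $I_{\al}'(z)/I_{\al}(z)=I_{\al-1}(z)/I_{\al}(z)-\al/z$ and noting that $\frac{2\sqrt{r}y}{1-r}\cdot\frac{\al}{z}=\frac{\al}{x}$, we obtain the first displayed formula, while the second equality follows by comparing \eqref{def_R_ker_explic} for the parameters $\al$ and $\al-1$, which gives $\Rop(x,y)\,I_{\al-1}(z)/I_{\al}(z)=r^{-1/2}R^{\al-1}_r(x,y)$.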
\end{proof}
{\lm\label{Bessel_estim} For $\alpha\geq 1/2$ we have
$$\left\vert \frac{I_{\al-1}(u)}{I_{\al}(u)}-1\right\vert\leq \frac{2\al}{u},\qquad u>0. $$ }
For the proof in the case $\al>1/2$ see \cite[pp.~6-7]{Nasell}. If $\al=1/2$, then it suffices to use the explicit formulas (see \cite[p.~112]{Lebedev})
\begin{equation}\label{explicit_formulas}
I_{-1/2}(u)=\Big(\frac{2}{\pi u}\Big)^{1/2}\cosh u,\qquad I_{1/2}(u)=\Big(\frac{2}{\pi u}\Big)^{1/2}\sinh u,\qquad u>0.
\end{equation}
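Indeed, in the case $\al=1/2$ the verification is a one-line computation based on these formulas:
$$\left\vert \frac{I_{-1/2}(u)}{I_{1/2}(u)}-1\right\vert=\coth u-1=\frac{2}{e^{2u}-1}\leq\frac{1}{u}=\frac{2\al}{u},\qquad u>0,$$
since $\coth u>1$ and $e^{2u}-1\geq 2u$.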
Lemma \ref{Bessel_estim} is of paramount importance in our estimates wherever the cancellations are needed. It has been used before in the context of Laguerre functions (see for example \cite{NowakStempak}).
Note that Lemma \ref{deri_R_ker} works for $\al>0$, but we want to include the case $\al=-1/2$ as well. Thus, using \eqref{def_R_ker_explic} and \eqref{explicit_formulas} we obtain
$$\Ropal(x,y)=\frac{2}{\sqrt{\pi}}(1-r)^{-1/2}\exp\Big( -\frac{1}{2}\frac{1+r}{1-r}(x^2+y^2)\Big) \cosh\Big(\frac{2\sqrt{r}xy}{1-r}\Big). $$
Hence,
\begin{align}\label{ker_-1/2_deri}
\big(\partial_x\Ropal (x,y)\big)^2&=\frac{4}{\pi}(1-r)^{-3}\exp\Big(-\frac{1+r}{1-r}(x^2+y^2)\Big)\nonumber\\
&\times \bigg( 2\sqrt{r}y\sinh\Big(\frac{2\sqrt{r}xy}{1-r}\Big)-(1+r)x\cosh\Big(\frac{2\sqrt{r}xy}{1-r}\Big)\bigg)^2.
\end{align}
Using basic estimates for $\cosh$ and $\sinh$ and combining \eqref{ker_-1/2_deri} with \eqref{R_ker_estim} and Lemma \ref{deri_R_ker} we obtain for $\al\in\{-1/2\}\cup[1/2,\infty)$
\begin{align}\label{final_estimate_without_cancellations}
&\big(\partial_x\Rop (x,y)\big)^2\nonumber\\
&\lesssim\left\{\begin{array}{ll}
(1-r)^{-2\al-2}(xy)^{2\al+1}(A_\al (x,y)+x^2(1-r)^{-2})\exp\left(-\frac{1+r}{1-r}(x^2+y^2) \right), & y\leq \frac{1-r}{2\sqrt{r}x},\\
(1-r)^{-3}(x^2+y^2+(1-r)^2x^{-2})\exp\left(-\frac{1+r}{1-r}(y-x)^2-\frac{2xy(1-r)}{(1+\sqrt{r})^2} \right), & y\geq \frac{1-r}{2\sqrt{r}x},
\end{array}\right.
\end{align}
where $A_\al(x,y)=x^{-2}$ for $\al\geq 1/2$ and $A_{-1/2}(x,y)=y^2(1-r)^{-2}$.
{\prop\label{Ker_der_estim} For $\al\in\{-1/2\}\cup [1/2,\infty)$ we have
$$\sup_{x\in\mr_+}\left\Vert\partial_x\Rop(x,\boldsymbol{\cdot})\right\Vert_{L^2(\mr_+)}\lesssim (1-r)^{-3/4},\qquad r\in(0,1). $$}
\begin{proof}
Fix $x\in\mr_+$. If $0<r\leq 1/2$, then we use \eqref{Linfinity_bound_lfun} and Parseval's identity obtaining
$$\left\Vert\partial_x\Rop(x,\boldsymbol{\cdot})\right\Vert_{L^2(\mr_+)}=\Big\Vert\sum_{k\in\mathbb{N}}r^k(\lfunk)'(x)\lfunk\Big\Vert_{L^2(\mr_+)}\leq\Big(\sum_{k\in\mathbb{N}}2^{-2k}\Vert(\lfunk)'\Vert_{L^{\infty}(\mr_+)}^2\Big)^{1/2}\lesssim 1.$$
From now on we assume that $1/2<r<1$. We use the notation $y_0=(1-r)/(2\sqrt{r}x)$ again and split the integration over two intervals: $(0,y_0]$ and $(y_0,\infty)$. In the first case, using \eqref{final_estimate_without_cancellations} and the substitution $y=(\sqrt{1-r})/(\sqrt{1+r})u$, we obtain for $\al\geq 1/2$
\begin{align*}
&\int_0^{y_0}\left(\partial_x\Rop(x,y)\right)^2dy\\
&\lesssim (1-r)^{-2\al-2}x^{2\al-1}\left(1+(1-r)^{-2}x^4\right)\exp{\left(-\frac{1+r}{1-r}x^2\right)}\int_0^{y_0}y^{2\al+1}\exp{\left(-\frac{1+r}{1-r}y^2\right)}dy\\
&\lesssim(1-r)^{-3/2}\left(\left(\frac{x^2}{1-r}\right)^{\al-1/2}+\left(\frac{x^2}{1-r}\right)^{\al+3/2}\right)\exp\left(-\frac{1+r}{1-r}x^2\right)\int_0^{\infty}u^{2\al+1}e^{-u^2}du\\
&\lesssim (1-r)^{-3/2}.
\end{align*}
For $\al=-1/2$ the corresponding computation is similar. The above estimate and the following ones are uniform in $x\in\mr_+$ and $r\in(0,1)$.
The case of integration over $(y_0,\infty)$ is more complicated. Firstly, we assume that $y_0\geq x$; applying \eqref{final_estimate_without_cancellations} and the substitution $y-x=\sqrt{(1-r)/(1+r)}t$ we compute
\begin{align*}
\int_{y_0}^{\infty}\left(\partial_x\Rop(x,y)\right)^2 dy&\lesssim (1-r)^{-3}\int_{y_0}^{\infty}y^2\exp\left(-\frac{(1+r)(y-x)^2}{1-r}\right)dy\\
&\lesssim (1-r)^{-5/2}\int_0^{\infty}\left(t^2(1-r)+x^2\right)e^{-t^2}dt\\
&\lesssim (1-r)^{-3/2}.
\end{align*}
Now, we assume that $y_0\leq x$, and integrate over the interval $[2x,\infty)$. Similarly, we obtain
\begin{align*}
\int_{2x}^{\infty}\left(\partial_x\Rop(x,y)\right)^2 dy&\lesssim (1-r)^{-3}\int_{2x}^{\infty} y^2\exp\left(-\frac{(1+r)(y-x)^2}{1-r}\right)dy\\
&\lesssim (1-r)^{-3}\int_{x}^{\infty}(y+x)^2\exp\left(-\frac{(1+r)y^2}{1-r}\right)dy\\
&\lesssim (1-r)^{-5/2}\int_{0}^{\infty}y^2(1-r)e^{-y^2}dy\\
&\lesssim (1-r)^{-3/2}.
\end{align*}
Finally, we integrate over the interval $(y_0,2x)$ with the restrictions $1/2<r<1$ and $x\geq y_0$. Here we shall use the cancellations. Firstly we present the proof for $\al\geq 1/2$. By Lemmas \ref{deri_R_ker}, \ref{Bessel_estim}, \ref{lemma_R} and estimate \eqref{R_ker_estim} we have
\begin{align*}
&\int_{y_0}^{2x}\left(\partial_x\Rop(x,y)\right)^2dy\\
&\lesssim \int_{y_0}^{2x} \Rop(x,y)^2\Bigg(x^{-2}+\left(\frac{2\sqrt{r}y}{1-r}-\frac{(1+r)x}{1-r}\right)^2+y^2(1-r)^{-2}\bigg(1-\frac{I_{\al-1}\left(\frac{2\sqrt{r}xy}{1-r} \right)}{I_{\al}\left(\frac{2\sqrt{r}xy}{1-r}\right)}\bigg)^2\Bigg)dy\\
&\lesssim x^{-2}\int_{y_0}^{2x} \Rop(x,y)^2dy+\int_{y_0}^{2x} \Rop(x,y)^2\left(\frac{2\sqrt{r}y}{1-r}-\frac{(1+r)x}{1-r}\right)^2dy\\
&\lesssim x^{-2}(1-r)^{-1/2}\\
&\qquad+(1-r)^{-3}\int_{y_0}^{2x}\exp\Big(-\frac{(1+r)(y-x)^2}{1-r}-\frac{2xy(1-r)}{(1+\sqrt{r})^2}\Big)\big(2y\sqrt{r}-(1+r)x\big)^2dy\\
&\lesssim (1-r)^{-3/2}\\
&\qquad+(1-r)^{-3}\int_{y_0-x}^x \exp\Big(-\frac{(1+r)y^2}{1-r}-\frac{2x(y+x)(1-r)}{(1+\sqrt{r})^2}\Big)\Big(2y\sqrt{r}-\frac{x(1-r)^2}{(1+\sqrt{r})^2}\Big)^2dy\\
&\lesssim (1-r)^{-3/2}+(1-r)^{-3}\int_{y_0-x}^x y^2\exp\Big(-\frac{y^2}{1-r}\Big)dy+(1-r)x^3\exp\Big(-\frac{x^2(1-r)}{(1+\sqrt{r})^2}\Big)\\
&\lesssim (1-r)^{-3/2}+(1-r)^{-3/2}\int_{-\infty}^{\infty}y^2e^{-y^2}dy\\
&\lesssim (1-r)^{-3/2}.
\end{align*}
Now, we consider $\al=-1/2$. We denote $z=(2\sqrt{r}xy)/(1-r)$. Equality \eqref{ker_-1/2_deri} and the estimate $\vert (1-\coth u )\sinh u \vert\leq 1$, $u>0$, yield
\begin{align*}
&\big(\partial_x\Ropal (x,y)\big)^2\\
&\qquad\lesssim(1-r)^{-3}\exp\Big(-\frac{1+r}{1-r}(x^2+y^2)\Big) \big( 2\sqrt{r}y-(1+r)x\coth z \big)^2\sinh^2 z\\
&\qquad\lesssim (1-r)^{-3}\exp\Big(-\frac{1+r}{1-r}(x^2+y^2)\Big) \Big(x^2+\big( 2\sqrt{r}y-(1+r)x\big)^2\sinh^2 z\Big).
\end{align*}
Note that
\begin{align*}
&(1-r)^{-3}x^2\int_{y_0}^{2x}\exp\Big(-\frac{1+r}{1-r}(x^2+y^2)\Big)\,dy\\
&\lesssim (1-r)^{-2}\frac{x^2}{1-r}\exp\Big(-\frac{1+r}{1-r}x^2\Big)\int_{y_0}^{2x}\exp\Big(-\frac{1+r}{1-r}y^2\Big)\,dy\\
&\lesssim (1-r)^{-3/2}.
\end{align*}
Moreover, using the estimate for the hyperbolic sine we obtain
\begin{align*}
&(1-r)^{-3}\int_{y_0}^{2x}\exp\Big(-\frac{1+r}{1-r}(x^2+y^2)\Big)\sinh^2\Big(\frac{2\sqrt{r}xy}{1-r}\Big)\big( 2\sqrt{r}y-(1+r)x\big)^2\,dy\\
&\lesssim (1-r)^{-3}\int_{y_0}^{2x}\exp\Big(-\frac{1+r}{1-r}(y-x)^2-\frac{2xy(1-r)}{(1+\sqrt{r})^2}\Big)\big( 2\sqrt{r}y-(1+r)x\big)^2\,dy,
\end{align*}
but this is the same quantity as in the corresponding estimate in the case $\al\geq 1/2$.
\end{proof}
Now we can state the multi-dimensional corollary.
{\cor\label{cor_of_prop} For $\al\in (\{-1/2\}\cup[1/2,\infty))^d$ and $j\in\{1,\ldots,d\},$ we have
$$\sup_{x\in\mr_+^d}\left\Vert\partial_{x_j}\Rop(x,\boldsymbol{\cdot})\right\Vert_{L^2(\mr_+^d)}\lesssim (1-r)^{-(d+2)/4}, \qquad\ r\in(0,1). $$ }
\begin{proof}
For simplicity we can assume that $j=1$. Thus,
$$\partial_{x_1}\Rop(x,y)=\frac{\partial}{\partial x_1}R^{\alpha_1}_r(x_1,y_1)\prod_{i=2}^d \Ropi(x_i,y_i). $$
Hence, Lemma \ref{lemma_R} and Proposition \ref{Ker_der_estim} imply
$$\left\Vert\partial_{x_1}\Rop(x,\boldsymbol{\cdot})\right\Vert_{L^2(\mr_+^d)}=\left\Vert \partial_{x_1}R^{\alpha_1}_r(x_1,\boldsymbol{\cdot})\right\Vert_{L^2(\mr_+)}\prod_{i=2}^d \left\Vert \Ropi(x_i,\boldsymbol{\cdot})\right\Vert_{L^2(\mr_+)}\lesssim (1-r)^{-(d+2)/4},$$
uniformly in $x=(x_1,\ldots,x_d)\in\mr_+^d$ and $r\in(0,1)$.
\end{proof}
\section{Main results}
{\prop\label{prop_atoms} For $\al\in (\{-1/2\}\cup[1/2,\infty))^d$ we have
$$\int_0^1 \Vert \Rop a\Vert_{L^2(\mr_+^d)}(1-r)^{(d-4)/4}dr\lesssim 1, $$
uniformly in $H^1(\mr_+^d)$ atoms $a$.}
\begin{proof}
Let us fix an $H^1(\mr_+^d)$ atom $a$ supported in a ball $B$. Let $x'=(x_1',\ldots,x_d')\in\mr_+^d$ be the center of $B$. Note that since $\Rop$ are contractions on $L^2(\mr_+^d)$ we have for every $0<r<1$
$$\Vert \Rop a\Vert_{L^2(\mr_+^d)}\leq\Vert a\Vert_{L^2(\mr_+^d)}\leq \vert B\vert^{-1/2}.$$
This finishes the proof in case $\vert B\vert\geq 1$. From now on, let us assume $\vert B\vert<1$. Minkowski's integral inequality and Corollary \ref{cor_of_prop} imply
\begin{align*}
\Vert \Rop a&\Vert_{L^2(\mr_+^d)}\\
&=\Big( \int_{\mr_+^d}\Big\vert\int_{B}\big(\Rop(x_1,x_2,\ldots,x_d,y)-\Rop(x_1',\ldots,x_d',y)\big)a(x)dx\Big\vert^2 dy\Big)^{1/2}\\
&=\Big( \int_{\mr_+^d}\Big\vert\int_{B}\Big(\sum_{i=1}^d \int_{x_i'}^{x_i} \partial_{x_i}\Rop(x'_1,\ldots,x'_{i-1},s,x_{i+1},\ldots,x_d,y)\,ds\Big)a(x)\,dx\Big\vert^2 dy\Big)^{1/2}\\
&\lesssim \int_{B} \vert a(x)\vert \vert B\vert^{1/d}\sum\limits_{i=1}^d\sup_{\xi\in\mr_+^d}\big\Vert\partial_{x_i}\Rop(\xi,\boldsymbol{\cdot})\big\Vert_{L^2(\mr_+^d)}\, dx\\
&\lesssim \vert B\vert^{1/d} (1-r)^{-(d+2)/4}.
\end{align*}
Thus, using the above estimates we obtain
\begin{align*}
\int_0^1 \Vert &\Rop a\Vert_{L^2(\mr^d_+)}(1-r)^{(d-4)/4}dr\\
&\lesssim \int_0^{1-\vert B\vert^{2/d}} \vert B\vert^{1/d} (1-r)^{-3/2}dr+\int_{1-\vert B \vert^{2/d}}^1 \vert B\vert^{-1/2}(1-r)^{(d-4)/4}dr,
\end{align*}
and this quantity is bounded by a constant that does not depend on $\vert B\vert$.
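Indeed, setting $t=\vert B\vert^{2/d}\in(0,1)$ and computing both integrals explicitly, the first term above is (up to the implicit constant) equal to $\vert B\vert^{1/d}\cdot 2\big(t^{-1/2}-1\big)\leq 2$, while the second equals $\vert B\vert^{-1/2}\cdot\tfrac{4}{d}\,t^{d/4}=\tfrac{4}{d}$, since $t^{d/4}=\vert B\vert^{1/2}$.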
\end{proof}
Now we can state the main theorem.
{\thm\label{main_thm} Let $\al\in (\{-1/2\}\cup[1/2,\infty))^d$. Then
$$\sum_{n\in\mathbb{N}^d}\frac{\vert\langle f,\lfun\rangle\vert}{(\vert n\vert+1)^{3d/4}}\lesssim \Vert f\Vert_{H^1(\mr_+^d)},$$
uniformly in $f\in H^1(\mr_+^d)$.}
\begin{proof}
Firstly we prove that
$$\sum_{n\in\mathbb{N}^d}\frac{\vert\langle a,\lfun\rangle\vert}{(\vert n\vert+1)^{3d/4}}\lesssim 1,$$
uniformly in $H^1(\mr_+^d)$ atoms $a$.
We shall employ the same argument as the one used in \cite{LiYuShi}. Recall the well-known asymptotic for the Beta function, $B(k,m)\approx\Gamma(m)k^{-m}$ for large $k$ and fixed $m$. Let $a$ be an $H^1(\mr_+^d)$ atom. Using H\"{o}lder's inequality and Proposition \ref{prop_atoms} we obtain
\begin{align*}
\sum_{n\in\mathbb{N}^d}\frac{\vert\langle a, \lfun\rangle\vert}{(\vert n\vert+1)^{3d/4}}&\lesssim\sum_{n\in\mathbb{N}^d}\int_0^1 r^{2\vert n\vert}(1-r)^{(3d-4)/4}\vert\langle a, \lfun\rangle\vert dr\\
&\leq\int_0^1 (1-r)^{(3d-4)/4}\left(\sum_{n\in\mathbb{N}^d}r^{2\vert n\vert}\right)^{1/2}\left(\sum_{n\in\mathbb{N}^d}r^{2\vert n\vert}\vert\langle a,\lfun\rangle\vert^2\right)^{1/2}dr\\
&\lesssim\int_0^1 (1-r)^{(3d-4)/4}(1-r)^{-d/2}\Vert \Rop a\Vert_{L^2(\mr_+^d)}dr\\
&\lesssim 1.
\end{align*}
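Let us justify the first and the third estimates in the above chain. By the Beta function asymptotic recalled above,
\begin{equation*}
\int_0^1 r^{2\vert n\vert}(1-r)^{(3d-4)/4}\,dr=B\Big(2\vert n\vert+1,\frac{3d}{4}\Big)\approx\Gamma\Big(\frac{3d}{4}\Big)(2\vert n\vert+1)^{-3d/4}\gtrsim(\vert n\vert+1)^{-3d/4},
\end{equation*}
which gives the first estimate, while in the third line we used $\sum_{n\in\mathbb{N}^d}r^{2\vert n\vert}=(1-r^2)^{-d}\leq(1-r)^{-d}$.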
Now, we define $T(f)=\{\langle f,\lfun\rangle\}_{n\in\mathbb{N}^d}$ for $f\in H^1(\mr_+^d)$. Our aim is to prove that $T\colon H^1(\mr_+^d)\rightarrow ll^1((\vert n\vert+1)^{-3d/4})$ is bounded. Note that \eqref{Linfinity_boundedness} and \eqref{L1&H1_inequality} yield
$$\vert\langle f,\lfun\rangle\vert\leq \Vert \lfun\Vert_{L^{\infty}(\mr_+^d)} \Vert f\Vert_{L^1(\mr_+^d)}\lesssim\prod_{i=1}^d (n_i+1)^{-1/12}\Vert f\Vert_{H^1(\mr_+^d)}\leq (\vert n\vert +1)^{-1/12}\Vert f\Vert_{H^1(\mr_+^d)}.$$
Thus, $T\colon H^1(\mr_+^d)\rightarrow ll^1((\vert n\vert+1)^{-d})$ is bounded. Note also that
\begin{equation}\label{ell_norms_ineq}
\Vert \boldsymbol{\cdot}\Vert_{ll^1((\vert n\vert+1)^{-d})}\leq \Vert \boldsymbol{\cdot}\Vert_{ll^1((\vert n\vert+1)^{-3d/4})}.
\end{equation}
Let us take $f\in H^1(\mr_+^d)$ and let $f=\sum_{i=0}^{\infty}\lambda_i a_i$ be an atomic decomposition of $f$. Denote $f_m=\sum_{i=0}^{m}\lambda_i a_i$ and note that $(T(f_m))_{m\in\mathbb{N}}$ is a Cauchy sequence in $ll^1((\vert n\vert+1)^{-3d/4})$. Indeed, we have for $l<m$,
$$\Vert T(f_m)-T(f_l) \Vert_{ll^1((\vert n\vert+1)^{-3d/4})}\leq \sum_{i=l+1}^{m}\vert\lambda_i\vert\Vert T(a_i)\Vert_{ll^1((\vert n\vert+1)^{-3d/4})}\lesssim \sum_{i=l+1}^m \vert \lambda_i\vert. $$
Hence, $T(f_m)$ converges to a sequence $g$ in $ll^1((\vert n\vert+1)^{-3d/4})$ and, by \eqref{ell_norms_ineq}, also in $ll^1((\vert n\vert+1)^{-d})$. Since $T\colon H^1(\mr_+^d)\rightarrow ll^1((\vert n\vert+1)^{-d})$ is bounded we have $T(f_m)\rightarrow T(f)$ in $ll^1((\vert n\vert+1)^{-d})$, therefore $g=T(f)$. To obtain the boundedness of $T\colon H^1(\mr_+^d)\rightarrow ll^1((\vert n\vert+1)^{-3d/4})$ we fix $\ve>0$ and take $m$ such that $\Vert T(f-f_m)\Vert_{ll^1((\ven+1)^{-3d/4})}<\ve$ and calculate
\begin{align*}
\Vert T(f)\Vert_{ll^1((\ven+1)^{-3d/4})}&\leq \Vert T(f-f_m)\Vert_{ll^1((\ven+1)^{-3d/4})}+\Vert T(f_m)\Vert_{ll^1((\ven+1)^{-3d/4})}\\
&\leq \ve +\sum_{i=0}^m \vert \lambda_i\vert \Vert T(a_i)\Vert_{ll^1((\ven+1)^{-3d/4})}\\
&\lesssim \ve+\Vert f\Vert_{H^1(\mr_+^d)}.
\end{align*}
This finishes the proof.
\end{proof}
\section{$L^1$ result}
In this section we shall prove that the inequality in Theorem \ref{main_thm} holds also with the $L^1(\mr_+^d)$ norm in place of the $H^1(\mr_+^d)$ norm, provided that the exponent in the denominator is strictly greater than $3d/4$. Our reasoning is similar to Kanjin's in \cite{Kanjin2}. The main tool in the proof of this fact is the asymptotic estimate for the functions $\lfun$.
{\thm\label{L1_thm} Let $\ve>0$ and $\al\in [-1/2,\infty)^d$. Then
\begin{equation}\label{L1_thm_ineq}
\sum_{n\in\mathbb{N}^d}\frac{\vert\langle f,\lfun\rangle\vert}{(\vert n\vert+1)^{\frac{3d}{4}+\ve}}\lesssim \Vert f\Vert_{L^1(\mr_+^d)},
\end{equation}
uniformly in $f\in L^1(\mr_+^d)$.
The result is sharp in the sense that there is $f\in L^1(\mr_+^d)$ such that
\begin{equation}\label{L1_counterexample}
\sum_{n\in\mathbb{N}^d}\frac{\vert\langle f,\lfun\rangle\vert}{(\vert n\vert+1)^{3d/4}}=\infty.
\end{equation}
}
\begin{proof}
Given $\ve>0$ and $\al\in[-1/2,\infty)^d$, for the proof of \eqref{L1_thm_ineq} it suffices to verify that
$$\sum_{n\in\mathbb{N}^d}\frac{\vert\lfun(x)\vert}{(\vert n\vert+1)^{\frac{3d}{4}+\ve}}\lesssim 1,\qquad x\in\mr_+^d.$$
We shall prove this estimate in the one-dimensional case. This is indeed sufficient, since
$$\sum_{n\in\mathbb{N}^d}\frac{\vert\lfun(x)\vert}{(\vert n\vert+1)^{\frac{3d}{4}+\ve}}\leq \prod_{i=1}^d \sum_{n_i=0}^{\infty}\frac{\vert\lfuni(x_i)\vert}{( n_i+1)^{\frac{3}{4}+\ve/d}}. $$
But $\varphi^{\al}_0(u)\lesssim 1$ uniformly in $u\in\mr_+$, so given $\al\in[-1/2,\infty)$ we are reduced to proving
\begin{equation}\label{L1_new_claim}
\sum_{k=1}^{\infty}\frac{\vert \lfunk(u)\vert}{k^{3/4+\ve}}\lesssim 1,\qquad u\in\mr_+.
\end{equation}
Denote $\nti=4k+2\al+2$ and for $u\in\mr_+$ define
$$\mathcal{N}_u=\big\{k\in\mathbb{N}_+\colon \nti/2\leq u^2\leq 3\nti/2\big\}.$$
We have
\begin{equation*}
\sum_{k=1}^{\infty}\frac{\vert \lfunk(u)\vert}{k^{3/4+\ve}}=\sum_{k\notin \mathcal{N}_u}\frac{\vert \lfunk(u)\vert}{k^{3/4+\ve}}+\sum_{k\in \mathcal{N}_u}\frac{\vert \lfunk(u)\vert}{k^{3/4+\ve}}.
\end{equation*}
Note that by \eqref{pointwise_estimates_lfun}, if $k\notin \mathcal{N}_u$, then $\vert \lfunk(u)\vert\lesssim k^{-1/4}$ uniformly in $u$ and $k$, and hence the sum over the complement of $\mathcal{N}_u$ is bounded uniformly in $u\in\mr_+$. We claim that the same is true for the sum over $\mathcal{N}_u$.
Assume $\mathcal{N}_u\neq \emptyset$ and let $k_0=k_0(u)=\min\{k\in\mathbb{N}_+\colon k\in \mathcal{N}_u\}$. The definition of $\mathcal{N}_u$ implies that $\mathcal{N}_u\subset [k_0,k^*_0]$, where $k^*_0=3k_0+1+\lceil\al\rceil$. Thus, \eqref{pointwise_estimates_lfun} yields
\begin{align*}
\sum_{k\in \mathcal{N}_u}\frac{\vert \lfunk(u)\vert}{k^{3/4+\ve}}\lesssim \sum_{k=k_0}^{k^*_0}\frac{\sqrt{u}}{k^{3/4+\ve}\,\nti^{1/4}(\nti^{1/3}+\vert u^2-\nti\vert)^{1/4}}&\lesssim \sum_{k=k_0}^{k^*_0}\frac{(3\widetilde{k_0}/2)^{1/4}}{k_0^{1+\ve}(\nti^{1/3}+\vert u^2-\nti\vert)^{1/4}}\\
&\lesssim k_0^{-3/4-\ve}\sum_{k=k_0}^{k^*_0}\frac{1}{(1+\vert u^2-\nti\vert)^{1/4}},
\end{align*}
uniformly in $u$. Note that $\vert u^2-\nti\vert$ increases in $k$ provided $\nti\geq u^2$. Since $u^2\leq 3\widetilde{k_0}/2$, we have for $\nti\geq 3\widetilde{k_0}/2$ or, equivalently, for $k\geq k_0^{**}:=\lceil 3k_0/2+\al/4+1/4\rceil$, that $\vert u^2-\nti\vert$ increases in $k$. Hence,
\begin{align*}
\sum_{k=k_0}^{k^*_0}\frac{1}{(1+\vert u^2-\nti\vert)^{1/4}}\lesssim\sum_{k=k_0^{**}}^{k^*_0}\frac{1}{(1+\vert u^2-\nti\vert)^{1/4}}&\lesssim \int_{k_0^{**}}^{k^*_0}\frac{dt}{(1+4t+2\al+2-u^2)^{1/4}}\\
&\lesssim (4k^*_0+2\al+3-u^2)^{3/4}\\
&\simeq k_0^{3/4},
\end{align*}
uniformly in $u$. Combining the last two displays, we see that the sum over $\mathcal{N}_u$ is bounded by a constant multiple of $k_0^{-\ve}\leq 1$. This completes the proof of the claim and hence the justification of \eqref{L1_new_claim}, and thus finishes the verification of \eqref{L1_thm_ineq}.
Now we pass to the proof of \eqref{L1_counterexample}. Suppose, on the contrary, that the sum in \eqref{L1_counterexample} is finite for every $f\in L^1(\mr_+^d)$. The uniform boundedness principle implies that
$$\sum_{n\in\mathbb{N}^d}\frac{\vert \langle f,\lfun\rangle\vert}{(\vert n\vert+1)^{3d/4}}\lesssim \Vert f\Vert_{L^1(\mr_+^d)}, $$
uniformly in $f\in L^1(\mr_+^d)$. Hence, by an obvious adaptation of \cite[Lemma~1]{Kanjin2} we obtain
\begin{equation*}
\sum_{n\in\mathbb{N}^d}\frac{\vert \lfun(x)\vert}{(\vert n\vert+1)^{3d/4}}\lesssim 1,\qquad x\in\mr_+^d.
\end{equation*}
But, as we shall see, this estimate does not hold. In fact, we shall prove that for any $x\in\mr_+^d$ we have
\begin{equation}\label{claim_L1_counterexample}
\sum_{n\in\mn^d_+}\frac{\vert \lfun(x)\vert}{\ven^{3d/4}}=\infty.
\end{equation}
Notice that using the asymptotic estimate for Laguerre polynomials (see \cite[(4.22.19)]{Lebedev}) and the known asymptotic for the Gamma function, $\Gamma(k+a)/\Gamma(k+b)\approx k^{a-b}$, $k\rightarrow\infty$, where $a,\,b\geq 0$ are fixed, we obtain for $u\in\mr_+$ and $\beta\geq-1/2$
$$\varphi_k^{\beta}(u)\approx \pi^{-1/2} k^{-1/4}\cos\Big(2\sqrt{k}u-\frac{\pi(2\beta+1)}{4}\Big),\qquad k\rightarrow \infty. $$
Hence, we reduce verifying \eqref{claim_L1_counterexample} to checking that
\begin{equation}\label{claim_cosinus}
\sum_{n\in\mn_+^d}\frac{\bigg\vert \prod_{i=1}^d \cos\Big(2\sqrt{n_i}x_i-\frac{\pi(2\al_i+1)}{4}\Big)\bigg\vert}{\ven^d}=\infty.
\end{equation}
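In this reduction we also use that $n_i\leq\ven$ for every $i$, so that $\prod_{i=1}^{d}n_i^{-1/4}\geq\ven^{-d/4}$ and hence $\ven^{-3d/4}\prod_{i=1}^{d}n_i^{-1/4}\geq\ven^{-d}$.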
We first prove the one-dimensional case. Fix $u\in\mr_+$, write $\beta=\al_1$, and notice that, for $d=1$, the corresponding sum in \eqref{claim_cosinus} is greater than
\begin{align*}
\sum_{k=1}^{\infty}\frac{ \cos^2\Big(2\sqrt{k}u-\frac{\pi(2\beta+1)}{4}\Big)}{k}=\sum_{k=1}^{\infty}\frac{ 1+\cos 4\sqrt{k}u\cos\frac{\pi(2\beta+1)}{2}+\sin4\sqrt{k}u\sin\frac{\pi(2\beta+1)}{2}}{2k}.
\end{align*}
Thus, \eqref{claim_cosinus} holds, since for any $t\in\mr\smallsetminus\{0\}$ each of the two series
\begin{equation*}
\sum_{k=1}^{\infty}\frac{ \Big\{\substack{\sin\\ \cos}\Big\}\, \big(t\sqrt{k}\big)}{k}
\end{equation*}
converges. Since we could not find a proof of this fact in the literature, we offer a short argument (for the cosine series and $t=1$).
Let $H(k)=\sum_{j=1}^{k}1/j$ denote the $k$-th harmonic number. Applying summation by parts, for any $K\in\mathbb{N}_+$ we obtain
\begin{equation}\nonumber
\sum_{k=1}^{K}\frac{\cos\sqrt{k}}{k}=H(K)\cos\sqrt{K}+\int_1^K H(\lfloor u\rfloor)\frac{\sin\sqrt{u}}{2\sqrt{u}}\,du.
\end{equation}
We use the asymptotic $H(k)=\log k+\gamma+r(k)$, where $r(k)=O(1/k)$ and $\gamma$ is the Euler-Mascheroni constant, and plug it into both summands on the right-hand side of the above formula. The terms resulting from the error parts, namely $r(K)\cos\sqrt{K}$ and
$$\int_1^K r(\lfloor u\rfloor)\frac{\sin\sqrt{u}}{2\sqrt{u}}\,du$$
are easily seen to converge as $K\rightarrow\infty$. This is also true for
$$\int_1^{K}\log\lfloor u\rfloor\frac{\sin\sqrt{u}}{2\sqrt{u}}\,du-\int_1^{K}\log u\,\frac{\sin\sqrt{u}}{2\sqrt{u}}\,du=\int_1^{K}\log\Big(1+\frac{\lfloor u\rfloor-u}{u}\Big)\frac{\sin\sqrt{u}}{2\sqrt{u}}\,du.$$
Thus we are left with
\begin{align*}
(\log K+\gamma)\cos\sqrt{K}+\int_1^K (\log u+\gamma)\frac{\sin\sqrt{u}}{2\sqrt{u}}\,du&=\gamma\cos1+\int_1^K \frac{\cos\sqrt{u}}{u}\,du.
\end{align*}
The latter integral, after the change of variable $u=t^2$, equals $2\int_1^{\sqrt{K}}\frac{\cos t}{t}\,dt$, which is easily seen to converge as $K\rightarrow\infty$, for instance by Dirichlet's test. This finishes the proof of the convergence of the investigated series.
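As a sanity check (not part of the proof), the convergence can also be observed numerically; the short script below is only an illustration.
\begin{verbatim}
import math

# Partial sums of sum_{k>=1} cos(sqrt(k))/k; they stabilize slowly,
# in accordance with the argument above.
def partial_sum(K):
    return sum(math.cos(math.sqrt(k)) / k for k in range(1, K + 1))

for K in (10**3, 10**4, 10**5, 10**6):
    print(K, partial_sum(K))
\end{verbatim}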
Now we continue and prove \eqref{claim_cosinus} in the multi-dimensional setting. Given $x\in\mr_+^d$, proceeding similarly as before, we reduce justifying \eqref{claim_cosinus} to verifying that each of the $3^d-1$ iterated series
\begin{equation}\label{all_series}
\sum_{n_1=1}^{\infty}\ldots\sum_{n_d=1}^{\infty}\vert n\vert^{-d}\prod_{j\in J}\Big\{\substack{\sin\\ \cos}\Big\}\big(t_j\sqrt{n_j}\big)
\end{equation}
converges, where $J$ is any non-empty subset of $\{1,\ldots,d\}$ and $t_j\neq0$, $j\in J$. We shall use induction on the dimension. Suppose that every series of the form \eqref{all_series} converges. We will prove that the analogous series in dimension $d+1$ also converge. Fix such a series and consider the associated set $J\subset\{1,\ldots,d+1\}$. We distinguish two cases depending on whether $d+1\in J$ or not.
If $d+1\notin J$, then the investigated series is of the form
$$\sum_{n_1=1}^{\infty}\ldots\sum_{n_d=1}^{\infty}\prod_{j\in J}\Big\{\substack{\sin\\ \cos}\Big\}\big(t_j\sqrt{n_j}\big)\sum_{k=1}^{\infty}(\vert n\vert+k)^{-d-1}.$$
It now suffices to use the asymptotic
$$\sum_{k=1}^{\infty}(\vert n\vert +k)^{-d-1}=\frac{1}{d}\vert n\vert^{-d}+O(\vert n\vert^{-d-1})$$
and the inductive assumption.
The case $d+1\in J$ is more involved. We simplify matters, without any loss of generality, by assuming $t_j=1$ for $j\in J$. The considered series is of the form
$$\sum_{n_1=1}^{\infty}\ldots\sum_{n_d=1}^{\infty}\Lambda(J,n)\sum_{k=1}^{\infty}\frac{\cos\sqrt{k}}{(\vert n\vert+k)^{d+1}}$$
(or with the sine in place of the cosine, but this is not an obstacle), where $\Lambda(J,n)$ is a product of the sines or the cosines taken at $\sqrt{n_j}$, $j\in J$, respectively. In fact, we shall prove the slightly stronger result that
\begin{equation}\label{abs_convergence}
\sum_{n\in\mn^d_+}\Big\vert\sum_{k=1}^{\infty}\frac{\cos\sqrt{k}}{(\vert n\vert+k)^{d+1}}\Big\vert<\infty.
\end{equation}
We remark that the cancellation provided by a single trigonometric function is sufficient for our estimates. Note that we cannot use the triangle inequality in the innermost series, because the resulting series would diverge.
To verify \eqref{abs_convergence} we check the convergence of the innermost series, controlling the decay of its sum in $\vert n\vert$. We will use the following asymptotic estimate
$$\sum_{k=1}^{\lfloor u\rfloor}\frac{1}{\ven +k}=\log\Big(1+\frac{u}{\ven} \Big)+r_u(\vert n\vert),$$
where $r_u(\ven)=O(\ven^{-1})$ uniformly in $u\in\mr_+$. Summation by parts and the above asymptotic yield
\begin{align*}
&\sum_{k=1}^{K}\frac{1}{\ven +k}\frac{\cos\sqrt{k}}{(\ven+k)^{d}}\\
&=\left(\sum_{k=1}^{K}\frac{1}{\ven +k}\right)\frac{\cos\sqrt{K}}{(\ven+K)^{d}}-\int_1^K\left(\sum_{k=1}^{\lfloor u\rfloor}\frac{1}{\ven +k}\right)\bigg(\frac{\cos\sqrt{u}}{(\ven +u)^{d}}\bigg)'\,du\\
&=\log\Big(1+\frac{K}{\ven}\Big)\frac{\cos\sqrt{K}}{(\ven+K)^{d}}-\int_1^K\log\Big(1+\frac{u}{\ven}\Big)\bigg(\frac{\cos\sqrt{u}}{(\ven+u)^{d}}\bigg)'\,du\\
&+ r_K(\ven) \frac{\cos\sqrt{K}}{(\ven+K)^{d}}-\int_1^K r_u(\ven)\bigg(\frac{\cos\sqrt{u}}{(\ven+u)^{d}}\bigg)'\,du.
\end{align*}
The term with the error part $r_K(\ven)$ converges to zero as $K\rightarrow\infty$, while the integral term with the error part $r_u(\ven)$ is absolutely convergent with the proper decay in $\ven$, namely
$$\int_1^K \Big\vert r_u(\ven)\bigg(\frac{\cos\sqrt{u}}{(\ven+u)^{d}}\bigg)'\Big\vert\,du\lesssim \ven^{-1}\int_1^{\infty} \frac{1}{\sqrt{u}(\ven+u)^d}\,du\lesssim \ven^{-(d+1/4)}. $$
On the other hand, for the main terms, using integration by parts twice we obtain
\begin{align*}
&\log\Big(1+\frac{K}{\ven}\Big)\frac{\cos\sqrt{K}}{(\ven+K)^{d}}-\int_1^K\log\Big(1+\frac{u}{\ven}\Big)\bigg(\frac{\cos\sqrt{u}}{(\ven+u)^{d}}\bigg)'\,du\\
&=\log\Big(1+\frac{1}{\ven}\Big)\frac{\cos1}{(\ven+1)^{d}}+\int_1^K\frac{\cos\sqrt{u}}{(\ven+u)^{d+1}}\,du\\
&=\log\Big(1+\frac{1}{\ven}\Big)\frac{\cos1}{(\ven+1)^{d}}+\frac{2\sqrt{K}\sin\sqrt{K}}{(\ven+K)^{d+1}}-\frac{2\sin1}{(\ven+1)^{d+1}}\\
&\qquad+\int_1^K \frac{\sin\sqrt{u}\,((2d+1)u-\ven)}{\sqrt{u}(\ven+u)^{d+2}}\,du.
\end{align*}
Thus, combining the above and letting $K\rightarrow\infty$ we get
\begin{align*}
\Big\vert\sum_{k=1}^{\infty}\frac{\cos\sqrt{k}}{(\ven+k)^{d+1}}\Big\vert&\lesssim \ven^{-d-1}+\int_1^{\infty} \frac{(2d+1)u+\ven}{\sqrt{u}(\ven+u)^{d+2}}\,du+\ven^{-d-1/4}\lesssim \ven^{-d-1/4}.
\end{align*}
Since the number of $n\in\mn^d_+$ with $\ven=m$ is of order $m^{d-1}$, the bound $\ven^{-d-1/4}$ is summable over $\mn^d_+$, and \eqref{abs_convergence} follows. This finishes the justification of the convergence of the considered series and thus the verification of \eqref{claim_cosinus}. The validation of \eqref{L1_counterexample} is complete, and so is the proof of the whole theorem.
\end{proof}
\end{document}
|
\begin{document}
\title{On double coset separability and the Wilson-Zalesskii property}
\author{Ashot Minasyan}
\address{CGTA, School of Mathematical Sciences,
University of Southampton, Highfield, Southampton, SO17~1BJ, United Kingdom.}
\email{[email protected]}
\begin{abstract}
A residually finite group $G$ has the Wilson-Zalesskii property if for all finitely generated subgroups $H,K \leqslant G$, one has $\overline{H} \cap \overline{K}=\overline{H \cap K}$, where the closures are taken in the profinite completion $\widehat G$ of $G$. This property played an important role in several papers, and is usually combined with separability of double cosets. In the present note we show that the Wilson-Zalesskii property is actually enjoyed by every double coset separable group. We also construct an example of a LERF group that is not double coset separable and does not have the Wilson-Zalesskii property.
\end{abstract}
\keywords{Double coset separability, Wilson-Zalesskii property, profinite completion}
\subjclass[2020]{20E26, 20E18}
\maketitle
\section{Introduction}
Every residually finite group $G$ has a natural embedding into its profinite completion $\widehat G$, which is a compact topological group. The topology on $\widehat G$ induces the \emph{profinite topology} on $G$. A subset $S \subseteq G$ is said to be \emph{separable} if it is closed in this topology, i.e., $S= \overline{S} \cap G$, where $\overline{S}$ denotes the closure of $S$ in $\widehat G$.
Many residual properties of $G$ can be interpreted in terms of the profinite topology or the embedding of $G$ into $\widehat G$. In establishing various such properties it is often useful to have control over the intersections of images of two subgroups $H,K \in \mathcal{S}$ in finite quotients of $G$, where $\mathcal{S}$ is a class of subgroups of $G$ (e.g., $\mathcal{S}$ could consist of all cyclic subgroups, all abelian subgroups or all finitely generated subgroups).
The best one can hope for is that for all $H,K \in \mathcal{S}$ we have
\begin{equation}\label{eq:WZ_cond}
\overline{H} \cap \overline{K}=\overline{H \cap K} \text{ in } \widehat{G}
\end{equation}
(see Remark~\ref{rem:control_of_intersec} and Proposition~\ref{prop:tract_intersec_equiv} below, which explain how this is related to controlling the intersection of the images of $H$ and $K$ in finite quotients of $G$).
Condition \eqref{eq:WZ_cond} played an important role in the papers of Ribes and Zalesskii \cite{R-Z-amalg}, Ribes, Segal and Zalesskii \cite{Rib-Seg-Zal}, Wilson and Zalesskii \cite{Wil-Zal} and Antol\'{\i}n and Jaikin-Zapirain \cite{A-J}, to mention a few. In all of these papers, this condition was established along with (and after) the double coset separability condition, stating that for all $H,K \in \mathcal{S}$
\begin{equation}\label{eq:dc_sep}
HK \text{ is separable in } G.
\end{equation}
The purpose of this note is to demonstrate that condition \eqref{eq:dc_sep} implies \eqref{eq:WZ_cond}, provided $\mathcal S$ is closed under taking finite index subgroups. More precisely, we prove the following.
\begin{thm} \label{thm:main-spec_case} Let $H,K$ be subgroups of a residually finite group $G$. Then the following are equivalent:
\begin{itemize}
\item[(a)] the double coset $HK$ is separable in $G$ and $\overline{H} \cap \overline{K}=\overline{H \cap K}$ in $\widehat{G}$;
\item[(b)] for every finite index subgroup $L \leqslant_f G$, with $H \cap K \subseteq L$, the double coset $(H \cap L)K$ is separable in $G$.
\end{itemize}
\end{thm}
The above theorem follows from Proposition~\ref{prop:tract_intersec_equiv} below, which restates condition \eqref{eq:WZ_cond} in terms of finite index subgroups of the group $G$, and Proposition~\ref{prop:main}, which characterises this restatement in terms of double cosets. Both of these propositions are stated in the general situation of a pro-$\mathcal{C}$ topology, where $\mathcal{C}$ is a formation of finite groups. In particular, analogues of Theorem~\ref{thm:main-spec_case} are also true for the pro-$p$ topology, the pro-soluble topology, etc.
Following \cite{A-J}, we say that a group $G$ has the \emph{Wilson-Zalesskii property} if \eqref{eq:WZ_cond} holds for arbitrary finitely generated subgroups $H, K \leqslant G$. This property is named after Wilson and Zalesskii, who established it in the case of finitely generated virtually free groups in \cite{Wil-Zal}. We will call a group $G$ \emph{double coset separable} if \eqref{eq:dc_sep} holds for all finitely generated subgroups $H,K \leqslant G$.
\begin{cor}\label{cor:dc->WZ} Every double coset separable group satisfies the Wilson-Zalesskii property.
\end{cor}
Note that for (virtually) free groups double coset separability was first proved by Gitik and Rips \cite{Gitik-Rips}. This was extended by Niblo \cite{Niblo} to finitely generated Fuchsian groups and fundamental groups of Seifert-fibred $3$-manifolds. In \cite{M-M} the author and Mineh showed that all finitely generated Kleinian groups and limit groups are double coset separable.
Hence, by Corollary~\ref{cor:dc->WZ}, such groups have the Wilson-Zalesskii property. For limit groups this answers a question of Antol\'{\i}n and Jaikin-Zapirain from \cite[Subsection~2.2]{A-J}.
More generally, separability of double cosets of ``convex'' subgroups is known in many non-positively curved groups (see \cite{Min-GFERF,Groves-Manning,Shepherd-imitator,M-M}). By combining these results with Theorem~\ref{thm:main-spec_case} we gain control over the intersection of such subgroups in finite quotients. Our last corollary describes one such application.
\begin{cor} \label{cor:QCERF_rh_gps} Let $G$ be a finitely generated group hyperbolic relative to a family of double coset separable subgroups. If every finitely generated relatively quasiconvex subgroup is separable in $G$ then any two finitely generated relatively quasiconvex subgroups $H,K \leqslant G$ satisfy \eqref{eq:WZ_cond}.
\end{cor}
\begin{proof} Let $G$ be a group from the statement. By \cite[Corollary~1.4]{M-M}, the product of two finitely generated relatively quasiconvex subgroups is separable in $G$. Since a finite index subgroup of a relatively quasiconvex subgroup is again relatively quasiconvex \cite[Lemma~5.22]{M-M}, the claim of the corollary follows from Theorem~\ref{thm:main-spec_case}.
\end{proof}
We finish this note by constructing, in Section~\ref{sec:example}, an example of a finitely presented LERF group which is not double coset separable and does not have the Wilson-Zalesskii property.
\section{A restatement of condition \eqref{eq:WZ_cond}}
Let us fix a formation $\mathcal{C}$ of finite groups; in other words, $\mathcal{C}$ is a non-empty class of finite groups which is closed under taking quotients and subdirect products (see \cite[Section~2.1]{Rib-Zal}).
\subsection{Pro-$\mathcal{C}$ topology and completion}
In this subsection we summarise basic definitions and properties of pro-$\mathcal{C}$ topology and pro-$\mathcal{C}$ completions. We refer the reader to \cite[Sections~3.1, 3.2]{Rib-Zal} for a detailed exposition.
Given a group $G$, we define the \emph{pro-$\mathcal{C}$ topology} on $G$ by taking the family of normal subgroups $\mathcal{N}_\mathcal{C}(G)=\{N \lhd G \mid G/N \in \mathcal{C}\}$ as a basis of open neighborhoods of the identity element.
A subset $A \subseteq G$ will be called \emph{$\mathcal{C}$-open} if it is open in the pro-$\mathcal{C}$ topology on $G$. \emph{$\mathcal{C}$-closed} and \emph{$\mathcal{C}$-clopen} subsets are defined similarly.
We will write $H \leqslant_o G$ and $N \lhd_o G$ to indicate that $H$ is an open subgroup of $G$ and $N$ is an open normal subgroup of $G$ in the pro-$\mathcal{C}$ topology. Note that a subgroup $H \leqslant G$ is $\mathcal{C}$-open if and only if it contains a $\mathcal{C}$-open normal subgroup; and $N \lhd G$ is $\mathcal{C}$-open if and only if $G/N \in \mathcal{C}$. If $H \leqslant_o G$ and $X \subseteq G$ then $XH$ and $G \setminus XH$ are both open as unions of cosets modulo $H$, thus $XH$ is a $\mathcal{C}$-clopen subset of $G$.
We will use $G_{\widehat{\mathcal{C}}}$ to denote the \emph{pro-$\mathcal{C}$ completion} of a group $G$. Equipped with its pro-$\mathcal{C}$ topology, $G_{\widehat{\mathcal{C}}}$ is a profinite group; in particular, it is compact.
The natural homomorphism $G \to G_{\widehat{\mathcal{C}}}$ has dense image. This homomorphism is injective if and only if $G$ is \emph{residually-$\mathcal{C}$}, i.e., $\bigcap_{N \in \mathcal{N}_\mathcal{C}(G)}N=\{1\}$.
\subsection{Tractable intersections}
\begin{defn}
Let $G$ be a group and let $H,K \leqslant G$ be two subgroups. We will say that the intersection $H \cap K$ is \emph{pro-$\mathcal{C}$ tractable} in $G$ if
for every $M \lhd_o G$ there exists $N \lhd_o G$ such that $N \subseteq M$ and
\begin{equation} \label{eq:tract}
HN \cap KN \subseteq (H \cap K) M ~\text{ in } G.
\end{equation}
\end{defn}
\begin{rem}\label{rem:control_of_intersec}
Note that condition \eqref{eq:tract} can be restated as $\phi(H) \cap \phi(K) \subseteq \phi(H \cap K) \phi(M)$ in the finite quotient $G/N \in \mathcal{C}$, where $\phi:G \to G/N$ denotes the natural homomorphism.
\end{rem}
\begin{rem} The following observation will be used throughout this note without further justification. If $A,B$ are subsets of a group $G$ and $H' \leqslant H \leqslant G$ are subgroups then
\[AH' \cap BH=(A \cap BH)H' ~\text{ and }~ H'A \cap HB=H'(A \cap HB).\]
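Indeed, if $ah'=bh$ with $a\in A$, $h'\in H'$, $b\in B$ and $h\in H$, then $a=bh(h')^{-1}\in BH$, so $ah'\in (A\cap BH)H'$; the reverse inclusion is clear as $H'\subseteq H$, and the second identity follows by symmetry.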
\end{rem}
\begin{prop}\label{prop:tract_intersec_equiv} For subgroups $H, K$ of a residually-$\mathcal{C}$ group $G$ the following are equivalent:
\begin{itemize}
\item[(i)] the intersection $H \cap K$ is pro-$\mathcal{C}$ tractable in $G$;
\item[(ii)] $\overline{H} \cap \overline{K}= \overline{H \cap K}$ in $G_{\widehat{\mathcal{C}}}$, where $\overline{H}$ denotes the closure of $H$ in the pro-$\mathcal{C}$ completion $G_{\widehat{\mathcal{C}}}$.
\end{itemize}
\end{prop}
\begin{proof}
Since $G$ is residually-$\mathcal{C}$, we will treat it as a subgroup of $G_{\widehat{\mathcal{C}}}$. Note that for an arbitrary $L \lhd_o G$ its closure $\overline{L}$ is a clopen subgroup of $G_{\widehat{\mathcal{C}}}$ and $\overline{L} \cap G=L$, so that $G_{\widehat{\mathcal{C}}}/\overline{L}=G/L$ (see \cite[Proposition~3.2.2]{Rib-Zal}). Given any $M \lhd_o G$, let $\mathcal{N}_\mathcal{C}(M,G)=\{N \lhd_o G \mid N \subseteq M\}$ and observe that $\{\overline{N} \mid N \in \mathcal{N}_\mathcal{C}(M,G)\}$ is a basis of open neighborhoods of the identity element in $G_{\widehat{\mathcal{C}}}$.
Let us start with showing that (i) implies (ii). Assuming (i), we know that for every $M \lhd_o G$ there exists $N \in \mathcal{N}_\mathcal{C}(M,G)$ such that \eqref{eq:tract} holds. After taking closures of both sides we obtain
\begin{equation} \label{eq:first_inclusion}\overline{HN \cap KN} \subseteq \overline{(H \cap K)M} ~\text{ in } G_{\widehat{\mathcal{C}}}.
\end{equation}
Note that $HN,KN \leqslant_o G$, so, by \cite[Proposition~3.2.2]{Rib-Zal}, $\overline{HN \cap KN}=\overline{HN} \cap \overline{KN}$. Clearly $\overline{H} \cap \overline {K} \subseteq \overline{HN} \cap \overline{KN}$ and
$\overline{(H \cap K)M}=(H \cap K)\overline{M}$, because $\overline{M}$ is a clopen subgroup of $G_{\widehat{\mathcal{C}}}$.
Hence, in view of \eqref{eq:first_inclusion}, we obtain
\begin{equation} \label{eq:second_inclusion}
\overline{H} \cap \overline{K} \subseteq (H \cap K) \overline{M} ~\text{ in } G_{\widehat{\mathcal{C}}}, \text{ for every } M\lhd_o G.
\end{equation}
It is easy to see that $\overline{H \cap K}=\bigcap_{M \lhd_o G} (H \cap K)\overline{M}$, because $\mathcal{N}_{\mathcal{C}}(G_{\widehat{\mathcal{C}}})=\{\overline{L} \mid L \in \mathcal{N}_{\mathcal{C}}(G) \}$. Therefore \eqref{eq:second_inclusion} implies that $\overline{H} \cap \overline{K} \subseteq \overline{H \cap K}$. The opposite inclusion is obvious, so (ii) has been established.
We will now prove that (ii) implies (i) (in the case of profinite topology this was done in \cite[Corollary~10.4]{A-J}). Suppose that (ii) holds and $M \lhd_o G$ is arbitrary.
If (i) is false, then for every $N \in \mathcal{N}_\mathcal{C}(M,G)$, we have
\[(HN \cap KN) \setminus (H \cap K) M\neq \emptyset \text{ in } G, \]
hence
\begin{equation} \label{eq:ovls}
(H \overline{N} \cap K \overline{N}) \setminus (H \cap K) \overline{M}\neq \emptyset \text{ in } G_{\widehat{\mathcal{C}}}, \text{ for all } N \in \mathcal{N}_\mathcal{C}(M,G),
\end{equation}
where we used the fact that $(H \cap K) \overline{M} \cap G=(H \cap K) (\overline{M} \cap G)= (H \cap K)M$.
The family $ \{(H \overline{N} \cap K \overline{N}) \setminus (H \cap K) \overline{M} \mid N \in \mathcal{N}_\mathcal{C}(M,G)\}$ consists of clopen sets in $G_{\widehat{\mathcal{C}}}$ and has the finite intersection property by \eqref{eq:ovls} (because the intersection of finitely many subgroups from $\mathcal{N}_\mathcal{C}(M,G)$ is again in $\mathcal{N}_\mathcal{C}(M,G)$). Compactness of $G_{\widehat{\mathcal{C}}}$ now implies that
\begin{equation}\label{eq:big_intersec}
\bigcap_{N \in \mathcal{N}_\mathcal{C}(M,G)} (H \overline{N} \cap K \overline{N}) \setminus (H \cap K) \overline{M} \neq \emptyset.
\end{equation}
Since $\bigcap_{N \in \mathcal{N}_\mathcal{C}(M,G)} H \overline{N}=\overline{H}$, $\bigcap_{N \in \mathcal{N}_\mathcal{C}(M,G)} K \overline{N}=\overline{K}$ and $\overline{H \cap K} \subseteq (H \cap K) \overline{M}$, \eqref{eq:big_intersec} demonstrates
that $(\overline{H} \cap \overline{K}) \setminus \overline{H \cap K} \neq \emptyset$, contradicting (ii). Thus we have proved that (ii) implies (i).
\end{proof}
\section{Characterising tractableness of intersections using double cosets}
As before we will work with a fixed formation of finite groups $\mathcal{C}$.
For a subgroup $H$ of a group $G$ the pro-$\mathcal{C}$-topology on $G$ induces a topology on $H$ (which may, in general, be different from the pro-$\mathcal{C}$ topology of $H$). We will use $\mathcal{O}_\mathcal{C}(H,G)$ to denote the open subgroups of $H$ in this induced topology. In other words,
\[\mathcal{O}_\mathcal{C}(H,G)=\{H \cap L \mid L \leqslant_o G\}.\]
Note that for every $H ' \in \mathcal{O}_\mathcal{C}(H,G)$, the index $|H:H'|$ is finite because any $L \leqslant_o G$ has finite index in $G$.
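Indeed, any $L\leqslant_o G$ contains some $N\in\mathcal{N}_\mathcal{C}(G)$, whence $|G:L|\leq|G:N|=|G/N|<\infty$.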
\begin{prop} \label{prop:main}
Let $G$ be a group with subgroups $H,K$. Then the following are equivalent:
\begin{itemize}
\item[(i)] the double coset $HK$ is $\mathcal{C}$-closed and the intersection $H \cap K$ is pro-$\mathcal{C}$ tractable in $G$;
\item[(ii)] for every $H' \in \mathcal{O}_\mathcal{C}(H,G)$, with $H \cap K \subseteq H'$, the double coset $H' K$ is $\mathcal{C}$-closed in $G$.
\end{itemize}
\end{prop}
\begin{proof} Let us start with showing that (i) implies (ii). So, assume that (i) is true. Consider any $H' \in \mathcal{O}_\mathcal{C}(H,G)$, containing $H \cap K$. Then $H'=H \cap L$, for some $L \leqslant_o G$, with $H \cap K \subseteq L$. Let $M \lhd_o G$ denote the normal core of $L$ (it is $\mathcal{C}$-open by \cite[Lemma~3.1.2]{Rib-Zal}). Since $H\cap K$ is pro-$\mathcal{C}$ tractable, there exists $N \lhd_o G$ such that $N \subseteq M$ and
\[HN \cap KN \subseteq (H \cap K)M \subseteq L.\]
Since $NK=KN$, as $N \lhd G$, we can conclude that
\begin{equation*}\label{eq:H_cap_KN_in_H'}
H \cap NK= H \cap KN \subseteq H\cap L=H'.
\end{equation*}
Therefore, we have
\[H' K \subseteq HK \cap H'KN=H'(HK \cap NK )=H'(H \cap NK)K \subseteq H' H' K=H'K,\]
whence $H'K=HK \cap H'KN$ in $G$. Note that the subset $H'KN$ is $\mathcal{C}$-clopen in $G$, as $N \lhd_o G$, and the double coset $HK$ is $\mathcal{C}$-closed by the assumption (i). Thus $H'K$ is $\mathcal{C}$-closed as the intersection of closed subsets, so (ii) holds.
Now let us assume (ii) and deduce (i). Then the double coset $HK$ is $\mathcal{C}$-closed in $G$ because $H \in \mathcal{O}_\mathcal{C}(H,G)$ and $H \cap K \subseteq H$. Thus it remains to show that $H \cap K$ is pro-$\mathcal{C}$ tractable in $G$.
Take any $M \lhd_o G$ and set $L=(H \cap K) M \leqslant_o G$. Then $H'=H \cap L \in \mathcal{O}_\mathcal{C}(H,G)$ and we can write
$H= \bigsqcup_{i=1}^n H' h_i$, where $h_1=1$ and $h_i \in H \setminus H'$, for $i=2,\dots,n$. Note that $H \cap K \subseteq H'$, by construction, which easily implies that $h_i \notin H'K$, for $i=2,\dots,n$ (indeed, if $h_i=xy$, where $x \in H'$ and $y \in K$, then $x^{-1}h_i=y \in H \cap K \subseteq H'$, so $h_i \in H'$, whence $i=1$). By the assumption (ii), the double coset $H'K$ is $\mathcal{C}$-closed in $G$, hence there exists $N \lhd_o G$ such that
\begin{equation}\label{eq:h_i_notin_H_dash_K}
h_i \notin H'K N, \text{ for }i=2,\dots,n.
\end{equation}
After replacing $N$ with $N \cap M$, we can suppose that $N \subseteq M$. Let us show that
\[HN \cap KN \subseteq L=(H \cap K)M.\]
Since $HN \cap KN=(H \cap KN)N$ and $N \subseteq L$, it is enough to check that $H \cap KN \subseteq L$. But, in view of \eqref{eq:h_i_notin_H_dash_K}, we know that $H'h_i \cap KN=\emptyset$, for $i=2,\dots,n$, hence $H \cap KN \subseteq H'h_1=H' \subseteq L$, as required. Therefore $H\cap K$ is pro-$\mathcal{C}$ tractable in $G$ and (i) holds.
\end{proof}
\begin{cor} If $H, K$ are subgroups of a group $G$ then the following are equivalent:
\begin{itemize}
\item[(i)] the double coset $HK$ is $\mathcal{C}$-closed and the intersection $H \cap K$ is pro-$\mathcal{C}$ tractable in $G$;
\item[(ii)] for every $H' \in \mathcal{O}_\mathcal{C}(H,G)$, with $H \cap K \subseteq H'$, the double coset $H' K$ is $\mathcal{C}$-closed in $G$;
\item[(iii)] for every $K' \in \mathcal{O}_\mathcal{C}(K,G)$, with $H \cap K \subseteq K'$, the double coset $HK'$ is $\mathcal{C}$-closed in $G$;
\item[(iv)] for all $H' \in \mathcal{O}_\mathcal{C}(H,G)$ and $K' \in \mathcal{O}_\mathcal{C}(K,G)$, with $H \cap K= H' \cap K'$, the double coset $H' K'$ is $\mathcal{C}$-closed in $G$.
\end{itemize}
\end{cor}
\begin{proof}
The equivalence between (i) and (ii) is the subject of Proposition~\ref{prop:main}, and the equivalence between (i) and (iii) follows by symmetry (or because $HK'=(K'H)^{-1}$). Evidently (iv) implies (ii). Conversely, (iv) follows from (ii) and (iii) because
\[H'K \cap HK'=H'(K \cap HK')=H'(K \cap H)K'=H'K',\]
where the last equality is valid since $K \cap H \subseteq H'$.
\end{proof}
\section{A LERF group without the Wilson-Zalesskii property}\label{sec:example}
Throughout this section we assume that $\mathcal{C}$ is the family of all finite groups. In this case the pro-$\mathcal{C}$ topology on a group $G$ is the {profinite topology},
$\mathcal{C}$-open subgroups of $G$ are precisely the finite index subgroups and the $\mathcal{C}$-closed subsets of $G$ are called {separable}. Recall that $G$ is said to be \emph{ERF} if all subgroups are separable and \emph{LERF} if all finitely generated subgroups are separable.
In this section we show that separability of a double coset $HK$ does not necessarily yield that the intersection $H \cap K$ is profinitely tractable even for finitely generated subgroups $H, K$ of a LERF group $G$. Our construction is based on examples of Grunewald and Segal from \cite{G-S}.
Let $A=M_2(\mathbb{Z})$ be the additive group of $2 \times 2$ matrices with integer entries, and let $H=SL_2(\mathbb{Z})$ act on $A$ by left multiplication. We define the group $G$ as the resulting semidirect product $A \rtimes H=M_2(\mathbb{Z}) \rtimes SL_2(\mathbb{Z})$. Recall that $H$ is finitely generated and virtually free and $A$ is the free abelian group of rank $4$, hence $A$ is ERF and $H$ is LERF, so $G$ is LERF (see \cite[Theorem~4]{All-Greg}).
Denote by $i \in A$ the identity matrix from $M_2(\mathbb{Z})$ and set $K=iHi^{-1} \leqslant G$.
For any subgroup $F \leqslant H=SL_2(\mathbb{Z})$ the conjugacy class $i^F=\{fif^{-1} \mid f \in F\}\subseteq A$ is the orbit of the identity matrix under the left action of $F$, so it consists of matrices from $F$, but now considered as a subset of $M_2(\mathbb{Z})=A$. Since the determinant map $\det: A=M_2(\mathbb{Z}) \to \mathbb{Z}$ is clearly continuous with respect to the profinite topologies on $A$ and $\mathbb{Z}$, the conjugacy class $i^H=\det^{-1}(\{1\})$ is closed in the profinite topology on $A$.
Now let us show that the product $i^H H$ is closed in the profinite topology on $G$. Indeed, suppose that $xy \in G \setminus i^H H$, where $x \in A$ and $y \in H$. Then $x \notin i^H$, so there is $m \in \mathbb{N}$ such that for the finite index characteristic subgroup $A'=M_2(m\mathbb{Z}) \lhd A$ we have $x \notin i^H A'$. The latter implies that $xy \notin i^H A' H$. Since $A'H$ is a finite index subgroup of $G$, we see that $i^H A'H$ is a clopen subset in the profinite topology on $G$ containing $i^HH$ but not containing $xy$. Thus $i^H H$ is indeed profinitely closed in $G$. Note that $i^HH=HiH$, thus the double coset $HK=(HiH)i^{-1}$ is separable in $G$.
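Indeed, the identity $i^HH=HiH$ used above holds in any group, since $fih=(fif^{-1})(fh)$ and $(fif^{-1})h=fi(f^{-1}h)$ for all $f,h\in H$.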
As Grunewald and Segal observed in \cite[Section~5]{G-S}, $H$ contains a finite index free subgroup $H'$ (in fact, $|H:H'|=36$) such that the orbit of $i$ under the action of $H'$ is not separable in
the profinite topology on $A$ (equivalently, $H'$ is not closed in the congruence topology on $H=SL_2(\mathbb{Z})$).
Observe that $H'iH=i^{H'} H$, so $H'iH \cap A=i^{H'}$. Since $i^{H'}$ is not separable in $A$, it follows that the double cosets $H'iH$ and $H'K=(H'iH)i^{-1}$ cannot be separable in $G$ (this is true because the topology on the subgroup $A$, induced from the profinite topology on $G$, is always weaker than the profinite topology on $A$).
Finally, we note that $H \cap K=\{1\}$ because the $H$-stabiliser of $i$ is trivial, and every finite index subgroup of $H$ belongs to $\mathcal{O}_{\mathcal{C}}(H,G)$, as $G$ is LERF and $H$ is finitely generated (see, for example, \cite[Lemma 4.17]{M-M}).
Thus we have constructed the following example.
\begin{ex}
There is a LERF group $G$ (isomorphic to a split extension of $\mathbb{Z}^4$ by $SL_2(\mathbb{Z})$) and finitely generated subgroups $H,K \leqslant G$ such that $H \cap K=\{1\}$ and the double coset $HK$ is separable in $G$, but the double coset $H'K$ is not separable in $G$, for some finite index subgroup $H' \leqslant_f H$. We deduce, from Proposition~\ref{prop:main}, that the intersection $H \cap K$ is not profinitely tractable in $G$, so $G$ does not have the Wilson-Zalesskii property by Proposition~\ref{prop:tract_intersec_equiv}.
\end{ex}
\end{document}
|
\begin{document}
\baselineskip=1.2\baselineskip
\pagestyle{plain}
\begin{center}
{\large \bf Visualizing departures from marginal homogeneity for square contingency tables with ordered categories}
Satoru Shinoda${}^{1}$, Takuya Yoshimoto${}^{2}$ and Kouji Tahata${}^{3}$\\
${}^{1}${\it Department of Biostatistics, Yokohama City University, School of Medicine, Japan}\\
${}^{2}${\it Biometrics Department, Chugai Pharmaceutical Co., Ltd., Japan}\\
${}^{3}${\it Department of Information Sciences, Faculty of Science and Technology, Tokyo University of Science, Japan}\\
E-mail: [email protected]\\
{\bf Abstract}
\end{center}
Square contingency tables are a special case commonly used in various fields to analyze categorical data.
Although several analysis methods have been developed to examine marginal homogeneity (MH) in these tables, existing measures are single-summary ones.
To date, a visualization approach has yet to be proposed to intuitively depict the results of MH analysis.
Current measures used to assess the degree of departure from MH are based on information divergences, such as the Kullback-Leibler divergence, and do not satisfy the distance postulates.
Hence, the current measures are not conducive to visualization.
Herein we present a measure utilizing the Matusita distance and introduce a visualization technique that employs sub-measures of categorical data.
Through multiple examples, we demonstrate the meaningfulness of our visualization approach and validate its usefulness for providing insightful interpretations.
\begin{flushleft}
\textit{Key words}: Marginal homogeneity, Matusita distance, power-divergence, visualization.
\\
\end{flushleft}
\noindent \textbf{\large 1. Introduction}
Numerous research areas employ categorical data analysis.
Such data is summarized in a contingency table (see e.g., Agresti, 2013; Kateri, 2014).
A special case is a square contingency table where the row and column variables have the same ordinal categories.
When data cannot be obtained as continuous variables for the evaluation of the efficacy and safety/toxicity of treatments in clinical studies, ordered categorical scales are used instead.
For example, Sugano $et~al.$ (2012) conducted a clinical study where they examined the modified LANZA score (MLS) after 24 weeks’ treatment with esomeprazole 20 mg once daily or a placebo.
The MLS is a popular evaluation scale with five stages (from 0 to +4) and is used for clinical evaluations of gastroduodenal mucosal lesions.
Table 1 shows a square contingency table that summarizes the location shift of the MLS from pre-treatment to post-treatment for each patient.
Such research is interested in whether the treatment effect tends to improve or worsen after an intervention relative to before the intervention.
Thus, the evaluation is concerned with closeness to marginal homogeneity (MH), rather than with independence.
Stuart (1955) introduced the MH model to indicate homogeneity with respect to two marginal distributions.
We are also interested in the structure of inhomogeneity of the two marginal distributions when the MH model does not hold.
This is because we are more interested in the deviation between the pre-treatment and post-treatment marginal distributions (i.e., the effect of the intervention) than in whether the MH model, which represents equal marginal distributions, holds for the data in Table 1.
Consequently, our strategy is to estimate measures representing the degree of departure from MH.
Such measures must quantify the differences between probability distributions, mainly using information divergences such as the Kullback-Leibler divergence or the power-divergence.
\begin{table}[h]
\small\sf\centering
\caption{Shift analysis data of MLS after treatment for 24 weeks with esomeprazole 20 mg once daily or a placebo.\label{T1}}
\begin{tabular}[!bh]{cccccc} \hline
& \multicolumn{5}{c}{Baseline} \\ \cline{2-6}
Study end & 0 & +1 & +2 & +3 & +4 \\ \hline
\multicolumn{5}{l}{(a) Esomeprazole 20~mg once daily}\\
0 & 78 & 9 & 26 & 3 & 1 \\
+1 & 1 & 5 & 6 & 4 & 0 \\
+2 & 9 & 1 & 10 & 3 & 1 \\
+3 & 1 & 0 & 1 & 0 & 0 \\
+4 & 3 & 0 & 1 & 1 & 2 \\ \hline
\multicolumn{5}{l}{(b) Placebo}\\
0 & 41 & 2 & 19 & 0 & 0 \\
+1 & 8 & 0 & 4 & 0 & 0 \\
+2 & 12 & 4 & 14 & 3 & 0 \\
+3 & 0 & 1 & 1 & 3 & 0 \\
+4 & 29 & 7 & 11 & 6 & 0 \\ \hline
\end{tabular}
\end{table}
To this end, Tomizawa, Miyamoto and Ashihara (2003) proposed a measure using the marginal cumulative probability for square contingency tables with ordered categories.
This measure ranges from 0 to 1 and directly represents the degree of departure from MH.
However, it cannot distinguish the direction of the departure.
The two marginal distributions are interpreted as equal (no intervention effect) when the value is 0.
When the values are greater than 0, an improvement is indistinguishable from a worsening effect.
Yamamoto, Ando and Tomizawa (2011) proposed a measure, which lies between -1 and 1, to distinguish the directionality.
However, this measure cannot directly represent the degree of departure from MH: even if its value is 0, the marginal distributions cannot necessarily be interpreted as showing no intervention effect.
To simultaneously analyze the degree and directionality of departure from MH, Ando, Noguchi, Ishii and Tomizawa (2021) proposed a two-dimensional visualized measure that combines the measure proposed by Tomizawa $et~al.$ (2003) and the measure proposed by Yamamoto $et~al.$ (2011).
They also considered visually comparing the degrees of departure from MH in several tables because their measure is independent of the dimensions (i.e., number of categorical values) and sample size.
Appendix 1 explains the main points of the above measures.
The measures proposed by Tomizawa $et~al.$ (2003), Yamamoto $et~al.$ (2011) and Ando $et~al.$ (2021) are single-summary measures.
They are expressed as weighted sums of sub-measures over the category levels.
As a result, a single-summary measure cannot distinguish different behaviors at a given category level and may overlook them; the artificial data examples in the data analysis section provide specific situations.
To address this limitation, we apply visualization as a method utilizing sub-measures defined at each category level.
This visualization also relies on the assumption that a natural interpretation is achieved when the underlying measure satisfies the distance postulates.
To date, no such measure for ordered categories exists, because the Kullback-Leibler divergence and the power-divergence used in the existing measures do not satisfy the distance postulates.
Therefore, we consider a measure using the Matusita distance to capture the discrepancy between two probability distributions while satisfying the distance postulates (see Matusita, 1954, 1955; Read and Cressie, 1988, p.112).
Both academia and general society employ methods to visualize quantitative data.
Examples include pie charts, histograms, and scatterplots.
Although visualizing categorical data has attracted attention recently, different visualization techniques from those for quantitative data are necessary (see, e.g., Blasius and Greenacre, 1998; Friendly and Meyer, 2015; Kateri, 2014).
Visualization of categorical data has two main objectives: revealing the characteristics of the data and intuitively understanding analysis results (Friendly and Meyer, 2015).
Methods for the former include the ``mosaic plot'' and ``sieve diagram'' (see e.g., Friendly, 1995; Hartigan and Kleiner, 1981, 1984; Riedwyl and Sch\"{u}pbach, 1983, 1994).
Methods for the latter include the ``fourfold display'' for odds ratios and the ``observer agreement chart'' for Cohen’s $\kappa$ (see e.g., Bangdiwala 1985, 1987; Fienberg, 1975; Friendly, 1994).
Although the visualization objectives for categorical data may vary, they share common techniques: (i) separating data by categorical levels and (ii) adjusting the size of figure objects based on the frequency of each cell.
Our research aims to realize a visualization for an intuitive understanding of the analysis results for MH.
To date, such a visualization has yet to be proposed.
Although the ``mosaic plot'' and ``sieve diagram'' can be applied to square contingency tables, they are not suitable for examining the structure of MH.
These visualizations are designed to observe the data itself and identify features or patterns without making hypotheses before analyzing the data.
Therefore, our proposed visualization provides an intuitive understanding of the structure of MH using categorical data visualization techniques (i) and (ii).
This paper conducts a comprehensive analysis of the degree and directionality of departure from MH for square contingency tables with ordered categories.
Our approach has two components: (i) measures to quantify the degree of departure of MH using information divergence satisfying distance postulates and (ii) a visualization technique designed for categorical data.
The rest of this paper is organized as follows.
Section 2 defines the proposed measure and visualization.
Section 3 derives an approximated confidence interval for the proposed measure.
Section 4 provides examples of the utility for the proposed measure and visualization.
Section 5 presents the discussion.
Finally, Section 6 closes with concluding remarks.
\\
\noindent \textbf{\large 2. Proposed measure and visualization}
Here, we detail the proposed measure and visualization.
Section 2.1 explains the probability structure of the MH model using formulas.
Section 2.2 defines the sub-measures and single-summary-measure expressed using weights for the sub-measures at each categorical level along with the properties of the proposed measure.
Section 2.3 details the visualization of the proposed measures.
\\
\noindent \textbf{\large 2.1. MH model}
Consider an $r \times r$ square contingency table with the same row and column ordinal classifications.
Let $X$ and $Y$ denote the row and column variables, respectively, and let Pr$(X = i , Y = j) = p_{ij}$ for $i = 1, \ldots , r; j = 1, \ldots , r$.
The MH model can be expressed with various formulas.
For example, the MH model is expressed as
\[
p_{i \cdot} = p_{\cdot i} \quad {\rm for} ~ i = 1, \ldots , r,
\]
where $p_{i \cdot} = \sum^r_{t=1}p_{it}$ and $p_{\cdot i} = \sum^r_{s=1}p_{si}$.
See e.g., Stuart (1955) and Bishop, Fienberg and Holland (1975, p.294).
This indicates that the row marginal distribution is identical to the column marginal distribution.
To consider ordered categories, the MH model can be expressed using the marginal cumulative probability as
\[
F_{1(i)} = F_{2(i)} \quad {\rm for} ~ i = 1, \ldots , r-1,
\]
where $F_{1(i)} = \sum^i_{s=1} p_{s \cdot} = {\rm Pr}(X \leq i)$ and $F_{2(i)} = \sum^i_{t=1} p_{\cdot t} = {\rm Pr}(Y \leq i)$.
The MH model can also be expressed as
\[
G_{1(i)} = G_{2(i)} \quad {\rm for} ~ i = 1, \ldots , r-1,
\]
where $G_{1(i)} = \sum^i_{s=1} \sum^r_{t=i+1} p_{st} = {\rm Pr}(X \leq i, Y \geq i+1)$ and $G_{2(i)} = \sum^r_{s=i+1} \sum^i_{t=1} p_{st} = {\rm Pr}(X \geq i+1, Y \leq i)$.
Furthermore, the MH model can be expressed as
\[
G^c_{1(i)} = G^c_{2(i)} \left( = \frac{1}{2} \right) \quad {\rm for} ~ i = 1, \ldots , r-1,
\]
where
\[
G^c_{1(i)} = \frac{G_{1(i)}}{G_{1(i)} + G_{2(i)}},
\quad
G^c_{2(i)} = \frac{G_{2(i)}}{G_{1(i)} + G_{2(i)}}.
\]
The MH model states that, given that one of $X$ and $Y$ is at most $i$ and the other is at least $i+1$, the conditional probability that $X \leq i$ is equal to the conditional probability that $Y \leq i$.
\\
\noindent \textbf{\large 2.2. Measure of departure from MH}
Several measures have been proposed for various formulas of the MH model.
Here, we consider a measure that is independent of the diagonal probabilities because the MH model does not have constraints on the main-diagonal cell probabilities.
For instance, Tomizawa $et~al.$ (2003) and Yamamoto $et~al.$ (2011) proposed measures that do not depend on the diagonal probabilities.
First, we consider a sub-measure satisfying the distance postulates.
Assuming that $G_{1(i)} + G_{2(i)} \neq 0$, the degree of departure from MH at each categorical level $i$ ($i=1, \ldots, r-1$) is given as
\[
\gamma_i = \left[ \frac{2+\sqrt{2}}{2} \left( \upsilon_{1(i)}^2 + \upsilon_{2(i)}^2 \right) \right]^{\frac{1}{2}},
\]
where
\[
\upsilon_{1(i)} = \sqrt{G^c_{1(i)}} - \sqrt{\frac{1}{2}},
\quad
\upsilon_{2(i)} = \sqrt{G^c_{2(i)}} - \sqrt{\frac{1}{2}}.
\]
The sub-measure $\gamma_i$ has the following characteristics:
\begin{enumerate}
\item[(i)] $0 \leq \gamma_i \leq 1$
\item[(ii)] $\gamma_i = 0$ if and only if $G^c_{1(i)} = G^c_{2(i)} (= 1/2)$
\item[(iii)] $\gamma_i = 1$ if and only if $G^c_{1(i)} =1$ (then $G^c_{2(i)} = 0$) or $G^c_{1(i)} =0$ (then $G^c_{2(i)} = 1$)
\end{enumerate}
The sub-measure $\gamma_i$ is, up to a normalizing constant chosen so that its maximum value is 1, the Matusita distance between $\left( G^c_{1(i)}, G^c_{2(i)} \right)$ and $\left( \frac{1}{2}, \frac{1}{2} \right)$, and it satisfies all three distance postulates.
When the value of the sub-measure is 0, the two marginal cumulative probabilities coincide at category level $i$.
The value of the sub-measure increases as the separation between the marginal cumulative distributions increases.
The separation is maximized when the value of the sub-measure is 1.
Recall that a distance $d$ is defined on a set $W$ if for any two elements $x, y \in W$, a real number $d(x, y)$ is assigned that satisfies the following postulates:
\begin{enumerate}
\item[(i)] $d(x, y) \geq 0$ with equality if and only if $x=y$;
\item[(ii)] $d(y, x) = d(x, y)$;
\item[(iii)] $d(x, z) \leq d(x, y) + d(y, z)$ for $x, y, z \in W$ (the triangle inequality).
\end{enumerate}
See also Read and Cressie (1988, p.111).
Then the power-divergence $I^{(\lambda)}$ (especially, the Kullback-Leibler divergence $I^{(0)}$) does not satisfy postulates (ii) and (iii).
The Matusita distance, which is the square root of $I^{(-\frac{1}{2})}$, satisfies all three postulates.
Assuming that $G_{1(i)} + G_{2(i)} \neq 0$ for $i=1,\ldots,r-1$, we consider a measure using the sub-measures $\gamma_i$ to represent the degree of departure from MH, which is given as
\[
\Gamma = \sum^{r-1}_{i=1} \left( G^{\ast}_{1(i)} + G^{\ast}_{2(i)} \right) \gamma_i,
\]
where
\[
\Delta = \sum^{r-1}_{i=1} \left( G_{1(i)} + G_{2(i)} \right),
\]
and
\[
G^{\ast}_{1(i)} = \frac{G_{1(i)}}{\Delta},
\quad
G^{\ast}_{2(i)} = \frac{G_{2(i)}}{\Delta},
\]
for $i=1, \ldots, r-1$.
The measure $\Gamma$ has the following characteristics:
\begin{enumerate}
\item[(i)] $0 \leq \Gamma \leq 1$
\item[(ii)] $\Gamma = 0$ if and only if the MH model holds
\item[(iii)] $\Gamma = 1$ if and only if the degree of departure from MH is a maximum, in the sense that $G^c_{1(i)}=1$ (then $G^c_{2(i)}=0$) or $G^c_{1(i)}=0$ (then $G^c_{2(i)}=1$), for $i = 1, \ldots, r-1$
\end{enumerate}
Thus, this measure is a weighted sum of the (suitably normalized) Matusita distances between the two distributions $\left( G^c_{1(i)}, G^c_{2(i)} \right)$ and $\left( \frac{1}{2}, \frac{1}{2} \right)$.
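For concreteness, the quantities above can be computed with the following short script; this is a minimal sketch (the function name is illustrative and not taken from the paper), where \texttt{p} is an $r\times r$ array of cell probabilities with $G_{1(i)}+G_{2(i)}\neq 0$ for all $i$.
\begin{verbatim}
import numpy as np

def mh_measures(p):
    # p[i, j] = Pr(X = i+1, Y = j+1); rows index X, columns index Y.
    r = p.shape[0]
    # G_{1(i)} = Pr(X <= i, Y >= i+1) and G_{2(i)} = Pr(X >= i+1, Y <= i),
    # for the levels i = 1, ..., r-1.
    G1 = np.array([p[:i, i:].sum() for i in range(1, r)])
    G2 = np.array([p[i:, :i].sum() for i in range(1, r)])
    Gc1, Gc2 = G1 / (G1 + G2), G2 / (G1 + G2)
    gamma = np.sqrt((2 + np.sqrt(2)) / 2 *
                    ((np.sqrt(Gc1) - np.sqrt(0.5)) ** 2 +
                     (np.sqrt(Gc2) - np.sqrt(0.5)) ** 2))
    weight = (G1 + G2) / (G1 + G2).sum()   # G*_{1(i)} + G*_{2(i)}
    Gamma = (weight * gamma).sum()
    return Gc1, Gc2, weight, gamma, Gamma
\end{verbatim}
Applied to the cell probabilities in Table 2, this reproduces, up to rounding, the coordinates, sizes and labels reported in Table 3.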
\\
\noindent \textbf{\large 2.3. Visualization of the proposed measure}
To visualize the proposed measure, we used the techniques for visualizing categorical data.
First, for fixed $i$ ($i=1, \ldots, r-1$), the sub-measure $\gamma_i$, which represents the relationship between $G^c_{1(i)}$ and $G^c_{2(i)}$, is visualized by the following steps (a minimal plotting sketch is given after the list):
\begin{enumerate}
\item[(i)] Plot a point at the coordinates $\left( G^c_{1(i)}, G^c_{2(i)} \right)$, with $G^c_{1(i)}$ on the $x$-axis and $G^c_{2(i)}$ on the $y$-axis
\item[(ii)] Adjust the point size according to the weight $\left( G^{\ast}_{1(i)} + G^{\ast}_{2(i)} \right)$
\item[(iii)] Display the value of $\gamma_i$ as a text label at each $\left( G^c_{1(i)}, G^c_{2(i)} \right)$ point
\item[(iv)] Color the points red when $\left(G^c_{1(i)} < G^c_{2(i)}\right)$ and blue when $\left( G^c_{1(i)} \geq G^c_{2(i)}\right)$
\item[(v)] Draw a dashed line along the segment $G^c_{1(i)}+G^c_{2(i)}=1$, which is the range over which the point can move, and color its two halves using the same rule
\end{enumerate}
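The following is a minimal plotting sketch of steps (i)--(v) for a single level $i$; it assumes that $G^c_{1(i)}$, $G^c_{2(i)}$, the weight and $\gamma_i$ have already been computed (e.g., with the script in Section 2.2), and the size scaling constant is purely illustrative.
\begin{verbatim}
import matplotlib.pyplot as plt

def plot_gamma_i(ax, Gc1, Gc2, weight, gamma_i):
    color = 'red' if Gc1 < Gc2 else 'blue'          # step (iv)
    # step (v): dashed line along Gc1 + Gc2 = 1, split at (1/2, 1/2)
    ax.plot([0.0, 0.5], [1.0, 0.5], linestyle='--', color='red')
    ax.plot([0.5, 1.0], [0.5, 0.0], linestyle='--', color='blue')
    # steps (i)-(ii): point at (Gc1, Gc2), sized by the weight
    ax.scatter([Gc1], [Gc2], s=2000 * weight, color=color)
    # step (iii): text label with the value of gamma_i
    ax.annotate(f"{gamma_i:.3f}", (Gc1, Gc2))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)

fig, ax = plt.subplots()
plot_gamma_i(ax, 1.0, 0.0, 0.156, 1.0)  # the values for i = 1 in Table 3
plt.show()
\end{verbatim}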
Therefore, the top-left side is red, while the bottom-right side is blue with respect to the point $\left( \frac{1}{2}, \frac{1}{2} \right)$ in the visualization.
Table 2 shows an artificial example used to illustrate the visualization.
\begin{table}[h]
\small\sf\centering
\caption{True cell probabilities in a $6 \times 6$ square contingency table.\label{T2}}
\begin{tabular}[!bh]{ccccccc} \hline
& 1 & 2 & 3 & 4 & 5 & 6 \\ \hline
1 & 0.000 & 0.031 & 0.219 & 0.031 & 0.031 & 0.000 \\
2 & 0.000 & 0.000 & 0.031 & 0.031 & 0.031 & 0.000 \\
3 & 0.000 & 0.031 & 0.000 & 0.031 & 0.031 & 0.000 \\
4 & 0.000 & 0.031 & 0.031 & 0.000 & 0.031 & 0.000 \\
5 & 0.000 & 0.031 & 0.031 & 0.031 & 0.000 & 0.000 \\
6 & 0.000 & 0.031 & 0.031 & 0.219 & 0.031 & 0.000 \\ \hline
\end{tabular}
\end{table}
Table 3 presents the necessary information to visualize Table 2, including $G^c_{1(i)}$ and $G^c_{2(i)}$ used for the coordinates of the point, the weight used for the point size, and the sub-measure $\gamma_i$ used for the text label.
\begin{table}[h]
\small\sf\centering
\caption{Visualization values of $\gamma_i$.\label{T3}}
\begin{tabular}[!bh]{ccccc} \hline
$i$ & $x$-axis & $y$-axis & size & label \\ \hline
1 & 1.000 & 0.000 & 0.156 & 1.000 \\
2 & 0.750 & 0.250 & 0.250 & 0.341 \\
3 & 0.500 & 0.500 & 0.188 & 0.000 \\
4 & 0.250 & 0.750 & 0.250 & 0.341 \\
5 & 0.000 & 1.000 & 0.156 & 1.000 \\ \hline
\end{tabular}
\end{table}
Step 1 visualizes each level $i$.
As an example, Figure 1 depicts how $\gamma_i$ is visualized at level $i=1$.
\begin{figure}
\caption{Visualization of $\gamma_1$.}
\end{figure}
Next, we provide additional definitions to integrate each $\gamma_i$ in step 1 into one figure:
\begin{enumerate}
\item[(i)] Consider the $x$-axis as $i$ for $G^c_{1(i)}$ and the $y$-axis as $i$ for $G^c_{2(i)}$
\item[(ii)] Place the figure of $\gamma_i$ on the diagonal
\end{enumerate}
Figure 2 shows the integrated figure for the example from Table 2, constructed in step 2 according to the definition of the proposed visualization.
\begin{figure}
\caption{Proposed visualization to provide an intuitive understanding of the structure of MH}
\end{figure}
The visualization of the proposed measure using the categorical data methods has the following benefits.
First, the visualization provides information about each $i$, allowing trends in MH to be identified in a square contingency table.
Since the figure visualizes each $\gamma_i$ separately, points do not overlap even if their coordinates are close.
Thus, points are easily identifiable.
Visualizing each $\gamma_i$ separately is important because the sub-measures may take nearly equal values.
Ando $et~al.$ (2021) used a Kullback-Leibler divergence-type measure, but the Kullback-Leibler divergence does not satisfy the distance postulates.
To interpret the distances between points in the figure naturally, the distance postulates must be satisfied (Section 4.1.1 gives a specific example).
Additionally, the proposed visualization can be regarded as a direct use of the sub-measures.
\\
\noindent \textbf{\large 3. Approximate the confidence interval for the measure}
Let $n_{ij}$ denote the observed frequency in the $i$th row and $j$th column of a table ($i =1, \ldots, r;~j = 1, \ldots, r$).
The sample version of $\Gamma$ (i.e., $\hat{\Gamma}$) is given by $\Gamma$ in which \{$p_{ij}$\} is replaced by \{$\hat{p}_{ij}$\}, where $\hat{p}_{ij} = n_{ij}/n$ and $n = \sum \sum n_{ij}$.
It should be noted that the sample version of $G^c_{k(i)}$, $\gamma_i$ and $F_{k(i)}$, which are $\hat{G}^c_{k(i)}$, $\hat{\gamma}_i$ and $\hat{F}_{k(i)}$, respectively, are given in a similar manner ($i=1, \ldots, r-1;~k=1, 2$).
Given that \{$n_{ij}$\} arises from a full multinomial sampling, we can estimate the standard error for $\hat{\Gamma}$ and construct a large-sample confidence interval for $\Gamma$.
The delta method can approximate the standard error.
$\sqrt[]{\mathstrut n}(\hat{\Gamma} - \Gamma)$ has an asymptotic (as $n \rightarrow \infty$) normal distribution with mean zero and variance $\sigma^2[ \Gamma ]$.
See Appendix 2 for the details of $\sigma^2[ \Gamma ]$.
Let $\hat{\sigma}^2[ \Gamma ]$ denote $\sigma^2[ \Gamma ]$ where \{$p_{ij}$\} is replaced by \{$\hat{p}_{ij}$\}.
Then $\hat{\sigma} [ \Gamma ]/\sqrt[]{\mathstrut n}$ is the estimated approximate standard error for $\hat{\Gamma}$, and
$\hat{\Gamma} \pm z_{p/2} \hat{\sigma} [ \Gamma ]/\sqrt[]{\mathstrut n}$ is an approximate $100(1-p)$ percent confidence interval for $\Gamma$,
where $z_{p/2}$ is the $100 (1-p/2)$th percentile of the standard normal distribution.
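For concreteness, the interval can be computed as in the following minimal R sketch (ours, not the authors' program); \verb|gamma_ci| and its arguments are illustrative names, and the estimated asymptotic variance $\hat{\sigma}^2[\Gamma]$ is assumed to have been computed from Appendix 2.
\begin{verbatim}
## Minimal R sketch: Wald-type confidence interval for Gamma.
gamma_ci <- function(Gamma_hat, sigma2_hat, n, level = 0.95) {
  z <- qnorm(1 - (1 - level) / 2)
  Gamma_hat + c(-1, 1) * z * sqrt(sigma2_hat) / sqrt(n)
}
\end{verbatim}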
The asymptotic normal distribution may not be applicable when estimating measures on small sample datasets.
In a small dataset, the sample proportion of the $(i, j)$ cell may be 0 (i.e., $\hat{p}_{ij} = 0$).
Thus, we consider Bayesian methods.
Although the sample proportion is typically used to estimate the approximate standard error for $\hat{\Gamma}$, herein we consider the Bayes estimator derived from an uninformative prior.
To have a vague prior, the Haldane prior is used for the prior information (see Haldane 1932; Berger 1985, p.89).
We set all parameters of the Dirichlet distribution to 0.0001 when estimating the approximate variance of the proposed measure.
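The following minimal R sketch (ours, not the authors' program) illustrates this smoothing; it assumes that the Bayes estimator is the posterior mean under a symmetric Dirichlet prior with all parameters equal to $0.0001$, which replaces the raw sample proportions when some $n_{ij}=0$.
\begin{verbatim}
## Minimal R sketch: Dirichlet posterior-mean estimate of {p_ij}.
bayes_p <- function(n_mat, alpha = 1e-4) {
  (n_mat + alpha) / (sum(n_mat) + length(n_mat) * alpha)
}
\end{verbatim}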
\\
\noindent \textbf{\large 4. Data analysis}
\noindent \textbf{\large 4.1. Artificial data}
\noindent \textbf{\large 4.1.1. Role of distance postulates for visualization}
To illustrate the concept of visualization, we used artificial datasets in two scenarios: one that satisfies the structure of MH and one that has location-shifted marginal distributions.
The visualization in Table 4(a) shows that all values of sub-measure $\hat{\gamma}_i$ are equal to zero, and the value of the proposed measure $\hat{\Gamma}$ is zero (i.e., the MH model holds).
In terms of information divergences, the two marginal distributions can be interpreted as the same.
Therefore, the values of the label, which is the sub-measure using the Matusita distance, are zero, and points are drawn at $\left( \frac{1}{2}, \frac{1}{2} \right)$ in the visualization (Figure 3(a)).
The visualization in Table 4(b) shows that all values of sub-measure $\hat{\gamma}_i$ are equal to 0.341 because the assumed structure shows location-shifted marginal distributions.
Since we estimated $\left(\hat{G}^c_{1(i)} < \hat{G}^c_{2(i)}\right)$, the point on the graph is drawn from $\left( \frac{1}{2}, \frac{1}{2} \right)$ to the upper left (Figure 3(b)).
Because the label values are sub-measures using the Matusita distance that satisfies distance postulate (ii), it can be interpreted as the distance between $\left( G^c_{1(i)}, G^c_{2(i)} \right)$ and $\left( \frac{1}{2}, \frac{1}{2} \right)$.
However, the direction is crucial when using the Kullback-Leibler divergence (see Appendix 1).
When using the Kullback-Leibler divergence in Table 4(b), the distance from $\left( \frac{1}{2}, \frac{1}{2} \right)$ to $\left( G^c_{1(i)}, G^c_{2(i)} \right)$ and the distance from $\left( G^c_{1(i)}, G^c_{2(i)} \right)$ to $\left( \frac{1}{2}, \frac{1}{2} \right)$ differ (Table 5).
Therefore, the label value must be selected carefully because this divergence may hinder an intuitive interpretation.
In addition, because the proposed measure satisfies the triangle inequality, indirect comparisons of two points via their distances from a common reference can be evaluated appropriately.
Thus, the visualization must use a divergence that satisfies the distance postulates.
In addition, the proposed visualization gives a natural and intuitive interpretation because we can understand the degree of departure from MH for each level $i$, and the sub-measure calculated by $\hat{G}^c_{1(i)}$ and $\hat{G}^c_{2(i)}$ compares the marginal cumulative distributions $\left( \hat{F}_{1(i)}~{\rm and}~\hat{F}_{2(i)} \right)$.
This section shows the visualization in monotonic differences of the marginal cumulative distributions, but the next section illustrates the relationship between marginal cumulative distributions and visualizations in several patterns.
\begin{table}[h]
\small\sf\centering
\caption{Artificial data.\label{T4}}
\begin{tabular}[!bh]{ccccc} \hline
& 1 & 2 & 3 & 4 \\ \hline
\multicolumn{5}{l}{(a)}\\
1 & 0 & 10 & 10 & 10 \\
2 & 10 & 0 & 10 & 10 \\
3 & 10 & 10 & 0 & 10 \\
4 & 10 & 10 & 10 & 0 \\ \hline
\multicolumn{5}{l}{(b)}\\
1 & 0 & 10 & 10 & 10 \\
2 & 30 & 0 & 10 & 10 \\
3 & 30 & 30 & 0 & 10 \\
4 & 30 & 30 & 30 & 0 \\ \hline
\end{tabular}
\end{table}
\begin{figure}
\caption{Visualization result of Table 4.}
\end{figure}
\begin{table}[h]
\small\sf\centering
\caption{Values of the Kullback-Leibler divergence.\label{T5}}
\begin{tabular}[!bh]{ccccc} \hline
$i$ & $\hat{G}^c_{1(i)}$ & $\hat{G}^c_{2(i)}$ & $\hat{K}^1_i$ & $\hat{K}^2_i$ \\ \hline
1 & 0.250 & 0.750 & 0.131 & 0.144 \\
2 & 0.250 & 0.750 & 0.131 & 0.144 \\
3 & 0.250 & 0.750 & 0.131 & 0.144 \\ \hline
\multicolumn{5}{l}{$\hat{K}^1_i$ = $I^{(0)}_i \left( \left\{ \hat{G}^c_{1(i)}, \hat{G}^c_{2(i)} \right\} ; \left\{ \frac{1}{2}, \frac{1}{2} \right\} \right)$} \\
\multicolumn{5}{l}{$\hat{K}^2_i$ = $I^{(0)}_i \left( \left\{ \frac{1}{2}, \frac{1}{2} \right\} ; \left\{ \hat{G}^c_{1(i)}, \hat{G}^c_{2(i)} \right\} \right)$} \\
\end{tabular}
\end{table}
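As a numerical check of Table 5 (computed here directly from the formula for $I^{(0)}_i$ given in Appendix 1, with natural logarithms), for $\left(\hat{G}^c_{1(i)}, \hat{G}^c_{2(i)}\right) = (0.250, 0.750)$ we obtain
\begin{equation}
\begin{split}
\hat{K}^1_i &= 0.25 \log\frac{0.25}{0.5} + 0.75 \log\frac{0.75}{0.5} \approx 0.131, \\
\hat{K}^2_i &= 0.5 \log\frac{0.5}{0.25} + 0.5 \log\frac{0.5}{0.75} \approx 0.144, \nonumber
\end{split}
\end{equation}
so the two directions indeed give different values, whereas the Matusita distance is symmetric in its arguments.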
\\
\noindent \textbf{\large 4.1.2. Perception of different behaviors between categorical levels}
Our visualization can interpret the relationships between the marginal cumulative distributions, which is difficult using a single-summary-measure.
Here, we treat artificial data where the values of the single-summary-measure are the same, but the visualizations of the sub-measures behave differently.
Tables 6(a)–(d) show the artificial data, which are set up so that the value of the measure is 0.341.
Figures 4(a)–(d) show the visualizations of Tables 6(a)–(d).
Table 6(a) illustrates a scenario where the marginal cumulative distribution is location-shifted constantly.
This structure would be expected based on the value of the measure.
In a clinical study, assuming such a situation implies a constant treatment effect from pre-treatment to post-treatment.
In contrast, Table 6(b) represents a scenario where the marginal cumulative distribution spreads as the categorical level $i$ increases.
Moreover, Tables 6(c)–(d) show situations where the marginal cumulative distribution differs at the categorical level $i$.
In a clinical study, assuming such a situation suggests that the treatment effect depends on the pre-intervention condition.
\begin{table}[h]
\small\sf\centering
\caption{Artificial data.\label{T6}}
\begin{tabular}[!bh]{ccccc} \hline
& 1 & 2 & 3 & 4 \\ \hline
\multicolumn{5}{l}{(a)}\\
1 & 0 & 30 & 30 & 30 \\
2 & 10 & 0 & 30 & 30 \\
3 & 10 & 10 & 0 & 30 \\
4 & 10 & 10 & 10 & 0 \\ \hline
\multicolumn{5}{l}{(b)}\\
1 & 0 & 5 & 5 & 6 \\
2 & 5 & 0 & 11 & 36 \\
3 & 5 & 10 & 0 & 86 \\
4 & 5 & 10 & 10 & 0 \\ \hline
\multicolumn{5}{l}{(c)}\\
1 & 0 & 30 & 30 & 30 \\
2 & 10 & 0 & 0 & 30 \\
3 & 10 & 240 & 0 & 30 \\
4 & 10 & 10 & 10 & 0 \\ \hline
\multicolumn{5}{l}{(d)}\\
1 & 0 & 30 & 30 & 30 \\
2 & 10 & 0 & 30 & 30 \\
3 & 10 & 10 & 0 & 0 \\
4 & 10 & 10 & 160 & 0 \\ \hline
\end{tabular}
\end{table}
\begin{figure}
\caption{Visualization result of Table 6.}
\end{figure}
\\
\noindent \textbf{\large 4.2. Simulation studies}
Monte Carlo simulations were performed to evaluate the coverage probabilities of the approximate $95\%$ confidence intervals, assuming random sampling from an underlying bivariate normal distribution.
Here, we considered random variables $Z_1$ and $Z_2$ with means ${\rm E}(Z_1) = 0$ and ${\rm E}(Z_2) = d$, variances ${\rm Var}(Z_1) = {\rm Var}(Z_2) = 1$, and correlation ${\rm Corr}(Z_1, Z_2 ) = 0.2$.
Assuming a $6 \times 6$ table is formed using the cutoff points for each variable at $-1.2, -0.6, 0, 0.6, 1.2$, we evaluated several simulation scenarios where $d = 0.00~{\rm to}~4.00$ by 0.25 and $n = 36, 180, 360, 3600$ (${\rm sparseness~index}=1, 5, 10, 100$).
The simulation studies were performed based on 100,000 trials per scenario.
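The following minimal R sketch (ours, not the authors' simulation program) generates one simulated table under the setting described above; the function name and structure are illustrative.
\begin{verbatim}
## Minimal R sketch: one simulation replicate of a 6 x 6 table.
library(MASS)                                   # for mvrnorm()
one_table <- function(n, d, rho = 0.2,
                      cuts = c(-Inf, -1.2, -0.6, 0, 0.6, 1.2, Inf)) {
  Sigma <- matrix(c(1, rho, rho, 1), 2, 2)
  z <- mvrnorm(n, mu = c(0, d), Sigma = Sigma)
  table(cut(z[, 1], cuts), cut(z[, 2], cuts))
}
## e.g. one replicate with n = 180 and d = 1:  tab <- one_table(180, 1)
\end{verbatim}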
Figure 5 plots the mean of random variable $Z_2$ along with the true value of the measure based on a bivariate normal distribution.
When $d=0$, the true value of the measure is 0 because the two means are equal, a condition stronger than the structure of MH.
Although the true value increases monotonically as $d$ increases, a large mean difference between the random variables is necessary for the true value to reach 1.
\begin{figure}
\caption{Mean of random variable $Z_2$ and the true value of the measure, which are based on a bivariate normal distribution.}
\end{figure}
Figure 6 shows the coverage probability according to the true values.
For a small sample size, it is difficult to attain the nominal coverage probability, whereas for a sufficiently large sample size the coverage probability stays close to the nominal $95\%$ level.
\begin{figure}
\caption{Transitions of true values and coverage probabilities.}
\end{figure}
\\
\noindent \textbf{\large 4.3. Example}
As an example, consider the data in Table 1.
In the original work (Sugano $et~al$., 2012), the proportion of improvement or deterioration for the esomeprazole group (drug group) and placebo group were described.
Table 7 shows the results of applying the proposed measure $\Gamma$ to these data to statistically consider the treatment effects for the drug or placebo.
The estimate of asymptotic variance using the sample proportion cannot be calculated because $\hat{G}^c_{1(4)}=0$ in Table 1(b).
Hence, a Bayes estimator is used to estimate the asymptotic variance.
The $95\%$ confidence intervals do not contain zero, suggesting that both groups exhibit a clear degree of departure from MH.
That is, the marginal distribution after the treatment shifts compared to that before the treatment.
For an intuitive understanding, Figure 7 plots the trend, where blue indicates an improving trend and red a deteriorating one.
The drug group shows an improving trend ($\hat{G}^c_{1(i)} \geq \hat{G}^c_{2(i)}$), while the placebo group displays a deteriorating trend ($\hat{G}^c_{1(i)} < \hat{G}^c_{2(i)}$).
For the drug group, $i=1, 2, 3$ show an improvement trend, while $i=4$ shows a deteriorating trend although the circle is small (i.e., the proportion of observed frequencies comprising $\hat{G}^c_{1(4)}$ and $\hat{G}^c_{2(4)}$ is small relative to the total).
These results imply that there might be differences in treatment effects between $i$ levels.
\begin{table}[h]
\small\sf\centering
\caption{Estimates of the measure, approximate standard error, and approximate $95\%$ confidence intervals applied to the data in Table 1.\label{T7}}
\begin{tabular}[!bh]{cccc} \hline
Applied & Estimated &Standard & Confidence \\
data & measure &error & interval \\ \hline
Table 1(a) & $0.308$ & $0.078$ & $(0.156, 0.460)$ \\
Table 1(b) & $0.511$ & $0.059$ & $(0.395, 0.627)$ \\ \hline
\end{tabular}
\end{table}
\begin{figure}
\caption{Visualization results of Table 1.}
\end{figure}
\\
\noindent \textbf{\large 5. Discussion}
In the proposed measure, sub-measures are used in the visualization to capture features overlooked by a single summary measure.
Previous studies have adopted similar approaches, except that the sub-measures are not used for interpretation (Tomizawa $et~al.$, 2003; Yamamoto $et~al.$, 2011).
This study demonstrates that sub-measures allow two kinds of marginal inhomogeneities to be visualized, providing a more detailed interpretation of the single-summary-measure.
The proposed visualization is analyzed using Table 1.
First, because the Matusita distance satisfies the distance postulates, the visualization that draws points on two-dimensional coordinates can give a natural and intuitive interpretation.
In particular, existing measures based on the power-divergence (Kullback-Leibler divergence), which does not satisfy distance postulate (ii), would give different values if the start point and the end point of the distance were swapped.
That is, the data in Table 1 would create two visualization patterns.
In contrast, for the Matusita distance, the same value is obtained even if the distance from the start point to the end point is swapped.
Hence, a special annotation is unnecessary for a visual interpretation.
Furthermore, the fact that in Figure 7(a) the levels $i=1, 2, 3$ and $i=4$ show different directions is difficult to discern using the existing measure proposed by Yamamoto $et~al.$ (2011) because it is a single-summary-measure.
However, the different directions can be considered intuitively through visualization by level $i$.
The proposed visualization does not draw all points in a single coordinate system because, in real data analyses, the degrees of departure from MH tend to be similar across levels (Figure 7); if the points were drawn together, it would be difficult to identify which level $i$ each point corresponds to.
Hence, it is important to satisfy the distance postulates and to consider methods for visualizing categorical data of square contingency tables.
The visualization program was implemented in the R programming language (R Core Team, 2023).
Note that a graphical layout in the ``ggplot2'' package is defined by ``gtable'' (and also ``grid'').
In addition, the arrangement of multiple figure objects can be set by package ``gridExtra''.
We used ``grid'' and ``gridExtra'' packages for visualization purposes.
We referenced the function ``agreementplot()'' in the ``vcd'' package, a categorical data visualization package that implements the ``observer agreement chart''.
\\
\noindent \textbf{\large 6. Conclusion}
The proposed measure $\Gamma$ is the weighted sum of the sub-measures that satisfy all three distance postulates.
Here, we demonstrate the approximated confidence interval for $\Gamma$.
The proposed visualization using the Matusita distance provides a natural visual interpretation of MH in a square contingency table.
In addition, we show that the visualization can provide useful interpretations using an example.
\\
\noindent \textbf{\large Appendix 1}
Assuming that $\{ G_{1(i)} + G_{2(i)} \neq 0 \}$, the power-divergence-type measure representing the degree of departure from MH proposed by Tomizawa, Miyamoto and Ashihara (2003) for $\lambda > -1$ is given as
\begin{equation}
\begin{split}
\Phi^{(\lambda)} &= \frac{\lambda(\lambda+1)}{2^{\lambda}-1} \sum^{r-1}_{i=1} \left( G^{\ast}_{1(i)} + G^{\ast}_{2(i)}\right) \\
& ~~~~~~~~~~~~~~~ \times I^{(\lambda)}_i \left( \left\{ G^c_{1(i)}, G^c_{2(i)} \right\} ; \left\{ \frac{1}{2}, \frac{1}{2} \right\} \right), \nonumber
\end{split}
\end{equation}
where
\begin{equation}
\begin{split}
I^{(\lambda)}_i (\cdot, \cdot) &= \frac{1}{\lambda(\lambda+1)} \Biggl[ G^c_{1(i)} \left\{ \left( \frac{G^c_{1(i)}}{1/2}\right)^{\lambda} - 1 \right\} \\
& ~~~~~~~~~~~~~~~~~~~~~ + G^c_{2(i)} \left\{ \left( \frac{G^c_{2(i)}}{1/2}\right)^{\lambda} - 1 \right\} \Biggr], \nonumber
\end{split}
\end{equation}
and the value at $\lambda=0$ is taken to be the limit as $\lambda \rightarrow 0$.
Note that $I^{(\lambda)}_i (\cdot, \cdot)$ is the power-divergence between two distributions (see Cressie and Read, 1984; Read and Cressie, 1988, p.15).
Namely,
\[
I^{(0)}_i (\cdot, \cdot) = G^c_{1(i)} \log \left( \frac{G^c_{1(i)}}{1/2} \right) + G^c_{2(i)} \log \left( \frac{G^c_{2(i)}}{1/2} \right).
\]
This measure has the following characteristics:
\begin{enumerate}
\item[(i)] $\Phi^{(\lambda)} = 0$ if and only if the MH model holds
\item[(ii)] $\Phi^{(\lambda)} = 1$ if and only if the degree of departure from MH is a maximum, in the sense that $G^c_{1(i)}=1$ (then $G^c_{2(i)}=0$) or $G^c_{1(i)}=0$ (then $G^c_{2(i)}=1$), for $i = 1, \ldots, r-1$
\end{enumerate}
Second, assuming that $\{ G_{1(i)} + G_{2(i)} \neq 0 \}$, the measure representing two kinds of marginal inhomogeneities proposed by Yamamoto, Ando and Tomizawa (2011) is given as
\[
\Psi = \frac{4}{\pi} \sum^{r-1}_{i=1} \left( G^{\ast}_{1(i)} + G^{\ast}_{2(i)}\right) \left( \theta_i - \frac{\pi}{4} \right),
\]
where
\[
\theta_i = \cos^{-1} \left( \frac{G_{1(i)}}{\sqrt{ G^2_{1(i)} + G^2_{2(i)} }} \right).
\]
This measure has the following characteristics:
\begin{enumerate}
\item[(i)] $\Psi = -1$ if and only if there is a structure of maximum upper-marginal inhomogeneity
\item[(ii)] $\Psi = 1$ if and only if there is a structure of maximum lower-marginal inhomogeneity
\item[(iii)] If the MH model holds then $\Psi = 0$, but the converse does not hold
\end{enumerate}
Yamamoto $et~al.$ (2011) defined this structure ($\Psi = 0$) as the average MH model.
Third, assuming that $\{ G_{1(i)} + G_{2(i)} \neq 0 \}$, the two-dimensional measure that can simultaneously analyze the degree and directionality of departure from MH proposed by Ando, Noguchi, Ishii and Tomizawa (2021) is given as
\[
\boldsymbol{\tau} = \begin{pmatrix} \Phi^{(0)} \\ \Psi \end{pmatrix}.
\]
This two-dimensional measure has the following characteristics:
\begin{enumerate}
\item[(i)] $\boldsymbol{\tau} = (0, 0)^t$ if and only if the MH model holds
\item[(ii)] $\boldsymbol{\tau} = (1, -1)^t$ if and only if there is a structure of maximum upper-marginal inhomogeneity
\item[(iii)] $\boldsymbol{\tau} = (1, 1)^t$ if and only if there is a structure of maximum lower-marginal inhomogeneity
\end{enumerate}
\noindent \textbf{\large Appendix 2}
Using the delta method, $~\sqrt[]{\mathstrut n}(\hat{\Gamma} - \Gamma)$ has an asymptotic variance $\sigma^2[ \Gamma ]$, which is given as
\[
\sigma^2[ \Gamma ] = \sum^{r-1}_{k=1} \sum^{r}_{l=k+1} \left( p_{kl} D^2_{kl} + p_{lk} D^2_{lk} \right),
\]
where
\begin{equation}
\begin{split}
D_{kl} &= \frac{1}{\Delta} \sqrt{ \frac{2+\sqrt{2}}{2} } \sum^{r-1}_{i=1} I(k \leq i, l \geq i+1) A_i - \frac{(l-k)}{\Delta} \Gamma, \\
D_{lk} &= \frac{1}{\Delta} \sqrt{ \frac{2+\sqrt{2}}{2} } \sum^{r-1}_{i=1} I(k \leq i, l \geq i+1) B_i - \frac{(l-k)}{\Delta} \Gamma, \\
A_i &= \frac{1}{2\sqrt{C_i}} \left( 2C_i + \upsilon_{1(i)} \frac{G^c_{2(i)}}{\sqrt{G^c_{1(i)}}} - \upsilon_{2(i)} \sqrt{G^c_{2(i)}} \right), \\
B_i &= \frac{1}{2\sqrt{C_i}} \left( 2C_i - \upsilon_{1(i)} \sqrt{G^c_{1(i)}} + \upsilon_{2(i)} \frac{G^c_{1(i)}}{\sqrt{G^c_{2(i)}}} \right), \\
C_i &= \upsilon_{1(i)}^2 + \upsilon_{2(i)}^2, \nonumber
\end{split}
\end{equation}
and $I(\cdot)$ is the indicator function.
\end{document}
\begin{document}
\title[Unified 3--manifold invariants]{On the
unification of
quantum 3--manifold invariants}
\author{Anna Beliakova}
\address{Institut f\"ur Mathematik, Universit\"at Zurich,
Winterthurerstrasse 190, 8057 Z\"urich, Switzerland}
\email{[email protected], [email protected]}
\author{Thang Le}
\address{Department of Mathematics, Georgia Institute of Technology,
Atlanta, GA 30332--0160, USA }
\email{[email protected]}
\keywords{3--manifold, unified quantum invariant, Habiro ring,
Andrews identity, Frobenius map}
\begin{abstract}
In 2006 Habiro initiated a construction of generating functions for
Witten--Reshetikhin--Turaev (WRT) invariants
known as unified WRT invariants. In a series of papers
together with Irmgard B\"uhler
and Christian Blanchet
we extended his construction to a larger class of 3--manifolds.
The unified invariants provide a strong tool to study properties
of the whole collection of WRT invariants, e.g. their integrality,
and hence, their categorification.
In this paper we give a survey
on ideas and techniques used in the construction of the unified
invariants.
\end{abstract}
\maketitle
\vskip1mm
\noindent
{\it 2000 Mathematics Subject Classification:}
57N10 (primary), 57M25 (secondary)
\section*{Introduction}
\subsection*{Background}
In the 60s and 70s Rochlin, Lickorish and Kirby established a remarkable
connection between links and 3--manifolds.
They showed that every 3--manifold can be obtained by surgery (on $S^3$) along framed links,
and surgeries along two links give the same 3--manifold if and only if the links
are related by a sequence of Kirby moves. This allows us to think of 3--manifolds as
equivalence classes of framed links modulo the relation generated by Kirby moves.
After
the discovery of the Jones polynomial in 1984, knot theory experienced
the transformation
from an esoteric branch of pure mathematics to a dynamic
research field with deep connections to mathematical physics,
the theory of integrable and dynamic systems, von Neumann algebras,
representation theory, homological algebra, algebraic geometry, etc.
Among important developments were the constructions of {\em quantum link invariants} (generalization of the Jones polynomial), and
of {\em the Kontsevich integral} (universal finite type invariant).
The quantum link invariants were extended to 3--manifolds by
Witten and Reshetikhin--Turaev (WRT), the lift of the
Kontsevich integral to 3--manifolds was defined by Le--Murakami--Ohtsuki (LMO).
However, while the relationship between the Kontsevich integral and quantum link invariants is simple,
the relationship between the LMO invariant and the WRT invariants is much more complicated and remains mysterious in many cases.
Let us explain this in more details.
The Kontsevich integral of a knot
takes values in a certain algebra $\mathcal A(S^1)$ of chord diagrams.
Any semi--simple Lie algebra and its module define a map $\mathcal A(S^1) \to \BQ[[h]]$
called a weight system. One important result of Le--Murakami and Kassel is
that the following diagram commutes, where the $\frak{sl}_2$ weight system uses the 2--dimensional defining representation.
\begin{diagram}
& &\mathbb Z[q^{\pm 1}]& &\\
&\ruTo^{\text{Jones polynomial}} & &\rdTo^{q=e^h} &\\
\{\text{knots}\}& & & &\mathbb Q[[h]]\\
&\rdTo_{\text{Kontsevich integral}}& &\ruTo_{\frak {sl}_2\text{-weight system}}&\\
& &\mathcal A(S^1) & &
\end{diagram}
In particular, this proves that the Kontsevich integral dominates the Jones polynomial, and similarly all
quantum link invariants coming from
Lie algebras. Hence in addition to being universal for finite type, the Kontsevich integral is also universal
for all quantum link invariants. It is conjectured that
the Kontsevich integral separates knots.
Does there exist a similar commutative diagram for 3--manifolds?
The quantum WRT invariant
associates with a compact orientable 3--manifold $M$, a root of unity
$\xi$ and a semi--simple Lie algebra, say ${\mathbf f}rak {sl}_2$
for simplicity,
a complex number $\tauau_M(\xi)$. The LMO invariant takes values
in a certain algebra ${\mathbf m}athcal A$ of Feynman diagrams. Every semi--simple Lie algebra defines a weight system map from ${\mathbf m}athcal A$ to ${\mathbf m}athbb Q[[h]]$.
Hence, we have
\betaegin{diagram}
& &{\mathbf m}athbb C& &\\
&\ruTo^{\tauau_M(\xi)} & &\langlembdauDotsto &\\
\{\tauext{3--manifolds}\}& & & &{\mathbf m}athbb Q[[h]]\\
&\rdTo_{\tauext{LMO}}& &\ruTo_{{\mathbf f}rak {sl}_2\tauext{--weight system}}&\\
& &{\mathbf m}athcal A & &
\epsilonsilonnd{diagram}
The image of the composition of the two bottom arrows is
known as the Ohtsuki series \cite{Oh}. Ohtsuki showed that for
any rational homology 3--sphere $M$ and a root of unity $\xi$
of {\it prime} order $p$, the first $(p-1)/2$ coefficients, modulo $p$,
of the Ohtsuki series
are determined by $\tau_M(\xi)$. This is
shown by the dotted arrow in the diagram.
This result of Ohtsuki raises many interesting questions: Does there exist
any relationship between the LMO and WRT invariants at roots of
unity of non--prime order? Is the whole set of WRT invariants determined by the LMO invariant?
The discovery of the series prompted Ohtsuki to build the theory of
finite type invariants of homology 3--spheres, which was further developed by
Goussarov and Habiro.
In the 90s, Habegger, Garoufalidis and Beliakova
showed that the LMO invariant is trivial if the first Betti number of a 3--manifold
is bigger than 3, and for the Betti numbers 1,2 and 3,
the LMO invariant is determined
by the classical Casson--Walker--Lescop invariant. However, in the case
of rational homology 3--spheres, the LMO invariant is more powerful, it is
a universal finite type invariant in the Goussarov-Habiro sense.
The relationship between the LMO and WRT invariants
at non--prime roots of unity remained open
for quite a while.
This is because all known techniques heavily
rely on the fact that the order of the root is prime and can not
be extended to other roots. However, recently
Habiro's theory \cite{Ha} of unified
invariants provided
a complete solution of this problem in the case
of integral homology 3--spheres. In this case, also
the question of integrality of the WRT invariants at non--prime roots of unity
was solved simultaneously. Though
intensively studied (see
\cite{Mu}, \cite{MR}, \cite{GM}, \cite{Le10} and the references there),
the integrality of WRT invariant was previously known
for prime roots of unity only.
Note that a conceptual solution of the integrality problem is of primary
importance for any attempt of categorification of the
WRT invariants (compare \cite{Kho}).
\subsection*{Unified invariants of integral homology 3--spheres}
The unification of the WRT invariants was initiated in 2006 by Habiro.
For any integral homology
3--sphere $M$,
Habiro \cite{Ha} constructed a {\em unified invariant} $J_M$
whose evaluation at any root of unity coincides with
the value of the WRT
invariant at that root.
Habiro's unified invariant $J_M$ is an element of the following
ring (Habiro's ring)
\[
\widehat{\Z[q]}:=\lim_{\overleftarrow{\hspace{2mm}k\hspace{2mm}}}
\frac{\BZ[q]}{
((q;q)_k)}, \qquad \text{ where} \quad (q;q)_k = \prod_{j=1}^k (1-q^j).
\]
Every element $f(q)\in \widehat{\Z[q]}$ can be written (non--uniquely) as an infinite sum
\[
f(q)= \sum_{k\ge 0} f_k(q)\, (1-q)(1-q^2)\cdots(1-q^k),
\]
\]
with $f_k(q)\in \BZ[q]$. When $q=\xi$, a root of unity,
only a finite number of terms on the right hand side are not zero,
hence the evaluation
$\mathrm{ev}_\xi(f(q))$ is well--defined and is an algebraic integer.
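Indeed, if $\xi$ has order $r$, then for every $k\ge r$ the product $(1-q)(1-q^2)\cdots(1-q^k)$ contains the factor $(1-q^r)$, which vanishes at $q=\xi$; hence
\[
\mathrm{ev}_\xi(f(q))=\sum_{k=0}^{r-1} f_k(\xi)\, (1-\xi)(1-\xi^2)\cdots(1-\xi^k),
\]
a finite sum of algebraic integers.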
However, the fact that the unified invariant belongs to the Habiro ring
is stronger than just integrality of $\tauau_M(\xi)$.
The Habiro ring has beautiful
arithmetic properties.
Every element $f(q) \in \widehat{\Z[q]}$ can be considered
as a function whose domain is the set
of roots of unity.
Moreover, there is a natural Taylor series for $f$ at every root
of unity.
In \cite{Ha} it is shown that two elements $f,g \in \widehat{\Z[q]}$ are the same if and
only if their Taylor series at a root of unity coincide.
In addition, each function $f(q) \in \widehat{\Z[q]}$ is totally determined
by its values at, say,
infinitely many roots of order $3^n,\, n\in {\mathbb N}$.
Due to these properties the Habiro ring is also called
a ring of ``analytic functions at roots of unity''. Thus belonging to $\widehat{\Z[q]}$ means that the
collection of the WRT invariants is far from a random
collection of algebraic integers; together they form a nice function.
General properties of the Habiro ring imply that for any integral homology
3--sphere $M$,
the Taylor expansion of the unified invariant $J_M$
at $q= 1$ coincides with the Ohtsuki series
and dominates WRT invariants of $M$ at all roots
of unity (not only of prime order). This is summarized in the following
commutative diagram.
\begin{diagram}
& &\mathbb C & &\\
&\ruTo^{\tau_M(\xi)}&\uTo_{q=\xi} & &\\
\{\mathbb Z\text{HS}\}&\rTo^{J_M(q)} &\widehat{\mathbb Z[q]}&\rInto &\mathbb Z[[1-q]] \\
&\rdTo_{\text{LMO}} & & &\dInto_{h=1-q} \\
& &\mathcal A &\rTo^{\frak {sl}_2\text{--weight}}_{\text{system}} &\mathbb Q[[h]]
\end{diagram}
By $\mathbb Z\text{HS}$ we denote here the set of integral homology 3--spheres.
In particular, this shows that the Ohtsuki series has integral coefficients (which was conjectured by Lawrence and first proved by Rozansky).
Recently, the Habiro ring has found applications in algebraic geometry
for constructing
varieties over the non--existing field of one element \cite{Ma}.
\subsection*{Unified invariants of rational homology 3--spheres}
In \cite{BBL}, we give a full generalization of the Habiro theory to
rational homology 3--spheres. This requires completely new
techniques coming from number theory, commutative algebra,
quantum group and knot theory.
Let us explain this in more details.
Assume $M$ is a rational homology 3--sphere with
$|H_1(M,\BZ)|=b$, where for a finite group $G$ we denote
by $|G|$ the number of its elements.
Then
our unified invariant $I_M$ belongs to a modification $\mathcal R_b$ of the Habiro ring
where $b$ is inverted. Unlike the case $b=1$, the modified Habiro ring
is not an integral domain, but a direct product of integral domains,
where each factor is determined by its own Taylor
expansion at some root of unity. There is a decomposition
$I_M=\prod_{c|b} I_{M,c}$, where $I_{M,c}$ dominates the set
$\{\tau_M(\xi)|(\operatorname{ord}(\xi),b)=c\}$. If $b=1$, then $I_M$ coincides with Habiro's $J_M$.
The invariant $I_{M,1}$ was first defined in \cite{Le}.
Our results can be summarized in the following commutative
diagram. Here we assume for simplicity
that $b=p^k$ is a power of a prime and put $e_n:=\exp(2\pi I/n)$,
the primitive $n$th root of unity.
\begin{diagram}
& &\mathbb C & & &\\
&\ruTo(2,4)^{\tau_M(\xi)}&\uTo_{q=\xi} & & &\\
& &\mathcal R_b&\rInto &
\prod^\infty_{i=0}\mathbb Z\left[\frac{1}{p}, e_{p^i}
\right]\left[\left[e_{p^i}-q\right]\right]&\\
&\ruTo~{I_M(q)} & & & &\\
\{\mathbb Q\text{HS}\}& & & & \dTo_{\text{projection to}\; i=0} &\\
&\rdTo_{\text{LMO}} & & & &\\
& &\mathcal A &\rTo^{\frak {sl}_2\text{--weight}}_{\text{system}}&\mathbb Q[[h]] &
\end{diagram}
By $\mathbb Q\text{HS}$ we denote the set of
rational homology 3--spheres $M$, with $|H_1(M,\BZ)|=b$.
In particular, for any $M \in \mathbb Q\text{HS}$,
we generalize Ohtsuki series as follows.
Let us fix a divisor $c<b$ of $b$,
then the Taylor expansion of $I_M$ at $e_c$ is a power series in $(q-e_c)$ with
coefficients in $\BZ[1/b][e_c]$ which dominates the WRT invariants
at roots of unity whose order has the greatest common divisor $c$ with $b$.
If $c=b=p^k$, then $I_{M,b}$ is a priori determined by a product of power
series $\prod_{i\geq k} \mathbb Z\left[\frac{1}{p}, e_{p^i}
\right]\left[\left[e_{{p^i}}-q\right]\right]$,
however, conjecturally, it is enough to consider the series in $q-e_b$.
The commutative diagram tells us that the LMO invariant determines $I_{M,1}$, or
the set of WRT invariants at roots of unity coprime with $b$.
Since there is no direct way to obtain power series in $(q-e_c)$ from the
LMO invariant, we conjecture the existence of the refined universal
finite type invariant, dominating our power series.
On the physical level of rigor, this means that the new refined
invariant should capture
more than just the contribution of flat connections
from the Chern--Simons theory.
The methods used in the unification
of the WRT invariants also led to the full solution of
the integrality problem for quantum $SO(3)$ and $SU(2)$ WRT invariants.
In \cite{BL,BCL}, we showed that
$\tau_M^G(\xi)$ for any 3--manifold $M$ and any root of unity
is always an algebraic integer. Here $G=SO(3)$ or $SU(2)$.
The integrality of the spin
and cohomological refinements is work in progress.
Assume $M$ is the Poincar\'e homology 3-sphere, obtained by surgery on a left-handed trefoil
with framing $-1$. Then
$$ I_M=\frac{1}{1-q} \sum^\infty_{k=0} q^k (1-q^{k+1})(1-q^{k+2})
\dots (1-q^{2k+1})\, .$$
We expect that the categorification of
the WRT invariants will lead to a
homology theory with Euler characteristic given by $I_M$.
The paper is organized as follows. In Section \ref{defs} we recall
the definitions of Kirby moves,
WRT invariants and of the cyclotomic expansion for the colored Jones polynomial.
In Section 2 we state our main results and outline the proofs.
Section 3 is
devoted to the discussion of the rings, where the unified
invariants take their values.
In addition, we construct generalized
Ohtsuki series.
The Laplace transform method, the Andrews identity, Frobenius maps
and the integrality of WRT invariants are explained in the last Section.
\section{Quantum (WRT) invariants}
\label{defs}
\subsection{Notations and conventions}
We will consider $q^{1/4}$ as a free parameter. Let
\[
\{n\} = q^{n/2}-q^{-n/2},
\quad \{n\}!=
\prod_{i=1}^n \{i\} ,\quad [n] =\frac{\{n\}}{\{1\}}.
\]
We denote the set $\{1,2,3,\ldots\}$ by
${\mathbb N}$.
We also use the following notation from $q$--calculus:
$$ (x;q)_n := \prod_{j=1}^n (1-x q^{j-1}).$$
Throughout this paper,
$\xi$ will be a primitive root
of unity of {\em odd} order $r$ and $e_n:=\exp(2\pi I/n)$.
All 3--manifolds in this paper are supposed to be closed and
oriented. Every link in $S^3$ is framed, oriented, and has
components ordered.
For a link $L$,
let $(L_{ij})$ be its linking matrix, where for $i\neq j$,
$L_{ij}:={\text{lk}}(L_i,L_j)$ is the linking number of the $i$th and the $j$th
components,
and $L_{ii}=b_i$ is the
framing of $L_i$,
given by the linking number of $L_i$ with its push off
along the first vector in the framing.
Surgery along the framed link $L$ consists of removing a tubular neighborhood of $L$
from $S^3$ and then gluing it back with the diffeomorphism given by framing
(i.e. the meridian of each removed solid torus, diffeomorphic to a neighborhood of $L_i$,
is identified with the
longitude of the complement twisted $b_i$ times along the meridian).
We denote by $M$ the result of the surgery.
\subsection{The colored Jones polynomial}
Suppose $L$ is a framed oriented link with $m$ ordered components and $V_1,\dots,V_m$ are finite-dimensional modules
over a ribbon Hopf algebra. Then one can define the quantum invariant $J_L(V_1,\dots,V_m)$ through the machinery of quantum link invariant theory, see \cite{Tu}.
The quantized enveloping algebra $U_h(sl_2)$ of $sl_2$ is a ribbon algebra, and
for each positive integer $n$ there is
a unique $U_h(sl_2)$-module of dimension $n$. The quantum link invariant $J_L(n_1,\dots,n_m)$, where $n_j$ stands for the $n_j$-dimensional $U_h(sl_2)$-module, is usually called
the colored Jones polynomial, with the $n_j$'s being the colors. When all the colors are 2, $J_L(2,\dots,2)$ is the usual Jones polynomial, which can be defined using
skein relation.
One can construct the colored Jones polynomial without the quantum group theory by first defining the Jones polynomial through the skein relation, and then
defining the colored Jones polynomial by using cablings, see e.g. \cite{KM, Lic}.
Let us recall here a few well--known formulas.
For the unknot $U$ with 0 framing one has
\begin{equation} J_U(n) = [n] \label{unknot}
\end{equation}
Moreover, $J_L$ is multiplicative with respect to the disjoint union.
If $L_1$ is obtained from $L$
by increasing the framing of the $i$th component by 1, then
\begin{equation}\label{framing}
J_{L_1}(n_1,\dots,n_m) = q^{(n_i^2-1)/4} J_{L}(n_1,\dots,n_m).
\end{equation}
If all the colors $n_i$ are odd, then $J_{L}(n_1,\dots,n_m) \in \BZ[q^{\pm 1}]$.
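For example, combining \eqref{unknot} and \eqref{framing}, the unknot with framing $b$ (denoted $U^b$ below) and color $n$ satisfies
\[
J_{U^b}(n)= q^{b\frac{n^2-1}{4}}\, [n],
\]
which is used below when $F_{U^b}(\xi)$ is computed via Gauss sums.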
\subsection{Kirby moves}
By Kirby theorem, any link invariant which does not change
under Kirby moves is an invariant of a 3--manifold given
by surgery on that link.
Let us first recall what the Kirby moves are.
\vskip2mm
{\noindent \bf K1--Move} (handle slide):
For some $i{\mathbf n}eq j$, replace the $i$th component $L_i$
with $L'_i$, a band connected sum of $L_i$ with a push
off of $L_j$ (defined by the framing), with
$b'_i=b_i+b_j+2{\text{lk}}(L_i,L_j)$.
\vskip2mm
{\noindent \bf K2--Move} (blow up): Add (or delete) a split unknotted component with framing
$\pm 1$.
\vskip2mm
These two moves are equivalent to the one Fenn--Rourke move
defined as follows:
\vskip2mm
{\noindent \bf FR--Move} Locally the following two pictures are
interchangeable
$$\psdiag{10}{30}{FR-move}$$
where $\varepsilon \in \{1,-1\}$ and
the closed component has framing $\varepsilon$.
Note that K2--Move corresponds to the case when $m=0$.
\vskip2mm
The main idea of the construction of the WRT invariants
is to make the colored Jones polynomial invariant under Kirby moves
by averaging over all colors.
To make this precise, we have to choose the quantum parameter $q$
to be a root of unity, otherwise the sum would be infinite.
\subsection{Evaluation and Gauss sums}
For each root of unity $\xi$ of odd order $r$,
we define the evaluation map
$\mathrm{ev}_\xi$ by replacing $q$ with $\xi$.
Suppose
$f(q;n_1,\dots,n_m)$ is a function
of variables $q^{\pm 1}$ and integers $n_1,\dots,n_m$.
In quantum topology, the following sum plays an important role
\[
{\sum_{n_i}}^\xi f := \sum_{\substack{0< n_i< 2r\\ n_i \text{ odd}}}
\mathrm{ev}_\xi f(q; n_1,\dots, n_m)
\]
where in the sum all the $n_i$ run over the set of {\em odd} numbers
between $0$ and $2r$.
In particular, the following variation of the Gauss sum
\[
\gamma_b(\xi):= {\sum_{n}}^\xi q^{b\frac{n^2-1}{4}}
\]
is well--defined, since for odd $n$, $4\mid n^2-1$.
It is known that, for odd $r$, $|\gamma_b(\xi)|$ is never 0.
\subsection{Definition of the WRT invariants}
Let
\[
F_{L}(\xi):= {\sum_{n_i}}^\xi\; \prod_{i=1}^m [n_i]\;
J_{L}(n_1,\dots,n_m).\]
\begin{Thm} \label{K1M}{\rm [Reshetikhin--Turaev]}
$F_L(\xi)$ is invariant under K1--Move.
\end{Thm}
An important special case is when $L=U^b$, the unknot with framing
$b \neq 0$. In that case $F_{U^{b}}(\xi)$ can be calculated
using the Gauss sum and is nonzero.
Let $\sigma_+ $ (respectively $\sigma_-$) be the number of
positive (negative) eigenvalues of the linking matrix of $L$.
Then we define
\begin{equation}
\tau_{M}(\xi) =
\frac{F_{L}(\xi)}{(F_{U^{+1}}(\xi))^{\sigma_+}\,
(F_{U^{-1}}(\xi))^{\sigma_-} }\, .
\label{def_qi}
\end{equation}
It is easy to see that $\tau_M(\xi)$ is invariant under
K2--Move, and hence, by Theorem \ref{K1M}, it is a topological
invariant of $M$ called the $SO(3)$ WRT invariant.
Moreover,
$\tau_{M}(\xi)$ is multiplicative with
respect to the connected sum.
\vskip2mm
\noindent
{\bf Remark.}
If we drop the condition that the colors in the summation are
odd, and sum over all (odd and even) colors, the result will be
the $SU(2)$ WRT invariant $\tau^{SU(2)}_M (\xi)$.
In this case, the order of $\xi$ could also be even.
\vskip2mm
The $SO(3)$ WRT invariant extends naturally to the invariant
of the pair $(M,L')$, where
the manifold $M$ contains a link $L'$ inside. In this case we have
to replace the surgery link $L$ of $M$ by $L\cup L'$ in all definitions,
fix colors on $L'$ and sum over all colorings of
$L$ only. We omit here the precise definition and refer to \cite{BBL}
for more details.
For example, the $SO(3)$ invariant of the lens space $L(b,1)$,
obtained by surgery along $U^b$, is
\begin{equation} \tau_{L(b,1)} (\xi)= \frac{ F_{U^b}(\xi)}{F_{U^{\operatorname{sn}(b)}}(\xi) },
\label{2005}
\end{equation}
where $\operatorname{sn}(b)$ is the sign of the integer $b$.
Suppose
$M$ is a rational homology
3--sphere.
Then there is a unique decomposition
$ H_1(M,\BZ)=\bigoplus_{i}
\BZ/{b_{i}\BZ}$, where each $b_i$ is a prime power.
We renormalize the $SO(3)$ WRT invariant as follows:
\begin{equation}
\tau'_{M}(\xi)=\frac{\tau_{M}(\xi)}
{\prod\limits_{i}\;\,
\tau_{L(b_{i},1)}(\xi)}\; ,
\label{0910}
\end{equation}
where $L(b,a)$ denotes the $(b,a)$ lens space. Note that
$\tau_{L(b,1)}(\xi)$ is always nonzero.
Let us focus on the special case when the linking matrix of
$L$ is diagonal.
Assume each $L_{ii}=b_i$ is a power of a prime or 1, up to sign.
Then $H_1(M,\BZ) = \oplus_{i=1}^m \BZ/|b_i|$, and
$$\sigma_+ = {\rm card}\, \{ i\mid b_i >0\}, \quad \sigma_- =
{\rm card}\, \{ i \mid b_i < 0\}.$$
Thus from the definitions \eqref{def_qi}, \eqref{2005} and \eqref{0910} we have
\begin{equation}
\tau'_{M}(\xi) = \left( \prod_{i=1}^m \tau'_{L(b_i,1)}(\xi)
\right)\,
\frac{F_{L}(\xi)}
{\prod_{i=1}^m F_{U^{b_i}}(\xi) }
\, ,
\label{0077}
\end{equation}
with
$$\tau'_{L(b_i,1)}(\xi)= \frac{\tau_{L(b_i,1)}(\xi)}{\tau_{L(|b_i|,1)}(\xi)}\, .$$
The collection of $SO(3)$ WRT invariants is difficult to study, since
their definition heavily depends on the order of $\xi$.
The following cyclotomic expansion will play an important role
for the unification of the WRT invariants.
\subsection{Habiro's cyclotomic expansion of the colored Jones
polynomial}
For non--negative integers $n,k$ we define
$$ A(n,k) := \frac{\prod^{k}_{i=0}
\left(q^{n}+q^{-n}-q^i -q^{-i}\right)}{(1-q) \, (q^{k+1};q)_{k+1}}.$$
For ${\mathbf k}=(k_1,\dots,k_m)$ let
$$ A({\mathbf n},{\mathbf k}):= \prod_{j=1}^m \; A(n_j,k_j).$$
Note that $A({\mathbf n},{\mathbf k})=0$ if $k_j \ge n_j$ for some index $j$. Also
$ A(n,0)= q^{-1} J_U(n)^2.$
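Indeed, for $k=0$ the defining product reduces to $q^n+q^{-n}-2=\{n\}^2$, while the denominator becomes $(1-q)(1-q)=q\{1\}^2$, so that $A(n,0)=\{n\}^2/(q\{1\}^2)=q^{-1}[n]^2$.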
The colored Jones polynomial $J_{L}
({\mathbf n})$ can be repackaged into the invariant $C_{L}
$({\mathbf k})$ as stated in the following theorem.
\begin{Thm} {\rm [Habiro]}\label{GeneralizedHabiro}
Suppose $L$ is a link in $S^3$ having zero linking matrix.
Then there are invariants
\begin{equation}\label{Jones2}
C_{L}({\mathbf k}) \in \frac{(q^{k+1};q)_{k+1}}{(1-q)}
\,\,\mathbb Z[q^{\pm 1}] ,\quad \text{where $k=\max\{k_1,\dots, k_m\}$}
\end{equation}
such that for every ${\mathbf n} =(n_1,\dots, n_m)$
\begin{equation}\label{Jones}
J_{L}
({\mathbf n}) \, \prod^m_{i=1}\; [n_i] = \sum_{0\le k_i \le n_i-1}
C_{L}({\mathbf k})\;
A({\mathbf n}, {\mathbf k}).
\end{equation}
\end{Thm}
Note that the existence of $C_{L}({\mathbf k})$ as
rational functions in $q$ satisfying \epsilonsilonqref{Jones} is
easy to establish. They correspond to the Jones polynomial
colored by different elements of the Grothendieck ring
of $U_q(\frak {sl}_2)$, i.e. by linear combinations of representations.
The difficulty here is to show the integrality
of \eqref{Jones2}.
Since $A({\mathbf n}, {\mathbf k}) =0$ unless $ {\mathbf k} < {\mathbf n}$, in the sum on the right
hand side of \eqref{Jones} one can assume that ${\mathbf k}$ runs over the set
of all $m$--tuples ${\mathbf k}$ with non--negative integer components. We will use
this fact later.
\section{Main Results}
\label{strategy}
Let us state our main results announced in the introduction.
For any positive integer $b$, we define the
cyclotomic completion ring $\mathcal R_b$ to be
\begin{equation}
\label{ab} \mathcal R_b:=\lim_{\overleftarrow{\hspace{2mm} k\hspace{2mm}}}
\frac{\BZ[1/b][q]}{
\left((q;q^2)_k\right)}, \qquad \text{where} \quad
(q;q^2)_k = (1-q)(1-q^3) \dots (1-q^{2k-1}).
\end{equation}
For any $f(q)\in \mathcal R_b$ and a root of unity $\xi$ of {\em odd} order,
the evaluation $\mathrm{ev}_\xi (f(q)):= f(\xi)$ is well--defined.
Similarly, we put
$$\mathcal S_b :=\lim_{\overleftarrow{\hspace{2mm}k\hspace{2mm}}}
\frac{\BZ[1/b][q]}
{((q;q)_k)}\; .$$
Here the evaluation at any root of unity is well--defined. For odd $b$,
there is a natural embedding
$\mathcal S_b\hookrightarrow \mathcal R_b$.
Let us denote by $\mathcal M_b$ the set of rational homology 3--spheres such that
$|H_1(M,\BZ)|$ divides $b^n$ for some $n$.
Our main result is the following.
\begin{Thm}\label{main} {\rm [Beliakova--B\"uhler--Le]}
Suppose the components of a framed oriented link $L \subset M$ have odd colors, and $M\in \mathcal M_b$. Then
there exists an invariant $I_{M,L} \in \mathcal R_b$,
such that for any root of unity $\xi$ of odd order
$$\mathrm{ev}_\xi(I_{M,L})=\tau'_{M,L}(\xi)\, .$$
In addition, if $b$ is odd, then
$I_{M,L}\in \mathcal S_b$.
\end{Thm}
If $b=1$ and $L$ is the empty link, $I_{M}$
coincides with Habiro's unified invariant $J_M$ and
$\mathcal S_1=\widehat{\Z[q]}$.
One may ask what is the evaluation of $I_M$ at an even
root of unity in the case when $b$ is odd.
In her PhD thesis \cite{B}, B\"uhler shows that it
coincides with $\tau^{\prime SU(2)}_M(\xi)$.
Hence, for odd $b$, $I_M$ dominates both $SO(3)$ and $SU(2)$
WRT invariants. An analogous result for $b$ even is
work in progress.
Compared to Habiro's case,
the proof of Theorem \ref{main} uses the following
new techniques:
1) the Laplace transform method;
2) the difficult number theoretical identity of Andrews generalizing those of
Rogers--Ramanujan;
3) the Frobenius type isomorphism providing the existence of the
$b$--th root of $q$ in $\mathcal R_b$.
In addition, we had to generalize the deep integrality result of Habiro
(Theorem \ref{GeneralizedHabiro}),
to a union of an algebraically split link
with any odd colored one.
The rings $\mathcal R_b$ and $\mathcal S_b$ have properties
similar to those of the Habiro ring.
An element $f(q) \in \mathcal R_b$ is totally determined by the values at many
infinite sets of roots of unity (see Section \ref{cyc}),
one special case is the following.
\begin{proposition}\label{main-cor} {\rm [Beliakova--B\"uhler--Le]}
Let $p$ be an odd prime
not dividing $b$ and $T$ the set of all integers of the form
$p^k b'$ with $k\in {\mathbb N}$ and $b'$ any odd divisor of
$b^n$ for some $n$. Any element $f(q) \in \mathcal R_b$, and hence also
$\{\tau_M(\xi)\}$, is totally determined by the values at roots of
unity with orders in $T$.
\end{proposition}
The general properties of the ring $\mathcal R_b$
allow us to introduce generalized
Ohtsuki series as the Taylor expansions
of $I_M$ at roots of unity. In addition, we show
that these Taylor expansions satisfy
congruence relations similar to the original definition of the Ohtsuki
series (see Section \ref{Oht}).
\subsection{Strategy of the proof}
Let us outline the proof of Theorem \ref{main} and state the main
technical results that will be explained later.
We restrict to the case $L=\emptyset$ for simplicity.
We would like to define $I_{M}\in \mathcal R_{b}$, such that
\begin{equation}
\tau'_{M}(\xi)\;=\; \mathrm{ev}_{\xi}\left(I_{M}\right)
\label{0080}
\end{equation}
for any root of unity $\xi$ of odd order.
This unified invariant is multiplicative with respect
to the connected sum.
The following observation is important.
By Proposition \ref{main-cor}, there is {\em at most one} element $f(q)\in \mathcal R_b$ such that for every root $\xi$ of odd order one has
$$ \tau'_{M} (\xi) = \mathrm{ev}_\xi\left( f(q)\right).$$
That is, if we can find such an element,
it is unique, and we put
$I_{M} := f(q)$.
\subsection{Laplace transform}
The following is the main technical result of \cite{BBL}. A proof will be
explained in the next Section.
\begin{Thm} {\rm [Beliakova--B\"uhler--Le]}
Suppose $b=\pm 1$ or $b= \pm p^l$ where $p$ is a prime and $l$ is
positive. For any non--negative integer $k$,
there exists an element $Q_{b,k} \in \mathcal R_b $ such that
for every root $\xi$ of odd order $r$ one has
\[
\frac{{\sum\limits_n}^\xi \, q^{b\frac{n^2-1}{4}} A(n,k) }{F_{U^b}(\xi)}
= \mathrm{ev}_\xi (Q_{b,k}).
\]
\label{0078}
In addition, if $b$ is odd, $Q_{b,k} \in \mathcal S_b $.
\end{Thm}
\subsection{Definition of the unified invariant: diagonal case} \label{2501}
Suppose that the linking number between any two components of $L$
is 0, and the framings on the components of $L$ are $b_i=\pm p_i^{k_i}$ for
$i=1,\dots, m$, where each $p_i$ is prime or 1.
Let us denote the link $L$ with all framings switched to zero by $L_0$.
Using \eqref{Jones}, taking into account the framings $b_i$, we have
\[
J_{L}({\mathbf n})\prod_{i=1}^m [n_i] = \sum_{{\mathbf k}\ge 0} C_{L_0 }
({\mathbf k}) \, \prod_{i=1}^m q^{b_i \frac{n_i^2-1}{4}} A(n_i,k_i).
\]
By the definition of $F_{L}$, we have
\[ F_{L}(\xi)= \sum_{{\mathbf k} \ge 0}
\mathrm{ev}_\xi(C_{L_0 }({\mathbf k})) \, \prod_{i=1}^m
{\sum_{n_i}}^\xi \, q^{b_i \frac{n_i^2-1}{4}} A(n_i,k_i).
\]
From \eqref{0077} and Theorem \ref{0078}, we get
\[
\tau'_{M}(\xi) = \mathrm{ev}_\xi \left \{ \prod_{i=1}^m I_{L(b_i,1)} \,
\sum_{{\mathbf k}} C_{L_0 }({\mathbf k}) \, \prod_{i=1}^m Q_{b_i,k_i} \right \},
\]
where the existence of
the unified invariant of the lens space $I_{L(b_i,1)}\in \mathcal R_b$,
with
$\mathrm{ev}_\xi(I_{L(b_i,1)})=\tau'_{L(b_i,1)}(\xi)$ can be shown by a
direct computation (we refer to \cite{BBL} for more details).
Thus if we define
\[
I_{M}:= \prod_{i=1}^m I_{L(b_i,1)} \, \sum_{{\mathbf k}}
C_{L_0 }({\mathbf k}) \, \prod_{i=1}^m Q_{b_i,k_i}\, ,
\]
then \eqref{0080} is satisfied. By Theorem \ref{GeneralizedHabiro},
$C_{L_0 }({\mathbf k})$ is divisible by $(q^{k+1};q)_{k+1}/(1-q)$, which is
divisible by $(q;q)_k$, where $k = \max k_i$. It follows that
$I_{M} \in \mathcal R_b$. In addition, if $b$ is odd,
then $I_{M} \in \mathcal S_b$.
\subsection{Diagonalization using lens spaces} The general
case reduces to the diagonal case by
the well--known trick of diagonalization using lens spaces. We say that
$M$ is {\em diagonal} if it can be obtained from $S^3$ by surgery along a framed link
$L$ with diagonal linking matrix, where the diagonal entries are of the
form $\pm p^k$ with $p=0,1$ or a prime.
The following lemma was proved in \cite[Proposition 3.2 (a)]{Le}.
\begin{lemma} For every rational homology sphere $M$,
there are lens spaces $L(b_i,a_i)$ such that the connected
sum of $M$ and these
lens spaces is diagonal. Moreover, each $b_i$ is a prime power
divisor of $|H_1(M,\BZ)|$.
\label{diagonalization}
\end{lemma}
To define the unified invariant for a general rational homology sphere $M$,
one first adds to $M$ lens spaces to get a diagonal $M'$, for which
the unified invariant $I_{M'}$ had been defined in Subsection \ref{2501}. Then
$I_M$ is the quotient of $I_{M'}$ by the unified invariants of the lens
spaces. But unlike the simpler
case
of \cite{Le}, the unified invariants of lens spaces are {\em not} invertible
in general.
To overcome this difficulty we insert knots in lens spaces and split the
unified invariant into different components.
This is also the reason why we need to generalize Habiro's integrality
result to algebraically split links together with odd colored components.
\section{Cyclotomic completions of polynomial rings}\label{cyc}
Since unified invariants belong to
cyclotomic completions of polynomial rings, we outline
their construction and the main properties. For simplicity, only the case
$b$ is a power of a prime is considered, the general case is treated in
\cite{BBL}.
\subsection{On cyclotomic polynomials}
Recall that $e_n := \exp(2\pi I/n)$
and denote by
$\Phi_n(q)$ the cyclotomic polynomial
\[
\Phi_n(q) = \prod_{\substack{(j,n)=1\\0<j\le n}} (q - e_n^j).
\]
For example, $\Phi_1(q)=q-1$ and $\Phi_2(q)=q+1$.
The degree of $\Phi_n(q)\in \BZ[q]$ is given by the Euler function $\varphi(n)$.
Suppose $p$ is a prime and $n$ an integer. Then (see e.g. \cite{Na})
\begin{equation} \Phi_n(q^p)= \begin{cases} \Phi_{np}(q) & \text{ if } p \mid n \\
\Phi_{np}(q) \Phi_n(q) & \text{ if } p \nmid n.
\end{cases}
\end{equation}
It follows that $\Phi_n(q^p)$ is always divisible by $\Phi_{np}(q)$.
The ideal of $\BZ[q]$ generated by $\Phi_n(q)$ and $\Phi_m(q)$ is well--known,
see e.g. \cite[Lemma 5.4]{Le}:
\begin{lemma}$\text{ }$
\begin{itemize}
\item[(a)] If $\frac{m}{n} \neq p^e$ for any prime $p$ and any integer $e\neq 0$, then
$(\Phi_n)+ (\Phi_m)=(1)$ in $\BZ[q]$.
\item[(b)] If $\frac{m}{n} = p^e$ for a prime $p$ and some integer $e \neq 0$, then $(\Phi_n)+ (\Phi_m)=(p)$ in $\BZ[q]$.
\label{0911}
\end{itemize}
\end{lemma}
\subsection{Habiro's results} Let us summarize some of Habiro's
results on cyclotomic completions of polynomial rings \cite{Ha1}.
Let $R$ be a commutative integral domain of characteristic zero
and $R[q]$ the polynomial
ring over $R$.
For any
$S\subset {\mathbb N}$, Habiro defined the $S$--cyclotomic completion
ring $R[q]^S$ as follows:
\begin{equation}\label{rs} R[q]^S:=\lim_{\overleftarrow{f(q)\in
\Phi^*_S}} \;\;\frac{R[q]}{(f(q))} \end{equation}
where $\Phi^*_S$ denotes the multiplicative
set in $\BZ[q]$ generated by $\Phi_S=\{\Phi_n(q)\mid n\in S\}$
and directed with respect to the
divisibility relation.
For example, since the sequence $(q;q)_n$, $n\in {\mathbb N}$,
is cofinal to $\Phi^*_{\mathbb N}$, we have
\begin{equation}\label{cofinal}
\widehat{\Z[q]}\simeq\BZ[q]^{\mathbb N}.
\end{equation}
Note that if $S$ is finite, then
$R[q]^S$ is identified with the $(\prod \Phi_S)$--adic completion of $R[q]$.
In particular,
\[
R[q]^{\{1\}}\simeq R[[q-1]], \quad
R[q]^{\{2\}}\simeq R[[q+1]].
\]
Suppose $S' \subset S$, then $\Phi^*_{S'}\subset \Phi^*_S$, hence
there is a natural map
\[
\rho^R_{S, S'}: R[q]^S \to R[q]^{S'}.
\]
Recall important results concerning $R[q]^S$ from \cite{Ha1}. Two
positive integers $n, n'$ are called {\em adjacent} if
$n'/n=p^e$ with a nonzero
$e\in \BZ$ and a prime $p$, such that
the ring $R$ is $p$--adically separated, i.e. $\bigcap_{n=1}^\infty (p^n) =0$ in $R$.
A set of positive integers is
{\em $R$--connected} if for any two distinct elements $n,n'$ there is a
sequence $n=n_1, \,n_2, \dots,\, n_{k-1},\, n_k= n'$ in the set, such that
any two consecutive numbers of this sequence are adjacent.
Theorem 4.1 of \cite{Ha1}
says that if $S$ is $R$--connected,
then for any subset $S'\subset S$
the natural map $ \rho^R_{S,S'}: R[q]^S \hookrightarrow R[q]^{S'}$ is an
embedding.
\vskip2mm
\noindent
{\bf Example}: Assume $R=\BZ$, $S={\mathbb N}$ and $S'=\{1\}$, then
we have that the map
$\widehat{\Z[q]} \to \BZ[[q-1]]$ is an embedding. This implies that,
for any integral homology 3--sphere $M$,
$J_M$ is determined by the Ohtsuki series and this series has
integral coefficients.
\vskip2mm
If $\zeta$ is a root of unity of order in $S$, then for every $f(q)\in R[q]^S$
the evaluation $\mathrm{ev}_\zeta(f(q))\in R[\zeta]$ can be defined by sending
$q\to\zeta$.
For a set $\Xi$ of roots of unity whose orders form a subset
$\mathcal T\subset S$, one defines the evaluation
\[
\mathrm{ev}_\Xi: R[q]^S \to \prod_{\zeta \in \Xi} R[\zeta].
\]
Theorem 6.1 of \cite{Ha1} shows that if
$R\subset \BQ$, $S$ is $R$--connected, and there
exists $n\in S$ that is adjacent to infinitely many elements in
$\mathcal T$, then $\mathrm{ev}_\Xi$ is injective.
\vskip2mm
\noindent
{\bf Example}: Consider again the case when $R=\BZ$, $S={\mathbb N}$
and put $\mathcal T=\{3^n| n\in {\mathbb N}\}$, then $3\in S$ is adjacent to infinitely many
elements of $\mathcal T$ and hence, for any integral homology 3--sphere $M$,
the whole set of its WRT invariants is determined by the evaluations
of $J_M$ at roots of unity of order in $\mathcal T$.
\vskip2mm
\subsection{Taylor expansion} Fix a natural number $n$; then we have
$$ R[q]^{\{n\}} =
\lim_{\overleftarrow{\hspace{2mm}k\hspace{2mm}}}
\frac{R[q]}{(\Phi^k_n(q))}\; .$$
Suppose $ \BZ \subset R \subset \BQ$; then the natural algebra homomorphism
$$ h: \frac{R[q]}{(\Phi^k_n(q))} \to \frac{R[e_n][q]}{((q-e_n)^k)}$$
can be proved to be
injective. Taking the inverse limit, we see that there is a natural injective algebra homomorphism
$$ h : R[q]^{\{n\}} \to R[e_n][[q-e_n]].$$
Suppose $n \in S$. Combining $h$ and $\rho_{S, \{n\}}: R[q]^S \to R[q]^{\{n\}}$, we get an algebra map
$$ {\mathfrak t}_n: R[q]^S \to R[e_n][[q-e_n]].$$
If $f\in R[q]^S$, then ${\mathfrak t}_n(f)$ is called the Taylor expansion of $f$ at $e_n$.
\subsection{Splitting of $\mathcal S_p$ and evaluation}
For every integer $a$, we put ${\mathbb N}_a := \{ n \in {\mathbb N} \mid (a,n)=1\}$.
Suppose $p$ is a prime. Analogously to \eqref{cofinal},
we have
$$\mathcal S_p \simeq \BZ[1/p][q]^{\mathbb N}\,.$$
Observe that ${\mathbb N}$ is not $\BZ[1/p]$--connected. In fact one has
${\mathbb N} =\amalg_{j=0}^\infty \; p^j {\mathbb N}_p$, where each $p^j{\mathbb N}_p$ is
$\BZ[1/p]$--connected.
Let us define
\[
\mathcal S_{p,j}:= \BZ[1/p][q]^{p^j {\mathbb N}_p}.
\]
Note that for every $f \in \mathcal S_p$, the evaluation $\ev_\xi(f)$ can be
defined for every root $\xi$ of unity. For $f\in \mathcal S_{p,j}$, the evaluation
$\ev_\xi(f)$ can be defined when $\xi$ is a root of unity of order
in $p^j{\mathbb N}_p$. In \cite{BBL} we proved the following.
\begin{proposition} For every prime $p$ one has
\begin{equation} \mathcal S_p\simeq \prod_{j=0}^\infty \mathcal S_{p,j}.
\label{0912}\end{equation}
\end{proposition}
Let $\pi_j: \mathcal S_p \to \mathcal S_{p,j}$ denote the projection onto the
$j$th component in the above decomposition.
Suppose $\xi$ is a root of unity of order $r= p^j r'$,
with $(r',p)=1$. Then
for any $x\in \mathcal S_p$, one has
$$ \ev_\xi(x) = \ev_\xi(\pi_j(x)).$$
If $i\neq j$ then $\ev_\xi(\pi_i(x))=0$.
\subsection{On the Ohtsuki series at roots of unity}\label{Oht}
Suppose $M$ is a rational
homology 3--sphere with $|H_1(M,\BZ)|=b$.
By Theorem \ref{main}, for any root of unity $\xi$ of order $pr$,
$$\tau'_M(\xi)\in \BZ[1/b][e_{pr}] \simeq \frac{\BZ[1/b,e_r][x]}{(f_p(x+e_r))}\; ,$$ where
\[
f_p(t):= \frac{t^p - e_r^p}{t-e_r}.
\]
Hence we can write
\be\label{Ohtsuki}
\tau'_M(e_{r}e_{p})= \sum_{n=0}^{p-2} a_{p,n} x^n
\ee
where $a_{p,n}\in \BZ[1/b,e_r]$.
The following proposition, proven in \cite{BBL},
shows that the coefficients $a_{p,n}$
stabilize as $p\to \infty$.
\begin{proposition}{\rm [Beliakova--B\"uhler--Le]}
\label{main-cor1} Suppose $M$ is a
rational homology 3--sphere with $|H_1(M,\BZ)|=b$, and $r$ is an odd
positive integer.
For every non--negative integer $n$,
there exists a unique invariant $a_n= a_n(M) \in \BZ[1/b,e_r]$ such that
for every prime $p > \max (b,r)$,
we have
\begin{equation}
a_n\equiv a_{p,n} \pmod p\;\;\; \text{in $ \BZ[1/b,e_r]$ for} \;\;\;
0\le n \le p-2.
\label{5501}
\end{equation}
Moreover, the formal series $\sum_{n} a_n (q-e_r)^n$ is equal to the Taylor
expansion of the unified invariant $I_M$ at $e_r$.
\end{proposition}
\section{Laplace transform, Andrews identity
and Frobenius maps} \label{map-frob}
The aim of this section is to define the Laplace transform
and to study its image.
\subsection{Laplace transform}
To define the unified invariant we have to compute
$${\sum_n}^\xi q^{b\frac{n^2-1}{4}} A(n,k)$$
where the terms depending on $n$ in $A(n,k)$ look as follows:
$$\prod^k_{i=0} (q^n+q^{-n}-q^i-q^{-i})\,=(-1)^{k+1}\;(q^n;q)_{k+1}\;
(q^{-n};q)_{k+1}\, .$$
Formally, the last expression can be considered as a Laurent polynomial
in $q$ and $q^{n}$. Hence we only have to compute
$${\sum_n}^\xi q^{b\frac{n^2-1}{4}} q^{na}\; ,$$
which can easily be done by a completing-the-square argument.
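For the reader's convenience, the completing-the-square step is the elementary identity
$$b\,\frac{n^2-1}{4}+na=\frac{b}{4}\left(n+\frac{2a}{b}\right)^{2}-\frac{a^2}{b}-\frac{b}{4}\,,$$
which, at least heuristically, is the reason a Gauss-sum factor and the term $q^{-a^2/b}$ appear in the lemma below and in the definition of the Laplace transform.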
Let us state the result.
Suppose $r$ is an odd number, $b$ is a positive integer and
$$ c:= (r,b), \quad b_1:= b/c, \quad r_1:=r/c.$$
\begin{lemma}{\rm [Beliakova--Le]}
\label{33}
One has
\begin{equation}
\gamma_b(\xi) = c \,\gamma_{b_1}(\xi^c). \label{02}
\end{equation}
\begin{equation}{\sum_{n}}^\xi q^{b \frac{n^2-1}{4}}\, q^{an} = \begin{cases}
0
&\text{if $c\, \nmid \, a$;}\\
(\xi^c)^{-a_1^2 b_1^*}\; \gamma_b(\xi) & \text{if $a=ca_1$,}
\end{cases}
\end{equation}
where $b_1^\ast$ is an integer satisfying $b_1 b_1^* \equiv 1 \pmod{r_1}$.
\end{lemma}
This computation inspired us to introduce the following operator,
called the Laplace transform. Remember that $\int e^{ax^k} f(x)\, dx$
is called the Laplace transform
of $f$ of order $k$.
Let $\mathcal L_{b,c;n}: {\mathbb Z}[q^{\pm n},q^{\pm 1}] \to {\mathbb Z}[q^{\pm c/b}]$ be
the ${\mathbb Z}[q^{\pm 1}]$--linear operator, called the (discrete)
Laplace transform (of the second order),
defined by
\be\label{four}
\mathcal L_{b,c;n}(q^{na}) := \begin{cases}
0
&\text{if $c\, \nmid\, a$;}\\
q^{-a^2/b} & \text{if $a=ca_1$.}
\end{cases}
\ee
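As a toy illustration of this definition (with $b=c=1$), one has
$$\mathcal L_{1,1;n}\left(q^{n}+q^{-n}\right)=2\,q^{-1},\qquad
\mathcal L_{1,1;n}\left(q\,q^{2n}\right)=q\cdot q^{-4}=q^{-3},$$
where the second computation uses the ${\mathbb Z}[q^{\pm 1}]$--linearity.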
\begin{lemma} {\rm [Beliakova--Le]}
Suppose $f \in {\mathbb Z}[q^{\pm n},q^{\pm 1}]$. Then
$$ {\sum_{n}}^\xi q^{b\frac{n^2-1}{4}} f = \gamma_b(\xi) \,
\ev_\xi(\mathcal L_{b,c;n}(f)).
$$
\label{1000}
\end{lemma}
The point is that $\mathcal L_{b,c;n}(f)$, unlike the left hand side
${\sum_{n}^\xi} q^{b\frac{n^2-1}{4}} f$, does not depend on $\xi$.
To prove Theorem \ref{0078} we need to show that
$\mathcal L_{b,c;n} ((q^{n};q)_{k+1}\; (q^{-n};q)_{k+1})$ is divisible by
$(q^{k+1};q)_{k+1}$.
For this we use the remarkable identity discovered by Andrews.
\subsection{Andrews identity}
To warm up
we start with the identity from Ramanujan's ``Lost'' Notebook:
$$\prod_{k\geq 1}\frac{1}{(1-q^{5k-4})(1-q^{5k-1})}=\sum_{n\geq 0}
\frac{q^{n^2}}{(1-q)(1-q^2)\dots (1-q^n)}\, .$$
MacMahon later gave a combinatorial interpretation of this identity,
as follows.
Let $\lambda=(\lambda_1, \lambda_2, \dots, \lambda_t)$ with
$\sum_i\lambda_i=n$ be a partition of $n$ into non--increasing integers.
Then the identity can be derived from the fact that
the number of partitions of $n$ with all $\lambda_i$ of the
form $5k+1$ or $5k+4$
is equal to the number of partitions where $\lambda_{i}-\lambda_{i+1}\geq 2$
for all $i$.
This identity is a very special
case of the Andrews identity we used in \cite{BBL}: for any numbers $b_i,c_i$,
$i=1,\dots,k$, and any positive integer $N$ we have
\begin{multline*} 1+ \sum_{n =1}^N q^{kn+Nn} (1+ q^n) \frac{(q^{-N})_n}{(q^{N+1})_n} \prod_{i=1}^k
\frac{(b_i)_{n}}{b_i^{n}} \frac{(c_i)_{n}}{c_i^{n}}
\frac{1}{(\frac{q}{b_i})_n (\frac{q}{c_i})_n}=
\\
\frac{(q)_N\, (\frac{q}{b_kc_k})_N}{(\frac{q}{b_k})_N\,
(\frac{q}{c_k})_N} \sum_{n_k \ge n_{k-1} \ge \dots \ge n_2 \ge n_1=0}
\frac{q^{n_k} (q^{-N})_{n_k} (b_k)_{n_k}
(c_k)_{n_k}}{(q^{-N}b_kc_k)_{n_k}}\prod_{i=1}^{k-1}\frac{q^{n_i}
\frac{(b_i)_{n_i}}{b_i^{n_i}} \frac{(c_i)_{n_i}}{c_i^{n_i}}
(\frac{q}{b_i c_i})_{n_{i+1}-n_i} }{(q)_{n_{i+1}-n_i}
(\frac{q}{b_i})_{n_{i+1}} (\frac{q}{c_i})_{n_{i+1}}} \label{1002}
\end{multline*}
where $(a)_n=(a;q)_n$.
The point is that (for a special choice of parameters)
the left hand side of the identity
can be identified with
$\mathcal L_{b,c;n}\left((q^{n};q)_{k+1}\; (q^{-n};q)_{k+1}\right)$, while
the right hand side is a sum with all summands
divisible by $(q^{k+1};q)_{k+1}$.
In the case $b=\pm 1$, the computations are especially simple and we get
that
$$\mathcal L_{-1,1;n}((q^{n};q)_{k+1}\; (q^{-n};q)_{k+1})=2(q^{k+1};q)_{k+1}\, .$$
The same holds also for $\mathcal L_{1,1;n}$, up to units.
This allows us to write the explicit formula for $I_M$ given in the introduction.
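As a consistency check, the case $k=0$ can be verified directly from the definition of $\mathcal L_{-1,1;n}$:
$$\mathcal L_{-1,1;n}\left((q^{n};q)_{1}\,(q^{-n};q)_{1}\right)
=\mathcal L_{-1,1;n}\left(2-q^{n}-q^{-n}\right)=2-q-q=2\,(q;q)_{1}\,.$$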
\subsection{Frobenius isomorphism}
It remains to
show that the image of the
Laplace transform
belongs to $\mathcal R_b$, i.e. that
certain roots of $q$ exist in $\mathcal R_b$.
In \cite{BBL} we proved the following.
\begin{Thm}\label{frob}{\rm [Beliakova--B\"uhler--Le]}
The Frobenius endomorphism
$F_b: \BZ[1/b][q]^{{\mathbb N}_b} \to \BZ[1/b][q]^{{\mathbb N}_b}$, sending $q$ to $q^b$,
is an isomorphism.
\end{Thm}
This implies the
existence of the $b$th root of $q$ in $\mathcal S_{b,0}$
defined by
\[
q^{1/b}:=F^{-1}_b (q) \in \mathcal S_{b,0}\,.
\]
Let us mention that this result does not hold over $\BQ$, i.e.
for $y \in\BQ[q]^{{\mathbb N}_b}$ with $y^b=1$ we have $y=\pm 1$.
Further, we introduce
another Frobenius homomorphism
$$G_m : R[q]^{{\mathbb N}_b} \to R[q]^{m{\mathbb N}_b} \quad \text{ by } \qquad G_m(q) = q^m.$$
Since $\Phi_{mr}(q)$ always divides $\Phi_{r}(q^m)$, $G_m$ is well--defined.
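For instance, with $r=2$ and $m=3$,
$$\Phi_{2}(q^{3})=q^{3}+1=(q+1)(q^{2}-q+1)=\Phi_{2}(q)\,\Phi_{6}(q),$$
so indeed $\Phi_{6}(q)\mid \Phi_{2}(q^{3})$.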
This map allows us to transfer $q^{1/b}$ from $\mathcal S_{p,0}$
to $\mathcal S_{p,i}$ with $i> 0$, and hence to define a
realization of $q^{a^2/b}$ in $\mathcal S_p$ with the correct evaluation.
\subsection{Integrality}
The proof of integrality of the SO(3) WRT invariants for
all roots of unity and for all 3--manifolds is given in \cite{BL}.
Note that, even when restricted to rational homology 3--spheres,
this fact requires a separate proof, since
the existence
of $I_M \in \mathcal R_b$ does not imply integrality of the WRT invariants,
unless $b= 1$. In this subsection
we define a subring $\Gamma_b \subset \mathcal R_b$ such that
for any $f\in \Gamma_b$, $\ev_\xi(f)\in \BZ[\xi]$.
The fact that $I_{M,1}$ belongs
to this subring was proved in \cite{BL}. We conjecture that
this holds in general.
For any divisor $c$ of $b$, let us decompose
${\mathbb N}=\cup_{c|b}\; c\; {\mathbb N}_{b/c}$. Then
$$\BZ[1/b][q]^{\mathbb N}=\prod_{c|b} \BZ[1/b][q]^{c\,{\mathbb N}_{b/c}}\, .$$
Analogously, we have
$\Gamma_b=\prod_{c|b} \Gamma_{b,c}$, where
$\Gamma_{b,c} \subset \BZ[1/b][q]^{c\, {\mathbb N}_{b/c}}$ is defined as follows.
Let $A_{b,c}=\BZ[e_c][q^{\pm 1}, q^{\pm c/b}]$.
Put $t=q^{c/b}$ and let $A^{(m)}_{b,c}$ be the algebra generated over $A_{b,c}$
by
$$\frac{(t;t)_m}{(q^c;q^c)_{m}}\;
\frac{(e_c;e_c)_{(c-1)/2}}{\widetilde {(q;q)_m}}\; $$
where $$\widetilde{(q;q)_m}=\prod^m_{i=1,\, c\, \nmid\, i}(1-q^i)\, .$$
Then
every element $f\in \Gamma_{b,c}$ has a presentation
$$f=\sum^\infty_{m=0} f_m \; \frac{(q^{m+1};q)_{m+1}}{1-q}
$$
with $f_m\in A^{(m)}_{b,c}$.
For any root of unity $\xi$ of odd order $r$ with $(r,b)=c$
and $f\in \Gamma_{b,c}$, we have
$$ \ev_\xi (f)= \ev_\xi \left(\sum^{(r-3)/2}_{m=0} f_m \frac{(q^{m+1};q)_{m+1}}{1-q}
\right)\; .$$
Observe that for $m<(r-1)/2$,
$$
\ev_\xi (\widetilde {(q;q)_{m}}) \; \mid\;
(e_c;e_c)_{(c-1)/2}
\;\;\text{and}\;\;
\ev_\xi \left(\frac{(t;t)_m}{(q^c;q^c)_{m}}
\right) \in \BZ[\xi]\, .$$
\begin{conjecture}
For any $M\in \mathcal M_b$,
there exists an invariant $I'_M \in \Gamma_b$ such that,
for any root of unity $\xi$ of odd order,
$\ev_\xi(I'_M)=\tau_M(\xi)$.
\end{conjecture}
We also expect that $\Gamma_{b,b}$ is determined by its Taylor
expansion at $e_b$.
\begin{thebibliography}{[EMSS]}
\bibitem{And} G. Andrews, {\em q--series: their
development and applications in analysis, number theory,
combinatorics, physics, and computer algebra}, Regional Conference
Series in Mathematics, Amer. Math. Soc. {\bf 66} (1985)
\bibitem{BBL} A. Beliakova, I. B\"uhler, T. T. Q. Le,
{\em A unified quantum $SO(3)$ invariant for rational
homology 3--spheres}, arXiv:0801.3893
\bibitem{BCL} A. Beliakova, Q. Chen, T. T. Q. Le,
{\em On the integrality of Witten--Reshetikhin--Turaev 3--manifold invariants}, arXiv:1010.4750
\bibitem{BL} A. Beliakova, T. T. Q. Le, {\em Integrality of quantum
3--manifold invariants and rational surgery formula},
Compositio Math. {\bf 143} (2007) 1593--1612
\bibitem{B} I. B\"uhler, {\em Unified quantum $SO(3)$ and $SU(2)$
invariants for
rational homology 3--spheres},
PhD thesis, University of Z\"urich, 2010
\bibitem{GM} P. Gilmer, G. Masbaum, {\em Integral lattices in TQFT},
Ann. Scient. Ecole Normale Sup. {\bf 40} (2007) 815--844
\bibitem{Ha} K. Habiro,
{\em A unified Witten--Reshetikhin--Turaev invariant for integral homology
spheres}, Invent. Math. {\bf 171} (2008) 1--81
\bibitem{Ha1} K. Habiro, {\em Cyclotomic completions of polynomial rings},
Publ. Res. Inst. Math. Sci. {\bf 40} (2004) 1127--1146
\bibitem{Kho} M. Khovanov, {\em Hopfological algebra and categorification
at a root of unity: the first steps}, math.QA/0509083 (2005), to appear in
Commun. Contemp. Math.
\bibitem{KM} R. Kirby, P. Melvin, {\em The $3$--manifold invariants of Witten and Reshetikhin--Turaev for ${\rm sl}(2,C)$},
Invent. Math. {\bf 105} (1991) 473--545
\bibitem{Lic} R. Lickorish, {\em An introduction to knot theory}, Springer 1997
\bibitem{Le} T. T. Q. Le, {\em Strong integrality of quantum
invariants of 3--manifolds}, Trans. Amer. Math. Soc. {\bf 360} (2008)
2941--2963
\bibitem{LMO} T. T. Q. Le, J. Murakami, T. Ohtsuki,
{\em On a universal perturbative
invariant of 3--manifolds}, Topology {\bf 37} (1998) 539--574
\bibitem{Le10} T. T. Q. Le, {\em Quantum invariants of 3--manifolds:
integrality,
splitting, and perturbative expansion}, Topology Appl. {\bf 127}
(2003) 125--152
\bibitem{Ma} Y. Manin, {\em Cyclotomy and analytic geometry over $\mathcal F_1$},
arXiv:0809.1564
\bibitem{MR} G. Masbaum, J. Roberts,
{\em A simple proof of integrality of quantum invariants at prime
roots of unity}, Math. Proc. Camb. Phil. Soc. {\bf 121} (1997)
443--454
\bibitem{Mu} H. Murakami,
{\em Quantum $SO(3)$--invariants dominate the $SU(2)$--invariant of
Casson and Walker}, Math. Proc. Camb. Phil. Soc. {\bf 117} (1995)
237--249
\bibitem{Na} T. Nagell, {\em Introduction to Number Theory}, Almqvist \& Wiksells Boktryckeri (1951)
\bibitem{Ohtsukibook} T. Ohtsuki, {\em Quantum invariants.
A study of knots, 3--manifolds, and their sets}, Series on Knots and
Everything, World Scientific {\bf 29} (2002)
\bibitem{Oh} T. Ohtsuki, {\em A polynomial invariant of rational
homology $3$--spheres}, Invent. Math. {\bf 123} (1996) 241--257
\bibitem{Tu} V. Turaev, {\em Quantum invariants of knots and
3--manifolds}, de Gruyter Studies in Math. {\bf 18} (1994)
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Relaxed optimality conditions
for mu-differentiable functions\thanks{Accepted (18/June/2008) for
International Journal of Applied Mathematics \& Statistics
(IJAMAS).}}
\author{\textbf{Ricardo Almeida$^1$ and Delfim F. M. Torres$^2$}}
\date{$^1$Department of Mathematics\\
University of Aveiro\\ 3810-193 Aveiro, Portugal\\
[email protected]\\[0.3cm] $^2$Department of Mathematics\\
University of Aveiro\\ 3810-193 Aveiro, Portugal\\ [email protected]}
\maketitle
\begin{abstract}
\noindent \emph{We prove some fundamental properties of
mu-differentiable functions. A new notion of local minimizer and
maximizer is introduced and several extremum conditions are
formulated using the language of nonstandard analysis. }
\noindent\textbf{Keywords:} nonstandard analysis,
mu-differentiability, extremum conditions.
\noindent\textbf{2000 Mathematics Subject Classification:} 26E35,
26E05, 26A24.
\end{abstract}
\section{Introduction}
In this work we introduce some sufficient and necessary conditions
to ensure the existence of extreme points for mu-differentiable
functions. As we will see, this type of differentiability has some
advantages when compared to others in the literature: the more
interesting one is that infinitesimal perturbations on the
function do not influence the differentiability.
The paper is organized as follows. In section \ref{NSAuniverse} we
present the usual concepts and results of Nonstandard Analysis.
The study of mu-differentiation is given in section \ref{muDif}
and in section \ref{ExtrCond} we exhibit some new (as far as we
know) sufficient and necessary conditions to guarantee the
existence of extremum points.
\section{The nonstandard universe}\label{NSAuniverse}
Proving theorems in mathematics using the $\epsilon$-$\delta$
definition of limit is sometimes difficult and usually not
obvious, due to the presence of the three non-commuting quantified
expressions $\forall \epsilon \, \exists \delta \, \forall x$. In
spite of the fact that Calculus was initially formulated using
infinitesimals, nineteenth-century mathematicians working in
Mathematical Analysis, such as Augustin Cauchy, Karl Weierstrass
and Richard Dedekind, writing two centuries after Isaac Newton,
usually did not mention infinitesimals. This led to a 20th
century Infinitesimal Calculus in which the only mention of
``infinitesimal'' was in its name. However, in the past decades, things have
changed with the so-called ``Nonstandard Analysis''.
A number $\epsilon$ is called infinitesimal if $|\epsilon|<r$ for
all $r \in \mathbb R^+$ and $\omega=1/\epsilon$ (with $\epsilon
\not= 0$) is called an infinitely large number. In the real number
system $\mathbb R$, the only infinitesimal number is $\epsilon
=0$. However we can consider a larger system, the hyper-real
numbers ${^*\mathbb R}$, which is an ordered field that contains
$\mathbb R$ as a subfield, but also contains infinitesimals and
infinitely large numbers.
Nonstandard Analysis was invented by Abraham Robinson in the
$1960$'s, and among other things, he showed that we can embed the
ordered field of real numbers $(\mathbb R,+,\cdot,\leq)$ as an
ordered subfield of a structure $({^*\mathbb R},\Sop
+,\Sop\cdot,\Sop\leq)$ (the set of hyper-real numbers) which,
besides being a totally ordered field, contains other numbers such
as infinitesimal numbers and infinitely large numbers.
For the convenience of the reader, and in order to fix notation,
we make here a short presentation on the subject. For more about
\textit{Nonstandard Analysis} see \cite{Almeida}, \cite{Cutland},
\cite{HL}, \cite{R} and \cite{SL}.
\begin{figure}
\caption{The hyper-real line.}
\end{figure}
In the following, $E$ and $F$ will denote two arbitrary (non-null)
normed spaces and ${^*E}$ and ${^*F}$ their nonstandard
extensions, respectively. These new sets contain a copy of the
primitive set
$$E \subset {^*E} \, \mbox{ and } \, F \subset {^*F}$$
but also new \textit{ideal} vectors, such as
\textit{infinitesimals}, \textit{infinite vectors}, etc (see
below).
\begin{definition} Let $x$ and $y$ be two vectors of ${^*E}$. We
say that
\begin{enumerate}
\item{$x$ is \emph{infinitesimal} if $|x|<r$ for all $r \in
\mathbb R^+$ and we write $x\approx 0$; the set of
infinitesimal vectors of ${^*E}$ is denoted by
$inf({^*E})$; otherwise we write $x \not\approx 0$.}
\item{$x$ is \emph{finite} if $|x|<r$ for some $r \in
\mathbb R^+$ and we write $x \in fin({^*E})$.} \item{$x$
is \emph{infinite} (or \emph{infinitely large}) if $x$ is
not finite and we write $x\approx \infty$.} \item{$x$ and
$y$ are \emph{infinitely close} if $x-y\approx 0$ and we
write $x\approx y$; if not, $x \not\approx y$.} \item{$x$
is \emph{nearstandard} ($x \in ns({^*E})$) if $x$ is
infinitely close to some (unique) $a\in E$; in this case
we say that $a$ is the \emph{standard part} of $x$ and we
write $a=st(x)$.}
\end{enumerate}
\end{definition}
The rules for computing numbers in ${^*\mathbb R}$ are very simple
and they agree with our intuition. The tables below summarize
those rules. The symbols $\epsilon, a, \infty$ denote an
infinitesimal number, a finite but not infinitesimal number (what
is usually called an \emph{appreciable number}) and an infinite
number, respectively (\textrm{e.g.}, $\epsilon+\epsilon=\epsilon$
means ``the sum of two infinitesimals is an infinitesimal'').
$$\begin{array}{ll}
\multicolumn{2}{l}{\,\,\curvearrowleft}\\
&\begin{array}{|c|c|c|c|} \hline
\textcolor[rgb]{0.00,0.00,0.63}{\pm} &
\textcolor[rgb]{0.00,0.00,0.63}{\epsilon} &
\textcolor[rgb]{0.00,0.00,0.63}{a} &
\textcolor[rgb]{0.00,0.00,0.63}{\infty} \\ \hline
\textcolor[rgb]{0.00,0.00,0.63}{\epsilon} & \epsilon & a &
\infty \\ \hline \textcolor[rgb]{0.00,0.00,0.63}{a} & a & ? &
\infty \\ \hline \textcolor[rgb]{0.00,0.00,0.63}{\infty} & \infty
& \infty & ?\\ \hline
\end{array}
\end{array}
\hspace{1cm}
\begin{array}{ll}
\multicolumn{2}{l}{\,\,\curvearrowleft}\\
&\begin{array}{|c|c|c|c|} \hline
\textcolor[rgb]{0.00,0.00,0.63}{\times} &
\textcolor[rgb]{0.00,0.00,0.63}{\epsilon} &
\textcolor[rgb]{0.00,0.00,0.63}{a} &
\textcolor[rgb]{0.00,0.00,0.63}{\infty} \\ \hline
\textcolor[rgb]{0.00,0.00,0.63}{\epsilon} & \epsilon & \epsilon
& ? \\ \hline \textcolor[rgb]{0.00,0.00,0.63}{a} & \epsilon & a
& \infty \\ \hline \textcolor[rgb]{0.00,0.00,0.63}{\infty} & ? &
\infty & \infty\\ \hline
\end{array}\end{array}
\hspace{1cm}
\begin{array}{ll}
\multicolumn{2}{l}{\,\,\curvearrowleft}\\
&\begin{array}{|c|c|c|c|} \hline
\textcolor[rgb]{0.00,0.00,0.63}{\div} &
\textcolor[rgb]{0.00,0.00,0.63}{\epsilon} &
\textcolor[rgb]{0.00,0.00,0.63}{a} &
\textcolor[rgb]{0.00,0.00,0.63}{\infty}\\ \hline
\textcolor[rgb]{0.00,0.00,0.63}{\epsilon} & ? & \infty & \infty
\\ \hline \textcolor[rgb]{0.00,0.00,0.63}{a} & \epsilon & a &
\infty \\ \hline \textcolor[rgb]{0.00,0.00,0.63}{\infty} &
\epsilon & \epsilon & ?\\ \hline
\end{array}\end{array}$$
To denote the set of infinitely large positive hyper-integers, we
use the symbol ${^*\mathbb N}_{\infty}$,
$${^*\mathbb N}_{\infty}:= \{ N \in {^*\mathbb N} \, | \, N
\approx \infty \}.$$
However, if $\omega$ and $\nu$ are two positive (resp. negative)
infinitely large numbers then $\omega + \nu$ is also a positive
(resp. negative) infinitely large number. Observe that, if
$\epsilon \not= 0$ is an infinitesimal then
\begin{itemize}
\item $\displaystyle \frac{\epsilon^2}{\epsilon}=\epsilon$ is
infinitesimal;
\item $\displaystyle
\frac{\epsilon}{\epsilon}=1$ is finite but not
infinitesimal;
\item $\displaystyle
\frac{\epsilon}{\epsilon^2}=\frac{1}{\epsilon}$ is
infinite.
\end{itemize}
So infinitesimals (and infinite numbers) have different orders of
magnitude. We may view the symbolic expression ``in a limit
computation, $\displaystyle\frac00$ is indeterminate" as a
short-hand for the fact that the quotient between two
infinitesimal numbers can be infinitesimal, finite but not
infinitesimal, or infinite.
It must be noted that every nearstandard vector is finite but the
converse is false. In fact,
\begin{center}
$E$ is a finite dimensional space if and only if
$ns({^*E})=fin({^*E}).$
\end{center}
Given a vector $x \in{^*E}$, we define the \emph{monad} of $x$
as
$$\mu(x):=\{ y \in {^*E} \, | \, y-x \mbox{ is infinitesimal}
\}.$$
For example, $\mu(0)$ is simply the set of infinitesimals vectors.
For every $a \in E$, $\mu(a)=a+\mu(0)$.
\begin{theorem} Let $x$ and $y$ be two vectors in $ns({^*\mathbb
R})$. Then,
\begin{enumerate}
\item $st(x \pm y) =st(x) \pm st(y)$, $st(xy)=st(x)st(y)$ and
$st(x/y)=st(x)/st(y)$ if $st(y) \not= 0$; \item $st(x)
\leq st(y)$ if and only if $x \approx y$ or $x \leq y$;
\item $st(x) \geq st(y)$ if and only if $x \approx y$ or
$x \geq y$; \item for any $z \in \mathbb R$, $st(z)=z$;
\item $x \approx y$ if and only if $st(x)=st(y)$.
\end{enumerate}
\end{theorem}
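For instance (an elementary illustration of item 1), if $\epsilon$ and $\delta$ are infinitesimals, then
$$st\left((2+\epsilon)(5+\delta)\right)=st(2+\epsilon)\,st(5+\delta)=10
\quad \mbox{ and } \quad
st\left(\frac{3+\epsilon}{1+\delta}\right)=\frac{st(3+\epsilon)}{st(1+\delta)}=3.$$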
Clearly, some of the previous rules hold for nearstandard vectors
of ${^*E}$. For example, if $x,y \in ns({^*E})$, it is
true that $st(x \pm y)=st(x) \pm st(y)$.
Let $U$ be a nonempty subset of $E$. The set of \emph{nearstandard
vectors} of $U$, denoted by $ns({^*U})$, is given by
$$ns({^*U}):= \{ x \in {^*U} \, | \, x \in ns({^*E}) \, \mbox{ and
} \, st(x) \in U \}.$$
\begin{definition} Given an internal function $f:{^*U}\to{^*F}$,
we say that $f$ is \emph{S-continuous}
at $a\in {^*U}$ if for all $x\in{^*U}$, if $x \approx a$ then
$f(x) \approx f(a)$. If $f$ is S-continuous at all $a\in U$, we
say that $f$ is \emph{S-continuous}. If it still holds for all
$a\in{^*U}$, then $f$ is said to be \emph{SU-continuous}.
\end{definition}
There exists a relation between S-continuity and (classical)
continuity; indeed, the following can be proved.
\begin{theorem} A standard function $f:U\to F$ is continuous
(resp. uniformly continuous) if and only if its nonstandard
extension ${^*f}:{^*U}\to{^*F}$ is S-continuous (resp.
SU-continuous).
\end{theorem}
This is a very nice characterization of continuity. For example,
let us prove that $f(x)=x^2, \, x \in \mathbb R$, is continuous
but not uniformly continuous. Fix $a \in \mathbb R$. It is enough
to see that, given any infinitesimal $\epsilon$,
$$f(a+\epsilon)=a^2+2\epsilon a+\epsilon^2\approx a^2=f(a);$$
but given (any) infinite number $\omega$,
$$f \left( \omega+\frac{1}{\omega} \right)
=\omega^2+2+\frac{1}{\omega^2} \not\approx \omega^2=f(\omega).$$
If we consider, however, $f(x)=x^2, \, x \in [-1,1]$, then $f$ is
uniformly continuous. In fact, if $x,y \in {^*[-1,1]}$ with $x
\approx y$,
$$f(x)-f(y)=x^2-y^2=(x-y)(x+y)\approx 0$$
since $x-y \approx 0$ and $x+y$ is finite.
\section{The mu-differentiability}\label{muDif}
We now present the basic properties of a recent notion of
differentiation, called \emph{mu-differentiability}
(\cite{Almeida} and \cite{AlmeidaNeves}). The important point about this
type of derivative is that, under some assumptions, if $g$ is a
standard $C^1$ function, $f$ is an internal function, and $g$ is
somewhat infinitely close to $f$, then $f$ is mu-differentiable,
and vice-versa. From this we see that for standard functions,
mu-differentiability is equivalent to Fr\'{e}chet differentiation.
Therefore, the novelties appear when we work with internal (but
not standard) functions. To begin with, we will define what the
\emph{standard part of a function} $f$ is. Let $f:{^*U}\to {^*F}$ be
an internal function such that $f(ns({^*U})) \subseteq ns({^*F})$.
Then, we can define a new (standard) function, which we denote by
$st(f)$,
$$\begin{array}{cccl}
st(f): & U & \to & F\\
& x &\mapsto & st(f(x)).
\end{array}$$
Observe that, by definition, for all $x \in U$,
$st(f)(x)=st(f(x))$. For example, let
\begin{equation}\label{example1}\begin{array}{cccl}
f: & {^*\mathbb R} & \to & {^*\mathbb R}\\
& x &\mapsto & \left\{
\begin{array}{lll}
x^2 & \mbox{ if } & x \not= 0\\ \epsilon & \mbox{ if } & x=0\\
\end{array}
\right.
\end{array}\end{equation}
where $\epsilon$ is any nonzero infinitesimal. Then $st(f)$ is
simply the quadratic function $x\mapsto x^2$. Notice that the
nonstandard extension of $st(f)$ can be distinct from $f$. In this
example, since $st(f)(x)=x^2 \, , x \in \mathbb R$, then
${^*(st(f))}(x)=x^2 \, , x \in {^*\mathbb R}$.
In \cite{Schlesinger} a new kind of
differentiation is presented:
\begin{definition} Let $U\subseteq E$ be an open set and
$f:{^*U}\to{^*F}$ be an internal function such that
$f(ns({^*U}))\subseteq ns({^*F})$. We say that $f$ is
\emph{m-differentiable} at $a \in U$ if there exists a positive
infinitesimal $\delta_a$ and an internal finite\footnote{By finite
we mean $Df_a(fin({^*E}))\subseteq fin({^*F})$.} linear operator
$Df_a \in {^*L(E,F)}$ such that
$$\forall x \in {^*U} \hspace{.5cm} \delta_a< |x-a|\approx 0
\Rightarrow f(x)-f(a)=Df_a(x-a)+|x-a|\eta,$$
for some $\eta \approx 0$ (which depends on $x$). If $f$ is
m-differentiable at all $a\in U$, we say that $f$ is
m-differentiable.
\end{definition}
There is an important criterion for testing m-differentiability
by comparison with standard functions:
\begin{theorem} \cite{Schlesinger} Let $E$ and $F$ be standard
finite dimensional normed spaces, $K$ a standard compact subset of
$E$ and $f:{^*K} \rightarrow {^*F}$ an internal function. Then,
the two following statements are equivalent:
\begin{enumerate}
\item{$f$ is S-continuous and m-differentiable;} \item{There
exists a differentiable standard function $g:K \rightarrow
F$ such that
$$\displaystyle \sup_{x \in {^*K}}|f(x)-g(x)|\approx 0.$$}
\end{enumerate}
\end{theorem}
Basically, this result asserts that if $g$ is a standard
differentiable function and $f$ is an internal function infinitely
close to $g$, then $f$ is m-differentiable. For example, if $f$ is
the function defined above (see (\ref{example1})), and if we
define $g(x)=x^2 \, ,x \in \mathbb R$, since $f(x)\approx g(x)$
for all $x \in {^*\mathbb R}$ then $f$ is m-differentiable.
In \cite{AlmeidaNeves} this notion is extended by introducing the
concept of m-uniformly differentiability (shortly
mu-differentiable). In the following we present the main results
of that paper.
\begin{definition}\label{defmu} Let $U\subseteq E$ be an open set
and $f:{^*U} \rightarrow {^*F}$ be an internal function satisfying
$f(ns({^*U})) \subseteq ns({^*F})$. We say that $f$ is
\emph{mu-differentiable} if there exists an internal function from
$^*\!U $ into ${^*L(E,F)}$, $x\mapsto Df_x$ such that
\begin{enumerate}
\item when $x$ is near-standard in $^*U$, $Df_x$ is a finite
map. \item for each $a \in U$, there exists a positive
infinitesimal $\delta_a$ for which, when $x,y\approx a$,
some infinitesimal vector $\eta$ verifies
$$|x-y|>\delta_a \Rightarrow f(x)-f(y)=Df_x(x-y)+|x-y|\eta.$$
\end{enumerate}
\end{definition}
Actually, a single $\delta$ that works for all points may be taken in Definition
\ref{defmu}, i.e., the following holds.
\begin{theorem}\label{tdefmu}
Let $f:{^*U} \rightarrow {^*F}$ be an internal function; $f$ is
mu-differentiable if and only if all the following conditions
are verified
\begin{enumerate}
\item $f(ns({^*U})) \subseteq ns({^*F})$. \item There exist an
internal function from $^*\!U$ into $^*L(E,F)$, $x\mapsto
Df_x$ and a positive infinitesimal $\delta$ such that
\begin{enumerate}
\item when $x$ is near-standard in $^*U$, $Df_x$ is a
finite map. \item when $x$ and $y$ are near-standard
in $^*U$, some infinitesimal vector $\eta$
verifies
$$
|x-y|>\delta \Rightarrow f(x)-f(y)=Df_x(x-y)+|x-y|\eta \,
.
$$
\end{enumerate}
\end{enumerate}
\end{theorem}
The term m-uniform differentiability is justified by the following
result:
\begin{theorem}\label{theorem1} Let $f:{^*U} \rightarrow {^*F}$ be
an internal function. Then:
\begin{enumerate}
\item{If $F$ is a finite dimensional space and $f$ is a
mu-differentiable function, then $st(f):U \rightarrow F$
is a $C^1$ function and $D st(f)_a=st( Df_a)$ for $a \in
U$. Furthermore, if $E$ is also finite dimensional then
$$\forall a \in U\, \exists \eta _0 \approx 0 \, \forall x
\approx a \hspace{.5cm} |f(x)-st(f)(x)| \leq \eta_0.$$}
\item{If there exists a $C^1$ standard function $g:U
\rightarrow F$ with
$$\forall a \in U\, \exists \eta _0 \approx 0 \, \forall x
\approx a \hspace{.5cm} |f(x)-g(x)| \leq \eta_0,$$ then $f$ is
mu-differentiable. Moreover, $g=st(f)$.}
\end{enumerate}
\end{theorem}
From the previous theorem, one can prove that for standard
functions $f$,
\begin{center} $f$ is of class $C^1$ if and only if $f$ is
mu-differentiable.
\end{center}
It is clear that, if $f$ and $g$ are two mu-differentiable
functions and $k \in fin({^*\mathbb{R}})$, then $f+g$ and $kf$ are
also mu-differentiable and $D(f+g)_x=Df_x+Dg_x$ and $D(kf)_x=k \,
Df_x$.
There is some form of S-continuity for the function $f$ and for
its derivative map $Df_{(\cdot)}$:
\begin{theorem} If $f:{^*U} \rightarrow {^*F}$ is a
mu-differentiable function, then
$$\forall x,y \in ns({^*U}) \hspace{0.5cm} x \approx y \Rightarrow
f(x) \approx f(y)$$
and for all unit vector $d \in {^*E}$,
$$\forall x,y \in ns({^*U}) \hspace{0.5cm} x \approx y \Rightarrow
Df_x(d) \approx Df_y(d).$$
\end{theorem}
\begin{theorem}[Chain Rule] Let $g$ and $f$ be two
m-differentiable functions at $a$ and $g(a)$, respectively, where
$a$ and $g(a)$ are two standard vectors. In addition, if $Dg_a$ is
invertible and $\|(Dg_a)^{-1}\|$
is finite, then $f \circ g$ is m-differentiable at $a$ and
$D(f\circ g) _a=Df_{g(a)} \circ Dg_a$.
\end{theorem}
Note that since a mu-differentiable function is m-differentiable,
this result follows for mu-differentiability.
Next we present a Mean Value Theorem for mu-differentiable
functions. In contrast to standard functions, for an internal
function the derivative is not unique. For example, if
$f:{^*\mathbb R}\to {^*\mathbb R}$ is an internal function,
$a\in\mathbb R$ and $f'(a)$ is one derivative of $f$ at $a$ then
$f'(a)+\epsilon$ with $\epsilon\approx0$ is also a possible
derivative:
$$\delta\approx 0 \Rightarrow f'(a) \approx
\frac{f(a+\delta)-f(a)}{\delta} \approx f'(a)+\epsilon.$$
This justifies the presence of the infinitesimal term $|x-y|\eta$
in the following result.
\begin{theorem}[Mean Value Theorem]Let $U$ be a standard open
convex subset of $E$ and $f:{^*U} \rightarrow {^*\mathbb{R}}$ an
internal mu-differentiable function. Take $\delta$ as
given by Theorem \ref{tdefmu}. Then, for all $x,y \in ns({^*U})$
with $|x-y|>\delta $,
$$\exists c \in {[x,y]} \hspace{0.3cm} f(x)-f(y)=Df_c(x-y)+|x-y|
\eta$$
for some $\eta \approx 0.$ More generally, let $f:{^*U}
\rightarrow {^*F}$ be an internal mu-differentiable function and
$\delta$ as given by Theorem \ref{tdefmu}. Then, for all $x,y \in
ns({^*U})$ with $|x-y|>\delta $,
$$\exists c \in {[x,y]} \hspace{0.3cm} |f(x)-f(y)| \leq
|Df_c(x-y)|+|x-y|\eta$$
for some $\eta \approx 0$.
\end{theorem}
A full version of an Inverse Mapping Theorem for mu-differentiable
functions is not expected. The argument is simple: the standard
function $g(x)=x \, , x \in \mathbb R$, is of class $C^1$ and
invertible. We have proved that any internal function $f$
infinitely close to $g$ is mu-differentiable, so the one-to-one
condition may easily fail. Nevertheless, we have some form of
injectivity:
\begin{theorem}[Inverse Mapping Theorem] Let $f:{^*U} \rightarrow
{^*F}$ be an internal mu-differentiable function. Assume that, for
a certain $a \in U$, $Df_a$ is invertible and $\|(Df_a)^{-1}\|$ is
finite. Then, there exists a standard neighborhood ${^*V}$ of $a$
such that $f$ is injective on the standard elements of ${^*V}$,
\textit{i.e.},
$$\forall x,y \in V \hspace{.2cm} x \not= y \Rightarrow f(x) \not=
f(y).$$
\end{theorem}
Let $f$ be a mu-differentiable function and
$$Df_{(\cdot)}:{^*U}\rightarrow {^*L(E,F)}$$
its derivative map. Since $L(E,F)$ is a standard normed space, it
makes sense to define higher-order derivatives. We say that $f$ is
twice mu-differentiable provided $f$ and $Df_{(\cdot)}$ are both
mu-differentiable.
Recursively, $f$ is $k$-times mu-differentiable ($k \in \mathbb
N$) provided $f$, $Df_{(\cdot)}$, ...,$D^{k-1}f_{(\cdot)}$ are all
mu-differentiable.
\begin{theorem}\label{theorem2} Let $f:{^*U} \rightarrow {^*F}$ be
an internal function. Then:
\begin{enumerate}
\item{If $F$ is a finite dimensional space and $f$ is
$k$-times mu-differentiable, then $st(f):U \rightarrow F$
is a $C^k$ function and for each $a \in U$, $D^j
st(f)_a=st( D^jf_a)$ for $j=1,2, \ldots , k$. Furthermore,
if $E$ is also finite dimensional,
$$\forall a \in U \, \exists \eta _0 \approx 0 \, \forall x
\approx a \hspace{.3cm} |f(x)-st(f)(x)| \leq \eta_0$$
and
$$\forall j \in \{ 1,2, \ldots, k-1 \} \hspace{.1cm} \forall
a \in U \hspace{.1cm} \exists \eta _j \approx 0 \hspace{.1cm}
\forall x \approx a \hspace{.5cm} |D^jf_x-D^j st(f)_x| \leq
\eta_j.$$}
\item{If there exists a $C^k$ standard function $g:U
\rightarrow F$ with
$$\forall a \in U \, \exists \eta _0 \approx 0 \, \forall x
\approx a \hspace{.3cm} |f(x)-g(x)| \leq \eta_0$$
and
$$\forall j \in \{ 1,2, \ldots, k-1 \} \, \forall a \in U \,
\exists \eta _j \approx 0 \, \forall x \approx a \hspace{.3cm}
|D^jf_x-D^jg_x| \leq \eta_j$$
then $f$ is $k$-times mu-differentiable. Moreover,
$g=st(f)$.}
\end{enumerate}
\end{theorem}
\begin{theorem}[Taylor's Theorem]\label{TaylorTheor} Let $E$ and
$F$ be two standard finite dimensional spaces, $U\subset E$ a
standard open set and $f:{^*U} \rightarrow {^*F}$ an internal
function $k$-times mu-differentiable, for some $k \in \mathbb{N}$.
Then,
\begin{enumerate}
\item{for every $x \in ns({^*U})$, there exists $\epsilon
\approx 0$ such that, whenever $y \in {^*U}$ with
$\epsilon < |y-x| \approx 0$, there exists $\eta \approx
0$ satisfying
$$f(y)=f(x)+Df_x(y-x)+\frac{1}{2!}D^{2}f_x(y-x)^{(2)}+...+\frac{1}{k!}D^{k}f_x(y-x)^{(k)}+|y-x|^k
\eta.$$}
\item{for every $x \in ns({^*U})$, there exists $\epsilon
\approx 0$ such that, whenever $y \in {^*U}$ with
$\epsilon < |y-x| \approx 0$, there exists $\eta \approx
0$ satisfying
$$f(y)=st(f)(x)+Dst(f)_x(y-x)+\frac{1}{2!}D^{2}st(f)_x(y-x)^{(2)}+...$$
$$+\frac{1}{k!}D^{k}st(f)_x(y-x)^{(k)}+|y-x|^k \eta.$$}
\end{enumerate}
\end{theorem}
\section{Main results: extremum conditions for mu-differentiable
functions}\label{ExtrCond}
In the following, $f:{^*\mathbb R}\to{^*\mathbb R}$ denotes an
internal function. The goal is to present a notion of minimizer
and maximizer, and to infer the necessary and sufficient
conditions of optimality. Obviously, the (usual) definition of
minimizer is not a good one for our study. Let
$g(x)=x^2,x\in\mathbb R$. By Theorem~\ref{theorem1}, any internal
function $f:{^*\mathbb R}\to{^*\mathbb R}$ infinitely close to $g$
is mu-differentiable and $(st(f))^{(k)}(a)=st(f^{(k)})(a)$, $a\in
\mathbb R$. Consequently, if $\epsilon$ is a positive
infinitesimal, the functions
$$\begin{array}{lcccr}
f_1(x)=x^2 \, , & \hspace{.5cm} f_2(x)= \left\{
\begin{array}{lll}
x^2 \, & \mbox{ if } & x \not=0\\ \epsilon \, &\mbox{ if } & x
=0\\
\end{array}\right. \, ,& \hspace{.5cm}
f_3(x)= \left\{
\begin{array}{lll}
x^2 \, &\mbox{ if } & x \not=0\\ -\epsilon \, &\mbox{ if } & x
=0\\
\end{array} \right.
\end{array}$$
have the same derivatives of all orders (or the derivatives can be chosen as
such). Consequently, the definition of minimizer must take into
account this fact. In the following, given $x,y\in{^*\mathbb R}$,
$x \maior y$ (resp. $x\menor y$) will mean $x \geq y$ or $x\approx
y$ (resp. $x \leq y$ or $x\approx y$). Moreover, $x \gg y$ (resp.
$x \ll y$) is an abbreviation for $x >y$ and $x \not\approx y$
(resp. $x<y$ and $x \not\approx y$).
\begin{definition} Let $a\in\mathbb R$ be a real. We say that $a$
is a \emph{local m-minimizer} of $f$ if there exists a positive
$r\in\mathbb R$ such that
$$\forall x \in {^*]a-r,a+r[} \hspace{.5cm} f(x) \maior f(a).$$
\end{definition}
\begin{definition} Let $a\in\mathbb R$ be a real. We say that $a$
is a \emph
{local m-maximizer} of $f$ if there exists a positive $r\in\mathbb
R$ such that
$$\forall x \in {^*]a-r,a+r[} \hspace{.5cm} f(x) \menor f(a).$$
\end{definition}
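To illustrate these definitions with the functions $f_1,f_2,f_3$ above, note that $x=0$ is a local m-minimizer of all three: for any $x\in{^*\mathbb R}$ either $x^2\geq\epsilon$, or $x^2$ is itself infinitesimal and then
$$x^2\approx \epsilon=f_2(0) \quad \mbox{ and } \quad x^2\approx -\epsilon=f_3(0),$$
so in either case $f_i(x)\maior f_i(0)$ for $i=1,2,3$. On the other hand, $0$ is not a minimizer of $f_2$ in the usual sense, since $f_2(x)=x^2<\epsilon=f_2(0)$ whenever $0<|x|<\sqrt{\epsilon}$.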
Replacing $f$ by $-f$, all results proved henceforth about
m-minimizers have an analogue for m-maximizers. Without loss of
generality, from now on we will simply say that $a$ is an
m-minimizer.
The next lemma establishes a relation between the standard and the
nonstandard universes. As we will see, to prove theorems we
transfer some properties to the standard universe, apply the
well-known results about standard functions and then go back to
the nonstandard universe.
\begin{lemma}\label{ponte} Let $f$ be a mu-differentiable
function. Then,
$$a \mbox{ is a m-minimizer of } f \mbox{ if and only if } a
\mbox{ is a minimizer of } st(f).$$
\end{lemma}
\begin{proof} First suppose that $a$ is a m-minimizer. Then,
$$\forall x \in {^*]a-r,a+r[} \hspace{.5cm} f(x) \maior f(a)$$
and so $st(f(x)) \geq st(f(a))$. In particular,
$$\forall x \in ]a-r,a+r[ \hspace{.5cm} st(f(x)) \geq st(f(a)),$$
\textit{i.e.}, $st(f)(x) \geq st(f)(a)$ and we proved that $a$ is
a minimizer of $st(f)$.
To prove the converse, assume
$$\forall x \in ]a-r,a+r[ \hspace{.5cm} st(f)(x) \geq st(f)(a).$$
By the Transfer Principle (see \textrm{e.g.} \cite{HL}), it holds
$$\forall x \in {^*]a-r,a+r[} \hspace{.5cm} st(f)(x) \geq
st(f)(a).$$
Since
\begin{itemize}
\item{$st(f)(a)=st(f(a)) \approx f(a)$ since $a$ is standard;}
\item{$st(f)(x)\approx st(f) (st(x)) = st(f(st(x)))
\approx f(st(x))\approx f(x)$ since $f$ and $st(f)$ are
S-continuous;}
\end{itemize}
it follows
$$\forall x \in {^*]a-r,a+r[} \hspace{.5cm} f(x) \maior f(a).$$
\end{proof}
Recall Theorem \ref{theorem2}: if $f:{^*\mathbb R}\to{^*\mathbb
R}$ is $k$-times mu-differentiable, then $st(f)$ is of class $C^k$
and $(st(f))^{(k)}(a)=st(f^{(k)})(a)$, for every $a \in \mathbb
R$. Moreover, by definition of standard part of a function, if
$a\in \mathbb R$ then $st(f^{(k)})(a)=st(f^{(k)}(a))$; so if
$f^{(k)}(a)\approx L$, for some $L \in \mathbb R$, then
$st(f^{(k)})(a)=L$.
\begin{theorem}[Necessary condition for m-minimum] If $a$ is a
m-minimizer of $f$, then $f'(a)\approx 0$.
\end{theorem}
\begin{proof} If $a$ is a m-minimizer of $f$, then $a$ is a
minimizer of $st(f)$. Consequently $(st(f))'(a)=0$. By
Theorem~\ref{theorem1}, $st(f')(a)=0$ and so $f'(a)\approx 0$.
\end{proof}
\begin{theorem}[Sufficient condition for m-minimum] If $f$ is
twice mu-differentiable, $f'(a)\approx 0$, and $f''(a) \gg 0$,
then $a$ is a m-minimizer of $f$.
\end{theorem}
\begin{proof} Since $st(f'(a))=0$ and $st(f''(a))>0$, it follows
that $(st(f))'(a)=0$ and $(st(f))''(a)>0$. Then $a$ is a minimizer
of $st(f)$ and so a m-minimizer of $f$.
\end{proof}
We remark that we can prove the previous theorem using only
Taylor's Theorem (Theorem~\ref{TaylorTheor}), avoiding the usage
of Lemma \ref{ponte}. By Taylor's Theorem, there exists
$\epsilon\approx0$ such that for all $x\approx a$, if
$|x-a|>\epsilon$ then for some infinitesimal $\eta$,
$$f(x)-f(a)=f'(a)(x-a)+f''(a)\frac{(x-a)^2}{2}+(x-a)^2\eta=(x-a)^2\left(\frac{f'(a)}{x-a}
+\frac{f''(a)}{2}+\eta \right).$$
We may also assume that $\epsilon>\sqrt{|f'(a)|}$. Then,
$$\left| \frac{f'(a)}{x-a}
\right|<\frac{|f'(a)|}{\epsilon}<\frac{|f'(a)|}{\sqrt{|f'(a)|}}\approx
0$$
and so
$$\frac{f'(a)}{x-a} +\frac{f''(a)}{2}+\eta \approx
\frac{f''(a)}{2}>0.$$
We proved that
$$\forall x\in {^*\mathbb R} \hspace{.5cm} [x\approx a \wedge
|x-a|>\epsilon] \Rightarrow f(x)>f(a).$$
Define the (internal) set $C$ as being
$$C:= \{ \theta \in {^*\mathbb R^+} \, | \, \theta \leq \epsilon
\vee (\forall \xi \in ]\epsilon,\theta[ \hspace{.5cm}
f(a\pm\xi)>f(a) ) \}.$$
Since $C$ contains all positive infinitesimal numbers, it also
contains a real $r$ (\textit{Cauchy's Principle}, see \cite{HL}).
Since $r > \epsilon$, it follows that
$$\forall \xi \in ]\epsilon,r[ \hspace{.5cm} f(a\pm\xi)>f(a).$$
For $ \xi \in [-\epsilon,\epsilon]$, $f(a+\xi)\approx f(a)$. In
conclusion,
$$\forall \xi \in {^*]-r,r[} \hspace{.5cm} f(a+\xi)\maior f(a).$$
\begin{theorem}[Higher-order necessary condition for m-minimum]
Let $f$ be a function $k$-times mu-differentiable. If $a$ is a
m-minimizer of $f$ and
$$f'(a)\approx f''(a) \approx \ldots \approx f^{(k-1)}(a)\approx
0$$
then
\begin{enumerate}
\item $f^{(k)}(a)\approx0$ if $k$ is odd; \item
$f^{(k)}(a)\maior 0$ if $k$ is even.
\end{enumerate}
\end{theorem}
\begin{proof} If $a$ is a m-minimizer of $f$ then $a$ is a
minimizer of $st(f)$. On the other hand,
$$(st(f))'(a)=(st(f))''(a)= \ldots =(st(f))^{(k-1)}(a) =0.$$
Therefore, (see \textit{e.g.} \cite{fenske})
\begin{enumerate}
\item $(st(f))^{(k)}(a) =0 \Leftrightarrow st(f^{(k)}(a))=0
\Leftrightarrow f^{(k)}(a) \approx 0$ if $k$ is odd; \item
$(st(f))^{(k)}(a) \geq 0 \Leftrightarrow st(f^{(k)}(a))
\geq 0 \Leftrightarrow f^{(k)}(a) \maior 0$ if $k$ is
even.
\end{enumerate}
\end{proof}
\begin{theorem}[Higher-order sufficient condition for m-minimum]
If $f$ is $k$-times mu-differentiable,
$$f'(a)\approx f''(a) \approx \ldots \approx f^{(k-1)}(a)\approx
0$$ and $f^{(k)}(a)\not\approx 0$, then
\begin{enumerate}
\item if $k$ is odd then $a$ is not a m-minimizer of $f$.
\item if $k$ is even and $f^{(k)}(a) \gg 0$, then $a$ is a
m-minimizer of $f$.
\end{enumerate}
\end{theorem}
\begin{proof} Since
$$(st(f))'(a)=(st(f))''(a)= \ldots =(st(f))^{(k-1)}(a) =0 \mbox{
and } (st(f))^{(k)}(a)\not=0,$$
it follows that (see \textit{e.g.} \cite{fenske})
\begin{enumerate}
\item{for $k$ odd, $a$ is not a minimizer of $st(f)$ and so
not a m-minimizer of $f$.} \item{for $k$ even, since
$(st(f))^{(k)}(a)>0$, $a$ is a minimizer of $st(f)$ and
so a m-minimizer of $f$.}
\end{enumerate}
\end{proof}
For example, let
$f(x)=\frac{1}{7}x^7-\frac{1}{2}x^6+\frac{2}{5}x^5+\epsilon x+
\delta x^2$, where $\epsilon$ and $\delta$ are two non-zero
infinitesimals. Then, $f'(x)=x^6-3x^5+2x^4+\epsilon+2\delta x$ and
so
$$f'(0)=\epsilon\approx0 \, , \, f'(1)=\epsilon+2\delta\approx0 \,
, \,f'(2)=\epsilon+4\delta\approx0.$$
The second derivative of $f$ is $f''(x)=6x^5-15x^4+8x^3+2\delta$.
Thus,
$$f''(0)=2\delta\approx0 \, , \, f''(1)=-1+2\delta \ll 0 \, ,
\,f''(2)=16+2\delta \gg 0.$$
Furthermore, since $f'''(0)=f^{(4)}(0)=0$ and $f^{(5)}(0) =48 \gg
0$, we conclude that
$$x=1 \mbox{ is a m-maximizer of }f \, , \, x=2 \mbox{ is a
m-minimizer of }f$$
$$\mbox{and } x=0 \mbox{ is neither a m-maximizer nor a
m-minimizer of }f.$$
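For completeness, the higher derivatives used in the last step are
$$f'''(x)=30x^4-60x^3+24x^2,\quad f^{(4)}(x)=120x^3-180x^2+48x,\quad
f^{(5)}(x)=360x^2-360x+48,$$
so that indeed $f'''(0)=f^{(4)}(0)=0$ and $f^{(5)}(0)=48$.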
\subsection*{Functions with several variables}
From now on we will work with internal functions with several
variables, $f:{^*\mathbb R}^n \to {^*\mathbb R}$. Suppose that $f$
is mu-differentiable, \textit{i.e.}, given $a\in \mathbb R^n$,
there exists some $\delta\approx 0$ such that, for all $x\approx
a$ and $\epsilon\approx0$, if $|\epsilon|>\delta$ then
\begin{equation}\label{eq1}
f(x+\epsilon)-f(x)=Df_x(\epsilon)+|\epsilon|\eta
\end{equation}
for some $\eta\approx 0$. Let $x:=(x_1,\ldots,x_n)$,
$\epsilon:=(\epsilon_1,\ldots,\epsilon_n)$ and
$Df_x:=(f'_1,\ldots,f'_n)$. Rewriting equation (\ref{eq1}),
$$f(x_1+\epsilon_1,\ldots,x_n+\epsilon_n)-f(x_1,\ldots,x_n)=f'_1\epsilon_1+\ldots+
f'_n\epsilon_n +\sqrt{\epsilon_1^2+\ldots + \epsilon_n^2} \;
\eta.$$
Consequently, for each $i \in \{1,\ldots, n \}$, if we fix
$\epsilon_i\not=0$ and $\epsilon_k=0$ for $k \in \{1,\ldots,
i-1,i+1,\ldots,n \}$,
$$f(x_1,\ldots, x_{i-1}, x_i+\epsilon_i, x_{i+1},
\ldots,x_n)-f(x_1,\ldots,x_n)=f'_i\epsilon_i+ |\epsilon_i|\eta$$
if $|\epsilon_i|>\delta$, \textit{i.e.},
$$f'_i\approx \frac{f(x_1,\ldots, x_{i-1}, x_i+\epsilon_i,
x_{i+1}, \ldots,x_n)-f(x_1,\ldots,x_n)}{\epsilon_i}.$$
Let us denote $f'_i$ by $\displaystyle \left. \frac{\partial
f}{\partial x_i} \right|_{x}$ and we call them the \textit{partial
derivatives} of $f$.
By Theorem \ref{theorem1}, $st(Df_a)=Dst(f)_a$ whenever $a$ is
standard. So
$$st\left(\left. \frac{\partial f}{\partial x_1}
\right|_{a},\ldots, \left. \frac{\partial f}{\partial x_n}
\right|_{a}\right)=\left(\left. \frac{\partial st(f)}{\partial
x_1} \right|_{a},\ldots,\left. \frac{\partial st(f)}{\partial x_n}
\right|_{a}\right)$$
\begin{equation}\label{eq3}\Leftrightarrow st \left( \left.
\frac{\partial f}{\partial x_i} \right|_{a} \right) = \left.
\frac{\partial st(f)}{\partial x_i} \right|_{a} \, \mbox{ for } \,
i \in \{1,\ldots,n \}.\end{equation}
For example, let
\begin{equation}\label{eq2}\begin{array}{lcll}
f: & {^*\mathbb R^2} & \to & {^*\mathbb R}\\
& (x_1,x_2) & \mapsto & \frac{\sin(\epsilon
x_1)}{\epsilon}+x_2\\
\end{array}
\end{equation}
where $\epsilon$ is a positive infinitesimal number. Let us prove
that $f$ is mu-differentiable. Denote $f$ as the sum of $f_1$ and
$f_2$. Clearly, $f_2(x_1,x_2):=x_2$ is mu-differentiable, so we
will only prove that $f_1(x_1,x_2):= \frac{\sin(\epsilon
x_1)}{\epsilon}$ is also mu-differentiable. First, observe that
for every $(x_1,x_2)\in ns( {^*\mathbb R^2})$,
$$\frac{\sin(\epsilon x_1)}{\epsilon}= \frac{\sin(\epsilon
x_1)}{\epsilon x_1} x_1 \approx x_1\in ns({^*\mathbb R}).$$
Thus $f(ns( {^*\mathbb R^2}))\subseteq ns({^*\mathbb R})$. Let
$$\begin{array}{lcll}
g: & \mathbb R^2 & \to & \mathbb R\\
& (x_1,x_2) & \mapsto & x_1.\\
\end{array}$$
Then $g$ is a (standard) $C^1$ function and, given $a \in \mathbb
R^2$, let $\eta:=\epsilon$. Then, for all $(x_1,x_2)\approx a$,
$$|f_1(x_1,x_2)-g(x_1,x_2)|=\left| \frac{\sin(\epsilon
x_1)}{\epsilon} -x_1 \right|=\left| \frac{\epsilon x_1 -
\frac{(\epsilon x_1)^3}{6}+\epsilon^3 \xi}{\epsilon} -x_1
\right|$$
$$=\left|- \frac{\epsilon^2 x_1^3}{6}+\epsilon^2
\xi\right|=\epsilon \left| - \frac{\epsilon x_1^3}{6}+\epsilon
\xi\right| < \eta$$
for some $\xi\approx 0$ (note that, since $x\mapsto \sin x$ is a
$C^1$ standard function, by Taylor's Theorem,
$\sin(x)=x-\frac{x^3}{6}+x^3\xi, \, (\xi\approx 0)$, whenever $x$
is infinitesimal). Therefore, by Theorem \ref{theorem1}, $f_1$ is
mu-differentiable.
In this case, the partial derivatives of $f$ are given by
$$\begin{array}{ll}
\displaystyle \left. \frac{\partial f}{\partial x_1} \right|_{x} &
\approx \displaystyle
\frac{f(x_1+\delta,x_2)-f(x_1,x_2)}{\delta}\\
& =
\displaystyle \frac{\sin(\epsilon x_1 + \epsilon
\delta)-\sin(\epsilon x_1)}{\epsilon\delta}\\[0.2cm]
& =
\displaystyle \frac{\cos(x^*)\cdot
\epsilon\delta}{\epsilon\delta}\\
&= \cos(x^*)\\
\end{array}$$
for some $x^* \in [\epsilon x_1, \epsilon x_1 + \epsilon \delta]$.
Therefore $x^*\approx 0$ and
$$\left. \frac{\partial f}{\partial x_1} \right|_{x} \approx 1.$$
In a similar way we might prove that
$$\left. \frac{\partial f}{\partial x_2} \right|_{x} =1.$$
\begin{definition} Let $f:{^*\mathbb R}^n\to {^*\mathbb R}$ be a
function and $a \in \mathbb R^n$ a vector. We say that $a$ is a
\emph{local m-minimizer} of $f$ if
$$f(x) \maior f(a) \, \mbox{ for all } \, x \in {^*B_r(a)}:= \{x
\in {^*\mathbb R}^n \, | \,\, |x-a|<r \},$$
where $r\in \mathbb R$ is a positive real number. Analogously, we
define \emph{local m-maximizer} of $f$.
\end{definition}
Similarly to Lemma~\ref{ponte}, there exists a correspondence
between m-minimizers of internal functions and minimizers of
standard functions.
\begin{lemma}\label{ponte2} If $f:{^*\mathbb R}^n\to {^*\mathbb
R}$ is mu-differentiable, then,
$$a \mbox{ is a m-minimizer of } f \mbox{ if and only if } a
\mbox{ is a minimizer of } st(f).$$
\end{lemma}
\begin{theorem} If $f:{^*\mathbb R}^n\to {^*\mathbb R}$ is a
mu-differentiable function and $a$ is a m-minimizer of $f$, then
$$\left. \frac{\partial f}{\partial x_i} \right|_{a}\approx 0 ,
\mbox{ for every }\, i=1,\ldots,n.$$
\end{theorem}
\begin{proof} If $a$ is a m-minimizer of $f$, by
Lemma~\ref{ponte2}, $a$ is a minimizer of $st(f)$. Therefore, for
all $i=1,\ldots,n$, $\displaystyle \left. \frac{\partial
st(f)}{\partial x_i} \right|_{a}=0$. By (\ref{eq3}), $$\left.
\frac{\partial f}{\partial x_i} \right|_{a}\approx 0.$$
\end{proof}
For example, the function $f$ defined in (\ref{eq2}) has neither
m-minimizers nor m-maximizers.
\section*{Acknowledgment}
This work was supported by {\it Centre for Research on
Optimization and Control} (CEOC) from the ``Funda\c{c}\~{a}o para
a Ci\^{e}ncia e a Tecnologia'' (FCT), cofinanced by the European
Community Fund FEDER/POCI 2010.
\end{document}
|
\begin{document}
\title{Mixing properties and statistical limit theorems for singular hyperbolic
flows \\ without a smooth stable foliation}
\author{V. Ara\'ujo and I. Melbourne}
\address{Vitor Ara\'ujo,
Departamento de Matem\'atica, Universidade Federal da Bahia\\
Av. Ademar de Barros s/n, 40170-110 Salvador, Brazil.}
\email{[email protected],
www.sd.mat.ufba.br/$\sim$vitor.d.araujo}
\address{Ian Melbourne,
Mathematics Institute, University of Warwick, Coventry CV4 7AL, UK}
\email{[email protected]}
\thanks{
V.A.
is partially supported by CNPq,
PRONEX-Dyn.Syst. and FAPESB (Brazil).
I.M. is partially supported by
by a European Advanced Grant StochExtHomog (ERC AdG 320977) and by CNPq
(Brazil) through PVE grant number 313759/2014-6.
We are grateful to the referee for very helpful suggestions which improved the readability of the paper.
}
\begin{abstract}
Over the last 10 years or so, advanced statistical properties, including exponential decay of correlations, have been established for certain classes of singular hyperbolic flows in three dimensions. The results apply in particular to the classical Lorenz attractor. However, many of the proofs rely heavily on the smoothness of the stable foliation for the flow.
In this paper, we show that many statistical properties hold for singular hyperbolic flows with no smoothness assumption on the stable foliation. These properties include existence of SRB measures, central limit theorems and associated invariance principles, as well as results on mixing and rates of mixing. The properties hold equally for singular hyperbolic flows in higher dimensions provided the center-unstable subspaces are two-dimensional.
\end{abstract}
\date{3 December 2017. Revised 2 March 2019.}
\maketitle
\section{Introduction} \label{sec:intro}
Singular hyperbolicity is a far-reaching generalization of Smale's notion of Axiom~A~\cite{Smale67} that allows for the inclusion of equilibria (also known as singular points or steady-states) and incorporates the classical Lorenz attractor~\cite{Lorenz63} as well as the geometric Lorenz attractors of~\cite{AfraimovicBykovSilnikov77,GuckenWilliams79}.
For three-dimensional flows, singular hyperbolic attractors are precisely the ones that are robustly transitive,
and they reduce to Axiom A attractors when there are no equilibria~\cite{MoralesPacificoPujals04}.
For the classical Lorenz attractor, strong statistical properties such as exponential decay of correlations, the central limit theorem (CLT), and associated invariance principles have been established in~\cite{AraujoM16,AraujoM17,AMV15,HM07}.
However the proofs rely heavily on the existence of a smooth stable foliation for the flow.
Various issues regarding the existence and smoothness of the stable foliation are clarified in~\cite{AraujoM17}; a topological foliation always exists, and an analytic proof of smoothness of the foliation for the classical Lorenz attractor (and nearby attractors) is given in~\cite{AraujoM17,AMV15}.
Even for three-dimensional flows, the stable foliation for a singular hyperbolic attractor need not be better than H\"older.
In this paper, we consider statistical properties for singular hyperbolic attractors that do not have a smooth stable foliation. We do not restrict to three-dimensional flows, but our main results assume that the stable foliation has codimension two.
\subsubsection*{Main results}
For codimension two singular hyperbolic attracting sets, we prove that the stable foliation is at least H\"older continuous (Theorem~\ref{thm:holder}), and using Pesin theory~\cite{BarreiraPesin} we deduce that the stable holonomies are absolutely continuous with H\"older Jacobians (Theorem~\ref{thm:H}). As a consequence of this, we obtain that the
stable holonomies for the associated Poincar\'e map are $C^{1+{\epsilon}}$
(since they are one-dimensional with H\"older Jacobians).
This extends results of~\cite{AfraimovicBykovSilnikov77,Robinson81,Shashkov94} who obtain a $C^1$ result for geometric Lorenz attractors (see the discussion after equation~(6) in~\cite{OT17}).
Quotienting out by the stable foliation, we obtain a $C^{1+{\epsilon}}$
one-dimensional expanding map.
We can now proceed following~\cite{APPV09} to obtain a spectral decomposition for the singular hyperbolic attracting set (Theorem~\ref{thm:spectral}).
To study statistical properties, we focus attention on the transitive components of a singular hyperbolic attracting set; these are called singular hyperbolic attractors. In the Axiom~A case, the CLT and associated invariance principles are well-known~\cite{DenkerPhilipp84,MT04,Ratner73} and we extend these results to general (codimension two) singular hyperbolic attractors.
In particular, as described in Section~\ref{sec:statflow}, the (functional) CLT and related results follow from~\cite{BMprep} using the results in this paper.
Moreover, many strong limit laws are obtained for the associated Poincar\'e maps in Theorem~\ref{thm:statf}.
Mixing and rates of mixing for Axiom~A attractors are less well-understood even today, but an open and dense set of Axiom~A attractors has superpolynomial decay of correlations~\cite{Dolgopyat98a,FMT07}. Theorem~\ref{thm:super} shows that the same result holds for singular hyperbolic attractors.
As a consequence (Corollary~\ref{cor:super}), we obtain the CLT and almost sure invariance principle for the time-one map of the flow, for this open and dense set of singular hyperbolic attractors and sufficiently smooth observables.
(We note that such results are much more delicate for time-one maps than for the flow and for Poincar\'e maps.)
In fact, for singular hyperbolic attractors containing at least one equilibrium and with a smooth stable foliation, mixing~\cite{LMP05}, superpolynomial decay of correlations~\cite{AMV15}, and exponential decay of correlations~\cite{AraujoM16} are automatic subject to a certain indecomposability condition (locally eventually onto). Theorem~\ref{thm:mix} yields a similar result on automatic mixing when there is not a smooth stable foliation. However, automatic rates of mixing, or any results on exponential decay of correlations, seem beyond current techniques when the stable foliation is not smooth.
\subsubsection*{Example}
In a recent paper, Ovsyannikov \& Turaev~\cite{OT17} (see also previous work of~\cite{DumortierKokubuOka95}) give an analytic proof of the existence of singular hyperbolic attractors in the extended Lorenz model
\[
\dot x = y, \quad \dot y=-\lambda y+\gamma x(1-z)-\delta x^3,
\quad \dot z=-\alpha z+\beta x^2.
\]
The attractors contain precisely one equilibrium, namely the origin, and are of geometric Lorenz type~\cite{AfraimovicBykovSilnikov77,GuckenWilliams79}.
The eigenvalues of the linearized equations at the equilibrium are close to $-1$, $-1$ and $1$ (up to a scaling) for the parameters considered in~\cite{OT17}, so the standard $q$-bunching condition~\cite{AraujoM17,HPS77} guaranteeing a $C^q$ stable foliation holds only for $q$ close to zero. In this situation it is anticipated that the foliation fails to be $C^1$ except in pathological cases.
In particular, previous results on statistical properties for singular hyperbolic flows do not apply. However, the results in the present paper do not require a smooth foliation. It follows that the attractors in~\cite{OT17} satisfy the
statistical limit laws described in this paper.
Moreover, there is an open set $\mathcal{U}$ within the space of $C^2$ flows on ${\mathbb R}^3$, containing the extended Lorenz examples of~\cite{OT17}, such that every flow in $\mathcal{U}$ satisfies these statistical limit laws. In addition, flows in an open and dense subset of $\mathcal{U}$ have superpolynomial decay of correlations.
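Purely as an illustration, and not as part of the analysis in this paper, the following minimal Python sketch integrates the extended Lorenz model above and prints the eigenvalues of the linearization at the origin. The parameter values are arbitrary placeholders for experimentation; they are not the parameter ranges treated in~\cite{OT17}.
\begin{verbatim}
# Minimal numerical sketch of the extended Lorenz model (illustrative only).
# The parameter values below are placeholders, not the ranges analysed by
# Ovsyannikov & Turaev.
import numpy as np
from scipy.integrate import solve_ivp

lam, gam, delta, alpha, beta = 1.0, 1.0, 1.0, 1.0, 1.0  # placeholder values

def vector_field(t, u):
    x, y, z = u
    return [y,
            -lam * y + gam * x * (1.0 - z) - delta * x**3,
            -alpha * z + beta * x**2]

# Linearization at the equilibrium at the origin: the Jacobian is
# [[0, 1, 0], [gam, -lam, 0], [0, 0, -alpha]], so the eigenvalues are
# -alpha and the roots of s^2 + lam*s - gam = 0.
J = np.array([[0.0, 1.0, 0.0],
              [gam, -lam, 0.0],
              [0.0, 0.0, -alpha]])
print("eigenvalues at the origin:", np.linalg.eigvals(J))

# Integrate one trajectory starting near the origin.
sol = solve_ivp(vector_field, (0.0, 200.0), [0.1, 0.0, 0.0],
                rtol=1e-9, atol=1e-12)
print("state at t = 200:", sol.y[:, -1])
\end{verbatim}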
\subsubsection*{Spectral decompositions}
Whereas the results on statistical properties for singular hyperbolic flows in this paper are completely new, we note that there are existing results on spectral decompositions~\cite{APPV09,LeplaideurYang}.
The decomposition in~\cite{APPV09} is for three-dimensional flows and our method extends~\cite{APPV09} in the more general codimension two situation. The method in~\cite{LeplaideurYang} works directly with the flow and does not require the codimension two restriction.
However~\cite{APPV09,LeplaideurYang} both make liberal use of Pesin theory, including results that seem currently unavailable in the literature. The main issue, as clarified in~\cite{AraujoM17}, is that {\em a priori} the stable lamination over a partially hyperbolic attracting set~$\Lambda$ need not cover a neighborhood of $\Lambda$. The stable bundle extends to an invariant contracting bundle over a neighborhood $U\supset\Lambda$ and this integrates to a topological foliation of~$U$. However, the complementary center-unstable bundle does not extend invariantly, so the resulting extended splitting is not invariant. This means that the application of Pesin theory in~\cite{APPV09,LeplaideurYang} is inaccurate. It is likely that the desired results hold (some aspects were extended to noninvariant splittings already in~\cite{AraujoM17}) but currently the arguments seem incomplete.
In this paper, we make the approach in~\cite{APPV09} completely rigorous by bypassing the issue of noninvariance of the extended splitting. Theorem~\ref{thm:fol} below shows that {\em a posteriori} the stable bundle restricted to $\Lambda$ integrates to a topological foliation. This relies heavily on the special structure associated to a codimension two singular hyperbolic attracting set and uses also the information about the extended bundle~\cite{AraujoM17}. Consequently, we can work with the nonextended splitting which is invariant and Pesin theory applies. Also, using~\cite{PSW97} we show that the foliation is H\"older which simplifies the arguments in~\cite{APPV09}.
\subsubsection*{Sectional hyperbolicity}
Finally, we remark on the restriction to singular hyperbolic attracting sets that are codimension two. The natural setting in general is to consider {\em sectional} hyperbolic attracting sets~\cite{MetzgerMorales08} (in the codimension two case, sectional and singular hyperbolicity are the same).
The proof of Theorem~\ref{thm:fol} (specifically Proposition~\ref{prop:cross}) relies on the restriction to codimension two. Nevertheless,
we expect that in the sectional hyperbolic setting, our results on the stable foliation should go through largely unchanged (after adapting various arguments to deal with the noninvariant splitting). However, the quotient map is higher-dimensional and so Pesin theory only gives a H\"older Jacobian; the map itself is no better than H\"older. Hence the arguments in Sections~\ref{sec:stat} and~\ref{sec:SH} on spectral decompositions and statistical properties break down; this remains the subject of future work.
\subsubsection*{}
The remainder of the paper is organized as follows.
In Section~\ref{sec:PH}, we review background material on partially hyperbolic attracting sets and singular hyperbolicity, and
recall results on stable foliations from~\cite{AraujoM17}.
In Section~\ref{sec:f}, we construct a global Poincar\'e map~$f$ associated to any partially hyperbolic attracting set, following (and modifying) the construction in~\cite{APPV09}.
Section~\ref{sec:UH} establishes that $f$ is uniformly hyperbolic (with singularities) when the attracting set is singular hyperbolic.
In Section~\ref{sec:fol}, we show that the stable lamination over an attracting codimension two singular hyperbolic set is a topological foliation. In Section~\ref{sec:regWs},
we establish H\"older regularity and absolute continuity of the stable foliation, and show that the stable holonomies have H\"older Jacobians.
Using this, we obtain a uniformly expanding
piecewise $C^{1+{\epsilon}}$ quotient map $\bar f$ in Section~\ref{sec:barf}.
Finally, in Sections~\ref{sec:stat} and~\ref{sec:SH}, we prove results on spectral decompositions, statistical limit laws, and rates of mixing, for $\bar f$, $f$, and the underlying flow.
\subsubsection*{Notation}
Let $(M,d)$ be a metric space and $\eta\in(0,1)$. Given $v:M\to{\mathbb R}$,
define ${\|v\|}_{C^\eta}=|v|_\infty+{|v|}_{C^\eta}$ where
${|v|}_{C^\eta}=\sup_{x\neq x'}|v(x)-v(x')|/d(x,x')^\eta$. We say that $v$ is $C^\eta$ and
write
$v\in C^\eta(M)$ if ${\|v\|}_{C^\eta}<\infty$.
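As a simple illustration of this notation (standard, and not needed in the sequel): for $v(x)=\sqrt{x}$ on $M=[0,1]$ we have $|v(x)-v(x')|=|x-x'|/(\sqrt{x}+\sqrt{x'})\le|x-x'|^{1/2}$, so that $v\in C^{1/2}([0,1])$ with ${\|v\|}_{C^{1/2}}=2$, whereas taking $x'=0$ shows that $v\notin C^\eta([0,1])$ for any $\eta>\frac12$.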
\section{Singular hyperbolic attracting sets}
\label{sec:PH}
In this section, we define what is understood as a singular
hyperbolic attracting set.
Throughout this paper, we restrict mainly to the case where the center-unstable subspace is two-dimensional.
Let $M$ be a compact Riemannian manifold and $\fX^r(M)$,
$r>1$, be the set of $C^r$ vector fields on $M$.
Let $Z_t$ denote the flow generated by $G\in\fX^r(M)$.
Given a compact invariant set $\Lambda$ for $G\in
\fX^r(M)$, we say that $\Lambda$ is \emph{isolated} if
there exists an open set $U\supset \Lambda$ such that
$
\Lambda =\bigcap_{t\in{\mathbb R}}Z_t(U).
$
If $U$ can be chosen so that $Z_t(U)\subset U$ for
all $t>0$, then we say that $\Lambda$ is an \emph{attracting set}.
\begin{defn}
\label{def:PH}
Let $\Lambda$ be a compact invariant set for $G \in
\fX^r(M)$. We say that $\Lambda$ is {\em partially
hyperbolic} if the tangent bundle over $\Lambda$ can be
written as a continuous $DZ_t$-invariant sum
$$
T_\Lambda M=E^s\oplus E^{cu},
$$
where $d_s=\dim E^s_x\ge1$ and $d_{cu}=\dim E^{cu}_x=2$ for $x\in\Lambda$,
and there exist constants $C>0$, $\lambda\in(0,1)$ such that
for all $x \in \Lambda$, $t\ge0$, we have
\begin{itemize}
\item uniform contraction along $E^s$:
\begin{align}\label{eq:contract}
\|DZ_t | E^s_x\| \le C \lambda^t;
\end{align}
\item domination of the splitting:
\begin{align}\label{eq:domination}
\|DZ_t | E^s_x\| \cdot \|DZ_{-t} | E^{cu}_{Z_tx}\| \le C \lambda^t.
\end{align}
\end{itemize}
We refer to $E^s$ as the stable bundle and to
$E^{cu}$ as the center-unstable bundle.
A {\em partially hyperbolic attracting set} is a partially hyperbolic set that is also an attracting set.
\end{defn}
\begin{defn} \label{def:VE}
The center-unstable bundle $E^{cu}$ is \emph{volume
expanding} if there exist $K,\theta>0$ such that
$|\det(DZ_t| E^{cu}_x)|\geq K e^{\theta t}$ for all
$x\in \Lambda$, $t\geq 0$.
\end{defn}
If $\sigma\in M$ and $G(\sigma)=0$, then $\sigma$ is called
an {\em equilibrium}. An invariant set is \emph{nontrivial}
if it is neither a periodic orbit nor an equilibrium.
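For example, the following model computation (not needed for the formal development, but useful for orientation) connects volume expansion to the eigenvalue condition in Proposition~\ref{prop:like} below. If $\sigma\in\Lambda$ is an equilibrium such that $DG(\sigma)|E^{cu}_\sigma$ has real eigenvalues $\lambda^s,\lambda^u$, then $DZ_t|E^{cu}_\sigma=e^{tDG(\sigma)|E^{cu}_\sigma}$ and Liouville's formula gives
\[
\det\big(DZ_t|E^{cu}_\sigma\big)=e^{t\operatorname{tr}(DG(\sigma)|E^{cu}_\sigma)}=e^{(\lambda^s+\lambda^u)t},
\]
so the volume expansion property in Definition~\ref{def:VE} forces $\lambda^s+\lambda^u>0$.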
\begin{defn} \label{def:singularset}
Let $\Lambda$ be a compact nontrivial invariant set for $G \in
{\fX}^r(M)$. We say that $\Lambda$ is a
\emph{singular hyperbolic set} if all equilibria
in $\Lambda$ are hyperbolic, and $\Lambda$ is partially
hyperbolic with volume expanding center-unstable bundle.
A singular hyperbolic set which is also an attracting set is
called a {\em singular hyperbolic attracting set}.
\end{defn}
\begin{rmk} \label{rmk:per}
A singular hyperbolic attracting set contains no isolated periodic orbits.
Indeed, such a periodic orbit would have to be a periodic sink, violating volume expansion.
\end{rmk}
A subset $\Lambda \subset M$ is \emph{transitive} if it has
a full dense orbit, that is, there exists $x\in \Lambda$ such that
$\overline{\{Z_tx:t\ge0\}}=\Lambda= \overline{\{Z_tx:t\le0\}}$.
\begin{defn}\label{def:attractor}
A \emph{singular hyperbolic attractor} is a transitive
singular hyperbolic attracting set.
\end{defn}
\begin{prop} \label{prop:like}
Suppose that $\Lambda$ is a singular hyperbolic attractor with $d_{cu}=2$, and
let $\sigma\in\Lambda$ be an equilibrium. Then
$\sigma$ is
\emph{Lorenz-like}. That is,
$DG(\sigma)|E^{cu}_\sigma$ has real eigenvalues $\lambda^s$, $\lambda^u$ satisfying
$-\lambda^u<\lambda^s<0<\lambda^u$.
\end{prop}
\begin{proof}
It follows from Definition~\ref{def:singularset} that $\sigma$ is a hyperbolic saddle and that at most two eigenvalues have positive real part.
If there is only one such eigenvalue $\lambda^u>0$ then the constraints on $\lambda^s$ follow from volume expansion.
Let $\gamma$ be the local stable manifold for $\sigma$.
It remains to rule out the case $\dim \gamma=\dim M-2$.
In this case, $T_p\gamma=E^s_p$ for all $p\in \gamma\cap\Lambda$ and in particular $G(p)\in E^s_p$.
Also, $G(p)\in E^{cu}_p$
(see for example~\cite[Lemma~6.1]{AraujoPacifico}),
so we deduce that
$G(p)=0$ for all $p\in \gamma\cap\Lambda$ and hence that
$\gamma\cap\Lambda=\{\sigma\}$.
On the other hand, $\Lambda$ is transitive and nontrivial, so there exists $x\in\Lambda\setminus\{\sigma\}$ such that $\sigma\in\omega(x)$.
By the local behavior of orbits near hyperbolic saddles, there exists
$p\in
(\gamma\setminus\{\sigma\})\cap\omega(x)\subset
(\gamma\setminus\{\sigma\})\cap\Lambda$ which as we have seen is impossible.
\end{proof}
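For orientation, we recall the standard example (not needed in the sequel): for the classical Lorenz equations~\cite{Lorenz63}, $\dot x=10(y-x)$, $\dot y=28x-y-xz$, $\dot z=xy-\tfrac83 z$, the linearization at the equilibrium at the origin has eigenvalues $\approx 11.83$ and $\approx -22.83$ in the $(x,y)$-plane, and $-\tfrac83\approx-2.67$ along the $z$-axis. Here $E^s_0$ is spanned by the strongly contracting eigendirection with eigenvalue $\approx-22.83$, while $DG(0)|E^{cu}_0$ has eigenvalues $\lambda^u\approx11.83$ and $\lambda^s=-\tfrac83$, so that $-\lambda^u<\lambda^s<0<\lambda^u$ and the origin is Lorenz-like.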
We end this section by recalling/extending some results from~\cite{AraujoM17}.
These results hold for general $d_{cu}\ge2$.
\begin{prop} \label{prop:Es} Let $\Lambda$ be a partially
hyperbolic attracting set. The stable bundle $E^s$ over
$\Lambda$ extends to a continuous uniformly contracting
$DZ_t$-invariant bundle $E^s$ over an open neighborhood of
$\Lambda$.
\end{prop}
\begin{proof} See~\cite[Proposition~3.2]{AraujoM17}.
\end{proof}
Let ${\mathcal D}^k$ denote the $k$-dimensional open unit disk and
let $\mathrm{Emb}^r({\mathcal D}^k,M)$ denote the set of $C^r$
embeddings $\phi:{\mathcal D}^k\to M$ endowed with the $C^r$ distance.
\begin{prop}\label{prop:Ws}
Let $\Lambda$ be a partially hyperbolic attracting set.
There exists a positively invariant neighborhood $U_0$
of $\Lambda$, and constants $C>0$, $\lambda\in(0,1)$, such
that the following are true:
\noindent(a)
For every point $x \in U_0$ there is a $C^r$ embedded $d_s$-dimensional disk
$W^s_x\subset M$, with $x\in W^s_x$, such that
\begin{enumerate}
\item $T_xW^s_x=E^s_x$.
\item $Z_t(W^s_x)\subset W^s_{Z_tx}$ for all $t\ge0$.
\item $d(Z_tx,Z_ty)\le C\lambda^t d(x,y)$ for all $y\in W^s_x$, $t\ge0$.
\end{enumerate}
\noindent(b) The disks $W^s_x$ depend continuously on $x$ in the $C^0$ topology: there is a continuous map $\gamma:U_0\to {\rm Emb}^0({\mathcal D}^{d_s},M)$ such that
$\gamma(x)(0)=x$ and $\gamma(x)({\mathcal D}^{d_s})=W^s_x$.
Moreover, there exists $L>0$ such that $\operatorname{Lip}\gamma(x)\le L$ for all $x\in U_0$.
\noindent(c) The family of disks $\{W^s_x:x\in U_0\}$ defines a topological foliation of $U_0$.
\end{prop}
\begin{proof} See~\cite[Theorem~4.2 and Lemma~4.8]{AraujoM17}.
\end{proof}
The splitting $T_\Lambda M=E^s\oplus E^{cu}$ extends continuously to a
splitting $T_{U_0} M=E^s\oplus E^{cu}$ where $E^s$ is the invariant uniformly contracting bundle in Proposition~\ref{prop:Es}.
(In general, $E^{cu}$ is not invariant.)
Given $a>0$, we define the {\em center-unstable cone field},
\[
{\mathcal C}^{cu}_x(a)=\{v= v^s+v^{cu}\in E^s_x\oplus E^{cu}_x:\|v^s\|\le a\|v^{cu}\|\}, \quad x\in U_0.
\]
\begin{prop} \label{prop:Ccu}
Let $\Lambda$ be a partially hyperbolic attracting set.
There exists $T_0>0$ such that for any $a>0$, after possibly shrinking $U_0$,
\[
DZ_t\cdot {\mathcal C}^{cu}_x(a)\subset {\mathcal C}^{cu}_{Z_tx}(a) \quad\text{for all $t\ge T_0$, $x\in U_0$}.
\]
\end{prop}
\begin{proof} See~\cite[Proposition~3.1]{AraujoM17}.
\end{proof}
\begin{prop} \label{prop:VE}
Let $\Lambda$ be a singular hyperbolic attracting set.
After possibly increasing $T_0$ and shrinking $U_0$, there exist
constants $K,\theta>0$ such that
$|\det(DZ_t| E^{cu}_x)|\geq K \, e^{\theta t}$ for all
$x\in U_0$, $t\geq 0$.
\end{prop}
\begin{proof}
Let $K_0,\theta_0>0$ be the constants from Definition~\ref{def:VE}.
Fix $a>0$ and $T_0$ as in Proposition~\ref{prop:Ccu}.
We may suppose without loss that $K_0<2$ and that $K_0 e^{\theta_0 T_0}>2$.
By continuity, we may assume that for every $x\in U_0$ there exists $y\in\Lambda$ such that
\[
|\det(DZ_t| P)|\ge {\textstyle \frac12}|\det(DZ_t| E^{cu}_y)|
\ge {\textstyle \frac12}K_0 e^{\theta_0t},
\]
for all $t\in[0,T_0]$ and every $d_{cu}$-dimensional subspace
$P\subset {\mathcal C}^{cu}_x(a)$.
Write $t=mT_0+r$ where $m\in{\mathbb N}$, $r\in(0,T_0]$.
Since $Z_{jT_0}x\in U_0$ for all $j\ge0$
by invariance of $U_0$, and since $DZ_{jT_0}P\subset {\mathcal C}^{cu}_{Z_{jT_0}x}(a)$
for all $j\ge0$ by Proposition~\ref{prop:Ccu},
it follows inductively that
\[
|\det(DZ_t|P)|\ge
({\textstyle \frac12}K_0 e^{\theta_0r})
({\textstyle \frac12}K_0 e^{\theta_0T_0})^m
\ge ({\textstyle \frac12}K_0)^{1+t/T_0}e^{\theta_0t}
=Ke^{\theta t},
\]
where $\theta=T_0^{-1}\log(\frac12 K_0e^{\theta_0 T_0})>0$
and $K=\frac12 K_0>0$.
Taking $P=E^{cu}_x$ yields the desired result.
\end{proof}
\section{Global Poincar\'e map $f:X\to X$}
\label{sec:f}
In this section, we
suppose that $\Lambda$ is a partially hyperbolic attracting set, and recall
how
to construct a piecewise smooth Poincar\'e map $f:X\to X$ preserving a contracting stable foliation ${\mathcal{W}}^s(X)$.
This largely follows~\cite{APPV09} (see also~\cite[Chapter~6]{AraujoPacifico})
but with slight modifications; the details enable us to establish notation required for later sections.
Mainly for notational convenience we restrict to the case $d_{cu}=2$.
\subsection{Construction of the global cross-section $X$}
Let $y\in\Lambda$ be a regular point (not an equilibrium).
There exists an open set (flow box) $V_y\subset U_0$ containing $y$ such that the flow on $V_y$ is diffeomorphic to a linear flow.
More precisely, let ${\mathcal D}$ denote the $(\dim M-1)$-dimensional unit disk
and fix ${\epsilon}_0\in(0,1)$ small.
There is a diffeomorphism $\chi:{\mathcal D}\times(-{\epsilon}_0,{\epsilon}_0)\to V_y$
with $\chi(0,0)=y$ such that
$\chi^{-1}\circ Z_t\circ\chi(z,s)=(z,s+t)$.
Define the cross-section $\Sigma_y=\chi({\mathcal D}\times\{0\})$.
For each $x\in\Sigma_y$, let $W^s_x(\Sigma_y)=
\bigcup_{|t|<{\epsilon}_0}Z_t(W^s_x)\cap \Sigma_y$.
This defines a topological foliation ${\mathcal{W}}^s(\Sigma_y)$ of $\Sigma_y$.
We can identify $\Sigma_y$ diffeomorphically with $(-1,1)\times{\mathcal D}^{d_s}$.
The stable boundary
$\partial^s\Sigma_y\cong \{\pm1\}\times {\mathcal D}^{d_s}$ consists of two stable leaves.
Let ${\mathcal D}_{1/2}^{d_s}$ denote the open disk of radius $\frac12$ in ${\mathbb R}^{d_s}$.
Define the {\em subcross-section}
$\Sigma'_y\cong (-1,1)\times {\mathcal D}_{1/2}^{d_s}$,
and the corresponding subflow box $V'_y\cong\Sigma'_y\times(-{\epsilon}_0,{\epsilon}_0)$ consisting of trajectories in $V_y$ that pass through $\Sigma_y'$.
For each equilibrium $\sigma\in\Lambda$, we let $V_\sigma$ be an open neighborhood of $\sigma$ on which the flow is linearizable.
Let $\gamma^s_\sigma$ and $\gamma^u_\sigma$ denote the local stable and unstable manifolds of $\sigma$
within $V_\sigma$; trajectories starting in $V_\sigma$ remain in $V_\sigma$ for all future time if and only if they lie in $\gamma^s_\sigma$.
\begin{rmk}
Note that $W^s_\sigma$ denotes the strong stable manifold
of $\sigma$. In general,
$\dim \gamma^s_\sigma\ge\dim W^s_\sigma= d_s$. (In the
case of a Lorenz-like singularity,
$\dim \gamma^s_\sigma=d_s+1$.)
\end{rmk}
Define $V_0=\bigcup_\sigma V_\sigma$. We shrink the
neighborhoods $V_\sigma$ so that (i) they are disjoint, (ii)
$\Lambda\not\subset V_0$, and (iii)
$\gamma^u_\sigma\cap\partial V_\sigma\subset V'_y$ for some
regular point $y=y(\sigma)$.
By compactness of $\Lambda$, there exists $\ell\ge1$ and
regular points $y_1,\dots,y_\ell\in\Lambda$ such that
$\Lambda\setminus V_0 \subset \bigcup_{j=1}^\ell V'_{y_j}$.
We enlarge the set $\{y_j\}$ to include the points
$y(\sigma)$ mentioned in~(iii) above. Adjust the positions
of the cross-sections $\Sigma_{y_j}$ if necessary so that
they are disjoint, and define the global cross-section
\[
\textstyle X=\bigcup_{j=1}^\ell \Sigma_{y_j}.
\]
In the remainder of the paper, we often modify the choices
of $U_0$ and $T_0$. However, the choices of $V_{y_j}$,
$\Sigma_{y_j}$ and $X$ remain unchanged from now on and
correspond to our current choice of $U_0$ and $T_0$. To
avoid confusion, all subsequent choices will be labelled
$U_1\subset U_0$ and $T_1\ge T_0$. In particular, we
suppose from now on that
$U_1\subset V_0 \cup \bigcup_{j=1}^\ell V'_{y_j}$.
\subsection{Definition of the Poincar\'e map}
By Proposition~\ref{prop:Ws}, for any $\delta>0$ we can
choose $T_1\ge T_0$ such that
\begin{align} \label{eq:delta} \operatorname{diam}
Z_t(W^s_x(\Sigma_{y_j}))<\delta, \quad\text{for all}\;
x\in \Sigma_{y_j},\,j=1,\dots,\ell,\,t>T_1.
\end{align}
Define
\[
\textstyle \Gamma_0=\{x\in X:Z_{T_1+1}(x)\in\bigcup_\sigma\gamma^s_\sigma\}, \qquad
X'=X\setminus\Gamma_0.
\]
If $x\in X'$, then the forward trajectory of $Z_{T_1+1}(x)$ cannot remain inside $V_0$, so there exists $t>T_1+1$
and $j=1,\dots,\ell$ such that
$Z_tx\in V'_{y_j}$. Since ${\epsilon}_0<1$, there exists $t>T_1$ such that $Z_tx\in\Sigma_{y_j}'$.
Hence for $x\in X'$, we can define
\[
\textstyle f(x)=Z_{\tau(x)}(x)\qquad\text{where}\qquad
\tau(x)=\inf\{t>T_1:Z_tx\in\bigcup_{j=1}^\ell \overline{\Sigma_{y_j}'}\}.
\]
In this way we obtain a piecewise $C^r$ global Poincar\'e map
$f:X'\to X$ with piecewise $C^r$ roof function
$\tau:X'\to[T_1,\infty)$.
\begin{lemma} \label{lem:log}
If $\Lambda$ contains no equilibria (so $\Gamma_0=\emptyset$), then $\tau\le T_1+2$.
In general, there exists a constant $C>0$ such that
\[
\tau(x)\le -C\log\operatorname{dist}(x,\Gamma_0)\quad\text{for all $x\in X'$.}
\]
\end{lemma}
\begin{proof}
This is a standard result so we sketch the arguments.
If $Z_{T_1+1}x\in V_{y_j}'$ for some $j$, then $Z_tx\in \Sigma_{y_j}'$
for some $t\in(T_1+1-{\epsilon}_0,T_1+1+{\epsilon}_0)$ so $\tau(x)\le T_1+2$.
Otherwise, $Z_{T_1+1}x\in V_\sigma\subset V_0$ for some equilibrium $\sigma$, and we define
\[
\tau_0(x)=\sup\{t\in[0,T_1+1]:Z_tx\not\in V_\sigma\},\qquad
\tau_1(x)=\sup\{t\ge T_1+1:Z_tx\in V_\sigma\}.
\]
Note that $Z_{\tau_1(x)}(x)\in \bigcup_jV_{y_j}'$
so $\tau(x)\le \tau_1(x)+1\le \tau_1(x)-\tau_0(x)+T_1+2$.
By the Hartman-Grobman Theorem, the flow in $V_\sigma$ is
homeomorphic (by a time-preserving conjugacy) to the
linearized flow $\dot x=DG(\sigma) x=(A\oplus E)x$ where
$A$ has eigenvalues with negative real part and $E$ has
eigenvalues with positive real part. After writing $E$ in
Jordan normal form, a standard and elementary argument shows
that the ``time of flight'' of trajectories in $V_\sigma$
satisfies
$\tau_1(x)-\tau_0(x)\le
-C'\log\operatorname{dist}(Z_{\tau_0(x)}(x),\Gamma')$
where $\Gamma'$ denotes the local stable manifold of
$\sigma$ in the linear flow.
Finally, we can suppose without loss that
$\partial V_\sigma$ is smooth so that the initial transition
$x\mapsto Z_{\tau_0(x)}(x)$ is a diffeomorphism in a
neighborhood of $\Gamma_0$. Hence
$\operatorname{dist}(Z_{\tau_0(x)}(x),\Gamma')\approx
\operatorname{dist}(x,\Gamma_0)$ up to uniform constants.
\end{proof}
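To illustrate the logarithmic estimate in the simplest model situation (a hypothetical linear flow, included only for orientation): for $\dot x_1=\lambda^u x_1$, $\dot x_2=\lambda^s x_2$ with $\lambda^s<0<\lambda^u$ on the box $\{|x_1|\le1,\,|x_2|\le1\}$, a trajectory entering at $(x_1(0),x_2(0))$ with $x_1(0)\neq0$ exits through $\{|x_1|=1\}$ at time
\[
t=-\frac{1}{\lambda^u}\log|x_1(0)|=-\frac{1}{\lambda^u}\log\operatorname{dist}\big((x_1(0),x_2(0)),\{x_1=0\}\big),
\]
where $\{x_1=0\}$ plays the role of the local stable manifold $\Gamma'$. The proof above reduces the general case to an estimate of this type, up to uniform constants, via the time-preserving conjugacy to the linearized flow.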
\begin{rmk} \label{rmk:log}
It is immediate from the proof of Lemma~\ref{lem:log} that
$\tau(x)\to\infty$ as $\operatorname{dist}(x,\Gamma_0)\to0$.
\end{rmk}
Define the topological foliation ${\mathcal{W}}^s(X)=\bigcup_{j=1}^\ell {\mathcal{W}}^s(\Sigma_{y_j})$ of $X$ with leaves $W^s_x(X)$ passing through each $x\in X$.
\begin{prop} \label{prop:invf}
For $T_1$ sufficiently large,
$f(W^s_x(X))\subset W^s_{fx}(X)$ for all $x\in X'$.
\end{prop}
\begin{proof}
By definition of $V_{y_j}'$, it follows from~\eqref{eq:delta} that we can
choose $T_1$ large (and hence $\delta$ small) such that
$W^s_{fx}(X)\subset V_{y_j}$
whenever $fx\in V'_{y_j}$.
The result follows from this by definition of ${\mathcal{W}}^s(X)$ and flow invariance
of ${\mathcal{W}}^s$.
\end{proof}
Define $\partial^s X=\bigcup_{j=1}^\ell \partial^s\Sigma_{y_j}$ and let
\[
\Gamma=\Gamma_0\cup\Gamma_1,\qquad \Gamma_1=\{x\in X':fx\in\partial^sX\}.
\]
\begin{prop} \label{prop:Gamma}
$\Gamma$ is a finite union of stable disks $W^s_x(X)$, $x\in X$.
\end{prop}
\begin{proof}
It is clear that $W^s_x(X)\subset \Gamma$ for all $x\in\Gamma$.
Also, if $x_0\not\in\Gamma$ then $fx_0=Z_{\tau(x_0)}(x_0)\in \Sigma'$ for some $\Sigma'\in\{\Sigma'_{y_j}\}$. For $x$ close to $x_0$, it follows from continuity of the flow that
$fx\in\Sigma'$ (with $\tau(x)$ close to $\tau(x_0)$).
Hence $x\not\in\Gamma$ and so $\Gamma$ is closed.
It remains to rule out the possibility that a sequence of stable disks $W^s_{x_n}(X)$, $x_n\in\Gamma$, accumulates on $W^s_{x_0}(X)$ where $x_0=\lim_{n\to\infty}x_n$.
In showing this, it is useful to note that if $Z_tx\in V'_y$ then $Z_sx\in \Sigma'_y$ for some $s\in(t-1,t+1)$. In particular, if $Z_tx\in V'_y$ for some $t\ge T_1+1$, then
$\tau(x)\le t+1$.
There are two cases to consider:
\noindent
{\bf Case 1:}
$Z_{T_2}x_0\in V'_y$ for some
$T_2\ge T_1+1$, $y\in\{y_1,\dots,y_\ell\}$. In this case, restricting to large $n$ we have $Z_{T_2}x_n\in V'_y$, and hence $\tau(x_n)\le T_2+1$.
It follows that $\bigcup_n W^s_{x_n}(X)\subset X\cap\bigcup_j\bigcup_{t\in[0,T_2+1]}Z_{-t}(\partial^s\Sigma_{y_j})$. But this is a compact submanifold of $X$ with the same dimension $d_s$ as the stable disks, so $\{x_n\}$ is finite.
\noindent{\bf Case 2:}
$Z_tx_0\in V'_\sigma$ for all $t\ge T_1+1$ for some equilibrium $\sigma$.
Note that $Z_tx_0\in\gamma_\sigma^s$ for all $t\ge T_1+1$.
As in Case 1, we can easily rule out accumulations when $\tau(x_n)\le T_1+1$ so we can suppose that $\tau(x_n)>T_1+1$. Also, $\gamma^s_\sigma\cap Z_{T_1+1}(X)$ is a compact submanifold of dimension $d_s$, so $Z_{T_1+1}x_n\in V'_\sigma\setminus \gamma^s_\sigma$.
Hence the trajectory through $Z_{T_1+1}x_n$ eventually leaves $V'_\sigma$ close to $\gamma^u_\sigma$. Such trajectories immediately enter the flow box $V'_{y(\sigma)}$ and hence hit $\Sigma'_{y(\sigma)}$. In particular, $f(x_n)\in \Sigma'_{y(\sigma)}$ and $x_n\not\in\Gamma$.
\end{proof}
Let $X''=X\setminus\Gamma$.
Then $X''=S_1\cup\dots\cup S_m$ for some $m\ge1$, where each $S_i$ is homeomorphic to $(-1,1)\times {\mathcal D}^{d_s}$.
We call these regions {\em smooth strips}.
Note that $f|_{S_i}:S_i\to X$ is a diffeomorphism onto its image and
$\tau|_{S_i}:S_i\to[T_1,\infty)$ is smooth for each $i$.
The foliation ${\mathcal{W}}^s(X)$ restricts to a foliation ${\mathcal{W}}^s(S_i)$ on each $S_i$.
\begin{rmk} \label{rmk:X} In future sections, it may be
necessary to increase $T_1$ leading to changes to $f$,
$\tau$, $\Gamma$ and $\{S_i\}$ (and the constant $C$ in
Lemma~\ref{lem:log}). However the global cross-section
$X=\bigcup\Sigma_{y_j}$ continues to remain fixed
throughout the paper.
\end{rmk}
\section{Uniform hyperbolicity of the Poincar\'e map}
\label{sec:UH}
Let $\Lambda$ be a singular hyperbolic attracting set. We continue to assume
$d_{cu}=2$ for notational simplicity.
In this section, we show that for $T_1$ sufficiently large, the global Poincar\'e map
$f:X'\to X$ constructed in Section~\ref{sec:f} is uniformly hyperbolic
(with singularities). (As noted in Remark~\ref{rmk:X}, the global cross-section $X=\bigcup\Sigma_{y_j}$ is independent of $T_1$.)
Let $S\in\{S_i\}$ be one of the smooth strips from the end of Section~\ref{sec:f}.
There exist cross-sections
$\Sigma$, $\widetilde\Sigma\in \{\Sigma_{y_j}\}$ such that
$S\subset\Sigma$ and $f(\Sigma)\subset\widetilde\Sigma$.
The splitting $T_{U_0}M=E^s\oplus E^{cu}$ induces a continuous
splitting $T\Sigma=E^s(\Sigma)\oplus E^u(\Sigma)$ defined by
\begin{align*}
E^s_x(\Sigma)=(E^s_x\oplus{\mathbb R}\{G(x)\})\cap T_x{\Sigma}
\quad\mbox{and}\quad
E^u_x(\Sigma)=E^{cu}_x\cap T_x{\Sigma},\quad x\in\Sigma.
\end{align*}
The analogous definitions apply to $\widetilde\Sigma$.
For each $y\in\widetilde\Sigma$,
define the projection $\pi_y:T_yM=T_y\widetilde\Sigma\oplus{\mathbb R}\{G(y)\}\to T_y\widetilde\Sigma$.
Also, for $x\in\Sigma$, define the projection
$\hat\pi_x:E^s_x\oplus{\mathbb R}\{G(x)\}\to E_x^s$.
By finiteness of the set of cross-sections $\{\Sigma_{y_j}\}$, there is a universal constant $C_1\ge1$ such that
\begin{align} \label{eq:proj} \nonumber
& \|\pi_{y}v\|\le C_1\|v\|\quad\text{for all $v\in T_yM$},
\\
&
\|\hat \pi_xv\|\le C_1\|v\|\quad\text{for all $v\in E^s_x\oplus{\mathbb R}\{G(x)\}$}.
\end{align}
\begin{prop}\label{prop:secUH}
(a)
$Df\cdot E^s_x(\Sigma) = E^s_{fx}(\widetilde\Sigma)$ for all $x\in S$, and
$Df\cdot E^u_x(\Sigma) = E^u_{fx}(\widetilde\Sigma)$ for all $x\in\Lambda\cap S$.
\\
(b) Let $\lambda_1\in(0,1)$.
For $T_1$ sufficiently large,
if $\inf \tau>T_1$, then for all $S\in\{S_i\}$,
\[
\|Df | E^s_x(\Sigma)\| \le \lambda_1\quad\text{and}\quad
\|Df | E^u_x(\Sigma)\| \ge \lambda_1^{-1} \quad\text{for all $x\in S$}.
\]
\end{prop}
\begin{proof}
(a) For $x\in S$, we have that $Df(x):T_x\Sigma\to T_{fx}\widetilde\Sigma$ is given by
\begin{align} \label{eq:Df}
Df(x)= D(Z_{\tau(x)}(x)) =
DZ_{\tau(x)}(x)+G(fx) D\tau(x).
\end{align}
Let $v\in E^s_x(\Sigma)\subset E^s_x+{\mathbb R}\{G(x)\}$.
Then using $DZ_t$-invariance of $E^s$ on $U_0$ and of the flow direction,
\[
Df(x)v\in DZ_{\tau(x)}(x)E^s_x+DZ_{\tau(x)}(x){\mathbb R}\{Gx\}+{\mathbb R}\{G(fx)\}
\subset E^s_{fx}+{\mathbb R}\{G(fx)\},
\]
so $Df(x)v\in (E^s_{fx}+{\mathbb R}\{G(fx)\})\cap T_{fx}\widetilde\Sigma=E^s_{fx}(\widetilde\Sigma)$.
Similarly, for $x\in \Lambda\cap S$ and $v\in E^u_x(\Sigma)\subset E^{cu}_x$, using $DZ_t$-invariance of $E^{cu}$ on $\Lambda$ and the fact that the flow direction lies in $E^{cu}$,
\[
Df(x)v\in DZ_{\tau(x)}(x)E^{cu}_x+{\mathbb R}\{G(fx)\} \subset E^{cu}_{fx},
\]
so $Df(x)v\in E^{cu}_{fx}\cap T_{fx}\widetilde\Sigma=E^u_{fx}(\widetilde\Sigma)$.
\noindent (b)
By~\eqref{eq:Df} and the definition of $\pi_y$,
\begin{align} \label{eq-Df}
Df(x)= \pi_{fx}Df(x)=\pi_{fx}DZ_{\tau(x)}(x)\quad\text{
for $x\in S$}.
\end{align}
Using the definition of $\hat\pi_x$, for $v\in E^s_x(\Sigma)\subset E^s_x\oplus {\mathbb R}\{G(x)\}$,
\begin{align*}
\|Df(x)v\|
=\|\pi_{fx}DZ_{\tau(x)}(x)\hat\pi_xv\|
\le C_1^2\|DZ_{\tau(x)}(x)| E^s_x\|\,\|v\|,
\end{align*}
by~\eqref{eq:proj}.
It follows that
\[
\|Df | E^s_x(\Sigma)\| \le C_1^2C\lambda^{T_1},
\]
where $C>0$, $\lambda\in(0,1)$ are as in~\eqref{eq:contract}.
The first estimate in (b) is immediate for $T_1$ large enough.
For the second estimate,
define $P=DZ_{\tau(x)}E^{cu}_x$ and write $DZ_{\tau(x)}(x):E^{cu}_x\to P$ in coordinates corresponding to the splittings
\[
E^{cu}_x=E^u_x(\Sigma)\oplus {\mathbb R}\{G(x)\}, \qquad
P=(P\cap T_{fx}\widetilde\Sigma)\oplus {\mathbb R}\{G(fx)\}.
\]
In these coordinates, it follows from invariance and neutrality of the flow direction that
\[
DZ_{\tau(x)}(x)= \left(\begin{array}{cc} a_{11}(x) & 0 \\ a_{21}(x) & a_{22}(x) \end{array}\right),
\]
where $\sup_x |a_{22}(x)|\le C_2$ for some
constant $C_2>0$.
Moreover,
by~\eqref{eq-Df},
\[
a_{11}(x)=\pi_{fx}DZ_{\tau(x)}(x)|_{E^u_x(\Sigma)}=Df(x)|_{E^u_x(\Sigma)}.
\]
Hence by Proposition~\ref{prop:VE},
\begin{align*}
|Df(x)| E^u_x(\Sigma)| =
|a_{11}(x)| & \ge C_2^{-1}|\det DZ_{\tau(x)}(x)| E^{cu}_x|
\\ & \ge
C_2^{-1}K e^{\theta \tau(x)}
\ge C_2^{-1}K e^{\theta T_1}
\ge \lambda_1^{-1},
\end{align*}
for $T_1$ sufficiently large.
\end{proof}
Next, for $a>0$ we define the {\em unstable cone field}
\begin{align*}
{\mathcal C}^u_x(\Sigma,a)=\{w=w^s+w^u\in E^s_x(\Sigma)\oplus E^u_x(\Sigma):
\|w^s\| \le a \|w^u\| \}, \quad x\in \Sigma.
\end{align*}
\begin{prop} \label{prop:sec-cone}
For any $a>0$, $\lambda_1\in(0,1)$, we can increase $T_1$ and shrink $U_1$ such that if $\inf\tau>T_1$ then for all $S\in\{S_i\}$
\\
(a)
$Df(x)\cdot {\mathcal C}^u_x(\Sigma,a) \subset {\mathcal C}^u_{fx}(\Sigma,a)$
for all $x\in S$.
\\
(b)
$\| Df(x)w\| \ge \lambda_1^{-1} \|w\|$
for all $x\in S$, $w\in {\mathcal C}^u_x(\Sigma,a)$.
\end{prop}
\begin{proof}
Let $w=w^s+w^u\in{\mathcal C}^u_x(\Sigma,a)$.
The estimates in Proposition~\ref{prop:secUH}(b) hold with $\lambda_1=1$, so
\[
\|Df(x)w^s\|\le \|w^s\|\le a\|w^u\|\le a\|Df(x)w^u\|,
\]
proving (a).
\\
(b)
Let $\lambda_1\in(0,1)$ be the constant in Proposition~\ref{prop:secUH}(b).
For $w\in{\mathcal C}^u_x(\Sigma,a)$,
\[
\|Df(x)w\|\ge
(1-a)\lambda_1^{-1}\|w^u\|
\ge (1-a)(1+a)^{-1}\lambda_1^{-1}\|w\|.
\]
Since $\lambda_1$ can be chosen arbitrarily small,
the result follows with a new value of $\lambda_1$.
\end{proof}
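The following toy linear example (hypothetical, and independent of the constructions above) may help to visualize the two cone properties. Let $A=\operatorname{diag}(\lambda_1,\lambda_1^{-1})$ act on ${\mathbb R}^2=E^s\oplus E^u$ with $\lambda_1\in(0,1)$, and let $w=(w^s,w^u)$ satisfy $\|w^s\|\le a\|w^u\|$. Then $Aw=(\lambda_1w^s,\lambda_1^{-1}w^u)$ with
\[
\|\lambda_1w^s\|\le\lambda_1^2\,a\,\|\lambda_1^{-1}w^u\|\le a\,\|\lambda_1^{-1}w^u\|
\qquad\text{and}\qquad
\|Aw\|\ge\lambda_1^{-1}\|w^u\|\ge(1+a)^{-1}\lambda_1^{-1}\|w\|,
\]
so the cone of width $a$ is mapped strictly inside itself and vectors in the cone are uniformly expanded. Proposition~\ref{prop:sec-cone} establishes the same two properties for $Df$, with additional constants arising from the projections in~\eqref{eq:proj}.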
Taking unions over smooth strips $S$ and cross-sections $\Sigma$, we obtain a global
continuous uniformly hyperbolic splitting
\[
TX''=E^s(X)\oplus E^u(X),
\]
with the following properties:
\begin{thm} \label{thm:global}
The stable bundle $E^s(X)$ and the restricted splitting
$T_\Lambda X''=E^s_\Lambda(X)\oplus E^u_\Lambda(X)$ are
$Df$-invariant.
Moreover,
for fixed $a>0$, $\lambda_1\in(0,1)$, we can arrange that
\[
Df\cdot {\mathcal C}^u_x(X,a)\subset {\mathcal C}^u_{fx}(X,a)\quad\text{and}\quad
\|Df(x)w\|\ge \lambda_1^{-1}\|w\|
\]
for all $x\in X''$, $w\in{\mathcal C}^u_x(X,a)$.
\qed
\end{thm}
\section{The stable lamination is a topological foliation}
\label{sec:fol}
The stable manifold theorem guarantees the existence of a $Z_t$-invariant stable lamination
consisting of smoothly embedded disks $W^s_x$ through each point $x\in\Lambda$. For general partially hyperbolic attracting sets, there is no guarantee that $\{W^s_x:x\in\Lambda\}$ defines a topological foliation in an open neighborhood of $\Lambda$.
However, in this section we show that this is indeed the case under our assumptions that $\Lambda$ is singular hyperbolic with $d_{cu}=2$:
\begin{thm} \label{thm:fol}
Let $\Lambda$ be a singular hyperbolic attracting set with $d_{cu}=2$.
Then the stable lamination $\{W^s_x:x\in\Lambda\}$ is a topological foliation of an open neighborhood of~$\Lambda$.
\end{thm}
The method of proof is to show that $\{W^s_x:x\in\Lambda\}$ coincides with the topological foliation
$\{W_x^s:x\in U_0\}$ in Proposition~\ref{prop:Ws}(c).
In particular, we have {\em a posteriori} that $\Lambda\subset\operatorname{Int} \bigcup_{x\in\Lambda}W^s_x$. The proof shows that for every $x$ in an open neighborhood of $\Lambda$, there exists $z\in\Lambda$ such that $x\in W^s_z$ (and hence $W^s_x=W^s_z$).
Fix $a>0$ as in Theorem~\ref{thm:global}.
A smooth curve $\gamma:[0,1]\to \Sigma\subset X$ is called a {\em $u$-curve} if
$D\gamma(t)\in {\mathcal C}^u_{\gamma(t)}(\Sigma,a)$ for all $t\in[0,1]$.
We say that a $u$-curve $\gamma$ contained in $X$
\emph{crosses} a smooth strip $S$ if each stable leaf
$W^s_x(S)$ intersects $\gamma$ in a unique point.
\begin{prop}\label{prop:cross}
For every $u$-curve $\gamma_0$ there exists $n\ge1$ and
a restriction $\hat\gamma\subset\gamma_0$ so that
$f^n|_{\hat\gamma}: \hat\gamma\to f^n\hat\gamma$ is a
diffeomorphism and $f^n\hat\gamma$ crosses $S_j$
for some $j$.
\end{prop}
\begin{proof}
We choose $\lambda_1\in(0,\frac14]$.
Let $S\in\{S_1,\dots,S_m\}$ and let
$\gamma$ be a $u$-curve in $S$ with length $|\gamma|$.
We consider three possibilities:
\begin{itemize}
\item[(i)] $f\gamma\subset S_i$ for some $i$.
In this case $|f\gamma|\ge 4|\gamma|$ by Theorem~\ref{thm:global}.
\item[(ii)] $f\gamma$ intersects $\bigcup \partial S_i$ in precisely one point $q$. In this case at least one of the connected components of
$f\gamma\setminus \{q\}$ has length at least $2|\gamma|$.
\item[(iii)] $f\gamma$ intersects $\bigcup \partial S_i$ in at least two points.
\end{itemize}
In case~(iii), we are finished with $n=1$.
In the other cases, we can pass to a restriction $\tilde\gamma$ such that
$\tilde\gamma$ and $f\tilde\gamma$ lie in smooth strips with
$|f\tilde\gamma|\ge 2|\gamma|$.
By Theorem~\ref{thm:global},
$f\tilde\gamma$ is a $u$-curve so we can repeat the procedure.
After one such repetition,
either the process has terminated with $n=2$ or
there is a restriction $\tilde\gamma$ such that
$\tilde\gamma$ and $f^2\tilde\gamma$ lie in smooth strips with
$|f^2\tilde\gamma|\ge 4|\gamma|$.
Since $X$ is bounded, the process terminates in finitely many steps.
\end{proof}
\begin{prop}\label{prop:denseper}
There exists a finite set $\{p_1,\dots,p_k\}\subset X\cap\Lambda$
such that each $p_i$ is a periodic point for $f$ and
$\bigcup_{n\ge0}f^{-n}\big(\bigcup_{i=1}^k W^s_{p_i}(X)\big)$
is dense in $X$.
\end{prop}
\begin{proof}
Let $\gamma_0$ be a $u$-curve lying in a smooth strip.
By Proposition~\ref{prop:cross}, $f^{n_1}\gamma_0$ crosses a smooth strip for some $n_1\ge1$.
Moreover, there exists a restriction $\tilde\gamma_0\subset\gamma_0$ such that
$f^{n_1}$ maps $\tilde\gamma_0$ diffeomorphically inside this strip. Applying Proposition~\ref{prop:cross}
again, we obtain $n_2>n_1$ such that $f^{n_2}\gamma_0$ crosses a strip.
Inductively, we obtain $1\le n_1<n_2<\cdots$ such that
$f^{n_j}\gamma_0$ crosses a strip for each $n_j$.
Since the number of smooth strips is finite, there exists $1\le q_1<q_2$ such that
$f^{q_1}\gamma_0$ and $f^{q_2}\gamma_0$ cross the same smooth strip~$S$.
Let $q=q_2-q_1$, $\gamma=f^{q_1}\gamma_0$. Choose a restriction
$\tilde\gamma$ of $\gamma$ such that
$f^q|_{\tilde\gamma}: \tilde\gamma \to f^q\tilde\gamma$
is a diffeomorphism and
$f^q\tilde\gamma$ crosses $S$.
Shrink $\gamma$ and $\tilde\gamma$ if necessary so that
$\gamma$ and $f^q\tilde\gamma$ cross $\overline{S}$ and are contained in $\overline{S}$.
Define the surjection $g:\tilde\gamma\to \gamma$ such that $g(x)$ is the unique point where $W^s_{f^qx}(X)$ intersects $\gamma$.
Since $W^s(X)$ restricts to a topological foliation of $S$, it follows that $g$ is continuous. Also $\tilde\gamma\subset\gamma$ are one-dimensional curves, so by the intermediate value theorem
$g$ possesses a fixed point $x_0\in\overline{\tilde\gamma}$.
Since $g(x_0)=x_0$, it follows that $f^qx_0\in W^s_{x_0}(X)$ and hence that
$f^q(W^s_{x_0}(X))\subset W^s_{x_0}(X)$.
By~\eqref{eq:delta}, $f^q:W^s_{x_0}(X)\to W^s_{x_0}(X)$ is a strict contraction, so
$f^qp=p$ for some $p\in W^s_{x_0}(X)$. In particular, $p$ is a periodic point for $f$ lying in $X\cap U_0$.
Since $\Lambda$ is an attracting set, $p\in X\cap\Lambda$.
Moreover, $f^{q_1}\gamma_0$ intersects $W^s_p(X)$.
Starting with a new $u$-curve $\gamma_0'$ and proceeding as before, either
$f^n\gamma_0'$ crosses $S$ and hence intersects $W^s_p(X)$ for some $n\ge0$, or we can construct a new periodic orbit $p'$ in a new smooth strip such that $f^n\gamma_0'$ intersects $W^s_{p'}(X)$. In this way we obtain periodic points $p_1,\dots,p_k$
such that every $u$-curve eventually intersects $\bigcup_{i=1}^k W^s_{p_i}(X)$
under iteration. Since $u$-curves are dense and arbitrarily short, the result follows.
\end{proof}
\begin{rmk} The periodic points constructed in the proof
of Proposition~\ref{prop:denseper} lie in distinct
smooth strips, so $k\le m$. The proof does not show
that each strip contains a periodic point.
\end{rmk}
\begin{prop}\label{prop:fol}
For each $x\in X$ there exists $y\in X\cap\Lambda$ such that $x\in W^s_y(X)$.
\end{prop}
\begin{proof}
Define
\[
\textstyle E=\{ x\in X : x\in W^s_y(X)\;\text{for some $y\in X\cap\Lambda$}\}.
\]
We show that $E=X$.
Suppose first that $x\in \bigcup_{n\ge0}f^{-n}\big(\bigcup_{i=1}^k
W^s_{p_i}(X)\big)$, so there exists $n\ge0$, $i\in\{1,\dots,k\}$
and $y\in W^s_{p_i}(X)$ such that
$f^nx=y$.
Choose an open set $V$ formed of a union of stable leaves and containing $x$ such that $f^n|_V:V\to f^nV$ is a diffeomorphism.
By Remark~\ref{rmk:per},
periodic points are not isolated inside $X\cap \Lambda$, so there exists a
sequence $W_j$ of stable leaves inside $f^nV\cap E$ that converges to $W^s_{p_i}(X)$.
Choose $y_j\in W_j$ such that $y_j\to y$. Let $x_j=f^{-n}y_j$ so
$x_j\to x$.
Since $y_j\in E$, we have
$y_j\in W^s_{y'_j}(X)$ for some $y'_j\in f^nV\cap\Lambda$.
Write $y'_j=f^nx_j'$ where $x_j'\in V\cap\Lambda$.
Since $f^n|_V$ is a diffeomorphism and
$y_j\in W^s_{y'_j}(X)$, it follows that $x_j\in W^s_{x'_j}(X)$.
Passing to a subsequence if needed, we can assume that
$x'_j\to x'\in X\cap\Lambda$ and so $x_j\to x
\in W^s_{x'}(X)\subset E$.
We have shown that $E$ contains
$\bigcup_{n\ge0}f^{-n}\big(\bigcup_{i=1}^k W^s_{p_i}(X)\big)$
and so $E$ is dense in $X$ by Proposition~\ref{prop:denseper}.
Now let $x\in X$ and take $x_k\in E$ such that $x_k\to x$.
By definition of $E$, we have $x_k\in W^s_{y_k}(X)$ for some $y_k\in X\cap\Lambda$, and
passing to a subsequence we find $y\in X\cap\Lambda$ such that
$y_k\to y$. By continuity of the stable leaves, $x\in W^s_{y}(X)$ and so $x\in E$.
\end{proof}
\begin{pfof}{Theorem~\ref{thm:fol}}
If $x\in W^u_\sigma$ for some equilibrium $\sigma$, then $x\in\Lambda$ and there is nothing to do. Otherwise, restricting to a smaller positively invariant neighborhood $U_0$, we can ensure that there always exists $t>0$ such that $Z_{-t}x$ lies in one of the flow boxes $V_{y_j}$. But then there exists $t>0$ such that $Z_{-t}x\in \Sigma_{y_j}\subset X$. By Proposition~\ref{prop:fol}, $Z_{-t}x\in W^s_y(X)$ for some $y\in X\cap\Lambda$.
Hence there exists $t>0$ such that $Z_{-t}x\in W^s_y$, and so
$x\in W^s_z$ where $z=Z_ty\in\Lambda$.
\end{pfof}
We have shown that the stable lamination
$\{W^s_x:x\in\Lambda\}$ coincides with the stable foliation
$\{W^s_x:x\in U_0\}$.
From now on, we refer to ${\mathcal{W}}^s=\{W^s_x:x\in\Lambda\}$ as the stable foliation.
\section{H\"older regularity and absolute continuity of the stable foliation}
\label{sec:regWs}
In this section, we continue to assume that $\Lambda$ is a singular hyperbolic attracting set, and show that the topological foliation ${\mathcal{W}}^s$ is in fact a H\"older foliation (bi-H\"older charts). Also we recall results on absolute continuity of the stable foliation.
These results do not use explicitly the fact that $d_{cu}=2$; it suffices that the conclusion of Theorem~\ref{thm:fol} holds.
A key ingredient is regularity of stable holonomies.
Let $Y_0,\,Y_1\subset U_0$ be two smooth disjoint $d_{cu}$-dimensional
disks that are transverse to the stable foliation ${\mathcal{W}}^s$. Suppose that
for all $x\in Y_0$, the stable leaf $W^s_x$ intersects each of $Y_0$ and $Y_1$ in precisely one point.
The {\em stable holonomy} $H:Y_0\to Y_1$ is given by defining $H(x)$ to be the intersection point of $W^s_x$ with $Y_1$.
\begin{lemma} \label{lem:PSW}
There exists ${\epsilon}>0$ such that the stable holonomies $H:Y_0\to Y_1$ are $C^{\epsilon}$. Moreover, if the angles between $Y_i$ and stable leaves are bounded away from zero for $i=0,1$, then there is a constant $K>0$ dependent on this bound but otherwise independent of the holonomy $H:Y_0\to Y_1$ such that
$d(H(y),H(y'))\le Kd(y,y')^{\epsilon}$ for all $y,y'\in Y_0$.
\end{lemma}
\begin{proof}
By Theorem~\ref{thm:fol}, we can view ${\mathcal{W}}^s$ as the stable lamination
corresponding to the invariant splitting
$T_\Lambda M=E^s\oplus E^{cu}$ for the
partially hyperbolic diffeomorphism $f=Z_1$.
Hence we can apply~\cite[Theorem~A']{PSW97}.
The result in~\cite{PSW97} is formulated slightly differently in terms of a splitting $T_\Lambda M=E^s\oplus E^c\oplus E^u$, but their
proof covers our situation (with the invariant splitting $T_\Lambda M=E^u\oplus E^{cs}$ there replaced by the symmetric situation $T_\Lambda M=E^s\oplus E^{cu}$).
\end{proof}
\begin{thm} \label{thm:holder}
The stable foliation ${\mathcal{W}}^s$ is $C^{\epsilon}$ for some ${\epsilon}>0$.
\end{thm}
\begin{proof}
Let $\{\gamma(x):x\in U_0\}$ be the family of embeddings $\gamma(x):{\mathcal D}^{d_s}\to W^s_x$ described in Proposition~\ref{prop:Ws}.
Let $x\in U_0$ and
choose an embedded $d_{cu}$-dimensional disk $Y_0\subset M$
containing $x$ and transverse to $W^s_x$. By continuity of
$E^s$, we can shrink $Y_0$ so that $Y_0$ is transverse to
$W^s_y$ at $y$ for all $y\in Y_0$. Let $\psi:{\mathcal D}^{d_{cu}}\to Y_0$ be
a smooth embedding.
The proof of~\cite[Lemma~4.9]{AraujoM17} shows that
the map $\chi:{\mathcal D}^{d_s}\times{\mathcal D}^{d_{cu}}\to U_0$ given by
\[
\chi(u,v)=\gamma(\psi(v))(u)
\]
is a topological chart for ${\mathcal{W}}^s$ at $x$. Note that $\chi$ maps
horizontal lines $\{v={\rm const.}\}$ homeomorphically onto
stable disks $W^s_{\psi(v)}$.
Moreover, we claim that $\chi$ maps vertical lines $\{u={\rm const.}\}$ onto smooth transversals $Y_u$ to ${\mathcal{W}}^s$.
To see this, we recall the notation $\gamma(y)(u)=Q(u,\varphi_y(u))$
from the proof of~\cite[Lemma~4.8]{AraujoM17}. Here
$Q=Q_{x,0}:{\mathbb R}^d\to M$ is a diffeomorphism and $Q^{-1}(W^s_y)$ is given by the graph of $\varphi_y:{\mathcal D}^{d_s}\to{\mathcal D}^{d_{cu}}$.
Hence
\begin{align*}
Y_u =\{\chi(u,v):v\in{\mathcal D}^{d_{cu}}\}
& =\{\gamma(\psi(v))(u):v\in{\mathcal D}^{d_{cu}}\}
\\ & =\{\gamma(y)(u):y\in Y_0\}
=Q\{(u,\varphi_y(u)):y\in Y_0\}.
\end{align*}
The leaves $W^s_y$ foliate $U_0$, so the graphs $Q^{-1}(W^s_y)=\{(u,\varphi_y(u))\}$ foliate ${\mathcal D}^{d_s}\times{\mathcal D}^{d_{cu}}$. Hence
the set $\{(u,\varphi_y(u)):y\in Y_0\}$ is precisely $\{u={\rm const.}\}$ and so
$Y_u=Q(\{u={\rm const.}\})$ verifying the claim.
Moreover, via the diffeomorphism $Q$, the angles of $Y_u$ with stable disks $W^s_y$ are bounded away from zero. Hence for any $u\neq0$, the stable holonomy
$H_u:Y_0\to Y_u$ satisfies
$d(H_u(y),H_u(y'))\le Kd(y,y')^{\epsilon}$ for all $y,y'\in Y_0$ by Lemma~\ref{lem:PSW}.
Also $H_u^{-1}:Y_u\to Y_0$ is a stable holonomy, so
$d(H_u^{-1}(y),H_u^{-1}(y'))\le Kd(y,y')^{\epsilon}$ for all $y,y'\in Y_u$.
Now $\chi(u,v)=\gamma(\psi(v))(u)=H_u(\psi(v))$, so
\begin{align*}
d(\chi(u,v),\chi(u,v')) & =
d(H_u(\psi(v)),H_u(\psi(v')))
\\ & \le K d(\psi(v),\psi(v'))^{\epsilon}\le K(\operatorname{Lip}\psi)^{\epsilon} \|v-v'\|^{\epsilon}.
\end{align*}
Also there is a constant $L_1>0$ such that
\begin{align*}
d(\chi(u,v'),\chi(u',v')) & =
d(\gamma(\psi(v'))(u),\gamma(\psi(v'))(u'))
\\ & \le \operatorname{Lip}\gamma(\psi(v'))\,\|u-u'\|
\le L\|u-u'\|\le L_1\|u-u'\|^{\epsilon}.
\end{align*}
Altogether, letting $M_1=\max\{K(\operatorname{Lip}\psi)^{\epsilon},L_1\}$,
\[
d(\chi(u,v),\chi(u',v'))\le M_1
\big(\|u-u'\|^{\epsilon}+
\|v-v'\|^{\epsilon}\big)
\le CM_1
(\|u-u'\|^2+\|v-v'\|^2)^{{\epsilon}/2},
\]
where $C>0$ is an upper bound for the homogeneous function
$\displaystyle\frac{|x|^{\epsilon}+|y|^{\epsilon}}{(|x|^2+|y|^2)^{{\epsilon}/2}}$
over the set of $(x,y)\in{\mathbb R}^2$ such
that $|x|^2+|y|^2=1$.
Hence $\chi$ is $C^{\epsilon}$.
Next,
\begin{align*}
\|u-u'\| & \le
\|(u,\varphi_{\psi(v)}(u))-(u',\varphi_{\psi(v')}(u'))\|
\\ & \le\operatorname{Lip}(Q^{-1})
d(Q(u,\varphi_{\psi(v)}(u)),Q(u',\varphi_{\psi(v')}(u')))
\\ & =\operatorname{Lip}(Q^{-1})d(\gamma(\psi(v))(u),\gamma(\psi(v'))(u'))=\operatorname{Lip}(Q^{-1})d(\chi(u,v),\chi(u',v')),
\end{align*}
and
\begin{align*}
\|v-v'\| \le \operatorname{Lip}(\psi^{-1})d(\psi(v),\psi(v'))
& = \operatorname{Lip}(\psi^{-1})d(H_{u'}^{-1}\chi(u',v),H_{u'}^{-1}\chi(u',v'))
\\ & \le K \operatorname{Lip}(\psi^{-1})d(\chi(u',v),\chi(u',v'))^{\epsilon}.
\end{align*}
Moreover,
\begin{align*}
\|(u',\varphi_{\psi(v)}(u'))-(u',\varphi_{\psi(v')}(u'))\|
& =\|\varphi_{\psi(v)}(u')-\varphi_{\psi(v')}(u')\|
\\ & \le
\|\varphi_{\psi(v)}(u')-\varphi_{\psi(v)}(u)\|
+\|\varphi_{\psi(v)}(u)-\varphi_{\psi(v')}(u')\|
\\ & \le
L\|u-u'\| +\|\varphi_{\psi(v)}(u)-\varphi_{\psi(v')}(u')\|
\\ & \le
(L+1)\|(u,\varphi_{\psi(v)}(u))-(u',\varphi_{\psi(v')}(u'))\|,
\end{align*}
so
\[
d(\chi(u',v),\chi(u',v'))\le (L+1)\operatorname{Lip} Q\operatorname{Lip}(Q^{-1}) d(\chi(u,v),\chi(u',v')).
\]
Combining these estimates, we obtain
$\|(u,v)-(u',v')\|\le{\rm const.}\,d(\chi(u,v),\chi(u',v'))^{\epsilon}$.
Hence
$\|\chi^{-1}(p)-\chi^{-1}(p')\|\le{\rm const.}\,d(p,p')^{\epsilon}$ for
$p,p'\in U_0$, so $\chi^{-1}\in C^{\epsilon}$.
\end{proof}
\begin{thm}\label{thm:H}
The stable holonomy $H:Y_0\to Y_1$ is absolutely continuous. That is,
$m_1\ll H_*m_0$ where $m_i$ is Lebesgue measure
on $Y_i$, $i=0,1$.
Moreover, the Jacobian $JH:Y_0\to{\mathbb R}$ given by
\begin{align*}
JH(x)=\frac{dm_1}{dH_*m_0}(Hx)=\lim_{r\to0}\frac{m_1(H(B(x,r)))}{m_0(B(x,r))},\quad x\in Y_0,
\end{align*}
is bounded above and below and is $C^{\epsilon}$ for some ${\epsilon}>0$.
\end{thm}
\begin{proof}
This essentially follows from \cite[Theorems 8.6.1 and
8.6.13]{BarreiraPesin}. See also~\cite[Theorem~2.1]{PughShub72} and~\cite[Section~III.3]{Mane}.
The results there are formulated under a condition of the type
$\sup_{x\in\Lambda}\|DZ_t | E^s_x\| \sup_{x\in\Lambda} \|DZ_{-t} | E^{cu}_{Z_tx}\| \le C \lambda^t$ which is more restrictive than the domination condition~\eqref{eq:domination}.
However, it is standard that such results generalise to our setting.
(See the remark in~\cite{PughShub72} after their theorem. Most of the required result is covered by~\cite{PughShub72} except that H\"older continuity of $JH$ is not mentioned, only continuity.)
\end{proof}
\section{One-dimensional quotient map $\bar f:\bar X\to\bar X$}
\label{sec:barf}
In this section, we continue to suppose that $\Lambda$ is a singular hyperbolic attracting set with $d_{cu}=2$.
Let $f:X'\to X$ be the global Poincar\'e map defined in Section~\ref{sec:f}
with invariant stable foliation ${\mathcal{W}}^s(X)$.
We now show how to obtain a one-dimensional
piecewise $C^{1+{\epsilon}}$ uniformly expanding quotient map
$\bar f:\bar X'\to\bar X$.
We begin by analysing the stable holonomies for $f$.
Let $\gamma_0,\,\gamma_1\subset X$ be two $u$-curves such that
for all $x\in \gamma_0$, the stable leaf $W^s_x(X)$ intersects each of $\gamma_0$ and $\gamma_1$ in precisely one point.
The {\em (cross-sectional) stable holonomy} $h:\gamma_0\to \gamma_1$ is given by defining $h(x)$ to be the intersection point of $W^s_x(X)$ with $\gamma_1$.
\begin{lemma}\label{lem:h}
The stable holonomy $h$ is $C^{1+{\epsilon}}$ for some ${\epsilon}>0$.
\end{lemma}
\begin{proof}
Recall that $X=\bigcup \Sigma_{y_j}$ where $\Sigma_{y_j}$ is the cross-section
associated to the flow box $V_{y_j}$ for each $j$. Since the result is local,
we can suppose that $\gamma_0,\gamma_1\subset \Sigma_{y_j}$ for some $j$ and we can choose coordinates so that the local flow $Z_t$ is linear.
Consider the $2$-dimensional disks
$Y_i=\bigcup_{t\in[-\delta_i,\delta_i]}Z_t(\gamma_i)
=\gamma_i\times[-\delta_i,\delta_i], i=0,1$, for fixed
$\delta_i>0$. These are smooth transversals to the stable
foliation ${\mathcal{W}}^s$ of the flow. Provided $\delta_0$ is small
with respect to $\delta_1$, we can then consider the
holonomy $H:Y_0\to Y_1$ as in Section~\ref{sec:regWs}.
For $p=(v,0)\in \gamma_0\subset Y_0$ we
write $H(v,0)=(H_1(v),\xi(v))$ with $H_1:\gamma_0\to \gamma_1$
and $\xi:\gamma_0\to[-\delta_1,\delta_1]$. Clearly $h=H_1$
by construction. Since ${\mathcal{W}}^s$ is flow invariant,
\begin{align*}
H(v,t)=(h(v),\xi(v)+t).
\end{align*}
Let $\lambda_i=(\pi_i)_*m_i$ denote Lebesgue measure on
$\gamma_i, i=0,1$, where $\pi_i:Y_i\to \gamma_i$ is
the natural projection.
By Theorem~\ref{thm:H}, $m_1\ll H_*m_0$. Since $\pi_1H=h\pi_0$,
\begin{align*}
\lambda_1=(\pi_1)_*m_1\ll (\pi_1H)_*m_0=(h\pi_0)_*m_0= h_*\lambda_0.
\end{align*}
Hence $h$ is absolutely continuous.
Taking balls $B(x,r)$ to be rectangles, we have for $r$ sufficiently small
\[
H(B(x,r))=\bigcup_{v'\in B(v,r)}\{h(v')\}\times(t+\xi(v')-r,t+\xi(v')+r).
\]
By Fubini,
\[
\frac{m_1(H(B(x,r)))}{m_0(B(x,r))}
=\frac{2r\lambda_1(h(B(v,r)))}{2r\lambda_0(B(v,r))}
=\frac{\lambda_1(h(B(v,r)))}{\lambda_0(B(v,r))},
\]
showing that $JH(x)=Jh(v)$ for all $x=(v,t)$.
By Theorem~\ref{thm:H}, $Jh$ is H\"older. But $\dim\gamma_0=\dim\gamma_1=1$, so $Jh=|Dh|$ and the result follows.
\end{proof}
Recall that $X$ is a union of finitely many cross-sections
$\Sigma_{y_j}$, and that $f$ is smooth on a subset $X''\subset X'\subset X$ which is obtained from $X$ by removing finitely many stable leaves.
Moreover, each $\Sigma_{y_j}\cap X''$ is a union of finitely many connected smooth strips $S$ such that
$f|_S:S\to f(S)$ is a diffeomorphism.
For each $j$, let $\gamma_j\subset \Sigma_{y_j}$ be a $u$-curve crossing $\Sigma_{y_j}$.
Define $\bar X=\bigcup_j\overline{\gamma_j}$
and $\bar X'=X'\cap\bar X$.
Given a smooth strip $S\subset \Sigma_{y_j}$,
there exists $k$ such that
$f(S)\subset\Sigma_{y_k}$.
Also $f(S\cap\gamma_j)$ is a $u$-curve by Theorem~\ref{thm:global}.
Let
$h:f(S\cap \gamma_j)\to \gamma_k$
be the associated stable holonomy and
define $\bar f(x)=h(fx)$ for $x\in S\cap\gamma_j$. In this way we obtain
a one-dimensional map $\bar f:\bar X'\to \bar X$.
\begin{cor} \label{cor:barf}
The quotient map $\bar f:\bar X'\to\bar X$ is piecewise $C^{1+{\epsilon}}$ and consists of finitely many monotone $C^{1+{\epsilon}}$ branches. Choosing $T_1$ in Section~\ref{sec:UH} sufficiently large, we have $|D\bar f|\ge2$ on $\bar X'$.
\end{cor}
\begin{proof} Since $f$ is smooth on smooth strips and the holonomies
$h:f(S\cap \gamma_j)\to \gamma_k$ are
$C^{1+{\epsilon}}$ by Lemma~\ref{lem:h}, it follows that $\bar f$ is piecewise $C^{1+{\epsilon}}$.
The collection of intervals $S\cap\gamma_j$ is finite, so $\bar f$ has finitely many branches. By finiteness of the collection $\{\Sigma_{y_j}\}$, there is a constant $c>0$ such that all the holonomies $h$ considered above satisfy $|Dh|\ge c$.
Hence taking $\lambda_1$ sufficiently small in Theorem~\ref{thm:global}, we can ensure that $|D\bar f|$ is as large as desired.
\end{proof}
\section{Statistical properties for $\bar f$ and $f$}
\label{sec:stat}
In this section, we investigate statistical properties for the
$(d_s+1)$-dimensional Poincar\'e map
$f:X'\to X$ and the one-dimensional quotient map
$\bar f:\bar X'\to\bar X$.
Define the H\"older projection $\pi:X\to\bar X$ by letting $\pi(x)$ be the point where $W^s_x(X)$ intersects $\bar X$. Then $\pi$ defines a semiconjugacy between $f$ and $\bar f$.
From now on, we write $f:X\to X$ and $\bar f:\bar X\to\bar X$ with the understanding that $f$ and $\bar f$ are not defined everywhere (and are piecewise smooth where defined).
\subsection{Spectral decomposition and physical measures}
\label{sec:spectral}
\begin{prop} \label{prop:spectral}
There exists a finite number of
ergodic absolutely continuous $\bar f$-invariant probability measures $\bar\mu_1,\dots,\bar\mu_s$ whose basins cover a subset of $\bar X$ of full Lebesgue measure. For each $j$, the density $d\bar\mu_j/d\operatorname{Leb}$ lies in $L^\infty$
and $\operatorname{Int}\operatorname{supp}\bar\mu_j\neq\emptyset$.
\end{prop}
\begin{proof}
By Corollary~\ref{cor:barf}, $\bar f$ is a piecewise $C^{1+{\epsilon}}$ uniformly expanding one-dimensional map.
Hence, most of the result is immediate from~\cite[Theorem~3.3]{Keller85}.
We refer to~\cite[Lemma~3.1]{Saussol00}
for the fact that $\operatorname{Int}\operatorname{supp}\bar\mu_j\neq\emptyset$.
\end{proof}
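As a purely illustrative numerical aside (not used in the proofs), the following Python sketch estimates the invariant density for a toy full branch Lorenz-like expanding map $x\mapsto\operatorname{sign}(x)(2|x|^\omega-1)$ on $[-1,1]$ with $\omega\in(\frac12,1)$. Its derivative $2\omega|x|^{\omega-1}\ge2\omega>1$ blows up at the singular point $0$, mimicking the behavior of the quotient maps considered here, but the map is a hypothetical model and is not derived from any particular flow.
\begin{verbatim}
# Illustrative only: empirical invariant density for a toy Lorenz-like
# uniformly expanding map on [-1,1].  This model map is hypothetical and is
# not the quotient map of any particular singular hyperbolic attractor.
import numpy as np

omega = 0.75  # any value in (1/2, 1) gives uniform expansion

def barf(x):
    # Two full branches on (-1,0) and (0,1]; the derivative
    # 2*omega*|x|**(omega-1) >= 2*omega > 1 blows up at x = 0.
    return np.sign(x) * (2.0 * np.abs(x)**omega - 1.0)

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0)
n_iter, n_burn = 10**6, 10**3
orbit = np.empty(n_iter)
for n in range(n_iter):
    x = barf(x)
    orbit[n] = x

# A histogram of a long orbit (after discarding a transient) approximates
# the density of an ergodic absolutely continuous invariant measure.
density, edges = np.histogram(orbit[n_burn:], bins=100,
                              range=(-1.0, 1.0), density=True)
print("estimated density: min %.3f, max %.3f" % (density.min(), density.max()))
\end{verbatim}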
\begin{cor} \label{cor:spectral} There exists a
finite number of
ergodic $f$-invariant probability measures $\mu_1,\dots,\mu_s$ whose basins cover a subset of $X$ of full Lebesgue measure.
Moreover, $\pi_*\mu_j=\bar\mu_j$ for each $j$.
\end{cor}
\begin{proof}
This follows from the existence of the stable foliation ${\mathcal{W}}^s(X)$ (here, the fact that it is a topological foliation suffices) combined with Proposition~\ref{prop:spectral}.
For details, see~\cite[Sections~6.1 and~6.2]{APPV09}.
\end{proof}
\subsection{Existence of an inducing scheme}
\label{sec:induce}
In this subsection, we suppose without loss that there is a unique absolutely continuous $\bar f$-invariant measure $\bar\mu$ in Proposition~\ref{prop:spectral}
(so $s=1$).
\begin{prop} \label{prop:barf}
There exists $k\ge1$ such that
$\operatorname{supp}\bar\mu=\bar X_1\cup\cdots\cup\bar X_k$
where the sets $\bar X_j$ are permuted cyclically by $\bar f$, and
$\bar f^k:\bar X_j\to\bar X_j$ is mixing for each $j$.
Moreover, for any $\eta\in(0,1)$, there exist constants $c,\,C>0$ such that
\[
\Big|\int_{\bar X_j} v\,w\circ \bar f^{kn}\,d\bar\mu- \int_{\bar X_j} v\,d\bar\mu
\int_{\bar X_j} w\,d\bar\mu\Big|\le C\|v\|_{C^\eta}|w|_1 e^{-cn}\quad\text{for all $n\ge1$, $j=1,\dots,k$},
\]
for all $v\in C^\eta(\bar X)$ and $w\in L^1(\bar X)$.
\end{prop}
\begin{proof} This is immediate from the quasicompactness of
the transfer operator for $\bar f$ which is established
in~\cite[Theorem~3.3]{Keller85}. Indeed the result
in~\cite{Keller85} is proved for the class of
functions with finite $\eta$-variation (for all $\eta>0$
sufficiently small). This includes observables that are
$C^\eta$.
\end{proof}
For ease of exposition, we suppose for the remainder of this
subsection that $k=1$ and $\bar X_1=\bar X$.
Recall that a one-dimensional map $\bar F:\bar Y\to\bar Y$ is a full branch Gibbs-Markov map if
there is an at most
countable partition $\alpha$ of $\bar Y$ and constants
$C>0$, ${\epsilon}\in(0,1]$ such that for all $a\in\alpha$,
\begin{itemize}
\item $\bar F|_a:a\to\bar Y$ is a measurable bijection, and
\item $\big|\log |D\bar F(y_1)|-\log |D\bar F(y_2)|\big|\le C|\bar F y_1-\bar F y_2|^{\epsilon}$ for
all $y_1,y_2\in a$.
\end{itemize}
\begin{lemma} \label{lem:AFLV}
For all $\beta>0$, there exists a positive
measure subset $\bar Y\subset \bar X$ and
a full branch Gibbs-Markov induced map $\bar F=\bar f^\rho:\bar Y\to \bar Y$, where $\rho:\bar Y\to{\mathbb Z}^+$ is constant on partition elements and satisfies $\operatorname{Leb}(y\in \bar Y:\rho(y)>n)=O(n^{-\beta})$.
\end{lemma}
\begin{proof}
By Proposition~\ref{prop:spectral}, $\bar\mu$ is an ergodic absolutely continuous invariant probability measure on $\bar X$ with $d\bar\mu/d\operatorname{Leb}\in L^\infty$.
The result follows from Theorem~\ref{thm:AFLV} provided we
verify that $\bar\mu$ is expanding and that conditions (C0)--(C3) hold. Let ${\mathcal S}$ denote the finite
set consisting of singularities/discontinuities of
$\bar f$. (In general $X\setminus{\mathcal S}$ is a proper subset of $X'$ since ${\mathcal S}$ includes the discontinuities of the piecewise smooth map $f$.)
Conditions~(C0) and (C3) are redundant since
$\bar f$ is one-dimensional. Conditions (C1) and (C2) become
\begin{itemize}
\item[(C1)] $C^{-1}d(x,{\mathcal S})^q\le |D\bar f(x)|\le Cd(x,{\mathcal S})^{-q}$
for all $x\in{\bar X}\setminus{\mathcal S}$,
\item[(C2)]
$\big|\log|D\bar f(x)|-\log|D\bar f(x')|\big|\le
C|x-x'|^\eta(|D\bar f(x)|^{-q}+|D\bar f(x)|^{q})$
for all $x,x'\in{\bar X}\setminus{\mathcal S}$ with $|x-x'|<\operatorname{dist}(x,{\mathcal S})/2$,
\end{itemize}
where $\eta\in(0,1)$ and $C,q>0$ are constants.
Since $d\bar\mu/d\operatorname{Leb}\in L^\infty$
it is immediate from (C1) that
$\log|(D\bar f)^{-1}|$ is integrable with respect to $\bar\mu$.
Also $\int\log|(D\bar f)^{-1}|\,d\bar\mu\le \log\frac12<0$ by Corollary~\ref{cor:barf},
so $\bar\mu$ is an expanding measure.
It remains to verify conditions (C1) and (C2).
Note that they are trivially satisfied for functions $\bar f$ with $D\bar f$ H\"older and bounded below. Hence they are satisfied away from ${\mathcal S}$ and also near all
discontinuity points in ${\mathcal S}$.
By Proposition~\ref{prop:like}, it remains to consider
singularities $x_0\in X$ corresponding to Lorenz-like equilibria $\sigma$.
The Poincar\'e map $f$ can be written near $x_0$ as $f=h_1\circ g \circ h_2$ where $g$ corresponds to the flow near $\sigma$ and $h_1,\,h_2$ are the remaining parts of the Poincar\'e map. In particular $D\bar h_j$ is H\"older and bounded below for $j=1,2$.
Suppose first that the flow is $C^{1+{\epsilon}}$-linearizable for some ${\epsilon}>0$
in a neighborhood of~$\sigma$.
Incorporating the linearization into $h_1$ and $h_2$,
we can suppose without loss that the flow is linear in a neighborhood of $\sigma$. Hence
the flow is given by $x\mapsto e^{tA}x$
where $A=\lambda^u\oplus\lambda^s\oplus B$
with $-\lambda^u<\lambda^s<0<\lambda^u$
and $B=DG(\sigma)|E^s_\sigma$.
A standard calculation shows that in suitable coordinates,
\[
g(x,z)=(|x|^{-\lambda^s/\lambda^u},ze^{-\lambda_u^{-1}B\log|x|}).
\]
In particular, $\bar g(x)=|x|^\omega$ where
$\omega=-\lambda^s/\lambda^u\in(0,1)$.
Since $D\bar h_j$ is bounded above and below,
it follows from the chain rule that
\[
|D\bar f(x)|\cong |D\bar g(\bar h_2x)|=\omega |\bar h_2x|^{\omega-1}\cong |x-x_0|^{\omega-1},
\]
so (C1) is satisfied.
Next,
\begin{align*}
\big|\log|D\bar f(x)|- & \log| D\bar f(x')|\big|
\le C_1\big(
|x-x'|^{\epsilon}+ \big|\log|D\bar g(\bar h_2x)|-\log|D\bar g(\bar h_2x')|\big|
\\ & \qquad\qquad \qquad +|\bar g(\bar h_2x)-\bar g(\bar h_2x')|^{\epsilon}\big)
\\ & =
C_1\big(|x-x'|^{\epsilon}
+(1-\omega)\big|\log|\bar h_2x|-\log|\bar h_2x'|\big|
+
\big||\bar h_2x|^\omega-|\bar h_2x'|^\omega\big|^{{\epsilon}}\big) .
\end{align*}
Now
\begin{align*}
\big|\log|\bar h_2x|-\log|\bar h_2x'|\big|\le C_2 (|\bar h_2x-\bar h_2x'|/|\bar h_2x|)^{1-\omega}
& \le C_2' |x-x'|^{1-\omega}|x-x_0|^{\omega-1}
\\ & \le C_2'' |x-x'|^{1-\omega}|D\bar f(x)|.
\end{align*}
Also without loss $|x-x_0|\le |x'-x_0|$, so
\begin{align*}
\big||\bar h_2x|^\omega-|\bar h_2x'|^\omega\big|
&\le |\bar h_2x-\bar h_2x'|(|\bar h_2x|^{\omega-1}+|\bar h_2x'|^{\omega-1})
\\ & \le C_3 |x-x'||x-x_0|^{\omega-1} \le C_3' |x-x'||D\bar f(x)|.
\end{align*}
Hence, there exists $\eta\in(0,1)$ such that
\begin{align*}
\big|\log|D\bar f(x)|-\log| D\bar f(x')|\big|
& \le C_4 |x-x'|^\eta(|D\bar f(x)|+1)
\\ & \le C_4 |x-x'|^\eta(2|D\bar f(x)|+|D\bar f(x)|^{-1}),
\end{align*}
verifying (C2).
To complete the proof, we remove the assumption that the flow near $\sigma$
is $C^{1+{\epsilon}}$-linearizable. By the center manifold theorem (e.g.~\cite[Theorem~5.1]{HPS77}), locally we can choose a flow-invariant $C^{1+{\epsilon}}$
two-dimensional manifold $W$ tangent to $E^{cu}_\sigma$ (for some ${\epsilon}>0$).
Note that the quotient of $g|W$ coincides with $\bar g$.
By a result of
Newhouse~\cite{Newhouse17} (stated previously but without proof in~\cite{Hartman60}),
the flow restricted to $W$ (being two-dimensional) can be
$C^{1+{\epsilon}'}$ linearized for some ${\epsilon}'>0$.
The proof now proceeds as before.
\end{proof}
\begin{rmk} \label{rmk:exp}
Since we have exponential decay of correlations in Proposition~\ref{prop:barf}, there is the hope of obtaining an induced Gibbs-Markov map as in
Lemma~\ref{lem:AFLV} but with exponential tails for $\rho$. (We note that
Theorem~\ref{thm:AFLV}(2) which would give stretched exponential tails does not apply because the density $d\bar\mu/d\operatorname{Leb}$ is not bounded below.)
In certain situations, it is possible to construct an inducing scheme
with exponential tails by using different methods, controlling
the tail of hyperbolic times and relating this with the tail of
inducing times more directly~\cite{Gouezel06,Araujo07,AST}.
One repercussion of the existence of such an inducing scheme would be that
the error rate in the vector-valued ASIP would be improved to
$n^{\frac14+{\epsilon}}$ for ${\epsilon}>0$ arbitrarily small~\cite{Gouezel10}.
However, our construction here with superpolynomial tails holds in complete generality and suffices for our
results on singular hyperbolic flows in Section~\ref{sec:SH}, so we do not pursue this further.
\end{rmk}
\begin{prop} \label{prop:tau}
There is a constant $C>0$ such that
\[
\textstyle{\sum_{\ell=0}^{\rho(y)-1}}|\tau(f^\ell y)-\tau(f^\ell y')|\le C|\bar F y-\bar F y'|^{\epsilon}
\quad \text{for all $y,y'\in a$, $a\in\alpha$.}
\]
\end{prop}
\begin{proof}
It follows from the proof of Lemma~\ref{lem:AFLV} that the roof function
$\tau:X\to{\mathbb R}^+$ satisfies $\tau(x)=-\lambda_u^{-1}\log|\bar h_2x|+t(x)$
where $\bar h_2$ and $t$ are $C^{1+{\epsilon}}$.
Hence
\[
|\tau(x)-\tau(x')|\le \lambda_u^{-1} |\bar h_2 x-\bar h_2 x'|/|\bar h_2x|+|Dt|_\infty|x-x'|
\le C_1 |x-x'|d(x,{\mathcal S})^{-1},
\]
and the result follows from~\eqref{eq:hyptime}.
\end{proof}
\subsection{Statistical limit laws for the Poincar\'e map}
By Corollary~\ref{cor:spectral}, there is a unique ergodic
$f$-invariant probability measure $\mu$ on $X$ corresponding
to $\bar\mu$, with $\pi_*\mu=\bar\mu$.
\begin{thm} \label{thm:statf}
Fix $\eta\in(0,1)$ and let $v\in C^\eta(X)$ with $\int_X v\,d\mu=0$.
Write $v_n=\sum_{j=0}^{n-1}v\circ f^j$.
Then the limit
$\sigma^2=\lim_{n\to\infty}n^{-1}\int_X v_n^2\,d\mu$ exists. Suppose that $\sigma^2>0$. Then the following limit laws hold.
\begin{description}[style=unboxed,leftmargin=0cm]
\item[ASIP~\cite{CunyMerlevede15}]
Let ${\epsilon}>0$. There exists a probability space $\Omega$ supporting a sequence of random variables $\{S_n,\,n\ge1\}$ with the same joint distributions
as $\{v_n,\,n\ge1\}$,
and a sequence $\{Z_n,\,n\ge1\}$ of i.i.d.\ random variables with distribution $N(0,\sigma^2)$, such that
\[
\textstyle \sup_{1\le k\le n} \big|S_k-\sum_{j=1}^k Z_j\big|=
O(n^{\epsilon})
\;\text{a.e.\ as $n\to\infty$.}
\]
\item[Berry-Esseen~\cite{Gouezel05}]
There exists $C>0$ such that
\[
\big|\mu\{x\in X: n^{-1/2}v_n(x)\le a\}-{\mathbb{P}}\{N(0,\sigma^2)\le a\}\big|\le Cn^{-1/2}
\quad\text{for all $a\in{\mathbb R}$, $n\ge1$.}
\]
\item[local limit theorem~\cite{Gouezel05}]
Suppose that $v$ is aperiodic (so it is not possible to write $v=c+g-g\circ f+\lambda q$ where $c\in{\mathbb R}$, $\lambda>0$, $g:X\to{\mathbb R}$ measurable and $q:X\to{\mathbb Z}$).
Then for any bounded interval $J\subset{\mathbb R}$,
\[
\lim_{n\to\infty} n^{1/2}\mu(x\in X: v_n(x)\in J)=(2\pi\sigma^2)^{-1/2}|J|.
\]
\end{description}
For $C^\eta$ vector-valued observables $v:X\to{\mathbb R}^d$ with
$\int_X v\,d\mu=0$, the limit
$\Sigma=\lim_{n\to\infty}n^{-1}\int_X v_n v_n^T\,d\mu\in{\mathbb R}^{d\times d}$ exists and we obtain
\begin{description}[style=unboxed,leftmargin=0cm]
\item[vector-valued ASIP~\cite{MN09,Korepanovapp}]
There exists $\lambda\in(0,\frac12)$ and a probability space $\Omega$ supporting a sequence of random variables $\{S_n,\,n\ge1\}$ with the same joint distributions
as \mbox{$\{v_n,\,n\ge1\}$},
and a sequence $\{Z_n,\,n\ge1\}$ of i.i.d.\ random variables with distribution $N(0,\Sigma)$, such that
\[
\textstyle \sup_{1\le k\le n} \big|S_k-\sum_{j=1}^k Z_j\big|=
O(n^{\lambda})
\;\text{a.e.\ as $n\to\infty$.}
\]
\end{description}
\end{thm}
\begin{proof}
The strategy is to model $\bar F:{\bar Y}\to{\bar Y}$ and $F:Y\to Y$ by ``one-sided'' and ``two-sided'' Young towers $\bar\Delta$ and $\Delta$, and to construct an observable $\bar v:\bar\Delta\to{\mathbb R}$ to which the various results in the references can be applied. The desired statistical properties for $v$ are deduced from those for $\bar v$.
Using $\bar F:{\bar Y}\to{\bar Y}$ and $\rho:{\bar Y}\to{\mathbb Z}^+$ as given in Lemma~\ref{lem:AFLV}, we
define the one-sided Young tower map
$\bar f_\Delta:\bar\Delta\to\bar\Delta$,
\[
\bar\Delta=\{(y,\ell)\in {\bar Y}\times{\mathbb Z}^+:0\le\ell\le \rho(y)-1\},
\quad
\bar f_\Delta(y,\ell)=\begin{cases} (y,\ell+1) & \ell\le\rho(y)-2
\\ (\bar F y,0) & \ell=\rho(y)-1 \end{cases}.
\]
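In particular (immediate from the definition, and recorded only for orientation), points climb the tower one level per iterate until they return to the base under the induced map: for $y\in{\bar Y}$,
\[
\bar f_\Delta^{\,\ell}(y,0)=(y,\ell)\quad\text{for $0\le\ell\le\rho(y)-1$},
\qquad
\bar f_\Delta^{\,\rho(y)}(y,0)=(\bar F y,0).
\]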
Let $\bar\mu_Y$ denote the unique absolutely continuous invariant probability measure for the Gibbs-Markov map $\bar F:{\bar Y}\to{\bar Y}$.
Then
$\bar\mu_\Delta=\bar\mu_Y\times{\rm counting}/\int_{\bar
Y}\rho\,d\bar\mu_Y$
is an ergodic $\bar f_\Delta$-invariant probability measure on $\bar\Delta$.
Next, define $Y=\pi^{-1}{\bar Y}\subset X$ to be the union of stable leaves $W^s_y(X)$ where $y\in {\bar Y}$.
In the proof of Corollary~\ref{cor:spectral}, we used
an argument from~\cite{APPV09} which constructs~$\mu$ on $X$ starting from $\bar\mu$ on ${\bar X}$.
The same argument constructs an ergodic $F$-invariant probability measure $\mu_Y$ on $Y$ starting from $\bar\mu_Y$.
Define $\rho:Y\to{\mathbb Z}^+$ and $F:Y\to Y$ by setting $\rho(y)=\rho(\pi y)$
and $F(y)=f^{\rho(y)}y$.
Using these (instead of $\rho:{\bar Y}\to{\mathbb Z}^+$ and $\bar F:{\bar Y}\to {\bar Y}$)
we obtain a two-sided Young tower map $f_\Delta:\Delta\to \Delta$
with ergodic $f_\Delta$-invariant probability measure
$\mu_\Delta=\mu_Y\times{\rm counting}/\int_Y\rho\,d\mu_Y$.
The projection $\pi:X\to{\bar X}$ extends to a semiconjugacy $\pi:\Delta\to\bar\Delta$ given by $\pi(y,\ell)=(\pi y,\ell)$, and $\pi_*\mu_\Delta=\bar\mu_\Delta$.
Moreover, the projection
\[
\pi_\Delta:\Delta\to X, \qquad
\pi_\Delta(y,\ell)= f^\ell y,
\]
is a semiconjugacy from $f_\Delta$ to $f$ and
$\pi_{\Delta\,*}\mu_\Delta=\mu$.
The separation time $s(y,y')$ of points $y,y'\in {\bar Y}$ is
the least integer $n\ge0$ such that $\bar F^ny$ and
$\bar F^ny'$ lie in distinct elements of the partition
$\alpha$. This extends to $\bar\Delta$ by setting $s((y,\ell),(y',\ell'))=s(y,y')$ when $\ell=\ell'$ and zero otherwise, and then to $\Delta$ by setting
$s(p,p')=s(\pi p,\pi p')$.
For each $\theta\in(0,1)$, define the symbolic
metric $d_\theta$ on $\bar\Delta$ given by
$d_\theta(p,p')=\theta^{s(p,p')}$.
Given $w:\bar\Delta\to{\mathbb R}$, we define
\[
\|w\|_\theta=|w|_\infty+ \sup_{p\neq p'} |w(p)-w(p')|/d_\theta(p, p').
\]
Let $\lambda_1\in(0,1)$ be as in Propositions~\ref{prop:secUH} and~\ref{prop:sec-cone}, and
set $\theta=\lambda_1^{\eta/2}$.
Let $v\in C^\eta(X,{\mathbb R}^d)$ with $\int_X v\,d\mu=0$. We claim that there exists
$\chi\in L^\infty(\Delta,{\mathbb R}^d)$ and $\bar v\in L^\infty(\bar\Delta,{\mathbb R}^d)$ with
$\|\bar v\|_\theta<\infty$
such that
\begin{align} \label{eq:chi}
v\circ \pi_\Delta=\bar v\circ\pi+\chi\circ f_\Delta-\chi.
\end{align}
Suppose that the claim is true.
Since $\bar\Delta$ is a one-sided Young tower~\cite{Young99} with superpolynomial tails (in fact $\beta>2$ suffices here) and $\|\bar v\|_\theta<\infty$,
it follows that $\bar v$ satisfies all of the
desired statistical properties by the mentioned references.
These are inherited (since $\pi$ is measure-preserving) by
$\bar v\circ\pi:\Delta\to{\mathbb R}^d$. Since $\chi\in L^\infty$, the properties are
inherited by $v\circ\pi_\Delta:\Delta\to{\mathbb R}^d$ and thereby $v$ (since $\pi_\Delta$ is measure-preserving).
It remains to verify the claim.
Define $\chi:\Delta\to{\mathbb R}^d$,
\[
\chi(p)=\sum_{j=0}^\infty \big(v\circ f^j\circ \pi_\Delta(\pi p)-
v\circ f^j\circ \pi_\Delta(p)\big).
\]
For $p=(y,\ell)$, using Proposition~\ref{prop:secUH}(a), we have
\begin{align*}
|\chi(p)| & \le
\sum_{j=0}^\infty {|v|}_{C^\eta} \|f^j\circ\pi_\Delta(\pi p)-f^j\circ\pi_\Delta(p)\|^\eta
={|v|}_{C^\eta}\sum_{j=0}^\infty \|f^{j+\ell}(\pi y)-f^{j+\ell}(y)\|^\eta
\\ & \le
{|v|}_{C^\eta}\sum_{j=0}^\infty \lambda_1^{\eta j}\|\pi y-y\|^\eta<\infty.
\end{align*}
Hence $\chi\in L^\infty(\Delta)$.
Let
$\hat v= v\circ\pi_\Delta-\chi\circ f_\Delta+\chi$.
It follows from the definitions that
$\hat v:\Delta\to{\mathbb R}^d$ is constant along fibres $\pi^{-1}\bar p$ for $\bar p\in\bar\Delta$.
Indeed,
\[
\hat v(p)=\sum_{j=0}^\infty v\circ f^j\circ \pi_\Delta(\pi p)
-\sum_{j=0}^\infty v\circ f^j\circ\pi_\Delta(\pi f_\Delta p).
\]
Hence we can write $\hat v=\bar v\circ\pi$ where
$\bar v:\bar\Delta\to{\mathbb R}^d$ satisfies~\eqref{eq:chi}.
Clearly, $|\bar v|_\infty \le |v|_\infty+2|\chi|_\infty<\infty$.
Let $p=(y,\ell),\,p'=(y',\ell')\in\Delta$.
If $\ell\neq\ell'$, then
$|\bar v(p)-\bar v(p')|\le 2|\bar v|_\infty=2|\bar v|_\infty d_\theta(p,p')$.
When $\ell=\ell'$,
set $N=[s(p,p')/2]$. Then
\[
|\hat v(p)-\hat v(p')|\le A_N(p)+A_N(p')+B_N(p,p')+B_{N-1}(f_\Delta p,f_\Delta p'),
\]
where
\begin{align*}
A_N(q) & =\sum_{j=N}^\infty |v\circ f^j\circ \pi_\Delta (\pi q)
- v\circ f^{j-1}\circ \pi_\Delta(\pi f_\Delta q)|, \\
B_N(q,q') & =\sum_{j=0}^{N-1} |v\circ f^j\circ \pi_\Delta (\pi q)
- v\circ f^j\circ \pi_\Delta(\pi q')|.
\end{align*}
The calculation for $\chi$ gives $A_N(q)=O( \lambda_1^{\eta N})=O( \theta^{s(p,p')})$ for $q=p,p'$.
Next,
\[
B_N(p,p') =\sum_{j=0}^{N-1} |v\circ f^{j+\ell}(\pi y)
- v\circ f^{j+\ell}(\pi y')|.
\]
Write $n=s(p,p')$. By Proposition~\ref{prop:sec-cone},
\begin{align*}
\operatorname{diam} X & \ge \|f^n\circ f^\ell(\pi y)-f^n\circ f^\ell(\pi y')\|
=\|f^{n-j}\circ f^j(f^\ell\pi y)-f^{n-j}\circ f^j(f^\ell\pi y')\|
\\ & \ge \lambda_1^{-(n-j)}\|f^j(f^\ell\pi y)-f^j(f^\ell\pi y')\|,
\end{align*}
for all $j\le n$.
Hence
\begin{align} \label{eq:s}
\|f^j(f^\ell \pi y)-f^j(f^\ell \pi y')\|=O( \lambda_1^{s(y,y')-j}),
\end{align}
and so
$B_N(p,p') \le C\sum_{j=0}^{N-1} \lambda_1^{\eta(s(y,y')-j)}=O(
\theta^{s(p,p')})$.
Similarly, $B_{N-1}(f_\Delta p,f_\Delta p')=O( \theta^{s(p,p')})$.
Hence we have shown that $|\hat v(p)-\hat v(p')|=O( \theta^{s(p,p')})$ and
so $\|\bar v\|_\theta<\infty$ as claimed.
\end{proof}
\begin{rmk}
The ASIP and vector-valued ASIP have numerous consequences summarised in
\cite[p.~233]{MorrowPhilipp82}. These include
the central limit theorem (CLT); the functional CLT, also known as the weak invariance principle; the (functional, vector-valued) law of the iterated logarithm (LIL); upper and lower class refinements of the LIL and Chung's LIL.
\end{rmk}
\begin{rmk} The nondegeneracy assumption $\sigma^2>0$ fails only on a closed subspace of infinite codimension in the space of $C^\eta$ observables. Indeed if $\sigma^2=0$ and $x\in X$ is a periodic point, then there exists $N\ge1$ such that $\sum_{j=0}^{N-1}v(f^jx)=0$ for all $v\in C^\eta(X)$ with mean zero.
(See~\cite[Theorem~B]{AMV15} for such a result in a more difficult context.)
Similar comments apply to the covariance matrix $\Sigma$ in the vector-valued ASIP.
Taking one-dimensional projections, we obtain that
the nondegeneracy assumption $\det\Sigma>0$ fails only on a closed subspace of infinite codimension.
\end{rmk}
\section{Statistical properties of singular hyperbolic attractors}
\label{sec:SH}
In this section, we investigate statistical properties of the flow $Z_t$ on a codimension two singular hyperbolic attracting set.
We begin by modifying the Poincar\'e section so that the roof function $\tau$ becomes constant along stable leaves.
Let ${\bar X}$ be the union of $u$-curves in Section~\ref{sec:barf}
and define $X_+=\bigcup_{x\in{\overlineerbarr X}}W^s_x$.
Then $X_+$ is a H\"older-embedded cross-section and we obtain a new
Poincar\'e map $f_+:X_+\to X_+$ with return time function
$\tau_+:X_+\to {\mathbb R}^+$.
We also define the quotient map $\bar f_+=h\circ f_+:{\bar X}\to{\bar X}$ where $h$ is the stable holonomy in $X_+$.
\begin{prop} \label{prop:tau'}
$\tau_+$ is constant along stable leaves in ${\mathcal{W}}^s$ and
$\bar f_+=\bar f$.
\end{prop}
\begin{proof}
For fixed $x\in{\bar X}$, set $T_0=\tau(x)$. The stable foliation
${\mathcal{W}}^s$ is invariant under the time $T_0$-map $Z_{T_0}$ so
$Z_{T_0}(W^s_x)=W^s_{Z_{T_0}x}\subset X_+$.
Hence $\tau_+(y)=T_0$ for each $y\in W^s_x$.
Next, recall that $W^s_{fx}(X)$ is the intersection
of $\bigcup_{|t|<{\epsilon}_0}Z_t W^s_x$ with $X$ for suitably chosen ${\epsilon}_0$.
Then $\bar f x$ is the unique intersection point of $\bigcup_{|t|<{\epsilon}_0}Z_t W^s_x$ with ${\bar X}$.
But $f_+ x=Z_t fx$ for some small $t$ so $\bar f_+ x$ also lies in the intersection of $\bigcup_{|t|<{\epsilon}_0}Z_t W^s_x$ with ${\bar X}$. Hence $\bar f_+ x=\bar f x$.
\end{proof}
In this section, we work with the new Poincar\'e map and
roof function which we relabel $f:X\to X$ and $\tau:X\to{\mathbb R}^+$.
In doing so we lose the smoothness properties of $f$ and $\tau$ --- they are now only piecewise H\"older. However we gain the property that
$\tau$ is constant along the stable foliation in $X$.
Since $\bar f:{\bar X}\to{\bar X}$ is unchanged, we still have that $\bar f$ is piecewise $C^{1+{\epsilon}}$, and the results on $\bar f$ in Section~\ref{sec:barf} and the physical measures and statistical properties in Section~\ref{sec:stat} remain valid.
Define the suspension
\[
X^\tau=\{(x,u)\in X\times{\mathbb R}: 0\le u\le\tau(x)\}/\sim\quad\text{where $(x,\tau(x))\sim(fx,0)$},
\]
and the suspension flow $(x,u)\mapsto(x,u+t)$ (computed modulo identifications).
\begin{thm} \label{thm:spectral} There exists a
finite number of
ergodic $Z_t$-invariant probability measures $\mu_{M,1},\dots,\mu_{M,s}$ whose basins cover a subset of $U_0$ of full Lebesgue measure.
\end{thm}
\begin{proof}
For each $\mu_j$ in Corollary~\ref{cor:spectral}, we obtain an ergodic flow-invariant probability measure $\mu_j^\tau=\mu_j\times{\rm Lebesgue}/\int_X\tau\,d\mu_j$
on $X^\tau$. The projection $\pi^\tau:X^\tau\to M$, $\pi^\tau(x,u)=Z_ux$ defines a semiconjugacy from $X^\tau$ to $M$ and
$\mu_{M,j}=\pi^\tau_*\mu_j^\tau$ is an ergodic $Z_t$-invariant probability measure on $M$.
By~\cite[Section~7]{APPV09}, these form a finite family of physical measures
$\mu_{M,j}$ for the flow $Z_t$ whose basins cover a subset of
$U_0$ of full Lebesgue measure.
\end{proof}
Suppose without loss that there is a unique physical measure $\mu_M=\pi^\tau_*\mu^\tau$ where $\mu^\tau=\mu\times{\rm Lebesgue}/\int_X\tau\,d\mu$ (in the notation above).
Recall that $\bar\mu$, and hence $\mu$, is mixing up to a finite cycle of length $k\ge1$.
By shrinking the cross-section $X$ we may suppose without loss that the measure $\mu$ on $X$ is mixing.
Define the {\em induced} roof function
\[
\varphi:{\bar Y}\to{\mathbb R}^+,\qquad \varphi(y)=
\textstyle{\sum_{\ell=0}^{\rho(y)-1}}\tau(\bar f^\ell y).
\]
\begin{prop} \label{prop:tail}
$\mu_Y(\varphi>t)=O(t^{-\beta})$ for any $\beta>0$.
\end{prop}
\begin{proof}
A standard general calculation (see for example~\cite[Proposition~A.1]{BMprep}) shows that
\[
\mu_Y(\varphi>t)\le \mu_Y(\rho>k)+\bar\rho \mu(\tau>t/k),
\]
for all $t>0$, $k\ge1$, where $\bar\rho=\int_Y\rho\,d\mu_Y$.
In particular,
since $\rho$ has superpolynomial tails and $\tau$ has at most logarithmic singularities, there is a constant $c>0$ such that
$\mu_Y(\varphi>t)=O( k^{-2\beta}+ e^{-ct/k})$.
Now take $k=[t^{1/2}]$; then $k^{-2\beta}=O(t^{-\beta})$ and $e^{-ct/k}\le e^{-ct^{1/2}}=O(t^{-\beta})$, as required.
\end{proof}
Recall that $\bar F:{\bar Y}\to{\bar Y}$ is a Gibbs-Markov map with partition $\alpha$ and separation time $s(y,y')$.
\begin{prop} \label{prop:varphi}
There exists $\theta\in(0,1)$ and $C>0$ such that
\[
|\varphi(y)-\varphi(y')|\le C\theta^{s(y,y')}
\quad \text{for all $y,y'\in a$, $a\in\alpha$.}
\]
\end{prop}
\begin{proof}
We can write $\tau=\tau_0+\tau_1$ where $\tau_0$ is as in previous sections and in particular satisfies the estimate in
Proposition~\ref{prop:tau}, and $\tau_1$ is $C^{\epsilon}$.
Setting $\theta=2^{-{\epsilon}}$ and using uniform expansion of $\bar f$,
\[
|\tau_1(\bar f^\ell y)-\tau_1(\bar f^\ell y')|
\le {|\tau_1|}_{C^{\epsilon}}|\bar f^\ell y-\bar f^\ell y'|^{\epsilon}
\le C_1
\theta^{\rho(y)-\ell}|\bar F y-\bar F y'|^{\epsilon}.
\]
Combining this with the estimate for $\tau_0$, we obtain that
$\sum_{\ell=0}^{\rho(y)-1}|\tau(\bar f^\ell y)-\tau(\bar f^\ell y')|
\le C_1' |\bar F y-\bar F y'|^{\epsilon}$.
By~\eqref{eq:s}, $|\bar F y-\bar F y'|=O(2^{-s(y,y')})$ and the result follows.
\end{proof}
\subsection{Statistical limit laws for the flow}
\label{sec:statflow}
If $\Lambda=\operatorname{supp}\mu_M$ contains no equilibria, then $\Lambda$ is a nontrivial hyperbolic basic set for an Axiom~A flow and the CLT for H\"older observables
follows from~\cite{Ratner73,MT04}. Moreover,~\cite{DenkerPhilipp84} obtains a version of the (scalar) ASIP that implies the functional CLT and functional LIL.
When $\Lambda$ contains equilibria, the CLT and its functional version still hold by~\cite{HM07}, at least for geometric Lorenz attractors.
As pointed out in~\cite{BMprep}, a simpler argument than in~\cite{HM07} applies in general situations where the roof function is unbounded
and includes the entire class of singular hyperbolic attractors analysed in this paper. We refer to the introduction of~\cite{BMprep} for a more comprehensive list of statistical limit laws, with precise statements, that can be obtained in this way.
\subsection{Mixing and superpolynomial mixing for the flow}
\begin{thm} \label{thm:super}
There is a $C^2$-open and $C^\infty$-dense set of singular hyperbolic flows such that each nontrivial attractor $\Lambda$ is mixing with superpolynomial decay of correlations: for any $\beta>0$,
\[
\Big|\int_\Lambda v\,w\circ Z_t\,d\mu_M-
\int_\Lambda v\,d\mu_M
\int_\Lambda w\,d\mu_M\Big|\le Ct^{-\beta}\quad\text{for all $t>0$},
\]
for all $v,w:M\to{\mathbb R}$ such that one of $v$ or $w$ is
$C^\infty$ and the other is H\"older. Here $C$ is a
constant depending on $v$, $w$ and $\beta$.
\end{thm}
\begin{proof}
If $\Lambda=\operatorname{supp} \mu_M$ contains no equilibria, then $\Lambda$ is uniformly hyperbolic and the result is due to~\cite{Dolgopyat98a,FMT07}.
The general case follows essentially from~\cite{M07,M09}.
More precisely,
we have seen that the semiflow and flow are modelled as a suspension over a Young tower with superpolynomial tails. Using the induced roof function
$\varphi:Y\to{\mathbb R}^+$,
we obtain a suspension $Y^\varphi$ over the uniformly hyperbolic map
$F:Y\to Y$ where the roof function $\varphi:Y\to{\mathbb R}^+$ has superpolynomial tails.
We are now in a position to apply~\cite[Theorem~3.1]{BBMsub}
(see also~\cite[Theorem~4.1]{rapid}).
Conditions~(3.1) and~(3.2) in~\cite{BBMsub} follow from Propositions~\ref{prop:secUH} and~\ref{prop:sec-cone}.
Moreover, $\varphi$ is constant along stable leaves by Proposition~\ref{prop:tau'} and projects to a well-defined roof function
$\varphi:{\bar Y}\to{\mathbb R}^+$ satisfying the estimate in Proposition~\ref{prop:varphi}, which is condition~(3.3) in~\cite{BBMsub}. Thus the suspension flow on $Y^\varphi$ is a skew product Gibbs-Markov flow in the terminology of~\cite{BBMsub}.
Hence
superpolynomial mixing follows from~\cite[Theorem~3.1]{BBMsub}
subject to a nondegeneracy condition (absence of approximate eigenfunctions).
Finally, it is shown in~\cite{FMT07} that absence of approximate eigenfunctions is $C^2$-open and $C^\infty$-dense (cf.~\cite[Remark~2.5]{M09} or~\cite[Subsection~5.2]{rapid}).
\end{proof}
We have already seen that statistical limit laws such as the CLT hold for all singular hyperbolic flows. In the situation of Theorem~\ref{thm:super}, we can obtain such results also for the time-one map of a singular hyperbolic flow.
\begin{cor} \label{cor:super}
Assume that $Z_t:\Lambda\to\Lambda$ has superpolynomial decay of correlations as in Theorem~\ref{thm:super}.
Let $v:M\to{\mathbb R}$ be $C^\infty$ (or at least sufficiently smooth) with mean zero.
Then the ASIP holds for $v$ under the time-one map $Z_1$.
In particular, the limit
$\sigma^2=\lim_{n\to\infty}n^{-1}\int_\Lambda(\sum_{j=0}^{n-1}v\circ Z_j)^2\,d\mu_M$ exists, and after passing to an enriched probability space, there exists a sequence $A_0,A_1,\ldots$ of i.i.d.\ normal random variables with mean zero and variance $\sigma^2$ such that
\[
\sum_{j=0}^{n-1}v\circ Z_j=\sum_{j=0}^{n-1}A_j
+O(n^{1/4}(\log n)^{1/2}(\log\log n)^{1/4}) \quad \text{a.e.}
\]
Moreover, if $\sigma^2=0$, then for every periodic point $q\in\Lambda$, there exists $T>0$ (independent of $v$) such that $\int_0^Tv(Z_tq)\,dt=0$.
\end{cor}
\begin{proof}
This is proved in the same way as~\cite[Theorems~B and C]{AMV15}.
\end{proof}
In the case of the classical Lorenz attractor, it was shown in~\cite{LMP05} and~\cite{AMV15} that mixing and superpolynomial mixing are automatic. The proof exploits the {\em locally eventually onto (l.e.o.)}~property as well as smoothness properties of the stable foliation.
We now show that the mixing argument in~\cite{LMP05} does not require the stable foliation to be smooth.
In the general situation of this paper, we assume hypotheses that are more complicated to state but which are implied by l.e.o.\
for the classical Lorenz attractor.
We require that $\Lambda$ contains at least one equilibrium.
Let $q\in{\mathcal S}$ be the corresponding singularity for $\bar f:{\bar X}\to{\bar X}$.
(Again, $\bar f$ is not defined at $q$.)
Assume that the set of preimages of $q$ under iterates of $\bar f$ is
dense in ${\bar X}$.
(This condition is always satisfied for geometric Lorenz attractors.)
By Lemma~\ref{lem:AFLV} and Remark~\ref{rmk:ALP}, we can construct an induced Gibbs-Markov map $\bar F=\bar f^\sigma:{\bar Y}\to{\bar Y}$ where the inducing set ${\bar Y}$ contains~$q$. Let $K=\bigcup_{\ell\ge 0}\bar f^\ell{\bar Y}$; this is an open and dense full measure subset of ${\bar X}$. Our final assumption is
that
$\bar f^pq_+=\lim_{y\to q+}\bar f^py\in K$ for some $p\ge1$. (This would work equally well with $q_+$ replaced by $q_-$.)
\begin{thm} \label{thm:mix}
Under the above assumptions,
$\Lambda$ is automatically mixing (and even Bernoulli).
\end{thm}
\begin{proof} We sketch the proof following~\cite{LMP05}.
By~\cite{Ratner78}, it suffices to show that the quotient suspension semiflow
$\bar f_t^\tau:{\bar X}^\tau\to{\bar X}^\tau$ is weak mixing. Equivalently,
the cohomological equation $u\circ \bar f=e^{ib\tau}u$ has no measurable solution $u:{\bar X}\to S^1$ for any $b\neq0$.
(Here $S^1$ denotes the unit circle in ${\mathbb C}$.)
Suppose for contradiction that there exists $u:{\bar X}\to S^1$ measurable and
$b\neq0$ such that $u\circ \bar f=e^{ib\tau}u$.
A Liv\v{s}ic regularity theorem
of~\cite{BruinHollandNicol05}, exploiting the fact that
$\bar F$ is Gibbs-Markov and that
the roof function $\tau$ is H\"older with
at most logarithmic growth (Lemma~\ref{lem:log}) ensures
that $u$ has a version that is continuous on $K$.
Also, $q\in {\bar Y}\subset K$. Choose $p\ge1$ with
$\bar f^pq_+\in K$. Then
$u\circ \bar f^p=e^{ib\tau_p}u$ where
$\tau_p=\sum_{j=0}^{p-1}\tau\circ \bar f^j$. By
Remark~\ref{rmk:log}, $\tau_p(y)\ge\tau(y)\to\infty$ as
$y\to q_+$, whereas $u(y)\to u(q)$ and
$u(\bar f^py)\to u(\bar f^pq_+)$. Since $b\neq0$, this
contradicts the equality $u\circ \bar f^p=e^{ib\tau_p}u$.
\end{proof}
\begin{rmk} If we assume in addition that the
stable foliation ${\mathcal{W}}^s$ for the flow is $C^{1+{\epsilon}}$,
then we can deduce exponential decay of correlations
following~\cite{AraujoM16}.
However, without smoothness
of ${\mathcal{W}}^s$, the roof function $\tau$ (on the modified
cross-section) is only H\"older and the cancellation
argument of~\cite{Dolgopyat98a} fails. In fact, we are
unable even to prove superpolynomial mixing for fixed
flows (without perturbing as in Theorem~\ref{thm:super}).
It should be possible to use the
techniques in~\cite{AMV15} to prove that the stable and
unstable foliations (defined appropriately) for the flow
are not jointly integrable -- this is a stronger property
than mixing. However, we do not see how to use this to
prove superpolynomial mixing when $\tau$ is only H\"older.
\end{rmk}
\appendix
\section{Theorem of Alves {\em et al.}~\cite{AFLV11}}
In this appendix, we recall a result of Alves {\em et al.}~\cite{AFLV11} that is required in Section~\ref{sec:spectral}.
Although the argument in~\cite{AFLV11} is essentially correct, there are certain problems with the formulation of the hypotheses. First,
the hypotheses (C2) and (C3) in~\cite{AFLV11} are stated too strongly, since
the right-hand sides of their conditions are zero for
points $x\neq y$ equidistant from ${\mathcal S}$, whereas the left-hand sides are generally nonzero. Second, the hypotheses are not stated strongly enough for
the first half of the proof of~\cite[Lemma~5.1]{AFLV11}, since the estimate for $d(x,{\mathcal S})^{-\alpha}$ is false in general.
We state below a corrected version of the hypotheses in~\cite{AFLV11}. The conclusion in Theorem~\ref{thm:AFLV} is identical to that in~\cite[Theorem~C]{AFLV11}, and the proof is largely unchanged.
Throughout, $(M,d)$ is a compact Riemannian manifold and
$f:M\to M$ is a local $C^{1+}$ diffeomorphism with singularity set ${\mathcal S}$.
We suppose that there are constants $\eta\in(0,1)$ and $C,\,q>0$ such that
\begin{itemize}
\item[(C0)] $\operatorname{Leb}(x:d(x,{\mathcal S})\le{\epsilon})\le C{\epsilon}^\eta$ for all ${\epsilon}\ge0$.
\item[(C1)] $C^{-1}d(x,{\mathcal S})^q\le \|Df(x)v\|\le Cd(x,{\mathcal S})^{-q}$,
for all $x\in M\setminus{\mathcal S}$, $v\in T_xM$ with $\|v\|=1$.
\item[(C2)] $\big|\log\|Df(x)^{-1}\|-\log\|Df(y)^{-1}\|\big|\le Cd(x,y)^\eta(\|Df(x)^{-1}\|^q+\|Df(x)^{-1}\|^{-q})$ for all $x,y\in M\setminus{\mathcal S}$
with $d(x,y)<d(x,{\mathcal S})/2$.
\item[(C3)] $\big|\log|\det Df(x)|-\log|\det Df(y)| \big|\le C d(x,y)^\eta d(x,{\mathcal S})^{-q}$ for all $x,y\in M\setminus {\mathcal S}$
with $d(x,y)<d(x,{\mathcal S})/2$.
\end{itemize}
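For orientation, in the application of this paper (cf.\ the proof of Lemma~\ref{lem:AFLV}) these conditions are verified for the one-dimensional map $\bar f$ with ${\mathcal S}$ the finite set of singularities/discontinuities; for instance, near a singularity $x_0$ corresponding to a Lorenz-like equilibrium one has $|D\bar f(x)|\asymp d(x,x_0)^{\omega-1}$ with $\omega\in(0,1)$, so (C1) holds there with $q=1-\omega$.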
Recall~\cite[Definition~1.2]{AFLV11} that a measure $\mu$ is expanding
if $\log\|(Df)^{-1}\|$ is integrable with respect to $\mu$ and
$\int_M\log\|(Df)^{-1}\|\,d\mu<0$.
Let $\operatorname{Cov}(v,w)= \int_M v\,w\,d\mu-\int_M v\,d\mu\int_M w\,d\mu $.
\begin{thm}[ {\cite[Theorem~C]{AFLV11}} ] \label{thm:AFLV}
Let $f:M\to M$ be a $C^{1+}$ local diffeomorphism satisfying (C0)--(C3), and let $\alpha\in(0,1)$.
Let $\mu$ be an ergodic expanding absolutely continuous invariant probability measure with $d\mu/d\operatorname{Leb}\in L^p$ for some $p>1$.
\noindent
(1) Suppose that there exists $\beta>1$ and $C>0$ such that
$|\operatorname{Cov}(v,w\circ f^n)|\le C\|v\|_{C^\alpha} |w|_\infty\,n^{-\beta}$
for all $v\in C^\alpha$, $w\in L^\infty$, $n\ge1$.
Then there is a full branch Gibbs-Markov induced map $F=f^\rho:Y\to Y$, where $\rho:Y\to{\mathbb Z}^+$ is constant on partition elements and satisfies $\operatorname{Leb}(y\in Y:\rho(y)>n)=O(n^{-(\beta-1)})$.
Moreover, there are constants $C,\,{\epsilon}>0$ such that
\begin{align} \label{eq:hyptime}
\textstyle{\sum_{\ell=0}^{\rho(y)-1}}d(f^\ell y,f^\ell y')^\eta d(f^\ell y,{\mathcal S})^{-q}\le Cd(Fy,Fy')^{\epsilon},
\end{align}
for all $y,y'$ lying in the same partition element.
\noindent
(2) Suppose that $d\mu/d\operatorname{Leb}$ is bounded below on its support and that there exist $\gamma\in(0,1]$, $C,\,c>0$ such that
$|\operatorname{Cov}(v,w\circ f^n)|\le C\|v\|_{C^\alpha} |w|_\infty\,e^{-cn^\gamma}$
for all $v\in C^\alpha$, $w\in L^\infty$, $n\ge1$.
Then the conclusion in (1) holds and moreover for any $\gamma'\in(0,\gamma/(3\gamma+6))$ there exists $c'>0$ such that
$\operatorname{Leb}(y\in Y:\rho(y)>n)=O\big(e^{-c'n^{\gamma'}}\big)$.
\end{thm}
\begin{rmk} \label{rmk:fF}
The estimate~\eqref{eq:hyptime} is
a crucial component of the proofs
in~\cite{AFLV11,AlvesLuzzattoPinheiro05}.
(See the calculation at the end of the proof of~\cite[Lemma~4.1]{AlvesLuzzattoPinheiro05}.)
We make it explicit here since it is used in the proof of Proposition~\ref{prop:tau}.
\end{rmk}
\begin{rmk} \label{rmk:ALP}
Let $x\in M$ be any point with dense preimages in $M$. By~\cite[Remarks~1.4]{AlvesLuzzattoPinheiro05},
the inducing set $Y$ can be chosen to be an open ball containing $x$.
\end{rmk}
In the remainder of this appendix, we indicate the modifications to the argument in~\cite{AFLV11} required to obtain the corrected version of Theorem~\ref{thm:AFLV}.
We begin by noting that a consequence of (C1) and (C2) is that
\[
\big|\log\|Df(x)^{-1}\|-\log\|Df(y)^{-1}\| \big|\le C d(x,y)^\eta d(x,{\mathcal S})^{-q^2}
\quad\text{for all $x,y\in M\setminus {\mathcal S}$.}
\]
Combined with (C3),
this means that the $C^{1+}$ version of the $C^2$ setup in~\cite{AlvesBonattiViana00,AlvesLuzzattoPinheiro05} is satisfied.
It is well-known, and routine, that the theory of hyperbolic times and the resulting constructions in~\cite{AlvesBonattiViana00,AlvesLuzzattoPinheiro05} work just as well in the $C^{1+}$ setting.
Hence as in~\cite{AFLV11}, it suffices to
verify the hypotheses of~\cite[Theorem~2]{AlvesLuzzattoPinheiro05}.
This all proceeds exactly as in~\cite{AFLV11} except for the estimate of $\phi_{1,k}$ in~\cite[Lemma~5.1]{AFLV11}.
Recall from~\cite{AFLV11} that
$\phi_1=\log\|(Df)^{-1}\|$
and that
$\phi_{1,k}=\phi_1 1_{\{|\phi_1|\le k\}}$.
(The definition in~\cite{AFLV11} has
$\phi_{1,k}=\phi_1 1_{\{\phi_1\le k\}}$,
but it is clear from the proof of~\cite[Lemma~4.3]{AFLV11} that this is what was meant.)
\begin{prop} \label{prop:AFLV}
For any $\alpha>0$, there exists $\eta'\in(0,1)$, $C>0$ such that
$\|\phi_{1,k}\|_{C^{\eta'}}\le Ce^{\alpha k}$.
\end{prop}
\begin{proof}
We can suppose without loss that $\alpha<2q$.
Let $x,y\in M$. It is immediate that $|\phi_{1,k}(x)|\le k$ and that
$|\phi_{1,k}(x)-\phi_{1,k}(y)|\le 2k$.
Also, by (C2), assuming without loss that $\phi_{1,k}(x)\le \phi_{1,k}(y)$,
\[
|\phi_{1,k}(x)-\phi_{1,k}(y)| \le C_1 d(x,y)^\eta (e^{q\phi_{1,k}(y)}+e^{-q\phi_{1,k}(x)})
\le C_1' d(x,y)^\eta e^{q k}.
\]
The inequality $\min\{1,a\}\le a^{\epsilon}$ holds for all $a\ge0$, ${\epsilon}\in[0,1]$.
Hence
taking ${\epsilon}=\frac12\alpha/q$ and $\eta'={\epsilon}\eta$ we obtain that
\[
|\phi_{1,k}(x)-\phi_{1,k}(y)|
\le C_2 k\min\{1,d(x,y)^\eta e^{q k}\}
\le C_2k
d(x,y)^{\eta'} e^{\frac12\alpha k}\le C_2' d(x,y)^{\eta'} e^{\alpha k}.
\]
We have shown that $\|\phi_{1,k}\|_{C^{\eta'}}=O( k+e^{\alpha k})
=O( e^{\alpha k})$ as required.
\end{proof}
The remainder of the proof of Theorem~\ref{thm:AFLV} proceeds exactly as in~\cite{AFLV11}. (We note that in~\cite{AFLV11} it is asserted that $\eta'=\alpha$, but this is not required in the proof.)
\def$'${$'$}
\def\polhk#1{\setbox0=\hbox{#1}{\ooalign{\hidewidth
\lower1.5ex\hbox{`}\hidewidth\crcr\unhbox0}}}
\end{document}
\begin{document}
\setlength\parindent{24pt}
\title{\textbf{On the vanishing of cohomologies of $p$-adic
Galois representations associated with elliptic curves}}
\begin{abstract}
Let $K$ be a $p$-adic field and $E$ an elliptic curve
over $K$ with potential good reduction.
For some large Galois extensions $L$ of $K$ containing
all $p$-power roots of unity,
we show the vanishing of certain Galois cohomology groups
of $L$ with values in the $p$-adic representation associated with $E$.
We use these to prove analogous results in the
global case.
This generalizes some results of Coates, Sujatha and Wintenberger.
\end{abstract}
Keywords: $p$-adic Galois representations, elliptic curves
AMS 2000 Mathematics subject classification: 11F80, 11G07, 11F85
\section{Introduction}
The vanishing of cohomology groups associated with $p$-adic Galois
representations defined by elliptic curves is one of the useful
results towards generalization of methods in Iwasawa theory
to larger Galois extensions. Such vanishing
enables the computation of Euler characteristics for discrete modules
associated to $p$-adic Galois representations
\cite{CSW} and Selmer groups of elliptic
curves over extensions containing all $p$-power
roots of unity \cite{CH}, \cite{CSS}. Our
purpose in this paper is to show the vanishing
of cohomology groups with values in a geometric $p$-adic Galois
representation with respect to some large Galois extensions.
In particular, we consider extensions of a
$p$-adic field obtained by adjoining the coordinates
of $p$-power torsion points on an elliptic curve.
Let $p$ be a prime number. For the moment, we let $K$ be
any field with characteristic not equal to $p$.
Fix a separable closure $\overline{K}$ of $K$.
Put $G_K := \Gal(\overline{K}/K)$, the absolute Galois
group of $K$.
Let $X$ be a proper smooth variety defined over $K$.
For each $i \geq 0$, we let
\begin{equation}\label{defnetale}
V = H^{i}_{\text{\'et}} (X_{\overline{K}}, \mathbb{Q}_p)
= \varprojlim H^{i}_{\text{\'et}} (X_{\overline{K}}, \mathbb{Z} / {p^n \mathbb{Z}})
\otimes_{\mathbb{Z}_p} \mathbb{Q}_p
\end{equation}
denote the $i$th \'etale cohomology group of
$X_{\overline{K}}:= X \otimes_K {\overline{K}}$
having coefficients in $\mathbb{Q}_p$, which is a finite-dimensional
vector space over $\mathbb{Q}_p$. We denote by
\[ \rho : G_K \longrightarrow \GL_{\mathbb{Q}_p}(V) \]
the homomorphism giving the action of $G_K$ on the vector space $V$.
For a general finite-dimensional vector space
$V$ over $\mathbb{Q}_p$ and a compact subgroup
$G$ of $\GL(V)$, we write $H^i(G,V)$ ($i = 0,1,\ldots$) for the
cohomology groups of $G$ acting on $V$
defined by continuous cochains, where
$V$ is endowed with the $p$-adic topology.
We say that $V$ has \emph{vanishing $G$-cohomology}
if $H^i(G,V) = 0$ for all $i \geq 0$.
For a Galois representation $(\rho,V)$ as given above,
we denote by $K(V)$ the fixed subfield in $\overline{K}$
by the kernel of $\rho$.
For a subfield $L$ of $\overline{K}$, let
$G_L$ denote the subgroup of $G_K$ corresponding to $L$.
Let $K(\mu_{p^\infty})$ be the smallest field
extension of $K$ which contains all $p$-power roots of unity.
Denote by $G_V$ (resp.\ $H_V$) the image of $G_K$
(resp.\ $G_{K(\mu_{p^\infty})}$) under $\rho$.
We may identify $G_V$ (resp.\ $H_V$) with the Galois
group of $K(V)$ over $K$ (resp.\ $K(V) \cap K(\mu_{p^\infty})$).
We assume henceforth that $K$ is a finite extension
of $\Q_p$. We recall a theorem due to Coates, Sujatha and Wintenberger.
Although the result was originally motivated by computation
of Euler characteristics associated to $V$, this theorem
turned out to be useful in dealing with some problems
in non-commutative Iwasawa theory (cf.\ \cite{CSS}).
\begin{thm}[\cite{CSW}, Theorems 1.1 and 1.5]\label{thm0}
Let $X$ be a proper smooth variety defined over $K$
with potential good reduction. Let $i$ be a positive odd integer
and put $V = H^{i}_{\text{\rm{\'et}}} (X_{\overline{K}}, \mathbb{Q}_p)$.
Then $V$ has vanishing $G_V$-cohomology and vanishing $H_V$-cohomology.
\end{thm}
We proceed further in view of the theorem above.
Consider an arbitrary Galois extension $L/K$
contained in $\overline{K}$.
Put $J_V = \rho(G_L)$. We then ask, when do we
obtain vanishing $J_V$-cohomology?
Clearly if $L$ is ``too close'' to $K(V)$,
the vanishing cannot be attained.
In consideration of the theorem above,
we then expect that if $L$ and $K(V)$ are
sufficiently independent over the field
$K_{\infty,V} := K(V) \cap K(\mu_{p^\infty})$
(or $K$), then $V$ has vanishing $J_V$-cohomology.
This does not necessarily mean that
the intersection $M = K(V) \cap L$ is of
finite degree over $K_{\infty, V}$ (or $K$),
although we have found no
example in which the intersection $M$
is of infinite degree over $K_{\infty,V}$ and
$V$ has vanishing $J_V$-cohomology.
On a related note, what can be said
if $L$ is defined by another geometric representation?
More precisely, suppose we have another $p$-adic
Galois representation $(\rho',V')$.
Let us put $J_V = \rho(G_{K(V')})$
and $J_{V'} = \rho'(G_{K(V)})$.
How does the vanishing of $J_V$-cohomology of $V$
relate with the vanishing of $J_{V'}$-cohomology
of $V'$?
Some results have been
obtained for the vanishing of $H^0 (J_V, V)$. For instance,
Ozeki proved in \cite{Ozeki} that when $V$ is given by an abelian
variety with good ordinary reduction, the vanishing of
$H^0 (J_V, V)$ is equivalent to the property that the degree
of the residue field of $L$ over the residue field $k$ of $K$
has finite $p$-part. It was further shown
that when $V$ is given by an elliptic curve and $L$ is
the field of division points of $p$-power order of
another elliptic curve, $H^0 (J_V, V)$ vanishes depending
on the types of reduction of the elliptic curves involved.
In \cite{KT}, Kubo and Taguchi have shown that $H^0 (J_V, V)$
vanishes in the general setting where $K$ is a complete discrete
valuation field of mixed characteristic and $L$ is a subfield
of the Kummer extension $K(\sqrt[p^\infty]{K^{\times}})$.
After recalling some related facts,
we prove the following result in \S 3 which provides a simple
criterion for determining the vanishing of $J_V$-cohomology
from the Lie algebras of $\Gal(K(V)/(K(V) \cap K(\mu_{p^\infty})))$
and $\Gal (L/(L \cap K(\mu_{p^\infty})))$.
\begin{thm}\label{main}
Let $X$ be a proper smooth variety over $K$ with potential
good reduction and $i$ a positive odd integer. Put
$V = H^{i}_{\text{\rm{\'et}}} (X_{\overline{K}}, \mathbb{Q}_p)$
and $K_{\infty,V} := K(V) \cap K(\mu_{p^\infty})$.
Let $L/K$ be any $p$-adic Lie extension such
that $K(\mu_{p^\infty})$ is of finite degree
over $K_{\infty,L} := L \cap K(\mu_{p^\infty})$.
Assume that the Lie algebras
\[ \Lie(\Gal(K(V)/K_{\infty,V})) \text{ and }
\Lie (\Gal (L/K_{\infty,L}))\] have no common
simple factor. Then $V$ has vanishing $J_V$-cohomology,
where $J_V = \rho(G_L)$.
\end{thm}
For an elliptic curve $E$ over $K$, we denote by
\[ \rho_E : G_K \longrightarrow \GL(T_p(E)) \simeq \GL_{2}(\mathbb{Z}_p) \]
the natural continuous representation
associated with the Tate module $T_p(E)$ of $E$.
We use the usual notation
$V_p(E) = T_p(E) \otimes_{\mathbb{Z}_p} \mathbb{Q}_p$.
Let $\mathbb{Q}_p(r)$ denote the $r$th twist by the
$p$-adic cyclotomic character, where $r \in \mathbb{Z}$.
Note that the dual $V_p(E)^{\vee} = \Hom(V_p(E),\mathbb{Q}_p)$ is
canonically isomorphic to $H^{1}_{\text{\'et}} (E_{\bar{K}}, \mathbb{Q}_p)$.
On the other hand, the Weil pairing allows us
to identify $V_p(E)$ with $V_p(E^\vee)$ in
a canonical way.
Thus we may canonically identify $V_p(E)$ with
$H^{1}_{\text{\'et}} (E^{\vee}_{\bar{K}}, \mathbb{Q}_p(1))$.
We also note that $K(V_p(E)) = K(E_\infty)$,
where $K(E_\infty)$ is the extension of $K$
generated by the coordinates of all the $p$-power
torsion points on the group of $\overline{K}$-valued
points $E(\overline{K})$. By the Weil pairing,
the field $K(E_\infty)$ contains $K(\mu_{p^\infty})$.
In \S \ref{sec:ord} we consider the setting where
the variety in question has good ordinary reduction.
For elliptic curves with good ordinary reduction over
$K$, we obtain a necessary and sufficient condition
on $L$ so that the $p$-adic Galois representation
$V=V_p(E)$ has vanishing $J_V$-cohomology.
\begin{thm}(see Theorem \ref{thm2A}
and Corollary \ref{van-ord-abelvar})\label{thmord}
Let $E$ be an elliptic curve with potential
good ordinary reduction over $K$ and $L$ be a
Galois extension of $K$.
Put $V=V_p(E)$ and $J_V = \rho_E(G_L)$. \\
(1) If the residue field $k_L$ of
$L$ is a potential prime-to-$p$ extension
of the residue field $k$ of $K$
(in the sense of Definition \ref{prime-to-p}),
then $V$ has vanishing $J_V$-cohomology. \\
(2) Assume that $E$ has good ordinary reduction
over $K$ and that $L$ contains $K(\mu_{p^\infty})$
and the coordinates of the $p$-torsion points of $E$.
Then $V$ has vanishing $J_V$-cohomology if and
only if $k_L$ is a potential prime-to-$p$
extension over $k$.
\end{thm}
Now we consider the case where $L=K(V')$ is
given by another ``geometric'' representation $V'$.
In this case, we often encounter that $V'$
also has vanishing $J_{V'}$-cohomology, where
$J_{V'}=\rho'(G_{K(V)})$.
This motivates us to introduce the
notion of ``cohomological coprimality" in
\S \ref{cohom_coprime}.
\begin{thm}(see Theorem \ref{thm3})\label{thm}
Let $X$ be a proper smooth variety over $K$ with potential
good ordinary reduction (in the sense of Definition \ref{BK})
and $i$ a positive odd integer. Let $E/K$ be an elliptic curve
with potential good supersingular reduction.
Put $V = H^{i}_{\text{\rm{\'et}}} (X_{\overline{K}}, \mathbb{Q}_p)$
and $V' = V_p(E)$. Then $V$ and $V'$ are cohomologically coprime.
\end{thm}
Suppose $E$ and $E'$ are elliptic curves over $K$.
In \S \ref{sec:vanish for EC}, we prove some
results on the cohomological coprimality of
$V_p(E)$ and $V_p(E')$. This is done by
distinguishing the reduction types of $E$ and $E'$.
We summarize our results in the following theorem.
We also note that its proof provides extensions
to some of the results obtained in \cite{Ozeki}.
\begin{thm}
Let $E$ and $E'$ be elliptic curves over $K$.
The cohomological coprimality of
$V_p(E)$ and $V_p(E')$ is given by the following table:
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
$E$ & $E'$ & Cohomologically coprime \\
\hline
\multirow{3}{3cm}{\centering ordinary} &
\multirow{1}{5cm}
{\centering ordinary} & No$^{\sharp}$ \\
\cline{2-3}
{} & supersingular & Yes \\
\cline{2-3}
{} & multiplicative & ``No'' \\
\hline
\multirow{3}{3cm}{\centering supersingular \\ with FCM} & \multirow{1}{5cm}{\centering supersingular with FCM} & Yes$^{\ast}$ \\
\cline{2-3}
{} & \multirow{1}{5cm}{\centering supersingular without FCM} & Yes \\
\cline{2-3}
{} & \multirow{1}{5cm}{\centering multiplicative} & ``No'' \\
\hline
\multirow{2}{3cm}{\centering supersingular \\ without FCM} & \multirow{1}{5cm}{\centering supersingular without FCM} & Yes$^{\ast}$ \\
\cline{2-3}
{} & \multirow{1}{5cm}{\centering multiplicative} & ``No'' \\
\hline
multiplicative & multiplicative & ``No''\\
\hline
\end{tabular}
\end{center}
\end{thm}
In the table above, FCM means formal complex multiplication.
The symbol $^{\ast}$ means conditional cohomological
coprimality. The cohomological coprimality in
this case holds under the additional assumption that
the group $E(K(E_\infty))[p^\infty]$ of
$K(E_\infty)$-rational points of
$E$ of $p$-power order is finite. For $\sharp$, refer
to Remark \ref{rem3}. For the case where one of the
elliptic curves has multiplicative reduction, we refer
to Remark \ref{cohom-coprime-mult}.
The rest is provided by Theorem \ref{thm6}
in \S\ref{subsec:good reduction}.
In the last section we use the local results above to
prove cohomological coprimality results associated
with ``global'' $p$-adic Galois representations.
Indeed, the proof of Theorem \ref{thm0} in \cite{CSW}
relies on showing the existence of some special operator in the
Lie algebra
$\mathfrak{h} = \Lie(\rho(G_{K(\mu_{p^\infty})})) \otimes_{\Q_p} \overline{\Q_p}$.
Such an operator satisfies a criterion introduced by
Serre (cf.\ \cite{Ser3}) for the vanishing of Lie algebra cohomology groups,
and hence Theorem \ref{thm0} follows by a
well-known result due to Lazard.
In general, enlarging the field $L$ makes
the corresponding image group $J_V$
smaller and hence the special operator of
$\mathfrak{h}$ may no longer lie in the
Lie algebra
$\mathfrak{j}=\Lie(J_V) \otimes_{\Q_p} \overline{\Q_p}$.
Our methods in the local case ensure
that such an operator still belongs to $\mathfrak{j}$,
which in turn is contained in the corresponding Lie algebra of the
image of the global Galois group.
The proof of Theorem \ref{thm0} as described above
also implies that $V$ has vanishing $G_V$-cohomology
if and only if $V$ has vanishing $G$-cohomology for
an open subgroup $G$ of $G_V$. The same statement holds for $H_V$.
In this paper Galois representations are defined by
objects which satisfy a ``potential condition over $K$'',
in the sense that the given condition is satisfied
after a suitable finite extension $K'$ of $K$.
In most of our proofs we will often encounter the phrase
``replacing $K$ by a finite extension'' in order
to reduce the proof to a more convenient
setting, e.g.\ so that the varieties in question
have good reduction over $K$ and that $K(V)$
contains all the $p$-power roots
of unity. By the above remark,
there is no harm in doing this kind of reduction.
\noindent \emph{Acknowledgements.}
The author would like to express his
sincere gratitude to Yuichiro Taguchi,
who proposed the theme of this paper, for his patience,
helpful advice and contributions to this paper.
He would also like to thank the Ministry
of Education, Culture, Sports, Science and Technology of Japan
for its support. He is currently on study leave from the Institute
of Mathematics, University of the Philippines Diliman.
\section{Preliminaries and review of some known results}
\subsection{Vanishing Cohomology and Cohomological Coprimality}\label{cohom_coprime}
Let $G$ be a topological group and $F$ be a topological field.
A continuous $F$-linear representation of $G$
is a finite-dimensional $F$-vector space $V$ equipped with a
continuous linear action of $G$. Equivalently,
it is a continuous homomorphism $\rho: G \rightarrow \GL_{F}(V)$.
We denote a continuous $F$-linear representation
of $G$ by $(\rho,V)$.
For a prime number $p$,
a continuous $\Q_{p}$-linear representation of $G$
is called a $p$-adic representation of $G$.
For a general finite-dimensional $F$-vector space $V$
and a compact subgroup $\mathcal{G}$ of $\GL_{F}(V)$,
we consider the cohomology groups $H^i(\mathcal{G},V)$
($i = 0, 1, \ldots$) of $\mathcal{G}$ acting on $V$
which are defined by continuous cochains
(where $V$ is endowed with the topology induced by $F$).
\begin{defn}\label{van-cohom}
\normalfont
For $V$ and $\mathcal{G}$ as above, we say that $V$
has \emph{vanishing $\mathcal{G}$-cohomology}
if $H^i(\mathcal{G},V) = 0$ for all $i \geq 0$.
\end{defn}
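For orientation, we note an elementary special case: if $F$ has characteristic $0$ and $\mathcal{G}$ is finite, then $H^i(\mathcal{G},V) = 0$ for all $i \geq 1$ (multiplication by the order of $\mathcal{G}$ is invertible on $V$), so $V$ has vanishing $\mathcal{G}$-cohomology if and only if $V^{\mathcal{G}} = H^0(\mathcal{G},V) = 0$.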
\begin{defn}\label{defn_cohom_coprime}
\normalfont
Let $(\rho,V)$ and $(\rho',V')$ be two
continuous $F$-linear representations of $G$.
Put $\mathcal{G} = \rho(\Ker \rho')$ and
$\mathcal{G}' = \rho'(\Ker \rho)$.
We say that $V$ and $V'$ are
\emph{cohomologically coprime}
if $V$ has vanishing $\mathcal{G}$-cohomology and
$V'$ has vanishing $\mathcal{G}'$-cohomology.
\end{defn}
\begin{remark}
\normalfont
In fact, the above notion of cohomological coprimality
can be formulated for topological modules
over a topological ring with a continuous
action by a fixed topological group $G$. Moreover,
two topological modules (with a continuous action by
$G$) being compared need not
have the same ring of scalars. For our purposes,
we restrict our definition in the above form.
\end{remark}
\subsection{The setup}\label{setup}
Let $K$ be a finite extension of $\Q_p$.
For a $p$-adic representation
$(\rho,V)$ of $G_K$,
we denote by $K(V)$ the fixed subfield of
$\overline{K}$ by the kernel of $\rho$.
Let $\mu_{m}$ denote the group of $m$th roots
of unity for $m \in \Z$ with $m \geq 1$.
Denote by $\mu_{p^\infty}$ the
union of all $\mu_{p^n}$ as $n$ runs over the
set of all positive integers.
We identify $G_V = \rho(G_K)$ with the Galois group
$\Gal(K(V)/K)$.
For an arbitrary Galois extension $L/K$,
we may identify $J_V = \rho(G_L)$ with a
closed subgroup of $G_V$, whose fixed field
$M = K(V)^{J_V}$ is the intersection
of $K(V)$ and $L$. Then the Galois group
$\Gal(M/K)$ may be identified with a quotient of
$G_V$. This latter group is a $p$-adic Lie group and
thus, so is $\Gal(M/K)$ (cf.\ \cite{DSMS}, Theorem 9.6 (ii)).
Hence, $M/K$ is a $p$-adic Lie extension.
If $L = K(\mu_{p^\infty})$, we write $H_V$
for $\rho(G_L)$ instead of $J_V$ and
$M$ in this case will be written as
$K_{\infty,V} := K(V) \cap K(\mu_{p^\infty})$.
When $V = V_p(E) = T_p(E) \otimes_{\Z_p} \Q_p$ is
given by an elliptic curve $E$ over $K$,
we write $K(E_\infty)$ instead of $K(V)$.
In this case we have $K_{\infty,V} = K(\mu_{p^\infty})$
by the Weil pairing.
We have the following diagram of fields:
\begin{center}
\begin{tikzpicture}[
back line/.style={densely dotted},
cross line/.style={preaction={draw=white, -,line width=6pt}}]
\node (A) {$K(V)$};
\node (B) [below of=A, node distance=1.75cm] {$M$};
\node [below of=A, node distance=1.5cm, left of=B, node distance=1.75cm] (C) {$K_{\infty,V}$};
\node [below of=C, node distance=1.75cm] (D) {$K$};
\draw[cross line] (A) -- (B) -- (D);
\draw[cross line] (A) -- (C) -- (D);
\draw[-, bend right] (A) to node [left]{$H_V$} (C);
\draw[-, bend left] (A) to node [right]{$J_V$} (B);
\draw[-, bend right = 70pt] (A) to node [left]{$G_V$} (D);
\end{tikzpicture}
\end{center}
In particular, if $L$ contains $K(\mu_{p^\infty})$,
then $J_V$ is a closed normal subgroup of $H_V$
and $M$ contains $K_{\infty, V}$.
\subsection{Some lemmas}
Let $L$ be a Galois extension of $K$ which contains
$K(\mu_{p^\infty})$. Put $\mathscr{G} = \Gal(L/K)$ and
$\mathscr{H} = \Gal(L/K(\mu_{p^\infty}))$.
Let $\varepsilon : \mathscr{G} \rightarrow \Z_p^{\times}$
be a continuous character of $\mathscr{G}$
whose image is open in $\Z_p^{\times}$.
The group $\mathscr{G}$ acts on $\mathscr{H}$ by inner automorphisms, that is, for
$\sigma \in \mathscr{G}$ and $\tau \in \mathscr{H}$, we have
$\sigma \cdot \tau = \sigma \tau \sigma^{-1}$.
Assume that the following relation holds:
\begin{equation}\label{eqn1}
\sigma \cdot \tau = \tau^{\varepsilon(\sigma)}
\end{equation}
for all $\sigma \in \mathscr{G}$, $\tau \in \mathscr{H}$.
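For orientation, here is a standard example (not needed in what follows) in which relation (\ref{eqn1}) holds with $\varepsilon = \chi$, the $p$-adic cyclotomic character, whose image is open in $\Z_p^{\times}$ since $K$ is a finite extension of $\Q_p$. Take $L = K(\mu_{p^\infty}, \sqrt[p^\infty]{q}\,)$ for some $q \in K^{\times}$ (for instance a uniformizer of $K$), and fix a compatible system $(\zeta_{p^n})_n$ of primitive $p^n$th roots of unity. Every $\tau \in \mathscr{H}$ satisfies $\tau(q^{1/p^n}) = \zeta_{p^n}^{c_n} q^{1/p^n}$ for a compatible system of exponents $c_n$, and a direct computation gives
\[ \sigma \tau \sigma^{-1}(q^{1/p^n}) = \zeta_{p^n}^{\chi(\sigma) c_n} q^{1/p^n} = \tau^{\chi(\sigma)}(q^{1/p^n}) \]
for all $\sigma \in \mathscr{G}$ and $n \geq 1$, so that $\sigma \cdot \tau = \tau^{\chi(\sigma)}$.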
\begin{lemma}\label{lemB}
Let $(\psi, W)$ be a $p$-adic representation of $\mathscr{G}$.
Let $\varepsilon$ be a character as above
and suppose the action of $\mathscr{G}$
on $\mathscr{H}$ satisfies relation (\ref{eqn1}).
Then after a finite extension $K'/K$, the subgroup
$\mathscr{H}$ acts unipotently on $W$.
\end{lemma}
\begin{proof}
Put $d = \dim_{\Q_p} W$.
The result is trivial if $d = 0$. We assume
henceforth that $d$ is nonzero.
We may argue in the same manner as
the proof of Lemma 2.2 of \cite{KT}.
Let $\tau \in \mathscr{H}$ and
$\lambda_1, \ldots, \lambda_d$ be the
eigenvalues of $\psi(\tau)$.
Then relation (\ref{eqn1}) shows that
\[ \{ \lambda_1, \ldots, \lambda_d \}
= \{ \lambda_1^{\varepsilon(\sigma)}, \ldots, \lambda_d^{\varepsilon(\sigma)} \} \]
for all $\sigma \in \mathscr{G}$.
Let $e$ be a positive integer such that
$1 + p^e$ lies in $\varepsilon(\mathscr{G})$.
Such an integer exists since
$\varepsilon(\mathscr{G})$ is open in
$\mathbb{Z}_p^\times$.
For each $i = 1, \ldots, d$, there exists
an integer $r_i$ with $1 \leq r_i \leq d$
such that $\lambda_i^{(1+p^e)^{r_i}} = \lambda_i$.
We then put
\[ m = \text{LCM} \{ (1 + p^e)^r - 1 \mid r = 1,\ldots,d \}. \]
With this choice of $m$ we see that
$\psi(\tau)^m$ is unipotent
since $\lambda_i^m = 1$ for all $i = 1, \ldots, d$.
Hence $\mathscr{H}^m = \{ \tau^m | \tau \in \mathscr{H} \}$
acts unipotently on $W$.
Then the semisimplification
of the restriction $\psi|_{\mathscr{H}}$ to $\mathscr{H}$ is
a sum of characters $\mathscr{H}/\mathscr{H}^m \rightarrow \mu_m$,
after a suitable extension of scalars.
These characters become trivial upon replacing
$K(\mu_{p^\infty})$ by a finite extension, say $K_{\mu}$.
In fact, $K_{\mu} = K'(\mu_{p^\infty})$ for some
finite extension $K'$ of $K$. Replacing $K$ by $K'$, all the eigenvalues of $\psi(\tau)$ are equal to $1$ for every $\tau \in \mathscr{H}$, and hence $\mathscr{H}$ acts unipotently on $W$.
\end{proof}
In this paper, $\chi : \mathscr{G} \rightarrow \mathbb{Z}_p^{\times}$
always denotes the $p$-adic cyclotomic character
(i.e.\ the continuous character such that
$g(\zeta) = \zeta^{\chi(g)}$
for all $g \in \mathscr{G}$ and all
$\zeta \in \mu_{p^\infty}$).
The above lemma will be used later in \S 4
in the case where $\varepsilon$ is the
product of $\chi$ with another
continuous character of $\mathscr{G}$.
\begin{lemma}\label{lemC}
Let $\varphi : \mathscr{U} \rightarrow \GL_{\Q_p}(W)$ be
a representation of a group $\mathscr{U}$
on a finite-dimensional $\Q_p$-vector space $W$.
Suppose $\mathscr{U}$ acts unipotently on $W$.
Then $W^\mathscr{U} = 0$ if and only if $W = 0$.
\end{lemma}
\begin{proof}
One implication is trivial. For the converse, suppose that $W \neq 0$.
Since the action of $\mathscr{U}$ on $W$ is unipotent, the image
$\varphi(\mathscr{U})$ is a subgroup of $\GL_{\Q_p}(W)$ consisting of
unipotent elements. By Kolchin's theorem, $\varphi(\mathscr{U})$ can be
simultaneously conjugated into the group of upper triangular unipotent
matrices; in particular, there is a nonzero vector of $W$ fixed by
every element of $\mathscr{U}$, so that $W^{\mathscr{U}} \neq 0$.
Therefore $W^{\mathscr{U}} = 0$ implies $W = 0$.
\end{proof}
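For instance, if $W = \Q_p^2$ with standard basis $(e_1, e_2)$ and $\mathscr{U}$ acts through the subgroup of $\GL_2(\Q_p)$ generated by
\[ \begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix}, \]
then $W^{\mathscr{U}} = \Q_p e_1 \neq 0$, in accordance with the lemma, since $W \neq 0$.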
\subsection{Ordinary Representations}
\begin{defn}\label{ordrepn}
\normalfont
A $p$-adic Galois representation $V$ of $G_K$ is said to be
\emph{ordinary} if there exists a filtration by $G_K$-invariant subspaces
$\{ \Fil^i V \}_{i \in \mathbb{Z}}$ with the following properties:
\begin{center}
$\Fil^{i+1} V \subseteq \Fil^i V$ for all $i$, \\
$\Fil^i V = V$ for $i \ll 0$ and
$\Fil^i V = 0$ for $i \gg 0$,
\end{center}
such that the inertia subgroup $I_K$ acts on the $i$th graded quotient
$\Fil^i V / \Fil^{i+1} V$ by the $i$th power of the $p$-adic cyclotomic
character.
\end{defn}
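For instance, if $E$ is an elliptic curve over $K$ with good ordinary reduction and $\mathcal{E}(p)^0$ denotes the connected part of the $p$-divisible group of its N{\'e}ron model (see \S\ref{sec:ord}), then $V_p(E)$ is ordinary in this sense: one may take
\[ \Fil^i V_p(E) = V_p(E) \ (i \leq 0), \qquad \Fil^1 V_p(E) = V_p(\mathcal{E}(p)^0), \qquad \Fil^i V_p(E) = 0 \ (i \geq 2), \]
since $I_K$ acts on $V_p(\mathcal{E}(p)^0)$ through the $p$-adic cyclotomic character and trivially on the unramified quotient $V_p(\mathcal{E}(p)^{\text{\'et}})$ (cf.\ Proposition \ref{PropA} below).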
\begin{defn}\label{BK}
\normalfont
We say that a proper smooth variety $X$ over $K$ has
\emph{good ordinary reduction} over $K$ if there exists a
smooth proper model $\mathfrak{X}$ over $\mathcal{O}_K$
with special fiber $\mathcal{Y}$ such that the de Rham-Witt cohomology groups
$H^r(\mathcal{Y}, d\Omega_\mathcal{Y}^s)$ are trivial for all $r$ and all $s$.
We say $X$ has \emph{potential good ordinary reduction} over $K$ if
it has good ordinary reduction after a finite extension $K'/K$.
\end{defn}
In the above definition, $d\Omega_\mathcal{Y}^s$ is the sheaf
of exact differentials on $\mathcal{Y}$.
This definition is due to Bloch-Kato (cf.\ \cite{BK}, Definition 7.2).
Equivalent formulations for this definition are given in Proposition 7.3
of \emph{op.\ cit.} When $X$ is an abelian variety of dimension $g$,
this definition coincides with the property that the
group of $\bar{k}$-points of $\mathcal{Y}$ killed by $p$ is isomorphic to
$\left( \mathbb{Z}/{p \mathbb{Z}} \right)^g$, which is the
classical definition of an abelian variety with good ordinary reduction.
Here, $\bar{k}$ denotes an algebraic closure of the residue field
$k$ of $K$.
The $\text{\'e}$tale cohomology groups of
a proper smooth variety with good ordinary
reduction can be characterized by the
following result of Illusie:
\begin{thm}[\cite{LI}, Cor. 2.7]
Let $X$ be a proper smooth variety over $K$ which has
good ordinary reduction over $K$. Then
the $\text{\'e}$tale cohomology group
$H^{i}_{\text{\rm{\'et}}} (X_{\overline{K}}, \mathbb{Q}_p)$ ($i \geq 0$)
is an ordinary representation of $G_K$.
\end{thm}
\subsection{Lie algebras associated with elliptic curves}
Consider an elliptic curve $E$ over $K$.
The structure of the Lie algebras associated to $E$ is well-known
(cf.\ \cite{Ser1}, Appendix of Chapter IV):
\begin{prop}\label{prop3}
Let $E$ be an elliptic curve over $K$.
Let $\mathfrak{g}:= \Lie(\rho_E(G_K))$ and
$\mathfrak{i}:= \Lie(\rho_E(I_K))$ be
the Lie algebras of the image of $G_K$ and
its inertia subgroup $I_K$ under $\rho_E$, respectively
(these are Lie subalgebras of $\End(V_p(E))$).
\begin{enumerate}
\item[(i)] If $E$ has good supersingular reduction
with formal complex multiplication, then $\mathfrak{g}$
is a non-split Cartan subalgebra of $\End(V_p(E))$
and $\mathfrak{i} = \mathfrak{g}$. We have
$\dim \mathfrak{g} = \dim \mathfrak{i}=2$.
\item[(ii)] If $E$ has good supersingular reduction
without formal complex multiplication, then
$\mathfrak{g} = \End(V_p(E))$ and
$\mathfrak{i} = \mathfrak{g}$. We have
$\dim \mathfrak{g} = \dim \mathfrak{i} = 4$.
\item[(iii)] If $E$ has good ordinary reduction
with complex multiplication, then $\mathfrak{g}$
is a split Cartan subalgebra of $\End(V_p(E))$.
We have $\dim \mathfrak{g} = 2$ and $\mathfrak{i}$
is a $1$-dimensional subspace of $\mathfrak{g}$.
\item[(iv)] If $E$ has good ordinary reduction
without complex multiplication, then $\mathfrak{g}$
is the Borel subalgebra of $\End(V_p(E))$ which
corresponds to the kernel of the reduction map
$V_p(E) \rightarrow V_p(\widetilde{E})$.
We have $\dim \mathfrak{g} = 3$ and
$\mathfrak{i}$ is a $2$-dimensional subspace of
$\mathfrak{g}$ with $\mathfrak{i}/[\mathfrak{i},\mathfrak{i}]$
of dimension $1$.
\item[(v)] If $E$ has $j$-invariant with negative $p$-adic valuation,
then $\mathfrak{g}$ is the subalgebra of $\End(V_p(E))$
which consists of the endomorphisms $u$ for which $u(V_p(E)) \subset W$,
where $W$ is the unique $G_K$-stable $1$-dimensional subspace of $V_p(E)$.
Moreover, $\mathfrak{i} = \mathfrak{g}$. We have
$\dim \mathfrak{g} = \dim \mathfrak{i} = 2$.
\end{enumerate}
\end{prop}
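To make cases \emph{(iv)} and \emph{(v)} concrete: in a basis of $V_p(E)$ whose first vector spans the distinguished line (the kernel of the reduction map in \emph{(iv)}, the subspace $W$ in \emph{(v)}), we have
\[ \mathfrak{g} = \left\{ \begin{pmatrix} a & b \\ 0 & d \end{pmatrix} : a,b,d \in \Q_p \right\} \ \text{in case \emph{(iv)}}, \qquad
\mathfrak{g} = \left\{ \begin{pmatrix} a & b \\ 0 & 0 \end{pmatrix} : a,b \in \Q_p \right\} \ \text{in case \emph{(v)}}, \]
which makes the stated dimensions evident.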
Denote the ring of integers of $K$ by $\mathcal{O}_K$.
In the proposition above, an elliptic curve $E$ over $K$
with good supersingular reduction is said to have
\emph{formal complex multiplication over $K$} if
the endomorphism ring of the $p$-divisible group
$\mathcal{E}(p)$ associated with the N{\'e}ron model
$\mathcal{E}$ of $E$ over $\mathcal{O}_K$ is a
$\mathbb{Z}_p$-module of rank $2$.
We simply say $E$ has \emph{formal complex multiplication}
if $E \times_K K'$ has formal complex multiplication for
some algebraic extension $K'$ of $K$.
Then the quadratic field
$\text{End}_{\mathcal{O}_{K'}} (\mathcal{E}(p)) \otimes_{\mathbb{Z}_p} \mathbb{Q}_p$
is called the \emph{formal complex multiplication field} of $E$.
We can take for $K'$ a finite extension of $K$ of degree at most $2$.
The subspace $W$ in \emph{(v)} of the proposition
is isomorphic to the twist of $\Q_p(1)$ by an unramified character
of order at most $2$.
\section{Some Criteria for the Vanishing of $J_V$-cohomology} \label{sec: vanishing}
Let $X$ be a proper smooth variety over $K$
with potential good reduction. Let $i$ be a positive odd integer and put
$V = H^{i}_{\text{\'et}} (X_{\overline{K}}, \mathbb{Q}_p)$. Let $\rho$
be the continuous homomorphism attached to $V$ as in the Introduction
and let $G_V = \rho(G_K)$ and $H_V = \rho (G_{K(\mu_{p^\infty})})$.
In this section, we prove Theorem \ref{main}
given in the Introduction.
Before embarking on the proofs, we give a few remarks.
Recall from \S\ref{setup} that we have the isomorphisms $G_V \simeq \Gal(K(V)/K)$ and
$H_V \simeq \Gal(K(V)/K_{\infty,V})$.
If $L/K$ is a Galois extension of $K$,
then we identify $J_V := \rho(G_L) \simeq \Gal(K(V)/M)$
where $M = K(V) \cap L$.
\begin{lemma}\label{lemma6}
Let $V$, $G_V$, $H_V$ and $J_V$ be as above.
\\
(1) If $J_V$ has finite index in $G_V$,
then $V$ has vanishing $J_V$-cohomology. \\
(2) If $L$ contains $K(\mu_{p^\infty})$ and
$J_V$ has finite index in $H_V$,
then $V$ has vanishing $J_V$-cohomology.
\end{lemma}
\begin{remark}\label{rem1}
\normalfont
By Galois theory, we have
$\Gal(M/K) \simeq G_V/J_V$
(resp.\ $\Gal(M/K_{\infty,V}) \simeq H_V/J_V$)
in the discussion above.
So the condition that $J_V$ has finite index in
$G_V$ (resp.\ $H_V$) is equivalent to the finiteness
of the degree of the extension $M$ over $K$
(resp.\ $K_{\infty,V}$).
\end{remark}
\begin{proof}
Replacing $K$ with a finite extension, we may assume
$G_V = J_V$ (resp.\ $H_V = J_V$).
It follows immediately from Theorem \ref{thm0}
that $V$ has vanishing $J_V$-cohomology.
\end{proof}
\begin{remark} \label{rem2}
\normalfont
Let $V$ be any $p$-adic representation of $G_K$ as above and
$L/K$ a Galois extension containing
$K(\mu_{p^\infty})$. Take a $G_K$-stable
$\mathbb{Z}_p$-lattice $T$ of $V$.
It is known (cf.\ e.g.\ \cite{KT}, Lemma 2.1) that
the vanishing of $H^0(J_V, V)$, with $J_V = \rho(G_L)$,
is equivalent to the finiteness of $(V/T)^{G_L}$.
Hence, since $V$ has vanishing $H_V$-cohomology,
we have the relation
$(1)\Rightarrow(2)\Rightarrow(3)$
between the following statements:
\\
(1)
$M$ is a finite extension
of $K(\mu_{p^\infty})$,
\\
(2)
$V$ has vanishing $J_V$-cohomology, and
\\
(3)
$(V/T)^{G_L}$ is a finite group.
\\
However, the converses do not hold in general.
In some cases though, we have $(3)\Rightarrow(1)$,
as we shall see in Corollary \ref{cor_ss}.
\end{remark}
To prove Theorem \ref{main} given in the Introduction,
we need the following lemma.
\begin{lemma}\label{lemma1}
Let $X$ be a proper smooth variety over a $p$-adic field
$K$ with potential good reduction and $i$ be
a positive integer. Consider the representation
$(\rho, V)$, where
$V = H^i_{\text{\rm{\'et}}}(X_{\bar{K}}, \mathbb{Q}_p)$ and
let $\det \rho: G_K \rightarrow \mathbb{Z}_p^{\times}$
be the character obtained by
composing $\rho$ with the determinant map.
Then $\det \rho = \chi^{-\frac{id}{2}}$ on an open
subgroup of $G_K$, where $d = \dim_{\Q_p} V$
and $\chi$ is the $p$-adic cyclotomic character.
\end{lemma}
\begin{proof}
Replacing $K$ by a finite extension,
we may assume $V$ is crystalline.
Consider the filtered $\varphi$-module
$D_{\mathrm{cris}}(V)$ and let $\Phi = \varphi^f$,
where $q=p^f$ is the cardinality
of the residue field of $\mathcal{O}_K$.
Let $\delta$ denote the determinant
of the endomorphism $\Phi$.
By \cite{C-LS}, the characteristic polynomial
of $\Phi$ has rational coefficients and
its roots are Weil numbers of weight $i$.
Thus, in particular, $\delta$ is a rational number
and it has archimedean absolute value
equal to $q^{t}$ where $t=id/2$.
Hence $\delta = \pm q^t$.
Since $\det \rho$ is crystalline,
the restriction of the
character $\det \rho$ to $I_K$ is
equal to $\chi^{-t}$ (cf.\ \cite{Fon}, Proposition 5.4.1).
Thus, $\det \rho = \eta \chi^{-t}$
with $\eta$ an unramified character.
By Lemma 3.4 in \cite{CSW},
the character $\eta$ has order at most two,
from which the desired result follows.
Note that the Betti number $d$ is even if $i$
is odd, by the Hodge symmetry.
\end{proof}
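For instance, when $X$ is an elliptic curve and $i = 1$, we have $d = 2$, and Poincar\'e duality gives a perfect alternating cup-product pairing on $H^1_{\text{\'et}}(X_{\overline{K}}, \Q_p)$ with values in
\[ H^2_{\text{\'et}}(X_{\overline{K}}, \Q_p) \simeq \Q_p(-1), \]
so that $\det \rho = \chi^{-1} = \chi^{-id/2}$ on all of $G_K$.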
\begin{proof}[Proof of Theorem \ref{main}]
The theorem clearly holds if $d = \dim_{\mathbb{Q}_p} V$ is zero.
We assume henceforth that $V$ is of positive dimension.
Since the kernel of $\rho$ is contained in the
kernel of $\det \rho$, we see that $K(V)$ contains the subfield
$K(\det V)$ of $\overline{K}$ fixed by the kernel of $\det \rho$.
Note that the character $\det \rho$ is the $-id/2$-th power of the
$p$-adic cyclotomic character on an open subgroup of
$G_K$, by Lemma \ref{lemma1}. Hence the field
$K(V)$ contains a subfield $F$ of $K(\mu_{p^\infty})$ such that
$K(\mu_{p^\infty})$ is of finite degree over $F$ since $d > 0$.
Replacing $K$ by a finite extension, we may then assume that
$K(V)$ and $L$ contain $K(\mu_{p^\infty})$.
Put $\mathfrak{h} = \Lie(\Gal(K(V)/K(\mu_{p^\infty})))$ and
$\mathfrak{h}' = \Lie(\Gal(L/K(\mu_{p^\infty})))$.
Recall that $M$ is the intersection of the fields
$K(V)$ and $L$, which is a Galois extension of
$K(\mu_{p^\infty})$.
Let $\mathfrak{j}$ and $\mathfrak{j}'$ be the Lie algebras
of $\Gal(K(V)/M)$ and $\Gal(L/M)$ respectively.
The Lie algebra $\mathfrak{j}$ (resp. $\mathfrak{j}'$) is
an ideal of $\mathfrak{h}$ (resp. $\mathfrak{h}'$),
since $\Gal(K(V)/M)$ (resp. $\Gal(L/M)$) is a
closed normal subgroup of
$\Gal(K(V)/K(\mu_{p^\infty}))$
(resp. $\Gal(L/K(\mu_{p^\infty}))$).
We have
\[ \frac{\mathfrak{h}}{\mathfrak{j}} \simeq
\Lie\left( \frac{\Gal(K(V)/K(\mu_{p^\infty}))}{\Gal(K(V)/M)} \right) \simeq
\Lie(\Gal(M/K(\mu_{p^\infty}))) \simeq
\Lie\left( \frac{\Gal(L/K(\mu_{p^\infty}))}{\Gal(L/M)} \right) \simeq
\frac{\mathfrak{h}'}{\mathfrak{j}'}. \]
The above expressions are all equal to zero by hypothesis.
Therefore $\Gal(K(V)/M)$ has finite index in $\Gal(K(V)/K(\mu_{p^\infty}))$.
We then apply Lemma \ref{lemma6} to obtain the desired conclusion.
\end{proof}
It seems worthwhile to state the following corollaries
for cohomological coprimality in
the case where $L$ is given by another ``geometric'' representation.
More precisely, consider another proper smooth variety $Y$ over $K$
with potential good reduction. Let $j$ be a positive odd integer and put
$V_1 = V$, as above and
$V_2 = H^{j}_{\text{\'et}} (Y_{\overline{K}},\mathbb{Q}_p)$.
Put $J_1 = \rho_1(\Ker(\rho_2))$ and $J_2 = \rho_2(\Ker(\rho_1))$.
Note that $J_r$ is a closed normal subgroup
of $H_r = \rho_r (G_{K(\mu_{p^\infty})})$
(after a finite extension) for $r=1,2$.
We have the following special case of Lemma \ref{lemma6}.
\begin{cor}\label{cor1}
Let $V_1$ and $V_2$ be as above and let
$K(V_1)$ and $K(V_2)$ be the fixed fields
of $\Ker(\rho_1)$ and $\Ker(\rho_2)$, respectively.
If $M := K(V_1) \cap K(V_2)$
is a finite extension of $M \cap K(\mu_{p^\infty})$,
then $V_1$ and $V_2$ are cohomologically coprime.
\end{cor}
The cohomological coprimality can also be derived by
comparing the Lie algebras $\mathfrak{h}_1 = \Lie(H_1)$ and
$\mathfrak{h}_2 = \Lie(H_2)$.
\begin{cor}\label{prop2}
With the assumptions and notations in the discussion above,
suppose $\mathfrak{h}_1$ and $\mathfrak{h}_2$
have no common simple factor. Then $V_1$ and $V_2$
are cohomologically coprime.
\end{cor}
\begin{proof}
Apply Theorem \ref{main} with $V = V_1$
and $L = K(V_2)$;
and with $V = V_2$ and $L = K(V_1)$.
\end{proof}
\section{The ordinary case}\label{sec:ord}
We use the notation as in the previous
sections. In this section, we look at the
vanishing of cohomology groups for $p$-adic
Galois representations given by varieties
with good ordinary reduction.
We begin with a definition and a few remarks.
\begin{defn}\label{prime-to-p}
\normalfont
Let $F$ be a field. For an algebraic extension $F'$
of $F$, we say that $F'$ is a \emph{prime-to-$p$ extension}
of $F$ if $F'$ is a union of finite extensions over $F$
of degree prime-to-$p$. If $F'$ is a prime-to-$p$ extension
over some finite extension field of $F$, we say that
$F'$ is a \emph{potential prime-to-$p$ extension} of $F$.
\end{defn}
\begin{remark}\label{prime-to-p-remark}
\normalfont
(i)
Clearly, if $F'$ is a potential prime-to-$p$
extension of $F$, then every intermediate field $F''$
(with $F \subseteq F'' \subseteq F'$) is a potential
prime-to-$p$ extension of $F$. \\
(ii)
Let $L$ be a $p$-adic Lie extension of $K$ containing
$K(\mu_{p^\infty})$.
Then the residue field $k_L$ is a potential
prime-to-$p$ extension over $k$ if and only
if $k_L/k$ is a finite extension.
\end{remark}
We now give the first main result in this section.
We consider the case given by elliptic
curves.
\begin{thm}\label{thm2A}
Let $E/K$ be an elliptic curve with potential good
ordinary reduction over $K$.
Let $L$ be a Galois extension of $K$
whose residue field $k_L$ is a potential
prime-to-$p$ extension over $k$.
Put $V =V_p(E)$ and $J_V = \rho_E(G_L)$.
Then $V$ has vanishing $J_V$-cohomology.
\end{thm}
As a corollary, we obtain necessary and
sufficient conditions for the vanishing of
$J_V$-cohomology groups for
$p$-adic representations given by an elliptic
curve with good ordinary reduction over $K$.
Let $\tilde{E}$ denote the reduction of
$E$ modulo the maximal ideal of $\mathcal{O}_K$.
\begin{cor}\label{van-ord-abelvar}
Let $E$ be an elliptic curve over $K$ with good ordinary
reduction and $L$ be a Galois extension with residue field $k_L$.
Assume that $L$ contains $K(\mu_{p^\infty})$ and
the coordinates of the $p$-torsion points of $E$.
Put $V=V_p(E)$ and $J_V=\rho_E(G_L)$.
Then the following statements are equivalent:\\
(1) $E(L)[p^\infty]$ is finite, \\
(2) $E^{\vee}(L)[p^\infty]$ is finite, \\
(3) $\tilde{E}(k_L)[p^\infty]$ is finite, \\
(4) $\tilde{E}^{\vee}(k_L)[p^\infty]$ is finite, \\
(5) $k_L$ is a potential prime-to-$p$ extension of $k$, and \\
(6) $V$ has vanishing $J_V$-cohomology.
\end{cor}
\begin{proof}
The equivalence of the first five statements
is given by Corollary 2.1 in \cite{Ozeki}.
Theorem \ref{thm2A} shows that condition (5)
implies condition (6).
Note that condition (1) is equivalent to
$H^0(J_V, V) = 0$ (cf.\ e.g.\ \cite{KT}, Lem. 2.1),
so condition (6) implies (1).
\end{proof}
We now give the proof of Theorem \ref{thm2A}.
First we note that we may reduce the proof to the case
$L=L(\mu_{p^\infty})$. Indeed, let
$L'=L(\mu_{p^\infty})$ and $J'_V = \rho(G_{L'})$.
Then $J'_V$ is a closed normal subgroup of $J_V$,
and if $V$ has vanishing $J'_V$-cohomology,
then
\[ H^n (J_V, V) \simeq H^n(J_V/{J'_V}, H^0(J'_V,V)) \hspace{20pt} \text{for all } n \geq 0,\]
and the right-hand side vanishes because $H^0(J'_V,V) = 0$; hence $V$ has vanishing $J_V$-cohomology.
We assume henceforth that $L=L(\mu_{p^\infty})$.
Considering $H^n(J_V,V)$ as a representation of $H_V/J_V$,
we see that Theorem \ref{thm2A} follows if we prove
the following lemma.
\begin{lemma}\label{lemA}
Assume the hypothesis in Theorem \ref{thm2A}.
Then after a finite extension $K'/K$,
the quotient $H_V / J_V$ acts unipotently
on $H^n(J_V,V)$ for all $n \geq 0$.
\end{lemma}
Let us show how the theorem follows from the lemma.
Suppose the lemma holds. Replacing $K$ by a finite extension,
we may assume that $H_V / J_V$ acts unipotently on
$H^n(J_V,V)$ for all $n \geq 0$.
We prove the vanishing by induction
on $n$. The case $n = 0$ is already known
(cf.\ \cite{Ozeki}, Thm.\ 2.1-(1)).
By Theorem \ref{thm0}, we know
that $V$ has vanishing $H_V$-cohomology.
Now let $n \geq 1$ and assume that
$H^{m}(J_V,V) = 0$ for all $1 \leq m < n$.
Then the Hochschild-Serre spectral sequence
(cf.\ \cite{HS}, Thm.\ 2) gives the following
exact sequence:
\[ 0 \rightarrow H^n(H_V / J_V, V^{J_V})
\rightarrow H^n(H_V, V) \rightarrow
H^0(H_V / J_V, H^n(J_V,V)) \rightarrow
H^{n+1}(H_V / J_V, V^{J_V}). \]
The second term vanishes because $V$ has vanishing $H_V$-cohomology,
and the last term vanishes because $V^{J_V} = H^0(J_V, V) = 0$; hence
$H^0(H_V / J_V, H^n(J_V,V))=0$. The
vanishing of $J_V$-cohomology
follows from Lemma \ref{lemC} since
$H_V / J_V$ acts unipotently on $H^n(J_V,V)$.
It remains to prove Lemma \ref{lemA}.
\begin{proof}[Proof of Lemma \ref{lemA}]
Let $K^{\text{ur}}$ be the maximal unramified
extension of $K$ and put
$N_{\infty} = K(E_\infty) \cap K^{\text{ur}}(\mu_{p^\infty})$.
We may view $N_\infty$ as the extension of $K$ obtained by
adjoining all $p$-power roots of unity to the
maximal subextension $N$ of $K(E_\infty)$ which is unramified over $K$.
Let $M' = M \cap N_\infty$ and put $G := \Gal(M / K)$,
$H := \Gal(M / M')$ and $Y := \Gal(M' / K) = G/H$.
Note that $M$ is totally ramified over $M'$.
In fact, $M'$ is the extension of $K$ obtained
by adjoining all $p$-power roots of unity in $K(E_\infty)$
to the maximal subextension of $M$ that is unramified over $K$.
As $M$ is a $p$-adic Lie extension over $K$
and its residue field $k_M$ is
potentially prime-to-$p$ over $k$, we see that $k_M$
is a finite extension of $k$
(cf.\ Remark \ref{prime-to-p-remark} (ii))
and that $M'$ is of
finite degree over $K(\mu_{p^\infty})$.
We have the following diagram of fields:
\begin{center}
\begin{tikzpicture}[
back line/.style={densely dotted},
cross line/.style={preaction={draw=white, -,line width=6pt}}]
\node (A1) {$K(E_\infty)$};
\node [below of=A1] (B1) {$N_\infty$};
\node [below of=B1] (B2)
{$N$};
\node (A2) [left of=A1, below of=A1, node distance=1.5cm] {$M$};
\node [below of=A2] (B3) {$M'$};
\node [below of=B3] (C2) {$K$};
\node (C) [left of=A2, above of=A2, node distance=1cm] {$L$};
\draw[cross line] (C) -- (A2) -- (B3) -- (C2) -- (B2) -- (B1);
\draw[cross line] (A2) -- (A1) -- (B1) -- (B3);
\draw[-, bend left = 70pt] (A1) to node [right]{$G_V$} (C2);
\draw[-, bend right = 30pt] (A2) to node [left]{$H$} (B3);
\draw[-, bend right = 30pt] (B3) to node [left]{$Y$} (C2);
\draw[-, bend right = 70pt] (A2) to node [left]{$G$} (C2);
\end{tikzpicture}
\end{center}
We need an explicit description of the
action of $Y$ on $H$.
The diagram of fields shown above clearly
induces the following commutative diagram,
having exact rows and surjective vertical
maps:
\begin{equation} \label{commdiagram}
\begin{tikzpicture} [baseline=(current bounding box.center)]
\matrix(m)[matrix of math nodes,
row sep=3em, column sep=2.5em,
text height=1.5ex, text depth=0.25ex]
{1 & \Gal(K(E_\infty)/N_\infty) & G_V & \Gal(N_\infty/K) & 1 \\
1 & H & G & Y & 1\\};
\path[->,font=\scriptsize]
(m-1-1) edge (m-1-2)
(m-1-2) edge (m-1-3)
(m-1-3) edge (m-1-4)
(m-1-4) edge (m-1-5)
(m-2-1) edge (m-2-2)
(m-2-2) edge (m-2-3)
(m-2-3) edge (m-2-4)
(m-2-4) edge (m-2-5);
\path[->>,font=\scriptsize]
(m-1-2) edge (m-2-2)
(m-1-3) edge (m-2-3)
(m-1-4) edge (m-2-4);
\end{tikzpicture}
\end{equation}
Moreover the above diagram is compatible with
the actions by inner automorphism, in the
sense that $\sigma \cdot h = g h g^{-1}$ for
$\sigma \in Y$, $h \in H$ and a lifting $g$ of
$\sigma$ to $G$, if and only if
$\tilde{\sigma} \cdot \tilde{h} = \tilde{g} \tilde{h} \tilde{g}^{-1}$,
for liftings $\tilde{\sigma}$
(resp.\ $\tilde{g}$, $\tilde{h}$) of $\sigma$
(resp.\ $g$, $h$) to $\Gal(N_\infty/K)$
(resp.\ $G_V$, $G_V$).
In order to obtain the desired explicit
description for the action of $Y$ on $H$,
we will use the following well-known result
(see for instance Proposition 2.6 in
\cite{Ozeki} which is formulated in a more
general fashion):
\begin{prop}\label{PropA}
Let $E$ be an elliptic curve over $K$ with good
ordinary reduction. For some suitable basis of $T_p(E)$, the
representation $\rho_E$ has the form
\[ \begin{pmatrix}
\varphi & a \\
0 & \psi
\end{pmatrix}, \]
where
\begin{enumerate}
\item[(i)] $\varphi : G_K \rightarrow \mathbb{Z}_p^{\times}$
is a continuous character,
\item[(ii)] $\psi : G_K \rightarrow \mathbb{Z}_p^{\times}$
is an unramified continuous character and
\item[(iii)] $a : G_K \rightarrow \Z_p$ is a continuous map.
\end{enumerate}
Moreover, $\chi = \varphi \cdot \psi$.
In particular, the restriction $\varphi |_{I_K}$
of $\varphi$ to the inertia subgroup
$I_K$ of $G_K$ coincides with the $p$-adic cyclotomic
character.
\end{prop}
\noindent
In fact, the character $\varphi$ in the proposition is
the homomorphism which gives the action of $G_V$ on the Tate module
$T_p(\mathcal{E}(p)^0)$, while $\psi$ is the homomorphism
giving the action of $G_V$ on $T_p(\mathcal{E}(p)^{\text{\'et}})$.
Here, $\mathcal{E}(p)$ is the $p$-divisible group associated
with the N{\'e}ron model of $E$ over $\mathcal{O}_K$ and
the superscripts ``$0$" and ``{\'e}t" indicate
the connected $p$-divisible subgroup and
{\'e}tale quotient of $\mathcal{E}(p)$, respectively.
We now choose a basis of the Tate module $T_p(E)$
of $E$ with respect to which the action
of $G_V$ on $T_p(E)$ is given as in the
previous proposition.
Let $h \in H$ and $\sigma \in Y$.
Let $g$ be a lifting of $\sigma$ to $G$.
Let $\tilde{g}$ (resp.\ $\tilde{h}$) be a lifting of $g$
(resp.\ $h$) to $G_V$. Note that
$\tilde{h} \in \Gal(K(E_\infty)/N_\infty)$.
In particular, we have
$\varphi(\tilde{h}) = 1$ and $\psi(\tilde{h}) = 1$.
A matrix calculation gives
\[ \rho_E(\tilde{g} \cdot \tilde{h})
= \rho_E(\tilde{g}) \rho_E(\tilde{h}) \rho_E(\tilde{g})^{-1}
= \begin{pmatrix}
1 & \varphi(\tilde{g}) a(\tilde{h}) \psi(\tilde{g})^{-1} \\
0 & 1
\end{pmatrix}. \]
Since $\varphi = \chi \cdot \psi^{-1}$,
the above equation becomes
\[ \rho_E(\tilde{g}) \rho_E(\tilde{h}) \rho_E(\tilde{g})^{-1}
= \begin{pmatrix}
1 & a(\tilde{h}) \\
0 & 1
\end{pmatrix}^{\chi\cdot\psi^{-2}(\tilde{g})}. \]
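Here the exponent is meaningful because the matrices $\begin{pmatrix} 1 & b \\ 0 & 1 \end{pmatrix}$ with $b \in \Z_p$ form a group isomorphic to the additive pro-$p$ group $\Z_p$; explicitly,
\[ \begin{pmatrix} 1 & a(\tilde{h}) \\ 0 & 1 \end{pmatrix}^{c} = \begin{pmatrix} 1 & c\, a(\tilde{h}) \\ 0 & 1 \end{pmatrix} \qquad (c \in \Z_p), \]
and for $c = \chi\cdot\psi^{-2}(\tilde{g})$ this is exactly the matrix computed above.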
By the compatibility of the diagram (\ref{commdiagram})
with the action by inner automorphisms,
the preceding equation gives the relation
\begin{equation*}
\sigma \cdot h = h^{\varepsilon(\sigma)}
\end{equation*}
where $\varepsilon = \chi\cdot\psi^{-2}$.
Since $\psi$ is unramified and the image of
$\chi$ is open in $\Z_p^{\times}$, we deduce
that the image of the character $\varepsilon$
is an open subgroup of $\Z_p^{\times}$.
The desired result follows from Lemma \ref{lemB}.
This completes the proof of Lemma \ref{lemA}
and of Theorem \ref{thm2A}.
\end{proof}
\begin{remark}
\normalfont
The $p$-adic Galois representation $V$ of Theorem \ref{thm2A}
is ordinary in the sense of Definition \ref{ordrepn}
as we can see from Proposition \ref{PropA}.
We note that the proof of Theorem \ref{thm2A}
can be replicated for ordinary $p$-adic Galois
representations whose filtration has the same shape as that for
the Tate module of elliptic curves with good
ordinary reduction. For our purposes, we content ourselves with
the theorem above.
\end{remark}
On the other hand, we may obtain
some vanishing results with respect to a more particular
family of $L$ for more general higher-dimensional ordinary
representations coming from geometry as follows:
\begin{thm}\label{thm3}
Let $X$ be a proper smooth variety over $K$ with potential
good ordinary reduction and let $E/K$ be an elliptic curve
with potential good supersingular reduction.
Let $i$ be a positive odd integer and put
$V = H^{i}_{\text{\'et}} (X_{\overline{K}}, \mathbb{Q}_p)$
and $V' = V_p(E)$.
Then $V$ and $V'$ are cohomologically coprime.
\end{thm}
\begin{proof}
To prove the theorem we have to show that the following
statements hold: \\
(a) If we put $L = K(E_{\infty})$ and $J_V = \rho(G_L)$,
then $V$ has vanishing $J_V$-cohomology; and \\
(b) If we put $L' = K(V)$ and $J_{V'} = \rho_E(G_{L'})$,
then $V'$ has vanishing $J_{V'}$-cohomology. \\
We only prove statement (a) since statement (b) can be proved
in a similar manner.
Replacing $K$ with a finite extension, we may assume that
$X$ has good ordinary reduction and $E$ has good supersingular
reduction over $K$. We may also assume that $K(V)$ contains
$K(\mu_{p^\infty})$ by extending $K$ further
(cf. Lemma \ref{lemma1}).
Let $K^{\text{ur}}$ be the maximal unramified extension
of $K$ in $\overline{K}$ and put
$N_{\infty} = K(V) \cap K^{\text{ur}}(\mu_{p^\infty})$.
The assumption on $V$ implies that the inertia subgroup
$I_K$ of $G_K$ acts on the associated graded quotients
$\text{gr$^r$ } V$ by the $r$th power of the $p$-adic
cyclotomic character. In particular the group
\[ \Gal(K(V)/N_{\infty}) \simeq
\Gal(K^{\text{ur}}(V)/K^{\text{ur}}(\mu_{p^\infty})) \]
acts unipotently on $V$. Hence, $\Lie(\Gal(K(V)/N_{\infty}))$
is a nilpotent Lie algebra contained in
$\Lie(H_V) = \Lie(\Gal(K(V)/K_{\infty,V}))$.
Recall that we may identify $\Gal(L/K)$ with the subgroup
$\rho_E(G_K)$ of $\Aut(T_p(E)) \simeq \GL_2(\mathbb{Z}_p)$.
Put $\mathfrak{g} = \Lie(\Gal(L/K))$
and $\mathfrak{h}= \Lie(\Gal(L/K(\mu_{p^\infty})))$.
If $E$ has no formal complex multiplication, then
$\mathfrak{g} \simeq \mathfrak{gl}_2(\mathbb{Q}_p)$
by Proposition \ref{prop3} $(ii)$ and so
$\mathfrak{h} \simeq \mathfrak{sl}_2(\mathbb{Q}_p)$.
In particular, $\mathfrak{h}$ is simple.
It immediately follows from Theorem \ref{main}
that $V$ has vanishing $J_V$-cohomology.
This proves (a) when $E$ has no formal complex multiplication.
We now suppose that $E$ has formal complex multiplication.
We claim that $M = K(V) \cap L$ is a finite extension
of $K(\mu_{p^\infty})$.
The restriction map induces a surjection
\[ \Gal(K(V)/N_\infty) \twoheadrightarrow
\Gal(MN_\infty / N_\infty) \simeq \Gal(M / {M \cap N_\infty}), \]
from which we obtain a surjection of Lie algebras
\[ \Lie(\Gal(K(V)/N_\infty)) \twoheadrightarrow
\Lie(\Gal(M / {M \cap N_\infty})). \]
As $\Lie(\Gal(K(V)/N_\infty))$ is nilpotent,
we see that $\Lie(\Gal(M / {M \cap N_\infty}))$ is a nilpotent
subalgebra of $\Lie(\Gal(M / K(\mu_{p^\infty})))$.
Since $E$ has formal complex multiplication,
we know from Proposition \ref{prop3} $(i)$ that
$\mathfrak{g}$ is a non-split Cartan subalgebra of
$\End(V_p(E)) \simeq \mathfrak{gl}_2(\mathbb{Q}_p)$.
Thus $\mathfrak{g}$ contains the center $\mathfrak{c}$
of $\mathfrak{gl}_2(\mathbb{Q}_p)$ and
$\mathfrak{h} \simeq \mathfrak{g}/\mathfrak{c}$ is a
Cartan subalgebra of $\mathfrak{sl}_2(\mathbb{Q}_p)$
(cf.\ \cite{Bour2}, Ch.7, \textsection 2, Proposition 5).
Its elements are semisimple in $\mathfrak{sl}_2(\mathbb{Q}_p)$ by
(\emph{op.\ cit.}, \textsection 4 Thm.\ 2).
Thus, the elements of $\Lie(\Gal(M/K(\mu_{p^\infty})))$
are also semisimple
since it is a quotient of $\mathfrak{h}$.
Since the Lie algebra $\Lie(\Gal(M/M \cap N_\infty))$
is a nilpotent factor of $\Lie(\Gal(M/K(\mu_{p^\infty})))$,
we then see that $\Lie(\Gal(M / M \cap N_\infty)) = 0$.
This means $M / M \cap N_\infty$ is a finite extension.
But note that $M \cap N_\infty$ is unramified over $K(\mu_{p^\infty})$.
Since $\rho_E(I_K)$ is open in $\rho_E(G_K)$
again by Proposition \ref{prop3} $(i)$,
$M \cap N_\infty$ is finite over $K(\mu_{p^\infty})$.
Thus $M / K(\mu_{p^\infty})$ is a finite extension.
As we remarked earlier, the finiteness of $[M:K(\mu_{p^\infty})]$
is equivalent to the finiteness of the index of $J_V = \rho(G_L)$
in $H_V$. By Lemma \ref{lemma6}, we conclude
that $V$ has vanishing $J_V$-cohomology.
\end{proof}
\begin{remark}\label{rem3}
\normalfont
In Theorem \ref{thm3} when the elliptic curve $E$
has potential good ordinary reduction, the vanishing
statement (b) may not hold because $H^0(J_{V'}, {V'})$
may be nontrivial.
This is easily observed by taking $X=E$ and considering
$V=H^1_{\text{\'et}}(X_{\overline{K}}, \Q_p)$.
This observation in fact holds in a more general case.
Indeed, take any abelian variety $A/K$ with
potential good ordinary reduction and consider
$V = H^{1}_{\text{\'et}} (A_{\overline{K}}, \mathbb{Q}_p)$ and $V' = V_p(A) \simeq V^{\vee}$.
Since $A$ has potential good ordinary reduction,
the field $L' = K(A_\infty)$ contains an unramified
$\mathbb{Z}_p$-extension.
Hence, the residue field $k_{L'}$ is not a
potential prime-to-$p$ extension over $k$.
Replacing $K$ and $L'$ with appropriate finite extensions
(so that the hypothesis of Corollary \ref{van-ord-abelvar}
is satisfied), we conclude
that the group $V'$ does not have
vanishing $J_{V'}$-cohomology.
Thus $V$ and $V'$ are not cohomologically coprime.
\end{remark}
\section{Vanishing result for elliptic curves} \label{sec:vanish for EC}
In this section, we determine the cohomological
coprimality of two Galois representations $V_p(E)$
and $V_p(E')$ given by elliptic curves $E$ and $E'$,
respectively.
\subsection{The case of good reduction} \label{subsec:good reduction}
We first treat the case where $E$ and $E'$ both have
potential good reduction over $K$.
The main result in this subsection is the following:
\begin{thm}\label{thm6}
Let $E$ and $E'$ be elliptic curves
with potential good reduction over $K$.
Put $L=K(E'_\infty)$.
Then the representations $V_p(E)$ and $V_p(E')$
are cohomologically coprime if one of the
following conditions is satisfied:
\begin{itemize}
\item[(i)] $E$ has potential good ordinary reduction and
$E'$ has potential good supersingular reduction,
or vice versa;
\item[(ii)] $E$ has potential good supersingular reduction
with formal complex multiplication and $E'$ has potential
good supersingular reduction without formal complex multiplication,
or vice versa;
\item[(iii)] $E$ and $E'$ both have potential good
supersingular reduction with formal complex multiplication
and the group $E(L)[p^\infty]$ of $p$-power division
points of $E$ over $L$ is finite;
\item[(iv)] $E$ and $E'$ both have potential good
supersingular reduction without formal complex multiplication
and the group $E(L)[p^\infty]$ is finite.
\end{itemize}
\end{thm}
By symmetry, it suffices to verify the
following:
\begin{thm}\label{thm4}
Let $E$ and $E'$ be elliptic curves
with potential good reduction over $K$.
Put $L = K(E'_\infty)$.
If one of the conditions (i) - (iv)
in Theorem \ref{thm6} is satisfied
then $V=V_p(E)$ has vanishing $J_V$-cohomology,
where $J_V = \rho_E (G_L)$.
\end{thm}
\begin{remark}\label{rem4}
\normalfont
Ozeki gave necessary and sufficient conditions
for the finiteness of $E(L)[p^\infty]$ in \emph{(iii)} and \emph{(iv)}.
See Propositions 3.7 and 3.8 of \cite{Ozeki} for more details.
In general, if $E$ is an elliptic curve with good supersingular
reduction over $K$ and $L$ is a Galois extension of $K$,
the group $E(L)[p^\infty]$ is finite if and only if $K(E_{\infty})$
is not contained in $L$ (\emph{op.\ cit.}, Lemma 3.1).
\end{remark}
The case \emph{(i)} of the theorem is already
covered by Theorem \ref{thm3}. For case \emph{(ii)}, we may replace $K$
with a finite extension so that $E$ and $E'$ both
have good supersingular reduction over $K$.
Put $\mathfrak{h} = \Lie(\Gal(K(E_\infty)/K(\mu_{p^\infty})))$.
The Lie algebra of $\Gal(L/K)$
is isomorphic to
$\End(V_p(E')) \simeq \mathfrak{gl}_2(\mathbb{Q}_p)$
by Proposition \ref{prop3} \emph{(ii)}.
The Lie algebra
$\mathfrak{h}' = \Lie(\Gal(L/K(\mu_{p^\infty})))$
is isomorphic to $\mathfrak{sl}_2({\mathbb{Q}_p})$.
In particular, $\mathfrak{h}'$ is simple.
As $\mathfrak{h}$ is abelian,
we see that $\mathfrak{h}$ and
$\mathfrak{h}'$ have no common simple factor.
By Theorem \ref{main}, the desired result follows.
In view of Corollary \ref{cor1}, to prove
the cases $(iii)$ and $(iv)$,
it suffices to show that the field $K(V_p(E)) \cap L$ is a finite
extension of $K(\mu_{p^\infty})$.
We obtain this by the following lemma.
\begin{lemma}\label{lemma2}
Let $E$ and $E'$ be elliptic curves over $K$
which have potential good supersingular reduction.
Suppose $E$ and $E'$ both have
formal complex multiplication
or both do not have
formal complex multiplication.
Assume further that
$E(L)[p^\infty]$ is a finite group.
Then $M := K(E_\infty) \cap L$
is a finite extension of $K(\mu_{p^\infty})$.
\end{lemma}
\begin{proof}
We split the proof into two cases: \\
(Case 1) Assume that both $E$ and $E'$ have
formal complex multiplication.
The Lie algebra $\Lie(\rho_E(G_K))$
attached to $E$ is $2$-dimensional, by
Proposition \ref{prop3} \emph{(i)}.
Thus $\Gal(K(E_\infty)/K)$ is
a $2$-dimensional $p$-adic Lie group and so
$\Gal(K(E_\infty)/K(\mu_{p^\infty}))$ is
$1$-dimensional.
The same statements hold when $E$ is replaced
by $E'$.
Replacing $K$ with a finite extension, we may
assume that $\Gal(K(E_\infty)/K(\mu_{p^\infty}))$
is isomorphic to $\mathbb{Z}_p$. If
$M$ is infinite over $K(\mu_{p^\infty})$,
then $\Gal(K(E_\infty)/M)$ is of infinite index
in $\Gal(K(E_\infty)/K(\mu_{p^\infty}))$.
Since the only closed subgroup of $\mathbb{Z}_p$ of
infinite index is the trivial subgroup, the group
$\Gal(K(E_\infty)/M)$ must be trivial, and thus $K(E_\infty) = M$.
That is, $K(E_\infty)$ is contained in $L$.
Hence, $E(L)[p^\infty]$ is infinite (see Remark \ref{rem4}).
This contradicts our hypothesis. Therefore, $M$ is a finite
extension of $K(\mu_{p^\infty})$. \\
(Case 2) Suppose both $E$ and $E'$ do not
have formal complex multiplication.
Put $\mathfrak{g} = \Lie(\Gal(K(E_\infty)/K))$ and
$\mathfrak{h} = \Lie(\Gal(K(E_\infty)/K(\mu_{p^\infty})))$.
Then $\mathfrak{g}$ (resp.\ $\mathfrak{h}$) is
isomorphic to $\mathfrak{gl}_2 (\mathbb{Q}_p)$
(resp.\ $\mathfrak{sl}_2 (\mathbb{Q}_p)$).
In particular, $\mathfrak{h}$ is simple.
The Lie algebra $\mathfrak{j} = \Lie(\Gal(K(E_\infty)/M))$
is an ideal of $\mathfrak{h}$ since
$\Gal(K(E_\infty)/M)$ is a normal subgroup of
$\Gal(K(E_\infty)/K(\mu_{p^\infty}))$.
Thus $\mathfrak{j}$ is either $(0)$ or
$\mathfrak{sl}_2 (\mathbb{Q}_p)$.
In the former case, $\Gal(K(E_\infty)/M)$
is a finite group and thus $K(E_\infty)/M$ is a finite extension.
Replacing $K$ with a finite extension, we have
$K(E_\infty)= M \subset L$. But then
this implies $E(L)[p^\infty]$ is infinite,
in contrast to our hypothesis. Thus
$\mathfrak{j} = \mathfrak{sl}_2 (\mathbb{Q}_p)$,
which means that $\Gal(K(E_\infty)/M)$ is
an open subgroup of $\Gal(K(E_\infty)/K(\mu_{p^\infty}))$,
and hence $M$ is a finite extension of $K(\mu_{p^\infty})$.
This completes the proof of the lemma and of
Theorem \ref{thm4}.
\end{proof}
Theorem \ref{thm4} gives another proof of some
finiteness results in \cite{Ozeki}. For instance, in view
of Remark \ref{rem2}, condition $(i)$ of Theorem \ref{thm4}
implies a part of Proposition 3.2 in \cite{Ozeki}.
We also obtain the following corollary.
\begin{cor}\label{cor_ss}
Let $E$ and $E'$ be elliptic curves with potential
good supersingular reduction over $K$, $L = K(E'_\infty)$ and $L' = K(E_\infty)$.
Put $V=V_p(E)$, $V'=V_p(E')$, $J_V = \rho_E(G_L)$ and $J_{V'} = \rho_{E'}(G_{L'})$. Assume that $E$ and $E'$
both have formal complex multiplication or
both do not have formal complex multiplication.
Then the following statements are equivalent:
\\
(1)
$V$ and $V'$ are cohomologically
coprime,
\\
(2)
$L \cap L'$ is a finite extension
of $K(\mu_{p^\infty})$,
\\
(3)
$V$ has vanishing $J_V$-cohomology,
\\
(3')
$V'$ has vanishing $J_{V'}$-cohomology,
\\
(4)
$E(L'')[p^\infty]$ is a finite group
for any finite extension $L''$ of $L$,
\\
(4')
$E'(L'')[p^\infty]$ is a finite group
for any finite extension $L''$ of $L'$, and
\\
(5)
The $p$-divisible groups $\mathcal{E}(p)$
and $\mathcal{E}'(p)$
attached to $E$ and $E'$, respectively,
are not isogenous over $\mathcal{O}_{K'}$
for any finite extension $K'$ of $K$.
\end{cor}
\begin{proof}
It remains to prove the equivalence of
each of the first six conditions with the last one.
Replacing $K$ by a finite extension, we may
assume that $E$ and $E'$ have good supersingular
reduction over $K$.
We prove the equivalence $(4) \Leftrightarrow (5)$.
If $E$ and $E'$ both do not have
formal complex multiplication
then this equivalence
is given by Proposition 3.8 in \cite{Ozeki}.
Assume that $E$ and $E'$ both have
formal complex multiplication.
Suppose that (4) fails, so that there is a finite extension $L''$ of $L$
such that $E(L'')[p^\infty]$ is infinite.
Replacing $K$ by a finite extension,
we may assume that $L=L''$.
Then Proposition 3.7 in \cite{Ozeki}
implies that $E$ and $E'$ have
the same fields of formal complex multiplication,
say $F$.
The representations
$\rho_E : G_K \rightarrow \GL(V_p(E))$ and
$\rho_{E'} : G_K \rightarrow \GL(V_p(E'))$
factor through $\Gal(K^{\mathrm{ab}}/K)$,
where $K^{\mathrm{ab}}$ denotes the
maximal abelian extension of $K$.
Moreover $\rho_E$ and $\rho_{E'}$ both have values
in $\mathcal{O}^{\times}_{F}$
and their restrictions to the inertia group
are respectively given by
\[ \rho_E|_{I_K}, \rho_{E'}|_{I_K}
: I(K^{\mathrm{ab}}/K) \simeq \mathcal{O}^{\times}_{K}
\rightarrow \mathcal{O}^{\times}_{F}. \]
Here, $I(K^{\mathrm{ab}}/K)$ denotes the
inertia subgroup of $\Gal(K^{\mathrm{ab}}/K)$,
and the isomorphism
$I(K^{\mathrm{ab}}/K) \simeq \mathcal{O}_{K}^{\times}$
comes from local class field theory.
In fact, $\rho_E|_{I_K}$ and $\rho_{E'}|_{I_K} $
are equal since they are both given
by the map
$x \mapsto \mathrm{Nr}_{K/F}(x^{-1})$,
where
$\mathrm{Nr}_{K/F} : K^{\times} \rightarrow F^{\times}$
is the norm map (cf.\ \cite{Ser1}, Chap.\ IV, A.2.2).
From Proposition \ref{prop3} (\emph{i}),
we know that $\rho_E(I_K)$ (resp. $\rho_{E'}(I_K)$)
is an open subgroup of $\rho_E(G_K)$
(resp. $\rho_{E'}(G_K)$).
This, together with the assumption that
$E(L)[p^\infty]$ is infinite implies that
$\rho_E(I_{K'}) = \rho_E(G_{K'}) = \rho_{E'}(G_{K'}) = \rho_{E'}(I_{K'})$
after a finite extension $K'/K$.
We see that the Tate modules $T_p(E)$ and $T_p(E')$
become isomorphic over $K'$.
By a well-known result due to Tate
(cf.\ \cite{Tate}, Corollary 1),
the $p$-divisible groups $\mathcal{E}(p)$
and $\mathcal{E}'(p)$ are isogenous over $\mathcal{O}_{K'}$. Conversely, if there
exists a finite extension $K'$ over $K$ such that $\mathcal{E}(p)$
and $\mathcal{E}'(p)$ are isogenous over
$\mathcal{O}_{K'}$, then $T_p(E)$ and
$T_p(E')$ are isomorphic over $K'$, which
shows that $K'(E_\infty) = K'(E'_\infty)$; the latter
is a finite extension of $L$.
Therefore we obtain a
finite extension $L''$ of $L$
such that $E(L'')[p^\infty]$ is infinite.
\end{proof}
\subsection{The case of multiplicative reduction}\label{subsec:multip reduction}
We now treat the case where $E'$ has potential multiplicative reduction
over $K$. For this case, the Lie algebra of $\Gal(K(E'_\infty)/K)$
is given by Proposition \ref{prop3} \emph{(v)}.
We have the following result.
\begin{thm}\label{thm5}
Let $E$ and $E'$ be elliptic curves over $K$ such that
$E$ has potential good reduction over $K$ and
$E'$ has potential multiplicative reduction over $K$.
Put $L = K(E'_\infty)$. Then $V = V_p(E)$ has vanishing
$J_V$-cohomology, where $J_V = \rho_E (G_L)$.
\end{thm}
\begin{proof}
Replace $K$ with a finite extension so that
$E$ and $E'$ have good and multiplicative
reductions over $K$, respectively.
We first note that the residue field $k_L$ of $L$
is a potential prime-to-$p$ extension of $k$ since
$\Lie(\rho_{E'}(G_K)) = \Lie (\rho_{E'}(I_K))$.
Thus, the case where $E$ has good ordinary reduction
is just a consequence of Theorem \ref{thm2A}.
It remains to settle the case where $E$ has
good supersingular reduction over $K$.
If $E$ has no formal complex multiplication,
note that the Lie algebra
$\mathfrak{h}_1 = \Lie(\Gal(K(E_\infty) / K(\mu_{p^\infty}))) \simeq \mathfrak{sl}_2(\mathbb{Q}_p)$
is simple. On the other hand, the Lie algebra
$\mathfrak{h}_2 = \Lie(\Gal(K(E'_\infty) / K(\mu_{p^\infty})))$
is abelian. The result follows from Theorem \ref{main}.
If $E$ has formal complex multiplication,
then by virtue of Corollary \ref{cor1},
it suffices to prove that $M := L \cap K(E_\infty)$ is a
finite extension of $K(\mu_{p^\infty})$.
The Lie algebra $\Lie(\rho_E(G_K))$
attached to $E$ is $2$-dimensional, by
Proposition \ref{prop3} \emph{(i)}.
Thus $\Gal(K(E_\infty)/K)$ is
a $2$-dimensional $p$-adic Lie group and so
$\Gal(K(E_\infty)/K(\mu_{p^\infty}))$ is
$1$-dimensional.
As in the proof for Case (1) of
Lemma \ref{lemma2}, if we assume that $M$ is of
infinite degree over $K(\mu_{p^\infty})$, then
the group $\Gal(K(E_\infty)/M)$ must be trivial,
and thus $K(E_\infty) = M$.
That is, $K(E_\infty)$ is contained in $L$.
Thus we have a natural surjection
$\Gal(L/K) \twoheadrightarrow \Gal(K(E_\infty)/K)$
which induces a surjection of Lie algebras
$\Lie(\Gal(L/K)) \twoheadrightarrow \Lie(\Gal(K(E_\infty)/K))$.
Since both Lie algebras are two-dimensional,
the above surjection of Lie algebras must be an isomorphism.
In view of Proposition \ref{prop3}
\emph{(i)} and \emph{(v)}, we have a contradiction.
Therefore, $M$ is a finite extension of
$K(\mu_{p^\infty})$.
\end{proof}
\begin{remark}\label{cohom-coprime-mult}
\normalfont
Despite the above result, we cannot
expect much about the cohomological coprimality
of $V=V_p(E)$ and $V'=V_p(E')$ if at least
one of $E$ and $E'$ has multiplicative reduction
over $K$. For instance if $E'$ has split
multiplicative reduction, the theory
of Tate curves shows that
$H^0(H_{V'}, V')$ is non-trivial.
On the other hand if $E'$ has non-split
multiplicative reduction, we do not know
whether all the $J_{V'}$-cohomology
groups of $V'$ vanish.
(But see Proposition 3.10 in \cite{Ozeki} for
conditions where $H^0(J_{V'}, V')$
vanishes).
\end{remark}
\section{Cohomologies of Global Representations}
In this section, we discuss some global
analogues of the results we obtained in
the previous sections. In fact, these are
consequences of the local results that we proved.
Let $F$ be an algebraic number field,
that is, a finite extension of $\Q$.
We now consider a proper smooth algebraic
variety $X$ defined over $F$.
Denote by $X_{\overline{F}}$ the extension
of scalars of $X$ to $\overline{F}$.
In this section we take
\[ V = H^{i}_{\text{\'et}} (X_{\overline{F}}, \Q_p) \]
and again denote by $\rho : G_F \rightarrow \GL(V)$ the
continuous homomorphism giving the action of $G_F$
on $V$. As in the previous sections if
$V = V_p(E) = T_p(E) \otimes_{\Z_p} \Q_p$ is
given by an elliptic curve $E$ over $F$, we write $\rho_E$
instead of $\rho$.
For an algebraic extension $L$ of $F$
and a place $w$ of $L$,
we denote by $L_w$ the union of the
completions at $w$ of all finite
extensions of $F$ contained in $L$.
\begin{thm}\label{global_var}
Let $X$ be a proper smooth variety and $E$ be an elliptic curve
over $F$. Suppose there is at least one place $v$ of $F$
above $p$ such that $X$ has potential good ordinary reduction at
$v$ and $E$ has potential good supersingular reduction at $v$.
Let $i$ be a positive odd integer. Put $V = H^{i}_{\text{\'et}} (X_{\overline{F}}, \Q_p)$
and $V' = V_p(E)$. Then $V$ and $V'$ are cohomologically coprime.
\end{thm}
\begin{thm}\label{global_ellcur}
Let $E$ and $E'$ be elliptic curves
over $F$. Put $L=F(E'_\infty)$.
Then $V_p(E)$ and $V_p(E')$
are cohomologically coprime if there is at least one
place $v$ of $F$ above $p$ such that one of the
following conditions is satisfied:
\begin{itemize}
\item[(i)] $E$ has potential good ordinary reduction at
$v$ and $E'$ has potential good supersingular reduction at $v$,
or vice versa;
\item[(ii)] $E$ has potential good supersingular reduction at
$v$ with formal complex multiplication and $E'$ has potential
good supersingular reduction at $v$ without formal complex
multiplication, or vice versa;
\item[(iii)] $E$ and $E'$ both have potential good
supersingular reduction at $v$
with formal complex multiplication and the group
$E(L_w)[p^\infty]$ of $p$-power division points of $E$
over $L_w$ is finite for a place $w$ of $L$
lying above $v$;
\item[(iv)] $E$ and $E'$ both have potential good
supersingular reduction at $v$ without formal complex multiplication
and the group $E(L_w)[p^\infty]$ is finite for a place
$w$ of $L$ lying above $v$.
\end{itemize}
\end{thm}
We give the proof of Theorem \ref{global_var} below.
Theorem \ref{global_ellcur} can be verified
similarly from the proof of Theorem \ref{thm4}.
\begin{proof}[Proof of Theorem \ref{global_var}]
Put $J_{V} = \rho(G_L)$ and $J_{V'} = \rho_E(G_{L'})$,
where $L = F(E_{\infty})$ and $L' = F(V)$.
We must show that $V$ has vanishing $J_V$-cohomology
and $V'$ has vanishing $J_{V'}$-cohomology.
To do this, we proceed as in \cite{CSW}, \S4.4, Example 3.
For a vector space $W$ over $\Q_p$, we write $W_{\overline{\Q_p}}$
for $W \otimes_{\Q_p} \overline{\Q_p}$.
Let $w$ (resp.\ $\nu$, $u$) be a place of
$L$ (resp.\ $L'$, $F(\mu_{p^\infty})$)
lying above $v$.
By replacing $F_v$ by a finite extension we may
assume that $L'_\nu$ contains $F(\mu_{p^\infty})_u=F_v(\mu_{p^\infty})$.
In the proof of Theorem \ref{thm3}, we showed that
$L_w \cap L'_\nu$ is a finite extension over $F_v(\mu_{p^\infty})$.
We let $D_V$ (resp.\ $\mathscr{D}_V$) be the image
of $\rho$ restricted to the decomposition group
in $G_L$ (resp.\ $G_{F(\mu_{p^\infty})}$) of some fixed
place of $\overline{F}$ above $w$ (resp.\ $u$).
Then $D_V$ is an open subgroup of $\mathscr{D}_V$ and
so their Lie algebras coincide.
But the Lie algebra $\Lie(\mathscr{D}_V)_{\overline{\Q_p}}$
satisfies the strong Serre criterion (cf.\ \cite{CSW}).
Thus $\Lie(J_V)_{\overline{\Q_p}} \supset \Lie(D_V)_{\overline{\Q_p}}$
also satisfies the strong Serre criterion. It
follows from Proposition 2.3 of \emph{op.\ cit.}
that $V$ has vanishing $J_V$-cohomology.
Replacing $(\rho, V, L)$ with $(\rho_E, V', L')$ above,
we may prove in the same manner that $V'$ has vanishing
$J_{V'}$-cohomology. This completes the proof
of Theorem \ref{global_var}.
\end{proof}
By arguing in a similar manner as above,
it can be shown that the proof of Theorem \ref{thm5}
for the good supersingular reduction case
also implies the following global result.
\begin{thm}\label{global_ellcur2}
Let $E$ and $E'$ be elliptic curves
over $F$. Put $L = F(E'_\infty)$. Then
$V=V_p(E)$ has vanishing $J_V$-cohomology,
where $J_V = \rho_E(G_L)$, if there is at least one place
$v$ above $p$ such that $E$ has potential good
supersingular reduction at $v$ and
$E'$ has potential multiplicative reduction at $v$.
\end{thm}
Notice that in Theorem \ref{global_ellcur2}
we did not include the case where $E$ has potential
good ordinary reduction and $E'$ has potential multiplicative
reduction. This is because in this case
$\Lie (D_V)$ may be smaller than $\Lie(\mathscr{D}_V)$, with
the notation above. Nevertheless, the same assertion
for the global representation may be shown to hold
by considering places $v$ not lying above $p$.
This, along with similar cases, will be treated in a
subsequent paper.
\noindent Jerome T. Dimabayao \\
Graduate School of Mathematics\\
Kyushu University\\
744 Motooka, Nishi-ku, Fukuoka\\
819-0395, Japan \\
\texttt{[email protected]} \\
\noindent and \\
\noindent Institute of Mathematics \\
University of the Philippines \\
C.P. Garcia St., U.P.Campus Diliman \\
1101 Quezon City, Philippines \\
\texttt{[email protected]}
\end{document}
\begin{document}
\title
{$\mu$-norm and regularity}
\author{D.~Treschev \\
Steklov Mathematical Institute of Russian Academy of Sciences
}
\date{}
\maketitle
\begin{abstract}
In \cite{Tre_PSI20} we introduce the concept of a $\mu$-norm for a bounded operator in a Hilbert space. The main motivation is the extension of the measure entropy to the case of quantum systems. In this paper we recall the basic results from \cite{Tre_PSI20} and present further results on the $\mu$-norm. More precisely, we specify three classes of unitary operators for which the $\mu$-norm generates a bistochastic operator. We plan to use the latter in the construction of quantum entropy.
\end{abstract}
\section{Introduction}
\label{sec:intro}
Let ${\cal X}$ be a nonempty set and let ${\cal B}$ be a $\sigma$-algebra of subsets $X\subset{\cal X}$. Consider the measure space $({\cal X},{\cal B},\mu)$, where $\mu$ is a probability measure: $\mu({\cal X}) = 1$.
Consider the Hilbert space ${\cal H} = L^2({\cal X},\mu)$ with the scalar product and the norm
$$
\langle f,g\rangle = \int_{\cal X} f\overline g \, d\mu, \quad
\|f\| = \sqrt{\langle f,f\rangle} .
$$
For any bounded operator $W$ on ${\cal H}$ let $\|W\|$ be its $L^2$ norm:
$$
\|W\| = \sup_{\|f\|=1} \|Wf\|.
$$
We say that $\chi = \{Y_1,\ldots,Y_J\}$ is a (finite, measurable) partition (of ${\cal X}$) if
$$
Y_j\in{\cal B}, \quad
\mu\big({\cal X}\setminus \cup_{1\le j\le J} Y_j\big) = 0, \quad
\mu(Y_j\cap Y_k) = 0 \quad
\mbox{ for any $j,k\in\{1,\ldots,J\}$, $k\ne j$}.
$$
We say that $\kappa = \{X_1,\ldots,X_K\}$ is a subpartition of $\chi = \{Y_1,\ldots,Y_J\}$ if for any $k\in\{1,\ldots,K\}$ there exists $j\in\{1,\ldots,J\}$ such that $\mu(X_k\setminus Y_j) = 0$.
For any $X\in{\cal B}$ consider the orthogonal projector
\begin{equation}
\label{imath}
\pi_X : {\cal H}\to{\cal H}, \qquad
{\cal H}\ni f \mapsto \pi_X f = {\bf 1}_X \cdot f,
\end{equation}
where ${\bf 1}_X$ is the indicator of $X$.
Let $W$ be a bounded operator on ${\cal H}$. For any partition $\chi = \{Y_1,\ldots,Y_J\}$ we define
\begin{equation}
\label{calM}
{\cal M}_\chi(W) = \sum_{j=1}^J \mu(Y_j) \| W\pi_{Y_j} \|^2.
\end{equation}
In \cite{Tre_PSI20} we have introduced the definition of the $\mu$-norm:\footnote
{in fact, a seminorm}
\begin{equation}
\label{mumeasure}
\|W\|_\mu = \inf_\chi \sqrt{{\cal M}_\chi(W)}.
\end{equation}
Recall that the operator $U$ is said to be an isometry if
$$
\langle f,g\rangle = \langle Uf,Ug\rangle, \qquad
f,g\in{\cal H}.
$$
If the isometry $U$ is invertible then $U$ is a unitary operator.
For any bounded $W$, any $Y\in{\cal B}$, and any isometry $U$
$$
\|W\pi_Y\| \le \|W\|, \quad
\|UW\| = \|W\|, \quad
\|\pi_Y\| = 1 \; \mbox{(if $\mu(Y)>0$)}.
$$
This implies the following obvious properties of the $\mu$-norm:
\begin{eqnarray}
\label{|1|}
\|\operatorname{id}\|_\mu
&=& 1,\qquad
\|W\|_\mu
\;\le\; \|W\|, \\
\label{WW}
\|W_1 W_2\|_\mu
&\le& \|W_1\| \|W_2\|_\mu, \\
\label{lambdaW}
\|\lambda W\|_\mu
&=& |\lambda|\, \|W\|_\mu \quad
\mbox{ for any $\lambda\in\mathbb{C}$}, \\
\label{UWWU}
\|W\|_\mu
&=& \|UW\|_\mu \quad \mbox{ for any isometry $U$}.
\end{eqnarray}
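For instance, the invariance (\ref{UWWU}) is immediate from the definition: for any partition $\chi = \{Y_1,\ldots,Y_J\}$ and any isometry $U$
$$
{\cal M}_\chi(UW) = \sum_{j=1}^J \mu(Y_j) \| UW\pi_{Y_j} \|^2
= \sum_{j=1}^J \mu(Y_j) \| W\pi_{Y_j} \|^2 = {\cal M}_\chi(W),
$$
and it remains to take the infimum over $\chi$ in (\ref{mumeasure}).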
The $\mu$-norm is motivated by the problem of the extension of the measure entropy\footnote
{known also as the Kolmogorov-Sinai entropy} to the case of quantum systems. Now we describe briefly the idea.
Let $F:{\cal X}\to{\cal X}$ be an endomorphism of the probability space $({\cal X},{\cal B},\mu)$. This means that for any $X\in{\cal B}$ the set $F^{-1}(X)$ (the complete preimage) also lies in ${\cal B}$ and $\mu(X) = \mu(F^{-1}(X))$. Invertible endomorphisms are called automorphisms. Let $\operatorname{End}({\cal X},\mu)$ denote the semigroup of all endomorphisms of $({\cal X},{\cal B},\mu)$. There are two standard constructions.
(1) Any $F\in\operatorname{End}({\cal X},\mu)$ generates the isometry (a unitary operator if $F$ is an automorphism) $U_F$ on ${\cal H}$ (the Koopman operator):
$$
L^2({\cal X},\mu)\ni f \mapsto U_F f = f\circ F, \qquad
U_F =: \operatorname{Koop}(F).
$$
(2) For any $F\in\operatorname{End}({\cal X},\mu)$ it is possible to compute the measure entropy $h(F)$.
Our question is as follows. Is it possible to determine in some ``natural way'' a real nonnegative function ${\mathfrak h}$ on the semigroup of isometries $\operatorname{Iso}({\cal H})$ so that the diagram
$$
\begin{array}{rcl}
& \operatorname{End}({\cal X},\mu) & \\
h\swarrow & & \searrow\operatorname{Koop} \\
\mathbb{R}_+ & \stackrel{{\mathfrak h}}{\longleftarrow} & \operatorname{Iso}({\cal H})
\end{array}
$$
is commutative?
Recall the construction of the measure entropy of an endomorphism. Let $J_N$ be the set of multiindices $j = (j_0,\ldots,j_N)$, where any component $j_n$ takes values in the set $\{0,\ldots,K\}$. For any partition $\chi = \{X_0,\ldots,X_K\}$ and $j\in J_N$ we define
$$
{\bf X}_j = F^{-N}(X_{j_N})\cap \ldots \cap F^{-1}(X_{j_1}) \cap X_{j_0}.
$$
We define $h_F(\chi,N+1)$ by
$$
h_F(\chi,N+1)
= - \sum_{j\in J_N} \mu({\bf X}_j) \log \mu({\bf X}_j).
$$
The function $h_F$, as a function of the second argument, is subadditive:
$h_F(\chi,n+m) \le h_F(\chi,n) + h_F(\chi,m)$. This implies existence of the limit
$$
h_F(\chi) = \lim_{n\to\infty} \frac1n h_F(\chi,n).
$$
Finally, the measure entropy is defined by
$$
h(F) = \sup_\chi h_F(\chi).
$$
A rough idea is to construct the entropy of a unitary operator $U$ analogously, with the following difference. Instead of ${\bf X}_j$ we take
\begin{equation}
\label{frakX}
{\mathfrak X}_j
= \pi_{X_{j_N}} U \pi_{X_{j_{N-1}}} U \ldots U \pi_{X_{j_1}} U \pi_{X_{j_0}} .
\end{equation}
We define
\begin{equation}
\label{hUchiN}
{\mathfrak h}_U(\chi,N+1)
= - \sum_{j\in J_N} \|{\mathfrak X}_j\|_\mu^2 \log \|{\mathfrak X}_j\|_\mu^2 .
\end{equation}
Other details are the same:
\begin{equation}
\label{limfrakh}
{\mathfrak h}_U(\chi)
= \lim_{n\to\infty} \frac1n {\mathfrak h}_U(\chi,n), \qquad
{\mathfrak h}(U)
= \sup_\chi {\mathfrak h}_U(\chi)
\end{equation}
(provided the limit (\ref{limfrakh}) exists).
In \cite{Tre_PSI20} we prove that for any automorphism $F$
\begin{equation}
\label{UF=F}
{\mathfrak h}(U_F) = h(F) .
\end{equation}
In the literature there exist several attempts to extend the concept of the measure entropy to quantum systems, see \cite{CNT,Ohya95,Ohya00,AOW,AF,M,GLW} and many others. In \cite{A} several mutual relations between these approaches are given. Some works (for example, \cite{AOW, Sr, Pe, BG, KK}) deal with the finite-dimensional case $(\#{\cal X}<\infty)$. In \cite{DF05, DF18} a construction for the measure entropy is proposed for doubly stochastic (bistochastic) operators on various spaces of functions on a measure space. It remains unclear which approach to the quantum generalization of the measure entropy is ``more physical''. This issue may become clearer after the computation of the entropy ${\mathfrak h}$ or its analogs in examples. We plan to do this in forthcoming papers.
The above definition of ${\mathfrak h}(U)$ meets several technical problems, including the question of subadditivity\footnote
{This subadditivity is important for the existence of the limit (\ref{limfrakh}).}
of ${\mathfrak h}_U(\chi,n)$ and the inequality ${\mathfrak h}_U(\chi)\le{\mathfrak h}_U(\kappa)$ if $\kappa$ is a subpartition of $\chi$\footnote
{This inequality is necessary if we want to approach the supremum (\ref{limfrakh}) on fine partitions $\chi$.}.
We plan to change the definition slightly by replacing $\|{\mathfrak X}_j\|_\mu^2$ in (\ref{hUchiN}) by another quantity similar to it so that these two properties are satisfied. To this end we associate with $U$ a bistochastic operator on $L^1({\cal X},\mu)$. Then the entropy of $U$ may be defined analogously to \cite{DF05, DF18}. We will present the details in another paper, but it seems that this bistochastic operator may be of interest in itself. As we will see below in this paper, the concept of the $\mu$-norm remains central in all our constructions.
We use the $\mu$-norm to construct a bistochastic operator in three cases: Koopman operators, the case $\#{\cal X} < \infty$, and regular operators (a special class of operators defined in Section \ref{sec:reg_oper}) for ${\cal X}=\mathbb{T}$. It is more or less clear that the circle $\mathbb{T}$ may be replaced by the torus $\mathbb{T}^d$, but this will be the subject of another paper.
Before making systematic attempts to compute the entropy of various unitary operators and to study its properties, we have to study the $\mu$-norm $\|\cdot\|_\mu$. We have started this in \cite{Tre_PSI20}. The present paper is a continuation of this program.
\section{Notation and previous results}
Here we collect basic results from \cite{Tre_PSI20}. We will refer to some of them below.
{\bf (1)}. $\|\pi_X\|_\mu^2 = \mu(X)$ for any $X\in{\cal B}$.
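This can be seen directly from (\ref{calM}): $\pi_X\pi_{Y_j} = \pi_{X\cap Y_j}$ and $\|\pi_{X\cap Y_j}\| = 1$ whenever $\mu(X\cap Y_j)>0$, so that for any partition $\chi=\{Y_1,\ldots,Y_J\}$
$$
{\cal M}_\chi(\pi_X) = \sum_{j:\, \mu(X\cap Y_j)>0} \mu(Y_j) \ge \mu(X),
$$
with equality whenever $\chi$ is a subpartition of $\{X,\, {\cal X}\setminus X\}$.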
{\bf (2)}. If $\chi'$ is a subpartition of $\chi$ then ${\cal M}_{\chi'}(W) \le {\cal M}_\chi(W)$. Hence the quantities ${\cal M}_\chi(W)$ approach the infimum (\ref{mumeasure}) on fine (small-scale) partitions.
{\bf (3)}. For any two bounded operators $W_1$ and $W_2$
$$
\|W_1 + W_2\|_\mu \le \|W_1\|_\mu + \|W_2\|_\mu.
$$
This triangle inequality combined with (\ref{lambdaW}) implies that $\|\cdot\|_\mu$ is a seminorm on the space of bounded operators on ${\cal H}$.
{\bf (4)}. Let $F$ be an automorphism of $({\cal X},{\cal B},\mu)$ and let $U_F=\operatorname{Koop}(F)$. Then
\begin{equation}
\label{UFpi}
U_F\pi_X = \pi_{F^{-1}(X)} U_F \quad
\mbox{for any } X\in{\cal B} .
\end{equation}
For any bounded operator $W$
\begin{equation}
\label{WUF}
\|W U_F\|_\mu = \|W\|_\mu.
\end{equation}
This implies $\|U_F^{-1} W U_F\|_\mu = \|W\|_\mu$. Informally speaking, this means that measure preserving coordinate changes on ${\cal X}$ preserve the $\mu$-norm.
{\bf (5)}. $\|\cdot\|_\mu$ is a continuous function in the $L^2({\cal X},\mu)$ operator topology.
{\bf (6)}. If the measure $\mu$ has no atoms then $\|W+W_0\|_\mu=\|W\|_\mu$ for any bounded $W$ and compact $W_0$. In particular, the $\mu$-norm of any compact operator vanishes. In fact, there exist non-compact operators with zero $\mu$-norm.
{\bf (7)}. Given $g\in L^\infty({\cal X},\mu)$ let $\widehat g$ be the multiplication operator defined by
$f\mapsto \widehat g f = gf$. Then $\|\widehat g\|_\mu = \|g\|$.
{\bf (8)}. Suppose ${\cal X} = \{1,\ldots,J\}$ is finite and the measure of any element equals $1/J$. Then ${\cal H}$ is isomorphic to $\mathbb{C}^J$ with the Hermitian product $\langle f,g\rangle_J = \frac1J\sum_{j=1}^J f(j)\overline{g(j)}$. Let
$$
f\mapsto Wf, \quad (Wf)(k) = \sum_{j=1}^J W(k,j) f(j)
$$
be an operator on ${\cal H}$. Then
\begin{equation}
\label{|.|mu(finite)}
\|W\|_\mu^2 = \frac1J \sum_{j,k=1}^J |W(k,j)|^2 .
\end{equation}
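For illustration, here is a minimal numerical sketch of formula (\ref{|.|mu(finite)}); it is not taken from \cite{Tre_PSI20}, and the matrix $W$ and the value of $J$ are arbitrary choices made for the example.
\begin{verbatim}
import numpy as np

# Sketch of item (8): for #X = J points with measure 1/J each and an
# operator (Wf)(k) = sum_j W(k,j) f(j), the mu-norm squared equals
# (1/J) * sum_{j,k} |W(k,j)|^2.
J = 5
rng = np.random.default_rng(0)
W = rng.standard_normal((J, J)) + 1j * rng.standard_normal((J, J))
mu_norm_sq = (np.abs(W) ** 2).sum() / J
print(mu_norm_sq)
\end{verbatim}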
{\bf (9)}. For any partition $\{X_1,\ldots,X_K\}$ of ${\cal X}$
$$
\|W\|_\mu^2
= \sum_{k=1}^K \|W \pi_{X_k}\|_\mu^2 , \quad
\|W\|_\mu^2
\le \sum_{k=1}^K \|\pi_{X_k} W\|_\mu^2.
$$
{\bf (10)}. Let ${\cal X}$ be a compact metric space and $\mu$ a Borel measure w.r.t. the corresponding topology. Let $B_r(x)\subset{\cal X}$ denote the open ball with center at $x$ and radius $r$. Then for any $x\in{\cal X}$ the limit
$$
\vartheta(x) = \lim_{\varepsilon\searrow 0} \|W\pi_{B_\varepsilon(x)}\|^2
$$
exists, the function $\vartheta$ is measurable and $\|W\|_\mu^2 \le \int_{\cal X} \vartheta \,d\mu$. There exists an example which shows that in general this inequality is strict. However $\|W\|_\mu^2 = \int_{\cal X} \vartheta \,d\mu$ provided two additional conditions {\bf C1} and {\bf C2} hold:
{\bf C1}. The function $\vartheta$ is continuous.
{\bf C2}. There exists $c>0$ such that for any open ${\cal O}\subset{\cal X}$ with $\operatorname{diam}({\cal O})\le\varepsilon$ and any $x\in{\cal O}$ there exists a function $f = \pi_{\cal O} f$ satisfying
$$
\Big| \|Wf\|^2 - \vartheta(x) \|f\|^2 \Big|
\le \gamma(\varepsilon) \|f\|^2, \quad
c < \big| f|_{\cal O} \big| < c^{-1},
$$
where $\gamma(\varepsilon)\to 0$ as $\varepsilon\to 0$. As usual, $f|_{\cal O}$ denotes restriction of $f$ to the set ${\cal O}$.
The remaining results from \cite{Tre_PSI20} that we mention here concern the case
${\cal X} = \mathbb{T} = \mathbb{R} / 2\pi\mathbb{Z}$ with the Lebesgue measure $\mu$. We expect that they can be extended to the case ${\cal X} = \mathbb{T}^d$, $d>1$.
{\bf (11)}. Let ${\cal X} = \mathbb{T}$ be a circle with the Lebesgue measure $d\mu = \frac1{2\pi} dx$. For any bounded sequence $\{\lambda_k\}_{k\in\mathbb{Z}}$ we consider the distribution $\lambda(x) = \sum \lambda_k e^{ikx}$. The convolution operator
$$
f\mapsto\operatorname{Conv}_\lambda f = \lambda * f := \int_\mathbb{T} \lambda(y) f(\cdot - y)\, d\mu(y)
$$
is bounded: $\|\operatorname{Conv}_\lambda\| = \sup_{k\in\mathbb{Z}} |\lambda_k|$. Then
$$
\|\operatorname{Conv}_\lambda\|_\mu^2 = \rho(\lambda), \qquad
\rho(\lambda) = \limsup_{\# I\to\infty} \rho_I(\lambda), \quad
\rho_I(\lambda) = \frac1{\# I} \sum_{k\in I} |\lambda_k|^2,
$$
where $I\subset\mathbb{Z}$ are integer intervals.
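As a quick illustration of the quantities $\rho_I(\lambda)$ (a toy computation, not from \cite{Tre_PSI20}), the sketch below averages $|\lambda_k|^2$ over symmetric integer intervals for the hypothetical choice $\lambda_k=e^{ik^2}$, for which every $\rho_I(\lambda)=1$ and hence $\rho(\lambda)=1$.
\begin{verbatim}
import numpy as np

# rho_I(lambda) = (1/#I) sum_{k in I} |lambda_k|^2 over I = {-K,...,K}.
def rho_I(lam_fn, K):
    ks = np.arange(-K, K + 1)
    return np.mean(np.abs(lam_fn(ks)) ** 2)

lam = lambda k: np.exp(1j * k.astype(float) ** 2)
for K in (10, 100, 1000):
    print(K, rho_I(lam, K))    # all values equal 1
\end{verbatim}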
{\bf (12)}. One of the main technical tools in the analysis of the $\mu$-norm in the case ${\cal X} = \mathbb{T}$ is the following lemma on Fourier coefficients of localized functions on the circle.
\begin{lem}
\label{lem:YfJ}
Let $Y=[a-\varepsilon,a+\varepsilon]$ and
$\displaystyle f = \pi_Y f = \sum_{k\in\mathbb{Z}} f_k e^{ikx} \in L^2(\mathbb{T})$.
Then for any integer interval $J$ and any $m\in\mathbb{Z}$
\begin{eqnarray}
\label{f-ef}
\|f - e^{im(x-a)} f\|
&\le& |m| \varepsilon \|f\| , \\
\label{f-fe}
|f_m - e^{ila} f_{m+l}|
&\le& \frac{\varepsilon^{3/2}}{\sqrt\pi} |l| \|f\| , \\
\label{ff-f}
\Big| \sum_{k\in\mathbb{Z}} e^{-ima} f_k\overline f_{k+m} - \|f\|^2 \Big|
&\le& |m| \varepsilon \|f\|^2 .
\end{eqnarray}
\end{lem}
{\bf (13)}. Consider the operator $W = (W_{j,k})_{j,k\in\mathbb{Z}}$ on ${\cal H} = L^2(\mathbb{T})$:
$$
f = \sum_{k\in\mathbb{Z}} f_k e^{ikx} \mapsto Wf = \sum_{j,k\in\mathbb{Z}} W_{j,k} f_k e^{ijx}.
$$
We say that $W$ is of diagonal type ($W\in{\cal DT}(\mathbb{T})$) if
$$
\sup_{j\in\mathbb{Z}} |W_{j+k,j}| = c_k < \infty, \quad k\in\mathbb{Z}\quad
\mbox{and}\quad \sum_{k\in\mathbb{Z}} c_k = {\bf c} < \infty.
$$
The sequence $(c_k)$ is said to be the majorating sequence for $W\in{\cal DT}(\mathbb{T})$. We define the norm $\|W\|_{\cal DT} = {\bf c}$.
Operators from ${\cal DT}(\mathbb{T})$ are bounded. As simple examples we have the following operators of diagonal type.
(a) Bounded convolution operators.
(b) Operators of multiplication by functions with absolutely converging Fourier series.
(c) The adjoint operator $W^*$ of an operator $W\in{\cal DT}(\mathbb{T})$.
(d) Linear combinations and products of operators of diagonal type. Moreover,
$$
\|W' W''\|_{\cal DT} \le \|W'\|_{\cal DT} \|W''\|_{\cal DT} \quad
\mbox{for all $W',W''\in{\cal DT}(\mathbb{T})$}.
$$
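The following sketch (an illustration on an arbitrary banded matrix, not a statement from the paper) shows the bookkeeping behind $\|W\|_{\cal DT}$ on a finite truncation: one takes the supremum along each diagonal and sums over the diagonals.
\begin{verbatim}
import numpy as np

# DT-norm of a truncated matrix: sum over k of sup_j |W_{j+k,j}|.
def dt_norm(W):
    n = W.shape[0]
    total = 0.0
    for k in range(-(n - 1), n):
        diag = np.diagonal(W, offset=-k)   # entries W_{j+k,j}
        total += np.abs(diag).max()
    return total

n = 9
W = np.eye(n) + 0.5 * np.eye(n, k=1)       # identity plus one off-diagonal
print(dt_norm(W))                          # 1.0 + 0.5 = 1.5
\end{verbatim}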
{\bf (14)}. We prove that the normed space $\big({\cal DT}(\mathbb{T}), \|\cdot\|_{\cal DT}\big)$ is complete. As a corollary we obtain that $\big({\cal DT}(\mathbb{T}), \|\cdot\|_{\cal DT}\big)$ is a $C^*$-algebra.
{\bf (15)}. We have the following inequalities between the norms:
\begin{eqnarray}
\label{.<.<.}
& \|W\|_\mu \le \|W\| \le \|W\|_{\cal DT} \quad
\mbox{for any $W\in{\cal DT}(\mathbb{T})$} , & \\
\label{||f||<||f||_DT}
& \|f\|_\infty \le \|\widehat f\|_{\cal DT} \quad
\mbox{for any $f$ with absolutely converging Fourier series} , &
\end{eqnarray}
where $\widehat f$ is the operator of multiplication by $f$.
{\bf (16)}. We associate with $W\in{\cal DT}(\mathbb{T})$ and any point $a\in\mathbb{T}$ the distribution $L_a$,
\begin{equation}
\label{L_a}
L_a = \sum_{j\in\mathbb{Z}} w_j(a) e^{ijx}, \qquad
w_j(a) = \sum_{k\in\mathbb{Z}} W_{j,k} e^{i(j-k)a}.
\end{equation}
For any $l\in\mathbb{Z}$ and $a\in\mathbb{T}$ we have the estimate
\begin{equation}
\label{|w(a)|<}
|w_l(a)| \le {\bf c} = \|W\|_{\cal DT} .
\end{equation}
We prove that for any $W\in{\cal DT}(\mathbb{T})$ the function
$a\mapsto\rho(L_a) = \limsup_{\# I\to\infty} \rho_I(L_a)$ ($I\subset\mathbb{Z}$ are intervals, $\rho_I$ is defined in item {\bf (11)}) is continuous and
\begin{equation}
\label{||W||_mu=int}
\|W\|_\mu^2 = \frac1{2\pi} \int_\mathbb{T} \rho(L_a) \, da .
\end{equation}
{\bf (17)}. For any operator $W\in{\cal DT}(\mathbb{T})$ we introduce the average trace of $W^* W$ by
$$
{\bf T}(W)
= \limsup_{\# I\to\infty} \frac1{\# I} \sum_{j\in\mathbb{Z},\, l\in I} |W_{l,j}|^2,
$$
where $I\subset\mathbb{Z}$ are intervals. Then
\begin{equation}
\label{||W||_mu>}
{\bf T}(W) \le \|W\|_\mu^2.
\end{equation}
{\bf (18)}. We also prove that if $U\in{\cal DT}(\mathbb{T})$ is a unitary operator then
\begin{equation}
\label{T=T=T}
{\bf T}(W) = {\bf T}(WU) = {\bf T}(UW).
\end{equation}
\section{Main results}
In this section we collect the main results of the present paper. In short, these results concern (1) properties of the $\mu$-norm on ${\cal R}(\mathbb{T})$, a special class of operators on $L^2(\mathbb{T})$, and (2) a construction of a bistochastic operator ${\cal W}$ on $L^1({\cal X},\mu)$ associated with an operator $W$ under certain conditions imposed on $W$.
\begin{itemize}
\item We say that an operator $W\in{\cal DT}(\mathbb{T})$ is regular (the notation is $W\in{\cal R}(\mathbb{T})$) if for any $m,n\in\mathbb{Z}$ there exists the limit
$$
\omega_{m,n}
= \lim_{\# I\to\infty} \omega_{I,m,n} , \qquad
\omega_{I,m,n}
= \frac1{\# I} \sum_{j\in\mathbb{Z},\, l\in I}
W_{l+m,j} \overline W_{l,j+n} .
$$
\item By Lemma \ref{lem:Rclosed} ${\cal R}(\mathbb{T})$ is a cone closed with respect to the norm $\|\cdot\|_{\cal DT}$.
\item By Lemma \ref{lem:dimregular} $\|W\|_\mu^2 = {\bf T}(W)$ for any $W\in{\cal R}(\mathbb{T})$ (compare with (\ref{||W||_mu>})).
\item Let ${\cal ACF}(\mathbb{T})$ be the space of functions on $\mathbb{T}$ with absolutely converging Fourier series. Then the space of multiplication operators $\widehat g$ by functions $g\in{\cal ACF}(\mathbb{T})$ forms a $C^*$-subalgebra in the $C^*$-algebra ${\cal DT}(\mathbb{T})$.
\item In Section \ref{sec:reg_exa} we present several examples of regular operators.
Let $W\in{\cal DT}(\mathbb{T})$ be an operator with a periodic matrix, i.e., there exists $\tau\in\mathbb{N}$ such that $W_{j+\tau,k+\tau} = W_{j,k}$ for any $j,k\in\mathbb{Z}$. Any such operator is regular (Lemma \ref{lem:per=>reg}).
Another example of a regular operator is $\operatorname{Conv}_\lambda$, where
$\lambda_k=e^{i\tau k^2}$.
\item By Proposition \ref{prop:WgWWg_reg} for any $W\in{\cal R}(\mathbb{T})$ and $g_1,g_2\in{\cal ACF}(\mathbb{T})$ the operator $\widehat g_1 W \widehat g_2$ is regular.
\item In Section \ref{sec:mu_W} we associate with operators $W$ from the following three classes
(1) Koopman operators $\operatorname{Koop}(F)$, $F\in\operatorname{Aut}({\cal X},\mu)$,
(2) operators in the case $\#{\cal X} < \infty$,
(3) operators from ${\cal R}(\mathbb{T})$
a measure $d\mu_W(x',x'') = \nu(x',x'')\, d\mu(x') d\mu(x'')$ on ${\cal X}\times{\cal X}$ such that for any ``sufficiently regular'' functions $g',g'' : {\cal X}\to\mathbb{C}$
$$
\|\widehat g' W\widehat g''\|_\mu^2 = \int_{{\cal X}^2} |g'(x')|^2 |g''(x'')|^2 \, d\mu_W(x',x'') .
$$
\item In Section \ref{sec:bistochastic} we introduce the operator
$$
L^1({\cal X},\mu) \ni f \mapsto {\cal W} f = \int_{\cal X} \nu(\cdot, a) f(a)\, d\mu(a)
$$
and prove (Lemma \ref{lem:bistochastic}) that ${\cal W}$ is a bistochastic operator on $L^1({\cal X},\mu)$.
\end{itemize}
\section{Regular operators}
\label{sec:reg_oper}
\subsection{Definition of $\omega_{m,n}$}
\begin{dfn}
\label{dfn:regular}
We say that $W\in{\cal DT}(\mathbb{T})$ is regular ($W\in{\cal R}(\mathbb{T})$) if for any $m,n\in\mathbb{Z}$ there exists the limit
\begin{equation}
\label{limomega}
\lim_{\# I\to\infty} \omega_{I,m,n}
= \omega_{m,n}, \qquad
\omega_{I,m,n}
= \frac1{\# I} \sum_{j\in\mathbb{Z},\, l\in I} W_{l+m,j} \overline W_{l,j+n} ,
\end{equation}
where $I$ are integer intervals.
\end{dfn}
Note that $\omega_{0,0}$ coincides with the average trace of $W^* W$:
\begin{equation}
\label{omega=T}
\omega_{0,0} = {\bf T}(W)\quad
\mbox{if $W\in{\cal R}(\mathbb{T})$}.
\end{equation}
For any integer interval $I$ we put
\begin{equation}
\label{vIm}
v_{I,m}(a)
= \sum_{l\in I} \frac{w_{l+m}(a) \overline w_l(a)}{\# I} ,
\end{equation}
where the functions $w_l(a)$ are defined in (\ref{L_a}).
\begin{lem}
\label{lem:lim_v}
Suppose $W$ is regular. Then for any $m\in\mathbb{Z}$ there exists the limit
\begin{equation}
\label{limv}
\lim_{\# I\to\infty} v_{I,m}(a)
= v_m(a), \quad
v_m(a) = \sum_{n\in\mathbb{Z}} \omega_{m,n} e^{i(m+n)a}
\end{equation}
uniformly in $a\in\mathbb{T}$. The Fourier series of the function $v_m$ absolutely converges.
\end{lem}
{\it Proof}. By (\ref{L_a})
\begin{eqnarray*}
v_m(a)
&=& \lim_{\# I\to\infty} \frac1{\# I} \sum_{j,k\in\mathbb{Z},\,l\in I}
W_{l+m,j} \overline W_{l,k} e^{i(m-j+k)a} \\
&=& \lim_{\# I\to\infty} \sum_{n\in\mathbb{Z}}
\omega_{I,m,n} e^{i(m+n)a}
\; =\; \sum_{n\in\mathbb{Z}} \omega_{m,n} e^{i(m+n)a} .
\end{eqnarray*}
By (\ref{omegaI}) the limit is uniform in $a$.
By (\ref{sumomega}) this Fourier series absolutely converges. \qed
Note that by (\ref{|w(a)|<}) for any interval $I\subset\mathbb{Z}$, any $m\in\mathbb{Z}$, and any $a\in\mathbb{T}$
\begin{equation}
\label{|v|}
|v_{I,m}(a)| \le {\bf c}^2, \quad
|v_m(a)| \le {\bf c}^2.
\end{equation}
\subsection{Closedness with respect to $\|\cdot\|_{{\cal DT}}$}
If $W\in{\cal R}(\mathbb{T})$ then for any $\lambda\in\mathbb{C}$ the operator $\lambda W$ is also regular. Hence, regular operators form a cone ${\cal R}(\mathbb{T})\subset{\cal DT}(\mathbb{T})$.
\begin{lem}
\label{lem:Rclosed}
The cone ${\cal R}(\mathbb{T})$ is closed with respect to the norm $\|\cdot\|_{{\cal DT}}$.
\end{lem}
{\it Proof}. Suppose $\{W_p\}_{p\in\mathbb{N}}$, $W_p\in{\cal R}(\mathbb{T})$ is a Cauchy sequence. By {\bf (14)} there exists $W = \lim_{p\to\infty} W_p$, where the limit is taken with respect to the norm $\|\cdot\|_{{\cal DT}}$.
For any $\varepsilon>0$ there exists positive $N$ such that
\begin{equation}
\label{Wp-Wq}
\mbox{for any integer $p,q>N$ we have:} \quad
\|W_p - W_q\|_{\cal DT} < \varepsilon.
\end{equation}
We define $(\omega_p)_{I,m,n}$ and $(\omega_p)_{m,n}$ by (\ref{limomega}), where $W$ is replaced by $W_p$. Then
\begin{eqnarray*}
\big| (\omega_p)_{I,m,n} - (\omega_q)_{I,m,n} \big|
&=& \frac1{\# I}
\bigg| \sum_{j\in\mathbb{Z}, l\in I} \bigg(
(W_p)_{l+m,j} (\overline W_p)_{l,j+n}
- (W_q)_{l+m,j} (\overline W_q)_{l,j+n}
\bigg)\bigg| \\
&\le& \frac1{\# I} \big( \Sigma_1 + \Sigma_2 \big), \\
\Sigma_1
&=& \sum_{j\in\mathbb{Z}, l\in I} \bigg|
(W_p)_{l+m,j} \bigg( (\overline W_p)_{l,j+n} - (\overline W_q)_{l,j+n}
\bigg) \bigg|, \\
\Sigma_2
&=& \sum_{j\in\mathbb{Z}, l\in I} \bigg|
\bigg( (W_p)_{l+m,j} - (W_q)_{l+m,j} \bigg) (\overline W_q)_{l,j+n}
\bigg| .
\end{eqnarray*}
To estimate the sums $\Sigma_1$ and $\Sigma_2$, we put
$$
(c_p)_k = \sup_{j\in\mathbb{Z}} |(W_p)_{k+j,j}|, \quad
(c_q)_k = \sup_{j\in\mathbb{Z}} |(W_q)_{k+j,j}|, \quad
d_k = \sup_{j\in\mathbb{Z}} |(W_p)_{k+j,j} - (W_q)_{k+j,j}|.
$$
The sums $\sum_k (c_p)_k$ are uniformly bounded:
\begin{equation}
\label{cpcq}
\sum_{k\in\mathbb{Z}} (c_p)_k \le \tilde{\bf c}, \quad
\sum_{k\in\mathbb{Z}} (c_q)_k \le \tilde{\bf c} \quad
\mbox{for some constant $\tilde{\bf c}$}.
\end{equation}
Moreover, by (\ref{Wp-Wq})
\begin{equation}
\label{dk}
\sum_{k\in\mathbb{Z}} d_k < \varepsilon.
\end{equation}
By (\ref{cpcq}) and (\ref{dk})
$$
\Sigma_1
\le \sum_{j\in\mathbb{Z},\, l\in I} (c_p)_{l+m-j} d_{l-j-n}
\le \# I \tilde{\bf c} \varepsilon.
$$
Analogously $\Sigma_2 \le \# I \tilde{\bf c} \varepsilon$. This implies that for any interval $I$
$$
\big| (\omega_p)_{I,m,n} - (\omega_q)_{I,m,n} \big| \le 2\tilde{\bf c} \varepsilon.
$$
Hence, $\big| (\omega_p)_{m,n} - (\omega_q)_{m,n} \big| \le 2\tilde{\bf c}\varepsilon$, i.e., for any integer $m,n$ the sequence $(\omega_p)_{m,n}$, $p\in\mathbb{N}$, is a Cauchy sequence. The same estimate holds with $W_q$ replaced by the limit operator $W$, so that $|(\omega_p)_{I,m,n} - \omega_{I,m,n}| \le 2\tilde{\bf c}\varepsilon$ uniformly in $I$ for all big $p$. Hence, by a standard $\varepsilon/3$-argument, the limit (\ref{limomega}) exists for $W$, i.e., $W\in{\cal R}(\mathbb{T})$. \qed
\subsection{$\mu$-norm of a regular operator}
\begin{lem}
\label{lem:dimregular}
If $W\in{\cal R}(\mathbb{T})$ then (compare with (\ref{||W||_mu>}))
\begin{equation}
\label{dim=omega}
\|W\|_\mu^2
= {\bf T}(W) .
\end{equation}
\end{lem}
{\it Proof}. By (\ref{L_a}), (\ref{||W||_mu=int}) and (\ref{vIm})
\begin{equation}
\label{dim=intlimreg}
\|W\|_\mu^2
= \frac1{2\pi} \int_\mathbb{T} \lim_{\# I\to\infty} v_{I,0}(a) \, da.
\end{equation}
By Lemma \ref{lem:lim_v} the limit $v_0(a) = \lim_{\#I\to\infty} v_{I,0}(a)$ exists for any $a\in\mathbb{T}$ and by (\ref{|v|}) $|v_{I,0}(a)| \le {\bf c}^2$ for all $I$ and $a$. Therefore by the Lebesgue theorem on bounded convergence we may exchange the integration and the limit:
\begin{eqnarray*}
\|W\|_\mu^2
&=& \lim_{\# I\to\infty} \frac1{2\pi} \int_\mathbb{T} v_{I,0}(a)\, da
= \lim_{\# I\to\infty} \frac1{2\pi} \int_\mathbb{T} \sum_{j,k\in\mathbb{Z},\, l\in I}
\frac1{\# I} W_{l,j} \overline W_{l,k} e^{i(k-j)a} \, da \\
&=& \lim_{\# I\to\infty} \sum_{j\in\mathbb{Z},\, l\in I}
\frac1{\# I} W_{l,j} \overline W_{l,j}
\; =\; \omega_{0,0} .
\end{eqnarray*}
By (\ref{limomega}) this implies (\ref{dim=omega}). \qed
Assertion {\bf (18)} and Lemma \ref{lem:dimregular} imply the following.
\begin{cor}
\label{cor:UWU}
Suppose $W,U\in{\cal DT}(\mathbb{T})$, where $U$ is unitary and both $W$ and $UWU^{-1}$ are regular. Then by {\bf (18)} and (\ref{UWWU})
$$
\|W\|_\mu = \|UWU^{-1}\|_\mu = \|WU^{-1}\|_\mu .
$$
\end{cor}
\section{Regular operators: examples}
\label{sec:reg_exa}
\begin{dfn}
We say that the matrix $(W_{k,j})$ of the operator $W\in{\cal DT}(\mathbb{T})$ is $\tau$-periodic, $\tau\in\mathbb{N}$, if
$$
W_{k+\tau,j+\tau} = W_{k,j} \quad\mbox{for any } k,j\in\mathbb{Z}.
$$
\end{dfn}
In particular, for any $g\in{\cal ACF}(\mathbb{T})$ the matrix of the operator $\widehat g\in{\cal DT}(\mathbb{T})$ is 1-periodic.
\begin{lem}
\label{lem:per=>reg}
Suppose $W\in{\cal DT}(\mathbb{T})$ is an operator with $\tau$-periodic matrix, $\tau\in\mathbb{N}$. Then $W\in{\cal R}(\mathbb{T})$ and $\omega_{m,n} = \breve\omega_{m,n}$,
$$
\breve\omega_{m,n} = \frac1\tau \sum_{j\in\mathbb{Z},\,l\in J}
W_{l+m,j} \overline W_{l,j+n},
$$
where $J\subset\mathbb{Z}$ is any interval with $\# J = \tau$.
\end{lem}
{\it Proof}. By $\tau$-periodicity $\breve\omega_{m,n}$ does not depend on $J$.
For any $I = \{s,s+1,\ldots,s+K\}$ let $I'\subset I$ be the maximal integer interval of the form $I' = \{s,s+1,\ldots,s+q\tau - 1\}$, $q\in\mathbb{Z}$. We put
$I'' = \{s+q\tau,s+q\tau+1,\ldots,s+K\}$. Then $\# I'' < \tau$,
$\omega_{I',m,n} = \breve\omega_{m,n}$ while
$\omega_{I,m,n} - \omega_{I',m,n} = A_1 + A_2$,
$$
A_1
= \frac1{\# I} \sum_{j\in\mathbb{Z},\,l\in I''}
W_{l+m,j} \overline W_{l,j+n}, \quad
A_2
= \Big(\frac1{\# I} - \frac1{\# I'}\Big) \sum_{j\in\mathbb{Z},\,l\in I'}
W_{l+m,j} \overline W_{l,j+n}.
$$
The inequalities
$$
|A_1| \le \frac{\tau{\bf c}^2}{\# I}, \quad
|A_2| \le \frac{\tau{\bf c}^2}{\# I - \tau}
$$
imply the existence of the limit (\ref{limomega}) and the equation
$\omega_{m,n} = \breve\omega_{m,n}$. \qed
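As an illustration (a toy numerical check, not part of the proof), the following sketch compares $\omega_{I,m,n}$ over a long interval $I$ with $\breve\omega_{m,n}$ computed over a single period for a hypothetical $2$-periodic banded matrix.
\begin{verbatim}
import numpy as np

# A 2-periodic banded matrix: W_{k,k} = 1 or 0.3 depending on parity,
# W_{k,k-1} = 0.2, all other entries zero.
def W_entry(k, j):
    if k == j:
        return 1.0 if k % 2 == 0 else 0.3
    if k == j + 1:
        return 0.2
    return 0.0

def omega_I(m, n, I, jrange):
    s = 0.0
    for l in I:
        for j in jrange:
            s += W_entry(l + m, j) * np.conj(W_entry(l, j + n))
    return s / len(I)

jrange = range(-300, 301)
print(omega_I(1, -1, range(-100, 100), jrange))   # long interval I
print(omega_I(1, -1, range(0, 2), jrange))        # one period: breve omega
\end{verbatim}
Both printed values equal $0.34$ here, in agreement with the lemma.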
\begin{cor}
Consider the operator $\widehat g$, where $g\in{\cal ACF}(\mathbb{T})$. After simple calculations we obtain:
\begin{equation}
\label{omega(g)}
\omega_{m,n}(\widehat g) = {\bf g}_{m+n}, \qquad
g(x) \overline g(x) = \sum_{k\in\mathbb{Z}} {\bf g}_k e^{ikx},
\end{equation}
where the last equation is the definition of ${\bf g}_k$.
\end{cor}
\begin{lem}
\label{lem:su}
Let $\{\lambda_k\}_{k\in\mathbb{Z}}$ be defined by $\lambda_k = e^{i\tau k^2}$. Then the operator $\operatorname{Conv}_\lambda$ is regular:
$$
\omega_{m,n}
= \delta_{0,m+n} \delta_{\tau m,\pi\mathbb{Z}} e^{i\tau m^2}, \qquad
\delta_{\tau m,\pi\mathbb{Z}}
= \left\{ \begin{array}{cl}
1 & \mbox{if } \tau m/\pi \in \mathbb{Z} , \\
0 & \mbox{if } \tau m/\pi \not\in \mathbb{Z} .
\end{array}
\right.
$$
\end{lem}
{\it Proof}. In this case $W_{k,j} = \delta_{kj} e^{i\tau k^2}$ and
\begin{eqnarray*}
\omega_{I,m,n}
&=& \frac1{\# I} \sum_{j\in\mathbb{Z},\,l\in I}
\delta_{l+m,j}\delta_{l,j+n} e^{i\tau((l+m)^2 - l^2)} \\
&=& \frac1{\# I} \sum_{l\in I}
\delta_{l,l+m+n} e^{i\tau m^2 + 2i\tau ml}
\; =\; \frac1{\# I} \delta_{0,m+n} e^{i\tau m^2}
\sum_{l\in I} e^{2i\tau ml} .
\end{eqnarray*}
Hence, if $\tau m / \pi \in \mathbb{Z}$ then $\omega_{m,n} = \delta_{0,m+n} e^{i\tau m^2}$. If $\tau m / \pi \not\in \mathbb{Z}$ then $\omega_{m,n} = 0$. \qed
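A quick numerical illustration of this formula (an example computation, not used elsewhere): for $\tau=\pi/2$ the averages below reproduce $\omega_{2,-2}=1$ and $\omega_{1,-1}=0$.
\begin{verbatim}
import numpy as np

# omega_{I,m,n} for W_{k,j} = delta_{kj} exp(i tau k^2):
# delta_{0,m+n} * exp(i tau m^2) * mean over l in I of exp(2 i tau m l).
tau = np.pi / 2

def omega_I(m, n, I):
    if m + n != 0:
        return 0.0
    l = np.asarray(I, dtype=float)
    return np.exp(1j * tau * m**2) * np.mean(np.exp(2j * tau * m * l))

I = np.arange(-5000, 5000)
print(omega_I(2, -2, I))   # tau*m = pi lies in pi*Z: value close to 1
print(omega_I(1, -1, I))   # tau*m = pi/2 does not: value close to 0
\end{verbatim}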
\section{Measure associated with an operator}
\label{sec:mu_W}
\subsection{Koopman operator}
Let $({\cal X},{\cal B},\mu)$ be a probability space.
\begin{lem}
\label{lem:gU_F}
Let $F\in\operatorname{Aut}({\cal X},\mu)$ and $g_0,\ldots,g_K\in L^\infty({\cal X},\mu)$. Then
\begin{equation}
\label{gU_F}
\big\| \widehat g_K U_F \widehat g_{K-1} \ldots U_F \widehat g_0 \big\|^2_\mu
= \int_{\cal X} |g_K\circ F^K|^2 |g_{K-1}\circ F^{K-1}|^2 \ldots |g_0|^2 \, d\mu
\end{equation}
\end{lem}
{\it Proof}. Take a small constant $\sigma>0$ and consider a partition $\{X_1,\ldots,X_J\}$ such that\footnote
{We can use the same partition for all the functions $g_k$.}
\begin{equation}
\label{g-Sigma}
\| g_k - \varphi_k \|_\infty < \sigma \|g_k\|_\infty ,
\quad \varphi_k = \sum_{j=1}^J g_{k,j} {\bf 1}_{X_j} ,
\quad \|\varphi_k\|_\infty \le \|g_k\|_\infty ,
\qquad k = 0,\ldots,K .
\end{equation}
Here $g_{k,j}\in\mathbb{C}$ are some constants.
We put $S = \widehat\varphi_K U_F \widehat\varphi_{K-1} \ldots U_F \widehat \varphi_0$.
By the triangle inequality the quantity
$$
\Big| \| \widehat g_K U_F \widehat g_{K-1} \ldots U_F \widehat g_0 \|_\mu
- \|S\|_\mu \Big|
$$
does not exceed
\begin{eqnarray*}
&& \Big| \| \widehat g_K U_F \widehat g_{K-1} \ldots U_F \widehat g_0 \|_\mu
- \| \widehat\varphi_K U_F \widehat g_{K-1} \ldots U_F \widehat g_0 \|_\mu \Big|\\
&+& \Big| \| \widehat\varphi_K U_F \widehat g_{K-1} \ldots U_F \widehat g_0 \|_\mu
- \| \widehat\varphi_K U_F \widehat\varphi_{K-1} U_F \widehat g_{K-2} \ldots U_F \widehat g_0 \|_\mu \Big|\\
&+& \ldots
\; +\; \Big| \| \widehat\varphi_K U_F \widehat\varphi_{K-1}
\ldots \widehat\varphi_1 U_F \widehat g_0 \|_\mu
- \| \widehat\varphi_K U_F \widehat\varphi_{K-1}
\ldots \widehat\varphi_1 U_F \widehat\varphi_0 \|_\mu \Big| \\
\le && \| (\widehat g_K - \widehat\varphi_K) U_F \widehat g_{K-1}
\ldots U_F \widehat g_0 \|_\mu
+ \| \widehat\varphi_K U_F (\widehat g_{K-1} - \widehat\varphi_{K-1})
\ldots U_F \widehat g_0 \|_\mu \\
&+& \ldots \;
+ \| \widehat\varphi_K U_F \widehat\varphi_{K-1}
\ldots \widehat\varphi_1 U_F (\widehat g_0 - \widehat\varphi_0) \|_\mu \\
\le && (K+1) \sigma \prod_{0\le k\le K} \|g_k\|_\infty .
\end{eqnarray*}
By (\ref{UFpi})
$$
S
= \sum_{j_0,\ldots,j_K} g_{K,j_K} g_{K-1,j_{K-1}} \ldots g_{0,j_0}
\pi_{X_{j_K}} \pi_{F^{-1}(X_{j_{K-1}})} \ldots \pi_{F^{-K}(X_{j_0})} U_F^K.
$$
We put $X_{j_K,\ldots,j_0} = X_{j_K} \cap F^{-1}(X_{j_{K-1}}) \cap
\ldots \cap F^{-K}(X_{j_0})$.
Note that for any two sets $X'=X_{j'_K,\ldots,j'_0}$ and $X''=X_{j''_K,\ldots,j''_0}$ we have: $\mu(X'\cap X'')=0$ if the collections of indices $j'_K,\ldots,j'_0$ and $j''_K,\ldots,j''_0$ do not coincide.
Then by (\ref{WUF}) and {\bf (1)} we obtain:
\begin{eqnarray}
\nonumber
\|S\|_\mu^2
&=& \sum_{j_0,\ldots,j_K} | g_{K,j_K} g_{K-1,j_{K-1}} \ldots g_{0,j_0} |^2
\| \pi_{X_{j_K,\ldots,j_0}} \|_\mu^2 \\
\label{gU_FgU_F}
&=& \sum_{j_0,\ldots,j_K} | g_{K,j_K} g_{K-1,j_{K-1}} \ldots g_{0,j_0} |^2
\mu( X_{j_K,\ldots,j_0} ) .
\end{eqnarray}
Equation (\ref{g-Sigma}) implies
$$
\varphi_k\circ F^k
= \sum_{j=1}^J g_{k,j} {\bf 1}_{F^{-k}(X_j)} .
$$
Hence (\ref{gU_FgU_F}) is an integral sum for the integral
$$
\int_{\cal X} |\varphi_K\circ F^K|^2 |\varphi_{K-1}\circ F^{K-1}|^2 \ldots |\varphi_0|^2 \, d\mu .
$$
This integral differs from the integral (\ref{gU_F}) at most by
$2\sigma\prod_{0\le k\le K} \|g_k\|_\infty^2$. Since $\sigma$ is arbitrarily small, we obtain equation (\ref{gU_F}). \qed
Now suppose that ${\cal X}$ is in addition a topological space and ${\cal B}$ is the corresponding Borel $\sigma$-algebra.
Consider the distribution $\delta \in (C({\cal X}^2))^*$ (a measure on ${\cal X}^2$ by the Riesz theorem) such that for any $\Phi\in C({\cal X}^2)$
\begin{equation}
\label{delta}
\int_{{\cal X}^2} \delta(x',x'') \Phi(x',x'')\, d\mu(x') d\mu(x'')
= \int_{\cal X} \Phi(x,x)\, d\mu(x) .
\end{equation}
Taking in (\ref{delta}) $\Phi(x',x'')=\varphi'(x')\varphi''(x'')$, where
$\varphi',\varphi'' : {\cal X}\to\mathbb{C}$ are arbitrary continuous functions, we obtain:
\begin{equation}
\label{int_delta}
\int_{\cal X} \delta(x',x'') \varphi'(x')\, d\mu(x') = \varphi'(x''), \quad
\int_{\cal X} \delta(x',x'') \varphi''(x'')\, d\mu(x'') = \varphi''(x').
\end{equation}
Recall also that if $F\in\operatorname{End}({\cal X},\mu)$ then
\begin{equation}
\label{intfF}
\int_{\cal X} f\, d\mu = \int_{\cal X} f\circ F\, d\mu \quad
\mbox{for any } f\in L^1({\cal X},\mu).
\end{equation}
Then for any continuous $F\in\operatorname{Aut}({\cal X},\mu)$
\begin{equation}
\label{delta=delta}
\delta(x',x'') = \delta(F(x'),F(x'')).
\end{equation}
Indeed, for any $\Phi\in C({\cal X}^2)$ by (\ref{intfF})
\begin{eqnarray*}
&& \int_{{\cal X}^2} \delta(F(x'),F(x'')) \Phi(x',x'')\, d\mu(x') d\mu(x'') \\
&=& \int_{{\cal X}^2} \delta(x',x'') \Phi(F^{-1}(x'),F^{-1}(x''))\, d\mu(x') d\mu(x'')
\; =\; \int_{\cal X} \Phi(x,x)\, d\mu(x) .
\end{eqnarray*}
\begin{lem}
\label{lem:piUFpi}
Let $F\in\operatorname{Aut}({\cal X},\mu)$ be continuous and $g_0,\ldots,g_K\in C({\cal X})$. Then
\begin{eqnarray}
\!\!\!\!\!\nonumber
&& \big\|\widehat g_K U_F \ldots\widehat g_1 U_F \widehat g_0 \big\|_\mu^2 \\
\!\!\!\!\!\label{gU_F=int}
&=& \int_{{\cal X}^{K+1}} |g_K(x_K)|^2 \delta(x_K,F(x_{K-1})) \ldots
|g_1(x_1)|^2 \delta(x_1,F(x_0)) |g_0(x_0)|^2 \,
d\mu^{K+1} , \\
\!\!\!\!\!\nonumber
&& \qquad d\mu^{K+1}
= d\mu(x_K)\ldots d\mu(x_0) .
\end{eqnarray}
\end{lem}
{\it Proof}. Let $I$ denote the integral (\ref{gU_F=int}). We use in this integral the change of coordinates
$$
x_0 = x'_0, \quad x_1 = F(x'_1), \quad \ldots \quad x_K = F^K(x'_K), \quad
d\mu^{\prime\, K+1} = d\mu(x'_K)\ldots d\mu(x'_0) .
$$
Then
{\bf e}gin{eqnarray*}
I
&=& \int |g_K\circ F^K(x'_K)|^2 \delta(F^K(x'_K),F^K(x'_{K-1})) \\
&& \qquad\qquad
\ldots \; |g_1\circ F(x'_1)|^2 \delta(F(x'_1),F(x'_0)) |g_0(x'_0)|^2
\, d\mu^{\prime\, K+1} \\
&=& \int_{\cal X} |g_K\circ F^K|^2 \ldots |g_1\circ F|^2 |g_0|^2\, d\mu .
\end{eqnarray*}
It remains to use Lemma \ref{lem:gU_F}. \qed
We associate with any $U_F=\operatorname{Koop}(F)$, where $F\in\operatorname{Aut}({\cal X},\mu)$ is continuous,\footnote{We expect that the continuity is inessential here.} the measure $\mu_{U_F}$ on ${\cal X}^2$:
\begin{equation}
\label{muUF}
d\mu_{U_F}(x',x'') = \delta(x',F(x''))\, d\mu(x') d\mu(x'') .
\end{equation}
\subsection{The finite-dimensional case}
\label{sec:mu_W_finite}
Let ${\cal X} = \{1,\ldots,J\}$ be a finite set. We identify ${\cal X}$ with $\mathbb{Z}_J = \mathbb{Z} / J\mathbb{Z}$, the cyclic additive group with $J$ elements.\footnote{Below we also use the structure of a commutative ring on $\mathbb{Z}_J$.}
Let the measure $\mu$ of any point be equal to $1/J$. Then
${\cal H} = L^2({\cal X},\mu)\cong (\mathbb{C}^J,\langle\,,\rangle)$, where $\langle\,,\rangle$ equals the standard Hermitian product divided by $J$.
We put $\eta=e^{2\pi i/J}$. Then $\eta^J=1$ and $\overline\eta=\eta^{-1}$. The space ${\cal H}$ may be identified with the space of ``discrete trigonometric polynomials''
\begin{equation}
\label{f(eta)}
f = f(x) = \sum_{j\in\mathbb{Z}_J} f_j \eta^{jx}, \qquad
f_j\in\mathbb{C}, \quad x\in\mathbb{Z}_J .
\end{equation}
This polynomial representation generates on ${\cal H}$ an operation of multiplication: for any two vectors
$f' = \sum f'_j \eta^{jx}$ and $f'' = \sum f''_j \eta^{jx}$
$$
f' f'' = f = \sum_{k\in\mathbb{Z}_J} f_k \eta^{kx}, \qquad
f_k = \sum_{j\in\mathbb{Z}_J} f'_{k-j} f''_j .
$$
This product introduces on ${\cal H}$ the structure of a commutative ring. The structure of a Hilbert space is determined by
$$
\langle f,g\rangle = \frac1J \sum_{k\in\mathbb{Z}_J} f(k) \overline{g(k)} .
$$
The coefficients $f_k$ and $f(k)$ are connected by the ``discrete Fourier transform''
\begin{equation}
\label{discreteFourier}
f(k) = \sum_{j\in\mathbb{Z}_J} f_j \eta^{kj}, \quad
f_j = \frac1J \sum_{k\in\mathbb{Z}_J} f(k) \eta^{-kj} .
\end{equation}
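For illustration (a toy check with an arbitrary random vector of coefficients), the following sketch verifies numerically that the two formulas in (\ref{discreteFourier}) are inverse to each other.
\begin{verbatim}
import numpy as np

J = 7
eta = np.exp(2j * np.pi / J)
rng = np.random.default_rng(1)
coeffs = rng.standard_normal(J) + 1j * rng.standard_normal(J)     # f_j

# f(k) = sum_j f_j eta^{kj}  and  f_j = (1/J) sum_k f(k) eta^{-kj}
values = np.array([sum(coeffs[j] * eta**(k * j) for j in range(J))
                   for k in range(J)])
recovered = np.array([sum(values[k] * eta**(-k * j) for k in range(J)) / J
                      for j in range(J)])
print(np.allclose(coeffs, recovered))                              # True
\end{verbatim}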
For any $f$ satisfying (\ref{f(eta)}) and any $A\subset\mathbb{Z}_J$ we put
$$
\int_A f(x)\, d\mu(x)
:= \frac1J \sum_{k\in A} f(k)
= \frac1J \sum_{k\in A,\, j\in\mathbb{Z}_J} f_j \eta^{jk} .
$$
Then
$$
\int_{\cal X} f\, d\mu = f_0, \quad
\langle f,g\rangle = \int_{\cal X} f\overline g \, d\mu .
$$
Consider a linear operator
$$
f\mapsto Wf, \qquad
(Wf)_k = \sum_{j\in\mathbb{Z}_J} W_{kj} f_j .
$$
In another basis it takes the form
$$
(Wf)(k) = \sum_{j\in\mathbb{Z}_J} W(k,j) f(j) .
$$
Equations (\ref{discreteFourier}) imply
\begin{equation}
\label{WfWf}
W(m,n) = \frac1J \sum_{j,k\in\mathbb{Z}_J} \eta^{mk} W_{kj} \eta^{-jn}.
\end{equation}
We define
\begin{equation}
\label{nu_finite}
\omega_{m,n}
= \frac1J \sum_{j,l\in\mathbb{Z}_J} W_{l+m,j} \overline W_{l,j+n} , \quad
\nu(x,a)
= \sum_{m,n\in\mathbb{Z}_J} \omega_{m,n} \eta^{mx} \eta^{na} , \qquad
x,a\in\mathbb{Z}_J.
\end{equation}
\begin{lem}
\label{lem:WW=WW}
$\sum_{m,n\in\mathbb{Z}_J} |W(m,n)|^2 = \sum_{j,k\in\mathbb{Z}_J} |W_{j,k}|^2$.
\end{lem}
{\it Proof}. Direct computation with the help of (\ref{WfWf}) and the identity $\sum_{j\in\mathbb{Z}_J}\eta^{jk}=J\delta_{k,0}$. \qed
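The identity is easy to test numerically (a toy check on a random matrix; the matrix and the value of $J$ are arbitrary):
\begin{verbatim}
import numpy as np

J = 6
eta = np.exp(2j * np.pi / J)
rng = np.random.default_rng(2)
Wc = rng.standard_normal((J, J)) + 1j * rng.standard_normal((J, J))  # W_{kj}

# W(m,n) = (1/J) sum_{j,k} eta^{mk} W_{kj} eta^{-jn}, written as E Wc E^*/J
m = np.arange(J)
E = eta ** np.outer(m, m)
Wv = E @ Wc @ E.conj().T / J
print(np.isclose((np.abs(Wv)**2).sum(), (np.abs(Wc)**2).sum()))      # True
\end{verbatim}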
\begin{cor}
By (\ref{|.|mu(finite)}) and Lemma \ref{lem:WW=WW}
\begin{equation}
\label{||mu=omega00}
\|W\|_\mu^2
= \omega_{0,0}
= \int_{{\cal X}^2} \nu(x,a)\, d\mu(x) d\mu(a) .
\end{equation}
\end{cor}
\begin{lem}
\label{lem:gnug}
For any $g',g''\in{\cal H}$ the operator $\widetilde W = \widehat g' W\widehat g''$ generates the coefficients $\widetilde\omega_{m,n}$ such that
\begin{equation}
\label{g'nug''}
\sum_{m,n\in\mathbb{Z}_J} \widetilde\omega_{m,n} \eta^{mx} \eta^{na}
= |g'(x)|^2 \nu(x,a) |g''(a)|^2 .
\end{equation}
The measure
$d\mu_W(x,a) = \nu(x,a)\, d\mu(x) d\mu(a)$ on ${\cal X}^2$ satisfies
\begin{equation}
\label{g'g''}
\|\widehat g' W \widehat g''\|_\mu^2
= \int_{{\cal X}^2} |g'(x)|^2 |g''(a)|^2 \, d\mu_W(x,a) .
\end{equation}
\end{lem}
{\it Proof}. We put
$$
g'(x) = \sum g'_k\eta^{kx}, \quad
g''(x) = \sum g''_k\eta^{kx}, \quad
|g'(x)|^2 = \sum {\bf g}'_k\eta^{kx}, \quad
|g''(x)|^2 = \sum {\bf g}''_k\eta^{kx} .
$$
The equation $\widetilde W_{p,q} = \sum_{\alpha,\beta\in\mathbb{Z}_J} g'_{p-\alpha} W_{\alpha,\beta} g''_{\beta-q}$ implies
\begin{eqnarray*}
\widetilde\omega_{m,n}
&=& \frac1J \sum_{l,j,\alpha',\alpha'',\beta',\beta''} g'_{l+m-\alpha'} W_{\alpha',\beta'} g''_{\beta'-j}
\overline g'_{l-\alpha''} \overline W_{\alpha'',\beta''} \overline g''_{\beta''-j-n} \\
&=& \frac1J \sum_{\alpha',\alpha'',\beta',\beta''} {\bf g}'_{m-\alpha'+\alpha''} W_{\alpha',\beta'}
\overline W_{\alpha'',\beta''} {\bf g}''_{\beta'-\beta''+n} \\
&=& \frac1J \sum_{p,q,\alpha'',\beta'} {\bf g}'_{m-p} W_{p+\alpha'',\beta'}
\overline W_{\alpha'',\beta'-q} {\bf g}''_{q+n}
\; = \; \sum_{p,q} {\bf g}'_{m-p} \omega_{p,-q} {\bf g}''_{q+n} .
\end{eqnarray*}
This implies (\ref{g'nug''}). Equation (\ref{g'g''}) follows from (\ref{||mu=omega00}). \qed
We put
$$
c_k = \max_{j\in\mathbb{Z}_J} |W_{k+j,j}|, \quad
\|W\|_{{\cal DT}} = \sum_{k\in\mathbb{Z}_J} c_k .
$$
Then
\begin{equation}
\label{DT_finite}
\sum_{m\in\mathbb{Z}_J} |\omega_{m,n}| \le \|W\|_{\cal DT}^2, \quad
\sum_{n\in\mathbb{Z}_J} |\omega_{m,n}| \le \|W\|_{\cal DT}^2 .
\end{equation}
\begin{dfn}
\label{dfn:f>0}
We say that $f=\sum_{j\in\mathbb{Z}_J} f_j \eta^{jx}$ is nonnegative ($f\ge 0$) if the numbers $f(x)$ are real and nonnegative for all $x\in\mathbb{Z}_J$.
\end{dfn}
\begin{lem}
\label{lem:f>0}
If ${\cal X} = \mathbb{Z}_J$ then the following three statements are equivalent.
(1) $f\ge 0$,
(2) $f = g\overline g$ for some $g\in{\cal H}$,
(3) $f_k = \sum_{s\in\mathbb{Z}_J} g_{k+s} \overline g_s$ for some $g\in{\cal H}$.
\end{lem}
{\it Proof}. Equivalence of statements (2) and (3) is obvious. Equivalence of (1) and (2) follows from two simple facts.
(a) For any $g',g''\in{\cal H}$ and any $x\in\mathbb{Z}_J$\;
$(g'g'')(x) = g'(x) g''(x)$.
(b) The numbers $f(x)$, $x\in\mathbb{Z}_J$ determine uniquely $f\in{\cal H}$. \qed
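A small numerical illustration of the lemma (a toy check with a random $g$; indices are taken mod $J$):
\begin{verbatim}
import numpy as np

J = 8
eta = np.exp(2j * np.pi / J)
rng = np.random.default_rng(3)
g = rng.standard_normal(J) + 1j * rng.standard_normal(J)            # g_j

# f_k = sum_s g_{k+s} conj(g_s)  (statement (3) of the lemma)
f = np.array([sum(g[(k + s) % J] * np.conj(g[s]) for s in range(J))
              for k in range(J)])
f_vals = np.array([sum(f[k] * eta**(k * x) for k in range(J)) for x in range(J)])
g_vals = np.array([sum(g[k] * eta**(k * x) for k in range(J)) for x in range(J)])
print(np.allclose(f_vals, np.abs(g_vals)**2))  # True, so f(x) >= 0 (statement (1))
\end{verbatim}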
\begin{lem}
\label{lem:omega=measure}
Suppose $f\ge0$ and
$\displaystyle F(x) = \sum_{m,n\in\mathbb{Z}_J} \omega_{m,-n} f_n \eta^{mx}$. Then $F\ge 0$.
\end{lem}
{\it Proof}. By Lemma \ref{lem:f>0} $f=g\overline g$ for some $g\in{\cal H}$. Then
$$
F(x)
= \frac1J \sum_{j,l,m,n,s\in\mathbb{Z}_J}
W_{l+m,j}\overline W_{l,j-n} g_{n+s} \overline g_s \eta^{mx}.
$$
By using the change of summation indices $(n,s)\mapsto (k,q)$, $k=j-n$, $q=s-k$, we obtain:
$$
F(x)
= \frac1J \sum_{j,l,k,m,q\in\mathbb{Z}_J} W_{l+m,j} g_{j+q} \overline W_{l,k} \overline g_{k+q} \eta^{mx}
= \frac1J \sum_{q\in\mathbb{Z}_J} F^{(q)}(x) \overline F^{(q)}(x),
$$
where
$$
F^{(q)}(x)
= \sum_{j,l\in\mathbb{Z}_J} W_{l,j} g_{j+q} \eta^{lx} .
$$
Hence, $F\ge 0$. \qed
Now suppose $W$ is a unitary operator:
\begin{equation}
\label{Wunitary}
\frac1J \sum_{l\in\mathbb{Z}_J} W_{l,j} \overline W_{l,k} = \delta_{j,k}, \qquad
j,k\in\mathbb{Z}_J.
\end{equation}
We put ${\bf 1}_{\cal X} = \sqrt{J} \in{\cal H}$. Hence,
$$
{\bf 1}_{{\cal X}}(x)
= \sum_{k\in\mathbb{Z}_J} ({\bf 1}_{\cal X})_k \eta^{kx}, \qquad
({\bf 1}_{\cal X})_k = \sqrt{J}\delta_{k,0}, \quad
\langle{\bf 1}_{\cal X} , {\bf 1}_{\cal X}\rangle = 1.
$$
\begin{lem}
\label{lem:one_finite}
Suppose $W$ is unitary. Then
\begin{equation}
\label{1finite}
\sum_{n\in\mathbb{Z}_J} \omega_{m,-n} ({\bf 1}_{\cal X})_n = ({\bf 1}_{\cal X})_m, \quad
\sum_{m\in\mathbb{Z}_J} ({\bf 1}_{\cal X})_m \omega_{-m,n} = ({\bf 1}_{\cal X})_n.
\end{equation}
\end{lem}
{\it Proof}. By (\ref{nu_finite}) and (\ref{Wunitary})
$$
\sum_{n\in\mathbb{Z}_J} \omega_{m,-n} ({\bf 1}_{\cal X})_n
= \sum_{j,l,n\in\mathbb{Z}_J} \frac{W_{l+m,j} \overline W_{l,j-n}}{J} \sqrt{J} \delta_{n,0}
= \sum_{j,l\in\mathbb{Z}_J} \frac{W_{l+m,j} \overline W_{l,j}}{\sqrt{J}}
= ({\bf 1}_{\cal X})_m .
$$
The second equation in (\ref{1finite}) can be obtained analogously.
\qed
\subsection{Measure associated with a regular operator}
\label{ssec:meas_reg}
We start from a simple remark. Take any $f\in L^2(\mathbb{T})$. Then $f\in L^1(\mathbb{T})$ and we may associate with $f$ the Radon measure $f d\mu$ on $\mathbb{T}$ i.e., a linear functional on $C(\mathbb{T})$:
$$
C(\mathbb{T}) \ni \varphi \mapsto \int_\mathbb{T} f\varphi\, d\mu .
$$
We use this observation in the following lemma.
\begin{lem}
\label{lem:regular}
Suppose $W\in{\cal R}(\mathbb{T})$ and for any small $\varepsilon>0$ the function $f_\varepsilon = f_\varepsilon(x)$ satisfies
\begin{equation}
\label{feps}
f_\varepsilon = \pi_Y f_\varepsilon \in L^2(\mathbb{T}), \quad
\|f_\varepsilon\|^2 = 1, \quad
Y = (a-\varepsilon,a+\varepsilon).
\end{equation}
Let $L_a$ be determined by (\ref{L_a}). Then there exists the weak limit\footnote
{i.e., we regard here $|L_a * f_\varepsilon|^2$ as functionals on $C(\mathbb{T})$}
\begin{equation}
\label{nu}
\lim_{\varepsilon\searrow 0} |L_a * f_\varepsilon|^2
= \nu, \qquad
\nu = \nu(x,a)
= \sum_{m\in\mathbb{Z}} v_m(a) e^{im(x-a)} .
\end{equation}
The limit (\ref{nu}) is independent of the choice of the family $f_\varepsilon$ satisfying (\ref{feps}). For any $a\in\mathbb{T}$
\begin{equation}
\label{dmu=nudx}
d\widetilde\mu_{W,a} = \frac1{2\pi}\nu(x,a)\, dx
\end{equation}
is a (non-negative) measure on $\mathbb{T}$ and its norm as a functional on $C(\mathbb{T})$ satisfies
\begin{equation}
\label{|mu|}
\|\widetilde\mu_{W,a}\|_{C^*}
= \int_\mathbb{T} d\widetilde\mu_{W,a}
\le {\bf c}^2
= \|W\|^2_{{\cal DT}}.
\end{equation}
\end{lem}
\begin{rem}
\label{rem:nu=sum}
By using (\ref{limv}) we obtain the equation
\begin{equation}
\label{nu=sum}
\nu(x,a) = \sum_{m,n\in\mathbb{Z}} \omega_{m,n} e^{imx+ina}.
\end{equation}
\end{rem}
\begin{cor}
Suppose $W\in{\cal R}(\mathbb{T})$. Then by (\ref{dim=omega}) we have $\|W\|_\mu^2={\bf T}(W)=\omega_{0,0}$. Therefore by (\ref{nu=sum})
$$
\|W\|_\mu^2 = \frac1{(2\pi)^2} \int_{\mathbb{T}^2} \nu(x,a) \, da\, dx.
$$
\end{cor}
{\it Proof of Lemma \ref{lem:regular}}. For any $f_\varepsilon$ satisfying (\ref{feps})
\begin{equation}
\label{e|Lf|}
\Big| \frac1{2\pi} \int_\mathbb{T} e^{-ikx} |L_a * f_\varepsilon|^2\, dx \Big|
\le \|\operatorname{Conv}_{L_a}\|^2 \le {\bf c}^2, \qquad
k\in\mathbb{Z}.
\end{equation}
Hence the absolute values of the Fourier coefficients of any function $\nu_\varepsilon(a,\cdot) = |L_a * f_\varepsilon|^2$ do not exceed ${\bf c}^2$. Existence of the weak limit (\ref{nu}) in the class of distributions with bounded Fourier coefficients is equivalent to the existence of limits for all Fourier coefficients:
\begin{equation}
\label{limFourier}
\xi_m
= \lim_{\varepsilon\searrow 0} \xi_{\varepsilon,m}, \qquad
\xi_{\varepsilon,m}
= \frac1{2\pi} \int_\mathbb{T} e^{-imx} |L_a * f_\varepsilon|^2\, dx ,
\end{equation}
independence of these limits of $f_\varepsilon$, and the equation $\xi_m = e^{-ima} v_m(a)$. The convergence (\operatorname{Re}f{limFourier}) does not need to be uniform in $m$.
We put $f_\varepsilon = \sum_k f_{\varepsilon,k} e^{ikx}$. Below for brevity we skip the subscript $\varepsilon$ and write $f_k$ instead of $f_{\varepsilon,k}$. Then
$L_a * f_\varepsilon = \sum_k w_k(a) f_k e^{ikx}$ and
$$
\xi_{\varepsilon,m}
= \sum_{k\in\mathbb{Z}} w_{k+m}(a) \overline w_k(a) f_{k+m} \overline f_k .
$$
By (\ref{e|Lf|}) $|\xi_{\varepsilon,m}|\le{\bf c}^2$ for any $m\in\mathbb{Z}$.
Now we show that for any $m\in\mathbb{Z}$ the convergence (\ref{limFourier}) takes place. We put
\begin{equation}
\label{xi}
\widetilde\xi_{\varepsilon,m}
= \sum_{k\in\mathbb{Z}} \sum_{|l'|,|l''|\le B}
w_{k+m}(a) \overline w_k(a) \frac{f_{k+m+l'} \overline f_{k+l''}}{(2B+1)^2} e^{i(l'-l'')a}.
\end{equation}
Then by (\ref{|w(a)|<}) and (\ref{f-fe})
\begin{eqnarray}
\nonumber
|\xi_{\varepsilon,m} - \widetilde\xi_{\varepsilon,m}|
&\le& \sum_{k\in\mathbb{Z}} \sum_{|l'|,|l''|\le B} |w_{k+m}(a)\overline w_k(a)|
\frac{|f_{k+m}\overline f_k
- f_{k+m+l'}\overline f_{k+l''} e^{i(l'-l'')a}|}
{(2B+1)^2} \\
\nonumber
&\le& \sum_{k\in\mathbb{Z}} \sum_{|l'|,|l''|\le B} \!\! {\bf c}^2
\frac{|f_{k+m} - f_{k+m+l'} e^{il'a}|\cdot |f_k|
+ |f_{k+m+l'}|\cdot |f_{k} - f_{k+l''} e^{il'' a}|}
{(2B+1)^2} \\
\nonumber
&\le& \frac{2{\bf c}^2}{2B+1}
\sum_{|l|\le B} \|f_\varepsilon\|\cdot \|f_\varepsilon - e^{-il(x-a)} f_\varepsilon\| \\
\label{xi-xi}
&\le& \frac{2{\bf c}^2}{2B+1} \|f_\varepsilon\|^2 \sum_{|l|\le B} \frac{\varepsilon^{3/2}}{\sqrt\pi} |l|
\;\le\: 2\varepsilon^{3/2} B{\bf c}^2 \|f_\varepsilon\|^2 .
\end{eqnarray}
By using in (\ref{xi}) the notation $l=l'$, $n=k+l'$, and $s=l'-l''$, we obtain:
$$
\widetilde\xi_{\varepsilon,m}
= \sum_{n\in\mathbb{Z}} \sum_{|l|,|s-l|\le B}
w_{n+m-l}(a) \overline w_{n-l}(a) \frac{f_{n+m} \overline f_{n-s}}{(2B+1)^2} e^{isa} .
$$
If we fix $n\in\mathbb{Z}$ and $s\in [-2B,2B]\cap\mathbb{Z}$ in the last sum then $n-l\in I_{n,s}$, where
$$
I_{n,s}
= \left\{ \begin{array}{cc}
[n-B,n+B-s] \cap\mathbb{Z} & \mbox{ if } s\ge 0, \\ {}
[n-B-s,n+B] \cap\mathbb{Z} & \mbox{ if } s < 0.
\end{array}
\right.
$$
We also put
$$
b_m
= \frac{2B + 1 - |m|}{(2B+1)^2}, \qquad |m| \le 2B+1.
$$
Then $\sum_{|s|\le 2B} b_s = 1$. By (\ref{vIm})
$$
\widetilde\xi_{\varepsilon,m}
= \sum_{k\in\mathbb{Z},\,|s|\le 2B} v_{I_{k,s},m}(a)\,
b_s f_{k+m} \overline f_{k-s} e^{isa} .
$$
Since $W$ is regular, by Lemma \ref{lem:lim_v} for any $m\in\mathbb{Z}$ and any $\sigma>0$ there exists $Q_m = Q_m(\sigma)$ such that
\begin{equation}
\label{v-v}
\# I \ge Q_m \quad\mbox{implies}\quad
|v_{I,m}(a) - v_m(a)| < \sigma.
\end{equation}
Then $\widetilde\xi_{\varepsilon,m} = \Sigma_1 + \Sigma_2 + \Sigma_3$, where
\begin{eqnarray*}
\Sigma_1
&=& \sum_{k\in\mathbb{Z},\,|s|\le 2B} v_m(a)\, b_s f_{k+m} \overline f_{k-s} e^{isa}, \\
\Sigma_2
&=& \sum_{k\in\mathbb{Z},\,2B+1-|s|\ge Q_m} (v_{I_{k,s},m}(a) - v_m(a))\,
b_s f_{k+m} \overline f_{k-s} e^{isa}, \\
\Sigma_3
&=& \sum_{k\in\mathbb{Z},\,2B+1-|s| < Q_m} (v_{I_{k,s},m}(a) - v_m(a))\,
b_s f_{k+m} \overline f_{k-s} e^{isa} .
\end{eqnarray*}
By using Lemma \ref{lem:YfJ}, we obtain:
\begin{eqnarray}
\nonumber
\Big|\Sigma_1 - e^{-ima} v_m(a) \|f_\varepsilon\|^2 \Big|
&=& \bigg| \sum_{|s|\le 2B} e^{-ima} v_m(a) b_s \sum_{k\in\mathbb{Z}}
\big( f_{k+m}\overline f_{k-s} e^{i(m+s)a} - |f_k|^2 \big)
\bigg| \\
\label{Sigma1}
&\le& 3{\bf c}^2 (2B + |m|) \varepsilon \|f_\varepsilon\|^2 .
\end{eqnarray}
If $2B + 1 -|s| \ge Q_m$ then by using (\ref{v-v}) we can estimate $\Sigma_2$:
\begin{equation}
\label{Sigma2}
|\Sigma_2|
\le \sum_{k\in\mathbb{Z},\,2B+1-|s|\ge Q_m} \sigma b_s |f_{k+m}\overline f_{k-s}|
\le \sigma \|f_\varepsilon\|^2 .
\end{equation}
We have:
\begin{eqnarray}
\nonumber
|\Sigma_3|
&\le& 2{\bf c}^2 \sum_{k\in\mathbb{Z},\,2B+1-|s| < Q_m} b_s |f_{k+m}\overline f_{k-s}| \\
\label{Sigma3}
&\le& 2{\bf c}^2 \sum_{2B+1-|s| < Q_m} b_s \|f_\varepsilon\|^2
\;\le\; 2{\bf c}^2 \frac{(1 + Q_m)Q_m}{(2B+1)^2} \|f_\varepsilon\|^2.
\end{eqnarray}
Combining estimates (\ref{xi-xi}), (\ref{Sigma1}), (\ref{Sigma2}), and (\ref{Sigma3}), we obtain:
$$
\big| \xi_{\varepsilon,m} - e^{-ima} v_m(a) \|f_\varepsilon\|^2 \big|
\le \bigg( (2\varepsilon^{3/2} B + 6\varepsilon B + 3\varepsilon |m|){\bf c}^2 + \sigma + 2{\bf c}^2 \frac{(1 + Q_m)Q_m}{(2B+1)^2}
\bigg) \|f_\varepsilon\|^2.
$$
This implies existence of the limits (\ref{limFourier}). Since the functions $\nu_\varepsilon(a,\cdot)$ are nonnegative, $d\widetilde\mu_{W,a}$ is a measure on $\mathbb{T}$ for any $a\in\mathbb{T}$. \qed
\subsection{The space ${\cal ACF}(\mathbb{T})$}
\begin{dfn}
We say that $f\in L^\infty(\mathbb{T})$ lies in the space ${\cal ACF}(\mathbb{T})$ if it has absolutely converging Fourier series.
\end{dfn}
For any $f\in{\cal ACF}(\mathbb{T})$ we put
$$
\|f\|_{{\cal DT}} = \sum_{k\in\mathbb{Z}} |f_k| .
$$
Then for any $f\in{\cal ACF}(\mathbb{T})$ we have $\|f\|_{{\cal DT}} = \|\widehat f\|_{{\cal DT}}$, and the estimate (\ref{||f||<||f||_DT}) holds.
Assertion {\bf (14)} implies the following.
\begin{cor}
\label{cor:ACF}
The space $\big({\cal ACF}(\mathbb{T}),\|\cdot\|_{\cal DT}\big)$ is a commutative $C^*$-subalgebra in the $C^*$-algebra
$\big({\cal DT}(\mathbb{T}),\|\cdot\|_{\cal DT}\big)$.
\end{cor}
Suppose $\nu$ satisfies (\ref{nu=sum}) for some $W\in{\cal R}(\mathbb{T})$. Consider the functions
\begin{equation}
\label{phipsi}
\varphi = \frac1{2\pi}\int_{\mathbb{T}} \nu(x,\cdot)\, dx
\quad\mbox{and}\quad
\psi = \frac1{2\pi}\int_{\mathbb{T}} \nu(\cdot,a)\, da .
\end{equation}
\begin{lem}
\label{lem:intnuACF}
Suppose $W\in{\cal R}(\mathbb{T})$. Then $\varphi,\psi\in{\cal ACF}(\mathbb{T})$. Moreover,
$$
\|\varphi\|_{\cal DT} \le \|W\|^2_{\cal DT}, \quad
\|\psi\|_{\cal DT} \le \|W\|^2_{\cal DT}.
$$
\end{lem}
{\it Proof}. By (\ref{nu=sum})
$$
\frac1{2\pi} \int_\mathbb{T} \nu(x,a) \, dx
= \sum_{n\in\mathbb{Z}} \omega_{0,n} e^{ina} .
$$
By (\ref{sumomega}) $\sum |\omega_{0,n}| \le \|W\|_{\cal DT}^2$.
The case of the function $\psi$ is analogous. \qed
\subsection{The operator $\widehat g_1 W \widehat g_2$}
\begin{prop}
\label{prop:WgWWg_reg}
Suppose $W\in{\cal R}(\mathbb{T})$, $g_2\in L^\infty(\mathbb{T})$, and $g_1, |g_2|^2\in{\cal ACF}(\mathbb{T})$. Then the operator $\widetilde W = \widehat g_1 W \widehat g_2$ is also regular and the corresponding coefficients $\widetilde\omega_{m,n}$ satisfy
\begin{equation}
\label{tildeomega_e_WgWWg}
\sum_{m,n\in\mathbb{Z}} \widetilde\omega_{m,n} e^{imx+ina}
= |g_1(x)|^2 \nu(x,a) |g_2(a)|^2.
\end{equation}
\end{prop}
\begin{cor}
\label{cor:WgWWg}
Suppose $W\in{\cal R}(\mathbb{T})$, $g_2\in L^\infty(\mathbb{T})$, and $g_1, |g_2|^2\in{\cal ACF}(\mathbb{T})$. Then by (\ref{omega=T}) and (\ref{dim=omega})
\begin{equation}
\label{gWg}
\| \widehat g_1 W \widehat g_2 \|_\mu^2
= \frac{1}{(2\pi)^2} \int_{\mathbb{T}^2} |g_1(x)|^2 \nu(x,a) |g_2(a)|^2\, da dx .
\end{equation}
\end{cor}
Proposition \ref{prop:WgWWg_reg} follows from Lemmas \ref{lem:WWg_reg} and \ref{lem:dimWW}, while Corollary \ref{cor:WgWWg} is a combination of Corollaries \ref{cor:dimWWg} and \ref{cor:dimWgW}.
We associate with any $W\in{\cal R}(\mathbb{T})$ the measure $\mu_W$ on ${\cal X}\times{\cal X}$:
$$
d\mu_W = \nu(x,a)\, \frac{dx da}{4\pi^2} .
$$
\section{A bistochastic operator generated by $\mu_W$}
\label{sec:bistochastic}
\begin{lem}
\label{lem:bistochastic}
Suppose $W$ satisfies (at least) one of the following conditions:
(1) $W = U_F$, where $F\in\operatorname{Aut}({\cal X},\mu)$,
(2) $W$ is an operator on ${\cal H} = L^2({\cal X})$, ${\cal X} = \mathbb{Z}_J$,
(3) $W\in{\cal R}(\mathbb{T})$.
Then the corresponding measure $\mu_W$ determines a bounded operator
$$
{\cal W} : L^1({\cal X},\mu)\to L^1({\cal X},\mu) , \qquad
f \mapsto {\cal W} f = \int_{\cal X} \nu(\cdot,a) f(a)\, d\mu(a) .
$$
This operator satisfies the estimate
\begin{equation}
\label{norm_calW}
\|{\cal W}\|_{L^1\to L^1} \le \|W\|_{\cal DT}^2
\end{equation}
and moreover, has the following properties
(a). ${\cal W}$ is nonnegative: ${\cal W} f\ge 0$ whenever $0\le f\in L^1({\cal X},\mu)$.
If $W$ is unitary\footnote{In case (1) this condition automatically holds.} then two more statements hold.
(b). ${\cal W}{\bf 1}_{\cal X} = {\bf 1}_{\cal X}$.
(c). $\int_{\cal X} {\cal W} f(x) \, d\mu(x) = \int_{\cal X} f(a) \, d\mu(a)$ for any $f\in L^1({\cal X},\mu)$.
\end{lem}
Conditions (a)--(c) mean that ${\cal W}$ is a bistochastic (doubly stochastic) operator.
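As a sanity check of what conditions (a)--(c) mean in the simplest situation (an illustration only; the general construction goes through the density $\nu$), take $\#{\cal X}=J$ with the uniform measure and $W=U_F$ for a permutation $F$. Then, as in case (1) of the proof below, ${\cal W}f = f\circ F^{-1}$, i.e.\ ${\cal W}$ is a permutation matrix, and (a)--(c) amount to nonnegative entries and unit row and column sums.
\begin{verbatim}
import numpy as np

J = 5
perm = np.array([2, 0, 3, 4, 1])        # a permutation F of {0,...,J-1}
K = np.zeros((J, J))
K[np.arange(J), perm] = 1.0             # Koopman matrix: (K f)(x) = f(F(x))
calW = K.T                              # cal-W: f -> f o F^{-1}
print((calW >= 0).all())                # (a) nonnegativity
print(calW @ np.ones(J))                # (b) cal-W applied to 1 gives 1
print(calW.sum(axis=0))                 # (c) column sums = 1: integrals preserved
\end{verbatim}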
{\it Proof of Lemma \ref{lem:bistochastic}}. (1) In this case $d\mu_W(x,a) = \delta(x,F(a))\, d\mu(x) d\mu(a)$ (see (\ref{muUF})). Then by (\ref{int_delta})--(\ref{delta=delta}) for any $f\in L^1({\cal X})$
\begin{equation}
\label{calWf}
{\cal W} f(x)
= \int_{\cal X} \delta(x,F(a)) f(a) \, d\mu(a)
= \int_{\cal X} \delta(F^{-1}(x),a) f(a) \, d\mu(a)
= f\circ F^{-1}(x).
\end{equation}
Hence, $\|{\cal W} f\|_1 = \|f\|_1$. Therefore ${\cal W}$ is an isometry and condition (c) holds. Conditions (a) and (b) also follow from (\ref{calWf}).
(2) If ${\cal X} = \mathbb{Z}_J$ then by (\ref{nu_finite}) we have:
$$
f(a)
= \sum_{j\in\mathbb{Z}_J} f_j \eta^{ja} , \quad
{\cal W} f(x)
= \sum_{m,n\in\mathbb{Z}_J} \omega_{m,-n} f_n \eta^{mx}.
$$
Hence, by (\ref{DT_finite})
$$
\|{\cal W} f\|_1
\le \sum_{m,n\in\mathbb{Z}_J} |\omega_{m,-n} f_n|
\le \|W\|_{\cal DT}^2 \|f\|_1 .
$$
Condition (a) follows from Lemma \ref{lem:omega=measure}, while Conditions (b) and (c) follow from Lemma \ref{lem:one_finite}.
(3) In this case for any $f\in L^1(\mathbb{T})$
$$
\|{\cal W} f\|_1
= \frac1{(2\pi)^2} \int_\mathbb{T} dx \int_\mathbb{T} |\nu(x,a) f(a)|\, da
= \frac1{2\pi} \int_\mathbb{T} \varphi(a) |f(a)| \, da ,
$$
where $\varphi$ is determined by (\ref{phipsi}). By Lemma \ref{lem:intnuACF}
$\|\varphi\|_\infty\le \|\varphi\|_{\cal DT}\le \|W\|_{\cal DT}^2$. This implies (\ref{norm_calW}).
To prove (a), we fix $0 \le f\in L^1(\mathbb{T})$ and take any nonnegative continuous function $g:\mathbb{T}\to\mathbb{R}$. Then
$\frac1{2\pi} \int_\mathbb{T} g(x) {\cal W} f(x) \, dx$ equals the integral of the non-negative function $g(x) f(a)$ against the measure with density $\nu$ on $\mathbb{T}^2$, and hence is nonnegative. Since $g$ is an arbitrary nonnegative continuous function, ${\cal W} f\ge 0$.
Now suppose $W$ is unitary. Then by (\ref{gWg}) for any $0\not\equiv g\in{\cal ACF}(\mathbb{T})$
$$
\frac1{2\pi} \int_\mathbb{T} |g(x)|^2 {\cal W} {\bf 1}_\mathbb{T}(x) \, dx
= \frac1{(2\pi)^2} \int_{\mathbb{T}^2} |g(x)|^2 \nu(x,a) \, dx da
= \|\widehat g W\|_\mu^2
= \|\widehat g\|_\mu^2
$$
(the last equation follows from Corollary \ref{cor:UWU}). The equation
$$
\|\widehat g\|_\mu^2
= \|g\|^2 = \frac1{2\pi} \int_\mathbb{T} |g(x)|^2 \, dx
$$
implies (b).
It is sufficient to check condition (c) only for $f\ge 0$. First, consider the case when $0\le f\in L^\infty(\mathbb{T})$. Consider $g\ge 0$ such that $f=g^2$. Then by Corollary \ref{cor:WgWWg}
$$
\frac1{2\pi} \int_\mathbb{T} {\cal W} f(x) \, dx
= \frac1{(2\pi)^2} \int_{\mathbb{T}^2} \nu(x,a) |g(a)|^2 \, dx da
= \|W\widehat g\|_\mu^2
= \|\widehat g\|_\mu^2
= \frac1{2\pi} \int_{\mathbb{T}} f(a) \, da .
$$
If $0\le f\in L^1(\mathbb{T})$ is unbounded, by using a cut off, for any $\varepsilon > 0$ we have: $f=f_1+f_2$,
$0\le f_1\in L^\infty(\mathbb{T})$, $\|f_2\|_1 < \varepsilon$. Then by (\ref{norm_calW})
$$
\|{\cal W} f - {\cal W} f_1\|_1
< \|W\|_{\cal DT}^2 \varepsilon.
$$
This implies
$$
\bigg| \frac1{2\pi} \int_\mathbb{T} {\cal W} f(x)\, dx - \frac1{2\pi} \int_\mathbb{T} {\cal W} f_1(x)\, dx \bigg|
\le \|W\|_{\cal DT}^2 \varepsilon .
$$
The estimate
$$
\frac1{2\pi} \int_\mathbb{T} {\cal W} f_1(x)\, dx
= \frac1{2\pi} \int_\mathbb{T} f_1(x)\, dx
= \frac1{2\pi} \int_\mathbb{T} f(x)\, dx + \Delta, \qquad
|\Delta| \le \varepsilon
$$
finishes the proof. \qed
\section{Product with $\widehat g$}
\label{sec:product}
In this section we prove Proposition \ref{prop:WgWWg_reg} and Corollary \ref{cor:WgWWg}.
\subsection{The operator $W \widehat g$}
\begin{lem}
\label{lem:WWg_reg}
Suppose $W\in{\cal R}(\mathbb{T})$, $g\in L^\infty(\mathbb{T})$, and $|g|^2\in{\cal ACF}(\mathbb{T})$. Then
$\widetilde W = W\widehat g$ is also regular and the corresponding coefficients $\widetilde\omega_{m,n}$ satisfy
\begin{equation}
\label{tildeomega_e}
\sum_{m,n\in\mathbb{Z}} \widetilde\omega_{m,n} e^{imx+ina}
= \nu(x,a) |g(a)|^2.
\end{equation}
\end{lem}
\begin{cor}
\label{cor:dimWWg}
Suppose $W\in{\cal R}(\mathbb{T})$ and $g\in{\cal ACF}(\mathbb{T})$. Then by (\ref{omega=T}) and (\ref{dim=omega})
\begin{equation}
\label{||Wg||mu}
\| W\widehat g \|_\mu^2
= \frac{1}{(2\pi)^2} \int_{\mathbb{T}^2} \nu(x,a) |g(a)|^2 \, da dx .
\end{equation}
\end{cor}
{\it Proof of Lemma \ref{lem:WWg_reg}}. We denote by $\widetilde\omega_{I,m,n}$ the quantities (\ref{limomega}) corresponding to the operator
$\widetilde W = W\widehat g$. Then
$$
\widetilde\omega_{I,m,n}
= \frac1{\# I} \sum_{k,s,j\in\mathbb{Z},\,l\in I}
W_{l+m,k} g_{k-j} \overline W_{l,s} \overline g_{s-j-n}
= \frac1{\# I} \sum_{k,q\in\mathbb{Z},\,l\in I}
W_{l+m,k}\overline W_{l,k-q+n} {\bf g}_q,
$$
where ${\bf g}_q = \sum_{p\in\mathbb{Z}} g_p \overline g_{p-q}$.
Note that ${\bf g}_q$ is the $q$-th Fourier coefficient of the function $|g|^2$. Indeed,
$$
{\bf g}_q
= \langle g, g e^{iqx} \rangle
= \frac1{2\pi} \int_\mathbb{T} |g(x)|^2 e^{-iqx}\, dx.
$$
By assumption of the lemma the series $\sum {\bf g}_q$ absolutely converges. We put
$$
\sum_{q\in\mathbb{Z}} |{\bf g}_q|
= {\bf c}_g.
$$
For any $\sigma>0$ there exists $Q(\sigma)\in\mathbb{N}$ such that
$\sum_{|q|>Q(\sigma)} |{\bf g}_q| < \sigma$. Given a small $\sigma$ we have: $\widetilde\omega_{I,m,n} = \Omega_1 + \Omega_2$, where
\begin{eqnarray*}
\Omega_1
&=& \frac1{\# I} \sum_{k\in\mathbb{Z},\,l\in I,\, |q|\le Q(\sigma)}
W_{l+m,k} \overline W_{l,k-q+n} {\bf g}_q, \\
\Omega_2
&=& \frac1{\# I} \sum_{k\in\mathbb{Z},\,l\in I,\, |q| > Q(\sigma)}
W_{l+m,k} \overline W_{l,k-q+n} {\bf g}_q .
\end{eqnarray*}
In the sum $\Omega_1$ the index $q$ takes only finitely many values. Hence for all sufficiently big $\# I$ we have:
$$
\Omega_1
= \sum_{|q|\le Q(\sigma)} \omega_{m,n-q} {\bf g}_q + \Delta, \qquad
|\Delta|
\le \sum_{|q|\le Q(\sigma)} \sigma |{\bf g}_q|
\le \sigma{\bf c}_g .
$$
Now we estimate $\Omega_2$. Let $c_k$ be the majorating sequence for $W$ with
$\sum c_k = {\bf c}$. Then
\begin{eqnarray*}
|\Omega_2|
&\le& \frac1{\# I} \sum_{k\in\mathbb{Z},\,l\in I,\,|q|>Q(\sigma)}
c_{l+m-k} c_{l-k+q-n} |{\bf g}_q| \\
&\le& \sum_{j\in\mathbb{Z},\,|q|>Q(\sigma)}
c_{m+j} c_{q-n+j} |{\bf g}_q|
= \sum_{|q| > Q(\sigma)} \widetilde c_{m+n-q} |{\bf g}_q| ,
\end{eqnarray*}
where $\widetilde c_s = \sum_j c_{s+j} c_j$, \; $\sum_s \widetilde c_s = {\bf c}^2$. Hence
$$
|\Omega_2| \le {\bf c}^2 \sum_{|q| > Q(\sigma)} |{\bf g}_q| \le {\bf c}^2 \sigma.
$$
By (\ref{sumomega})
$$
\bigg| \sum_{|q| > Q(\sigma)} \omega_{m,n-q} {\bf g}_q \bigg|
\le {\bf c}^2 \sum_{|q| > Q(\sigma)} |{\bf g}_q|
\le {\bf c}^2 \sigma .
$$
Therefore
$$
\Big| \widetilde\omega_{I,m,n} - \sum_{q\in\mathbb{Z}} \omega_{m,n-q} {\bf g}_q \Big|
\le \sigma({\bf c}_g + 2{\bf c}^2) .
$$
Finally note that
\begin{eqnarray*}
\nu(x,a) |g(a)|^2
&=& \sum_{q,p\in\mathbb{Z}} g_p\overline g_{p-q} e^{iqa}
\sum_{m,k\in\mathbb{Z}} \omega_{m,k} e^{imx+ika} \\
&=& \sum_{m,n,q,p\in\mathbb{Z}} \omega_{m,n-q} g_p\overline g_{p-q} e^{imx+ina} .
\end{eqnarray*}
These computations imply that for all sufficiently big $\# I$ the quantity $\widetilde\omega_{I,m,n}$ differs arbitrarily little from
$$
\sum_{p,q\in\mathbb{Z}} \omega_{m,n-q} g_p\overline g_{p-q}
= \frac1{(2\pi)^2} \int_{\mathbb{T}^2} e^{-imx-ina} \nu(x,a) |g(a)|^2 \, dx da .
$$
\qed
\subsection{The operator $\widehat g W$}
\begin{lem}
\label{lem:dimWW}
Suppose $W\in{\cal R}(\mathbb{T})$ and $g\in{\cal ACF}(\mathbb{T})$. Then $\widetilde W = \widehat g W$ is also regular and the corresponding coefficients $\widetilde\omega_{m,n}$ satisfy
\begin{equation}
\label{breveomega)}
\sum_{m,n\in\mathbb{Z}}\widetilde\omega_{m,n} e^{imx+ina}
= |g(x)|^2 \nu(x,a).
\end{equation}
\end{lem}
\begin{cor}
\label{cor:dimWgW}
Suppose $W\in{\cal R}(\mathbb{T})$ and $g\in{\cal ACF}(\mathbb{T})$. Then by (\ref{omega=T}) and (\ref{dim=omega})
\begin{equation}
\label{dimWgW}
\|\widehat g W\|_\mu^2
= \frac1{(2\pi)^2} \int_{\mathbb{T}^2} |g(x)|^2 \nu(x,a)\, dx da.
\end{equation}
\end{cor}
{\it Proof of Lemma \ref{lem:dimWW}}. We denote by $\widetilde\omega_{I,m,n}$ the quantities (\ref{limomega}) corresponding to the operator
$\widetilde W = \widehat g W$. Then
\begin{eqnarray*}
\widetilde\omega_{I,m,n}
&=& \frac1{\# I} \sum_{k,s,j\in\mathbb{Z},\, l\in I}
g_{l+m-k} W_{k,j} \overline g_{l-s} \overline W_{s,j+n} \\
&=& \frac1{\# I} \sum_{k,s,j\in\mathbb{Z},\, l\in I}
g_{m-k} \overline g_{-s} W_{k+l,j} \overline W_{s+l,j+n} \\
&=& \frac1{\# I} \sum_{k,s,j\in\mathbb{Z},\, l\in I+s}
g_{m-k} \overline g_{-s} W_{k-s+l,j} \overline W_{l,j+n} \\
&=& \sum_{k,s\in\mathbb{Z}} g_{m-k} \overline g_{-s} \omega_{I+s,k-s,n}.
\end{eqnarray*}
We define ${\bf c}_g = \sum_{j\in\mathbb{Z}} |g_j|$.
Given $\sigma>0$ we take $M = M(\sigma) > |m|$ such that
\begin{equation}
\label{sumg<sigma}
\sum_{|j| \ge (M - |m|) / 2} |g_j| < \sigma.
\end{equation}
We take $N = N(\sigma,M)$ such that for any $j$, $|j|\le M$ and any interval $J\subset\mathbb{Z}$, $\# J > N$
\begin{equation}
\label{om-om}
|\omega_{J,j,n} - \omega_{j,n}| < \sigma.
\end{equation}
Then $\widetilde\omega_{I,m,n} = \Sigma_0 - \Sigma_1 - \Sigma_2 + \Sigma_3$, where
\begin{eqnarray*}
\Sigma_0
&=& \sum_{k,s\in\mathbb{Z}} g_{m-k} \overline g_{-s} \omega_{k-s,n} , \\
\Sigma_1
&=& \sum_{|k-s|\ge M} g_{m-k} \overline g_{-s} \omega_{k-s,n} , \\
\Sigma_2
&=& - \sum_{|k-s| < M}
g_{m-k} \overline g_{-s} (\omega_{I+s,k-s,n} - \omega_{k-s,n}) , \\
\Sigma_3
&=& \sum_{|k-s|\ge M} g_{m-k} \overline g_{-s} \omega_{I+s,k-s,n} .
\end{eqnarray*}
First, we transform the sum $\Sigma_0$:
\begin{eqnarray}
\nonumber
\Sigma_0
&=& \frac1{(2\pi)^2} \int_{\mathbb{T}^2} e^{-imx-ina}
\sum_{k,s\in\mathbb{Z}} g_{m-k} \overline g_{-s}
e^{i(m-k+s)x} \omega_{k-s,n} e^{i(k-s)x+ina}\, dxda \\
\label{Sig0}
&=& \frac1{(2\pi)^2} \int_{\mathbb{T}^2} e^{-imx-ina}
|g(x)|^2 \nu(x,a) \, dxda .
\end{eqnarray}
We estimate $\Sigma_1$ by using (\ref{omegaI}) and (\ref{sumg<sigma}):
\begin{equation}
\label{Sig1}
|\Sigma_1|
\le {\bf c}^2 \sum_{|k-s| \ge M} |g_{m-k}| |g_{-s}|
\le 2{\bf c}^2 \sum_{\beta\in\mathbb{Z},\, |\alpha| \ge (M-|m|)/2} |g_\alpha| |g_\beta|
\le 2{\bf c}^2 {\bf c}_g \sigma.
\end{equation}
We have by (\ref{om-om}):
\begin{equation}
\label{Sig2}
|\Sigma_2|
\le \sigma \sum_{k,s\in\mathbb{Z}} |g_{m-k}| |g_{-s}|
= \sigma{\bf c}_g^2.
\end{equation}
The sum $\Sigma_3$ is estimated in the same way as $\Sigma_1$:
\begin{equation}
\label{Sig3}
|\Sigma_3|
\le {\bf c}^2 \sum_{|k-s| \ge M} |g_{m-k}| |g_{-s}|
\le 2{\bf c}^2{\bf c}_g\sigma.
\end{equation}
Combining (\ref{Sig0}), (\ref{Sig1}), (\ref{Sig2}), and (\ref{Sig3}), we see that
$$
\bigg| \widetilde\omega_{I,m,n}
- \frac1{(2\pi)^2} \int_{\mathbb{T}^2} e^{-imx-ina} |g(x)|^2 \nu(x,a) \, dxda
\bigg|
\le (4{\bf c}^2{\bf c}_g + {\bf c}_g^2) \sigma.
$$
This implies (\ref{breveomega)}). \qed
\section{Weaker assumptions on $g$}
Unfortunately we do not know if equation (\ref{gWg}) remains valid for $W\in{\cal R}(\mathbb{T})$ and arbitrary
$g_1,g_2\in L^\infty(\mathbb{T})$.
In this section we present two partial results in this direction. More precisely, we show that equations (\ref{||Wg||mu}) and (\ref{dimWgW}) hold for $g$ lying in sets which are larger than those declared in Corollaries \ref{cor:dimWWg} and \ref{cor:dimWgW}; in particular, for $g$ equal to indicators of intervals.
\subsection{Computation of $\|W \widehat g\|_\mu$}
\begin{lem}
\label{lem:Whatg}
Suppose $W\in{\cal R}(\mathbb{T})$ and $g\in L^\infty(\mathbb{T})$. Then equation (\ref{||Wg||mu}) remains valid.
\end{lem}
{\it Proof}. For any $\sigma > 0$ there exists $g_*\in{\cal ACF}(\mathbb{T})$ such that
$$
\|g_*\|_\infty \le \|g\|_\infty \quad\mbox{and}\quad
\|g - g_*\| < \sigma .
$$
Indeed, it is sufficient to take $\widetilde g$ equal to a finite, but sufficiently long, partial sum of the Fourier series of $g$, so that
$\big| \|g\|^2 - \|\widetilde g\|^2 \big| < \sigma$. Then we define $g_*$ as a cut-off of $\widetilde g$:
$$
g_*(x)
= \left\{\begin{array}{ccc}
\widetilde g(x) & \mbox{ if } & |\widetilde g(x)| \le \|g\|_\infty, \\
\|g\|_\infty & \mbox{ if } & \widetilde g(x) > \|g\|_\infty, \\
- \|g\|_\infty & \mbox{ if } & \widetilde g(x) < - \|g\|_\infty. \\
\end{array}
\right.
$$
The function $g_*(x)$ is Lipschitz. Hence, it lies in ${\cal ACF}(\mathbb{T})$.
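This approximation step is easy to illustrate numerically. The sketch below is only an illustration (the test function, grid size and number of retained modes are arbitrary choices, not taken from the text): it truncates the Fourier series and then clips the result back into $[-\|g\|_\infty,\|g\|_\infty]$.
\begin{verbatim}
# Truncate the Fourier series of g and clip back into [-||g||_inf, ||g||_inf].
import numpy as np

N = 2048
x = 2 * np.pi * np.arange(N) / N
g = np.sign(np.sin(3 * x))           # a bounded, discontinuous test function
M = 25                               # number of retained Fourier modes

ghat = np.fft.fft(g)
ghat[np.abs(np.fft.fftfreq(N, d=1.0 / N)) > M] = 0     # keep only |k| <= M
g_tilde = np.real(np.fft.ifft(ghat))                   # finite Fourier section
g_inf = np.max(np.abs(g))
g_star = np.clip(g_tilde, -g_inf, g_inf)               # cut-off of g_tilde

print(np.max(np.abs(g_star)) <= g_inf,                 # sup-norm not increased
      np.sqrt(np.mean(np.abs(g - g_star) ** 2)))       # small L^2 error
\end{verbatim}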
By using the triangle inequality, (\ref{WW}) and Assertion {\bf (7)}, we have:
\begin{equation}
\label{g-g*1}
\Big| \|W\widehat g\|_\mu - \|W\widehat g_*\|_\mu \Big|
\le \|W(\widehat g - \widehat g_*)\|_\mu
\le \|W\|\, \|(\widehat g - \widehat g_*)\|_\mu
\le \sigma \|W\| .
\end{equation}
On the other hand, by (\ref{||Wg||mu})
\begin{eqnarray*}
\!\!\!\!
\Big| \|W\widehat g_*\|_\mu^2
- \frac1{(2\pi)^2} \int_{\mathbb{T}^2} \nu(x,a) |g(a)|^2 \, dadx \Big|
&=& \frac1{2\pi} \Big| \int_{\mathbb{T}} \varphi(a) (|g_*(a)|^2 - |g(a)|^2) \, da \Big| , \\
\varphi
&=& \frac1{2\pi} \int_{\mathbb{T}} \nu(x,\cdot) \, dx .
\end{eqnarray*}
By using the estimate $\|\varphi\|_\infty \le \|\varphi\|_{\cal DT} \le \|W\|_{\cal DT}^2$
(Lemma \ref{lem:intnuACF}), we obtain:
\begin{equation}
\label{g-g*2}
\Big| \|W\widehat g_*\|_\mu^2
- \frac1{(2\pi)^2} \int_{\mathbb{T}^2} \nu(x,a) |g(a)|^2 \, dadx \Big|
\le \|W\|_{\cal DT}^2 \sigma .
\end{equation}
Hence, Lemma \ref{lem:Whatg} follows by combining (\ref{g-g*1}) and (\ref{g-g*2}). \qed
\subsection{Computation of $\|\widehat g W\|_\mu$}
\begin{dfn}
\label{dfn:AACF}
We say that $g\in L^\infty(\mathbb{T})$ is almost ${\cal ACF}(\mathbb{T})$ (notation: $g\in{\cal AACF}(\mathbb{T})$) if there exists a family of functions
$g_\varepsilon\in L^\infty(\mathbb{T})$, depending continuously on $\varepsilon\in (-\varepsilon_0,\varepsilon_0)$,
such that the following conditions hold:
{\bf A1}. $g_0 = g$,
{\bf A2}. if $\varepsilon\ne 0$ then $g_\varepsilon\in{\cal ACF}(\mathbb{T})$,
{\bf A3}. $|g_\varepsilon - g| \le |g_\varepsilon - g_{-\varepsilon}|$ if $0 \le \varepsilon < \varepsilon_0$,
{\bf A4}. $\lim_{\varepsilon\to 0} \|g_\varepsilon - g_{-\varepsilon}\| = 0$.
\end{dfn}
A typical example of an ${\cal AACF}(\mathbb{T})$ function is the indicator of an interval (see Lemma \ref{lem:interval} below).
\begin{lem}
\label{lem:WfW}
Suppose $W\in{\cal R}(\mathbb{T})$ and $g\in{\cal AACF}(\mathbb{T})$. Then equation (\ref{dimWgW}) remains valid.
\end{lem}
{\it Proof}. Let $g_\varepsilon$ be the family from Definition \ref{dfn:AACF}.
By {\bf A3} and {\bf A1} for any $\varepsilon\in (0,\varepsilon_0)$ and any $X\in{\cal B}$
$$
0
\le \|(\widehat g_\varepsilon - \widehat g) W\pi_X\|^2
\le \|(\widehat g_\varepsilon - \widehat g_{-\varepsilon}) W\pi_X\|^2 .
$$
This inequality implies
$$
0
\le \|(\widehat g_\varepsilon - \widehat g) W\|_\mu^2
\le \|(\widehat g_\varepsilon - \widehat g_{-\varepsilon}) W\|_\mu^2, \qquad
\varepsilon\in (0,\varepsilon_0).
$$
By {\bf A2} and (\ref{dimWgW}),
$\|(\widehat g_\varepsilon - \widehat g) W\|_\mu^2 \le \Delta_\varepsilon$, where
$$
\Delta_\varepsilon
= \frac1{(2\pi)^2}\int_{\mathbb{T}^2} |g_\varepsilon(x) - g_{-\varepsilon}(x)|^2 \nu(x,a) \, dxda
\le \|g_\varepsilon - g_{-\varepsilon}\|^2 \|\psi\|_\infty , \quad
\psi
= \frac1{2\pi} \int_\mathbb{T} \nu(\cdot,a) \, da
$$
(see also (\ref{phipsi})). Note that $\|\psi\|_\infty \le \|\psi\|_{\cal DT}$. Hence, by
{\bf A4} and Lemma \ref{lem:intnuACF}, $\Delta_\varepsilon \to 0$ as $\varepsilon\to 0$.
By the triangle inequality,
$$
\Big| \|\widehat g_\varepsilon W\|_\mu^2
- \frac1{(2\pi)^2} \int_{\mathbb{T}^2} |g(x)|^2 \nu(x,a)\, dadx \Big|
= \big| \|\widehat g W\|_\mu^2 - \|\widehat g_\varepsilon W\|_\mu^2 \big|
\le \Delta_\varepsilon .
$$
Hence equation (\ref{dimWgW}) follows. \qed
\subsection{Indicators of intervals}
\begin{lem}
\label{lem:interval}
For any interval $I\subset\mathbb{T}$ the function ${\bf 1}_I$ lies in ${\cal AACF}$.
\end{lem}
{\it Proof}. We put $g_0 = {\bf 1}_I$. For $\varepsilon\in (0,1)$ we define
\begin{eqnarray*}
g_\varepsilon(x)
&=& \max \Big\{ 0 , 1 - \frac1{\varepsilon} \operatorname{dist}(x,I) \Big\} , \\
g_{-\varepsilon}(x)
&=& 1 - \max \Big\{ 0 , 1 - \frac1{\varepsilon} \operatorname{dist}(x,\mathbb{T}\setminus I) \Big\} .
\end{eqnarray*}
Conditions {\bf A1}--{\bf A4} from Definition \ref{dfn:AACF} are then straightforward to verify. Hence ${\bf 1}_I\in{\cal AACF}$. \qed
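For illustration only, the two mollified indicators (with the minus sign in $g_{-\varepsilon}$ as written above) can be evaluated on a grid; the interval and step sizes below are arbitrary. The check confirms the pointwise ordering $g_{-\varepsilon}\le{\bf 1}_I\le g_\varepsilon$, which gives {\bf A3}, and the decay required in {\bf A4}.
\begin{verbatim}
# Mollified indicators g_{+eps}, g_{-eps} of an arc I = [a, b] on the circle.
import numpy as np

a, b = 1.0, 2.5
x = np.linspace(0, 2 * np.pi, 4000, endpoint=False)

def circ(d):                                   # distance on a circle of length 2*pi
    d = np.abs(d) % (2 * np.pi)
    return np.minimum(d, 2 * np.pi - d)

inside = (x >= a) & (x <= b)
d_ends = np.minimum(circ(x - a), circ(x - b))  # distance to the nearest endpoint
dist_I = np.where(inside, 0.0, d_ends)         # dist(x, I)
dist_C = np.where(inside, d_ends, 0.0)         # dist(x, T \ I)

g = inside.astype(float)                       # the indicator of I
for eps in (0.2, 0.05, 0.01):
    g_plus = np.maximum(0.0, 1.0 - dist_I / eps)
    g_minus = 1.0 - np.maximum(0.0, 1.0 - dist_C / eps)
    assert np.all(g_minus <= g) and np.all(g <= g_plus)    # this gives A3
    print(eps, np.sqrt(np.mean((g_plus - g_minus) ** 2)))  # tends to 0, giving A4
\end{verbatim}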
\section{Coefficients $\omega_{m,n}$}
In this section $W\in{\cal DT}(\mathbb{T})$, $\{c_k\}_{k\in\mathbb{Z}}$ is its majorating sequence, and ${\bf c} = \sum c_k = \|W\|_{\cal DT}$.
\begin{lem}
\label{lem:omegabaromega}
For any $W\in{\cal R}(\mathbb{T})$ and any $m,n\in\mathbb{Z}$
$$
\omega_{m,n} = \overline\omega_{-m,-n} .
$$
\end{lem}
{\it Proof}. This equation follows from the identity
$\omega_{I,m,n} = \overline\omega_{I+m,-m,-n}$, which holds for any interval $I\subset\mathbb{Z}$.
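Indeed, if $\omega_{I,m,n}$ is written as the averaged correlation appearing at the beginning of the proof of Proposition \ref{lem:dimWW}, namely $\omega_{I,m,n}=\frac1{\#I}\sum_{j\in\mathbb{Z},\,l\in I}W_{l+m,j}\overline W_{l,j+n}$, then the change of variables $l\mapsto l+m$, $j\mapsto j+n$ gives
$$
\overline\omega_{I+m,-m,-n}
= \frac1{\# I} \sum_{j\in\mathbb{Z},\, l\in I+m} \overline W_{l-m,j}\, W_{l,j-n}
= \frac1{\# I} \sum_{j\in\mathbb{Z},\, l\in I} W_{l+m,j}\, \overline W_{l,j+n}
= \omega_{I,m,n}.
$$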
\qed
We have the estimates
\begin{equation}
\label{omegaI}
\begin{array}{ccc}
\displaystyle
\sum_{n\in\mathbb{Z}} |\omega_{I,m,n}|
&\le&
\displaystyle \frac1{\# I} \sum_{j,n\in\mathbb{Z},\, l\in I} c_{l+m-j} c_{l-j-n}
= {\bf c}^2 , \\ \displaystyle
\sum_{m\in\mathbb{Z}} |\omega_{I,m,n}|
&\le&
\displaystyle \frac1{\# I} \sum_{j,m\in\mathbb{Z},\, l\in I} c_{l+m-j} c_{l-j-n}
= {\bf c}^2 .
\end{array}
\end{equation}
These estimates imply the following
\begin{cor}
\label{cor:omega}
If $W$ is regular then (compare with (\ref{DT_finite}))
\begin{equation}
\label{sumomega}
\sum_{n\in\mathbb{Z}} |\omega_{m,n}| \le {\bf c}^2 , \quad
\sum_{n\in\mathbb{Z}} |\omega_{n,m}| \le {\bf c}^2 \quad
\mbox{ for any $m\in\mathbb{Z}$}.
\end{equation}
\end{cor}
We put
$$
P_M(m) = \{n\in\mathbb{Z} : |n+m| \ge 2M \}.
$$
\begin{lem}
\label{lem:sumomega}
For any $m\in\mathbb{Z}$
\begin{equation}
\label{sum_P}
\sum_{n\in P_M(m)} |\omega_{m,n}| \le 2{\bf c} \sum_{|k| \ge M} c_k.
\end{equation}
\end{lem}
{\it Proof}. For any integer interval $I$
\begin{eqnarray*}
\sum_{n\in P_M(m)} |\omega_{I,m,n}|
&\le& \frac1{\# I} \sum_{j\in\mathbb{Z},\, n\in P_M(m),\, l\in I} c_{l-j+m} c_{l-j-n} \\
&=& \sum_{k\in\mathbb{Z},\, n\in P_M(m)} c_{m+n-k} c_k
\; =\; \sum_{k\in\mathbb{Z},\, s\in P_M(0)} c_{s-k} c_k \\
&\le& \sum_{k\in\mathbb{Z},\, |r|\ge M} c_r c_k
+ \sum_{r\in\mathbb{Z},\, |k|\ge M} c_r c_k
\; = \; 2{\bf c} \sum_{|k|\ge M} c_k.
\end{eqnarray*}
In the limit $\# I\to\infty$ we obtain (\ref{sum_P}).
\qed
\end{document}
\begin{document}
\setcounter{arxiv}{1}
\title{Maximum density of an induced $5$-cycle is achieved by an iterated blow-up of a $5$-cycle}
\author{
J\'{o}zsef Balogh\thanks{ Department of Mathematics, University of Illinois, Urbana, IL 61801, USA and Bolyai Institute, University of Szeged, Szeged, Hungary E-mail: {\tt [email protected]}. Research is partially supported by Simons Fellowship, NSF CAREER Grant DMS-0745185, Arnold O. Beckman Research Award (UIUC Campus Research Board 13039) and Marie Curie FP7-PEOPLE-2012-IIF 327763.} \and
Ping Hu\thanks{Department of Mathematics, University of Illinois, Urbana, IL 61801, USA and University of Warwick, UK E-mail: {\tt [email protected]}.} \and
Bernard Lidick\'{y}\thanks{Department of Mathematics, Iowa State University, Ames, IA, E-mail: {\tt [email protected]}.}
\and
Florian Pfender\thanks{Department of Mathematical and Statistical Sciences, University of Colorado Denver, E-mail: {\tt
[email protected]}. Research is partially supported by a collaboration grant from the Simons Foundation.}
}
\maketitle
\begin{abstract}
Let $C(n)$ denote the maximum number of induced copies of $5$-cycles in graphs
on $n$ vertices.
For $n$ large enough, we show that
$C(n)=a\cdot b\cdot c \cdot d \cdot e + C(a)+C(b)+C(c)+C(d)+C(e)$,
where $a+b+c+d+e = n$ and $a,b,c,d,e$ are as equal as possible.
Moreover, if $n$ is a power of 5, we show that the unique graph on $n$ vertices
maximizing the number of induced $5$-cycles is an iterated blow-up of a 5-cycle.
The proof uses flag algebra computations and stability methods.
\end{abstract}
\section{Introduction}
In 1975, Pippenger and Golumbic~\cite{PippengerGolumbic1975} conjectured that
in graphs the maximum induced density of a $k$-cycle is $k!/(k^k - k)$ when $k \geq 5$.
In this paper we solve their conjecture for $k=5$. In addition, we also show that
the extremal limit object is unique.
The problem of maximizing the induced density of $C_5$ is also posted
on \url{http://flagmatic.org} as one of the problems where the plain flag algebra
method was applied but failed to provide an exact result.
It was also mentioned by Razborov~\cite{RazborovIMA}.
Problems of maximizing the number of induced copies of a fixed small graph $H$
have attracted a lot of attention recently~\cite{EvanLinial2013,Hirst2014,flagmatic}.
For a list of other results on this so called inducibility of small graphs
of order up to $5$, see the work of Even-Zohar and Linial~\cite{EvanLinial2013}.
{\ifnum\thearxiv=1
In this paper, we use a method that we originally developed for maximizing the
number of rainbow triangles in $3$-edge-colored complete graphs~\cite{BaloghHLPVY:2014+}.
However, the application of the method to the $C_5$ problem is less technical,
and therefore this paper is a more accessible exposition of this new method.}{\fi}
Denote the $(k-1)$-times iterated blow-up of $C_5$ by $C_5^{k\times}$,
see Figure~\ref{fig-construction}.
Let $\mathcal{G}_n$ be the set of all graphs on $n$ vertices, and
denote by $C(G)$ the number of induced copies of $C_5$ in a graph $G$. Define
\[
C(n) = \max_{G\in \mathcal{G}_n} C(G).
\]
We say a graph $G\in \mathcal{G}_n$ is \emph{extremal} if $C(G) = C(n)$. Notice that, since $C_5$ is a self-complementary graph, $G$ is extremal
if and only if its complement is extremal. If $n$ is a power of $5$, we can exactly determine the unique extremal graph and thus $C(n)$.
\begin{theorem}\label{thmC55k}
For $k\ge 1$, the unique extremal graph in $\mathcal{G}_{5^k}$
is $C_5^{k\times}$.
\end{theorem}
\begin{figure}
\caption{The graph $C_5^{k\times}$.}
\label{fig-construction}
\end{figure}
To prove Theorem~\ref{thmC55k}, we first prove the following theorem. Note that this theorem is sufficient to determine the unique limit object
(the graphon) maximizing the density of induced copies of $C_5$.
\begin{theorem}\label{thmrecurs}
There exists $n_0$ such that for every $n \geq n_0$
\[
C(n) = a\cdot b \cdot c \cdot d \cdot e + C(a) + C(b) + C(c) + C(d)+ C(e),
\]
where $a+b+c+d+e = n$ and $a,b,c,d,e$ are as equal as possible.
Moreover, if $G\in \mathcal{G}_n$ is an extremal graph, then $V(G)$ can be partitioned into five sets $X_1, X_2, X_3, X_4$ and $X_5$ of sizes $a, b, c, d$ and $e$ respectively, such that
for
$1\le i< j\le 5$ and $x_i\in X_i$, $x_j\in X_j$, we have $x_ix_j\in E(G)$ if and only if $j-i\in\{1,4\}$.
\end{theorem}
In the next section, we give a brief overview of our method,
in Section~\ref{sec:thmrecurs} we prove Theorem~\ref{thmrecurs},
and in Section~\ref{sec:thmC55k} we prove Theorem~\ref{thmC55k}.
\section{Method and Flag Algebras}
Our method relies on the theory of flag algebras developed by Razborov~\cite{Razborov:2007}.
Flag algebras can be used as a general tool to attack problems from extremal combinatorics.
Flag algebras were used for a wide range of problems,
for example
the Caccetta-H\"aggkvist conjecture~\cite{HladkyKN:2009,RazborovCH:2011},
Tur\'an-type problems in graphs~\cite{DasHMNS:2012,Grzesik:2011,Hatami:2011,PikhurkoV:2013,Razborov:2008,Reiher:2012,Sperfeld:2011},
$3$-graphs~\cite{Falgas:2012,GlebovKV:2013}
and hypercubes~\cite{Baber:2012,BaloghHLL:2014},
extremal problems in a colored environment~\cite{BaberT:2013,BaloghHLPVY:2014+,CummingsKPSTY:2012},
and also to problems in geometry~\cite{Kral:2011} or extremal theory of permutations~\cite{BaloghHLPUV:2014}.
For more details on these applications, see a recent survey of Razborov~\cite{Razborov13}.
A typical application of the so called {\em plain flag algebra method} provides a bound on densities of substructures.
In some cases the bound is sharp, which happens most often when the extremal construction is `clean',
for example a simple blow-up of a small graph, replacing each vertex by a large independent set.
Obtaining an exact result from the sharp bound usually consists of first
bounding the densities of some small substructures by $o(1)$,
which can be read off from the flag algebra computation.
Forbidding these substructures can then reveal a lot of structure of the extremal graph.
Finally, stability arguments are used to extract the precise extremal structure.
Simple blow-ups of small graphs appear very often as extremal graphs, in fact there are large families of graphs whose extremal graphs
for the inducibility are of this type, see Hatami, Hirst and Norin~\cite{Hatami2014}.
However, there are also many problems where the extremal construction is an iterated blow-up as shown by Pikhurko~\cite{OlegItterative}.
For our problem, the conjectured extremal graph has an iterated structure,
for which it is rare to obtain the precise density from plain flag algebra
computations alone. One such rare example is the problem to determine the inducibility of small out-stars
in oriented graphs~\cite{Falgas:2012} (note that the problem of inducibility of all out-stars
was recently solved by Huang~\cite{Huang:2014} using different techniques).
Hladk\'y, Kr\'a\soft{l} and Norin announced that they found the inducibility
of the oriented path of length 2, which also has an
iterated extremal construction, via a flag algebra method.
{\ifnum\thearxiv=1 Other than these two examples and~\cite{BaloghHLPVY:2014+},}{\fi
In~\cite{BaloghHLPVY:2014+} we determine the iterated extremal construction maximizing
the number of rainbow triangles in $3$-edge-colored complete graphs.
Other than these three examples,}
we are not aware of any applications of flag algebras which completely determined an iterative structure.
For our question, a direct application of the plain method gives
an upper bound on the limit value and shows that
$\lim_{n\to\infty}C(n)/{n\choose 5} < 0.03846157$,
which is slightly more than the density of $C_5$ in the conjectured extremal
construction, which is $\frac{1}{26}\approx 0.03846154$.
This difference may appear very small, but the bounds on densities of subgraphs
not appearing in the extremal structure are too weak to allow the standard methods to work.
Instead, we use flag algebras to find bounds on densities of other subgraphs, which appear with fairly
high density in the extremal graph. This enables us to better control the slight lack of performance
of the flag algebra bounds as these small errors have a weaker relative effect on larger densities.
\section{Proof of Theorem~\ref{thmrecurs}}\label{sec:thmrecurs}
In our proofs we consider densities of $7$-vertex subgraphs.
Guided by their prevalence in the conjectured extremal graph, the following two types of graphs will play an important role.
We call a graph $C22111$ if it can be obtained from $C_5$ by duplicating two vertices.
We call a graph $C31111$ if it can be obtained from $C_5$ by tripling one vertex.
The edges between the original vertices and their copies are not specified, and there are two complementary types of $C22111$, depending
on the adjacency of the two doubled vertices in $C_5$. Technically, $C22111$ and $C31111$ denote collections of several graphs.
Examples of $C22111$ and $C31111$ are depicted in Figure~\ref{fig-conf}.
We slightly abuse notation by using $C22111$ and $C31111$ also to denote the densities of these graphs, i.e., the probability that randomly chosen $7$ vertices induce the appropriate $7$-vertex blow-up of $C_5$.
Moreover, for a set of vertices $Z$ we denote by $C22111(Z)$ and $C31111(Z)$ the densities
of $C22111$ and $C31111$ containing $Z$, i.e., for a graph $G$ on $n$ vertices, $C22111(Z)$ ($C31111(Z)$) is the number of $C22111$($C31111$) containing $Z$ divided by $\binom{n-|Z|}{7-|Z|}$.
\begin{figure}
\caption{Sketches of $C22111$ and $C31111$.}
\label{fig-conf}
\end{figure}
\noindent
We start with the following statement.
\begin{proposition} \label{prop:flag}
There exists $n_0$ such that every extremal graph $G$ on at least $n_0$ vertices satisfies:
\begin{align}
C_5 &< 0.03846157; \nonumber\\
4\cdot C22111-11.94 \cdot C31111 &\ge \frac{1349894760355389179787709186391}{420000000000000000000000000000000} + o(1)> 0.003214. \label{diff}
\end{align}
\end{proposition}
\begin{proof}
This follows from a standard application of the plain flag algebra method.
The first inequality was obtained by Flagmatic~\cite{flagmatic}, which also provides
the corresponding certificate.
For the second inequality, we minimize the left side
with the extra constraint that $C_5\ge \frac1{26}$.
We performed the computation on $7$ vertices since the resulting bound
was sufficient and rounding the solution is easier on $7$ vertices than
on $8$.
For certificates, see \url{http://orion.math.iastate.edu/lidicky/pub/c5/}.
\end{proof}
The expressions from Proposition~\ref{prop:flag} compare to the following limiting values in the iterated blow-up $C_5^{k\times}$, where $k\to\infty$:
\begin{align*}
C_5 &= \frac{1}{26} \approx 0.03846154; &
4\cdot C22111-11.94 \cdot C31111 &= 4\cdot\frac{5}{31}-11.94 \cdot\frac{5}{93} \approx 0.0032258.
\end{align*}
Notice that in the iterated blow-up of $C_5$, in the limit $4\cdot C22111-12 \cdot C31111=0$. For our method to work,
we need a lower bound greater than zero. On the other hand, computational experiments convinced us that the method works best if the
bound is only slightly above zero, where a suitable factor is again determined by computations.
Let $G$ be an extremal graph on $n$ vertices, where $n$ is sufficiently large to apply Proposition~\ref{prop:flag}.
Denote the set of all induced $C_5$s in $G$ by $\mathcal{Z}$.
Let $a \in \mathbb{R}$, and let $Z=z_1z_2z_3z_4z_5$ be an induced $C_5$ maximizing $C22111(Z) - a\cdot C31111(Z)$.
Then
\begin{align}
&\left(C22111(Z) - a\cdot C31111(Z)\right)\binom{n-5}{2}
\geq \frac{1}{|\mathcal Z|} \sum_{Y \in \mathcal Z}\left(C22111(Y) - a\cdot C31111(Y)\right)\binom{n-5}{2} \nonumber\\
&= \frac{\left( 4\cdot C22111 - 3a\cdot C31111\right)\binom{n}{7}}{C_5\binom{n}{5}} \nonumber\\
&= \frac{\tfrac{4}{21}C22111 - \tfrac{a}{7}C31111}{C_5} \binom{n-5}{2}. \nonumber
\end{align}
As mentioned above, computations indicate that we get the most useful bounds if $C22111(Z) - a\cdot C31111(Z) $ is close but not too close to $0$.
Using \eqref{diff} and letting $a=3.98$, we get
\begin{align}
C22111(Z) - 3.98\cdot C31111(Z)
> 0.003979. \label{main2}
\end{align}
For $1\le i\le 5$, we define sets of vertices $Z_i$ which look like $z_i$
to the other vertices of $Z$. Formally,
\[
Z_i:=\{v\in V(G):G[(Z\setminus z_i)\cup v] \cong C_5\}\text{ for } 1\le i\le 5.
\]
Note that $Z_i\cap Z_j=\emptyset$ for $i\ne j$.
We call a pair $v_iv_j$ {\em funky}, if $v_iv_j$ is an edge while $z_iz_j$ is not an edge or vice versa, where $v_i\in Z_i$, $v_j\in Z_j$, $1\le i<j\le 5$.
In other words, $G[Z\cup\{v_i,v_j\}]\ncong C22111$, i.e., every funky pair destroys a potential copy of $C22111(Z)$. Denote by $E_f$ the set of funky pairs. With this notation, \eqref{main2} implies that
\[
\sum_{1\le i<j\le 5}|Z_i||Z_{j}|-|E_f|-3.98\sum_{i\in[5]}|Z_i|^2/2 >0.003979\binom{n-5}{2}.
\]
For any choice of sets $X_i\subseteq Z_i$, where $i\in[5]$, let $X_0:=V(G)\setminus\bigcup X_i$. Let $f$ be the number of funky pairs not incident to vertices in $X_0$, divided by $n^2$ for normalization, and denote $x_i =\tfrac1n|X_i|$ for $i \in \{0,\ldots,5\}$. Choose the $X_i$ (possibly $X_i=Z_i$) such that the left hand side in
\begin{align}
2\sum_{1\le i<j\le 5}x_ix_{j}-2f-3.98\sum_{i\in [5]}x_i^2 > 0.003979\label{main3}
\end{align}
is maximized.
In order to simplify notation, we use $X_{i+5}=X_{i}$ and $x_{i+5} = x_i$ for all $i \geq 1$.
\begin{claim}
The following equations are satisfied:
\begin{align}
0.19816< x_i &<0.20184 \quad\text{for}\quad i\in[5]; \label{xbound}\\
x_0&<0.0026;\label{Xzeromax}\\
f &< 0.000011.\label{funky}
\end{align}
\end{claim}
\begin{proof}
To obtain \eqref{xbound}--\eqref{funky}, we need to solve four quadratic programs.
The objectives are to minimize $x_1$, maximize $x_1$, maximize $x_0$, and to maximize $f$, respectively.
The constraints are \eqref{main3} and $\sum_{i=0}^5 x_i = 1$ in all four cases.
By symmetry, bounds for $x_1$ apply also for $x_2$, $x_3$, $x_4$, and $x_5$.
Here we describe the process of obtaining the lower bound on $x_1$ in \eqref{xbound}.
We need to solve the following program $(P)$:
\[
(P) \begin{cases}
\text{minimize} & x_1\\
\text{subject to}
& \sum_{i=0}^5 x_i = 1, \\
& 2\sum_{1\le i<j\le 5}x_ix_{j}-2f-3.98\sum_{i\in [5]}x_i^2 > 0.003979,\\
& x_i \geq 0 \text{ for } i \in \{0,1,\ldots,5\}. \\
\end{cases}
\]
We claim that if $(P)$ has a feasible solution $S$,
then there exists a feasible solution $S'$ of $(P)$ where
\begin{align*}
S'(x_1) = S(x_1),\quad S'(f) &= 0, \quad S'(x_0)=S(x_0), \\
S'(x_2) = S'(x_3) = S'(x_4)&=S'(x_5) = \frac{1}{4}\big(1 - S(x_1)-S(x_0)\big).
\end{align*}
Since $x_2$, $x_3$, $x_4$ and $x_5$ appear only in constraints, we only need to
check if \eqref{main3} is satisfied.
The left hand side of \eqref{main3} can be rewritten as
\begin{align*}
&2x_1\sum_{2\le i\le 5}x_i + 2\sum_{2\le i<j\le 5}x_ix_j-3.98\sum_{1\le i\le 5} x_i^2 -2f \\
&= ~ 2x_1\sum_{2\le i\le 5}x_i -\sum_{2\le i<j\le 5} (x_i-x_j)^2 - 0.98\sum_{2\le i\le 5} x_i^2 -3.98x_1^2-2f.
\end{align*}
Note that the term $\sum_{2\le i<j\le 5} (x_i-x_j)^2$ is minimized if $x_i=x_j$ for
all $i,j \in \{2,3,4,5\}$.
The term $x_2^2+x_3^2+x_4^2+x_5^2$, subject to $x_2+x_3+x_4+x_5$ being a constant, is also minimized
if $x_i=x_j$ for all $i,j \in \{2,3,4,5\}$.
Since $f \geq 0$, the term $2f$ is minimized when $f=0$.
Hence \eqref{main3} is satisfied by $S'$ and we can add the constraints $x_2=x_3=x_4=x_5$ and $f=0$ to bound $x_1$.
The resulting program $(P')$ is
\[
(P') \begin{cases}
\text{minimize} & x_1\\
\text{subject to}
& x_0+x_1+4y= 1, \\
& 8x_1y - 0.98\cdot4 y^2 -3.98x_1^2 \geq 0.003979,\\
& x_0,x_1,y \geq 0. \\
\end{cases}
\]
We solve $(P')$ using Lagrange multipliers. We delegate the work to Sage~\cite{sage} and
we provide the Sage script at \url{http://orion.math.iastate.edu/lidicky/pub/c5/}.
Finding an upper bound on $x_1$ is done by changing the objective to maximization.
Similarly, we can set $x_1=x_2=x_3=x_4=x_5 = 1/5$ to get an upper bound on $f$.
We can set $f=0$ and $x_1=x_2=x_3=x_4=x_5 = (1-x_0)/5$ to get an upper bound on $x_0$.
We omit the details. Sage scripts for solving the resulting programs are provided at \url{http://orion.math.iastate.edu/lidicky/pub/c5/}.
\end{proof}
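For reference only, the lower and upper bounds on $x_1$ in \eqref{xbound} can also be reproduced with a generic local solver instead of the Sage script. The sketch below is not the script used for the paper and, since the solver is local, its output is merely indicative.
\begin{verbatim}
# Solve the relaxed program (P'): extremize x_1 subject to
#   x_0 + x_1 + 4y = 1  and  8*x_1*y - 3.92*y^2 - 3.98*x_1^2 >= 0.003979.
from scipy.optimize import minimize

def extremal_x1(sign):                 # sign=+1: minimize x_1, sign=-1: maximize
    cons = [
        {"type": "eq",   "fun": lambda v: v[0] + v[1] + 4 * v[2] - 1},
        {"type": "ineq",
         "fun": lambda v: 8*v[1]*v[2] - 3.92*v[2]**2 - 3.98*v[1]**2 - 0.003979},
    ]
    res = minimize(lambda v: sign * v[1], x0=[0.0, 0.2, 0.2], method="SLSQP",
                   bounds=[(0, 1)] * 3, constraints=cons)
    return res.x[1]

print(extremal_x1(+1), extremal_x1(-1))   # roughly 0.198... and 0.201...
\end{verbatim}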
For any vertex $v\in X_i,i\in[5]$ we use $d_f(v)$ to denote the number of funky pairs from $v$ to $(X_1\cup X_2\cup X_3\cup X_4\cup X_5)\setminus X_i$ after normalizing by $n$.
If we move $v$ from $X_1$ to $X_0$, then the left hand side of \eqref{main3} will decrease by
\[
\frac{1}{n}\left(2(x_2+x_3+x_4+x_5) - 2d_f(v)-2\cdot 3.98 \cdot x_1 + o(1)\right).
\]
If this quantity was negative, then the left hand side of \eqref{main3} could be increased by moving $v$ to $X_0$, contradicting our choice of $X_i$.
This together with~\eqref{xbound} implies that
\begin{align}
d_f(v) \leq x_2+x_3+x_4+x_5-3.98\cdot x_1+o(1) \le 1-4.98\cdot x_1+o(1)\le 0.0132.\label{maxfunky}
\end{align}
Symmetric statements hold also for every vertex $v\in X_2 \cup X_3 \cup X_4 \cup X_5$.
\begin{claim}\label{nofunky}
There are no funky pairs.
\end{claim}
\begin{proof}
Assume there is a funky pair $uv$.
By symmetry, we only need to consider two cases, either $u\in X_1,v\in X_2$ or $u\in X_1, v\in X_3$. In fact, it is sufficient to
check the case where $u\in X_1$ and $v\in X_2$, so $uv$ is not an edge. The other case then follows from considering the complement of $G$.
Let $G'$ be a graph obtained from $G$ by adding the edge $uv$, i.e., changing $uv$ to be not funky.
We compare the number of induced $C_5$s containing $\{u,v\}$ in $G$
and in $G'$.
In $G'$, there are at least
\[
\left[x_3x_4x_5-(d_f(u)+d_f(v))\max\{x_3x_4,x_3x_5,x_4x_5\}-f\cdot\max\{x_3,x_4,x_5\}\right]n^3
\]
induced $C_5$s containing $uv$, since we can pick one vertex from each of $X_3,X_4,X_5$ to form an induced $C_5$ as long as none of the resulting nine pairs is funky.
Now we count the number of induced $C_5$s in $G$ containing $\{u,v\}$. The number of such $C_5$s which contain vertices from $X_0$ is upper bounded by $x_0n^3/2$.
Next we count the number of such $C_5$s avoiding $X_0$.
Observe that there are no $C_5$s avoiding $X_0$ in which $uv$ is the only funky pair.
The number of $C_5$s containing another funky pair $u'v'$ with $\{u,v\}\cap\{u',v'\}=\emptyset$ can be upper bounded by $fn^3$.
We are left to count $C_5$s where the other funky pairs contain $u$ or $v$.
The number of $C_5$s containing at least two vertices other than $u$ and $v$ which
are in funky pairs can be upper bounded by $(d_f(u)^2/2+d_f(v)^2/2+d_f(u)d_f(v))n^3$.
It remains to count only $C_5$s containing exactly one vertex $w$ where
$uw$ and $vw$ are the options for funky pairs. The number of choices of $w$ is at most $(d_f(u)+d_f(v))n$.
As $\{u,v,w\}$ is in an induced $C_5$, the set $\{u,v,w\}$ induces a path in either $G$ or the complement of $G$.
Let the middle vertex of that path be in $X_i$.
If $G[\{u,v,w\}]$ is a path, then the remaining two vertices of a $C_5$ cannot be in $X_{i+1}\cup X_{i+4}$.
If $G[\{u,v,w\}]$ is the complement of a path, then the remaining two vertices cannot be in $X_{i+2}\cup X_{i+3}$.
Hence the remaining two vertices of a $C_5$ containing $\{u,v,w\}$ can be chosen from at most $3n \max\{x_i\}$ vertices.
This gives an upper bound of $(d_f(u)+d_f(v))n\binom{3n \max\{x_i\}}{2}$ on the number of such $C_5$s.
Now we compare the number of induced $C_5$s containing $uv$ in $G$ and in $G'$ .
We use $x_{max}$ and $x_{min}$ to denote the upper and lower bound respectively from \eqref{xbound}, use $d_f$ to denote the upper bound on $d_f(u)$ and $d_f(v)$ from \eqref{maxfunky}, and also use bounds from \eqref{Xzeromax} and \eqref{funky}.
The number of $C_5$s containing $uv$ divided by $n^3$ is
\begin{align*}
\mbox{in }G: &\le x_0/2 + f+2d_f^2+9d_fx_{max}^2 \le 0.0065;\\
\mbox{in }G':&\ge (x_{min}-2d_f)x_{min}^2-fx_{max} \ge 0.0067.
\end{align*}
This contradicts the extremality of $G$.
\end{proof}
Next, we want to show that $X_0=\emptyset$. For this, suppose that there exists an $x\in X_0$. We will add $x$ to one of the $X_i$, $i \in [5]$ such that $d_f(x)$ is minimal.
By symmetry, we may assume that $x$ is added to $X_1$. Note that adding a single vertex to $X_1$ does not change any of the density bounds we used above by more than $o(1)$.
\begin{claim}\label{X0funky}
For every $x \in X_0$, if $x$ is added to $X_1$ then $d_f(x)\geq 0.081$.
\end{claim}
\begin{proof}
Let $xw$ be a funky pair, where $w \in X_2$. The case where $w \in X_3$ can be argued the same way by considering the complement of $G$.
Let $G'$ be obtained from $G$ by adding the edge $xw$.
Since $G$ is extremal,
we have $C(G')\le C(G)$.
The following analysis is similar to the proof of Claim~\ref{nofunky}, however, we can say a bit more since every funky pair contains $x$.
First we count induced $C_5$s containing $xw$ in $G$.
The number of induced $C_5$s containing $xw$ and other vertices from $X_0$ is easily bounded from above by $x_0n^3/2$.
Let $F$ be an induced $C_5$ in $G$ containing $xw$ and avoiding $X_0\setminus\{x\}$.
Since all funky pairs contain $x$, $F-x$ is an induced path $p_0p_1p_2p_3$ without funky pairs.
Either $p_j \in X_2$ for all $j \in \{0,1,2,3\}$ or there is an $i \in \{1,2,3,4,5\}$
such that $p_j \in X_{i+j}$ for all $j \in \{0,1,2,3\}$.
The first case is depicted in Figure~\ref{fig-funky}(a). Consider now the second case.
If $i \in \{2,3,4\}$, then $xp_0p_1p_2p_3$ does not satisfy the definition of $F$.
Hence $i \in \{1,5\}$ and the possible $C_5$s are depicted in Figure~\ref{fig-funky}(b) and (c).
In each of the three cases, $F$ contains exactly two funky pairs, $xw$ and $xy$.
The location of $y$ entirely determines the location of $F-x$.
Hence the number of induced $C_5$s containing $xw$ is at most $d_f(x)x_{max}^2n^3$.
In $G'$, there are at least $\big(x_3x_4x_5-d_f(x)\max\{x_3x_4,x_3x_5,x_4x_5\}\big)n^3$
induced $C_5$s containing $xw$.
We obtain
\begin{align*}
C(G)/n^3&\le d_f(x)x_{max}^2+x_0/2 & &\text{and} &
C(G')/n^3&\ge (x_{min}-d_f(x))x_{min}^2.
\end{align*}
Since $C(G')\le C(G)$, we have
\[
(x_{min}-d_f(x))x_{min}^2\le d_f(x)x_{max}^2+x_0/2,
\]
which together with \eqref{xbound} and \eqref{Xzeromax} gives $d_f(x)\ge 0.081$.
\end{proof}
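For the record, the last step is plain arithmetic: solving the displayed inequality for $d_f(x)$ and inserting $x_{min}=0.19816$, $x_{max}=0.20184$ and $x_0\le 0.0026$ gives
\[
d_f(x)\ \ge\ \frac{x_{min}^3 - x_0/2}{x_{max}^2 + x_{min}^2}
\ \ge\ \frac{0.19816^3 - 0.0013}{0.20184^2 + 0.19816^2}
\ \approx\ 0.08101\ \ge\ 0.081.
\]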
\begin{figure}
\caption{Possible $C_5$s with funky pair $xw$. They all have exactly one other funky pair $xy$.}
\label{fig-funky}
\end{figure}
\begin{claim}\label{uniformVertex}
Every vertex of the extremal graph $G$ is in at least $(1/26+o(1))\binom{n}{4}\approx 0.001602564 n^4$ induced $C_5$s.
\end{claim}
\begin{proof}
For every vertex $u\in V(G)$, denote by $C_5^u$ the number of $C_5$s in $G$ containing $u$.
For any two vertices $u,v\in V(G)$, we show that $C_5^u - C_5^v < n^3$, which implies Claim~\ref{uniformVertex}.
Denote by $C_5^{uv}$ the number of $C_5$s in $G$ containing both $u$ and $v$.
A trivial bound is $C_5^{uv} \leq \binom{n-2}{3}$.
Let $G'$ be obtained from $G$ by deleting $v$ and
duplicating $u$ to $u'$, i.e., for every vertex $x$ we add the edge $xu'$ iff $xu$ is an edge.
As $G$ is extremal we have
\begin{align*}
0\ge C(G')-C(G)\ge C_5^u - C_5^v - C_5^{uv} \ge C_5^u - C_5^v- \binom{n-2}{3}.
\end{align*}
\end{proof}
\begin{claim}\label{X0empty}
The set $X_0$ is empty.
\end{claim}
\begin{proof}
Assume that there is an $x\in X_0$. We count $C_5^x$, the number of induced $C_5$s containing $x$. Our goal is to show that $C_5^x$ is smaller than the value in
Claim~\ref{uniformVertex}, which is a contradiction.
Let $a_i n$ be the number of neighbors of $x$ in $X_i$ and $b_i n$ be the number of non-neighbors of $x$ in $X_i$ for $i \in \{0,1,2,3,4,5\}$.
The number of $C_5$s where the other four vertices are in $X_1\cup X_2\cup X_3\cup X_4\cup X_5$
is upper bounded by
\[
\left(a_1b_2b_3a_4 + a_2b_3b_4a_5 + a_3b_4b_5a_1+a_4b_5b_1a_2+a_5b_1b_2a_3+\frac{1}{4}\sum_{i=1}^5a_i^2b_i^2\right)n^4.
\]
Moreover, we also need to include the $C_5$s containing vertices from $X_0$ in our bound,
which we do very generously by increasing all variables by $a_0$ or $b_0$.
Since $x_i = a_i+b_i$, we can use \eqref{xbound} for every $i \in [5]$ as constraints.
We also use Claim~\ref{X0funky} to obtain constraints since
it is possible to express $d_f(x)$ using $a_i$s and $b_i$s if $x$ is added to $X_j$ for all $i,j \in [5]$.
By combining the previous objective and constraints, we obtain the following program $(P)$, whose objective gives an upper bound on the number
of $C_5$s containing $x$ divided by $n^4$.
\[
(P) \begin{cases}
\text{maximize}
& \sum_{i=1}^5(a_i+a_0)(b_{i+1}+b_0)(b_{i+2}+b_0)(a_{i+3}+a_0)+\frac{1}{4}\sum_{i=1}^5a_i^2b_i^2\\
\text{subject to}
& \sum_{i=0}^5 (a_i+b_i)= 1, \\
& 0.19816\le a_i+b_i\le0.20184 \text{ for } i \in \{1,2,3,4,5\}, \\
& a_0+b_0 \le 0.0026, \\
& b_2+b_5+a_3+a_4\ge 0.081,\\
& b_1+b_3+a_4+a_5\ge 0.081,\\
& b_2+b_4+a_1+a_5 \ge 0.081,\\
& b_3+b_5+a_1+a_2 \ge 0.081,\\
& b_4+b_1+a_2+a_3 \ge 0.081,\\
& a_i,b_i\geq 0 \text{ for } i \in \{0,1,2,3,4,5\}. \\
\end{cases}
\]
Instead of solving $(P)$ we solve a slight relaxation $(P')$ with increased upper bounds on $a_i+b_i$,
which allows us to drop $a_0$ and $b_0$. Since the objective function is maximizing, we can claim
that $a_i+b_i$ is always as large as possible, which decreases the degrees of freedom.
\[
(P') \begin{cases}
\text{maximize}
& f=\sum_{i=1}^5a_ib_{i+1}b_{i+2}a_{i+3}+\frac{1}{4}\sum_{i=1}^5a_i^2b_i^2\\
\text{subject to}
& a_i+b_i = 0.21 \text{ for } i \in \{1,2,3,4,5\}, \\
& b_2+b_5+a_3+a_4\ge 0.081,\\
& b_1+b_3+a_4+a_5\ge 0.081,\\
& b_2+b_4+a_1+a_5 \ge 0.081,\\
& b_3+b_5+a_1+a_2 \ge 0.081,\\
& b_4+b_1+a_2+a_3 \ge 0.081,\\
& a_i,b_i\geq 0 \text{ for } i \in \{1,2,3,4,5\}. \\
\end{cases}
\]
Note that the resulting program $(P')$ has only 5 degrees of freedom.
We find an upper bound of the solution of $(P')$ by a brute force method. We discretize
the space of possible solutions, and bound the gradient of the target function to control the
behavior between the grid points.
For solving $(P')$, we fix a constant $s$ which will correspond to the number of steps. For every
$a_i$ we check $s+1$ equally spaced values between $0$ and $0.21$ that include
the boundaries.
By this we have a grid of $s^5$ boxes where every feasible solution of $(P')$, and hence also of $(P)$, is in one of the boxes.
Next we need to find the partial derivatives of $f$. Since $f$ is symmetric, we only check the partial
derivative with respect to $a_1$.
\[
\frac{\partial f}{\partial a_1} = b_{2}b_{3}a_{4} + a_{3}b_{4}b_{5} + \frac{1}{2}a_1b_1^2
\]
We want to find an upper bound on $\frac{\partial f}{\partial a_1}$.
We can pick $0.21$ as an upper bound
on $a_i+b_i$.
Hence we assume $a_1+b_1=a_3+b_3=a_4+b_4=b_{2}=b_5=0.21$ and we maximize
\[
b_2b_{3}a_{4}+a_{3}b_{4}b_5= 0.21\left((0.21-a_3)a_4 + a_3(0.21-a_4)\right) = 0.21\left(0.21a_4 + 0.21a_3 -2a_3a_4\right).
\]
This is maximized if $a_3=0, a_4=0.21$ or $a_3=0.21, a_4=0$ and gives the value $0.21^3$.
Hence
\[
\frac{1}{2}a_1b_1^2 =\frac42 a_1\cdot \frac{b_1}{2}\cdot \frac{b_1}{2}\le \frac{2( a_1+b_1)^3}{3^3} = \frac{2\cdot 0.21^3}{27}.
\]
The resulting upper bound is
\[
\frac{\partial f}{\partial a_1} \leq 0.21^3 + \frac{2\cdot 0.21^3}{27} < 0.001.
\]
Hence in a box with side length $t$ the value of $f$ cannot be bigger than the
value at a corner plus $5t/2\cdot0.001$. The factor $5t/2$ comes from
the fact that the closest corner is in distance at most $t/2$ in each of the $5$ coordinates.
If we set $s = 100$, we compute that the maximum of the objective of $(P')$ over all grid points is less than 0.00157.
This can be checked by a computer program \texttt{mesh-opt.cpp} which computes the values at all grid points.
With $t<0.21/s=0.0021$, we have $5t/2\cdot0.001<0.00001 $.
We conclude that $x$ is in less than $0.00158n^4$ induced $C_5$s which contradicts Claim~\ref{uniformVertex}.
Let us note that if we choose $s = 200$, we could conclude that $x$ is in less than $0.00147 n^4$ induced $C_5$s.
\end{proof}
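For the interested reader, the grid search is easy to reproduce. The sketch below is a pure-Python stand-in for \texttt{mesh-opt.cpp} (it is not the program used for the paper, and it uses a much smaller value of $s$ than the $s=100$ above, so the resulting bound is correspondingly weaker).
\begin{verbatim}
# Evaluate the objective of (P') on a grid with step t = 0.21/s, keep only the
# feasible points, and add the gradient slack 5*t/2*0.001 from the text.
import itertools

s, top = 10, 0.21                            # s = 100 in the paper's C++ code
grid = [top * i / s for i in range(s + 1)]

def objective(a):                            # a = (a_1,...,a_5), b_i = top - a_i
    b = [top - v for v in a]
    f = sum(a[i] * b[(i + 1) % 5] * b[(i + 2) % 5] * a[(i + 3) % 5]
            for i in range(5))
    return f + 0.25 * sum(a[i] ** 2 * b[i] ** 2 for i in range(5))

def feasible(a):                             # the five linear constraints of (P')
    b = [top - v for v in a]
    return all(b[(i + 1) % 5] + b[(i + 4) % 5]
               + a[(i + 2) % 5] + a[(i + 3) % 5] >= 0.081 for i in range(5))

best = max(objective(a) for a in itertools.product(grid, repeat=5) if feasible(a))
t = top / s
print(best, best + 5 * t / 2 * 0.001)        # grid maximum and padded bound
\end{verbatim}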
We have just established the ``outside'' structure of $G$.
Observe that in this outside structure, an induced $C_5$ can appear only if it either intersects each of the classes
in exactly one vertex, or if it lies completely inside one of the classes.
This implies that
\[C(n)=(x_1\cdot x_2\cdot x_3 \cdot x_4 \cdot x_5)n^5 + C(x_1n)+C(x_2n)+C(x_3n)+C(x_4n)+C(x_5n).\]
By averaging over all induced subgraphs of $G$ of order $n-1$, we can easily see that $C(n)/\binom{n}{5}\le C(n-1)/\binom{n-1}{5}$ for all $n$, so
\[
\ell:=\lim_{n\to\infty}\frac{C(n)}{{n\choose 5}}
\]
exists. Therefore,
\[\ell+o(1)=5!\cdot x_1\cdot x_2\cdot x_3 \cdot x_4 \cdot x_5 + \ell (x_1^5+x_2^5+x_3^5+x_4^5+x_5^5),\]
which implies that $x_i=\frac15+o(1)$, and $\ell=\frac1{26}$, given the constraints on the $x_i$.
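For completeness, here is the short computation behind the last sentence: substituting $x_i=\frac15$ into the displayed relation gives
\[
\ell = \frac{5!}{5^5} + \frac{5}{5^5}\,\ell,
\qquad\text{so}\qquad
\ell\Big(1-\frac1{625}\Big)=\frac{120}{3125}
\quad\text{and}\quad
\ell=\frac{5!}{5^5-5}=\frac1{26},
\]
which matches the value $k!/(k^k-k)$ for $k=5$ from the conjecture of Pippenger and Golumbic.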
In order to prove Theorem~\ref{thmrecurs}, it remains to show that in fact $|X_i|-|X_j|\le 1$ for all $i,j \in \{1,\ldots,5\}$.
\newcommand{\mathrm{avg}}{\mathrm{avg}}
\begin{claim}\label{clbalance}
For $n$ large enough, we have $|X_i|-|X_j|\le 1$ for all $i,j \in \{1,\ldots,5\}$.
\end{claim}
\begin{proof}
By symmetry, assume for contradiction that $|X_1|-|X_2|\ge 2$.
Let $v \in X_1$ where $C_5^v$ is minimized over the vertices in $X_1$
and let $w \in X_2$ where $C_5^w$ is maximized over the vertices in $X_2$.
As $G$ is extremal, $C_5^v+C_5^{vw}-C_5^w\ge 0$; otherwise, we can increase the number of $C_5$s
by replacing $v$ by a copy of $w$.
Let $y_i:=|X_i|=x_in$. By the monotonicity of $C(n)/\binom{n}{5}$, we have
\[
\frac1{26}+o(1)\ge \frac{C(y_2)}{{y_2\choose 5}}\ge \frac{C(y_1)}{{y_1\choose 5}}\ge \frac1{26} -o(1).
\]
Therefore, using that $y_1-y_2\ge 2$,
\begin{align*}
C_5^v+C_5^{vw}-C_5^w&\le \frac{C(y_1)}{y_1}+y_2y_3y_4y_5+y_3y_4y_5-\frac{C(y_2)}{y_2}-y_1y_3y_4y_5\\
&= \frac{y_2C(y_1)-y_1C(y_2)}{y_1y_2}+(y_2-y_1+1)y_3y_4y_5\\
&\le \left( \frac1{26}+o(1)\right)\frac1{y_1y_2} \left( y_2{y_1\choose 5}-y_1{y_2\choose 5}\right)+(y_2-y_1+1)y_3y_4y_5\\
&\le \left( \frac1{26\cdot 5!}+o(1)\right) \left( y_1^4-y_2^4\right)+(y_2-y_1+1)y_3y_4y_5\\
&= \left( \frac1{26\cdot 5!}+o(1)\right) (y_1-y_2)\left( y_1^3+y_1^2y_2+y_1y_2^2+y_2^3\right)+(y_2-y_1+1)y_3y_4y_5\\
&= (y_1-y_2)\left(\left( \frac1{26\cdot 5!}+o(1)\right) \frac{4n^3}{125} - \frac{n^3}{125} \right)+\frac{(1+o(1))n^3}{125}\\
&\le \left( \frac2{26\cdot 5!}+o(1)\right) \frac{4n^3}{125} - \frac{(1+o(1))n^3}{125}
<0,
\end{align*}
a contradiction.
\end{proof}
With this claim, the proof of Theorem~\ref{thmrecurs} is complete.
\section{Proof of Theorem~\ref{thmC55k}}\label{sec:thmC55k}
Theorem~\ref{thmC55k} is a consequence of Theorem~\ref{thmrecurs}.
The main proof idea is to take a minimal counterexample $G$ and show that some blow-up of $G$ contradicts Theorem~\ref{thmrecurs}.
\begin{proof}[Proof of Theorem~\ref{thmC55k}.]
Theorem~\ref{thmC55k} is easily seen to be true for $k=1$.
Suppose for a contradiction that there is a graph $G$ on $n=5^k$ vertices with $C(G)\ge C(C_5^{k\times})$ that
is not isomorphic to $C_5^{k\times}$, where $k\ge 2$ is minimal.
Let $n_0$ be the $n_0$ from the statement of Theorem~\ref{thmrecurs}.
We say that a graph $F$ of size $5m$ can be \emph{5-partitioned}, if $V(F)$ can be partitioned
into five sets $X_1,X_2,X_3,X_4,X_5$ with $|X_i|=m$ for all $i \in [5]$ and for every
$1 \leq i < j \leq 5$, every $x_i \in X_i$ and $x_j \in X_j$ are adjacent
if and only if $|i-j| \in \{1,4\}$. Notice that this is the structure described by Theorem~\ref{thmrecurs}. Hence if $5m \geq n_0$, and $F$ is extremal then $F$ can be $5$-partitioned.
If $G$ can be $5$-partitioned, then $G$ is isomorphic to $C_5^{k\times}$ by the minimality of $k$, a contradiction.
Therefore, $G$ cannot be $5$-partitioned.
Let $H$ be an extremal graph on $5^\ell>n_0$ vertices.
Blowing up every vertex of $C_5^{k\times}$ by a factor of $5^\ell$, and inserting $H$ in every part, gives an extremal graph $G_1$
on $5^{k+\ell}$ vertices by $\ell$ applications of Theorem~\ref{thmrecurs}. On the other hand, the graph $G_2$ obtained by blowing up every vertex of $G$ by a factor of $5^\ell$,
and inserting $H$ in every part, contains at least as many $C_5$s as $G_1$,
\begin{align*}
C(G_1) &= 5^k\cdot C(H) + C(C_5^{k\times}) \cdot (5^\ell)^5, &
C(G_2) &= 5^k\cdot C(H) + C(G) \cdot (5^\ell)^5,
\end{align*}
so $C(G_1) \leq C(G_2)$. Hence $G_2$ must also be extremal.
Therefore $G_2$ can be $5$-partitioned into five sets $X_1,X_2,X_3,X_4,X_5$ with $|X_i|=5^{k+\ell-1}$. In particular, two vertices in $G_2$
are in the same set $X_i$ if and only if their adjacency pattern agrees on more than half of the remaining vertices. But this implies that for every copy $H'$ of $H$ inserted into the blow up of $G$, all vertices of $H'$ are in the same $X_i$, and thus the $5$-partition of $V(G_2)$ gives a $5$-partition of $V(G)$, a contradiction.
\end{proof}
\end{document}
\begin{document}
\title{Permutation Complexity of the Thue-Morse Word}
\begin{abstract}
Given a countable set $X$ (usually taken to be $\N$ or $\Z$), an infinite permutation $\pi$ of $X$ is a linear ordering $\prec_\pi$ of $X$, introduced in $\cite{FlaFrid}$. This paper investigates the combinatorial complexity of the infinite permutation on $\N$ associated with the well-known and well-studied Thue-Morse word. A formula for the complexity is established by studying patterns in subpermutations and the action of the Thue-Morse morphism on the subpermutations.
$
$
$\textbf{Keywords:}$ infinite permutation, permutation complexity, Thue-Morse word
\end{abstract}
\section{Introduction}
Permutation complexity of aperiodic words is a relatively new notion of word complexity which was first introduced and studied by Makarov $\cite{Makar06}$ based on ideas of S.V. Avgustinovich (see the acknowledgements in $\cite{FlaFrid}$), and is based on the idea of an infinite permutation associated to an aperiodic word. For an infinite aperiodic word $\w$, no two shifts of $\w$ are identical. Thus, given a linear order on the symbols used to compose $\w$, no two shifts of $\w$ are equal lexicographically. The infinite permutation associated with $\w$ is the linear order on $\N$ induced by the lexicographic order of the shifts of $\w$. The permutation complexity of the word $\w$ will be the number of distinct subpermutations of a given length of the infinite permutation associated with $\w$.
Infinite permutations associated with infinite aperiodic words over a binary alphabet are fairly well behaved, but many of the arguments used for binary words break down when used with words over more than two symbols. Given a subpermutation of length $n$ of an infinite permutation associated with a binary word, a portion of length $n-1$ of the word can be recovered from the subpermutation. This is not always the case for subpermutations associated with words over 3 or more symbols. For example, consider the permutation $(1 \hspace{0.5ex} 2 \hspace{0.5ex} 3)$. If this permutation is associated with a binary word over $\{0,1 \}$, with $0<1$, it could only correspond to the word $00$. On the other hand, if this permutation is associated with a word over 3 symbols, say $\{0,1,2 \}$ with $0<1<2$, then the permutation could be associated with any of $00$, $01$, $11$, or $12$.
For binary words the subpermutations depend on the order on the symbols used to compose $\w$, but the permutation complexity does not depend on the order. For words over 3 or more symbols, not only do the subpermutations depend on the order on the alphabet but so does the permutation complexity. For example, consider the Fibonacci word
$$t = 0100101001001010010100100101\ldots,$$
defined by iterating the morphism $0 \mapsto 01, 1 \mapsto 0$ on the letter $0$, and suppose the 1s are replaced by alternating $a$'s and $b$'s to create the word:
$$\hat{t} = 0a00b0a00b00a0b00a0b00a00b0a\ldots.$$
If the symbols in $\hat{t}$ are ordered $0<a<b$ there will be 5 distinct subpermutations of length 3, and if the symbols are ordered $a<0<b$ there will be only 4 distinct subpermutations of length 3. The verification of this fact is left to the reader.
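Claims of this type are also easy to check by machine. The following sketch (with helper names chosen here only for illustration) compares shifts on a long finite prefix, which is more than enough to determine subpermutations of length $3$, and counts the distinct length-$3$ subpermutations of $\hat{t}$ under the two orders.
\begin{verbatim}
# Count distinct length-3 subpermutations of t-hat under 0<a<b and a<0<b.
def fibonacci_word(iters=25):
    w = "0"
    for _ in range(iters):                  # apply 0 -> 01, 1 -> 0
        w = w.replace("0", "2").replace("1", "0").replace("2", "01")
    return w

t = fibonacci_word()
out, use_a = [], True
for c in t:                                 # replace the 1s by alternating a, b
    out.append(("a" if use_a else "b") if c == "1" else "0")
    use_a = (not use_a) if c == "1" else use_a
t_hat = "".join(out)

def subperm(word, start, length, order, window=50):
    rank = {ch: order.index(ch) for ch in order}
    shifts = [tuple(rank[c] for c in word[start + i:start + i + window])
              for i in range(length)]
    by_rank = sorted(range(length), key=lambda i: shifts[i])
    perm = [0] * length
    for r, i in enumerate(by_rank):
        perm[i] = r + 1
    return tuple(perm)

for order in ("0ab", "a0b"):
    perms = {subperm(t_hat, i, 3, order) for i in range(3000)}
    print(order, len(perms))                # expect 5 and 4, as claimed above
\end{verbatim}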
In view of the notion of an infinite permutation associated to an aperiodic word, it is natural to compute the permutation complexity of well-known classes of words. In $\cite{Makar09}$, Makarov computes the permutation complexity of Sturmian words. The goal of this paper is to determine the permutation complexity of the Thue-Morse word.
The Thue-Morse word, $T = T_0T_1T_2 \cdots$, is:
$$ T = 0110 1001 1001 0110 1001 0110 0110 1001 \cdots,$$
which can be generated by the morphism:
$$\mu_T:0 \mapsto 01, \hspace{1.5ex} 1 \mapsto 10, $$
by iterating on the letter $0$. Axel Thue introduced this word in his studies of repetitions in words, and proved that the word $T$ is overlap-free ($\cite{Thue12}$). A word $\w$ is said to be $\textit{overlap-free}$ if it does not contain a factor of the form $vuvuv$ for words $u$ and $v$, with $v$ non-empty.
The Thue-Morse word was independently rediscovered by Marston Morse in 1921 $\cite{Morse21}$ through his study of differential geometry, and used in the foundations of symbolic dynamics. For a more in-depth look at further properties, independent discoveries, and applications of the Thue-Morse word see $\cite{AllSha99}$.
The factor complexity of the Thue-Morse word was computed independently by two groups in 1989, Brlek $\cite{Brlek89}$ and de Luca and Varricchio $\cite{LucaVarr89}$. Our proof of the permutation complexity of the Thue-Morse word does not use the factor complexity function.
The permutation complexity of the Thue-Morse word can be found as follows. For any $n \geq 2$, we can write $n$ as $n = 2^a + b$, with $0 < b \leq 2^a$. Using this notation, it will be shown that the formula for the permutation complexity of $T$, initially conjectured by M. Makarov, is
$$ \tau_T(n) = 2( 2^{a+1} + b - 2 ). $$
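As a quick machine check, not used anywhere in the proof, this formula can be compared with a brute-force count for small $n$. Shifts are compared on a short window, which suffices here because $T$ is overlap-free, so two shifts at distance $d$ agree on at most $d$ initial letters.
\begin{verbatim}
# Brute-force permutation complexity of the Thue-Morse word vs. the formula.
def thue_morse(doublings=14):
    t = "0"
    for _ in range(doublings):
        t += "".join("1" if c == "0" else "0" for c in t)
    return t

T = thue_morse()                           # prefix of length 2^14

def tau(n, positions=4000, window=64):
    perms = set()
    for a in range(positions):
        shifts = [T[a + i:a + i + window] for i in range(n)]
        by_rank = sorted(range(n), key=lambda i: shifts[i])
        perm = [0] * n
        for r, i in enumerate(by_rank):
            perm[i] = r + 1
        perms.add(tuple(perm))
    return len(perms)

def formula(n):                            # n = 2^a + b with 0 < b <= 2^a
    a = (n - 1).bit_length() - 1
    b = n - 2 ** a
    return 2 * (2 ** (a + 1) + b - 2)

for n in range(2, 12):
    print(n, tau(n), formula(n))           # the two columns should agree
\end{verbatim}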
We give a non-trivial proof of this formula here. We start with some basic notation and definitions. Some properties of infinite permutations are given in Section $\ref{GeneralPermResults}$. The infinite permutation associated with the Thue-Morse word, $\pi_T$, is introduced in Section $\ref{ThueMorsePermutation}$. Patterns found in the subpermutations of $\pi_T$ are studied in Section $\ref{TypeKandCompPairs}$, while Section $\ref{SecTypeOnePairs}$ investigates when a specific pattern occurs. The formula for the permutation complexity is established in Section $\ref{FormulaForPermComp}$. Low order subpermutations are listed in Appendix $\ref{SecTheSubperms}$ to be used as a base case for induction arguments.
\subsection{Words}
A $\textit{word}$ is a finite, (right) infinite, or bi-infinite sequence of symbols taken from a finite non-empty set, $A$, called an $\textit{alphabet}$. The standard operation on words is concatenation, and is represented by juxtaposition of letters and words. A $\textit{finite word}$ over $A$ is a word of the form $u = a_1 a_2 \ldots a_n$ with $n \geq 0$ (if $n=0$ we say $u$ is the $\textit{empty word}$, denoted $\epsilon$) and each $a_i \in A$; the $\textit{length}$ of the word $u$ is the number of symbols in the sequence and is denoted by $\abs{u} = n$. For $a \in A$, let $\abs{u}_a$ denote the number of occurrences of the letter $a$ in the word $u$. The set of all finite words over the alphabet $A$ is denoted by $A^*$, and is a free monoid with concatenation of words as the operation.
A $\textit{(right) infinite word}$ over $A$ is a word of the form $\w = \w_0 \w_1 \w_2 \ldots$ with each $\w_i \in A$, and the set of all infinite words over $A$ is denoted $A^\N$. Given $\w \in A^* \cup A^\N$, any word of the form $u=\w_i\w_{i+1} \ldots \w_{i+n-1}$, with $i \geq 0$, is called a $\textit{factor}$ of $\w$ of length $n \geq 1$. The set of all factors of a word $\w$ is denoted by $\scr{F}(\w)$. The set of all factors of length $n$ of $\w$ is denoted $\scr{F}_\w(n)$, and let $\rho_\w(n) = \abs{\scr{F}_\w (n)}$. The function $\rho_\w: \N \rightarrow \N $ is called the $\textit{factor complexity function}$, or $\textit{subword complexity function}$, of $\w$ and it counts the number of factors of length $n$ of $\w$. For a natural number $i$ we denote by $\w[i] = \w_i\w_{i+1}\w_{i+2}\w_{i+3}\ldots$ the $i$$\textit{-letter shift of}$ $\w$. For natural numbers $i \leq j$, $\w[i,j] = \w_i\w_{i+1}\w_{i+2} \ldots \w_j$ denotes the factor of length $j-i+1$ starting at position $i$ in $\w$.
For words $u \in A^*$ and $v \in A^* \cup A^\N$ where $\w = uv$, we call $u$ a $\textit{prefix}$ of $\w$ and $v$ a $\textit{suffix}$ of $\w$. A word $\w$ is said to be $\textit{periodic}$ of period $p$ if for each $i \in \N$, $\w_i = \w_{i+p}$, and $\w$ is said to be $\textit{eventually periodic}$ of period $p$ if there exists an $N \in \N$ so that for each $i > N$, $\w_i = \w_{i+p}$; or equivalently, $\w$ has a periodic suffix. A word $\w$ is said to be $\textit{aperiodic}$ if it is not periodic or eventually periodic.
Let $A$ and $B$ be two finite alphabets. A map $\varphi: A^* \rightarrow B^*$ so that $\varphi(uv) = \varphi(u)\varphi(v)$ for any $u,v \in A^*$ is called a $\textit{morphism}$ of $A^*$ into $B^*$, and $\varphi$ is defined by the image of each letter in $A$. A morphism on $A$ is a morphism from $A^*$ into $A^*$, also called an $\textit{endomorphism}$ of $A$. A morphism $\varphi$ is said to be $\textit{non-erasing}$ if the image of any non-empty word is not empty.
The action of a morphism $\varphi$ on $A$ can naturally be extended from $A^*$ to $A^\N$. For any $\w = \w_0\w_1\w_2\ldots \in A^\N$, we define $\varphi(\w) = \varphi(\w_0)\varphi(\w_1)\varphi(\w_2)\ldots$ as in the case for words in $A^*$. We say that a word $\w$ is a $\textit{fixed point}$ of the morphism $\varphi$ if $\varphi(\w) = \w$. If $\varphi$ is a morphism on $A$ and if $\varphi(a) = au$ for some $a \in A$ and non-empty $u \in A^*$, $\varphi$ is said to be $\textit{prolongable}$ on $a$. If $\varphi$ is a morphism on $A$ that is prolongable on some $a \in A$, then $\varphi^n(a)$ is a proper prefix of $\varphi^{n+1}(a)$ for each $n \in \N$. The limit of the sequence $\left\{ \varphi^n(a) \right\}_{n \in \N}$ will be the unique infinite word
$$ \w = \lim_{n \rightarrow \infty} \varphi^n(a) = \varphi^\infty(a) = au\varphi(u)\varphi^2(u) \cdots $$
where $\w$ is a fixed point of $\varphi$, and we say that $\w$ is $\textit{generated}$ by $\varphi$.
\subsection{Permutations on words}
The idea of an infinite permutation that will be here used was introduced in $\cite{FlaFrid}$. This paper will be dealing with permutation complexity of infinite words so the set used in the following definition will be $\N$ rather than an arbitrary countable set. To define an $\textit{infinite permutation}$ $\pi$, start with a linear order $\prec_\pi$ on $\N$, together with the usual order $<$ on $\N$. To be more specific, an infinite permutation is the ordered triple $\pi = \left\langle \N,\prec_\pi,< \right\rangle$, where $\prec_\pi$ and $<$ are linear orders on $\N$. The notation to be used here will be $\pi(i) < \pi(j)$ rather than $i \prec_\pi j.$
Given an infinite aperiodic word $\w = \w_0\w_1\w_2 \ldots$ on an alphabet $A$, fix a linear order on $A$. We will use the binary alphabet $A = \{0, 1\}$ and use the natural ordering $0<1$. Once a linear order is set on the alphabet, we can then define an order on the natural numbers based on the lexicographic order of shifts of $\w$. Considering two shifts of $\w$ with $a \neq b$, $\w[a] = \w_a\w_{a+1}\w_{a+2} \ldots$ and $\w[b] = \w_b\w_{b+1}\w_{b+2} \ldots$, we know that $\w[a] \neq \w[b]$ since $\w$ is aperiodic. Thus there exists some minimal number $c \geq 0$ so that $\w_{a+c} \neq \w_{b+c}$ and for each $0 \leq i < c$ we have $\w_{a+i} = \w_{b+i}$. We call $\pi_\w$ the infinite permutation associated with $\w$ and say that $\pi_\w(a) < \pi_\w(b)$ if $\w_{a+c} < \w_{b+c}$, else we say that $\pi_\w(b) < \pi_\w(a)$.
For natural numbers $a \leq b$ consider the factor $\w[a, b] = \w_a\w_{a+1} \ldots \w_b$ of $\w$ of length $b - a + 1$. Denote the finite permutation of $\{ 1, 2, \ldots , b - a + 1 \}$ corresponding to the linear order by $\pi_\w[a,b]$. That is $\pi_\w[a,b]$ is the permutation of $\{ 1, 2, \ldots , b - a + 1 \}$ so that for each $0 \leq i,j \leq (b - a)$, $ \pi_\w[a,b](i) < \pi_\w[a,b](j)$ if and only if $\pi_\w(a + i) < \pi_\w(a + j)$. Say that $p = p_0p_1 \cdots p_n$ is a $\textit{(finite) subpermutation}$ of $\pi_\w$ if $p = \pi_\w[a,a+n]$ for some $a,n \geq 0$. For the subpermutation $p = \pi_\w[a,a+n]$ of $\{1, 2, \cdots, n+1 \}$, we say the $\textit{length}$ of $p$ is $n+1$.
Denote the set of all subpermutations of $\pi_\w$ by $Perm_{\pi_\w}$, and for each positive integer $n$ let
$$Perm_{\pi_\w}(n) = \{ \hspace{1.0ex} \pi_\w[i,i+n-1] \hspace{1.0ex} \left| \hspace{1.0ex} i \geq 0 \right. \hspace{1.0ex} \}$$
denote the set of distinct finite subpermutations of $\pi_\w$ of length $n$. The $\textit{permutation complexity function}$ of $\w$ is defined as the total number of distinct subpermutations of $\pi_\w$ of a length $n$, denoted $\tau_\w(n) = \abs{Perm_{\pi_\w}(n)}$.
\begin{example}
Let's consider the well-known Fibonacci word,
$$t = 0100101001001010010100100101\ldots,$$
with the alphabet $A = \{0,1 \}$ ordered as $0 < 1$. We can see that $t[2] = 001010\ldots $ is lexicographically less than $t[1] = 100101\ldots$, and thus $\pi_t(2) < \pi_t(1)$.
Then for a subpermutation, consider the factor $t[3,5] = 010$. We see that $\pi_t[3,5] = (231)$ because in lexicographic order we have $\pi_t(5) < \pi_t(3) < \pi_t(4)$.
\end{example}
\section{Some General Permutation Results}
\label{GeneralPermResults}
Initial work was done with infinite binary words (see $\cite{Makar06,FlaFrid,Makar09,Makar09TM,Makar10,AvgFriKamSal}$). Suppose $\w = \w_0\w_1\w_2\ldots$ is an aperiodic infinite word over the alphabet $A=\{ 0,1 \}$. First let us look at some remarks about permutations generated by binary words, where we use the natural order on $A$.
\begin{claim}
\emph{($\cite{Makar06}$)}
\label{PCClaim01}
For an infinite aperiodic word $\w$ over $A = \{ 0, 1 \}$ with the natural ordering we have:
(1) $\pi_\w(i) < \pi_\w(i+1)$ if and only if $\w_i = 0$.
(2) $\pi_\w(i) > \pi_\w(i+1)$ if and only if $\w_i = 1$.
(3) If $\w_i = \w_j$, then $\pi_\w(i) < \pi_\w(j)$ if and only if $\pi_\w(i+1) < \pi_\w(j+1)$
\end{claim}
\begin{lemma}
\emph{($\cite{Makar06}$)}
\label{PermComp01}
Given two infinite binary words $u = u_0u_1\ldots$ and $v=v_0v_1 \ldots$ with $\pi_u[0, n+1] = \pi_v[0, n+1]$, it follows that $u[0,n] = v[0,n]$.
\end{lemma}
A trivial upper bound for $\tau_\w(n)$ is the number of permutations of length $n$, which is $n!$. Lemma $\ref{PermComp01}$ directly implies a lower bound for the permutation complexity of a binary aperiodic word $\w$, namely the factor complexity of $\w$. Thus, initial bounds on the permutation complexity can be seen to be:
$$ \rho_\w(n-1) \leq \tau_\w(n) \leq n!$$
For $a \in A = \{0,1\}$, let $\bar{a}$ denote the $\textit{complement}$ of $a$, that is $\bar{0} = 1$ and $\bar{1} = 0$. If $u = u_1u_2u_3 \cdots$ is a word over $A$, the $\textit{complement}$ of $u$ is defined to be the word composed of the complements of the letters of $u$, that is $\bar{u} = \bar{u}_1\bar{u}_2\bar{u}_3 \cdots$. Let $\w$ be an infinite aperiodic binary word. We say the set of factors of $\w$ is $\textit{closed under complementation}$ if $\bar{u} \in \scr{F}(\w)$ for each $u \in \scr{F}(\w)$. The following lemma shows an interesting property of the subpermutations of the infinite permutation $\pi_\w$.
\begin{lemma}
\label{ClosedUnderCompliment}
Let $\w = \w_0\w_1\w_2\cdots$ be an infinite aperiodic binary word with factors closed under complementation. If $p$ is a subpermutation of $\pi_\w$ of length $n$, then the permutation $q$ defined by $q_i = n-p_i +1$ for each $i$ is also a subpermutation of $\pi_\w$ of length $n$.
\end{lemma}
\begin{proof}
Let $p$ be a subpermutation of $\pi_\w$. There is an $a \in \N$ so that $p = \pi_\w[a,a+n-1]$. For each $i,j \in \{0,1, \ldots, n-1 \}$, if $p_i < p_j$ then $\w[a+i] < \w[a+j]$ and there is some finite word $u_{i,j}$ so that
\begin{align*}
\w[a+i] &= u_{i,j}0\cdots \\
\w[a+j] &= u_{i,j}1\cdots
\end{align*}
Let $v$ be a prefix of $\w[a]$ long enough that, for all $i,j \in \{ 0,1, \ldots, n-1 \}$ with $p_i < p_j$, the occurrence of $u_{i,j}0$ starting at position $i$ of $\w[a]$ and the occurrence of $u_{i,j}1$ starting at position $j$ of $\w[a]$ both lie inside $v$. Since the set of factors of $\w$ is closed under complementation, $\bar{v}$ is a factor of $\w$. There is a $b$ so that $\bar{v}$ is a prefix of $\w[b]$, and let $q = \pi_\w[b,b+n-1]$. For each $i,j \in \{0, 1, \ldots, n-1 \}$, if $p_i < p_j$, then
\begin{align*}
\w[b+i] &= \bar{u}_{i,j}1\cdots \\
\w[b+j] &= \bar{u}_{i,j}0\cdots
\end{align*}
and thus, $q_i > q_j$.
For any $i \in \{0,1, \ldots, n-1 \}$ there are $p_i - 1$ many $j$ so that $p_j < p_i$ and there are $n - p_i$ many $j$ so that $p_j > p_i$. Therefore there are $n - p_i$ many $j$ so that $q_j < q_i$, so $q_i = n - p_i + 1$.
$\qed$
\end{proof}
\begin{definition}
\label{SameForm}
Two permutations $p$ and $q$ of $\{1, 2, \ldots, n \}$ (indexed $p_0, \ldots, p_{n-1}$ and $q_0, \ldots, q_{n-1}$) have the $\textit{same form}$ if for each $i = 0, 1, \ldots, n-2$, $p_i < p_{i+1}$ if and only if $q_i < q_{i+1}$. For a binary word $u$ of length $n-1$, say that $p$ $\textit{has form u}$ if
$$p_i<p_{i+1} \Longleftrightarrow u_i = 0$$
for each $i = 0, 1, \ldots, n-2$.
\end{definition}
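In computational terms the form of a permutation is just its pattern of rises and falls; a minimal sketch (illustration only, the function name is ours) is the following.
\begin{verbatim}
def form(p):
    """Up-down pattern of p (indexed from 0): letter 0 where p rises,
    letter 1 where it falls."""
    return "".join("0" if p[i] < p[i + 1] else "1" for i in range(len(p) - 1))

print(form([2, 4, 3, 1]))   # "011": one rise followed by two falls
\end{verbatim}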
\section{The Thue-Morse Permutation}
\label{ThueMorsePermutation}
In this section the action of the Thue-Morse morphism on the subpermutations of $\pi_T$ will be investigated. This action induces a well-defined map on the subpermutations of $\pi_T$ and leads to an initial upper bound on the permutation complexity of $T$.
The Thue-Morse word is:
$$ T = 0110 1001 1001 0110 1001 0110 0110 1001 \cdots,$$
and the Thue-Morse morphism is:
$$\mu_T:0 \rightarrow 01, \hspace{1.5ex} 1 \rightarrow 10. $$
It can readily be verified that if $a$ is a natural number then
$$\mu_T(T[a]) = T[2a]$$
since for any letter $x \in \{0,1 \}$, $\abs{\mu_T(x)}=2$.
A nice property of the factors of $T$ is that any factor of length 5 or greater contains either $00$ or $11$. Another interesting property is that for any $i \in \N$, $T[2i,2i+1]$ will be either 01 or 10. Thus any occurrence of $00$ or $11$ must be a factor of the form $T[2i+1,2i+2]$ for some $i \in \N$. Therefore factors $T[2i,2i+n]$ and $T[2j+1,2j+1+n]$ with $n \geq 4$ cannot be equal, since in the first factor every occurrence of $00$ or $11$ begins at an odd offset, while in the second every such occurrence begins at an even offset.
Let $\pi_T$ be the infinite permutation associated to the Thue-Morse word $T$. For notational purposes, the set of all subpermutations of $\pi_T$ of length $n$ will be denoted as $Perm(n)$.
Let $a$ and $n$ be natural numbers and suppose we want to determine if $T[a] < T[a+n]$. There will be some (possibly empty) factor $u$ of $T$, and suffixes $x$ and $y$ of $T$ so that $T[a] = u\lambda x$ and $T[a+n] = u \bar{\lambda}y$, for some $\lambda \in \{0,1 \}$. If $\abs{u} \geq n+1$ we would have $T_{a+i} = T_{a+n+i}$ for each $i = 0, 1, \ldots, n$, and thus $T[a,a+n] = T[a+n,a+2n]$, and $T[a,a+2n]$ would violate the fact that $T$ is overlap-free. Thus $\abs{u} \leq n$, and if $\abs{u} = n$ we have $T[a,a+n-1] = T[a+n,a+2n-1]$ and $T_{a+2n} = \overline{T_{a+n}}$. Therefore the subpermutation $\pi_T[a,a+n]$ can be determined within the factor $T[a,a+2n]$ of length $2n+1$. Thus the trivial bounds for the permutation complexity of the Thue-Morse word $T$ are
$$ \rho_T(n-1) \leq \tau_T(n) \leq \rho_T(2n-1). $$
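The observation that $\pi_T[a,a+n]$ is already determined by the factor $T[a,a+2n]$ can be made concrete. The sketch below (again only an illustration; the helper names are ours) generates a prefix of $T$ with the morphism $\mu_T$ and ranks the shifts $T[a],\ldots,T[a+n]$ using nothing but the window of length $2n+1$.
\begin{verbatim}
# Illustration only: pi_T[a, a+n] computed from the window T[a, a+2n].

def thue_morse_prefix(n_iter=12):
    """Prefix of the Thue-Morse word via mu_T: 0 -> 01, 1 -> 10."""
    t = "0"
    for _ in range(n_iter):
        t = "".join("01" if c == "0" else "10" for c in t)
    return t

def subperm_from_window(T, a, n):
    """Rank T[a], ..., T[a+n].  By the discussion above, every comparison
    is decided before position a + 2n, so truncating the shifts at the
    end of the window does not change the resulting ranks."""
    window = T[a:a + 2 * n + 1]
    positions = list(range(n + 1))
    ranked = sorted(positions, key=lambda i: window[i:])
    rank = {i: r + 1 for r, i in enumerate(ranked)}
    return [rank[i] for i in positions]

T = thue_morse_prefix()
print(subperm_from_window(T, 5, 4))   # [2, 3, 5, 4, 1] = pi_T[5, 9]
\end{verbatim}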
Since the factor complexity of the Thue-Morse word is known (see $\cite{Brlek89,LucaVarr89}$), all factors of $T$ of a given length can be listed. Thus for any natural number $n$, all factors of $T$ of length $2n-1$ can be identified, and hence the set $Perm(n)$ of all subpermutations of $\pi_T$ of length $n$ can be identified as well. The subpermutations have been computed for relatively small lengths (up to $n=65$), and in these cases no more than two subpermutations of any given length were found to have the same form. In other words, for any factor $u$ of $T$ of length $n \leq 64$ there are at most two subpermutations of length $n+1$ having form $u$.
This section will deal with some properties of $\pi_T$. Something to note about the Thue-Morse morphism is that it is an order preserving morphism, as shown by the following lemma.
\begin{lemma}
\label{OrderPresMorph}
For natural numbers $a$ and $b$, $T[a] < T[b]$ if and only if $\mu_T(T[a]) < \mu_T(T[b])$.
\end{lemma}
\begin{proof}
If $T[a] < T[b]$, then there exists a finite factor $u$ of $T$, and suffixes $x$ and $y$ of $T$ so that
\begin{align*}
T[a] &= u0x \\
T[b] &= u1y.
\end{align*}
Thus we can see
\begin{align*}
\mu_T(T[a]) &= \mu_T(u)01\mu_T(x) \\
\mu_T(T[b]) &= \mu_T(u)10\mu_T(y)
\end{align*}
and therefore $\mu_T(T[a]) < \mu_T(T[b])$.
Suppose $\mu_T(T[a]) < \mu_T(T[b])$, then there exists a finite factor $u$ of $T$, and suffixes $x$ and $y$ of $T$ so that
\begin{align*}
\mu_T(T[a]) &= u0x \\
\mu_T(T[b]) &= u1y
\end{align*}
If $u$ ends with a $0$, then $u0$ ends with $00$; since $\mu_T(T[a])$ is a concatenation of the blocks $01$ and $10$, any occurrence of $00$ in it must straddle two blocks, so the final $0$ of $u$ is the second letter of a block, $u$ ends with the block $10$, and $0x$ starts with the block $01$. If $u$ ends with a $1$, then $u1$ ends with $11$, and similarly $u$ ends with the block $01$ and $1y$ starts with the block $10$. In either case $u$ is a concatenation of blocks, so there is some factor $v$ so that $\mu_T(v) = u$. Hence a prefix of $\mu_T(T[a])$ is $\mu_T(v)01$ and a prefix of $\mu_T(T[b])$ is $\mu_T(v)10$.
Thus a prefix of $T[a]$ is $v0$ and a prefix of $T[b]$ is $v1$. Therefore $T[a] < T[b]$.
$\qed$
\end{proof}
\begin{lemma}
\label{ImgOfZerosAndOnesTM}
Let $u$ and $v$ be shifts of $T$ such that $u = 0T[a]$ and $v = 1T[b]$ for some $a$ and $b$; hence $u<v$, $\mu_T(u) = 01\mu_T(T[a])$, and $\mu_T(v) = 10\mu_T(T[b])$. Then $0\mu_T(T[b]) < 01\mu_T(T[a]) < 10\mu_T(T[b]) < 1\mu_T(T[a])$.
\end{lemma}
\begin{proof}
The first letters in $T[a]$ will be either $01$ or $1$, thus $\mu_T(T[a])$ will start with either $0110$ or $10$, respectively. The first letters in $T[b]$ will be either $10$ or $0$, thus $\mu_T(T[b])$ will start with either $1001$ or $01$, respectively.
Then $0\mu_T(T[b])$ will start with $01001$ or $001$ and $01\mu_T(T[a])$ will start with $010110$ or $0110$. Thus $001<01001<010110<0110$, so
$$0\mu_T(T[b]) < 01\mu_T(T[a]).$$
Then $10\mu_T(T[b])$ will start with $101001$ or $1001$ and $1\mu_T(T[a])$ will start with $10110$ or $110$. Thus $1001<101001<10110<110$, so
$$10\mu_T(T[b]) < 1\mu_T(T[a]).$$
Therefore
$$0\mu_T(T[b]) < 01\mu_T(T[a]) < 10\mu_T(T[b]) < 1\mu_T(T[a]).$$
$\qed$
\end{proof}
Let $u$ be a factor of $T$ of length $n$. There is an $a \in \N$ so that $u = T[a,a+n-1]$. Also recall that $\abs{u}_1$ is the number of occurrences of the letter $1$ in $u$, and that $\abs{u}_1 = n-\abs{u}_0$. Let $p = \pi_T[a,a+n]$ be a subpermutation of $\pi_T$ with form $u$. Then $\mu_T(u) = T[2a, 2a + 2n-1]$, and let $p'$ be the subpermutation $p' = \pi_T[2a, 2a + 2n]$ with form $\mu_T(u)$. When Lemma $\ref{ImgOfZerosAndOnesTM}$ is used with this notation, for $0\leq i,j \leq n-1$ with $T_{a+i}=0$ and $T_{a+j} = 1$, we have $p_i < p_j$ and $p'_{2j+1} < p'_{2i} < p'_{2j} < p'_{2i+1}$. The following proposition describes the values of $p'$ in terms of the values of $p$.
\begin{proposition}
\label{CalculateTheFwdImage}
Let $u$, $p$, and $p'$ be as described above. For any $i \in \{0, 1, \ldots, n \}$:
$$ p'_{2i} = p_i + \abs{u}_1 $$
and for any $i \in \{0, 1, \ldots, n-1 \}$:
$$ p'_{2i+1} = \begin{cases}
p_i + \abs{u}_1+(n+1) & \text{if $p_i < p_{i+1}$ and $p_i < p_n$} \\
p_i + \abs{u}_1+n & \text{if $p_i < p_{i+1}$ and $p_i > p_n$} \\
p_i + \abs{u}_1 - n & \text{if $p_i > p_{i+1}$ and $p_i < p_n$} \\
p_i + \abs{u}_1 - (n+1) & \text{if $p_i > p_{i+1}$ and $p_i > p_n$}
\end{cases} $$
\end{proposition}
\begin{proof}
To take care of the $p'_{2i}$ terms, let $i \in \{0, 1, \ldots, n \}$. There will be $p_i-1$ many $j$ so that $p_i > p_j$, so there are $p_i-1$ many $j$ so that $p'_{2i} > p'_{2j}$. Clearly, if $p_i < p_j$ then $p'_{2i} < p'_{2j}$. So there are exactly $p_i-1$ many even $j$ so that $p'_{2i} > p'_j$. There are $\abs{u}_1$ many $j$ so that $T_{a+j} = 1$, so there are $\abs{u}_1$ many $j$ so that $p'_{2i} > p'_{2j+1}$ and $\abs{u}_0$ many $j$ so that $T_{a+j}=0$, so $p'_{2i} < p'_{2j+1}$. So there are exactly $\abs{u}_1$ many odd $j$ so that $p'_{2i} > p'_j$. Thus there are exactly $p_i-1+\abs{u}_1$ many $j$ so that $p'_{2i} > p'_j$, and therefore $ p'_{2i} =(p_i-1+\abs{u}_1)+1 = p_i + \abs{u}_1 $.
The $p'_{2i+1}$ terms will be done in two cases. First when $p_i < p_{i+1}$ and then when $p_i > p_{i+1}$.
$\textbf{Case a:}$ Suppose that $p_i < p_{i+1}$, so $T_{a+i} = 0$. For each $j = 0, 1, \ldots, n$ we must have $p'_{2i+1} > p'_{2j}$, so for each even $j$ (there are $n+1$ many such $j$) $p'_{2i+1} > p'_j$. There are $\abs{u}_1$ many $j$ so that $T_{a+j} = 1$, so there are $\abs{u}_1$ many $j$ so that $p'_{2i+1} > p'_{2j+1}$. Thus the only other $j$ where $p'_{2j+1}$ can be less than $p'_{2i+1}$ are those $j \in \{0, 1, \ldots, n-1 \}$ where $T_{a+j} = 0$ and $p_i > p_j$.
$\textbf{Subcase a.1:}$ If $p_i < p_n$ then there are $p_i - 1$ many $j$ so that $T_{a+j}=0$ and $p_i > p_j$, and then $n-p_i-\abs{u}_1= \abs{u}_0 - p_i$ many $j$ so that $T_{a+j}=0$ and $p_i < p_j$. Thus there can only be $(n+1) + \abs{u}_1 + p_i - 1$ many $j$ so that $p'_{2i+1} > p'_j$, and therefore $p'_{2i+1} = (n+1) + \abs{u}_1 + p_i - 1 + 1 = p_i + \abs{u}_1 + (n+1)$.
$\textbf{Subcase a.2:}$ If $p_i > p_n$ then there are $p_i - 2$ many $j$ so that $T_{a+j}=0$ and $p_i > p_j$ (since $T_{a+n}$ is not in $u = T[a,a+n-1]$), and then $n-(p_i-1)-\abs{u}_1= \abs{u}_0 - (p_i - 1)$ many $j$ so that $T_{a+j}=0$ and $p_i < p_j$. Thus there can only be $(n+1) + \abs{u}_1 + p_i - 2$ many $j$ so that $p'_{2i+1} > p'_j$, and therefore $p'_{2i+1} = (n+1) + \abs{u}_1 + p_i - 2 + 1 = p_i + \abs{u}_1 + n$.
$\textbf{Case b:}$ Suppose that $p_i > p_{i+1}$, so $T_{a+i} = 1$. For each $j = 0, 1, \ldots, n$ we must have $p'_{2i+1} < p'_{2j}$, so for each even $j$ (there are $n+1$ many such $j$) $p'_{2i+1} < p'_j$. There are $\abs{u}_0$ many $j$ so that $T_{a+j} = 0$, so there are $\abs{u}_0$ many $j$ so that $p'_{2i+1} < p'_{2j+1}$. Thus the only other $j$ where $p'_{2j+1}$ can be less than $p'_{2i+1}$ are those $j \in \{0, 1, \ldots, n-1 \}$ where $T_{a+j} = 1$ and $p_i > p_j$.
$\textbf{Subcase b.1:}$ If $p_i < p_n$ then there are $(p_i - 1) - \abs{u}_0$ many $j$ so that $T_{a+j}=1$ and $p_i > p_j$, and there can only be $\abs{u}_1 - (p_i - 1 - \abs{u}_0) - 1 = n-p_i$ many $j$ so that $T_{a+j}=1$ and $p_i < p_j$ (since $T_{a+n}$ is not in $u = T[a,a+n-1]$). Thus there can only be $(p_i - 1) - \abs{u}_0 = p_i - 1 - (n-\abs{u}_1) = p_i +\abs{u}_1 - n -1$ many $j$ so that $p'_{2i+1} > p'_j$, and therefore $p'_{2i+1} = p_i +\abs{u}_1 - n -1 + 1 = p_i + \abs{u}_1 - n$.
$\textbf{Subcase b.2:}$ If $p_i > p_n$ then there are $(p_i - 2) - \abs{u}_0$ many $j$ so that $T_{a+j}=1$ and $p_i > p_j$ (since $T_{a+n}$ is not in $u = T[a,a+n-1]$), and there can only be $\abs{u}_1 - (p_i - 2 - \abs{u}_0) - 1 = (n+1)-p_i$ many $j$ so that $T_{a+j}=1$ and $p_i < p_j$. Thus there can only be $(p_i - 2) - \abs{u}_0 = p_i - 2 - (n-\abs{u}_1) = p_i +\abs{u}_1 - n -2$ many $j$ so that $p'_{2i+1} > p'_j$, and therefore $p'_{2i+1} = p_i +\abs{u}_1 - n -2 + 1 = p_i + \abs{u}_1 - (n+1)$.
$\qed$
\end{proof}
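The formula of Proposition $\ref{CalculateTheFwdImage}$ is easy to sanity-check by machine. The sketch below (illustration only) builds $p'$ from $p$ and $u$ exactly as in the proposition and compares the result with a direct computation from the word; it reuses $T$ and \texttt{subperm\_from\_window} from the earlier sketch.
\begin{verbatim}
def forward_image_by_formula(p, u):
    """Compute p' from p = pi_T[a, a+n] and u = T[a, a+n-1] (indices
    from 0) following Proposition CalculateTheFwdImage."""
    n = len(u)                       # p has length n + 1
    ones = u.count("1")              # |u|_1
    pp = [0] * (2 * n + 1)
    for i in range(n + 1):           # even positions
        pp[2 * i] = p[i] + ones
    for i in range(n):               # odd positions: the four cases
        if p[i] < p[i + 1]:
            pp[2 * i + 1] = p[i] + ones + (n + 1 if p[i] < p[n] else n)
        else:
            pp[2 * i + 1] = p[i] + ones - (n if p[i] < p[n] else n + 1)
    return pp

a, n = 5, 4
p = subperm_from_window(T, a, n)              # pi_T[5, 9] = [2, 3, 5, 4, 1]
u = T[a:a + n]                                # its form, T[5, 8] = "0011"
print(forward_image_by_formula(p, u))         # [4, 8, 5, 9, 7, 2, 6, 1, 3]
print(subperm_from_window(T, 2 * a, 2 * n))   # pi_T[10, 18]: the same
\end{verbatim}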
Fix a subpermutation $p=\pi_T[a,a+n]$ and let $p'=\pi_T[2a,2a+2n]$, so that the terms of $p'$ can be computed by the method of Proposition $\ref{CalculateTheFwdImage}$. Let $q=\pi_T[b,b+n]$, $b \neq a$, be a subpermutation of $\pi_T$ and let $q'=\pi_T[2b,2b+2n]$ as in Proposition $\ref{CalculateTheFwdImage}$. The following lemma concerns the relationship of $p$ and $q$ to $p'$ and $q'$; it shows that the assignment $p \mapsto p'$ can be used to define a map on the subpermutations of $\pi_T$, and this map is well defined by Proposition $\ref{CalculateTheFwdImage}$.
\begin{lemma}
\label{pISqIFFppISqp}
$p \neq q$ if and only if $p' \neq q'$.
\end{lemma}
\begin{proof}
Supposing that $p \neq q$, there are $i,j \in \{0, 1, \ldots, n \}$ so that $p_i < p_j$ and $q_i > q_j$ and thus
\begin{align*}
T[a+i] &< T[a+j] \\
T[b+i] &> T[b+j].
\end{align*}
Then since the Thue-Morse morphism is order preserving we have
\begin{align*}
T[2(a+i)] = \mu_T(T[a+i]) &< \mu_T(T[a+j]) = T[2(a+j)] \\
T[2(b+i)] = \mu_T(T[b+i]) &> \mu_T(T[b+j]) = T[2(b+j)].
\end{align*}
Therefore $p'_{2i}<p'_{2j}$ and $q'_{2i}>q'_{2j}$, so $p' \neq q'$.
To prove the converse by contraposition, suppose that $p=q$, so $p_i = q_i$ for each $i \in \{0, 1, \ldots, n \}$. Since $p=q$, $p$ and $q$ have the same form (indeed $p_i < p_{i+1}$ if and only if $q_i < q_{i+1}$), so $T[a,a+n-1] = T[b,b+n-1]$ and thus $T[2a,2a+2n-1] = T[2b,2b+2n-1]$. Then by Proposition $\ref{CalculateTheFwdImage}$, for each $j \in \{0, 1, \ldots, 2n \}$ we have $p'_j = q'_j$, and thus $p' = q'$.
Therefore if $p' \neq q'$ then $p \neq q$.
$\qed$
\end{proof}
\begin{corollary}
\label{CorTo_pisqiff}
If $p=\pi_T[a,a+n]=\pi_T[b,b+n]$ for some $a \neq b$, then $\pi_T[2a,2a+2n]=\pi_T[2b,2b+2n]$.
\end{corollary}
Thus there is a well-defined function on the subpermutations of $\pi_T$. Let $p = \pi_T[a,a+n]$, and define $\phi(p) = p' = \pi_T[2a,2a+2n]$ using the formula in Proposition $\ref{CalculateTheFwdImage}$. Thus we have the map
$$\phi:Perm(n+1) \rightarrow Perm(2n+1)$$
which is injective by Lemma $\ref{pISqIFFppISqp}$. Not all subpermutations of $\pi_T$ will be the image under $\phi$ of another subpermutation.
Let $n \geq 5$ and $a$ be natural numbers. Then $n$ and $a$ can each be either even or odd, and for the subpermutation $\pi_T[a,a+n]$ there exist natural numbers $b$ and $m$ so that one of the following four cases holds:
\begin{enumerate}
\item $\pi_T[a,a+n] = \pi_T[2b,2b+2m]$, even starting position with odd length
\item $\pi_T[a,a+n] = \pi_T[2b,2b+2m-1]$, even starting position with even length
\item $\pi_T[a,a+n] = \pi_T[2b+1,2b+2m]$, odd starting position with even length
\item $\pi_T[a,a+n] = \pi_T[2b+1,2b+2m+1]$, odd starting position with odd length
\end{enumerate}
Consider two subpermutations of length $n+1$, where $n \geq 5$: $\pi_T[2c, 2c+n]$ and $\pi_T[2d+1, 2d+n+1]$. The subpermutation $\pi_T[2c, 2c+n]$ will have form $T[2c, 2c+n-1]$, and $\pi_T[2d+1, 2d+n+1]$ will have form $T[2d+1, 2d+n]$. Since the length of these factors is at least 5, we know that $T[2c, 2c+n-1] \neq T[2d+1, 2d+n]$, and thus $\pi_T[2c, 2c+n] \neq \pi_T[2d+1, 2d+n+1]$ because they do not have the same form. Thus we can break up the set $Perm(n)$ into two classes of subpermutations, namely the subpermutations that start at an even position and those that start at an odd position. So say that $Perm_{ev}(n)$ is the set of subpermutations $p$ of length $n$ so that $p = \pi_T[2b,2b+n-1]$ for some $b$, and that $Perm_{odd}(n)$ is the set of subpermutations $p$ of length $n$ so that $p = \pi_T[2b+1,2b+n]$ for some $b$. Thus
$$Perm(n) = Perm_{ev}(n) \cup Perm_{odd}(n),$$
where we have
$$Perm_{ev}(n) \cap Perm_{odd}(n) = \emptyset.$$
Thus for $n \geq 3$, $Perm_{ev}(2n+1)$ is the set of all subpermutations of length $2n+1$ starting at an even position. So for $\pi_T[2a,2a+2n]$, we know there is a subpermutation $p = \pi_T[a,a+n]$ so that $\phi(p) = p' = \pi_T[2a,2a+2n]$. Thus the map
$$\phi:Perm(n+1) \rightarrow Perm_{ev}(2n+1)$$
is also a surjective map, and is thus a bijection. The next definition about the restriction of subpermutations will be helpful to count the size of the sets $Perm_{odd}(2n)$, $Perm_{ev}(2n)$, and $Perm_{odd}(2n+1)$.
\begin{definition}
Let $p = \pi[a,a+n]$ be a subpermutation of the infinite permutation $\pi$. The $\textit{left restriction of}$ $p$, denoted by $L(p)$, is the subpermutation of $p$ so that $L(p) = \pi[a, a+n-1]$. The $\textit{right restriction of}$ $p$, denoted by $R(p)$, is the subpermutation of $p$ so that $R(p) = \pi[a+1, a+n]$. The $\textit{middle restriction of}$ $p$, denoted by $M(p)$, is the subpermutation of $p$ so that $M(p) = R(L(p)) = L(R(p)) = \pi[a+1, a+n-1]$.
\end{definition}
For each $i$, there are $p_i-1$ terms in $p$ that are less than $p_i$ and there are $n+1-p_i$ terms that are greater than $p_i$. Thus consider $i \in \{0, 1, \ldots, n-1\}$ and the values of $L(p)_i$ and $R(p)_i$. If $p_0 < p_{i+1}$ there will be $p_{i+1}-2$ terms in $R(p)$ less than $R(p)_i$ so we have $R(p)_i = p_{i+1}-1$. In a similar sense, if $p_n < p_i$ we have $L(p)_i = p_i - 1$. If $p_0 > p_{i+1}$ there will be $p_{i+1}-1$ terms in $R(p)$ less than $R(p)_i$ so we have $R(p)_i = p_{i+1}$. In a similar sense, if $p_n > p_i$ we have $L(p)_i = p_i $.
The values in $M(p)$ can be found by computing either $R(L(p))$ or $L(R(p))$. Since $R(L(p))$ and $L(R(p))$ arise from the same positions $a+1, \ldots, a+n-1$, we have $R(L(p))_i < R(L(p))_j$ if and only if $L(R(p))_i < L(R(p))_j$. Therefore $R(L(p)) = L(R(p))$.
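These renumbering rules translate directly into code; the following sketch (illustration only, the function names are ours) computes the three restrictions and reproduces values used later in this section.
\begin{verbatim}
def left_restriction(p):
    """L(p): drop the last entry and close the gap in the values."""
    last = p[-1]
    return [v - 1 if v > last else v for v in p[:-1]]

def right_restriction(p):
    """R(p): drop the first entry and close the gap in the values."""
    first = p[0]
    return [v - 1 if v > first else v for v in p[1:]]

def middle_restriction(p):
    """M(p) = L(R(p)) = R(L(p))."""
    return left_restriction(right_restriction(p))

pp = [4, 8, 5, 9, 7, 2, 6, 1, 3]   # pi_T[10, 18], computed above
print(left_restriction(pp))        # [3, 7, 4, 8, 6, 2, 5, 1] = pi_T[10, 17]
print(right_restriction(pp))       # [7, 4, 8, 6, 2, 5, 1, 3] = pi_T[11, 18]
print(middle_restriction(pp))      # [6, 3, 7, 5, 2, 4, 1]    = pi_T[11, 17]
\end{verbatim}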
It should also be clear that if there are two subpermutations $p= \pi_T[a,a+n]$ and $q = \pi_T[b,b+n]$ so that $p=q$ then $L(p) = L(q)$, $R(p) = R(q)$, and $M(p) = M(q)$ since if $p=q$ then $p_i < p_j$ if and only if $q_i < q_j$.
For $p=\pi_T[a,a+n]$, we can then define three additional maps by looking at the left, right, and middle restrictions of $\phi(p) = p'$. These maps are
\begin{align*}
\phi_L:Perm(n+1) &\rightarrow Perm_{ev}(2n) \\
\phi_R:Perm(n+1) &\rightarrow Perm_{odd}(2n) \\
\phi_M:Perm(n+2) &\rightarrow Perm_{odd}(2n+1)
\end{align*}
and are defined by
\begin{align*}
\phi_L(p) &= L(\phi(p)) = L(p')\\
\phi_R(p) &= R(\phi(p)) = R(p')\\
\phi_M(p) &= M(\phi(p)) = M(p')
\end{align*}
It can be readily verified that these three maps are surjective. To see an example of this, consider the map $\phi_L$, and let $\pi_T[2b,2b+2n-1]$ be a subpermutation in $Perm_{ev}(2n)$. Then for the subpermutation $p=\pi_T[b,b+n]$, $\phi_L(p) = L(p') = \pi_T[2b,2b+2n-1]$ so $\phi_L$ is surjective. A similar argument will show that $\phi_R$ and $\phi_M$ are also surjective.
\begin{lemma}
\label{UpperBoundForTau}
For $n \geq 2$:
\begin{align*}
\tau_T(2n) &\leq 2(\tau_T(n+1)) \\
\tau_T(2n+1) &\leq \tau_T(n+1) + \tau_T(n+2)
\end{align*}
\end{lemma}
\begin{proof}
Let $n \geq 2$. We have:
\begin{align*}
\abs{Perm_{ev}(2n)} &\leq \abs{Perm(n+1)} \\
\abs{Perm_{odd}(2n)} &\leq \abs{Perm(n+1)} \\
\\
\abs{Perm_{ev}(2n+1)} &= \abs{Perm(n+1)} \\
\abs{Perm_{odd}(2n+1)} &\leq \abs{Perm(n+2)}
\end{align*}
since $\phi$ is a bijection, and the 3 maps $\phi_L$, $\phi_R$, and $\phi_M$ are all surjective. Thus we have the following inequalities:
\begin{align*}
\tau_T(2n) &= \abs{Perm(2n)} = \abs{Perm_{ev}(2n)} + \abs{Perm_{odd}(2n)} \\
&\leq \abs{Perm(n+1)} + \abs{Perm(n+1)} = 2(\tau_T(n+1)) \\
\\
\tau_T(2n+1) &= \abs{Perm(2n+1)} = \abs{Perm_{ev}(2n+1)} + \abs{Perm_{odd}(2n+1)} \\
&\leq \abs{Perm(n+1)} + \abs{Perm(n+2)} = \tau_T(n+1) + \tau_T(n+2)
\end{align*}
$\qed$
\end{proof}
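These inequalities can be tested numerically by collecting the distinct subpermutations of each small length from a long prefix of $T$; the sketch below (illustration only, reusing the helpers above) does exactly this. Since the Thue-Morse word is uniformly recurrent, a sufficiently long prefix sees every subpermutation of a given small length, but in general such a finite check is only a plausibility test, not a proof.
\begin{verbatim}
def distinct_subperms(T, length, limit=2000):
    """Distinct subpermutations of pi_T of the given length occurring at
    starting positions 0, ..., limit-1 (a finite sample)."""
    seen = set()
    for a in range(limit):
        seen.add(tuple(subperm_from_window(T, a, length - 1)))
    return seen

tau = {m: len(distinct_subperms(T, m)) for m in range(2, 16)}
for n in range(2, 7):
    print(n,
          tau[2 * n] <= 2 * tau[n + 1],                # first inequality
          tau[2 * n + 1] <= tau[n + 1] + tau[n + 2])   # second inequality
\end{verbatim}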
The three maps $\phi_L$, $\phi_R$, and $\phi_M$ are not injective maps. To see this, consider the subpermutations
\begin{align*}
&p=\pi_T[5,9] = [2 \hspace{.5ex} 3 \hspace{.5ex} 5 \hspace{.5ex} 4 \hspace{.5ex} 1] \\
&q=\pi_T[23,27] = [1 \hspace{.5ex} 3 \hspace{.5ex} 5 \hspace{.5ex} 4 \hspace{.5ex} 2].
\end{align*}
Both of these subpermutations have form $T[5,8] = T[23,26] = 0011$. Then applying the maps we see:
\begin{align*}
&p' = \phi(p) = \pi_T[10,18] = [4 \hspace{.5ex} 8 \hspace{.5ex} 5 \hspace{.5ex} 9 \hspace{.5ex} 7 \hspace{.5ex} 2 \hspace{.5ex} 6 \hspace{.5ex} 1 \hspace{.5ex} 3] \\
&q' = \phi(q) = \pi_T[46,54] = [3 \hspace{.5ex} 8 \hspace{.5ex} 5 \hspace{.5ex} 9 \hspace{.5ex} 7 \hspace{.5ex} 2 \hspace{.5ex} 6 \hspace{.5ex} 1 \hspace{.5ex} 4]
\end{align*}
\begin{align*}
&\phi_L(p) = \pi_T[10,17] = [3 \hspace{.5ex} 7 \hspace{.5ex} 4 \hspace{.5ex} 8 \hspace{.5ex} 6 \hspace{.5ex} 2 \hspace{.5ex} 5 \hspace{.5ex} 1] \\
&\phi_L(q) = \pi_T[46,53] = [3 \hspace{.5ex} 7 \hspace{.5ex} 4 \hspace{.5ex} 8 \hspace{.5ex} 6 \hspace{.5ex} 2 \hspace{.5ex} 5 \hspace{.5ex} 1]
\end{align*}
\begin{align*}
&\phi_R(p) = \pi_T[11,18] = [7 \hspace{.5ex} 4 \hspace{.5ex} 8 \hspace{.5ex} 6 \hspace{.5ex} 2 \hspace{.5ex} 5 \hspace{.5ex} 1 \hspace{.5ex} 3] \\
&\phi_R(q) = \pi_T[47,54] = [7 \hspace{.5ex} 4 \hspace{.5ex} 8 \hspace{.5ex} 6 \hspace{.5ex} 2 \hspace{.5ex} 5 \hspace{.5ex} 1 \hspace{.5ex} 3]
\end{align*}
\begin{align*}
&\phi_M(p) = \pi_T[11,17] = [6 \hspace{.5ex} 3 \hspace{.5ex} 7 \hspace{.5ex} 5 \hspace{.5ex} 2 \hspace{.5ex} 4 \hspace{.5ex} 1] \\
&\phi_M(q) = \pi_T[47,53] = [6 \hspace{.5ex} 3 \hspace{.5ex} 7 \hspace{.5ex} 5 \hspace{.5ex} 2 \hspace{.5ex} 4 \hspace{.5ex} 1]
\end{align*}
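The displayed values can be reproduced with the sketches given earlier in this section (again purely as an illustration):
\begin{verbatim}
p = subperm_from_window(T, 5, 4)     # pi_T[5, 9]   = [2, 3, 5, 4, 1]
q = subperm_from_window(T, 23, 4)    # pi_T[23, 27] = [1, 3, 5, 4, 2]
pp = forward_image_by_formula(p, T[5:9])     # phi(p)
qq = forward_image_by_formula(q, T[23:27])   # phi(q)
print(pp == qq)                                          # False
print(left_restriction(pp) == left_restriction(qq))      # True
print(right_restriction(pp) == right_restriction(qq))    # True
print(middle_restriction(pp) == middle_restriction(qq))  # True
\end{verbatim}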
So $p' \neq q'$ but $\phi_L(p) = \phi_L(q)$, $\phi_R(p) = \phi_R(q)$, and $\phi_M(p) = \phi_M(q)$, so these maps are not injective in general. Hence Lemma $\ref{UpperBoundForTau}$ gives only upper bounds. The next goal is to determine when these maps are not injective.
\section{Type $k$ and Complementary Pairs}
\label{TypeKandCompPairs}
An interesting pattern occurs in some subpermutations of $\pi_T$. Subpermutations following this pattern are said to be of type $k$, as described in the next definition. Proposition $\ref{CalculateTheFwdImage}$ will be used inductively to show that the maps $\phi$, $\phi_L$, $\phi_R$, and $\phi_M$ preserve subpermutations of type $k$. An induction argument based on this fact will show that two distinct subpermutations have the same form if and only if they are a complementary pair of type $k$, defined below. A corollary of this will determine when the maps $\phi_L$, $\phi_R$, and $\phi_M$ are bijective.
\begin{definition}
A subpermutation $p = \pi_T[a,a+n]$ is of $\textit{type $k$}$, for $k \geq 1$, if $p$ can be decomposed as
$$ p = [\alpha_1 \cdots \alpha_k \lambda_1 \cdots \lambda_l \beta_1 \cdots \beta_k] $$
where $l = n+1-2k$ and $\alpha_i = \beta_i + \varepsilon$ for each $i = 1, 2, \ldots, k$, for some $\varepsilon \in \{ -1, 1 \}$.
\end{definition}
Some examples of subpermutations of type $1$, 2, and 3 (respectively) are:
\begin{align*}
&\pi_T[5,9] = [2 \hspace{.5ex} 3 \hspace{.5ex} 5 \hspace{.5ex} 4 \hspace{.5ex} 1] \\
&\pi_T[20,25] = [2 \hspace{.5ex} 5 \hspace{.5ex} 4 \hspace{.5ex} 1 \hspace{.5ex} 3 \hspace{.5ex} 6] \\
&\pi_T[6,12] = [3 \hspace{.5ex} 7 \hspace{.5ex} 5 \hspace{.5ex} 1 \hspace{.5ex} 2 \hspace{.5ex} 6 \hspace{.5ex} 4]
\end{align*}
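Whether a subpermutation is of type $k$ can be read off directly from the definition; the sketch below (illustration only) returns the largest such $k$, and reports $1$, $2$, and $3$ for the three examples above.
\begin{verbatim}
def max_type(p):
    """Largest k >= 1 such that p[i] - p[len(p)-k+i] equals a fixed
    epsilon in {-1, +1} for all i = 0, ..., k-1; returns 0 if no such k."""
    best = 0
    for k in range(1, len(p) // 2 + 1):
        diffs = {p[i] - p[len(p) - k + i] for i in range(k)}
        if diffs == {-1} or diffs == {1}:
            best = k
    return best

print(max_type([2, 3, 5, 4, 1]))        # 1
print(max_type([2, 5, 4, 1, 3, 6]))     # 2
print(max_type([3, 7, 5, 1, 2, 6, 4]))  # 3
\end{verbatim}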
\begin{definition}
Suppose that the subpermutation $p = \pi_T[a,a+n]$ is of type $k$ so that for $\varepsilon \in \{-1, 1 \}$, $\alpha_i = \beta_i + \varepsilon$ for each $i = 1, 2, \ldots, k$. If there exists a subpermutation $q = \pi_T[b,b+n]$ of type $k$ so that $p$ and $q$ can be decomposed as:
\begin{align*}
p &= \pi_T[a,a+n] = [\alpha_1 \cdots \alpha_k \lambda_1 \cdots \lambda_l \beta_1 \cdots \beta_k] \\
q &= \pi_T[b,b+n] = [\beta_1 \cdots \beta_k \lambda_1 \cdots \lambda_l \alpha_1 \cdots \alpha_k]
\end{align*}
then $p$ and $q$ are said to be a $\textit{complementary pair of type $k$}$. By convention, if $p$ and $q$ are a $\textit{complementary pair of type}$ $k \leq 0$, then $p = q$.
\end{definition}
The subpermutations
\begin{align*}
&\pi_T[5,9] = [2 \hspace{.5ex} 3 \hspace{.5ex} 5 \hspace{.5ex} 4 \hspace{.5ex} 1] \\
&\pi_T[23,27] = [1 \hspace{.5ex} 3 \hspace{.5ex} 5 \hspace{.5ex} 4 \hspace{.5ex} 2]
\end{align*}
are a complementary pair of type 1. The following subpermutation of type 1
$$ \pi_T[0,3] = [2 \hspace{.5ex} 4 \hspace{.5ex} 3 \hspace{.5ex} 1] $$
does not have a complementary pair, since $[1 \hspace{.5ex} 4 \hspace{.5ex} 3 \hspace{.5ex} 2] $ is not a subpermutation of $\pi_T$.
The following proposition considers subpermutations of type $k$, and complementary pairs of type $k$.
\begin{proposition}
\label{ImageOfTypeK}
Suppose $p = \pi_T[a,a+n]$ is of type $k$ and $q = \pi_T[b,b+n]$ is of type $k$, with $k \geq 1$, and that $p$ and $q$ are a complementary pair of type $k$.
\begin{itemize}
\item[(a)] $\phi(p)$ is of type $2k-1$, and if $k \geq 2$ then $\phi_L(p)$ and $\phi_R(p)$ are of type $2k-2$ and $\phi_M(p)$ is of type $2k-3$.
\item[(b)] $\phi(p)$ and $\phi(q)$ are a complementary pair of type $2k-1$.
\item[(c)] $\phi_L(p)$ and $\phi_L(q)$ are a complementary pair of type $2k-2$.
\item[(d)] $\phi_R(p)$ and $\phi_R(q)$ are a complementary pair of type $2k-2$.
\item[(e)] $\phi_M(p)$ and $\phi_M(q)$ are a complementary pair of type $2k-3$.
\end{itemize}
\end{proposition}
\begin{proof}
Since $p$ and $q$ are a complementary pair of type $k$ they can be decomposed as
\begin{align*}
p &= \pi_T[a,a+n] = [\alpha_1 \cdots \alpha_k \lambda_1 \cdots \lambda_l \beta_1 \cdots \beta_k] \\
q &= \pi_T[b,b+n] = [\beta_1 \cdots \beta_k \lambda_1 \cdots \lambda_l \alpha_1 \cdots \alpha_k]
\end{align*}
and for $\varepsilon \in \{-1, 1 \}$, $\alpha_i = \beta_i + \varepsilon$ for each $i = 1, 2, \ldots, k$. For the values of $k$ and $l$, $2k+l = n+1$ and $4k+2l-1=2n+1$.
$
$
$\textbf{(a)}$ The first thing to show is that $\phi(p)$ is of type $2k-1$.
For $i \in \{0, 1, \ldots, k-1 \}$ we have $p_i = p_{n-(k-1)+i}+\varepsilon$, so by Proposition $\ref{CalculateTheFwdImage}$:
$$p'_{2i} = p'_{2(n-(k-1)+i)} +\varepsilon $$
For $i \in \{0, 1, \ldots, k-2 \}$, $p_i < p_{i+1}$ if and only if $p_{n-(k-1)+i} < p_{n-(k-1)+i+1}$, and $p_i < p_n$ if and only if $p_{n-(k-1)+i} < p_n$ since $p_i$ and $p_{n-(k-1)+i}$ are consecutive values. By Proposition $\ref{CalculateTheFwdImage}$:
$$ p'_{2i+1} = p'_{2(n-(k-1)+i)+1} +\varepsilon $$
So for each $i \in \{0, 1, \ldots, 2k-2 \}$: $p'_i = p'_{2n-2k+2+i}+\varepsilon$, and $\phi(p)$ can be decomposed as
$$\phi(p) = \pi_T[2a,2a+2n] = [\alpha'_1 \cdots \alpha'_{2k-1} \lambda'_1 \cdots \lambda'_{2l+1} \beta'_1 \cdots \beta'_{2k-1}],$$
where $\alpha'_i = \beta'_i + \varepsilon$, so $\phi(p)=p'$ is of type $2k-1$.
Next suppose that $k \geq 2$, so that $2k-1 \geq 3$; we show that $\phi_L(p) = L(p')$ and $\phi_R(p) = R(p')$ are of type $2k-2$ and $\phi_M(p)$ is of type $2k-3$.
Let $i \in \{0, 1, \ldots, 2k-3\}$, and consider $\phi_L(p) = L(p')$. Since $p'_i$ and $p'_{2n-2k+2+i}$ are consecutive values, $p'_i < p'_{2n}$ if and only if $p'_{2n-2k+2+i} < p'_{2n}$. So if $L(p')_i = p'_i$ then $L(p')_{2n-2k+2+i} = p'_{2n-2k+2+i}$, and if $L(p')_i = p'_i-1$ then $L(p')_{2n-2k+2+i} = p'_{2n-2k+2+i}-1$. In either case, $L(p')_i = L(p')_{2n-2k+2+i} + \varepsilon$ and there is a decomposition
$$\phi_L(p) = \pi_T[2a,2a+2n-1] = [\alpha'_1 \cdots \alpha'_{2k-2} \lambda'_1 \cdots \lambda'_{2l+2} \beta'_1 \cdots \beta'_{2k-2}],$$
and $\phi_L(p)$ is of type $2k-2$.
Now consider $\phi_R(p) = R(p')$. Since $p'_{i+1}$ and $p'_{2n-2k+2+i+1}$ are consecutive values, $p'_{i+1} < p'_0$ if and only if $p'_{2n-2k+2+i+1} < p'_0$. So if $R(p')_i = p'_{i+1}$ then $R(p')_{2n-2k+2+i} = p'_{2n-2k+2+i+1}$, and if $R(p')_i = p'_{i+1}-1$ then $R(p')_{2n-2k+2+i} = p'_{2n-2k+2+i+1}-1$. In either case, $R(p')_i = R(p')_{2n-2k+2+i} + \varepsilon$ and there is a decomposition
$$\phi_R(p) = \pi_T[2a+1,2a+2n] = [\alpha'_1 \cdots \alpha'_{2k-2} \lambda'_1 \cdots \lambda'_{2l+2} \beta'_1 \cdots \beta'_{2k-2}],$$
and $\phi_R(p)$ is of type $2k-2$.
Now consider $\phi_M(p)$, and let $i \in \{0, 1, \ldots, 2k-4\}$. Since $R(p')_i$ and $R(p')_{2n-2k+2+i}$ are consecutive values, $R(p')_i < R(p')_{2n-1}$ if and only if $R(p')_{2n-2k+2+i} < R(p')_{2n-1}$. So if $M(p')_i = L(R(p'))_i = R(p')_i$ then $M(p')_{2n-2k+2+i} = L(R(p'))_{2n-2k+2+i} = R(p')_{2n-2k+2+i}$, and if $M(p')_i = L(R(p'))_i = R(p')_i-1$ then $M(p')_{2n-2k+2+i} = L(R(p'))_{2n-2k+2+i} = R(p')_{2n-2k+2+i}-1$. In either case, $M(p')_i = M(p')_{2n-2k+2+i} + \varepsilon$ and there is a decomposition
$$\phi_M(p) = \pi_T[2a+1,2a+2n-1] = [\alpha'_1 \cdots \alpha'_{2k-3} \lambda'_1 \cdots \lambda'_{2l+3} \beta'_1 \cdots \beta'_{2k-3}],$$
and $\phi_M(p)$ is of type $2k-3$.
$
$
$\textbf{(b)}$ From (a), $\phi(q) = q'$ is of type $2k-1$. Since $p$ and $q$ are a complementary pair of type $k$, $p_i = p_{n-k+1+i} + \varepsilon = q_i + \varepsilon = q_{n-k+1+i}$ for each $i \in \{0, 1, \ldots, k-1 \}$, and $p_{k+i} = q_{k+i}$ for each $i \in \{0, 1, \ldots, l-1 \}$. Thus for $i \in \{0, 1, \ldots, k-1 \}$:
\begin{align*}
p'_{2i} &= p'_{2(n-k+1 + i)} + \varepsilon \\
p'_{2i} &= q'_{2(n-k+1 + i)} \\
q'_{2(n-k+1+i)} &= q'_{2i}+ \varepsilon
\end{align*}
For $i \in \{0, 1, \ldots, k-2 \}$:
\begin{align*}
p'_{2i+1} &= p'_{2(n-k+1+i)+1} + \varepsilon \\
p'_{2i+1} &= q'_{2(n-k+1+i)+1} \\
q'_{2(n-k+1+i)+1} &= q'_{2i+1}+ \varepsilon
\end{align*}
We know that $p_{k-1} = p_n + \varepsilon = q_{k-1} + \varepsilon = q_n$. Suppose first that $\varepsilon = 1$ (the case $\varepsilon = -1$ is analogous, with the roles of $p$ and $q$ interchanged), so $p_{k-1} > p_n$ and $q_{k-1} < q_n$. Thus if $p_{k-1} < p_k$
$$ p'_{2k-1} = p_{k-1} + \abs{u}_1 + n = q_{k-1} + 1 + \abs{u}_1 + n = q_{k-1} + \abs{u}_1 + (n+1) = q'_{2k-1} $$
and if $p_{k-1} > p_k$
$$ p'_{2k-1} = p_{k-1} + \abs{u}_1 - (n+1) = q_{k-1} + 1 + \abs{u}_1 - (n+1) = q_{k-1} + \abs{u}_1 - n = q'_{2k-1}. $$
By Proposition $\ref{CalculateTheFwdImage}$, since $p_{k+i} = q_{k+i}$ for each $i \in \{0, 1, \ldots, l-1 \}$,
\begin{align*}
p'_{2(k+i)} &= q'_{2(k+i)} \\
p'_{2(k+i)+1} &= q'_{2(k+i)+1}
\end{align*}
Thus there are decompositions of $\phi(p) = p'$ and $\phi(q) = q'$ so that
$$\phi(p) = \pi_T[2a,2a+2n] = [\alpha'_1 \cdots \alpha'_{2k-1} \lambda'_1 \cdots \lambda'_{2l+1} \beta'_1 \cdots \beta'_{2k-1}],$$
$$\phi(q) = \pi_T[2b,2b+2n] = [\beta'_1 \cdots \beta'_{2k-1} \lambda'_1 \cdots \lambda'_{2l+1} \alpha'_1 \cdots \alpha'_{2k-1}],$$
where $\alpha'_i = \beta'_i + \varepsilon$. Therefore $\phi(p)=p'$ and $\phi(q)=q'$ are a complementary pair of type $2k-1$.
$
$
$\textbf{(c)}$ From (b), $\phi(p)=p'$ and $\phi(q)=q'$ are a complementary pair of type $2k-1$. Suppose $k \geq 2$ and so $2k-3 \geq 1$, and let $i \in \{0, 1, \ldots, 2k-3\}$, then $p'_i = q'_i + \varepsilon = p'_{2n-2k+2 +i} + \varepsilon = q'_{2n-2k+2+i}$. Thus $p'_i$ and $p'_{2n-2k+2 +i}$ are consecutive values, as are $q'_i$ and $q'_{2n-2k+2 +i}$, also $p'_{2n} < p'_i$ if and only if $p'_{2n} < p'_{2n-2k+2 +i}$, and
$$p'_{2n} < p'_i \text{ and } p'_{2n} < p'_{2n-2k+2 +i} \hspace{1.5ex} \Longleftrightarrow \hspace{1.5ex} q'_{2n} < q'_i \text{ and } q'_{2n} < q'_{2n-2k+2 +i}.$$
If $L(p')_i = p'_i-1$ or $L(p')_i = p'_i$, we have $L(q')_i = q'_i-1$ or $L(q')_i = q'_i$ (respectively), and $L(p')_i = L(q')_i+ \varepsilon = L(p')_{2n-2k+2 +i} + \varepsilon = L(q')_{2n-2k+2 +i}$.
Now let $i \in \{0, 1, \ldots, 2l \}$, so $p'_{2k-1+i} = q'_{2k-1+i}$. Thus $p'_{2n} < p'_{2k-1+i}$ if and only if $q'_{2n} < q'_{2k-1+i}$, and so we have $L(p')_{2k-1+i} = L(q')_{2k-1+i}$.
Then $p'_{2k-2} = q'_{2k-2} + \varepsilon = p'_{2n} + \varepsilon = q'_{2n}$, so $p'_{2k-2} > p'_{2n}$ if and only if $q'_{2k-2} < q'_{2n}$. If $p'_{2k-2} > p'_{2n}$ and $q'_{2k-2} < q'_{2n}$, then $p'_{2k-2} = q'_{2k-2} + 1 = p'_{2n} + 1 = q'_{2n}$ so
$$ L(p')_{2k-2} = p'_{2k-2} - 1 = q'_{2k-2} = L(q')_{2k-2}.$$
If $p'_{2k-2} < p'_{2n}$ and $q'_{2k-2} > q'_{2n}$, then $p'_{2k-2} = q'_{2k-2} - 1 = p'_{2n} - 1 = q'_{2n}$ so
$$ L(p')_{2k-2} = p'_{2k-2} = q'_{2k-2} - 1 = L(q')_{2k-2}.$$
In either case, $ L(p')_{2k-2} = L(q')_{2k-2}$. Thus there are decompositions of $\phi_L(p) = L(p')$ and $\phi_L(q) = L(q')$ so that
$$\phi_L(p) = \pi_T[2a,2a+2n-1] = [\alpha'_1 \cdots \alpha'_{2k-2} \lambda'_1 \cdots \lambda'_{2l+2} \beta'_1 \cdots \beta'_{2k-2}],$$
$$\phi_L(q) = \pi_T[2b,2b+2n-1] = [\beta'_1 \cdots \beta'_{2k-2} \lambda'_1 \cdots \lambda'_{2l+2} \alpha'_1 \cdots \alpha'_{2k-2}],$$
where $\alpha'_i = \beta'_i + \varepsilon$. Therefore $\phi_L(p)$ and $\phi_L(q)$ are a complementary pair of type $2k-2$.
Now suppose that $k=1$ and so $2k-1 = 1$. Then $p'_0 = q'_0 + \varepsilon = p'_{2n} + \varepsilon= q'_{2n}$ and $p'_i = q'_i$ for $i = 1,2, \ldots, 2n-1$. If $p'_0 > p'_{2n}$ and $q'_0 < q'_{2n}$, then $p'_0 = q'_0 + 1 = p'_{2n} + 1 = q'_{2n}$ so
$$ L(p')_0 = p'_0 - 1 = q'_0 = L(q')_0.$$
If $p'_0 < p'_{2n}$ and $q'_0 > q'_{2n}$, then $p'_0 = q'_0 - 1 = p'_{2n} - 1 = q'_{2n}$ so
$$ L(p')_0 = p'_0 = q'_0 - 1 = L(q')_0.$$
In either case, $ L(p')_0 = L(q')_0$. Then for each $i \in \{1, 2, \ldots, 2n-1 \}$, $p'_i = q'_i$, and $p'_{2n} < p'_i$ if and only if $q'_{2n} < q'_i$ so $L(p')_i = L(q')_i$. Therefore, if $k=1$ then $\phi_L(p) = \phi_L(q)$.
$
$
$\textbf{(d)}$ From (b), $\phi(p)=p'$ and $\phi(q)=q'$ are a complementary pair of type $2k-1$. Suppose $k \geq 2$ and so $2k-3 \geq 1$, and let $i \in \{0, 1, \ldots, 2k-3\}$, then $p'_{i+1} = q'_{i+1} + \varepsilon = p'_{2n-2k+2 +i+1} + \varepsilon = q'_{2n-2k+2+i+1}$. Thus $p'_{i+1}$ and $p'_{2n-2k+2 +i+1}$ are consecutive values, as are $q'_{i+1}$ and $q'_{2n-2k+2 +i+1}$, also $p'_0 < p'_{i+1}$ if and only if $p'_0 < p'_{2n-2k+2 +i+1}$, and
$$p'_0 < p'_{i+1} \text{ and } p'_0 < p'_{2n-2k+2 +i+1} \hspace{1.5ex} \Longleftrightarrow \hspace{1.5ex} q'_0 < q'_{i+1} \text{ and } q'_0 < q'_{2n-2k+2 +i+1}.$$
If $R(p')_i = p'_{i+1}-1$ or $R(p')_i = p'_{i+1}$, we have $R(q')_i = q'_{i+1}-1$ or $R(q')_i = q'_{i+1}$ (respectively), and $R(p')_i = R(q')_i+ \varepsilon = R(p')_{2n-2k+2 +i} + \varepsilon = R(q')_{2n-2k+2 +i}$.
Now let $i \in \{0, 1, \ldots, 2l \}$, so $p'_{2k-1+i} = q'_{2k-1+i}$. Thus $p'_0 < p'_{2k-1+i}$ if and only if $q'_0 < q'_{2k-1+i}$, and so we have $R(p')_{2k-1+i-1} = R(q')_{2k-1+i-1}$.
Then $p'_0 = q'_0 + \varepsilon = p'_{2n-2k+2} + \varepsilon = q'_{2n-2k+2}$, so $p'_{2n-2k+2} > p'_0$ if and only if $q'_{2n-2k+2} < q'_0$. If $p'_{2n-2k+2} > p'_0$ and $q'_{2n-2k+2} < q'_0$, then $p'_{2n-2k+2} = q'_{2n-2k+2} + 1 = p'_0 + 1 = q'_0$ so
$$ R(p')_{2n-2k+1} = p'_{2n-2k+2} - 1 = q'_{2n-2k+2} = R(q')_{2n-2k+1}.$$
If $p'_{2n-2k+2} < p'_0$ and $q'_{2n-2k+2} > q'_0$, then $p'_{2n-2k+2} = q'_{2n-2k+2} - 1 = p'_0 - 1 = q'_0$ so
$$ R(p')_{2n-2k+1} = p'_{2n-2k+2} = q'_{2n-2k+2} - 1 = R(q')_{2n-2k+1}.$$
In either case, $ R(p')_{2n-2k+1} = R(q')_{2n-2k+1}$. Thus there are decompositions of $\phi_R(p) = R(p')$ and $\phi_R(q) = R(q')$ so that
$$\phi_R(p) = \pi_T[2a+1,2a+2n] = [\alpha'_1 \cdots \alpha'_{2k-2} \lambda'_1 \cdots \lambda'_{2l+2} \beta'_1 \cdots \beta'_{2k-2}],$$
$$\phi_R(q) = \pi_T[2b+1,2b+2n] = [\beta'_1 \cdots \beta'_{2k-2} \lambda'_1 \cdots \lambda'_{2l+2} \alpha'_1 \cdots \alpha'_{2k-2}],$$
where $\alpha'_i = \beta'_i + \varepsilon$. Therefore $\phi_R(p)$ and $\phi_R(q)$ are a complementary pair of type $2k-2$.
Now suppose that $k=1$ and so $2k-1 = 1$. Then $p'_0 = q'_0 + \varepsilon = p'_{2n} + \varepsilon= q'_{2n}$ and $p'_i = q'_i$ for $i = 1,2, \ldots, 2n-1$. If $p'_0 > p'_{2n}$ and $q'_0 < q'_{2n}$, then $p'_0 = q'_0 + 1 = p'_{2n} + 1 = q'_{2n}$ so
$$ R(p')_{2n-1} = p'_{2n} = q'_{2n}-1 = R(q')_{2n-1}.$$
If $p'_0 < p'_{2n}$ and $q'_0 > q'_{2n}$, then $p'_0 = q'_0 - 1 = p'_{2n} - 1 = q'_{2n}$ so
$$ R(p')_{2n-1} = p'_{2n} - 1 = q'_{2n} = R(q')_{2n-1}.$$
In either case, $ R(p')_{2n-1} = R(q')_{2n-1}$. Then for each $i \in \{1, 2, \ldots, 2n-1 \}$, $p'_i = q'_i$, and $p'_0 < p'_i$ if and only if $q'_0 < q'_i$ so $R(p')_{i-1} = R(q')_{i-1}$. Therefore, if $k=1$ then $\phi_R(p) = \phi_R(q)$.
$
$
$\textbf{(e)}$ From (d), $\phi_R(p)=R(p')$ and $\phi_R(q)=R(q')$ are a complementary pair of type $2k-2$. Suppose $k \geq 2$ and so $2k-4 \geq 0$, and let $i \in \{0, \ldots, 2k-4\}$, then $R(p')_i = R(q')_i + \varepsilon = R(p')_{2n-2k+2 +i} + \varepsilon = R(q')_{2n-2k+2+i}$. Thus $R(p')_i$ and $R(p')_{2n-2k+2 +i}$ are consecutive values, as are $R(q')_i$ and $R(q')_{2n-2k+2 +i}$, and $R(p')_{2n-1} < R(p')_i$ if and only if $R(p')_{2n-1} < R(p')_{2n-2k+2 +i}$, and
$$R(p')_{2n-1} < R(p')_i \text{ and } R(p')_{2n-1} < R(p')_{2n-2k+2 +i} \hspace{1.5ex} \Longleftrightarrow \hspace{1.5ex} R(q')_{2n-1} < R(q')_i \text{ and } R(q')_{2n-1} < R(q')_{2n-2k+2 +i}.$$
If $L(R(p'))_i = R(p')_i-1$ or $L(R(p'))_i = R(p')_i$, we have $L(R(q'))_i = R(q')_i-1$ or $L(R(q'))_i = R(q')_i$ (respectively), and $L(R(p'))_i = L(R(q'))_i+ \varepsilon = L(R(p'))_{2n-2k+2 +i} + \varepsilon = L(R(q'))_{2n-2k+2 +i}$.
Now let $i \in \{0, 1, \ldots, 2l+1 \}$, so $R(p')_{2k-2+i} = R(q')_{2k-2+i}$. Thus $R(p')_{2n-1} < R(p')_{2k-2+i}$ if and only if $R(q')_{2n-1} < R(q')_{2k-2+i}$, and so we have $L(R(p'))_{2k-2+i} = L(R(q'))_{2k-2+i}$.
Then $R(p')_{2k-3} = R(q')_{2k-3} + \varepsilon = R(p')_{2n-1} + \varepsilon = R(q')_{2n-1}$, so $R(p')_{2k-3} > R(p')_{2n-1}$ if and only if $R(q')_{2k-3} < R(q')_{2n-1}$. If $R(p')_{2k-3} > R(p')_{2n-1}$ and $R(q')_{2k-3} < R(q')_{2n-1}$, then $R(p')_{2k-3} = R(q')_{2k-3} + 1 = R(p')_{2n-1} + 1 = R(q')_{2n-1}$ so
$$ L(R(p'))_{2k-3} = R(p')_{2k-3} - 1 = R(q')_{2k-3} = L(R(q'))_{2k-3}.$$
If $R(p')_{2k-3} < R(p')_{2n-1}$ and $R(q')_{2k-3} > R(q')_{2n-1}$, then $R(p')_{2k-3} = R(q')_{2k-3} - 1 = R(p')_{2n-1} - 1 = R(q')_{2n-1}$ so
$$ L(R(p'))_{2k-3} = R(p')_{2k-3} = R(q')_{2k-3} - 1 = L(R(q'))_{2k-3}.$$
In either case, $ L(R(p'))_{2k-3} = L(R(q'))_{2k-3}$. Thus there are decompositions of $\phi_M(p) = L(R(p'))$ and $\phi_M(q) = L(R(q'))$ so that
$$\phi_M(p) = \pi_T[2a+1,2a+2n-1] = [\alpha'_1 \cdots \alpha'_{2k-3} \lambda'_1 \cdots \lambda'_{2l+3} \beta'_1 \cdots \beta'_{2k-3}],$$
$$\phi_M(q) = \pi_T[2b+1,2b+2n-1] = [\beta'_1 \cdots \beta'_{2k-3} \lambda'_1 \cdots \lambda'_{2l+3} \alpha'_1 \cdots \alpha'_{2k-3}],$$
where $\alpha'_i = \beta'_i + \varepsilon$. Therefore $\phi_M(p)$ and $\phi_M(q)$ are a complementary pair of type $2k-3$.
Now suppose that $k=1$ and so $2k-1 = 1$. Then $\phi_R(p) = \phi_R(q)$, and thus $L(R(p')) = L(R(q'))$. Therefore, if $k=1$ then $\phi_M(p) = \phi_M(q)$.
$\qed$
\end{proof}
\begin{theorem}
\label{SameFormIFFCompPair}
Let $p$ and $q$ be distinct subpermutations of $\pi_T$. Then $p$ and $q$ have the same form if and only if $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$.
\end{theorem}
\begin{proof}
First, suppose that $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$. So there are decompositions:
\begin{align*}
p &= \pi_T[a,a+n] = [\alpha_1 \cdots \alpha_k \lambda_1 \cdots \lambda_l \beta_1 \cdots \beta_k] \\
q &= \pi_T[b,b+n] = [\beta_1 \cdots \beta_k \lambda_1 \cdots \lambda_l \alpha_1 \cdots \alpha_k]
\end{align*}
so that for $\varepsilon \in \{-1, 1 \}$, $\alpha_i = \beta_i + \varepsilon$ for each $i \in \{1, 2, \ldots, k \}$.
For each $i \in \{0, 1, \ldots, k-2 \}$, $p_i$ and $p_{n-k+1+i}$ are consecutive values, as are $q_i$ and $q_{n-k+1+i}$, so
$$ p_i < p_{i+1} \text{ and } p_{n-k+1+i} < p_{n-k+1+i+1} \hspace{1.5ex} \Longleftrightarrow \hspace{1.5ex} q_i < q_{i+1} \text{ and } q_{n-k+1+i} < q_{n-k+1+i+1}.$$
Since $p_{k-1} = q_{k-1} + \varepsilon$, $p_{k+l} + \varepsilon = q_{k+l}$, $p_k = q_k$, and $p_{k+l-1} = q_{k+l-1}$:
\begin{align*}
p_{k-1} < p_k \hspace{1.5ex} &\Longleftrightarrow \hspace{1.5ex} q_{k-1} < q_k \\
p_{k+l-1} < p_{k+l} \hspace{1.5ex} &\Longleftrightarrow \hspace{1.5ex} q_{k+l-1} < q_{k+l}.
\end{align*}
For each $i \in \{0, 1, \ldots, l-2 \}$, $p_{k+i} = q_{k+i}$, so
$$ p_{k+i} < p_{k+i+1} \hspace{1.5ex} \Longleftrightarrow \hspace{1.5ex} q_{k+i} < q_{k+i+1}. $$
Therefore $p_i < p_{i+1}$ if and only if $q_i < q_{i+1}$ for each $i \in \{0, 1, \ldots, n-1 \}$, so $p$ and $q$ have the same form.
$
$
To show that distinct subpermutations with the same form are a complementary pair of type $k$, for some $k \geq 1$, an induction argument will be used. The subpermutations of lengths 2 through 9 are listed in Appendix $\ref{SecTheSubperms}$, along with the form of the subpermutations. It can be seen that distinct subpermutations with the same form are a complementary pair of type $k$, for some $k \geq 1$.
Assume that $n \geq 9$ and that the theorem is true for all subpermutations of length at most $n$. Let $p'$ and $q'$ be distinct subpermutations of length $n+1$ with the same form, so $p'_i < p'_{i+1}$ if and only if $q'_i < q'_{i+1}$ for each $i = 0, 1, \ldots, n-1$.
Then
$$p', q' \in Perm_{ev}(n+1) \hspace{3.0ex} \text{ or } \hspace{3.0ex} p', q' \in Perm_{odd}(n+1).$$
Suppose, to the contrary and without loss of generality, that $p' \in Perm_{ev}(n+1)$ and $q' \in Perm_{odd}(n+1)$. Then $p' = \pi_T[2a,2a+n]$ and $q' = \pi_T[2b+1,2b+n+1]$, so $T[2a,2a+n-1] = T[2b+1,2b+n]$. Since $n \geq 9$, $T[2a,2a+n-1]$ will contain either 00 or 11, so there is some $c$ so that $T[2a+2c+1,2a+2c+2]$ is 00 or 11. Then also, $T[2b+1 + 2c+1,2b+1 + 2c+2] = T[2b+2c+2,2b+2c+3]$ must be the same as $T[2a+2c+1,2a+2c+2]$, but $T[2b+2c+2,2b+2c+3]$ is either $\mu_T(0) = 01$ or $\mu_T(1) = 10$, so $T[2b+2c+2,2b+2c+3] \neq T[2a+2c+1,2a+2c+2]$. Therefore, either $p',q' \in Perm_{ev}(n+1)$ or $p',q' \in Perm_{odd}(n+1)$.
Thus one of the 4 following cases must hold:
\begin{enumerate}
\item $p',q' \in Perm_{ev}(n+1)$ and $n+1$ is odd
\item $p',q' \in Perm_{ev}(n+1)$ and $n+1$ is even
\item $p',q' \in Perm_{odd}(n+1)$ and $n+1$ is even
\item $p',q' \in Perm_{odd}(n+1)$ and $n+1$ is odd
\end{enumerate}
$
$
$\textbf{Case 1}$ Suppose $p',q' \in Perm_{ev}(n+1)$ and $n+1 = 2m+1$, so there are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2m]$ and $q' = \pi_T[2b,2b+2m]$, and
$$ p = \pi_T[a,a+m] \hspace{8.0ex} q = \pi_T[b,b+m], $$
$$ p' = \phi(p) \hspace{8.0ex} q' = \phi(q). $$
Since $p'$ and $q'$ have the same form, $T[2a,2a+2m-1] = T[2b,2b+2m-1]$. If $T[a,a+m-1] \neq T[b,b+m-1]$ then $T[2a,2a+2m-1] \neq T[2b,2b+2m-1]$. Hence
$$T[a,a+m-1] = T[b,b+m-1]$$
and $p$ and $q$ have the same form. If $p=q$ then $p'=q'$, by Lemma $\ref{pISqIFFppISqp}$, thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi(p) = p'$ and $\phi(q) = q'$ are a complementary pair of type $2k-1$.
$
$
$\textbf{Case 2}$ Suppose $p',q' \in Perm_{ev}(n+1)$ and $n+1 = 2m$, so there are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2m-1]$ and $q' = \pi_T[2b,2b+2m-1]$, and
$$ p = \pi_T[a,a+m] \hspace{8.0ex} q = \pi_T[b,b+m], $$
$$ p' = \phi_L(p) \hspace{8.0ex} q' = \phi_L(q). $$
Since $p'$ and $q'$ have the same form, $T[2a,2a+2m-2] = T[2b,2b+2m-2]$. Thus $T_{2a+2m-2} = T_{2b+2m-2}$ implies $T_{a+m-1} = T_{b+m-1}$, so
$$ T[2a+2m-2,2a+2m-1] = \mu_T(T_{a+m-1}) = \mu_T(T_{b+m-1}) = T[2b+2m-2,2b+2m-1]$$
and
$$T[2a,2a+2m-1] = T[2b,2b+2m-1].$$
If $T[a,a+m-1] \neq T[b,b+m-1]$ then $T[2a,2a+2m-1] \neq T[2b,2b+2m-1]$. Hence
$$T[a,a+m-1] = T[b,b+m-1]$$
and $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Lemma $\ref{pISqIFFppISqp}$, and $p'=L(\phi(p))=L(\phi(q))=q'$, thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$. If $k=1$, then $\phi_L(p)$ and $\phi_L(q)$ are a complementary pair of type $2k-2 = 0$ and $p' = q'$, thus $k \geq 2$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi_L(p) = p'$ and $\phi_L(q) = q'$ are a complementary pair of type $2k-2 \geq 2$.
$
$
$\textbf{Case 3}$ Suppose $p',q' \in Perm_{odd}(n+1)$ and $n+1 = 2m$, so there are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2m]$ and $q' = \pi_T[2b+1,2b+2m]$, and
$$ p = \pi_T[a,a+m] \hspace{8.0ex} q = \pi_T[b,b+m], $$
$$ p' = \phi_R(p) \hspace{8.0ex} q' = \phi_R(q). $$
Since $p'$ and $q'$ have the same form, $T[2a+1,2a+2m-1] = T[2b+1,2b+2m-1]$. Thus $T_{2a+1} = T_{2b+1}$ implies $T_{a} = T_{b}$, so
$$ T[2a,2a+1] = \mu_T(T_{a}) = \mu_T(T_{b}) = T[2b,2b+1]$$
and
$$T[2a,2a+2m-1] = T[2b,2b+2m-1].$$
If $T[a,a+m-1] \neq T[b,b+m-1]$ then $T[2a,2a+2m-1] \neq T[2b,2b+2m-1]$. Hence
$$T[a,a+m-1] = T[b,b+m-1]$$
and $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Lemma $\ref{pISqIFFppISqp}$, and $p'=R(\phi(p))=R(\phi(q))=q'$, thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$. If $k=1$, then $\phi_R(p)$ and $\phi_R(q)$ are a complementary pair of type $2k-2 = 0$ and $p' = q'$, thus $k \geq 2$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi_R(p) = p'$ and $\phi_R(q) = q'$ are a complementary pair of type $2k-2 \geq 2$.
$
$
$\textbf{Case 4}$ Suppose $p',q' \in Perm_{odd}(n+1)$ and $n+1 = 2m+1$, so there are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2m+1]$ and $q' = \pi_T[2b+1,2b+2m+1]$, and
$$ p = \pi_T[a,a+m+1] \hspace{8.0ex} q = \pi_T[b,b+m+1], $$
$$ p' = \phi_M(p) \hspace{8.0ex} q' = \phi_M(q). $$
Since $p'$ and $q'$ have the same form, $T[2a+1,2a+2m] = T[2b+1,2b+2m]$. Thus $T_{2a+1} = T_{2b+1}$ implies $T_{a} = T_{b}$, so
$$ T[2a,2a+1] = \mu_T(T_{a}) = \mu_T(T_{b}) = T[2b,2b+1]$$
and $T_{2a+2m} = T_{2b+2m}$ implies $T_{a+m} = T_{b+m}$, so
$$ T[2a+2m,2a+2m+1] = \mu_T(T_{a+m}) = \mu_T(T_{b+m}) = T[2b+2m,2b+2m+1].$$
Therefore,
$$T[2a,2a+2m+1] = T[2b,2b+2m+1].$$
If $T[a,a+m] \neq T[b,b+m]$ then $T[2a,2a+2m+1] \neq T[2b,2b+2m+1]$. Hence
$$T[a,a+m] = T[b,b+m]$$
and $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Lemma $\ref{pISqIFFppISqp}$, and $p'=M(\phi(p))=M(\phi(q))=q'$, thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$. If $k=1$, then $\phi_M(p)$ and $\phi_M(q)$ are a complementary pair of type $2k-3 = -1$ and $p' = q'$, thus $k \geq 2$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi_M(p) = p'$ and $\phi_M(q) = q'$ are a complementary pair of type $2k-3 \geq 1$.
Therefore subpermutations $p$ and $q$ have the same form if and only if $p$ and $q$ are a complementary pair of type $k$, for some $k \geq 1$.
$\qed$
\end{proof}
There are a number of useful corollaries of Theorem $\ref{SameFormIFFCompPair}$. These corollaries give the number of subpermutations that can have the same form and show when the maps $\phi_L$, $\phi_R$, and $\phi_M$ are not injective.
\begin{corollary}
\label{AtMostOneCompliment}
For a subpermutation $p$ of $\pi_T$, there can be at most one subpermutation $q$ of $\pi_T$ so that $p$ and $q$ are a complementary pair.
\end{corollary}
\begin{proof}
Assume, to the contrary, that there are distinct subpermutations $q$ and $r$ so that $p$ and $q$ are a complementary pair of type $s$, and $p$ and $r$ are a complementary pair of type $t$. If $s = t$, the decomposition of $p$ of type $s$ determines its complement uniquely, so $q = r$; hence $s \neq t$. Then there are decompositions:
\begin{align*}
p &= \pi_T[a,a+n] = [\alpha_1 \cdots \alpha_s \lambda_1 \cdots \lambda_x \beta_1 \cdots \beta_s] \\
q &= \pi_T[b,b+n] = [\beta_1 \cdots \beta_s \lambda_1 \cdots \lambda_x \alpha_1 \cdots \alpha_s]
\end{align*}
so that for $\varepsilon_s \in \{-1, 1 \}$, $\alpha_i = \beta_i + \varepsilon_s$ for each $i = 1, 2, \ldots, s$, and
\begin{align*}
p &= \pi_T[a,a+n] = [\alpha'_1 \cdots \alpha'_t \lambda'_1 \cdots \lambda'_y \beta'_1 \cdots \beta'_t] \\
r &= \pi_T[b,b+n] = [\beta'_1 \cdots \beta'_t \lambda'_1 \cdots \lambda'_y \alpha'_1 \cdots \alpha'_t]
\end{align*}
so that for $\varepsilon_t \in \{-1, 1 \}$, $\alpha'_i = \beta'_i + \varepsilon_t$ for each $i = 1, 2, \ldots, t$.
Since $p$ and $q$ are a complementary pair they have the same form, as do $p$ and $r$. Thus $q$ and $r$ are distinct subpermutations with the same form, so by Theorem $\ref{SameFormIFFCompPair}$ $q$ and $r$ are a complementary pair of type $k$, for some $k$.
If $\beta_1 = \beta'_1$ then $p_{n-s+1} = p_{n-t+1}$, but since $s \neq t$ this cannot happen. Thus $\beta_1 \neq \beta'_1$ and $\varepsilon_s \neq \varepsilon_t$, so $\varepsilon_s = -\varepsilon_t$. Hence
\begin{align*}
\alpha_1 = \beta_1 + \varepsilon_s \hspace{3.0ex} \Rightarrow \hspace{3.0ex} \beta_1 &= \alpha_1 - \varepsilon_s \\
\alpha'_1 = \beta'_1 + \varepsilon_t \hspace{3.0ex} \Rightarrow \hspace{3.0ex} \beta'_1 = \alpha'_1 - \varepsilon_t \hspace{3.0ex} \Rightarrow \hspace{3.0ex} \beta'_1 &= \alpha_1 + \varepsilon_s.
\end{align*}
Therefore $q_0 \neq r_0 \pm 1$, so $q$ and $r$ are not a complementary pair, a contradiction.
$\qed$
\end{proof}
The next corollary follows directly from Theorem $\ref{SameFormIFFCompPair}$ and Corollary $\ref{AtMostOneCompliment}$.
\begin{corollary}
\label{AtMostTwoSubpermsWithSameForm}
For a factor $u$ of $T$, there are at most two subpermutations of $\pi_T$ with form $u$.
\end{corollary}
The next corollary shows when the maps $\phi_L$, $\phi_R$, and $\phi_M$ are not injective.
\begin{corollary}
\label{WhenTheMapsFailToBeABijection}
For subpermutations $p = \pi_T[a,a+n]$ and $q = \pi_T[b,b+n]$, where $p \neq q$:
\begin{itemize}
\item[(a)] $\phi_L(p) = \phi_L(q)$ if and only if $p$ and $q$ are a complementary pair of type 1.
\item[(b)] $\phi_R(p) = \phi_R(q)$ if and only if $p$ and $q$ are a complementary pair of type 1.
\item[(c)] $\phi_M(p) = \phi_M(q)$ if and only if $p$ and $q$ are a complementary pair of type 1.
\end{itemize}
\end{corollary}
\begin{proof}
It should be clear for all three cases that if $p$ and $q$ are a complementary pair of type 1 then
$$\phi_L(p) = \phi_L(q) \hspace{6.0ex} \phi_R(p) = \phi_R(q) \hspace{6.0ex} \phi_M(p) = \phi_M(q)$$
by Proposition $\ref{ImageOfTypeK}$. For the three cases, let $p = \pi_T[a,a+n]$ and $q = \pi_T[b,b+n]$ and $p \neq q$.
$\textbf{(a)}$ Suppose $\phi_L(p) = \phi_L(q)$, so $\pi_T[2a,2a+2n-1] = \pi_T[2b,2b+2n-1]$ and $T[2a,2a+2n-2] = T[2b,2b+2n-2]$. Thus $T_{2a+2n-2} = T_{2b+2n-2}$ implies $T_{a+n-1} = T_{b+n-1}$, so
$$ T[2a+2n-2,2a+2n-1] = \mu_T(T_{a+n-1}) = \mu_T(T_{b+n-1}) = T[2b+2n-2,2b+2n-1]$$
and
$$T[2a,2a+2n-1] = T[2b,2b+2n-1].$$
If $T[a,a+n-1] \neq T[b,b+n-1]$ then $T[2a,2a+2n-1] \neq T[2b,2b+2n-1]$. Hence
$$T[a,a+n-1] = T[b,b+n-1]$$
and $p$ and $q$ have the same form. By Theorem $\ref{SameFormIFFCompPair}$, $p$ and $q$ are a complementary pair of type $k \geq 1$. If $k > 1$, then $\phi_L(p)$ and $\phi_L(q)$ are a complementary pair of type $2k-2 > 1$, so $\phi_L(p) \neq \phi_L(q)$. Therefore $p$ and $q$ are a complementary pair of type 1.
$
$
$\textbf{(b)}$ Suppose $\phi_R(p) = \phi_R(q)$, so $\pi_T[2a+1,2a+2n] = \pi_T[2b+1,2b+2n]$ and $T[2a+1,2a+2n-1] = T[2b+1,2b+2n-1]$. Thus $T_{2a+1} = T_{2b+1}$ implies $T_a = T_b$, so
$$ T[2a,2a+1] = \mu_T(T_{a}) = \mu_T(T_{b}) = T[2b,2b+1]$$
and
$$T[2a,2a+2n-1] = T[2b,2b+2n-1].$$
If $T[a,a+n-1] \neq T[b,b+n-1]$ then $T[2a,2a+2n-1] \neq T[2b,2b+2n-1]$. Hence
$$T[a,a+n-1] = T[b,b+n-1]$$
and $p$ and $q$ have the same form. By Theorem $\ref{SameFormIFFCompPair}$, $p$ and $q$ are a complementary pair of type $k \geq 1$. If $k > 1$, then $\phi_R(p)$ and $\phi_R(q)$ are a complementary pair of type $2k-2 > 1$, so $\phi_R(p) \neq \phi_R(q)$. Therefore $p$ and $q$ are a complementary pair of type 1.
$
$
$\textbf{(c)}$ Suppose $\phi_M(p) = \phi_M(q)$, so $\pi_T[2a+1,2a+2n-1] = \pi_T[2b+1,2b+2n-1]$ and $T[2a+1,2a+2n-2] = T[2b+1,2b+2n-2]$. Thus $T_{2a+1} = T_{2b+1}$ implies $T_{a} = T_{b}$, so
$$ T[2a,2a+1] = \mu_T(T_{a}) = \mu_T(T_{b}) = T[2b,2b+1]$$
and $T_{2a+2n} = T_{2b+2n}$ implies $T_{a+n} = T_{b+n}$, so
$$ T[2a+2n,2a+2n+1] = \mu_T(T_{a+n}) = \mu_T(T_{b+n}) = T[2b+2n,2b+2n+1].$$
Therefore,
$$T[2a,2a+2n+1] = T[2b,2b+2n+1].$$
If $T[a,a+n] \neq T[b,b+n]$ then $T[2a,2a+2n+1] \neq T[2b,2b+2n+1]$. Hence
$$T[a,a+n] = T[b,b+n]$$
and $p$ and $q$ have the same form. By Theorem $\ref{SameFormIFFCompPair}$, $p$ and $q$ are a complementary pair of type $k \geq 1$. If $k > 1$, then $\phi_M(p)$ and $\phi_M(q)$ are a complementary pair of type $2k-3 \geq 1$, so $\phi_M(p) \neq \phi_M(q)$. Therefore $p$ and $q$ are a complementary pair of type 1.
$\qed$
\end{proof}
So when there are complementary pairs of type 1 none of the maps $\phi_L$, $\phi_R$, and $\phi_M$ are injective, and thus they are not bijective. In cases where there are no complementary pairs of type 1 the maps $\phi_L$, $\phi_R$, and $\phi_M$ are injective and the inequalities in Lemma $\ref{UpperBoundForTau}$ become equalities. So we need to know when complementary pairs of type 1 will occur, and how many complementary pairs there are.
\section{Type 1 Pairs}
\label{SecTypeOnePairs}
This section investigates when complementary pairs of type 1 arise and how many such pairs occur. To show when the maps $\phi_L$, $\phi_R$, and $\phi_M$ are bijections we need to consider when complementary pairs of type 1 occur. The following proposition shows when there are complementary pairs of type $k$, for each $k \geq 0$. An induction argument using Proposition $\ref{ImageOfTypeK}$ and Theorem $\ref{SameFormIFFCompPair}$ will show that all complementary pairs of a given length are of the same type.
\begin{proposition}
\label{LengthOfAlphaForAGBForN}
Let $n > 4$ be a natural number and let $p$ and $q$ be subpermutations of $\pi_T$ of length $n+1$ with the same form. There exist $r$ and $c$ so that $n =2^r+c$, where $0 \leq c < 2^r$.
\begin{itemize}
\item[(a)] If $0 \leq c < 2^{r-1}+1$, then either $p = q$ or $p$ and $q$ are a complementary pair of type $c+1$.
\item[(b)] If $2^{r-1}+1 \leq c < 2^r$, then $p = q$.
\end{itemize}
\end{proposition}
\begin{proof}
This will be proved using an induction argument on $r$. By looking at the subpermutations in Appendix $\ref{SecTheSubperms}$ it can be readily verified that the proposition is true for $r = 2$ and $c = 0, 1, 2, 3$, so for $n = 4, 5, 6, 7$. Suppose that $r>2$ and that the statement of the proposition is true whenever $n < 2^r$. It will be shown that it is true for all $n = 2^r+c$ where $0 \leq c < 2^r$.
$
$
$\textbf{(a)}$ Let $n=2^r+c$ with $0 \leq c < 2^{r-1}+1$, and write $p'$ and $q'$ for the two subpermutations of length $n+1$ in the statement. If $p' = q'$ the proposition is satisfied, so assume that $p' \neq q'$. As was shown in the proof of Theorem $\ref{SameFormIFFCompPair}$, if $p' \in Perm_{ev}(n+1)$ and $q' \in Perm_{odd}(n+1)$, then $p'$ and $q'$ cannot have the same form. We must also distinguish whether $n+1$ is even or odd. So there will be four subcases to consider: $p',q' \in Perm_{ev}(n+1)$ or $p',q' \in Perm_{odd}(n+1)$, and $n+1$ even or odd.
$\textbf{Case a.1:}$ Suppose $p',q' \in Perm_{ev}(n+1)$ and $n+1$ is odd, so $c$ is even. There is a $d$ so that $c=2d$, with $0 \leq d < 2^{r-2}+1$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2^r+2d]$ and $q' = \pi_T[2b,2b+2^r+2d]$, and
$$ p = \pi_T[a,a+2^{r-1}+d] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d], $$
$$ p' = \phi(p) \hspace{8.0ex} q' = \phi(q). $$
If $T[a,a+2^{r-1}+d-1] \neq T[b,b+2^{r-1}+d-1]$ then $T[2a,2a+2^r+2d-1] \neq T[2b,2b+2^r+2d-1]$. Hence
$$T[a,a+2^{r-1}+d-1] = T[b,b+2^{r-1}+d-1]$$
and $p$ and $q$ have the same form. If $p=q$ then $p'=q'$, by Corollary $\ref{CorTo_pisqiff}$, thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $d+1$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi(p) = p'$ and $\phi(q) = q'$ are a complementary pair of type $2(d+1)-1 = 2d+1 = c+1$.
$
$
$\textbf{Case a.2:}$ Suppose $p',q' \in Perm_{odd}(n+1)$ and $n+1$ is odd, so $c$ is even. There is a $d$ so that $c=2d$, with $0 \leq d < 2^{r-2}+1$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2^r+2d+1]$ and $q' = \pi_T[2b+1,2b+2^r+2d+1]$, and
$$ p = \pi_T[a,a+2^{r-1}+d+1] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d+1], $$
$$ p' = \phi_M(p) \hspace{8.0ex} q' = \phi_M(q). $$
Since $p'$ and $q'$ have the same form, $T[2a+1,2a+2^r+2d] = T[2b+1,2b+2^r+2d]$. Thus $T_{2a+1} = T_{2b+1}$ implies $T_{a} = T_{b}$, so
$$ T[2a,2a+1] = \mu_T(T_{a}) = \mu_T(T_{b}) = T[2b,2b+1]$$
and $T_{2a+2^r+2d} = T_{2b+2^r+2d}$ implies $T_{a+2^{r-1}+d} = T_{b+2^{r-1}+d}$, so
$$ T[2a+2^r+2d,2a+2^r+2d+1] = \mu_T(T_{a+2^{r-1}+d}) = \mu_T(T_{b+2^{r-1}+d}) = T[2b+2^r+2d,2b+2^r+2d+1].$$
Therefore,
$$T[2a,2a+2^r+2d+1] = T[2b,2b+2^r+2d+1].$$
If $T[a,a+2^{r-1}+d] \neq T[b,b+2^{r-1}+d]$ then $T[2a,2a+2^r+2d+1] \neq T[2b,2b+2^r+2d+1]$. Hence
$$T[a,a+2^{r-1}+d] = T[b,b+2^{r-1}+d]$$
and $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Corollary $\ref{CorTo_pisqiff}$, and $p'=M(\phi(p))=M(\phi(q))=q'$, thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $d+2$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi_M(p) = p'$ and $\phi_M(q) = q'$ are a complementary pair of type $2(d+2)-3 = 2d+1 = c+1$.
$
$
$\textbf{Case a.3:}$ Suppose $p',q' \in Perm_{ev}(n+1)$ and $n+1$ is even, so $c$ is odd. There is a $d$ so that $c=2d+1$, with $0 \leq d < 2^{r-2}+1$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2^r+2d+1]$ and $q' = \pi_T[2b,2b+2^r+2d+1]$, and
$$ p = \pi_T[a,a+2^{r-1}+d+1] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d+1], $$
$$ p' = \phi_L(p) \hspace{8.0ex} q' = \phi_L(q). $$
Since $p'$ and $q'$ have the same form, $T[2a,2a+2^r+2d] = T[2b,2b+2^r+2d]$. Thus $T_{2a+2^r+2d} = T_{2b+2^r+2d}$ implies $T_{a+2^{r-1}+d} = T_{b+2^{r-1}+d}$, so
$$ T[2a+2^r+2d,2a+2^r+2d+1] = \mu_T(T_{a+2^{r-1}+d}) = \mu_T(T_{b+2^{r-1}+d}) = T[2b+2^r+2d,2b+2^r+2d+1]$$
and
$$T[2a,2a+2^r+2d+1] = T[2b,2b+2^r+2d+1].$$
If $T[a,a+2^{r-1}+d] \neq T[b,b+2^{r-1}+d]$ then $T[2a,2a+2^r+2d+1] \neq T[2b,2b+2^r+2d+1]$. Hence
$$T[a,a+2^{r-1}+d] = T[b,b+2^{r-1}+d]$$
and $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$ by Corollary $\ref{CorTo_pisqiff}$, and $p'=L(\phi(p))=L(\phi(q))=q'$, contradicting $p' \neq q'$; thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $d+2$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi_L(p) = p'$ and $\phi_L(q) = q'$ are a complementary pair of type $2(d+2)-2 = 2d+2 = c+1$.
$
$
$\textbf{Case a.4:}$ Suppose $p',q' \in Perm_{odd}(n+1)$ and $n+1$ is even, so $c$ is odd. There is a $d$ so that $c=2d+1$, with $0 \leq d < 2^{r-2}+1$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2^r+2d+2]$ and $q' = \pi_T[2b+1,2b+2^r+2d+2]$, and
$$ p = \pi_T[a,a+2^{r-1}+d+1] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d+1], $$
$$ p' = \phi_R(p) \hspace{8.0ex} q' = \phi_R(q). $$
Since $p'$ and $q'$ have the same form, $T[2a+1,2a+2^r+2d+1] = T[2b+1,2b+2^r+2d+1]$. Thus $T_{2a+1} = T_{2b+1}$ implies $T_{a} = T_{b}$, so
$$ T[2a,2a+1] = \mu_T(T_{a}) = \mu_T(T_{b}) = T[2b,2b+1]$$
and
$$T[2a,2a+2^r+2d+1] = T[2b,2b+2^r+2d+1].$$
If $T[a,a+2^{r-1}+d] \neq T[b,b+2^{r-1}+d]$ then $T[2a,2a+2^r+2d+1] \neq T[2b,2b+2^r+2d+1]$. Hence
$$T[a,a+2^{r-1}+d] = T[b,b+2^{r-1}+d]$$
and $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$ by Corollary $\ref{CorTo_pisqiff}$, and $p'=R(\phi(p))=R(\phi(q))=q'$, contradicting $p' \neq q'$; thus $p \neq q$. By the induction hypothesis, $p$ and $q$ are a complementary pair of type $d+2$. Therefore, by Proposition $\ref{ImageOfTypeK}$, $\phi_R(p) = p'$ and $\phi_R(q) = q'$ are a complementary pair of type $2(d+2)-2 = 2d+2 = c+1$.
$
$
$\textbf{(b)}$ Let $n=2^r+c$ with $2^{r-1}+1 \leq c < 2^r$. For $2^{r-1}+1 \leq c < 2^r-2$ there are again the four subcases from part $(a)$, depending on whether $p',q' \in Perm_{ev}(n+1)$ or $p',q' \in Perm_{odd}(n+1)$ and on whether $n+1$ is even or odd. There are also two additional special cases to consider, namely $c = 2^r-2$ and $c=2^r-1$.
$\textbf{Case b.1:}$ Suppose $p',q' \in Perm_{ev}(n+1)$ and $n+1$ is odd, so $c$ is even. There is a $d$ so that $c=2d$, with $2^{r-2}+1 \leq d < 2^{r-1}$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2^r+2d]$ and $q' = \pi_T[2b,2b+2^r+2d]$, and
$$ p = \pi_T[a,a+2^{r-1}+d] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d], $$
$$ p' = \phi(p) \hspace{8.0ex} q' = \phi(q). $$
As in case $\textbf{a.1}$, $T[a,a+2^{r-1}+d-1] = T[b,b+2^{r-1}+d-1]$, so $p$ and $q$ have the same form. By the induction hypothesis $p = q$, so by Corollary $\ref{CorTo_pisqiff}$, $p' = \phi(p) = \phi(q) = q'$.
$
$
$\textbf{Case b.2:}$ Suppose $p',q' \in Perm_{odd}(n+1)$ and $n+1$ is odd, so $c$ is even. There is a $d$ so that $c=2d$, with $2^{r-2}+1 \leq d < 2^{r-1}$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2^r+2d+1]$ and $q' = \pi_T[2b+1,2b+2^r+2d+1]$, and
$$ p = \pi_T[a,a+2^{r-1}+d+1] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d+1], $$
$$ p' = \phi_M(p) \hspace{8.0ex} q' = \phi_M(q). $$
As in case $\textbf{a.2}$, $T[a,a+2^{r-1}+d] = T[b,b+2^{r-1}+d]$, so $p$ and $q$ have the same form. By the induction hypothesis $p = q$, so by Corollary $\ref{CorTo_pisqiff}$, $\phi(p) = \phi(q)$ and therefore $p' = \phi_M(p) = \phi_M(q) = q'$.
$
$
$\textbf{Case b.3:}$ Suppose $p',q' \in Perm_{ev}(n+1)$ and $n+1$ is even, so $c$ is odd. There is a $d$ so that $c=2d+1$, with $2^{r-2}+1 \leq d < 2^{r-1}$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2^r+2d+1]$ and $q' = \pi_T[2b,2b+2^r+2d+1]$, and
$$ p = \pi_T[a,a+2^{r-1}+d+1] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d+1], $$
$$ p' = \phi_L(p) \hspace{8.0ex} q' = \phi_L(q). $$
As in case $\textbf{a.3}$, $T[a,a+2^{r-1}+d] = T[b,b+2^{r-1}+d]$, so $p$ and $q$ have the same form. By the induction hypothesis $p = q$, so by Corollary $\ref{CorTo_pisqiff}$, $\phi(p) = \phi(q)$ and therefore $p' = \phi_L(p) = \phi_L(q) = q'$.
$
$
$\textbf{Case b.4:}$ Suppose $p',q' \in Perm_{odd}(n+1)$ and $n+1$ is even, so $c$ is odd. There is a $d$ so that $c=2d+1$, with $2^{r-2}+1 \leq d < 2^{r-1}$, and there are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2^r+2d+2]$ and $q' = \pi_T[2b+1,2b+2^r+2d+2]$, and
$$ p = \pi_T[a,a+2^{r-1}+d+1] \hspace{8.0ex} q = \pi_T[b,b+2^{r-1}+d+1], $$
$$ p' = \phi_R(p) \hspace{8.0ex} q' = \phi_R(q). $$
As in case $\textbf{a.4}$, $T[a,a+2^{r-1}+d] = T[b,b+2^{r-1}+d]$, so $p$ and $q$ have the same form. By the induction hypothesis $p = q$, so by Corollary $\ref{CorTo_pisqiff}$, $\phi(p) = \phi(q)$ and therefore $p' = \phi_R(p) = \phi_R(q) = q'$.
$
$
$\textbf{Case b.5:}$ Suppose $c = 2^r-2$. Thus $n = 2^r + 2^r-2 = 2^{r+1} - 2$, and the subpermutations $p'$ and $q'$ will have odd length. There will be two subcases, these being when $p',q' \in Perm_{ev}(n+1)$ and when $p',q' \in Perm_{odd}(n+1)$.
$\textbf{Case b.5.i:}$ Suppose $p',q' \in Perm_{ev}(n+1)$. There are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2^{r+1}-2]$ and $q' = \pi_T[2b,2b+2^{r+1}-2]$, and
$$ p = \pi_T[a,a+2^r-1] \hspace{8.0ex} q = \pi_T[b,b+2^r-1], $$
$$ p' = \phi(p) \hspace{8.0ex} q' = \phi(q). $$
As in cases $\textbf{a.1}$ and $\textbf{b.1}$, $T[a,a+2^r-2] = T[b,b+2^r-2]$, so $p$ and $q$ have the same form. By the induction hypothesis $p = q$, so by Corollary $\ref{CorTo_pisqiff}$, $p' = \phi(p) = \phi(q) = q'$.
$\textbf{Case b.5.ii:}$ Suppose $p',q' \in Perm_{odd}(n+1)$. There are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2^{r+1}-1]$ and $q' = \pi_T[2b+1,2b+2^{r+1}-1]$, and
$$ p = \pi_T[a,a+2^r] \hspace{8.0ex} q = \pi_T[b,b+2^r], $$
$$ p' = \phi_M(p) \hspace{8.0ex} q' = \phi_M(q). $$
As in cases $\textbf{a.2}$ and $\textbf{b.2}$, $T[a,a+2^r-1] = T[b,b+2^r-1]$, so $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Corollary $\ref{CorTo_pisqiff}$, and $p'=M(\phi(p))=M(\phi(q))=q'$. If $p \neq q$ then by case $\textbf{a.1}$, $p$ and $q$ are a complementary pair of type 1. Therefore, by Proposition $\ref{ImageOfTypeK}$, $p' = \phi_M(p) = \phi_M(q) = q'$.
$
$
$\textbf{Case b.6:}$ Suppose $c = 2^r-1$. Thus $n = 2^r + 2^r-1 = 2^{r+1}-1$, and the subpermutations $p'$ and $q'$ will have even length. There will be two subcases, these being when $p',q' \in Perm_{ev}(n+1)$ and when $p',q' \in Perm_{odd}(n+1)$.
$\textbf{Case b.6.i:}$ Suppose $p',q' \in Perm_{ev}(n+1)$. There are numbers $a$ and $b$ so that $p' = \pi_T[2a,2a+2^{r+1}-1]$ and $q' = \pi_T[2b,2b+2^{r+1}-1]$, and
$$ p = \pi_T[a,a+2^r] \hspace{8.0ex} q = \pi_T[b,b+2^r], $$
$$ p' = \phi_L(p) \hspace{8.0ex} q' = \phi_L(q). $$
As in cases $\textbf{a.3}$ and $\textbf{b.3}$, $T[a,a+2^r-1] = T[b,b+2^r-1]$, so $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Corollary $\ref{CorTo_pisqiff}$, and $p'=L(\phi(p))=L(\phi(q))=q'$. If $p \neq q$ then by case $\textbf{a.1}$, $p$ and $q$ are a complementary pair of type 1. Therefore, by Proposition $\ref{ImageOfTypeK}$, $p' = \phi_L(p) = \phi_L(q) = q'$.
$\textbf{Case b.6.ii:}$ Suppose $p',q' \in Perm_{odd}(n+1)$. There are numbers $a$ and $b$ so that $p' = \pi_T[2a+1,2a+2^{r+1}]$ and $q' = \pi_T[2b+1,2b+2^{r+1}]$, and
$$ p = \pi_T[a,a+2^r] \hspace{8.0ex} q = \pi_T[b,b+2^r], $$
$$ p' = \phi_R(p) \hspace{8.0ex} q' = \phi_R(q). $$
As in cases $\textbf{a.4}$ and $\textbf{b.4}$, $T[a,a+2^r-1] = T[b,b+2^r-1]$, so $p$ and $q$ have the same form. If $p=q$ then $\phi(p)=\phi(q)$, by Corollary $\ref{CorTo_pisqiff}$, and $p'=R(\phi(p))=R(\phi(q))=q'$. If $p \neq q$ then by case $\textbf{a.1}$, $p$ and $q$ are a complementary pair of type 1. Therefore, by Proposition $\ref{ImageOfTypeK}$, $p' = \phi_R(p) = \phi_R(q) = q'$.
Therefore the lemma is true when $n=2^r+c$ with $0 \leq c < 2^r$, and therefore for all $n$.
$\qed$
\end{proof}
Thus, only subpermutations of length $2^r+1$ can be a complementary pair of type 1, and we have the following corollary.
\begin{corollary}
\label{BijectionForTheMaps}
If $n \neq 2^r$, for $r \geq 1$, then for any subpermutations $p = \pi_T[a,a+n]$ and $q = \pi_T[b,b+n]$
\begin{itemize}
\item[(a)] $\phi_L(p) = \phi_L(q)$ if and only if $p = q$.
\item[(b)] $\phi_R(p) = \phi_R(q)$ if and only if $p = q$.
\item[(c)] $\phi_M(p) = \phi_M(q)$ if and only if $p = q$.
\end{itemize}
\end{corollary}
\begin{proof}
It should be clear in each case that if $p = q$ then
$$\phi_L(p) = \phi_L(q) \hspace{5.0ex} \phi_R(p) = \phi_R(q) \hspace{5.0ex} \phi_M(p) = \phi_M(q).$$
Suppose $\phi_L(p) = \phi_L(q)$. If $p \neq q$, then by Corollary $\ref{WhenTheMapsFailToBeABijection}$, $p$ and $q$ are a complementary pair of type 1. By Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ cannot be a complementary pair of type 1; therefore $p=q$.
A similar argument shows that if $\phi_R(p) = \phi_R(q)$ then $p=q$, and that if $\phi_M(p) = \phi_M(q)$ then $p=q$.
$\qed$
\end{proof}
We now consider the number of factors $u$ of $T$ of length $2^r$ that have two subpermutations which form a complementary pair of type 1.
\begin{lemma}
\label{NumberOfFactWithLenAlphaOne}
Let $n = 2^r$ or $2^r+1$, with $r \geq 2$. Then there are exactly $2^r$ factors $u$ of $T$ of length $n$ so that there exist subpermutations $p = \pi_T[a,a+n]$ and $q = \pi_T[b,b+n]$ with form $u$ and $p \neq q$.
\end{lemma}
\begin{proof}
It can be readily verified by looking at the subpermutations in Appendix $\ref{SecTheSubperms}$ that the lemma is true for $r=2$. So there are 4 factors $u$ of $T$ of length 4 with two distinct subpermutations of length 5 with form $u$, and there are 4 factors $v$ of $T$ of length 5 with two distinct subpermutations of length 6 with form $v$.
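For concreteness, these factors can be read off from Appendix $\ref{SecTheSubperms}$: the factors of length 4 admitting two distinct subpermutations of the same form are $0011$, $0110$, $1001$ and $1100$, and the factors of length 5 are $00110$, $01100$, $10011$ and $11001$.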
Suppose $r \geq 2$ and that the lemma is true for $r$; we show that it is true for $r+1$. Let $\Gamma$ be the set of factors $u$ of length $2^r$ for which there exist distinct subpermutations $p$ and $q$ with form $u$; by the induction hypothesis $\abs{\Gamma} = 2^r$, and by Proposition $\ref{LengthOfAlphaForAGBForN}$ any such $p$ and $q$ are a complementary pair of type 1. Let $\Gamma'$ be the set of factors $u$ of length $2^{r+1}$ for which there exist distinct subpermutations with form $u$. Similarly, let $\Delta$ be the set of factors $v$ of length $2^r+1$ for which there exist distinct subpermutations $p$ and $q$ with form $v$; by the induction hypothesis $\abs{\Delta} = 2^r$, and by Proposition $\ref{LengthOfAlphaForAGBForN}$ any such $p$ and $q$ are a complementary pair of type 2. Finally, let $\Delta'$ be the set of factors $v$ of length $2^{r+1}+1$ for which there exist distinct subpermutations with form $v$.
The sizes of $\Gamma'$ and $\Delta'$ will be considered in two cases.
$
$
$\textbf{Case $\Gamma'$:}$ Any factor in $\Gamma'$ will either start in an even position or an odd position, call these sets of factors $\Gamma'_{ev}$ and $\Gamma'_{odd}$ and hence
$$ \Gamma' = \Gamma'_{ev} \cup \Gamma'_{odd} .$$
Since the factors are of length $2^{r+1} \geq 8$, for any factors $s \in \Gamma'_{ev}$ and $t \in \Gamma'_{odd}$, $s \neq t$, thus
$$ \Gamma'_{ev} \cap \Gamma'_{odd} = \emptyset.$$
There will be two subcases to establish the size of $\Gamma'$, first by showing the size of $\Gamma'_{ev}$ and then the size of $\Gamma'_{odd}$.
$\textbf{Subcase $\Gamma'_{ev}$:}$ For $u \in \Gamma$ there are distinct subpermutations $p$ and $q$ of $\pi_T$ of length $2^r+1$ with form $u$. By Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ are a complementary pair of type 1. By Proposition $\ref{ImageOfTypeK}$, $\phi(p)$ and $\phi(q)$ are a complementary pair of type 1, so $\phi(p) \neq \phi(q)$ and they both have form $\mu_T(u)$. Therefore for each $u \in \Gamma$, $\mu_T(u) \in \Gamma'_{ev}$. Hence
$$ \abs{\Gamma'_{ev}} \geq \abs{\Gamma}.$$
Suppose that $u' \in \Gamma'_{ev}$, so there are subpermutations $p' = \pi_T[2a, 2a+2^{r+1}]$ and $q' = \pi_T[2b, 2b+2^{r+1}]$ with form $u' = T[2a, 2a+2^{r+1}-1] = T[2b, 2b+2^{r+1}-1]$, so that $p' \neq q'$. Hence there exist subpermutations $p$ and $q$ so that $\phi(p) = p'$ and $\phi(q) = q'$. As in case $\textbf{a.1}$ of Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ are a complementary pair of type 1 with form $u$ where $\mu_T(u) = u'$. Thus for each $u' \in \Gamma'_{ev}$, there is some $u \in \Gamma$ so that $\mu_T(u) = u'$. Hence
$$ \abs{\Gamma'_{ev}} \leq \abs{\Gamma}.$$
Therefore $\abs{\Gamma'_{ev}} = \abs{\Gamma}$.
$\textbf{Subcase $\Gamma'_{odd}$:}$ For $u \in \Delta$, $u = T[a,a+2^r]$, there are distinct subpermutations $p$ and $q$ of $\pi_T$ of length $2^r+2$ with form $u$. By Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ are a complementary pair of type 2. By Proposition $\ref{ImageOfTypeK}$, $\phi(p)$ and $\phi(q)$ are a complementary pair of type 3 with form $\mu_T(u) = T[2a,2a+2^{r+1}+1]$ and $\phi_M(p)$ and $\phi_M(q)$ are a complementary pair of type 1, so $\phi_M(p) \neq \phi_M(q)$ and they both have form $T[2a+1,2a+2^{r+1}]$. Therefore for each $T[a,a+2^r] \in \Delta$, $T[2a+1,2a+2^{r+1}] \in \Gamma'_{odd}$. Hence
$$ \abs{\Gamma'_{odd}} \geq \abs{\Delta}.$$
Suppose that $u' \in \Gamma'_{odd}$, so there are subpermutations $p' = \pi_T[2a+1, 2a+2^{r+1}+1]$ and $q' = \pi_T[2b+1, 2b+2^{r+1}+1]$ with form $u' = T[2a+1, 2a+2^{r+1}] = T[2b+1, 2b+2^{r+1}]$, so that $p' \neq q'$. Hence there exist subpermutations $p$ and $q$ so that $\phi_M(p) = p'$ and $\phi_M(q) = q'$. As in case $\textbf{a.2}$ of Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ are a complementary pair of type 2 with form $T[a,a+2^r]$. Thus for each $u' \in \Gamma'_{odd}$, there is some $T[a,a+2^r] \in \Delta$ so that $u' = T[2a+1, 2a+2^{r+1}]$. Hence
$$ \abs{\Gamma'_{odd}} \leq \abs{\Delta}.$$
Therefore $\abs{\Gamma'_{odd}} = \abs{\Delta}$.
Therefore
$$\abs{\Gamma'} = \abs{\Gamma'_{ev}} +\abs{\Gamma'_{odd}} = \abs{\Gamma} + \abs{\Delta} = 2^r + 2^r = 2^{r+1}. $$
$
$
$\textbf{Case $\Delta'$:}$ Any factor in $\Delta'$ will either start in an even position or an odd position, call these sets of factors $\Delta'_{ev}$ and $\Delta'_{odd}$ and hence
$$ \Delta' = \Delta'_{ev} \cup \Delta'_{odd} .$$
Since the factors are of length $2^{r+1}+1 \geq 8$, for any factors $s \in \Delta'_{ev}$ and $t \in \Delta'_{odd}$, $s \neq t$, thus
$$ \Delta'_{ev} \cap \Delta'_{odd} = \emptyset.$$
There will be two subcases to establish the size of $\Delta'$, first by showing the size of $\Delta'_{ev}$ and then the size of $\Delta'_{odd}$.
$\textbf{Subcase $\Delta'_{ev}$:}$ For $u \in \Delta$, $u = T[a,a+2^r]$, there are distinct subpermutations $p$ and $q$ of $\pi_T$ of length $2^r+2$ with form $u$. By Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ are a complementary pair of type 2. By Proposition $\ref{ImageOfTypeK}$, $\phi(p)$ and $\phi(q)$ are a complementary pair of type 3 with form $\mu_T(u) = T[2a,2a+2^{r+1}+1]$ and $\phi_L(p)$ and $\phi_L(q)$ are a complementary pair of type 2, so $\phi_L(p) \neq \phi_L(q)$ and they both have form $T[2a,2a+2^{r+1}]$. Therefore for each $T[a,a+2^r] \in \Delta$, $T[2a,2a+2^{r+1}] \in \Delta'_{ev}$. Hence
$$ \abs{\Delta'_{ev}} \geq \abs{\Delta}.$$
Suppose that $u' \in \Delta'_{ev}$, so there are subpermutations $p' = \pi_T[2a, 2a+2^{r+1}+1]$ and $q' = \pi_T[2b, 2b+2^{r+1}+1]$ with form $u' = T[2a, 2a+2^{r+1}] = T[2b, 2b+2^{r+1}]$, so that $p' \neq q'$. Hence there exist subpermutations $p$ and $q$ so that $\phi_L(p) = p'$ and $\phi_L(q) = q'$. As in case $\textbf{a.3}$ of Proposition $\ref{LengthOfAlphaForAGBForN}$, $p$ and $q$ are a complementary pair of type 2 with form $u = T[a,a+2^r]$. Thus for each $u' \in \Delta'_{ev}$, there is some $T[a,a+2^r] \in \Delta$ so that $u' = T[2a, 2a+2^{r+1}]$. Hence
$$ \abs{\Delta'_{ev}} \leq \abs{\Delta}.$$
Therefore $\abs{\Delta'_{ev}} = \abs{\Delta}$.
$\textbf{Subcase $\Delta'_{odd}$:}$ A symmetric argument to the argument used in Subcase $\Delta'_{ev}$ will show $\abs{\Delta'_{odd}} = \abs{\Delta}$.
Therefore
$$\abs{\Delta'} = \abs{\Delta'_{ev}} +\abs{\Delta'_{odd}} = \abs{\Delta} + \abs{\Delta} = 2^r + 2^r = 2^{r+1}. $$
$\qed$
\end{proof}
Now we know when there are complementary pairs of type 1, and how many pairs of type 1 there are in each case.
\section{Permutation Complexity of $T$}
\label{FormulaForPermComp}
We are now ready to give a recursive definition for the permutation complexity of $T$. To show this we consider when the maps $\phi$, $\phi_L$, $\phi_R$, and $\phi_M$ are bijective. After the recursive definition is given, it will be shown that the recursive definition yields a formula for the permutation complexity.
\begin{proposition}
\label{RecursivePermComp}
Let $n \in \N$. When $2n+1 = 2^r-1$, for some $r \geq 3$:
$$ \tau_T(2n+1) = \tau_T(n+1) + \tau_T(n+2) - 2^{r-1}. $$
When $2n = 2^r$, for some $r \geq 3$:
$$ \tau_T(2n) = 2(\tau_T(n+1) - 2^{r-1}). $$
For all other $n \geq 3$:
\begin{align*}
\tau_T(2n+1) &= \tau_T(n+1) + \tau_T(n+2) \\
\tau_T(2n) &= 2(\tau_T(n+1)).
\end{align*}
\end{proposition}
\begin{proof}
For any $n$,
$$ \tau_T(n) = \abs{Perm(n)} = \abs{Perm_{ev}(n)} + \abs{Perm_{odd}(n)}. $$
This proof will be done in three cases. The first is when $2n+1 = 2^r-1$ for some $r \geq 3$, the second is when $2n = 2^r$ for some $r \geq 3$, and the third for all other $n$.
$\textbf{Case $2n+1 = 2^r-1$: }$ It can be readily verified by looking at the subpermutations in Appendix $\ref{SecTheSubperms}$ that the proposition is true for $r=3$. Suppose $r \geq 3$ and that the proposition is true for $r$; we show that it is true for $r+1$. So $2n+1 = 2^{r+1}-1$, and
$$ \abs{Perm(2n+1)} = \abs{Perm_{ev}(2n+1)} + \abs{Perm_{odd}(2n+1)}. $$
Since the map
$$\phi: Perm(n+1) \rightarrow Perm_{ev}(2n+1)$$
is a bijection, the size of $Perm(n+1)$ is the same as the size of $Perm_{ev}(2n+1)$. Therefore
$$ \abs{Perm_{ev}(2n+1)} = \abs{Perm(n+1)} = \tau_T(n+1). $$
Then the map
$$ \phi_M: Perm(n+2) \rightarrow Perm_{odd}(2n+1) $$
is a surjective map, so
$$ \abs{Perm_{odd}(2n+1)} \leq \abs{Perm(n+2)}, $$
but it is not injective because $n+2 = 2^r + 1$. By Proposition $\ref{LengthOfAlphaForAGBForN}$ and Lemma $\ref{NumberOfFactWithLenAlphaOne}$ there are $2^r$ factors $u$ of length $2^r$ admitting a complementary pair of type 1, so there are exactly $2^{r}$ complementary pairs of type 1 in $Perm(n+2)$. Thus $2^{r+1}$ subpermutations in $Perm(n+2)$ are mapped to $2^r$ subpermutations in $Perm_{odd}(2n+1)$ under $\phi_M$. The remaining $\abs{Perm(n+2)} - 2^{r+1}$ subpermutations in $Perm(n+2)$ belong to no complementary pair of type 1, and thus their images under $\phi_M$ are pairwise distinct. Hence
$$ \abs{Perm_{odd}(2n+1)} = \left( \abs{Perm(n+2)} - 2^{r+1} \right) + 2^r = \tau_T(n+2) - 2^r. $$
Therefore
$$ \tau_T(2n+1) = \tau_T(n+1) + \tau_T(n+2) - 2^r. $$
$
$
$\textbf{Case $2n = 2^r$: }$ It can be readily verified by looking at the subpermutations in Appendix $\ref{SecTheSubperms}$ that the proposition is true for $r=3$. Suppose $r \geq 3$ and that the proposition is true for $r$; we show that it is true for $r+1$. So $2n = 2^{r+1}$, and
$$ \abs{Perm(2n)} = \abs{Perm_{ev}(2n)} + \abs{Perm_{odd}(2n)}. $$
The map
$$ \phi_L: Perm(n+1) \rightarrow Perm_{ev}(2n) $$
is a surjective map, so
$$ \abs{Perm_{ev}(2n)} \leq \abs{Perm(n+1)}, $$
but it is not injective because $n+1 = 2^r + 1$. By Proposition $\ref{LengthOfAlphaForAGBForN}$ and Lemma $\ref{NumberOfFactWithLenAlphaOne}$ there are $2^r$ factors $u$ of length $2^r$ admitting a complementary pair of type 1, so there are exactly $2^{r}$ complementary pairs of type 1 in $Perm(n+1)$. Thus $2^{r+1}$ subpermutations in $Perm(n+1)$ are mapped to $2^r$ subpermutations in $Perm_{ev}(2n)$ under $\phi_L$. The remaining $\abs{Perm(n+1)} - 2^{r+1}$ subpermutations in $Perm(n+1)$ belong to no complementary pair of type 1, and thus their images under $\phi_L$ are pairwise distinct. Hence
$$ \abs{Perm_{ev}(2n)} = \left( \abs{Perm(n+1)} - 2^{r+1} \right) + 2^r = \abs{Perm(n+1)} - 2^r. $$
The map
$$ \phi_R: Perm(n+1) \rightarrow Perm_{odd}(2n) $$
is a surjective map, so
$$ \abs{Perm_{odd}(2n)} \leq \abs{Perm(n+1)}, $$
but it is not injective because $n+1 = 2^r + 1$. By a similar argument to above we can see that
$$ \abs{Perm_{odd}(2n)} = \abs{Perm(n+1)} - 2^r. $$
Therefore
$$ \tau_T(2n) = (\abs{Perm(n+1)} - 2^r) + (\abs{Perm(n+1)} - 2^r) = 2(\tau_T(n+1) - 2^r). $$
$
$
$\textbf{Case $n \geq 3$: }$ It can be readily verified by looking at the subpermutations in Appendix $\ref{SecTheSubperms}$ that the proposition is true for $n=3$. Suppose $n \geq 3$ and that the proposition is true for $n$; we show that it is true for $n+1$. Since $2(n+1)+1, 2(n+1) \notin \{2^r-1, 2^r \mid r \geq 2\}$, we have $n+2, n+3 \notin \{ 2^r+1 \mid r \geq 2 \}$. So for $2(n+1)$ and $2(n+1) + 1$ we know that the maps
\begin{align*}
&\phi: Perm(n+2) \rightarrow Perm_{ev}(2(n+1)+1) \\
&\phi_L: Perm(n+2) \rightarrow Perm_{ev}(2(n+1)) \\
&\phi_R: Perm(n+2) \rightarrow Perm_{odd}(2(n+1)) \\
&\phi_M: Perm(n+3) \rightarrow Perm_{odd}(2(n+1)+1)
\end{align*}
are all bijections. Therefore:
\begin{align*}
&\abs{Perm_{ev}(2(n+1)+1)} = \abs{Perm(n+2)} = \tau_T(n+2) \\
&\abs{Perm_{ev}(2(n+1))} = \abs{Perm(n+2)} = \tau_T(n+2) \\
&\abs{Perm_{odd}(2(n+1))} = \abs{Perm(n+2)} = \tau_T(n+2) \\
&\abs{Perm_{odd}(2(n+1)+1)} = \abs{Perm(n+3)} = \tau_T(n+3).
\end{align*}
So:
\begin{align*}
&\tau_T(2(n+1)) = \abs{Perm_{ev}(2(n+1))} + \abs{Perm_{odd}(2(n+1))} = 2( \tau_T(n+2)) \\
&\tau_T(2(n+1)+1) = \abs{Perm_{ev}(2(n+1)+1)} + \abs{Perm_{odd}(2(n+1)+1)} = \tau_T(n+2) + \tau_T(n+3).
\end{align*}
$\qed$
\end{proof}
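To illustrate the recursion, the values $\tau_T(7)=18$, $\tau_T(8)=20$ and $\tau_T(9)=30$ can be read off from the subpermutations listed in Appendix $\ref{SecTheSubperms}$, and the proposition then gives, for example,
\begin{align*}
\tau_T(13) &= \tau_T(7)+\tau_T(8) = 38, & \tau_T(12) &= 2(\tau_T(7)) = 36,\\
\tau_T(15) &= \tau_T(8)+\tau_T(9)-2^3 = 42, & \tau_T(16) &= 2(\tau_T(9)-2^3) = 44,
\end{align*}
the last two being instances of the special cases $2n+1 = 2^4-1$ and $2n = 2^4$.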
\begin{theorem}
\label{PermCompIsTheFormula}
For any $n \geq 6$, where $n = 2^a + b$ with $0 < b \leq 2^a$,
$$ \tau_T(n) = 2(2^{a+1}+b-2).$$
\end{theorem}
\begin{proof}
The proof will be done by induction on $n$. The above formula can be readily verified by looking at the subpermutations listed in Appendix $\ref{SecTheSubperms}$ for $n \leq 9$. Suppose the theorem is true for all values less than or equal to $2n$.
$\textbf{Case $2n+1 = 2^a-1$: }$ Suppose $2n+1 = 2^a-1 = 2^{a-1}+2^{a-1} - 1$. Then $n = 2^{a-1}-1$, so $n+1 = 2^{a-1} = 2^{a-2}+2^{a-2}$ and $n+2=2^{a-1}+1$. Thus:
\begin{align*}
\tau_T(n+1) &= 2(2^{a-2+1}+2^{a-2}-2) = 2(2^{a-1}+2^{a-2}-2) = 2(3(2^{a-2})-2)\\
\tau_T(n+2) &= 2(2^{a-1+1}+1-2) = 2(2^a-1)
\end{align*}
From Proposition $\ref{RecursivePermComp}$:
\begin{align*}
\tau_T(2n+1) &= 2(3(2^{a-2})-2) + 2(2^a-1) - 2^{a-1} = 2(3(2^{a-2})-2 + 2^a-1 - 2^{a-2}) \\
&= 2(2(2^{a-2}) + 2^a-3) = 2(2^a + (2^{a-1}-1) - 2)
\end{align*}
Since $2n+1 = 2^{a-1} + (2^{a-1}-1)$, this is the value asserted by the formula.
$
$
$\textbf{Case $2n+2 = 2(n+1) = 2^a$: }$ Suppose $2n+2 = 2(n+1) = 2^a = 2^{a-1}+2^{a-1}$:
\begin{align*}
\tau_T(2(n+1)) &= 2(2(2^a-1) - 2^{a-1}) = 2(2^{a+1} - 2^{a-1} - 2) = 2(3(2^{a-1}) - 2)\\
&= 2(2(2^{a-1}) + 2^{a-1} - 2) = 2(2^a + 2^{a-1} - 2)
\end{align*}
Since $2(n+1) = 2^{a-1} + 2^{a-1}$, this again agrees with the formula.
$
$
$\textbf{Case Else: }$ Suppose $2n+1 = 2^a + b$, $2n+2 = 2(n+1) = 2^a + b+1$, and $0 < b < 2^a - 1$. Since $2n+1 = 2^a + b$ is odd, $b$ is odd. So $n = 2^{a-1}+\frac{b-1}{2}$, $n+1 = 2^{a-1}+\frac{b+1}{2}$, and $n+2 = 2^{a-1}+\frac{b+3}{2}$. Thus:
\begin{align*}
\tau_T(n+1) &= 2(2^a + \frac{b+1}{2} -2)\\
\tau_T(n+2) &= 2(2^a + \frac{b+3}{2} -2).
\end{align*}
From Proposition $\ref{RecursivePermComp}$:
\begin{align*}
\tau_T(2n+1) &= 2(2^a + \frac{b+1}{2} -2) + 2(2^a + \frac{b+3}{2} -2) = 2(2^a + 2^a + \frac{b+1}{2}+ \frac{b+3}{2} -2 -2)\\
&= 2(2^{a+1} + \frac{2b+4}{2} -4) = 2(2^{a+1} + b -2) \\
\\
\tau_T(2(n+1)) &= 2(2(2^a + \frac{b+3}{2} -2)) = 2(2^{a+1} + b+3 -4)\\
&= 2(2^{a+1} + (b+1) -2).
\end{align*}
Therefore, for all $n \geq 6$, where $n = 2^a + b$ with $0 < b \leq 2^a$, $ \tau_T(n) = 2(2^{a+1}+b-2)$.
$\qed$
\end{proof}
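As a quick consistency check, the sample values computed after Proposition $\ref{RecursivePermComp}$ agree with this formula: writing $13=2^3+5$, $15=2^3+7$ and $16=2^3+8$ gives
$$ \tau_T(13)=2(2^4+5-2)=38, \hspace{4.0ex} \tau_T(15)=2(2^4+7-2)=42, \hspace{4.0ex} \tau_T(16)=2(2^4+8-2)=44. $$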
\section{Conclusion}
\label{SecConclusion}
There seem to be some natural ways to continue this research. For the binary doubling map $\delta$, defined by $\delta(0)=00$ and $\delta(1)=11$, it has been shown that $T$ and $\delta(T)$ have the same factor complexity \cite{AberBrle02}. One natural question is: do $T$ and $\delta(T)$ have the same permutation complexity? The answer is no. As can be seen in Appendix $\ref{SecTheSubperms}$, $\tau_T(5) = 14$, whereas $\tau_{\delta(T)}(5) = 16$. With $T$, there are at most two distinct subpermutations that have the same form, but with $\delta(T)$ there are cases where three subpermutations have the same form. One open question is: what is the permutation complexity of $\delta(T)$?
This paper also investigates the action of the morphism $\mu_T$ on the subpermutations of $\pi_T$. Since $\mu_T$ is an order preserving map, we know that if $\pi_T[a,a+n]$ and $\pi_T[b,b+n]$ are distinct subpermutations then $\pi_T[2a,2a+2n] \neq \pi_T[2b,2b+2n]$. This seems to hold in general for binary words that are fixed points of morphisms, by an argument similar to that of Lemma $\ref{pISqIFFppISqp}$, but the converse is not true in general. Another open question is to investigate properties of infinite permutations associated with aperiodic binary words that are fixed points of a morphism. For such words, is there a way to define a mapping on the subpermutations of $\pi_\w$ similar to the map $\phi$ defined on the subpermutations of $\pi_T$?
These are only a couple of the open questions in the area of permutation complexity.
\paragraph{Acknowledgements:}
Steve Widmer thanks Luca Zamboni and Amy Glen for comments and suggestions that helped him to improve and clarify this paper.
\appendix
\section{Subpermutations of $\pi_T$}
\label{SecTheSubperms}
The subpermutations and their form for factors of length 1 through 8 are shown below.
$$ 0 : [1 2] \hspace{5.0ex} 1 : [2 1] $$
\begin{align*}
01 : [1 3 2] \hspace{2.0ex} [2 3 1] \hspace{4.0ex} 00 : [1 2 3]& \\
10 : [3 1 2] \hspace{2.0ex} [2 1 3] \hspace{4.0ex} 11 : [3 2 1]&
\end{align*}
\begin{align*}
010 : [2 4 1 3] \hspace{2.0ex} [1 3 2 4] \hspace{5.0ex} 001 : [1 2 4 3] \hspace{5.0ex} 100 : [3 1 2 4]& \\
101 : [4 2 3 1] \hspace{2.0ex} [3 1 4 2] \hspace{5.0ex} 011 : [2 4 3 1] \hspace{5.0ex} 110 : [4 3 1 2]&
\end{align*}
\begin{align*}
0011 : [2 3 5 4 1] \hspace{2.0ex} [1 3 5 4 2]& \hspace{5.0ex} 0010 : [1 2 4 3 5] \hspace{5.0ex} 1010 : [5 2 4 1 3]\\
0110 : [2 5 4 1 3] \hspace{2.0ex} [3 5 4 1 2]& \hspace{5.0ex} 0100 : [2 4 1 3 5] \hspace{5.0ex} 1011 : [4 2 5 3 1]\\
1001 : [4 1 2 5 3] \hspace{2.0ex} [3 1 2 5 4]& \hspace{5.0ex} 0101 : [1 4 2 5 3] \hspace{5.0ex} 1101 : [5 4 2 3 1]\\
1100 : [5 3 1 2 4] \hspace{2.0ex} [4 3 1 2 5]&
\end{align*}
\begin{align*}
00110 : [2 4 6 5 1 3] \hspace{2.0ex} [1 3 6 5 2 4] \hspace{5.0ex} 00101 : [1 2 5 3 6 4] \hspace{5.0ex} 10010 : [4 1 2 5 3 6]& \\
01100 : [3 6 4 1 2 5] \hspace{2.0ex} [2 5 4 1 3 6] \hspace{5.0ex} 01001 : [2 5 1 3 6 4] \hspace{5.0ex} 10100 : [5 2 4 1 3 6]& \\
10011 : [5 2 3 6 4 1] \hspace{2.0ex} [4 1 3 6 5 2] \hspace{5.0ex} 01011 : [2 5 3 6 4 1] \hspace{5.0ex} 10110 : [5 2 6 4 1 3]& \\
11001 : [6 4 1 2 5 3] \hspace{2.0ex} [5 3 1 2 6 4] \hspace{5.0ex} 01101 : [3 6 5 2 4 1] \hspace{5.0ex} 11010 : [6 5 2 4 1 3]&
\end{align*}
\begin{align*}
011001 : [3 7 5 1 2 6 4] \hspace{2.0ex} [2 6 4 1 3 7 5] \hspace{5.0ex} 001011 : [1 3 6 4 7 5 2] \hspace{5.0ex} 100101 : [4 1 2 6 3 7 5]& \\
100110 : [6 2 4 7 5 1 3] \hspace{2.0ex} [5 1 3 7 6 2 4] \hspace{5.0ex} 001100 : [2 4 7 5 1 3 6] \hspace{5.0ex} 101001 : [6 2 5 1 3 7 4]& \\
001101 : [2 4 7 6 3 5 1] \hspace{5.0ex} 101100 : [5 2 6 4 1 3 7]& \\
010010 : [2 5 1 3 6 4 7] \hspace{5.0ex} 101101 : [6 3 7 5 2 4 1]& \\
010011 : [3 6 2 4 7 5 1] \hspace{5.0ex} 110010 : [6 4 1 2 5 3 7]& \\
010110 : [2 6 3 7 5 1 4] \hspace{5.0ex} 110011 : [6 4 1 3 7 5 2]& \\
011010 : [4 7 6 2 5 1 3] \hspace{5.0ex} 110100 : [7 5 2 4 1 3 6]&
\end{align*}
\begin{align*}
0010110 : [1 3 7 4 8 6 2 5] \hspace{5.0ex} 0101100 : [2 6 3 7 5 1 4 8] \hspace{5.0ex} 1001011 : [5 1 3 7 4 8 6 2] \hspace{5.0ex} 1011001 : [6 2 7 4 1 3 8 5]&\\
0011001 : [2 4 8 6 1 3 7 5] \hspace{5.0ex} 0101101 : [3 7 4 8 6 2 5 1] \hspace{5.0ex} 1001100 : [6 2 4 8 5 1 3 7] \hspace{5.0ex} 1011010 : [7 4 8 6 2 5 1 3]&\\
0011010 : [2 5 8 7 3 6 1 4] \hspace{5.0ex} 0110010 : [3 7 5 1 2 6 4 8] \hspace{5.0ex} 1001101 : [6 2 4 8 7 3 5 1] \hspace{5.0ex} 1100101 : [7 4 1 2 6 3 8 5]&\\
0100101 : [2 5 1 3 7 4 8 6] \hspace{5.0ex} 0110011 : [3 7 5 1 4 8 6 2] \hspace{5.0ex} 1010010 : [6 2 5 1 3 7 4 8] \hspace{5.0ex} 1100110 : [7 5 1 3 8 6 2 4]&\\
0100110 : [3 7 2 5 8 6 1 4] \hspace{5.0ex} 0110100 : [4 8 6 2 5 1 3 7] \hspace{5.0ex} 1010011 : [7 3 6 2 4 8 5 1] \hspace{5.0ex} 1101001 : [8 6 2 5 1 3 7 4]&
\end{align*}
\begin{align*}
00101101 : [2 4 8 5 9 7 3 6 1] \hspace{2.0ex} [1 4 8 5 9 7 3 6 2]& \hspace{5.0ex} 00101100 : [1 3 7 4 8 6 2 5 9] \hspace{5.0ex} 10011001 : [7 2 4 9 6 1 3 8 5] \\
01001011 : [3 6 1 4 8 5 9 7 2] \hspace{2.0ex} [2 6 1 4 8 5 9 7 3]& \hspace{5.0ex} 00110010 : [2 4 8 6 1 3 7 5 9] \hspace{5.0ex} 10011010 : [7 2 5 9 8 3 6 1 4] \\
01011010 : [4 8 5 9 7 2 6 1 3] \hspace{2.0ex} [3 8 5 9 7 2 6 1 4]& \hspace{5.0ex} 00110100 : [2 5 9 7 3 6 1 4 8] \hspace{5.0ex} 10100110 : [8 3 7 2 5 9 6 1 4] \\
01101001 : [4 9 7 2 6 1 3 8 5] \hspace{2.0ex} [5 9 7 2 6 1 3 8 4]& \hspace{5.0ex} 01001100 : [3 7 2 5 9 6 1 4 8] \hspace{5.0ex} 10110011 : [7 3 8 5 1 4 9 6 2] \\
10010110 : [6 1 3 8 4 9 7 2 5] \hspace{2.0ex} [5 1 3 8 4 9 7 2 6]& \hspace{5.0ex} 01011001 : [2 7 3 8 5 1 4 9 6] \hspace{5.0ex} 11001011 : [8 5 1 3 7 4 9 6 2] \\
10100101 : [7 2 5 1 3 8 4 9 6] \hspace{2.0ex} [6 2 5 1 3 8 4 9 7]& \hspace{5.0ex} 01100101 : [3 8 5 1 2 7 4 9 6] \hspace{5.0ex} 11001101 : [8 6 2 4 9 7 3 5 1] \\
10110100 : [8 4 9 6 2 5 1 3 7] \hspace{2.0ex} [7 4 9 6 2 5 1 3 8]& \hspace{5.0ex} 01100110 : [3 8 6 1 4 9 7 2 5] \hspace{5.0ex} 11010011 : [9 7 3 6 2 4 8 5 1] \\
11010010 : [9 6 2 5 1 3 7 4 8] \hspace{2.0ex} [8 6 2 5 1 3 7 4 9]&
\end{align*}
\end{document}
\begin{document}
\begin{abstract}
We consider a framework for representing double loop spaces (and more generally $E_2$ spaces) as commutative monoids. There are analogous commutative rectifications of braided monoidal structures and we use this framework to define iterated double deloopings. We also consider commutative rectifications of $E_{\infty}$ spaces and symmetric monoidal categories and we relate this to the category of symmetric spectra.
\end{abstract}
\subjclass[2000]{Primary 18D10, 18D50, 55P48; Secondary 55P43}
\keywords{Braided monoidal categories, double loop spaces, diagram spaces}
\title{Braided injections and double loop spaces}
\section{Introduction}
The study of multiplicative structures on spaces has a long history in algebraic topology. For many spaces of interest the notion of a strictly associative and commutative multiplication is too rigid and must be replaced by the more flexible notion of an $E_{\infty}$ multiplication encoding higher homotopies between iterated products. This is analogous to the situation for categories where strictly commutative multiplications rarely occur in practice and the more useful $E_{\infty}$ notion is that of a symmetric monoidal structure. Similar remarks apply to multiplicative structures on other types of objects. However, for certain kinds of applications it is desirable to be able to replace $E_{\infty}$ structures by strictly commutative ones, and this can sometimes be achieved by modifying the underlying category of objects under consideration. An example of this is the introduction of modern categories of spectra (in the sense of stable homotopy theory) \cite{EKMM,HSS,MMSS} equipped with symmetric monoidal smash products. These categories of spectra have homotopy categories equivalent to the usual stable homotopy category but come with refined multiplicative structures allowing the rectification of $E_{\infty}$ ring spectra to strictly commutative ring spectra. This has proven useful for the import of ideas and constructions from commutative algebra into stable homotopy theory. Likewise there are symmetric monoidal refinements of spaces \cite{BCS,Sagave-Schlichtkrull} allowing for analogous rectifications of $E_{\infty}$ structures.
Our main objective in this paper is to construct similar commutative rectifications in braided monoidal contexts. In order to provide a setting for this we introduce the category $\mathfrak{B}$ of \emph{braided injections}, see Section \ref{sec:braided-injections}. This is a braided monoidal small category that relates to the category $\mathcal I$ of finite sets and injections in the same way the braid groups relate to the symmetric groups.
We first explain how our rectification works in the setting of small categories $\mathit{Cat}$ and let $\mathfrak{B}r$-$\mathit{Cat}$ denote the category of braided (strict) monoidal small categories.
Let $\mathit{Cat}^{\mathfrak{B}}$ be the diagram category of functors from $\mathfrak{B}$ to $\mathit{Cat}$ and let us refer to such functors as
\emph{$\mathfrak{B}$-categories}. The category $\mathit{Cat}^{\mathfrak{B}}$ inherits a braided monoidal convolution product from $\mathfrak{B}$ and there is a corresponding category $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ of braided monoidal $\mathfrak{B}$-categories. A morphism $A\to A'$ in $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ is said to be a \emph{$\mathfrak{B}$-equivalence} if the induced functor of Grothendieck constructions $\mathfrak{B}\!\int\!A\to \mathfrak{B}\!\int\!A'$ is a weak equivalence of categories in the usual sense. We write $w_{\mathfrak{B}}$ for the class of $\mathfrak{B}$-equivalences and $w$ for the class of morphisms in $\mathfrak{B}r$-$\mathit{Cat}$ whose underlying functors are weak equivalences. The following rectification theorem is obtained by combining Proposition~\ref{prop:Bint-Delta--braided-equivalence} and Theorem~\ref{thm:Braided-B-Cat-rectification}.
\begin{theorem}\label{thm:main-theorem-1}
The Grothendieck construction $\mathfrak{B}\!\int\!$ and the constant embedding $\Delta$ define an equivalence of the localized categories
\[
\textstyle\mathfrak{B}\!\int\colon\thinspace \text{$\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$}[w_{\mathfrak{B}}^{-1}]\simeq
\text{$\mathfrak{B}r$-$\mathit{Cat}$}[w^{-1}]:\!\Delta
\]
and every object in $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ is naturally $\mathfrak{B}$-equivalent to a strictly commutative $\mathfrak{B}$-category monoid.
\end{theorem}
Thus, working with braided monoidal categories is weakly equivalent to working with braided monoidal $\mathfrak{B}$-categories and the latter category has the advantage that we may assume multiplications to be strictly commutative.
This implies in particular that every braided monoidal small category is weakly equivalent to one of the form $\mathfrak{B}\!\int\!A$ for a commutative $\mathfrak{B}$-category monoid $A$.
Let $\mathfrak{B}r$ be the categorical operad such that the category of $\mathfrak{B}r$-algebras can be identified with $\mathfrak{B}r$-$\mathit{Cat}$ (see Section~\ref{subsec:operads} for details). For the analogous rectification in the category of spaces $\mathcal S$ (which we interpret as the category of simplicial sets) we consider the operad $\mathrm{N} \mathfrak{B}r$ in $\mathcal S$ obtained by evaluating the nerve of $\mathfrak{B}r$. This is an $E_2$ operad in the sense of being equivalent to the little 2-cubes operad and we may think of the category of algebras $\mathrm{N}\mathfrak{B}r$-$\mathcal S$ as the category of $E_2$ spaces. In order to rectify $E_2$ spaces to strictly commutative monoids we work in the diagram category of $\mathfrak{B}$-spaces $\mathcal S^{\mathfrak{B}}$ equipped with the braided monoidal convolution product inherited from $\mathfrak{B}$. There is an analogous category of $E_2$ $\mathfrak{B}$-spaces $\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$. After localization with respect to the appropriate classes of $\mathfrak{B}$-equivalences $w_{\mathfrak{B}}$ in \mbox{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$} and weak equivalences $w$ in
$\mathrm{N}\mathfrak{B}r$-$\mathcal S$, Proposition~\ref{prop:hocolim-B-equivalence} and Theorem~\ref{thm:Br-SB-rectification} combine to give the following result.
\begin{theorem}\label{thm:main-theorem-2}
The homotopy colimit $(-)_{h\mathfrak{B}}$ and the constant embedding $\Delta$ define an equivalence of the localized categories
\[
(-)_{h\mathfrak{B}}\colon\thinspace \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$}[w_{\mathfrak{B}}^{-1}]\simeq
\text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}[w^{-1}] :\!\Delta
\]
and every object in $\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$ is naturally $\mathfrak{B}$-equivalent to a strictly commutative $\mathfrak{B}$-space monoid.
\end{theorem}
This implies in particular that every double loop space is equivalent to an $E_2$ space of the form $A_{h\mathfrak{B}}$ for a commutative $\mathfrak{B}$-space monoid $A$. To give an example why this may be useful, notice that if $A$ is a commutative $\mathfrak{B}$-space monoid, then the category $\mathcal S^{\mathfrak{B}}/A$ of $\mathfrak{B}$-spaces over $A$ inherits the structure of a braided monoidal category. It is less obvious how to define such a structure for the corresponding category of spaces over an $E_2$ space.
The above rectification theorems have corresponding versions for symmetric monoidal categories and $E_{\infty}$ spaces that we spell out in Section~\ref{sec:I-section}. As an application of this we show how to rectify certain $E_{\infty}$ ring spectra to strictly commutative symmetric ring spectra. However, the braided monoidal setting is somewhat more subtle and is the main focus of this paper.
Our main tool for replacing braided monoidal structures by strictly commutative ones is a refinement of the usual strictification construction used to replace monoidal categories by strictly monoidal ones, see e.g.\ \cite[Section~1]{JS}. While it is well-known that this construction cannot be used to turn braided monoidal categories into categories with a strictly commutative multiplication, we shall see that it can be reinterpreted so as to take values in commutative $\mathfrak{B}$-category monoids instead. This gives rise to the \emph{$\mathfrak{B}$-category rectification functor} $\Phi$ introduced in Section~\ref{subsec:rectification}. In order to obtain an analogous rectification on the space level we apply the results of Fiedorowicz-Stelzer-Vogt~\cite{Fiedorowicz-V_simplicial,FSV} that show how to associate braided monoidal categories to $E_2$ spaces. Our rectification functor $\Phi$ then applies to these braided monoidal categories and we can apply the nerve functor level-wise to get back into the category of commutative $\mathfrak{B}$-space monoids.
It was pointed out by Stasheff and proved by Fiedorowicz~\cite{fied} and Berger \cite{Berger_double} that the classifying space of a braided monoidal small category becomes a double loop space after group completion. As an application of our techniques we show in Section~\ref{sec:classifying} how one can very simply define the double delooping: Given a braided monoidal category $\mathcal A$, we apply the rectification functor $\Phi$ and the level-wise nerve to get a commutative $\mathfrak{B}$-space monoid $\mathrm{N}\Phi(\mathcal A)$. The basic fact (valid for any commutative monoid in a braided monoidal category whose unit is terminal) is now that the bar construction applied to $\mathrm{N}\Phi(\mathcal A)$ is a simplicial monoid and hence can be iterated once to give a bisimplicial $\mathfrak{B}$-space. Evaluating the homotopy colimit of this $\mathfrak{B}$-space we get the double delooping. This construction in fact gives an alternative proof of Stasheff's result independent of the operadic recognition theorem for double loop spaces.
Another ingredient of our work is a general procedure for constructing equivalences between localized categories that we detail in Appendix~\ref{app:localization}. This improves on previous work by
Fiedorowicz-Stelzer-Vogt~\cite[Appendix~C]{FSV} and has subsequently been used by these authors in \cite{FSV2} to sharpen some of their earlier results.
\subsection{Organization}
We begin by introducing the category of braided injections in Section~\ref{sec:braided-injections} and establish the basic homotopy theory of $\mathfrak{B}$-spaces in Section~\ref{sec:homotopy-B-spaces}. Then we switch to the categorical setting in
Section~\ref{sec:B-categories} where we prove Theorem~\ref{thm:main-theorem-1}. In Section~\ref{sec:E2-spaces} we return to the analysis of $\mathfrak{B}$-spaces and prove Theorem~\ref{thm:main-theorem-2}, whereas Section~\ref{sec:classifying} is dedicated to double deloopings of commutative $\mathfrak{B}$-space monoids. Finally, we consider the symmetric monoidal version of the theory and relate this to the category of symmetric spectra in Section~\ref{sec:I-section}. The material on localizations of categories needed for the paper is collected in Appendix~\ref{app:localization}.
\section{The category of braided injections}\label{sec:braided-injections}
We generalize the geometric definition of the braid groups by introducing the notion of a \emph{braided injection}. In this way we obtain a category $\mathfrak{B}$ of braided injections such that the classical braid groups appear as the endomorphism monoids.
In the following we write $I$ for the unit interval. Let $\mathbf{n}$ denote the ordered set $\{1,\ldots,n\}$ for $n\geq 1$. A braided injection $\alpha$ from
$\mathbf{m}$ to $\mathbf{n}$, written $\alpha\colon\thinspace \mathbf m\to\mathbf n$,
is a homotopy class of $m$-tuples $(\alpha_1,\ldots,\alpha_m)$, where each $\alpha_i$ is a path
$\alpha_i \colon\thinspace I \rightarrow \mathbb{R}^{2}$ starting in $(i,0)$ and ending in one of the points
$(1,0),...,(n,0)$ with the requirement that $\alpha_i(t)\neq \alpha_j(t)$ for all $t$ in $I$, whenever $i\neq j$.
Two $m$-tuples $(\alpha_1,\ldots,\alpha_m)$ and $(\beta_1,\ldots,\beta_m)$ are homotopic if
there exists an $m$-tuple of homotopies $H_i \colon\thinspace I \times I \rightarrow \mathbb{R}^2$
from $\alpha_i$ to $\beta_i$, fixing endpoints,
such that $H_i(s,t) \neq H_j(s,t)$ for all $(s,t)$ in $I\times I$ whenever $i\neq j$.
The requirement that $H_i$ fixes endpoints ensures that a braided injection $\alpha$ from $\mathbf m$ to
$\mathbf n$ defines an underlying injective function $\bar\alpha\colon\thinspace \mathbf m\to \mathbf n$ by writing
$\alpha_i(1)=(\bar\alpha(i),0)$. When visualising an injective braid, we think of the points $\alpha_i(t)$ for $i=1,\dots,m$ as a family of distinct points in $\mathbb{R}^2$ moving downwards from the initial position $(1,0)$, \dots, $(m,0)$, for $t=0$, to the final position $(\bar\alpha(1),0)$,\dots,
$(\bar\alpha(m),0)$, for $t=1$.
\begin{figure}
\caption{Braided injections with the same underlying injective map: $1 \mapsto 2$, $2 \mapsto 4$, $3 \mapsto 1$.}
\label{injective braids}
\end{figure}
We can compose two braided injections $\alpha\colon\thinspace \mathbf{m}\rightarrow \mathbf{n}$ and
$\beta\colon\thinspace\mathbf{n}\rightarrow \mathbf{p}$ by choosing representatives
$(\alpha_1,\ldots,\alpha_m)$ and $(\beta_1,\ldots,\beta_n)$,
and set $\beta\circ\alpha$ to be the homotopy class of the paths
$$(\beta_{\bar\alpha(1)}\cdot\alpha_1,...,\beta_{\bar\alpha(m)}\cdot\alpha_m)\text{.}$$
Here $\beta_{\bar\alpha(i)}\cdot\alpha_i$ denotes the usual composition of paths,
\[
\beta_{\bar\alpha(i)}\cdot\alpha_i(t)=
\begin{cases}
\alpha_i(2t), &\text{for $0\leq t\leq 1/2$},\\
\beta_{\bar\alpha(i)}(2t-1),& \text{for $1/2\leq t\leq1$}.
\end{cases}
\]
We let $\mathbf{0}$ denote the empty set and say that there is exactly one braided injection from
$\mathbf{0}$ to $\mathbf{n}$ for $n\geq 0$.
\begin{definition}\label{def B}
The category $\mathfrak{B}$ of braided injections has objects the finite sets $\mathbf n$ for $n\geq 0$ and morphisms the
braided injections between these sets.
\end{definition}
Next we recall the definitions of some categories closely related to $\mathfrak{B}$.
\begin{definition}\label{def B S I M}
The categories $\mathcal{B}$, $\Sigma$, $\mathcal{I}$ and $\mathcal{M}$ all have as objects the finite sets
$\mathbf{n}$ for $n\geq 0$. Here the \emph{braid category}
$\mathcal{B}$ and the \emph{permutation category} $\Sigma$ have respectively the braid group
$\mathcal{B}_n$ and the permutation group $\Sigma_n$ as the endomorphism set of $\mathbf{n}$, and no other morphisms.
The morphisms in $\mathcal{I}$ and $\mathcal{M}$ are the injective functions and
the order preserving injective functions, respectively.
\end{definition}
There is a canonical functor $\varPi$ from $\mathfrak{B}$ to $\mathcal{I}$
that takes a braided injection $\alpha\colon\thinspace\mathbf m \rightarrow \mathbf n$ to the underlying injective
function $\bar\alpha\colon\thinspace \mathbf m\rightarrow\mathbf n$.
By definition, $\mathcal{B}$ is a subcategory of $\mathfrak{B}$ and
$\Sigma$ is a subcategory of $\mathcal{I}$. Clearly $\varPi$ restricts to a functor from
$\mathcal{B}$ to $\Sigma$, which we also denote by $\varPi$.
The category $\mathcal{M}$ is a subcategory of $\mathcal{I}$ and there is a canonical embedding
$\Upsilon\colon\thinspace\mathcal{M} \rightarrow\mathfrak{B}$ with
$\Upsilon(\mathbf{n})=\mathbf{n}$. For an injective order preserving function
$\mu\colon\thinspace\mathbf{m}\rightarrow\mathbf{n}$, let $\mu_i$ be the straight path from
$(i,0)$ to $(\mu(i),0)$ for $1\leq i \leq m$.
Since $\mu$ is order preserving, $\mu_i(t)$ is different from $\mu_j(t)$ whenever $i\neq j$, and we can define $\Upsilon(\mu)$ as the braided injection represented by the tuple
$(\mu_1,\ldots,\mu_m)$.
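To be completely explicit, one possible choice of these straight paths (a linear parametrization consistent with, though not spelled out in, the description above) is
\[
\mu_i(t) = \big((1-t)\,i + t\,\mu(i),\, 0\big), \qquad t\in I,
\]
and the order preserving property of $\mu$ then gives $\mu_i(t)\neq\mu_j(t)$ for all $t$ whenever $i\neq j$.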
These functors fit into the following commutative diagram
\begin{equation}\label{diagram B B Sigma I M}
\xymatrix{
\mathcal{B} \ar@{}[r]|\subseteq \ar[d]_\varPi & \mathfrak{B} \ar[d]_\varPi \\
\Sigma \ar@{}[r]|\subseteq & \mathcal{I} & \mathcal{M}. \ar[lu]_\Upsilon
\ar@{}[l]|\supseteq
} \end{equation}
The categories $\mathcal B$, $\Sigma$, $\mathcal I$ and $\mathcal M$ are all monoidal categories with monoidal product
$\sqcup$ given on objects by $\mathbf m\sqcup\mathbf n=\mathbf{m\!+\!n}$. In addition, $\mathcal B$ is braided
monoidal and $\Sigma$ and $\mathcal I$ are symmetric monoidal. We will extend these monoidal structures
to a braided monoidal structure on $\mathfrak{B}$ such that all functors in the diagram are strict monoidal
functors and functors between braided monoidal categories are braided strict monoidal functors.
In order to do this, we will show that every morphism in $\mathfrak{B}$ can be uniquely written in terms
of a braid and a morphism in $\mathcal M$.
\begin{lemma}\label{lemma unique decomposition}
Every braided injection $\alpha\colon\thinspace \mathbf m\to\mathbf n$ can be written uniquely as a composition
$\alpha=\Upsilon(\mu)\circ\zeta$ with $\mu$ in $\mathcal M(\mathbf m,\mathbf n)$ and $\zeta$ in the braid group $\mathcal B_m$.
\end{lemma}
\begin{proof}
Let $\mu\colon\thinspace \mathbf m\to\mathbf n$ be the unique order preserving injective function whose image equals that of $\bar \alpha$, and let $\{j_1,\ldots,j_m\}$ be the permutation of the set $\mathbf{m}=\{1,\ldots,m\}$ determined by $\bar\alpha(i)=\mu(j_i)$ for $i=1,\dots,m$. Choose representatives $(\mu_1,\ldots,\mu_m)$ and $(\alpha_1,\ldots,\alpha_m)$ for $\Upsilon(\mu)$
and $\alpha$ respectively. Let ${\mu}'_i$ be the reverse path of $\mu_i$ for $1\leq i\leq m$.
Since the path ${\mu}'_{j_i}$ starts in $(\mu(j_i),0)=\alpha_i(1)$ and ends in $(j_i,0)$,
the homotopy class of the concatenated paths
$({\mu}'_{j_1}\cdot\alpha_1,\ldots,{\mu}'_{j_m}\cdot\alpha_m)$
is a braid on $m$ strings and we define this to be $\zeta$.
The composite $\Upsilon(\mu)\circ\zeta$ is represented by
$(\mu_{j_1}\cdot{\mu}'_{j_1}\cdot\alpha_1,\ldots,\mu_{j_m}\cdot{\mu}'_{j_m}\cdot\alpha_m)$,
which is clearly homotopic to $(\alpha_1,\ldots,\alpha_m)$.
The morphism $\mu$ is uniquely determined by $\bar\alpha$ and we see from
the construction that $\zeta$ is then also uniquely determined.
\end{proof}
The above lemma implies that there is a canonical identification
\begin{equation}\label{eq:B-identification}
\mathfrak{B}(\mathbf m,\mathbf n) \cong \mathcal M(\mathbf m,\mathbf n)\times \mathcal B_m.
\end{equation}
Now consider a pair $(\mu,\zeta)$ in $\mathcal M(\mathbf m,\mathbf n)\times \mathcal B_m$ and a pair $(\nu,\xi)$ in $\mathcal M(\mathbf n,\mathbf p)\times\mathcal B_n$. By Lemma \ref{lemma unique decomposition} there exists a unique morphism $\xi_*(\mu)$ in $\mathcal M(\mathbf m,\mathbf n)$ and a unique braid $\mu^*(\xi)$ in $\mathcal B_m$ such that the diagram
$$\xymatrix{
\mathbf{m} \ar[rr]^{\Upsilon(\mu)} \ar[d]_{\mu^\ast(\xi)} && \mathbf{n} \ar[d]^\xi \\
\mathbf{m} \ar[rr]^{\Upsilon(\xi_\ast(\mu))} && \mathbf{n}
}$$
commutes in $\mathfrak{B}$. Hence we see that composition in $\mathfrak{B}$ translates into the formula
\[
(\nu,\xi)\circ(\mu,\zeta)=(\nu\circ\xi_*(\mu),\mu^*(\xi)\circ\zeta)
\]
under the identification in \eqref{eq:B-identification}.
In order to define functors out of the categories considered in Definition~\ref{def B S I M}, it is sometimes convenient to have these categories expressed in terms of generators and relations. Consider first the case of $\mathcal M$ and write $\partial_n^i\colon\thinspace \mathbf n\to \mathbf{n\sqcup 1}$ for the morphism that misses the element $i$ in $\{1,\dots,n+1\}$. It is well known that
$\mathcal M$ is generated by the morphisms $\partial^i_n$ subject to the relations
\begin{equation*}
\partial^i_{n+1}\partial^j_n = \partial^{j+1}_{n+1}\partial^{i}_n \quad\text{ for } i\leq j.
\end{equation*}
Now consider the category $\mathfrak{B}$ and let $\zeta_n^1,\dots,\zeta_n^{n-1}$ be the standard generators for the braid group $\mathcal B_n$, see e.g. \cite[Theorem~1.8]{bir}.
\begin{figure}
\caption{The generator $\zeta_n^i$ and its inverse.}
\label{fig generator zeta}
\end{figure}
We also write $\partial_n^i\colon\thinspace \mathbf n\to \mathbf{n\sqcup 1}$ for the braided injections obtained by applying the functor $\Upsilon$ to the corresponding morphisms in $\mathcal M$.
\begin{lemma}\label{lemma generating morphisms for B}
The category $\mathfrak{B}$ is generated by the morphisms $\zeta^i_n\colon\thinspace \mathbf{n}\rightarrow \mathbf{n}$
for $n\geq 2$ and $1\leq i \leq n-1$, and the morphisms $\partial^i_n\colon\thinspace \mathbf{n}\rightarrow\mathbf{n\sqcup1}$ for
$n\geq 0$ and $1\leq i \leq n\!+\!1$, subject to the relations
\begin{align*}
&\!\begin{aligned}
&\zeta^i_n\zeta^j_n=\zeta^j_n\zeta^i_n &&\text{for } |i-j|\geq2 \\
&\zeta^i_n\zeta^{i+1}_n\zeta^i_n=\zeta^{i+1}_n\zeta^i_n\zeta^{i+1}_n &&\text{for } 1\leq i\leq n-2 \\
&\partial^i_{n+1}\partial^j_n = \partial^{j+1}_{n+1}\partial^{i}_n &&\text{for } i\leq j
\end{aligned}\\
&\zeta^i_{n+1}\partial^j_n =
\begin{cases} \partial^j_n\zeta^{i-1}_n & \text{for } j<i \\
\partial^{j+1}_n & \text{for } j=i \\
\partial^{j-1}_n & \text{for } j=i+1 \\
\partial^j_n\zeta^i_n & \text{for } j>i+1.
\end{cases}
\end{align*}
\end{lemma}
\begin{proof}
The identification $\mathfrak{B}(\mathbf{m},\mathbf{n}) \cong
\mathcal{M}(\mathbf{m},\mathbf{n})\times \mathcal{B}_m$
makes it clear that any morphism can be written in terms of the generators. The first two
relations are the relations for the braid groups (see e.g.\ \cite[Theorem~1.8]{bir}), the third consists of the relations in $\mathcal{M}$, so that leaves the relations between the $\partial^i_n$'s and the $\zeta^i_n$'s.
It is easy to see that these relations hold in $\mathfrak{B}$ and that they can be used to decompose
any product of the $\partial^i_n$'s and the $\zeta^i_n$'s into the form $\Upsilon(\mu)\circ \zeta$ for a braid $\zeta$ and a morphism $\mu$ in $\mathcal M$. Since such a decomposition is
unique, the relations are also sufficient.
\end{proof}
Finally, we consider the category $\mathcal I$ and write $\sigma_n^i\colon\thinspace \mathbf n\to\mathbf n$ for the image of
$\zeta_n^i$ under the projection $\varPi\colon\thinspace \mathfrak{B}\to\mathcal I$.
We obtain a presentation of $\mathcal I$ from the presentation of $\mathfrak{B}$ by imposing the relation $\sigma_n^i\sigma_n^i=\mathrm{id}_n$, just as the symmetric group $\Sigma_n$ is obtained from $\mathcal B_n$.
We use the above to define a strict monoidal structure on $\mathfrak{B}$ with unit $\mathbf 0$.
Just as for the monoidal categories considered in Diagram \eqref{diagram B B Sigma I M}, the monoidal product $\mathbf m\sqcup \mathbf n$
of two objects $\mathbf m$ and $\mathbf n$ in $\mathfrak{B}$ is $\mathbf{m\!+\!n}$.
The decomposition of a braided injection given in \eqref{eq:B-identification} lets us define the monoidal product $(\mu,\zeta)\sqcup(\nu,\xi)$ of two morphisms $(\mu,\zeta)$ and $(\nu,\xi)$ in $\mathfrak{B}$ as $(\mu\sqcup\nu,\zeta\sqcup\xi)$ using the monoidal structures on $\mathcal M$ and $\mathcal B$, for an illustration of this see Figure \ref{fig monoidal product}.
\begin{figure}
\caption{The monoidal product of two braided injections.}
\label{fig monoidal product}
\end{figure}
It is well known that the subcategory $\mathcal B$ is braided with braiding $\chi_{\mathbf m, \mathbf n}\colon\thinspace \mathbf m\sqcup \mathbf n \rightarrow \mathbf n\sqcup \mathbf m$ moving the first $m$ strings over the last $n$ strings while keeping the order among the $m$ strings and the $n$ strings respectively. This family of isomorphisms is in fact also a braiding on $\mathfrak{B}$. The hexagonal axioms for a braiding only involve morphisms in $\mathcal B$
so it remains to check that $\chi_{\mathbf m, \mathbf n}$ is natural with respect to the generators $\partial^i_k$. This is quite clear geometrically (see Figure \ref{fig naturality chi} for an illustration) and can be checked algebraically by writing $\chi_{\mathbf m, \mathbf n}$ in terms of the generators.
\begin{figure}
\caption{The naturality of the braiding $\chi$ with respect to the generator $\partial^2_3$.}
\label{fig naturality chi}
\end{figure}
\section{The homotopy theory of $\mathfrak{B}$-spaces}\label{sec:homotopy-B-spaces}
In this section we introduce $\mathfrak{B}$-spaces as functors from $\mathfrak{B}$ to the category of spaces and
equip the category of $\mathfrak{B}$-spaces with a braided monoidal model structure. We assume some familiarity
with the basic theory of cofibrantly generated model categories as presented in \cite[Section~2.1]{hov} and
\cite[Section~11]{hirsch}.
\subsection{The category of $\mathfrak{B}$-spaces}
A $\mathfrak{B}$-space is a functor $X\colon\thinspace \mathfrak{B}\rightarrow\mathcal{S}$, where $\mathcal{S}$
is the category of simplicial sets. We call a natural transformation between two such functors a
morphism between the two $\mathfrak{B}$-spaces and write ${\mathcal{S}^\mathfrak{B}}$ for the category of $\mathfrak{B}$-spaces so defined.
The category $\mathcal{S}^\mathfrak{B}$ inherits much structure from $\mathcal S$. All small limits and colimits
exist and are constructed level-wise. Furthermore, ${\mathcal{S}^\mathfrak{B}}$ is enriched, tensored and cotensored over $\mathcal{S}$.
For a $\mathfrak{B}$-space $X$ and a simplicial set $K$, the tensor $X\times K$ and cotensor
$X^K$ are the $\mathfrak{B}$-spaces given in level $\mathbf{n}$ by
$$(X\times K)(\mathbf{n})=X(\mathbf{n})\times K \quad\text{ and } \quad
X^K(\mathbf{n})=\mathrm{Map}_\mathcal{S}(K,X(\mathbf{n})),$$
where $\mathrm{Map}_\mathcal{S}$ is the standard simplicial function complex.
The simplicial set of maps from $X$ to $Y$ is the end
$$\mathrm{Map}_{\mathcal{S}^\mathfrak{B}}(X,Y)=
\int_{\mathbf{n}\in\mathfrak{B}}\mathrm{Map}_\mathcal{S}\big(X(\mathbf{n}),Y(\mathbf{n})\big).$$
\begin{lemma}
The category of $\mathfrak{B}$-spaces is a bicomplete simplicial
category with the above defined structure. \qed
\end{lemma}
\subsection{The $\mathfrak{B}$-model structure on $\mathcal{S}^\mathfrak{B}$}
We will use the free $\mathfrak{B}$-space functors $F_\mathbf n\colon\thinspace \mathcal S \rightarrow {\mathcal{S}^\mathfrak{B}}$ given by $F_{\mathbf n}(K)=\mathfrak{B}(\mathbf n,-)\times K$ to transport the usual model structure on simplicial sets to ${\mathcal{S}^\mathfrak{B}}$. The functor $F_\mathbf n$ is left adjoint to the evaluation functor $\mathrm{Ev}_\mathbf n$ taking a $\mathfrak{B}$-space $X$ to the simplicial set $X(\mathbf n)$. Note that since $\mathbf 0$ is initial in $\mathfrak{B}$, the functor $F_\mathbf 0$ takes a simplicial set to a constant $\mathfrak{B}$-space. We often use the notation $\Delta$ for $F_\mathbf 0$.
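Spelled out in the simplicially enriched setting (a standard consequence of the definitions, recorded here for convenience), this adjunction takes the form
\[
\mathrm{Map}_{\mathcal{S}^\mathfrak{B}}\big(F_{\mathbf n}(K), X\big) \cong \mathrm{Map}_{\mathcal{S}}\big(K, X(\mathbf n)\big),
\]
naturally in the simplicial set $K$ and the $\mathfrak{B}$-space $X$.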
It is a standard fact, see for instance \cite[Theorem~11.6.1]{hirsch}, that ${\mathcal{S}^\mathfrak{B}}$ has a level model structure where a morphism is a weak equivalence (or respectively a fibration) if it is a weak equivalence (or respectively a fibration) of simplicial sets when evaluated at each level $\mathbf n$. This model structure is cofibrantly generated with generating cofibrations
\begin{equation*}
I=\{F_{\mathbf n}(i)\,\,|\,\, \mathbf n \in \mathfrak{B},\, i\colon\thinspace \partial\Delta^k\rightarrow\Delta^k \text{ for } 0\leq k\}
\end{equation*}
and generating acyclic cofibrations
\begin{equation*}
J=\{F_{\mathbf n}(j)\,\,|\,\, \mathbf n \in \mathfrak{B}, \, j\colon\thinspace \Lambda^k_l\rightarrow\Delta^k \text{ for } k>0 \text{ and }0\leq l\leq k\}
\end{equation*}
where $i$ and $j$ denote the inclusion of the boundary of $\Delta^k$ and the $l$th horn of $\Delta^k$ in
$\Delta^k$ respectively. The cofibrations in the level model structure have a concrete description using latching maps. The $n$th latching space of a $\mathfrak{B}$-space $X$ is defined as
$$L_{\mathbf n}(X)=\colim_{(\mathbf m \rightarrow\mathbf n)\in\partial(\mathfrak{B}\!\downarrow\!\mathbf n)}X(\mathbf m),$$
where $\partial(\mathfrak{B}\!\downarrow\!\mathbf n)$ is the full subcategory of the
comma category $(\mathfrak{B}\!\downarrow\!\mathbf n)$ with objects the non-isomorphisms.
For a map of $\mathfrak{B}$-spaces $f\colon\thinspace X\rightarrow Y$, the $n$th latching map is the $\mathcal B_n$-equivariant map
$L_{\mathbf n}f\colon\thinspace L_{\mathbf n}(Y)\amalg_{L_{\mathbf n}(X)} X(\mathbf n) \rightarrow Y(\mathbf n)$. A map $f\colon\thinspace X\rightarrow Y$ is a cofibration if for every $n\geq0$, the $n$th latching map $L_{\mathbf n}f$
is a cofibration of simplicial sets such that the $\mathcal B_n$-action on the complement of the image is free. We refer to such cofibrations as $\mathfrak{B}$-cofibrations.
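For orientation we spell out the two lowest cases: since $\partial(\mathfrak{B}\!\downarrow\!\mathbf 0)$ is empty and the only non-isomorphism with target $\mathbf 1$ is the unique morphism $\mathbf 0\to\mathbf 1$, we have
$$L_{\mathbf 0}(X)=\emptyset \quad\text{and}\quad L_{\mathbf 1}(X)=X(\mathbf 0).$$
In these levels the groups $\mathcal B_0$ and $\mathcal B_1$ are trivial, so the freeness condition is vacuous and the latching condition asks that $X(\mathbf 0)\to Y(\mathbf 0)$ and $Y(\mathbf 0)\amalg_{X(\mathbf 0)}X(\mathbf 1)\to Y(\mathbf 1)$ be cofibrations of simplicial sets.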
The level model structure is primarily used as a convenient first step in equipping $\mathcal S^{\mathfrak{B}}$ with a model structure making it Quillen equivalent to $\mathcal S$. In such a model structure we need a wider class of weak equivalences. Recall that the Bousfield-Kan construction of the homotopy colimit of a functor
$X$ from a small category $\mathcal C$ to $\mathcal S$ is the simplicial set $\hocolim_\mathcal C X$ with $k$-simplices
\begin{equation}\label{hocolim}
\coprod_{\mathbf m_0\leftarrow\cdots\leftarrow \mathbf m_k}X(\mathbf m_k)_k
\end{equation}
for morphisms $\mathbf m_0\leftarrow \mathbf m_1,\dots,\mathbf m_{k-1}\leftarrow \mathbf m_k$ in $\mathcal C$, cf.\ \cite[Section~XII.5.1]{BK}. When the functor $X$ is a $\mathfrak{B}$-space we will often denote its homotopy colimit by $X_{h\mathfrak{B}}$.
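As a simple example, for a constant $\mathfrak{B}$-space $\Delta(K)$ the formula \eqref{hocolim} identifies $\Delta(K)_{h\mathfrak{B}}$ with the product $\mathrm{N}(\mathfrak{B}^{\mathrm{op}})\times K$. Since $\mathbf 0$ is initial in $\mathfrak{B}$, the simplicial set $\mathrm{N}(\mathfrak{B}^{\mathrm{op}})$ is contractible, so $\Delta(K)_{h\mathfrak{B}}$ is weakly equivalent to $K$; this observation reappears in the proof of the Quillen equivalence below.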
\begin{definition}\label{def B-equivalence}
A morphism $X\rightarrow Y$ of $\mathfrak{B}$-spaces is a $\mathfrak{B}$-equivalence if the induced map
$X_{h\mathfrak{B}} \rightarrow Y_{h\mathfrak{B}}$ is a weak equivalence of simplicial sets.
\end{definition}
We say that a morphism $X\rightarrow Y$ of $\mathfrak{B}$-spaces is a $\mathfrak{B}$-fibration if $X(\mathbf n)\rightarrow Y(\mathbf n)$ is a fibration of simplicial sets for every $\mathbf n\in \mathfrak{B}$
and if the square
\begin{equation}\label{diagram homotpy cartesian}
\xymatrix{
X(\mathbf{m}) \ar[r]^{X(\alpha)} \ar[d] & X(\mathbf{n}) \ar[d] \\
Y(\mathbf{m}) \ar[r]^{Y(\alpha)} & Y(\mathbf{n})
}\end{equation}
is homotopy cartesian for every braided injection
$\alpha \colon\thinspace \mathbf{m}\rightarrow \mathbf{n}$.
In order to make the $\mathfrak{B}$-equivalences and $\mathfrak{B}$-fibrations part of a cofibrantly generated model structure we have to add more generating acyclic cofibrations compared to the level model structure. We follow the approach taken for diagram spectra in \cite[Section~3.4]{HSS} and \cite[Section~9]{MMSS} and for diagram spaces in \cite[Section~6.11]{Sagave-Schlichtkrull}: Each braided injection $\alpha\colon\thinspace\mathbf m\rightarrow \mathbf n$ gives rise to a map of $\mathfrak{B}$-spaces $\alpha^\ast\colon\thinspace F_{\mathbf{n}}(\ast) \rightarrow F_{\mathbf{m}}(\ast)$. The latter map factors through the mapping cylinder $M(\alpha^\ast)$ as $\alpha^\ast=r_\alpha j_\alpha$, where $j_\alpha$ is a cofibration in the level model structure and $r_\alpha$ is a simplicial homotopy equivalence.
We now set
$$\bar{J}= \{j_\alpha\Box i\,\,|\,\, \alpha\colon\thinspace\mathbf m\rightarrow \mathbf n \in \mathfrak{B},\, i\colon\thinspace \partial\Delta^k\rightarrow\Delta^k \text{ for } 0\leq k\},$$
where $\Box$ denotes the pushout-product, see e.g.\ \cite[Definition~4.2.1]{hov}.
\begin{proposition}\label{prop B-model structure}
There is a model structure on ${\mathcal{S}^\mathfrak{B}}$, the $\mathfrak{B}$-model structure, with weak equivalences the $\mathfrak{B}$-equivalences, fibrations the $\mathfrak{B}$-fibrations and cofibrations the $\mathfrak{B}$-cofibrations. This model structure is simplicial and cofibrantly generated
where $I_\mathfrak{B}=I$ is the set of generating cofibrations and $J_\mathfrak{B}=J\cup \bar{J}$ is the set of generating acyclic cofibrations.
\end{proposition}
\begin{proof}
The proof is similar to the proofs of Propositions~6.16 and 6.19 in \cite{Sagave-Schlichtkrull}. (We refer the reader to Remark~\ref{rem:sagave-schlichtkrull-remark} for a summary of the extent to which the results for symmetric monoidal diagram categories established in \cite{Sagave-Schlichtkrull} carry over to the present setting.)
\end{proof}
As promised this model structure makes $\mathfrak{B}$-spaces Quillen equivalent to simplicial sets.
\begin{proposition}
The adjunction
$\colim_\mathfrak{B}\colon\thinspace{\mathcal{S}^\mathfrak{B}} \rightleftarrows \mathcal S \thinspace\colon \Delta$
is a Quillen equivalence.
\end{proposition}
\begin{proof}
The category $\mathfrak{B}$ has an initial object so $\mathrm{N} \mathfrak{B}$ is a contractible simplicial set.
Arguing as in the proof of Proposition~6.23 in \cite{Sagave-Schlichtkrull} yields the result.
\end{proof}
\begin{example}\label{ex:X-bullet-example-1}
In general an $\mathcal I$-space $Z\colon \mathcal I\to \mathcal S$ pulls back to a $\mathfrak{B}$-space $\Pi^*Z$ via the functor $\Pi\colon\mathfrak{B}\to \mathcal I$ from Section~\ref{sec:braided-injections}. Consider in particular a based space $X$ with base point $*$ and the $\mathcal I$-space $X^{\bullet}\colon \mathcal I\to\mathcal S$ such that $X^{\bullet}(\mathbf n)=X^n$. A morphism $\alpha\colon\mathbf m\to\mathbf n$ in $\mathcal I$ acts on an element $\mathbf x=(x_1,\dots,x_m)$ by
\[
\alpha_*(\mathbf x)=(x_{\alpha^{-1}(1)},\dots,
x_{\alpha^{-1}(n)}),
\]
where $x_{\alpha^{-1}(j)}=x_i$ if $\alpha(i)=j$ and $x_{\alpha^{-1}(j)}=*$ if $j$ is not in the image of $\alpha$.
It is proved in \cite{SHom} that if $X$ is connected, then the geometric realization $|X^{\bullet}_{h\mathcal I}|$ is equivalent to the infinite loop space $\Omega^{\infty}\Sigma^{\infty}(|X|)$. In contrast to this we shall prove in Example~\ref{ex:X-bullet-example-2} that $|(\Pi^*X^{\bullet})_{h\mathfrak{B}}|$ is equivalent to $\Omega^2\Sigma^2(|X|)$ for connected $X$.
\end{example}
\subsection{The flat $\mathfrak{B}$-model structure on $\mathcal{S}^\mathfrak{B}$}
We will now consider another model structure on $\mathfrak{B}$-spaces, the flat $\mathfrak{B}$-model structure, which takes into account that each level of a $\mathfrak{B}$-space has a left action of a braid group. The weak equivalences are again the
$\mathfrak{B}$-equivalences, but the flat $\mathfrak{B}$-model structure has more cofibrant objects than the $\mathfrak{B}$-model structure. In some places, in particular in Section~\ref{sec:classifying}, we get more general results by considering these ``flat'' objects instead of only the $\mathfrak{B}$-cofibrant objects. The flat $\mathfrak{B}$-model structure is constructed similarly to the $\mathfrak{B}$-model structure, but the starting point is Shipley's mixed model structure on the category $\mathcal B_n$-$\mathcal S$ of simplicial sets with left $\mathcal B_n$-action, see \cite[Proposition~1.3]{Shipley}. Shipley only considers finite groups, but the construction applies equally well to discrete groups in general if one allows all subgroups to be considered. An equivariant map is a weak equivalence (or respectively a cofibration) in the mixed model structure if the underlying map of simplicial sets is.
Recall that given a group $H$ and an $H$-space $K$, the space of homotopy fixed points $K^{hH}$
is the homotopy limit of $K$
viewed as a diagram over the one-object category $H$. An equivariant map $K\rightarrow L$
is a fibration in the mixed model structure if the induced maps $K^H\rightarrow L^H$ of fixed points are fibrations and the diagrams
$$\xymatrix{
K^H \ar[r] \ar[d] & K^{hH} \ar[d] \\
L^H \ar[r]& L^{hH}
}$$
are homotopy cartesian for all subgroups $H$ of $\mathcal B_n$. This model structure is cofibrantly generated, see the proof of \cite[Proposition~1.3]{Shipley} for a description of the generating (acyclic) cofibrations.
The forgetful functor $\mathrm{Ev}_{\mathbf n}\colon\thinspace {\mathcal{S}^\mathfrak{B}} \rightarrow \mathcal B_n\text{-}\mathcal S$ evaluating a $\mathfrak{B}$-space $X$ at the $n$th level has a right adjoint $G_{\mathbf n}$ given by $G_{\mathbf n}(K)=\mathfrak{B}(\mathbf n,-)\times_{\mathcal B_n} K$. We proceed as in the previous subsection and get a new level model structure on
${\mathcal{S}^\mathfrak{B}}$ where a morphism is a weak equivalence (or respectively a fibration) if it is a weak equivalence (or respectively a fibration) in the mixed model structure on $\mathcal B_n$-$\mathcal S$ when evaluated at each level $\mathbf n$. This model structure is cofibrantly generated with generating (acyclic) cofibrations $I_\mathrm{m}$ ($J_\mathrm{m}$) obtained by applying $G_\mathbf n$ to the generating (acyclic) cofibrations for the mixed model structure on $\mathcal B_n$-$\mathcal S$ for all $\mathbf n$ in $\mathfrak{B}$.
A morphism $f\colon\thinspace X\rightarrow Y$ is a cofibration in this level model structure if for every $n\geq0$, the $n$th latching map $L_{\mathbf n}f$
is a cofibration of simplicial sets. We refer to such cofibrations as flat $\mathfrak{B}$-cofibrations.
A morphism $X\rightarrow Y$ of $\mathfrak{B}$-spaces is said to be a flat $\mathfrak{B}$-fibration if $X(\mathbf n)\rightarrow Y(\mathbf n)$ is a fibration in the mixed model structure on $\mathcal B_n$-$\mathcal S$ for every $\mathbf n$ in $ \mathfrak{B}$
and if the square \eqref{diagram homotpy cartesian}
is homotopy cartesian for every braided injection
$\alpha \colon\thinspace \mathbf{m}\rightarrow \mathbf{n}$.
\begin{proposition}\label{prop flat model structure}
There is a model structure on ${\mathcal{S}^\mathfrak{B}}$, the flat $\mathfrak{B}$-model structure, with weak equivalences the $\mathfrak{B}$-equivalences, fibrations the flat $\mathfrak{B}$-fibrations and cofibrations the flat $\mathfrak{B}$-cofibrations. This model structure is simplicial and cofibrantly generated
where $I_{\mathrm{flat}}=I_\mathrm{m}$ is the set of generating cofibrations and $J_{\mathrm{flat}}=J_\mathrm{m}\cup \bar{J}$ is the set of generating acyclic cofibrations.
\end{proposition}
\begin{proof}
The proof is similar to the proofs of Propositions~6.16 and 6.19 in \cite{Sagave-Schlichtkrull}.
\end{proof}
We will refer to the flat $\mathfrak{B}$-cofibrant objects simply as flat objects. These will play an important role also when we are considering the $\mathfrak{B}$-model structure.
The next result gives a criterion for an object to be flat which is easier to check than the one given above.
\begin{proposition}\label{prop flat B-space}
A $\mathfrak{B}$-space $X$ is flat if and only if each
morphism $\mathbf m \rightarrow \mathbf n$ induces a cofibration $X(\mathbf m) \rightarrow X(\mathbf n)$
and for each diagram of the
following form (with maps induced by the evident order preserving morphisms)
\begin{equation}\label{diagram condition for flatness}
\xymatrix{
X(\mathbf m) \ar[r] \ar[d] & X(\mathbf{m\sqcup n}) \ar[d] \\
X(\mathbf{l\sqcup m}) \ar[r] & X(\mathbf{l\sqcup m\sqcup n})}
\end{equation}
the intersection of the images of $ X(\mathbf{l\sqcup m})$ and $X(\mathbf{m\sqcup n})$
in $ X(\mathbf{l\sqcup m\sqcup n})$ equals the image of $X(\mathbf m)$.
\end{proposition}
\begin{proof}
Recall from Definition~\ref{def B S I M} the canonical embedding
$\Upsilon\colon\thinspace\mathcal M\rightarrow\mathfrak{B}$, where $\mathcal M$ is the category with the same objects as $\mathfrak{B}$
and injective order preserving functions as morphisms. This induces an embedding $(\mathcal M\!\downarrow\!\mathbf n) \rightarrow (\mathfrak{B}\!\downarrow\!\mathbf n)$
whose image is a skeletal subcategory by Lemma~\ref{lemma unique decomposition}. Identifying
$(\mathcal M\!\downarrow\!\mathbf n)$ with the poset category of subsets of $\mathbf n$, we see that a $\mathfrak{B}$-space gives rise to an $\mathbf n$-cubical diagram for all $\mathbf n$. Furthermore, it follows from the definitions that a map of $\mathfrak{B}$-spaces is a flat $\mathfrak{B}$-cofibration if and only if the induced maps of cubical diagrams are cofibrations in the usual sense. Given this, the proof proceeds along the same lines as the proof of the analogous result for $\mathcal I$-spaces, see \cite[Proposition~3.11]{Sagave-Schlichtkrull}.
\end{proof}
\subsection{The braided monoidal structure on $\mathcal{S}^\mathfrak{B}$}\label{subsec:braided-monoidal-B-spaces}
Any category of diagrams in $\mathcal{S}$ indexed by a braided monoidal small category inherits a
braided monoidal convolution product from the indexing category. We proceed to explain how this works in the case of $\mathcal S^{\mathfrak{B}}$. Given $\mathfrak{B}$-spaces $X$ and $Y$, we define the $\mathfrak{B}$-space $X\boxtimes Y$ to be the left Kan extension of the ($\mathfrak{B}\times \mathfrak{B}$)-space
\[
\mathfrak{B}\times \mathfrak{B} \xrightarrow{X\times Y} \mathcal S\times\mathcal S \xrightarrow{\times} \mathcal S
\]
along the monoidal structure map $\sqcup\colon \mathfrak{B}\times\mathfrak{B}\to \mathfrak{B}$. Thus, the data specifying a map of
$\mathfrak{B}$-spaces $X\boxtimes Y\to Z$ is equivalent to the data giving a map of ($\mathfrak{B}\times \mathfrak{B}$)-spaces $X(\mathbf m)\times Y(\mathbf n)\to Z(\mathbf m\sqcup\mathbf n)$. We also have the level-wise description
\[
X\boxtimes Y(\mathbf n)=\colim_{\mathbf n_1\sqcup\mathbf n_2\to\mathbf n} X(\mathbf n_1)\times Y(\mathbf n_2)
\]
where the colimit is taken over the comma category $(\sqcup\!\downarrow\!\mathbf n)$ associated to the monoidal product $\sqcup\colon\thinspace \mathfrak{B}\times\mathfrak{B}\rightarrow\mathfrak{B}$.
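For example, it follows from this description (or directly from the universal property) that there is a natural isomorphism
$$F_{\mathbf m}(K)\boxtimes F_{\mathbf n}(L)\cong F_{\mathbf m\sqcup\mathbf n}(K\times L)$$
for simplicial sets $K$ and $L$; this identification will be used again in the proof of Lemma~\ref{lemma pushout-product axiom} below.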
The monoidal unit for the $\boxtimes$-product is the terminal $\mathfrak{B}$-space
$U^{\mathfrak{B}}=\mathfrak{B}(\mathbf 0,-)$. Using that $\mathcal S$ is cartesian closed one easily defines the coherence isomorphisms for associativity and unity required to make $\mathcal S^{\mathfrak{B}}$ a monoidal category. We specify a braiding
$\mathfrak{b}\colon X\boxtimes Y\to Y\boxtimes X$ on $\mathcal S^{\mathfrak{B}}$ by requiring that the diagram of
($\mathfrak{B}\times \mathfrak{B}$)-spaces
\begin{equation}\label{eq:B-spaces-braiding}
\xymatrix{
X(\mathbf m)\times Y(\mathbf n) \ar[rrr]^{\mathrm{twist}} \ar[d] &&&Y(\mathbf n)\times X(\mathbf m) \ar[d]\\
X\boxtimes Y(\mathbf m\sqcup \mathbf n) \ar[r]^{\mathfrak{b}(\mathbf m\sqcup \mathbf n)} &
Y\boxtimes X(\mathbf m\sqcup \mathbf n) \ar[rr]^{Y\boxtimes X(\chi_{\mathbf m,\mathbf n})} &&
Y\boxtimes X(\mathbf n\sqcup \mathbf m)
}
\end{equation}
be commutative. The following proposition can either be checked by hand or deduced from the general theory in \cite{Day}.
\begin{proposition}\label{prop:B-spaces-braided-monoidal}
The category $\mathcal S^{\mathfrak{B}}$ equipped with the $\boxtimes$-product, the unit $U^{\mathfrak{B}}$, and the braiding $\mathfrak{b}$ is a braided monoidal category. \qed
\end{proposition}
We use the term \emph{$\mathfrak{B}$-space monoid} for a monoid in ${\mathcal{S}^\mathfrak{B}}$. By the universal property of the
$\boxtimes$-product, the data needed to specify the unit $u\colon U^{\mathfrak{B}}\to A$ and the multiplication
$\mu\colon A\boxtimes A\to A$ on a $\mathfrak{B}$-space monoid $A$ amounts to a zero simplex $u$ in $A(\mathbf 0)$ and a map of ($\mathfrak{B}\times \mathfrak{B}$)-spaces $\mu\colon A(\mathbf m)\times A(\mathbf n)\to A(\mathbf m\sqcup \mathbf n)$ satisfying the usual associativity and unitality conditions. By the definition of the braiding, $A$ is commutative (that is,
$\mu\circ \mathfrak{b}=\mu$) if and only if the diagram of ($\mathfrak{B}\times\mathfrak{B}$)-spaces
\begin{equation}\label{eq:commutativity-condition}
\xymatrix{
A(\mathbf m)\times A(\mathbf n) \ar[r]^-{\mu} \ar[d]_{\mathrm{twist}}& A(\mathbf m\sqcup\mathbf n) \ar[d]^{A(\chi_{\mathbf m,\mathbf n})} \\
A(\mathbf n)\times A(\mathbf m) \ar[r]^-{\mu} & A(\mathbf n\sqcup\mathbf m)
}
\end{equation}
is commutative.
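For orientation we note a minimal example: a simplicial monoid $M$ gives rise to a $\mathfrak{B}$-space monoid structure on the constant $\mathfrak{B}$-space $\Delta(M)$, with $u$ the unit of $M$ and $\mu$ induced by the multiplication of $M$. Since the structure maps $\Delta(M)(\chi_{\mathbf m,\mathbf n})$ are identities, the diagram \eqref{eq:commutativity-condition} reduces to the equation $\mu\circ\mathrm{twist}=\mu$, so $\Delta(M)$ is commutative in the above sense if and only if $M$ is a commutative simplicial monoid.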
Recall that given maps $f_1\colon\thinspace X_1\rightarrow Y_1$ and $f_2\colon\thinspace X_2\rightarrow Y_2$ of $\mathfrak{B}$-spaces,
the pushout-product is the induced map
$$f_1\Box f_2\colon\thinspace(X_1\boxtimes Y_2)\amalg_{(X_1\boxtimes X_2)}
(Y_1\boxtimes X_2) \rightarrow Y_1\boxtimes Y_2.$$
Following \cite[Definition~4.2.6]{hov} we say that a model structure on ${\mathcal{S}^\mathfrak{B}}$ is a monoidal model structure if given any two cofibrations $f_1$ and $f_2$, the pushout-product $f_1\Box f_2$
is a cofibration that is in addition acyclic if $f_1$ or $f_2$ is.
\begin{lemma}\label{lemma pushout-product axiom}
Both the $\mathfrak{B}$-model structure and the flat $\mathfrak{B}$-model structure
are monoidal model structures.
\end{lemma}
\begin{proof}
We give the proof for the $\mathfrak{B}$-model structure; the proof for the flat case is similar.
By Lemma~3.5 in \cite{SSAlgebrasModules} it suffices to verify the condition for the generating (acyclic) cofibrations. For two generating cofibrations $i,i'$ in $\mathcal S$ it is easy to check that
$F_\mathbf{m}(i)\Box F_\mathbf{n}(i')$ is isomorphic to $F_{\mathbf{m}\sqcup\mathbf{n}}(i\Box i')$.
This uses that $F_\mathbf{m}(K)\boxtimes F_\mathbf{n}(L)$ is naturally isomorphic
to $F_{\mathbf{m}\sqcup\mathbf{n}}(K\times L)$ for two simplicial sets $K$ and $L$ and also
that $F_{\mathbf{m}\sqcup\mathbf{n}}$ is a left adjoint and hence commutes with colimits.
The category of simplicial sets is a monoidal model category, therefore $i\Box i'$ is a cofibration and then so
is $F_{\mathbf{m}\sqcup\mathbf{n}}(i\Box i')$, since $F_{\mathbf{m}\sqcup\mathbf{n}}$ preserves cofibrations.
Similarly $F_\mathbf{m}(i)\Box F_\mathbf{n}(j)$ is an acyclic cofibration if
$j$ is a generating acyclic cofibration in $\mathcal{S}$.
Now let $\alpha\colon\thinspace \mathbf m\rightarrow \mathbf m'$ be a morphism in $\mathfrak{B}$. We check that
$(j_\alpha\Box i)\Box F_\mathbf{n}(i')$ is an acyclic cofibration for
$i\colon\thinspace \partial\Delta^k\rightarrow\Delta^k$ and $i'\colon\thinspace \partial\Delta^l\rightarrow\Delta^l$ generating cofibrations in $\mathcal S$.
Using that $j_{\alpha}\Box i\cong j_{\alpha}\Box F_{\mathbf 0}(i)$, we get the identifications
\[
(j_\alpha\Box i)\Box F_\mathbf{n}(i')\cong j_\alpha\Box(F_{\mathbf 0}(i)\Box F_\mathbf{n}(i'))\cong
j_{\alpha}\Box F_{\mathbf n}(i\Box i')\cong j_{\alpha}\boxtimes F_{\mathbf n}(*)\times (i\Box i').
\]
Since $j_{\alpha}$ is a cofibration by construction, it follows from the first part of the lemma and the fact that
the $\mathfrak{B}$-model structure is simplicial that this is a cofibration. For the same reason it therefore suffices to show that $j_{\alpha}\boxtimes F_{\mathbf n}(*)$ is a $\mathfrak{B}$-equivalence.
For this we apply the two out of three property for $\mathfrak{B}$-equivalences to the diagram
$$\xymatrix{
F_\mathbf{m}(\ast)\boxtimes F_\mathbf{n}(*) \ar[d]^{\cong}
\ar[rr]^{j_\alpha\boxtimes \mathrm{id}_{F_\mathbf{n}(*)}}
&& M(\alpha^\ast)\boxtimes F_\mathbf{n}(*)
\ar[rr]^{r_\alpha\boxtimes \mathrm{id}_{F_\mathbf{n}(*)}}
&& F_{\mathbf{m}'}(\ast)\boxtimes F_\mathbf{n}(*) \ar[d]^{\cong}\\
F_{\mathbf{m}\sqcup\mathbf{n}}(\ast)
\ar[rrrr]^{(\alpha\sqcup\mathrm{id}_\mathbf{n})^\ast}_\sim
&&&& F_{\mathbf{m}'\sqcup\mathbf{n}}(\ast).
}$$
The vertical maps are isomorphisms and the lower horizontal map is a $\mathfrak{B}$-equivalence since both
$F_{\mathbf m\sqcup\mathbf n}(\ast)_{h\mathfrak{B}}$ and $F_{\mathbf m'\sqcup\mathbf n}(\ast)_{h\mathfrak{B}}$ are contractible.
Furthermore, $r_\alpha\boxtimes\mathrm{id}_{F_\mathbf n(*)}$ is a simplicial homotopy equivalence
since $r_\alpha$ is a simplicial homotopy equivalence and $-\boxtimes \mathrm{id}_{F_\mathbf{n}(*)}$
preserves simplicial homotopy equivalences. This completes the proof.
\end{proof}
\begin{remark}\label{rem:sagave-schlichtkrull-remark}
In \cite{Sagave-Schlichtkrull} a projective model structure is defined for a general diagram category
$\mathcal S^\mathcal K$ indexed by a small symmetric monoidal category $\mathcal K$ that is well-structured as per
Definition~5.5 in \cite{Sagave-Schlichtkrull}. Similarly a flat model structure is defined for
$\mathcal S^\mathcal K$ if in addition $\mathcal K$ together with its subcategory of automorphisms form a well-structured
relative index category as per Definition~5.2 in \cite{Sagave-Schlichtkrull}. These definitions can be
canonically extended to allow braided monoidal categories as index categories such that similar
model structures exist. This will not make $\mathfrak{B}$ a well-structured index category because the comma category
$(\mathbf k\sqcup -\!\downarrow\!\mathbf l)$ will in general not have a terminal object for $\mathbf k$
and $\mathbf l$ in $\mathfrak{B}$. This property is however not used to establish the model structures, so
Proposition~\ref{prop B-model structure} and Proposition~\ref{prop flat B-space} are proved in the same way as
the similar results in \cite{Sagave-Schlichtkrull}.
But the proofs of results concerning how the monoidal structure interacts with the model structures do use the mentioned property. Above we have shown that the model structures we consider are monoidal model structures by an alternative argument. It is not clear if the arguments in \cite{Sagave-Schlichtkrull} can be generalized to define model structures on monoids and commutative monoids in $\mathfrak{B}$-spaces.
\end{remark}
Let $X$ and $Y$ be $\mathfrak{B}$-spaces and consider the natural transformation
$$\nu_{X,Y}\colon\thinspace X_{h\mathfrak{B}} \times Y_{h\mathfrak{B}} \xrightarrow{\cong}(X\times Y)_{h(\mathfrak{B}\times\mathfrak{B})}\rightarrow
((-\sqcup-)^\ast(X\boxtimes Y))_{h(\mathfrak{B}\times\mathfrak{B})}\rightarrow (X\boxtimes Y)_{h\mathfrak{B}}$$
where the second map is induced by the universal natural transformation of $\mathfrak{B}\times\mathfrak{B}$ diagrams
$X(\mathbf m) \times Y(\mathbf n) \rightarrow (X\boxtimes Y )(\mathbf{m\sqcup n})$.
These maps give rise to a monoidal
structure on the functor $(-)_{h\mathfrak{B}}$, cf.\ \cite[Proposition~4.17]{SThom}.
\begin{lemma}\label{lemma X and Y flat implies weak equivalnece between times and boxtimes}
If both $X$ and $Y$ are flat, then
$\nu_{X,Y}\colon\thinspace X_{h\mathfrak{B}} \times Y_{h\mathfrak{B}} \rightarrow (X\boxtimes Y)_{h\mathfrak{B}}$ is a weak equivalence.
\end{lemma}
\begin{proof}
The fact that the flat $\mathfrak{B}$-model structure is monoidal combined with Ken Brown's Lemma implies that the functor $X\boxtimes (-)$ takes $\mathfrak{B}$-equivalences between flat $\mathfrak{B}$-spaces to $\mathfrak{B}$-equivalences since $X$ is itself flat. Therefore we can take a cofibrant replacement of $Y$ in the $\mathfrak{B}$-model structure and it will suffice to prove the result when $Y$ is $\mathfrak{B}$-cofibrant. Applying a symmetric argument we reduce to the case where both $X$ and $Y$ are $\mathfrak{B}$-cofibrant, which in turn implies that also $X\boxtimes Y$ is $\mathfrak{B}$-cofibrant. By Proposition~18.9.4 in \cite{hirsch}
the canonical map $\hocolim_\mathfrak{B} Z \rightarrow \colim_\mathfrak{B} Z$ is a weak equivalence for any $\mathfrak{B}$-cofibrant $\mathfrak{B}$-space $Z$. The claim now follows since the colimit functor is strong symmetric monoidal.
\end{proof}
\section{$\mathfrak{B}$-categories and braided monoidal structures}\label{sec:B-categories}
In this section we introduce the notion of a $\mathfrak{B}$-category and equip the category of such with a braided monoidal structure. We then relate the braided (strict) monoidal objects in this setting to braided (strict) monoidal categories in the usual sense. Finally we introduce the $\mathfrak{B}$-category rectification functor and use this to show that any braided monoidal structure can be rectified to a strictly commutative structure up to $\mathfrak{B}$-equivalence.
\subsection{$\mathfrak{B}$-categories and the Grothendieck construction}\label{subsec:B-categories-Grothendieck}
Let $\mathit{Cat}$ denote the category of small categories and let $\mathit{Cat}^{\mathfrak{B}}$ be the functor category of
$\mathfrak{B}$-diagrams in $\mathit{Cat}$. We shall refer to an object in $\mathit{Cat}^{\mathfrak{B}}$ as a \emph{$\mathfrak{B}$-category}. Recall that the Grothendieck construction $\mathfrak{B}\!\int\! X$ on a $\mathfrak{B}$-category $X$ is a category with objects
(\mathbf n, \mathbf x)$ given by an object $\mathbf n$ in $\mathfrak{B}$ and an object $\mathbf x$ in the category $X(\mathbf n)$. A morphism $(\alpha,s)\colon
(\mathbf m,\mathbf x)\to (\mathbf n,\mathbf y)$ is a morphism $\alpha\colon \mathbf m\to \mathbf n$ in $\mathfrak{B}$ together with a morphism $s\colon X(\alpha)(\mathbf x)\to\mathbf y$ in $X(\mathbf n)$. The composition of morphisms is defined by
\[
(\beta,t)\circ (\alpha,s) =(\beta\circ \alpha, t\circ X(\beta)(s)).
\]
This construction defines a functor $\mathfrak{B}\!\int\colon \mathit{Cat}^{\mathfrak{B}}\to \mathit{Cat}$ in the obvious way. We think of
$\mathfrak{B}\!\int \! X$ as the homotopy colimit of $X$ in $\mathit{Cat}$. This is justified by Thomason's homotopy colimit theorem \cite[Theorem~1.2]{T} which states that there is a natural weak equivalence
\begin{equation}\label{eq:Thomason-equivalence}
\eta\colon \hocolim_{\mathbf n\in \mathfrak{B}} \mathrm{N}(X(\mathbf n)) \xrightarrow{\simeq} \textstyle \mathrm{N}(\mathfrak{B}\!\int\!X).
\end{equation}
Let us say that a functor $Y\to Y'$ between small categories is a weak equivalence if the induced map of nerves $\mathrm{N}(Y)\to \mathrm{N}(Y')$ is a weak equivalence of simplicial sets. We say that a map of $\mathfrak{B}$-categories $X\to X'$ is a \emph{$\mathfrak{B}$-equivalence} if the map of Grothendieck constructions $\mathfrak{B}\!\int\!X\to \mathfrak{B}\!\int\!X'$ is a weak equivalence in this sense. By the natural weak equivalence in \eqref{eq:Thomason-equivalence} this is equivalent to the level-wise nerve $\mathrm{N}(X)\to \mathrm{N}(X')$ being a $\mathfrak{B}$-equivalence in the sense of the previous section. Let $w$ denote the class of weak equivalences in $\mathit{Cat}$, and let $w_{\mathfrak{B}}$ be the class of
$\mathfrak{B}$-equivalences in $\mathit{Cat}^{\mathfrak{B}}$. With the given definition of $\mathfrak{B}$-equivalences it is not surprising that the categories $\mathit{Cat}^{\mathfrak{B}}$ and $\mathit{Cat}$ become equivalent after localization with respect to these classes of equivalences. For the convenience of the reader we have collected the relevant background material on localization in Appendix~\ref{app:localization}. Let us write $\Delta\colon \mathit{Cat}\to \mathit{Cat}^{\mathfrak{B}}$ for the functor that takes a small category to the corresponding constant $\mathfrak{B}$-category.
\begin{proposition}\label{prop:Bint-Delta-equivalence}
The functors $\mathfrak{B}\!\int$ and $\Delta$ induce an equivalence of the localized categories
\[
\textstyle\mathfrak{B}\!\int \colon \mathit{Cat}^{\mathfrak{B}}[w_{\mathfrak{B}}^{-1}]\simeq \mathit{Cat}[w^{-1}] :\!\Delta.
\]
\end{proposition}
For the proof of the proposition we need to introduce an auxiliary endofunctor on $\mathit{Cat}^{\mathfrak{B}}$. Let
$(\mathfrak{B}\!\downarrow\!\bullet)$ be the $\mathfrak{B}$-category defined by the comma categories $(\mathfrak{B}\!\downarrow\! \mathbf n)$. By definition, an object of $(\mathfrak{B}\!\downarrow\! \mathbf n)$ is a pair $(\mathbf m,\gamma)$ given by an object $\mathbf m$ and a morphism $\gamma\colon \mathbf m\to \mathbf n$ in $\mathfrak{B}$. A morphism $\alpha\colon(\mathbf m_1,\gamma_1) \to
(\mathbf m_2,\gamma_2)$ is a morphism $\alpha\colon\mathbf m_1\to\mathbf m_2$ in $\mathfrak{B}$ such that $\gamma_1=\gamma_2\circ\alpha$. Let $\pi_n\colon (\mathfrak{B}\!\downarrow\!\mathbf n)\to \mathfrak{B}$ be the forgetful functor mapping
$(\mathbf m,\gamma)$ to $\mathbf m$. Clearly these functors assemble to a map of $\mathfrak{B}$-categories $\pi\colon (\mathfrak{B}\!\downarrow\! \bullet)\to\Delta(\mathfrak{B})$. Given a $\mathfrak{B}$-category $X$, the \emph{bar resolution} $\overline X$ is the $\mathfrak{B}$-category defined by the
level-wise Grothendieck constructions
\[
\textstyle\overline X(\mathbf n)=(\mathfrak{B}\!\downarrow\! \mathbf n)\!\int\! X\circ\pi_n.
\]
The structure maps making $\overline X$ a $\mathfrak{B}$-category are inherited from the $\mathfrak{B}$-category
$(\mathfrak{B}\!\downarrow\!\bullet)$ in the obvious way. Our use of the term ``bar resolution'' is motivated by the analogous bar resolution for
$\mathfrak{B}$-spaces that we shall consider in Section~\ref{subsec:E2-rectification}.
\begin{lemma}\label{lem:ev-equivalence}
There is a natural level-wise weak equivalence $\mathrm{ev}\colon \overline X\to X$.
\end{lemma}
\begin{proof}
For each $\mathbf n$ we define a functor
$
\mathrm{ev}(\mathbf n)\colon \textstyle(\mathfrak{B}\!\downarrow\! \mathbf n)\!\int\! X\circ\pi_n\to X(\mathbf n).
$
An object in the domain has the form $((\mathbf m,\gamma),\mathbf x)$ with $(\mathbf m,\gamma)$ in $(\mathfrak{B}\!\downarrow\! \mathbf n)$ and $\mathbf x$ an object in $X(\mathbf m)$. We map this to the object $X(\gamma)(\mathbf x)$ in $X(\mathbf n)$.
A morphism from $((\mathbf m_1,\gamma_1),\mathbf x_1)$ to $((\mathbf m_2,\gamma_2),\mathbf x_2)$ amounts to a morphism $\alpha\colon (\mathbf m_1,\gamma_1)\to (\mathbf m_2,\gamma_2)$ in $(\mathfrak{B}\!\downarrow\!\mathbf n)$ together with a morphism $s\colon X(\alpha)(\mathbf x_1)\to \mathbf x_2$ in $X(\mathbf m_2)$. We map such a morphism to the morphism
\[
X(\gamma_2)(s)\colon X(\gamma_1)(\mathbf x_1)=X(\gamma_2)(X(\alpha)(\mathbf x_1))\to X(\gamma_2)(\mathbf x_2)
\]
in $X(\mathbf n)$. These functors are compatible when $\mathbf n$ varies and give rise to the map of $\mathfrak{B}$-categories in the lemma. To show that $\mathrm{ev}(\mathbf n)$ is a weak equivalence, we consider the canonical functor
\[
j(\mathbf n)\colon X(\mathbf n)\to \textstyle(\mathfrak{B}\!\downarrow\! \mathbf n)\!\int\! X\circ\pi_n,\qquad \mathbf x\mapsto (1_n,\mathbf x)
\]
where $1_n$ denotes the terminal object in $(\mathfrak{B}\!\downarrow\! \mathbf n)$. Then $\mathrm{ev}(\mathbf n)\circ j(\mathbf n)$ is the identity functor on $X(\mathbf n)$ and it is easy to see that there is a natural transformation from the identity functor on $\textstyle(\mathfrak{B}\!\downarrow\! \mathbf n)\!\int\! X\circ\pi_n$ to $j(\mathbf n)\circ \mathrm{ev}(\mathbf n)$. Hence $j(\mathbf n)$ defines a homotopy inverse of $\mathrm{ev}(\mathbf n)$.
\end{proof}
\begin{lemma}\label{lem:pi-equivalence}
There is a natural $\mathfrak{B}$-equivalence $\pi\colon \overline X\to \Delta(\mathfrak{B}\!\int\! X)$.
\end{lemma}
\begin{proof}
For each $\mathbf n$ the forgetful functor $\pi_n\colon (\mathfrak{B}\!\downarrow\! \mathbf n)\to \mathfrak{B}$ gives rise to a functor
\[
\textstyle(\mathfrak{B}\!\downarrow\! \mathbf n)\!\int\! X\circ\pi_n\to \mathfrak{B}\!\int\!X
\]
by mapping an object $((\mathbf m,\gamma),\mathbf x)$ to $(\mathbf m,\mathbf x)$. Letting $\mathbf n$ vary this defines the map of $\mathfrak{B}$-categories in the lemma. We must show that the functor $\mathfrak{B}\!\int\!\pi$ is a weak equivalence and for this we consider the diagram of categories
\[
\xymatrix{
\textstyle\mathfrak{B}\!\int \!\big( (\mathfrak{B}\!\downarrow\!\bullet)\!\int\!X\circ\pi_{\bullet}\big) \ar[r]^-{\mathfrak{B}\!\int\!\pi}
\ar[d]_{\mathfrak{B}\!\int\!\mathrm{ev}}
&
\mathfrak{B}\!\int\!\Delta(\mathfrak{B}\!\int\! X) \ar[d]^{\cong}\\
\mathfrak{B}\!\int\! X& \ar[l]_-{\mathrm{proj}} \mathfrak{B}\times (\mathfrak{B}\!\int\!X)
}
\]
where $\mathrm{proj}$ denotes the projection away from the first variable. This diagram is not commutative but we claim that it commutes up to a natural transformation. Indeed, consider an object $(\mathbf n,(\mathbf m,\gamma),\mathbf x)$ with $\mathbf n$ in $\mathfrak{B}$, $(\mathbf m,\gamma)$ an object in $(\mathfrak{B}\!\downarrow\! \mathbf n)$, and $\mathbf x$ an object in $X(\mathbf m)$. The functor $\mathfrak{B}\!\int\!\mathrm{ev}$ maps this to $(\mathbf n,X(\gamma)(\mathbf x))$ whereas the other composition maps it to $(\mathbf m,\mathbf x)$. It is easy to see that the morphisms
\[
(\gamma,\mathrm{id}_{X(\gamma)(\mathbf x)})\colon (\mathbf m,\mathbf x)\to (\mathbf n,X(\gamma)(\mathbf x))
\]
define a natural transformation between these functors. Since $\mathfrak{B}\!\int\!\mathrm{ev}$ is a weak equivalence by Lemma~\ref{lem:ev-equivalence} and $\mathrm{proj}$ is a weak equivalence because $\mathfrak{B}$ has an initial object, it follows that also $\mathfrak{B}\!\int\!\pi$ is a weak equivalence.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{prop:Bint-Delta-equivalence}]
We first observe that the localization of $\mathit{Cat}$ with respect to $w$ actually exists since Thomason has realized it as the homotopy category of a suitable model structure, see \cite{Thomason_Cat}. With terminology from Appendix~\ref{app:localization}, Lemmas~\ref{lem:ev-equivalence} and \ref{lem:pi-equivalence} give a chain of natural
$\mathfrak{B}$-equivalences relating $\Delta(\mathfrak{B}\!\int\! X)$ to $X$. The other composition $\mathfrak{B}\!\int\!\Delta Y$ can be identified with the product category $\mathfrak{B}\times Y$ which is weakly equivalent to $Y$ since $\mathfrak{B}$ has an initial object. Hence the result follows from Proposition~\ref{prop:localization-equivalence}.
\end{proof}
\begin{remark}
Let $(\bullet\!\downarrow\!\mathfrak{B})$ denote the $\mathfrak{B}^{\mathrm{op}}$-category defined by the comma categories
$(\mathbf n\!\downarrow\! \mathfrak{B})$. The universal property of the Grothendieck construction established in \cite[Proposition~1.3.1]{T} implies that $\mathfrak{B}\!\int\!X$ can be identified with the coend $(\bullet\!\downarrow\! \mathfrak{B})\times_{\mathfrak{B}}X$ in $\mathit{Cat}$. This in turn implies that the functor $\mathfrak{B}\!\int$ participates as the left adjoint in an adjunction
\[
\textstyle\mathfrak{B}\!\int\colon \mathit{Cat}^{\mathfrak{B}}\rightleftarrows \mathit{Cat} : \!\mathit{Cat}((\bullet\!\downarrow\!\mathfrak{B}),-).
\]
The right adjoint takes a small category $Y$ to the $\mathfrak{B}$-category for which the objects of $\mathit{Cat}((\mathbf n\!\downarrow\!\mathfrak{B}),Y)$ are the functors from $(\mathbf n\!\downarrow\! \mathfrak{B})$ to $Y$ and the morphisms are the natural transformations. However, this adjunction is not so useful for our purposes since it cannot be promoted to an adjunction between the braided monoidal structures we shall consider later.
\end{remark}
\subsection{Braided monoidal structures}
As in the case of $\mathfrak{B}$-spaces considered in Section~\ref{subsec:braided-monoidal-B-spaces}, the braided monoidal structure of $\mathfrak{B}$ induces a braided monoidal structure on $\mathit{Cat}^{\mathfrak{B}}$: Given $\mathfrak{B}$-categories $X$ and $Y$, we define $X\boxtimes Y$ to be the left Kan extension of the ($\mathfrak{B}\times \mathfrak{B}$)-category
\[
\mathfrak{B}\times \mathfrak{B} \xrightarrow{X\times Y} \mathit{Cat}\times\mathit{Cat} \xrightarrow{\times} \mathit{Cat}
\]
along the monoidal structure map $\sqcup\colon \mathfrak{B}\times\mathfrak{B}\to \mathfrak{B}$. Thus, the data specifying a map of
$\mathfrak{B}$-categories $X\boxtimes Y\to Z$ is equivalent to the data giving a map of ($\mathfrak{B}\times \mathfrak{B}$)-categories
$X(\mathbf m)\times Y(\mathbf n)\to Z(\mathbf m\sqcup\mathbf n)$. We also have the level-wise description
\[
X\boxtimes Y(\mathbf n)=\colim_{\mathbf n_1\sqcup\mathbf n_2\to\mathbf n} X(\mathbf n_1)\times Y(\mathbf n_2).
\]
The monoidal unit for the $\boxtimes$-product is the terminal $\mathfrak{B}$-category $U^{\mathfrak{B}}=\mathfrak{B}(\mathbf 0,-)$. Using that $\mathit{Cat}$ is cartesian closed one easily defines the coherence isomorphisms for associativity and unity required to make $\mathit{Cat}^{\mathfrak{B}}$ a monoidal category. We specify a braiding
$\mathfrak{b}\colon X\boxtimes Y\to Y\boxtimes X$ on $\mathit{Cat}^{\mathfrak{B}}$ by requiring that the categorical analogue of the diagram \eqref{eq:B-spaces-braiding} be commutative. The following is the categorical analogue of Proposition~\ref{prop:B-spaces-braided-monoidal}.
\begin{proposition}
The category $\mathit{Cat}^{\mathfrak{B}}$ equipped with the $\boxtimes$-product, the unit $U^{\mathfrak{B}}$, and the braiding $\mathfrak{b}$ is a braided monoidal category. \qed
\end{proposition}
We use the term \emph{$\mathfrak{B}$-category monoid} for a monoid in $\mathit{Cat}^{\mathfrak{B}}$. By the universal property of the
$\boxtimes$-product, the data needed to specify the unit $U^{\mathfrak{B}}\to A$ and the multiplication
$\otimes\colon A\boxtimes A\to A$ on a $\mathfrak{B}$-category monoid $A$ amounts to a unit object $\mathbf u$ in $A(\mathbf 0)$ and a map of ($\mathfrak{B}\times \mathfrak{B}$)-categories $\otimes\colon A(\mathbf m)\times A(\mathbf n)\to A(\mathbf m\sqcup \mathbf n)$ satisfying the usual associativity and unitality conditions. By the definition of the braiding, $A$ is commutative (that is,
$\otimes\circ \mathfrak{b}=\otimes$) if and only if the categorical version of the diagram \eqref{eq:commutativity-condition} is commutative.
In order to talk about braided $\mathfrak{B}$-category monoids we need the notion of a natural transformation between maps of $\mathfrak{B}$-categories: Given maps of $\mathfrak{B}$-categories $f,g\colon X\to Y$, a natural transformation $\phi\colon f\Rightarrow g$ is a family of natural transformations
$\phi(\mathbf n)\colon f(\mathbf n) \Rightarrow g(\mathbf n)$ such that for any morphism $\alpha\colon \mathbf m\to\mathbf n$
in $\mathfrak{B}$ we have an equality of natural transformations
$\phi(\mathbf n)\circ X(\alpha) = Y(\alpha)\circ \phi(\mathbf m)$ between the functors $f(\mathbf n)\circ X(\alpha)=Y(\alpha)\circ f(\mathbf m)$ and $g(\mathbf n)\circ X(\alpha)=Y(\alpha)\circ g(\mathbf m)$.
Here the symbol $\circ$ denotes the usual ``horizontal'' composition, and we use the notation $X(\alpha)$ and $Y(\alpha)$ both for the functors defined by $X$ and $Y$ and for the corresponding identity natural transformations.
A braiding of a $\mathfrak{B}$-category monoid $A$ is then a natural transformation $\Theta\colon \otimes\Rightarrow \otimes\circ\mathfrak{b}$ as depicted in the diagram
\begin{equation}\label{eq:Theta-braiding}
\xymatrix{
A\boxtimes A \ar[rr]^{\mathfrak{b}} \ar[dr]_{\otimes}^{\ \ \ \ \ \Theta \ \Rightarrow}& & A\boxtimes A \ar[dl]^{\otimes}\\
& A, &
}
\end{equation}
such that $\Theta$ has an inverse and the familiar axioms for a braided monoidal structure hold. In order to formulate this in a convenient manner we observe that the data defining a natural isomorphism $\Theta$ as above amounts to a natural isomorphism
\[
\Theta_{\mathbf m,\mathbf n}\colon \mathbf a\otimes \mathbf b\to A(\chi_{\mathbf m,\mathbf n}^{-1})(\mathbf b\otimes \mathbf a)
\]
of functors $A(\mathbf m)\times A(\mathbf n)\to A(\mathbf m\sqcup\mathbf n)$ for all $(\mathbf m,\mathbf n)$, with the requirement that for each pair of morphisms $\alpha\colon \mathbf m_1\to \mathbf m_2$ and $\beta\colon \mathbf n_1\to\mathbf n_2$ we have
\[
A(\alpha\sqcup \beta)\circ \Theta_{\mathbf m_1,\mathbf n_1} =\Theta_{\mathbf m_2,\mathbf n_2}\circ(A(\alpha)\times A(\beta))
\]
as an equality of natural transformations.
\begin{definition}\label{def:B-cat-braiding}
A braiding of a $\mathfrak{B}$-category monoid $A$ is a natural isomorphism $\Theta$ as in \eqref{eq:Theta-braiding} such that the diagrams
\[
\xymatrix{
\mathbf a\otimes \mathbf b\otimes\mathbf c \ar[r]^-{\Theta_{\mathbf l,\mathbf m}\otimes \mathrm{id}_{\mathbf c}} \ar[d]_{\Theta_{\mathbf l,\mathbf m\sqcup\mathbf n}}& A(\chi_{\mathbf l,\mathbf m}^{-1}\sqcup 1_{\mathbf n})(\mathbf b\otimes\mathbf a\otimes\mathbf c)
\ar[d]^{A(\chi_{\mathbf l,\mathbf m}^{-1}\sqcup 1_{\mathbf n})(\mathrm{id}_{\mathbf b}\otimes\Theta_{\mathbf l,\mathbf n})} \\
A(\chi^{-1}_{\mathbf l,\mathbf m\sqcup\mathbf n})(\mathbf b\otimes\mathbf c\otimes\mathbf a) \ar@{=}[r] &
A(\chi_{\mathbf l,\mathbf m}^{-1}\sqcup 1_{\mathbf n})A(1_{\mathbf m}\sqcup\chi_{\mathbf l,\mathbf n}^{-1})
(\mathbf b\otimes\mathbf c\otimes\mathbf a)
}
\]
and
\[
\xymatrix{
\mathbf a\otimes \mathbf b\otimes\mathbf c \ar[r]^-{\mathrm{id}_{\mathbf a}\otimes \Theta_{\mathbf m,\mathbf n}} \ar[d]_{\Theta_{\mathbf l\sqcup \mathbf m,\mathbf n}}& A(1_{\mathbf l}\sqcup\chi_{\mathbf m,\mathbf n}^{-1})(\mathbf a\otimes\mathbf c\otimes\mathbf b)
\ar[d]^{A(1_{\mathbf l}\sqcup\chi_{\mathbf m,\mathbf n}^{-1})(\Theta_{\mathbf l,\mathbf n}\otimes\mathrm{id}_{\mathbf b})} \\
A(\chi^{-1}_{\mathbf l\sqcup\mathbf m,\mathbf n})(\mathbf c\otimes\mathbf a\otimes\mathbf b) \ar@{=}[r] &
A(1_{\mathbf l}\sqcup \chi_{\mathbf m,\mathbf n}^{-1})A(\chi_{\mathbf l,\mathbf n}^{-1}\sqcup 1_{\mathbf m})
(\mathbf c\otimes\mathbf a\otimes\mathbf b)
}
\]
commute for all objects $\mathbf a\in A(\mathbf l)$, $\mathbf b\in A(\mathbf m)$, and $\mathbf c\in A(\mathbf n)$.
\end{definition}
Notice that for $A$ a constant $\mathfrak{B}$-category monoid this definition recovers the usual notion of a braided strict monoidal category. We write $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ for the category of braided $\mathfrak{B}$-category monoids and braiding preserving (strict) maps of $\mathfrak{B}$-category monoids. Thus, a morphism
$f\colon A\to B$ in $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ is a map of $\mathfrak{B}$-category monoids such that for all $\mathbf m,\mathbf n$ we have
\[
f(\mathbf m\sqcup\mathbf n)\circ \Theta_{\mathbf m,\mathbf n}^A=
\Theta_{\mathbf m,\mathbf n}^B\circ(f(\mathbf m)\times f(\mathbf n))
\]
as an equality of natural transformations between functors from $A(\mathbf m)\times A(\mathbf n)$ to
$B(\mathbf m\sqcup\mathbf n)$.
Similarly, we write $\mathfrak{B}r$-$\mathit{Cat}$ for the category of braided strict monoidal small categories and braiding preserving strict monoidal functors.
\begin{remark}
The natural transformations between maps of $\mathfrak{B}$-categories make $\mathit{Cat}^{\mathfrak{B}}$ a 2-category in the obvious way. Furthermore, this enrichment is compatible with the $\boxtimes$-product such that $\mathit{Cat}^{\mathfrak{B}}$ is a braided monoidal 2-category in the sense of \cite[Section~5]{JS}. In such a setting there is a notion of braided monoidal objects with coherence isomorphisms generalizing those for a braided monoidal category. With the terminology from \cite{JS}, our notion of a braided $\mathfrak{B}$-category monoid is thus the same thing as a braided strict monoidal object in $\mathit{Cat}^{\mathfrak{B}}$. We shall not be concerned with the coherence theory for $\mathfrak{B}$-categories and leave the details for the interested reader.
\end{remark}
Our main goal in this subsection is to show that the functor $\mathfrak{B}\!\int\!$ induces an equivalence between the categories $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ and $\mathfrak{B}r$-$\mathit{Cat}$ after localization as in
Proposition~\ref{prop:Bint-Delta-equivalence}. Consider in general a $\mathfrak{B}$-category monoid $A$. Then $\mathfrak{B}\!\int\! A$ inherits the structure of a strict monoidal category with product $\otimes\colon \mathfrak{B}\!\int\! A\times \mathfrak{B}\!\int\! A\to \mathfrak{B}\!\int\! A$ defined on objects and morphisms by
\[
\begin{aligned}
&\big[(\mathbf m_1,\mathbf a_1)\xrightarrow{(\alpha,s)}(\mathbf m_2,\mathbf a_2)\big] \otimes
\big[(\mathbf n_1,\mathbf b_1)\xrightarrow{(\beta,t)}(\mathbf n_2,\mathbf b_2)\big]\\
&=\big[(\mathbf m_1\sqcup \mathbf n_1,\mathbf a_1\otimes \mathbf b_1)\xrightarrow{(\alpha\sqcup \beta,s\otimes t)}
(\mathbf m_2\sqcup \mathbf n_2,\mathbf a_2\otimes \mathbf b_2)\big].
\end{aligned}
\]
The monoidal unit for $\otimes$ is the object $(\mathbf 0,\mathbf u)$ defined by the unit object $\mathbf u\in A(\mathbf 0)$.
Now suppose that $A$ has a braiding given by a compatible family of natural isomorphisms
$
\Theta_{\mathbf m,\mathbf n}\colon \mathbf a\otimes\mathbf b\to A(\chi_{\mathbf m,\mathbf n}^{-1})(\mathbf b\otimes\mathbf a)
$.
Then we define a braiding of $\mathfrak{B}\!\int\! A$ by the natural transformation
\[
(\mathbf m,\mathbf a)\otimes(\mathbf n,\mathbf b)=(\mathbf m\sqcup \mathbf n,\mathbf a\otimes\mathbf b)
\xrightarrow{(\chi_{\mathbf m,\mathbf n},A(\chi_{\mathbf m,\mathbf n})(\Theta_{\mathbf m,\mathbf n}))}
(\mathbf n\sqcup \mathbf m,\mathbf b\otimes\mathbf a)=(\mathbf n,\mathbf b)\otimes(\mathbf m,\mathbf a).
\]
We summarize the construction in the next proposition.
\begin{proposition}\label{prop:Grothendieck-promotion}
The Grothendieck construction gives rise to a functor
\[
\textstyle\mathfrak{B}\!\int\!\colon \text{$\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$}\to \text{$\mathfrak{B}r$-$\mathit{Cat}$}.
\eqno\qed
\]
\end{proposition}
\begin{remark}
It is clear from the definition that the functor $\mathfrak{B}\!\int\!$ is monoidal and hence takes monoids in $\mathit{Cat}^{\mathfrak{B}}$ to monoids in $\mathit{Cat}$. However, $\mathfrak{B}\!\int\!$ is not braided monoidal and consequently does not take commutative monoids to commutative monoids. The main point of the above proposition is that it nonetheless preserves braided monoidal structures.
\end{remark}
For the next proposition we write $w$ for the class of morphisms in $\mathfrak{B}r$-$\mathit{Cat}$ whose underlying functors are weak equivalences in $\mathit{Cat}$. Similarly we write $w_{\mathfrak{B}}$ for the class of morphisms in $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ whose underlying maps of $\mathfrak{B}$-categories are $\mathfrak{B}$-equivalences.
\begin{proposition}\label{prop:Bint-Delta--braided-equivalence}
The functors $\mathfrak{B}\!\int$ and $\Delta$ induce an equivalence of the localized categories
\[
\textstyle\mathfrak{B}\!\int \colon \text{$\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$}[w^{-1}_{\mathfrak{B}}]\simeq
\text{$\mathfrak{B}r$-$\mathit{Cat}$}[w^{-1}] :\!\Delta.
\]
\end{proposition}
The proof of the proposition is based on the following lemma.
\begin{lemma}\label{lem:B-down-bullet-braided}
The bar resolution functor taking a $\mathfrak{B}$-category $X$ to $\overline X$ can be promoted to an endofunctor on $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$.
\end{lemma}
\begin{proof}
Consider in general a $\mathfrak{B}$-category monoid $A$ with unit object $\mathbf u\in A(\mathbf 0)$ and multiplication specified by functors $\otimes\colon A(\mathbf m)\times A(\mathbf n)\to A(\mathbf m\sqcup\mathbf n)$. Then
$\overline A$ inherits a monoid structure with unit object
$(1_{\mathbf 0},\mathbf u)$ in $\overline A(\mathbf 0)$, and multiplication
\[
\textstyle\overline\otimes\colon (\mathfrak{B}\!\downarrow\!\mathbf m)\!\int\!A \circ \pi_{\mathbf m}\times
(\mathfrak{B}\!\downarrow\!\mathbf n)\!\int\!A \circ \pi_{\mathbf n}\to
(\mathfrak{B}\!\downarrow\!\mathbf m\sqcup \mathbf n)\!\int\!A \circ \pi_{\mathbf m\sqcup\mathbf n}
\]
defined on objects and morphisms by
\[
\begin{aligned}
&\big[((\mathbf m_1,\gamma_1),\mathbf a_1)\xrightarrow{(\alpha,s)} ((\mathbf m_2,\gamma_2),\mathbf a_2)\big] \overline\otimes
\big[((\mathbf n_1,\delta_1),\mathbf b_1)\xrightarrow{(\beta,t)} ((\mathbf n_2,\delta_2),\mathbf b_2)\big] \\
&=\big[((\mathbf m_1\sqcup \mathbf n_1,\gamma_1\sqcup\delta_1),\mathbf a_1\otimes\mathbf b_1)\xrightarrow{(\alpha\sqcup \beta,s\otimes t)} ((\mathbf m_2\sqcup \mathbf n_2,\gamma_2\sqcup\delta_2),\mathbf a_2\otimes \mathbf b_2)\big].
\end{aligned}
\]
Now suppose that in addition $A$ has a braiding specified by a family of natural isomorphisms
$
\Theta_{\mathbf m,\mathbf n}\colon \mathbf a\otimes\mathbf b\to A(\chi_{\mathbf m,\mathbf n}^{-1})(\mathbf b\otimes\mathbf a)
$. Then we define a braiding $\overline\Theta$ of $\overline A$ by the natural isomorphisms
\[
\xymatrix{
((\mathbf m,\gamma),\mathbf a)\overline\otimes ((\mathbf n,\delta),\mathbf b) \ar[rrr]^-{\overline\Theta}\ar@{=}[d] &&&
\overline A(\chi_{\mathbf m,\mathbf n}^{-1})\big[((\mathbf n,\delta),\mathbf b)\overline\otimes ((\mathbf m,\gamma),\mathbf a)\big]
\ar@{=}[d] \\
((\mathbf m\sqcup\mathbf n,\gamma\sqcup\delta),\mathbf a\otimes\mathbf b)
\ar[rrr]^-{(\chi_{\mathbf m,\mathbf n},A(\chi_{\mathbf m,\mathbf n})(\Theta_{\mathbf m,\mathbf n}))} &&&
((\mathbf n\sqcup\mathbf m,\chi_{\mathbf m,\mathbf n}^{-1}\circ(\delta\sqcup\gamma)),\mathbf b\otimes\mathbf a).
}
\]
It is straightforward to check the axioms for a braiding as formulated in Definition~\ref{def:B-cat-braiding}.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{prop:Bint-Delta--braided-equivalence}]
We first observe that the work of Fiedorowicz-Stelzer-Vogt \cite{FSV} shows that the localization of $\mathfrak{B}r$-$\mathit{Cat}$ exists,
cf.\ Example~\ref{ex:FSV-example} in the appendix. Given this, the proof of the proposition follows the same pattern as the proof of Proposition~\ref{prop:Bint-Delta-equivalence}: For a braided $\mathfrak{B}$-category monoid $A$ we know from Lemma~\ref{lem:B-down-bullet-braided} that
$\overline A$ has the structure of a braided $\mathfrak{B}$-category monoid and it is clear from the definitions that the $\mathfrak{B}$-equivalences $\mathrm{ev}$ and $\pi$ in Lemmas~\ref{lem:ev-equivalence} and \ref{lem:pi-equivalence} are morphisms in $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$. With the terminology from
Appendix~\ref{app:localization} we therefore have a chain of natural $\mathfrak{B}$-equivalences in $\mathfrak{B}r$-$\mathit{Cat}^{\mathfrak{B}}$ relating $A$ and
$\Delta(\mathfrak{B}\!\int\! A)$. Given a braided strict monoidal category $\mathcal A$, the other composition
$\mathfrak{B}\!\int\!\Delta(\mathcal A)$ can be identified with the product category $\mathfrak{B}\times \mathcal A$ as an object in $\mathfrak{B}r$-$\mathit{Cat}$. Clearly the projection $\mathfrak{B}\times \mathcal A\to \mathcal A$ is a weak equivalence in $\mathfrak{B}r$-$\mathit{Cat}$ and the proposition therefore follows from Proposition~\ref{prop:localization-equivalence}.
\end{proof}
\subsection{Rectification and strict commutativity}\label{subsec:rectification}
Now we proceed to introduce the $\mathfrak{B}$-category rectification functor and show how it allows us to replace braided monoidal structures by strictly commutative structures up to $\mathfrak{B}$-equivalence.
Let $(\mathcal{A}, \otimes, \mathbf u)$ be a braided strict monoidal small category.
We shall define the \emph{$\mathfrak{B}$-category rectification} of $\mathcal A$ to be a certain $\mathfrak{B}$-category $\Phi(\mathcal A)$ such that the objects of $\Phi(\mathcal A)(\mathbf n)$ are $n$-tuples $(\mathbf a_1,\ldots,\mathbf a_n)$ of objects in $\mathcal{A}$. By definition $\Phi(\mathcal A)(\mathbf 0)$ has the ``empty string'' $\emptyset$ as its only object. The morphisms in $\Phi(\mathcal A)(\mathbf n)$ are given by
\[
\Phi(\mathcal{A})(\mathbf{n})\big((\mathbf a_1,\ldots,\mathbf a_n),(\mathbf b_1,\ldots,\mathbf b_n)\big)=
\mathcal{A}(\mathbf a_1\otimes\ldots\otimes\mathbf a_n,\mathbf b_1\otimes\ldots\otimes\mathbf b_n)
\]
with composition inherited from $\mathcal A$. Here we agree that the $\otimes$-product of the empty string is the unit object $\mathbf u$ so that $\Phi(\mathcal A)(\mathbf 0)$ can be identified with the monoid of endomorphisms
$\mathcal A(\mathbf u,\mathbf u)$. For a morphism $\alpha\colon \mathbf m\to\mathbf n$ in $\mathfrak{B}$, the induced functor
\[
\Phi(\mathcal A)(\alpha)\colon \Phi(\mathcal A)(\mathbf m)\to \Phi(\mathcal A)(\mathbf n)
\]
is given on objects by
\[
\Phi(\mathcal A)(\alpha)(\mathbf a_1,\dots,\mathbf a_m)=(\mathbf a_{\bar\alpha^{-1}(1)},\dots,\mathbf a_{\bar\alpha^{-1}(n)})
\]
where $\bar\alpha\colon\mathbf m\to\mathbf n$ denotes the underlying injection, $\mathbf a_{\bar\alpha^{-1}(j)}=\mathbf a_i$ if $\bar\alpha(i)=j$, and $\mathbf a_{\bar\alpha^{-1}(j)}=\mathbf u$ if $j$ is not in the image of $\bar\alpha$. In order to describe the action on morphisms we use Lemma~\ref{lemma unique decomposition} to get a factorization $\alpha=\Upsilon(\nu)\circ \xi$ with $\nu\in \mathcal M(\mathbf m,\mathbf n)$ and $\xi\in \mathcal B_m$. The action of $\Phi(\mathcal A)(\alpha)$ on a morphism $f$ from $(\mathbf a_1,\dots,\mathbf a_m)$ to $(\mathbf b_1,\dots,\mathbf b_m)$
is then determined by the commutativity of the diagram
\[
\xymatrix{
\mathbf a_{\bar\alpha^{-1}(1)}\otimes\dots\otimes\mathbf a_{\bar\alpha^{-1}(n)} \ar[d]_{\Phi(\mathcal A)(\alpha)(f)}
\ar@{=}[r] & \mathbf a_{\bar\xi^{-1}(1)}\otimes\dots\otimes\mathbf a_{\bar\xi^{-1}(m)}
& \mathbf a_1\otimes\dots\otimes \mathbf a_m \ar[l]_-{\xi_*}\ar[d]_f\\
\mathbf b_{\bar\alpha^{-1}(1)}\otimes\dots\otimes\mathbf b_{\bar\alpha^{-1}(n)}
\ar@{=}[r] & \mathbf b_{\bar\xi^{-1}(1)}\otimes\dots\otimes\mathbf b_{\bar\xi^{-1}(m)}
& \mathbf b_1\otimes\dots\otimes \mathbf b_m \ar[l]_-{\xi_*}
}
\]
where $\xi_*$ denotes the canonical action of $\xi$ on the $m$-fold $\otimes$-product. In particular, this describes the action of $\Phi(\mathcal A)(\alpha)$ on the generating morphisms in
Lemma~\ref{lemma generating morphisms for B} and one easily checks that the relations in this lemma are preserved. Hence the above construction does indeed define a $\mathfrak{B}$-category.
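To illustrate the construction in the smallest nontrivial case, write $c$ for the braiding of $\mathcal A$ and let $\sigma\in\mathcal B_2=\mathfrak{B}(\mathbf 2,\mathbf 2)$ be the braid generator, with the convention that the canonical action $\xi_*$ of $\sigma$ on a two-fold $\otimes$-product is the braiding isomorphism of $\mathcal A$. In the above factorization we then have $\nu=\mathrm{id}$ and $\xi=\sigma$, so $\Phi(\mathcal A)(\sigma)$ sends an object $(\mathbf a_1,\mathbf a_2)$ to $(\mathbf a_2,\mathbf a_1)$ and a morphism $f\colon\thinspace \mathbf a_1\otimes\mathbf a_2\to\mathbf b_1\otimes\mathbf b_2$ to the conjugate
\[
\Phi(\mathcal A)(\sigma)(f)=c_{\mathbf b_1,\mathbf b_2}\circ f\circ c_{\mathbf a_1,\mathbf a_2}^{-1}\colon\thinspace
\mathbf a_2\otimes\mathbf a_1\to\mathbf b_2\otimes\mathbf b_1.
\]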
The construction is clearly functorial in $\mathcal A$ so that we have defined a functor
$\Phi\colon \text{$\mathfrak{B}r$-$\mathit{Cat}$}\to \mathit{Cat}^\mathfrak{B}$. This functor was first considered in the unpublished Master's Thesis by the second author \cite{mir}.
The $\mathfrak{B}$-category $\Phi(\mathcal A)$ is homotopy constant in positive degrees in the sense of the next lemma. Here we let $\mathfrak{B}_+$ denote the full subcategory of $\mathfrak{B}$ obtained by excluding the initial object $\mathbf 0$.
\begin{lemma}\label{lem:Phi-positive-equivalences}
The functor $\Phi(\mathcal A)(\alpha)\colon \Phi(\mathcal A)(\mathbf m)\to \Phi(\mathcal A)(\mathbf n)$ is a weak equivalence for any morphism $\alpha\colon\mathbf m\to\mathbf n$ in $\mathfrak{B}_+$.
\end{lemma}
\begin{proof}
We first consider a morphism of the form $j\colon \mathbf 1\to\mathbf m$ and claim that the functor $\Phi(\mathcal A)(j)$ is in fact an equivalence of categories. Indeed, let $p\colon \Phi(\mathcal A)(\mathbf m)\to \Phi(\mathcal A)(\mathbf 1)$ be the obvious functor taking $(\mathbf a_1,\dots,\mathbf a_m)$ to $(\mathbf a_1\otimes\dots\otimes\mathbf a_m)$.
Then $p\circ j$ is the identity on $\Phi(\mathcal A)(\mathbf 1)$ and it is clear that the other composition $j\circ p$ is naturally isomorphic to the identity on
$\Phi(\mathcal A)(\mathbf m)$. For a general morphism $\alpha\colon\mathbf m\to\mathbf n$ in $\mathfrak{B}_+$ we have a commutative diagram
\[
\xymatrix{
&\Phi(\mathcal A)(\mathbf 1) \ar[dl]_{\Phi(\mathcal A)(j)} \ar[dr]^{\Phi(\mathcal A)(\alpha j)}& \\
\Phi(\mathcal A)(\mathbf m) \ar[rr]^{\Phi(\mathcal A)(\alpha)} && \Phi(\mathcal A)(\mathbf n)
}
\]
and the result follows.
\end{proof}
The next proposition shows that $\Phi$ takes braided monoidal structures to strictly commutative structures and is the reason why we refer to $\Phi$ as a ``rectification functor''. Let us write $\mathcal C(\mathit{Cat}^{\mathfrak{B}})$ for the category of commutative $\mathfrak{B}$-category monoids.
\begin{proposition}
The $\mathfrak{B}$-category ${\Phi}(\mathcal{A})$ is a commutative monoid in $\mathit{Cat}^\mathfrak{B}$ and ${\Phi}$
defines a functor $\Phi\colon \text{$\mathfrak{B}r$-$\mathit{Cat}$}\to \mathcal{C}(\mathit{Cat}^\mathfrak{B})$.
\end{proposition}
\begin{proof}
We define functors $\otimes\colon \Phi(\mathcal{A})(\mathbf{m})\times\Phi(\mathcal{A})(\mathbf{n})\rightarrow \Phi(\mathcal{A})(\mathbf{m}\sqcup\mathbf{n})$ by
\[
(\mathbf a_1,\dots,\mathbf a_m)\otimes(\mathbf b_1,\dots,\mathbf b_n)=
(\mathbf a_1,\dots,\mathbf a_m,\mathbf b_1,\dots,\mathbf b_n)
\]
on objects and by applying the monoidal structure $f\otimes g$ of $\mathcal A$ on morphisms. These functors are natural in $(\mathbf m,\mathbf n)$ as one verifies by checking it on the generating morphisms in Lemma~\ref{lemma generating morphisms for B}. By the universal property of the $\boxtimes$-product we therefore get an associative product on $\Phi(\mathcal A)$. It is clear that the object $\emptyset$ in $\Phi(\mathcal A)(\mathbf 0)$ specifies a unit for this multiplication. The categorical analogue of the criterion for commutativity expressed by the commutativity of \eqref{eq:commutativity-condition} clearly holds on objects, and on morphisms it follows from the naturality of the braiding on $\mathcal A$.
\end{proof}
\begin{remark}\label{rem:Phi-non-strict}
The definition of $\Phi(\mathcal A)$ can be extended to braided monoidal small categories $\mathcal A$ that are not necessarily strict monoidal. Indeed, the objects of $\Phi(\mathcal A)(\mathbf n)$ are again $n$-tuples $(\mathbf a_1,\dots,\mathbf a_n)$ of objects in $\mathcal A$ and a morphism
from $(\mathbf a_1,\dots,\mathbf a_n)$ to $(\mathbf b_1,\dots,\mathbf b_n)$ is defined to be a morphism
\[
(\cdots((\mathbf a_1\otimes\mathbf a_2)\otimes \mathbf a_3)\otimes\dots\otimes\mathbf a_{n-1})\otimes\mathbf a_n\to
(\cdots((\mathbf b_1\otimes\mathbf b_2)\otimes \mathbf b_3)\otimes\dots\otimes\mathbf b_{n-1})\otimes\mathbf b_n
\]
in $\mathcal A$. Proceeding as in the strict monoidal case, the coherence theory for braided monoidal categories ensures that $\Phi(\mathcal A)$ canonically has the structure of a commutative $\mathfrak{B}$-category monoid. This is functorial with respect to braided strong monoidal functors that strictly preserve the unit objects.
\end{remark}
We shall view $\mathcal C(\mathit{Cat}^{\mathfrak{B}})$ as the full subcategory of $\mathfrak{B}rCatB$ given by the braided $\mathfrak{B}$-category monoids with identity braiding $\otimes=\otimes\circ \mathfrak b$.
\begin{proposition}\label{prop:Phi-Bint-equivalence}
The composite functor
\[
\mathfrak{B}rCat\xrightarrow{\Phi} \mathcal C(\mathit{Cat}^{\mathfrak{B}}) \xrightarrow{} \mathfrak{B}rCatB\xrightarrow{\mathfrak{B}\!\int\!}\mathfrak{B}rCat
\]
is related to the identity functor on $\mathfrak{B}rCat$ by a natural weak equivalence.
\end{proposition}
\begin{proof}
For a braided strict monoidal category $\mathcal A$ we define a functor $P\colon \mathfrak{B}\!\int\!\Phi(\mathcal A)\to \mathcal A$ such that $P$ takes an object $(\mathbf m,(\mathbf a_1,\dots,\mathbf a_m))$ to $\mathbf a_1\otimes\dots\otimes\mathbf a_m$. A morphism $(\alpha,f)$ from $(\mathbf m,(\mathbf a_1,\dots,\mathbf a_m))$ to $(\mathbf n,(\mathbf b_1,\dots,\mathbf b_n))$ is given by a morphism $\alpha\colon \mathbf m\to\mathbf n$ in $\mathfrak{B}$ together with a morphism $f$ from
$\mathbf a_{\bar\alpha^{-1}(1)}\otimes\dots\otimes\mathbf a_{\bar\alpha^{-1}(n)}$ to $\mathbf b_1\otimes \dots\otimes\mathbf b_n$. Using Lemma~\ref{lemma unique decomposition} we get a factorization $\alpha=\Upsilon(\nu)\circ \xi$ with $\nu\in \mathcal M(\mathbf m,\mathbf n)$ and $\xi\in \mathcal B_m$, and let $P(\alpha,f)$ be the composition
\[
\xymatrix{
\mathbf a_1\otimes\dots\otimes \mathbf a_m \ar[r]^{P(\alpha,f)} \ar[d]_{\xi_*}& \mathbf b_1\otimes\dots\otimes \mathbf b_n \\
\mathbf a_{\bar\xi^{-1}(1)}\otimes\dots\otimes\mathbf a_{\bar\xi^{-1}(m)} \ar@{=}[r]&
\mathbf a_{\bar\alpha^{-1}(1)}\otimes\dots\otimes\mathbf a_{\bar\alpha^{-1}(n)}.\ar[u]^f
}
\]
It is straightforward to check that $P$ is a braided strict monoidal functor. Furthermore, it follows from the definition of Thomason's equivalence \eqref{eq:Thomason-equivalence} that the composition
\[
\mathrm{N}(\Phi(\mathcal A)(\mathbf 1))\to \hocolim_{\mathbf n\in \mathfrak{B}}\mathrm{N}(\Phi(\mathcal A)(\mathbf n))\xrightarrow{\eta} \textstyle\mathrm{N}(\mathfrak{B}\!\int\!\Phi(\mathcal A))\xrightarrow{P} \mathrm{N}(\mathcal A)
\]
is the canonical identification. Hence it suffices to prove that the first map, induced by the inclusion $\{\mathbf 1\}\to\mathfrak{B}$, is a weak equivalence. To this end we first restrict $\mathrm{N}(\Phi(\mathcal A))$ to $\mathfrak{B}_+$, where all the structure maps are weak equivalences by Lemma~\ref{lem:Phi-positive-equivalences}. Then it follows from \cite[Lemma~IV.5.7]{GJ} that the diagram
\[
\xymatrix{
\mathrm{N}(\Phi(\mathcal A)(\mathbf 1)) \ar[r] \ar[d]& \mathrm{N}(\Phi(\mathcal A))_{h\mathfrak{B}_+}\ar[d]\\
\{\mathbf 1\} \ar[r] &\mathrm{N}(\mathfrak{B}_+)
}
\]
is homotopy cartesian, and since $\mathrm{N}(\mathfrak{B}_+)$ is contractible this in turn implies that $\mathrm{N}(\Phi(\mathcal A)(\mathbf 1)) \to\mathrm{N}(\Phi(\mathcal A))_{h\mathfrak{B}_+}$ is a weak equivalence. Secondly, it is easy to see that the inclusion of $\mathfrak{B}_+$ in $\mathfrak{B}$ is homotopy cofinal, so that the induced map
$\mathrm{N}(\Phi(\mathcal A))_{h\mathfrak{B}_+}\to \mathrm{N}(\Phi(\mathcal A))_{h\mathfrak{B}}$ is a weak equivalence by \cite[Theorem~19.6.13]{hirsch}.
\end{proof}
Combining the results obtained in this section we get the following theorem.
\begin{theorem}\label{thm:Braided-B-Cat-rectification}
Every braided $\mathfrak{B}$-category monoid is related to a strictly commutative $\mathfrak{B}$-category monoid by a chain of natural $\mathfrak{B}$-equivalences in $\mathfrak{B}rCatB$.
\end{theorem}
\begin{proof}
Given a braided $\mathfrak{B}$-category monoid $A$, we have the following chain of $\mathfrak{B}$-equivalences
\[
\textstyle A\simeq \Delta(\mathfrak{B}\!\int\! A)\simeq \Delta\big(\mathfrak{B}\!\int\!\Phi(\mathfrak{B}\!\int\!A)\big) \simeq \Phi(\mathfrak{B}\!\int\!A).
\]
The first and last equivalences are the chains of $\mathfrak{B}$-equivalences $\Delta(\mathfrak{B}\!\int\!(-))\simeq(-)$ from the proof of Proposition~\ref{prop:Bint-Delta--braided-equivalence} and the
$\mathfrak{B}$-equivalence in the middle is obtained by applying $\Delta$ to the weak equivalence $\mathfrak{B}\!\int\!\Phi(-)\simeq(-)$ in
Proposition~\ref{prop:Phi-Bint-equivalence}.
\end{proof}
\section{$E_2$ spaces and braided commutativity}\label{sec:E2-spaces}
Building on the categorical foundations in the last section, we proceed to show that every $E_2$ space can be represented by a strictly commutative $\mathfrak{B}$-space monoid up to $\mathfrak{B}$-equivalence.
\subsection{Operadic interpretation of braided monoidal structures}\label{subsec:operads}
In order to relate our results from the previous section to multiplicative structures on spaces, it is convenient to work with an operadic interpretation of braided monoidal structures. By a $\mathit{Cat}$-operad we understand an operad internal to the category $\mathit{Cat}$. Thus, a $\mathit{Cat}$-operad $\mathsf M$ is given by a sequence of small categories $\mathsf M(k)$ for $k\geq 0$ together with functors
\[
\gamma \colon \mathsf M(k)\times \mathsf M(j_1)\times\dots\times \mathsf M(j_k)\to \mathsf M(j_1+\dots+j_k),
\]
a unit object $\mathbf 1\in \mathsf M(1)$, and a right $\Sigma_k$-action on $\mathsf M(k)$. These data are required to satisfy the usual axioms for associativity, unity, and equivariance as listed in \cite[Definition~1.1]{may}. We shall always assume that a $\mathit{Cat}$-operad $\mathsf M$ is reduced in the sense that $\mathsf M(0)$ is the terminal category with one object and one morphism. A $\mathit{Cat}$-operad as above gives rise to a monad $\mathbb M$ on $\mathit{Cat}$ by letting
\[
\mathbb M(X)=\coprod_{k\geq 0}\mathsf M(k)\times_{\Sigma_k}X^k
\]
for a small category $X$. Here $X^0$ denotes the terminal category. By definition, an $\mathsf M$-algebra in $\mathit{Cat}$ is an algebra for this monad and we write $\mathsf M$-$\mathit{Cat}$ for the category of $\mathsf M$-algebras. An algebra structure $\theta\colon \mathbb M(X)\to X$ is determined by a family of functors
$\theta_k\colon \mathsf M(k)\times X^k\to X$ satisfying the axioms listed in \cite[Lemma~1.4]{may}.
Following \cite[Section~8]{FSV} we introduce a $\mathit{Cat}$-operad $\mathfrak{B}r$ such that $\mathfrak{B}r$-algebras are braided strict monoidal small categories. The objects of $\mathfrak{B}r(k)$ are the elements $a\in \Sigma_k$ and given objects $a$ and $b$, a morphism $\alpha\colon a\to b$ is an element $\alpha\in \mathcal B_k$ such that
$\bar\alpha a=b$. Composition in $\mathfrak{B}r(k)$ is inherited from $\mathcal B_k$ and the right action of an element $g\in \Sigma_k$ is defined on objects and morphisms by taking $(\alpha\colon a\to b)$ to $(\alpha\colon ag\to bg)$. The structure map
\[
\gamma\colon \mathfrak{B}r(k)\times \mathfrak{B}r(j_1)\times\dots\times\mathfrak{B}r(j_k)\to \mathfrak{B}r(j_1+\dots+j_k)
\]
is defined on objects by
\[
\gamma(a,b_1,\dots,b_k)=a(j_1,\dots,j_k)\circ (b_1\sqcup\dots\sqcup b_k)
\]
where $a(j_1,\dots,j_k)$ denotes the block permutation of $\mathbf j_1\sqcup\dots\sqcup \mathbf j_k$ specified by $a$. The action on morphisms is analogous except for the obvious permutation of the indices.
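To illustrate the structure map, take $k=2$, $(j_1,j_2)=(1,2)$, and let $a\in\Sigma_2$ be the transposition. The block permutation $a(1,2)$ of $\mathbf 1\sqcup\mathbf 2=\mathbf 3$ moves the first block past the second while preserving the internal order of the blocks, so that
\[
\gamma(a,1_1,1_2)=a(1,2),\qquad a(1,2)(1)=3,\quad a(1,2)(2)=1,\quad a(1,2)(3)=2,
\]
where $1_1$ and $1_2$ denote the unit objects of $\mathfrak{B}r(1)$ and $\mathfrak{B}r(2)$.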
Let $\mathsf A$ be the discrete $\mathit{Cat}$-operad given by the objects of $\mathfrak{B}r$. It is well-known and easy to check that $\mathsf A$-algebras are the same thing as monoids in $\mathit{Cat}$, that is, strict monoidal small categories. Hence a $\mathfrak{B}r$-algebra $X$ has an underlying strict monoidal category with unit object determined by the structure map $\theta_0\colon \mathfrak{B}r(0)\times X^0\to X$ and monoidal structure $\otimes=\theta_2(1_2,-,-)$ determined by restricting the structure map $\theta_2\colon\mathfrak{B}r(2)\times X^2\to X$ to the unit object $1_2\in \mathfrak{B}r(2)$. With $t$ the non-unit object of $\mathfrak{B}r(2)$ and $\zeta$ the generator of
$\mathcal B_2$, the morphism $\zeta\colon 1_2\to t$ determines a natural transformation
\[
\theta_2(\zeta,\mathrm{id}_{\mathbf x_1},\mathrm{id}_{\mathbf x_2})\colon \mathbf x_1\otimes\mathbf x_2\to \mathbf x_2\otimes \mathbf x_1
\]
which gives a braiding of $X$. Conversely, for a braided strict monoidal category $X$ we define a $\mathfrak{B}r$-algebra structure by the functors
$
\theta_k\colon \mathfrak{B}r(k)\times X^k\to X
$
taking a tuple of morphisms $\alpha\colon a\to b$ in $\mathfrak{B}r(k)$ and $f_i\colon \mathbf x_i\to\mathbf y_i$ in $X$ for $i=1,\dots,k$, to the composition in the commutative diagram
\[
\xymatrix{
\mathbf x_{a^{-1}(1)}\otimes\dots\otimes \mathbf x_{a^{-1}(k)} \ar[r]^{\alpha_*}
\ar[d]_{f_{a^{-1}(1)}\otimes \dots\otimes f_{a^{-1}(k)}} &
\mathbf x_{b^{-1}(1)}\otimes\dots\otimes \mathbf x_{b^{-1}(k)}
\ar[d]^{f_{b^{-1}(1)}\otimes \dots\otimes f_{b^{-1}(k)}} \\
\mathbf y_{a^{-1}(1)}\otimes\dots\otimes \mathbf y_{a^{-1}(k)} \ar[r]^{\alpha_*} &
\mathbf y_{b^{-1}(1)}\otimes\dots\otimes \mathbf y_{b^{-1}(k)}.
}
\]
Here $\alpha_*$ denotes the canonical action of $\alpha$ defined by the braided monoidal structure. Summarizing, we have the following consistency result that justifies our use of the notation $\mathfrak{B}r$-$\mathit{Cat}$ in the previous section.
\begin{lemma}\label{lem:Cat-Br-algebras}
The category $\mathfrak{B}rCat$ of $\mathfrak{B}r$-algebras is isomorphic to the category of braided strict monoidal categories. \qed
\end{lemma}
It is natural to ask for an analogous operadic characterization of braided $\mathfrak{B}$-category monoids. However, since the symmetric groups do not act on the iterated $\boxtimes$-products in $\mathit{Cat}^{\mathfrak{B}}$, we instead have to work with braided operads as introduced by Fiedorowicz \cite{fied}. By definition, a braided $\mathit{Cat}$-operad $\mathsf M$ is a sequence of small categories $\mathsf M(k)$ for $k\geq 0$ together with structure maps and a unit just as for a $\mathit{Cat}$-operad. The difference from an (unbraided) $\mathit{Cat}$-operad is that in the braided case we require a right $\mathcal B_k$-action on $\mathsf M(k)$ for all $k$ such that the braided analogue of the equivariance axiom for a $\mathit{Cat}$-operad holds. A braided $\mathit{Cat}$-operad $\mathsf M$ defines a monad on $\mathit{Cat}^{\mathfrak{B}}$ by letting
\[
\mathbb M(X)=\coprod_{k\geq 0}\mathsf M(k)\times_{\mathcal B_k}X^{\boxtimes k}
\]
for a $\mathfrak{B}$-category $X$. By definition, an $\mathsf M$-algebra in $\mathit{Cat}^{\mathfrak{B}}$ is an algebra for this monad and we write $\mathsf M$-$\mathit{Cat}^{\mathfrak{B}}$ for the category of
$\mathsf M$-algebras. It follows from the universal property of the $\boxtimes$-product that an $\mathsf M$-algebra structure on a $\mathfrak{B}$-category $X$ can be described in terms of functors
\begin{equation}\label{eq:level-Br-algebra}
\theta_k\colon\mathsf M(k)\times X(\mathbf n_1)\times\dots\times X(\mathbf n_k)\to X(\mathbf n_1\sqcup\dots\sqcup\mathbf n_k)
\end{equation}
such that the usual associativity and unity axioms hold as well as the equivariance axiom stating that the diagram
\[
\xymatrix{
\mathsf M(k)\times X(\mathbf n_1)\times\dots\times X(\mathbf n_k) \ar[r]^-{\theta_k\circ(\sigma\times\mathrm{id})} \ar[d]_{\mathrm{id}\times\bar\sigma}&
X(\mathbf n_1\sqcup\dots\sqcup\mathbf n_k)\ar[d]^{X(\sigma(n_1,\dots,n_k))} \\
\mathsf M(k)\times X(\mathbf n_{\bar \sigma^{-1}(1)})\times \dots\times X(\mathbf n_{\bar\sigma^{-1}(k)}) \ar[r]^-{\theta_k} &
X(\mathbf n_{\bar \sigma^{-1}(1)}\sqcup\dots \sqcup\mathbf n_{\bar\sigma^{-1}(k)})
}
\]
is commutative for all $\sigma\in \mathcal B_k$. We also use the notation $\mathfrak{B}r$ for the braided $\mathit{Cat}$-operad for which the category $\mathfrak{B}r(k)$ has objects the
elements $a\in \mathcal B_k$ and a morphism $\alpha\colon a\to b$ is an element $\alpha\in \mathcal B_k$ such that $\alpha a=b$. The structure maps making this a braided $\mathit{Cat}$-operad are defined as for the analogous unbraided operad. Let $\mathsf A$ be the discrete braided $\mathit{Cat}$-operad given by the objects in $\mathfrak{B}r$. It is easy to see that an $\mathsf A$-algebra in $\mathit{Cat}^{\mathfrak{B}}$ is the same thing as a $\mathfrak{B}$-category monoid and hence that a $\mathfrak{B}r$-algebra is a $\mathfrak{B}$-category monoid with extra structure. Indeed, suppose that $X$ is a $\mathfrak{B}r$-algebra in $\mathit{Cat}^{\mathfrak{B}}$ and write $\otimes\colon X\boxtimes X\to X$ for the monoid structure defined by restricting $\theta_2\colon \mathfrak{B}r(2)\times X^{\boxtimes 2}\to X$ to the unit object $1_2\in \mathfrak{B}r(2)$. With $\zeta$ the standard generator of $\mathcal B_2$, the morphism $\zeta\colon 1_2\to \zeta$ determines a natural isomorphism $\Theta=\theta_2(\zeta,-,-)$ as in the diagram \eqref{eq:Theta-braiding} and $\Theta$ satisfies the axioms for a braiding of $X$. Arguing as in the unbraided setting we get the following analogue of Lemma~\ref{lem:Cat-Br-algebras}.
\begin{lemma}\label{lem:Cat-B-Br-algebras}
The category $\mathfrak{B}rCatB$ is isomorphic to the category of braided $\mathfrak{B}$-category monoids.\qed
\end{lemma}
\subsection{Rectification of $E_2$ algebras}\label{subsec:E2-rectification}
Applying the nerve functor $\mathrm{N}$ to the unbraided $\mathit{Cat}$-operad $\mathfrak{B}r$ we get an operad $\mathrm{N}\mathfrak{B}r$ in simplicial sets with $k$th space $\mathrm{N}\mathfrak{B}r(k)$. This is an $E_2$ operad in the sense that its geometric realization is equivalent to the little 2-cubes operad, cf.\ \cite[Proposition~8.13]{FSV}. Since the nerve functor preserves products it is clear that it induces a functor $\mathrm{N}\colon \mathfrak{B}rCat\to \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}$. This was first observed by Fiedorowicz~\cite{fied}, and is the braided version of the analogous construction for permutative categories considered by May~\cite{may2}. Similarly, the braided version of the $\mathit{Cat}$-operad $\mathfrak{B}r$ gives rise to the braided operad
$\mathrm{N}\mathfrak{B}r$ in simplicial sets. By the level-wise characterization of $\mathfrak{B}r$-algebras in \eqref{eq:level-Br-algebra} it is equally clear that the level-wise nerve induces a functor $\mathrm{N}\colon \mathfrak{B}rCatB\to\text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$}$.
Now we want to say that the homotopy colimit functor induces a functor from $\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$ to $\mathrm{N}\mathfrak{B}r$-$\mathcal S$, but to explain this properly requires some preparation. Recall that the pure braid group $\mathcal P_k$ is the kernel of the projection $\varPi\colon\mathcal B_k\to \Sigma_k$. Following \cite{FSV}, a braided operad $\mathsf M$ can be ``debraided'' to an (unbraided) operad $\mathsf M/\mathcal P$ with $k$th term the orbit space $\mathsf M(k)/\mathcal P_k$. The structure maps are inherited from the structure maps of $\mathsf M$, and $\Sigma_k$ acts from the right via the isomorphism $\Sigma_k\cong \mathcal B_k/\mathcal P_k$. For instance, the debraiding of the braided $\mathit{Cat}$-operad $\mathfrak{B}r$ is the corresponding unbraided $\mathit{Cat}$-operad $\mathfrak{B}r$ and similarly for the braided operad $\mathrm{N}\mathfrak{B}r$. In the following lemma we consider the product of the latter with an arbitrary braided operad $\mathsf M$ and form the debraided operad $(\mathrm{N}\mathfrak{B}r\times \mathsf M)/\mathcal P$.
\begin{lemma}\label{lem:hocolim-promoted}
Let $\mathsf M$ be a braided operad in simplicial sets. Then the homotopy colimit functor can be promoted to a functor
\[
(-)_{h\mathfrak{B}}\colon \text{$\mathsf M$-$\mathcal S^{\mathfrak{B}}$}\to \text{$(\mathrm{N}\mathfrak{B}r\times \mathsf M)/\mathcal P$-$\mathcal S$}.
\]
\end{lemma}
\begin{proof}
Let $X$ be a $\mathfrak{B}$-space with $\mathsf M$-action defined by natural maps
\[
\theta_k\colon \mathsf M(k)\times X(\mathbf n_1)\times\dots\times X(\mathbf n_k)\to X(\mathbf n_1\sqcup\dots\sqcup\mathbf n_k).
\]
To $X$ we associate the simplicial category (that is, simplicial object in $\mathit{Cat}$) $\mathfrak{B}\!\int\!X$ obtained by applying the Grothendieck construction in each simplicial degree of $X$ thought of as a $\mathfrak{B}$-diagram of simplicial discrete categories. It is clear from the definition that the nerve of $\mathfrak{B}\!\int\!X$ can be identified with $X_{h\mathfrak{B}}$. Let us further view $\mathfrak{B}r(k)$ as a constant simplicial category and $\mathsf M(k)$ as a simplicial discrete category. Then we define maps of simplicial categories
\[
\textstyle\theta_k\colon \mathfrak{B}r(k)\times \mathsf M(k)\times (\mathfrak{B}\!\int\!X)^k\to \mathfrak{B}\!\int\!X
\]
such that a tuple of objects $a\in\mathfrak{B}r(k)$, $\mathsf m\in \mathsf M(k)$, and $(\mathbf m_i,x_i)\in \mathfrak{B}\!\int\!X$ for $i=1,\dots,k$, is mapped to the object
\[
\big(\mathbf m_{\bar a^{-1}(1)}\sqcup\dots\sqcup \mathbf m_{\bar a^{-1}(k)},X(a(m_1,\dots,m_k))\theta_k(\mathsf m,x_1,\dots,x_k)\big).
\]
A tuple of morphisms $\alpha\colon a\to b$ in $\mathfrak{B}r(k)$ and $\beta_i\colon (\mathbf m_i,x_i)\to (\mathbf n_i,y_i)$ in $\mathfrak{B}\!\int\!X$ for $i=1,\dots,k$, is mapped to the morphism specified by
\[
\alpha(m_{\bar\alpha^{-1}(1)},\dots,m_{\bar\alpha^{-1}(k)})\circ \beta_{\bar\alpha^{-1}(1)}\sqcup\dots\sqcup\beta_{\bar\alpha^{-1}(k)}.
\]
Evaluating the nerves of these simplicial categories we get a map of bisimplicial sets and by restricting to the simplicial diagonal a map of simplicial sets
\[
\textstyle(\mathrm{N}\mathfrak{B}r(k)\times \mathsf M(k))\times \mathrm{N}(\mathfrak{B}\!\int\!X)^k\to \mathrm{N}(\mathfrak{B}\!\int\!X).
\]
It is not difficult to check that these maps satisfy the conditions for a braided operad action and hence descend to an action of the debraided operad
$(\mathrm{N}\mathfrak{B}r\times \mathsf M)/\mathcal P$. Clearly this is functorial in $X$.
\end{proof}
When $\mathsf M$ is the braided operad $\mathrm{N} \mathfrak{B}r$ we can compose with the diagonal map of (unbraided) operads
$ \mathrm{N}\mathfrak{B}r/\mathcal P\to(\mathrm{N}\mathfrak{B}r\times \mathrm{N}\mathfrak{B}r)/\mathcal P$ to get the next lemma.
\begin{lemma}\label{lem:hocolim-Br-promoted}
The homotopy colimit functor can be promoted to a functor
\[
(-)_{h\mathfrak{B}}\colon \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$}\to \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}.
\eqno\qed
\]
\end{lemma}
The natural maps introduced so far are compatible in the expected way.
\begin{proposition}\label{prop:Thomason-diagram}
The diagram
\[
\xymatrix{
\mathfrak{B}rCatB \ar[r]^{\mathrm{N}} \ar[d]_{\mathfrak{B}\!\int} & \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$} \ar[d]^{(-)_{h\mathfrak{B}}}\\
\mathfrak{B}rCat \ar[r]^{\mathrm{N}} & \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}
}
\]
commutes up to natural weak equivalence.
\end{proposition}
\begin{proof}
Given a braided $\mathfrak{B}$-category monoid $X$, we claim that Thomason's equivalence $\eta$ in \eqref{eq:Thomason-equivalence} is in fact a morphism in $\text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}$.
In order to verify the claim we first use Proposition~\ref{prop:Grothendieck-promotion} and
Lemma~\ref{lem:Cat-Br-algebras} to get an explicit description of the $\mathrm{N}\mathfrak{B}r$-algebra structure on $\mathrm{N}(\mathfrak{B}\!\int\!X)$. Secondly, we use Lemmas~\ref{lem:Cat-B-Br-algebras} and
\ref{lem:hocolim-Br-promoted} to get an explicit description of the $\mathrm{N}\mathfrak{B}r$-algebra structure on
$(\mathrm{N} X)_{h\mathfrak{B}}$. It is then straightforward (although somewhat tedious) to check that Thomason's explicit description of $\eta$ in \cite[Lemma~1.2.1]{T} is compatible with the algebra structures.
\end{proof}
We proceed to show that the functor $(-)_{h\mathfrak{B}}$ in Lemma~\ref{lem:hocolim-Br-promoted} induces an equivalence after suitable localizations of the domain and target. Let us write $w$ for the class of morphisms in \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$} whose underlying maps of spaces are weak equivalences and $w_{\mathfrak{B}}$ for the class of morphisms in \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$} whose underlying maps of $\mathfrak{B}$-spaces are $\mathfrak{B}$-equivalences. The following is the $\mathfrak{B}$-space version of Proposition~\ref{prop:Bint-Delta--braided-equivalence}. As usual $\Delta$ denotes the constant functor embedding.
\begin{proposition}\label{prop:hocolim-B-equivalence}
The functors $(-)_{h\mathfrak{B}}$ and $\Delta$ induce an equivalence of the localized categories
\[
(-)_{h\mathfrak{B}}\colon \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$}[w_{\mathfrak{B}}^{-1}]\simeq \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}[w^{-1}]:\!\Delta.
\]
\end{proposition}
For the proof of the proposition we need to invoke the \emph{bar resolution} for $\mathfrak{B}$-spaces. Given a $\mathfrak{B}$-space $X$, this is the $\mathfrak{B}$-space $\overline X$ defined by
\[
\overline X(\mathbf n)=\hocolim_{(\mathfrak{B}\downarrow \mathbf n)}X\circ\pi_{\mathbf n}
\]
with notation as for the categorical bar resolution considered in Section~\ref{subsec:B-categories-Grothendieck}. (See e.g.\ \cite{Hollender-V_modules} for the interpretation of this as an actual bar construction.) Arguing as in the proof of Lemma~\ref{lem:hocolim-promoted} one sees that this construction can be promoted to an endofunctor on $\text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$}$.
\begin{proof}[Proof of Proposition~\ref{prop:hocolim-B-equivalence}]
First recall that the localization $\text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}[w^{-1}]$ exists since it can be realized as the homotopy category of a suitable model structure. As for the categorical analogue in Proposition~\ref{prop:Bint-Delta--braided-equivalence} there are natural $\mathfrak{B}$-equivalences $\mathrm{ev}\colon \overline A\to A$ and $\pi\colon \overline A\to \Delta(A_{h\mathfrak{B}})$ in $\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$. For an $\mathrm{N}\mathfrak{B}r$-algebra $Y$ in $\mathcal S$, the other composition
$\Delta(Y)_{h\mathfrak{B}}$ can be identified with the product algebra $\mathrm{N}\mathfrak{B}\times Y$, so that the projection defines a weak equivalence of $\mathrm{N}\mathfrak{B}r$-algebras $\Delta(Y)_{h\mathfrak{B}}\xrightarrow{\sim} Y$. The statement therefore follows from Proposition~\ref{prop:localization-equivalence}.
\end{proof}
With these preparations we can finally prove that $\mathrm{N}\mathfrak{B}r$-algebras in $\mathcal S^{\mathfrak{B}}$ can be rectified to strictly commutative $\mathfrak{B}$-space monoids. Our proof of this result differs from the proof of the analogous categorical statement in Theorem~\ref{thm:Braided-B-Cat-rectification} since we do not have a space-level version of the rectification functor $\Phi$. Instead we shall make use of the functor $F\colon \text{$\mathrm{N}\mathfrak{B}r$-$\mathcal S$}\to \text{$\mathfrak{B}r$-$\mathit{Cat}$}$ introduced by
Fiedorowicz-Stelzer-Vogt \cite{FSV} and then compose the latter with $\Phi$. The relevant facts about the functor $F$ are discussed in the context of localization in Example~\ref{ex:FSV-example}.
\begin{theorem}\label{thm:Br-SB-rectification}
Every $\mathrm{N}\mathfrak{B}r$-algebra in $\mathcal S^{\mathfrak{B}}$ is related to a strictly commutative $\mathfrak{B}$-space monoid by a chain of natural $\mathfrak{B}$-equivalences in
$\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$.
\end{theorem}
\begin{proof}
Let $A$ be an $\mathrm{N}\mathfrak{B}r$-algebra in $\mathcal S^{\mathfrak{B}}$. Then $A_{h\mathfrak{B}}$ is an $\mathrm{N}\mathfrak{B}r$-algebra in $\mathcal S$ and applying the functor $F$ we get a $\mathfrak{B}r$-algebra $F(A_{h\mathfrak{B}})$ in $\mathit{Cat}$. We claim that $A$ is related to the commutative $\mathfrak{B}$-space monoid $\mathrm{N}\Phi(F(A_{h\mathfrak{B}}))$ by a chain of $\mathfrak{B}$-equivalences in $\mathrm{N}\mathfrak{B}r$-$\mathcal S^{\mathfrak{B}}$. To this end we first proceed as in the proof of Proposition~\ref{prop:hocolim-B-equivalence} to get a chain of $\mathfrak{B}$-equivalences $A\simeq \Delta(A_{h\mathfrak{B}})$. Then we compose the chains of weak equivalences
\[
\textstyle A_{h\mathfrak{B}}\simeq \mathrm{N} F(A_{h\mathfrak{B}})\simeq \mathrm{N}(\mathfrak{B}\!\int\!\Phi(F(A_{h\mathfrak{B}})))\simeq \mathrm{N}\Phi(F(A_{h\mathfrak{B}}))_{h\mathfrak{B}}
\]
defined respectively in \cite[C.2]{FSV}, Proposition~\ref{prop:Phi-Bint-equivalence}, and Proposition~\ref{prop:Thomason-diagram}. This in turn gives a chain of $\mathfrak{B}$-equivalences
\[
A\simeq\Delta(A_{h\mathfrak{B}})\simeq \Delta(\mathrm{N}\Phi(F(A_{h\mathfrak{B}}))_{h\mathfrak{B}})\simeq \mathrm{N}\Phi(F(A_{h\mathfrak{B}})),
\]
again by Proposition~\ref{prop:hocolim-B-equivalence}.
\end{proof}
\begin{example}\label{ex:X-bullet-example-2}
In general an (unbraided) operad $\mathsf M$ in $\mathcal S$ gives rise to a functor $\mathsf M\colon \mathcal I^{\mathrm{op}}\to \mathcal S$ as explained in \cite{CMT}. Given a based space $X$ we have the $\mathcal I$-space $X^{\bullet}$ from Example~\ref{ex:X-bullet-example-1} and may form the coend $\mathsf M\otimes_{\mathcal I}X^{\bullet}$ (whose geometric realization is denoted $\mathsf M|X|$ by May~\cite{may}). In the same way a braided operad $\mathsf M$ gives rise to a functor $\mathsf M\colon \mathfrak{B}^{\mathrm{op}}\to\mathcal S$ and using the same notation for the pullback of $X^{\bullet}$ to a $\mathfrak{B}$-space we may form the coend $\mathsf M\otimes_{\mathfrak{B}}X^{\bullet}$ considered by Fiedorowicz~\cite{fied}. Writing $\mathsf M/\mathcal P$ for the debraided operad, the fact that the pure braid groups $\mathcal P_n$ act trivially on $X^n$ implies that there is a natural isomorphism $\mathsf M\otimes_{\mathfrak{B}}X^{\bullet}\cong \mathsf M/\mathcal P\otimes_{\mathcal I}X^{\bullet}$. Now specialize to the braided operad $\mathrm{N}\mathfrak{B}r$ and recall that the homotopy colimit $X^{\bullet}_{h\mathfrak{B}}$ can be identified with the coend $\mathrm{N}(\bullet\downarrow\mathfrak{B})\otimes_{\mathfrak{B}}X^{\bullet}$. Proceeding as in \cite[Section~4.2]{SHom} we define a map of $\mathfrak{B}^{\mathrm{op}}$-spaces $\mathrm{N}(\bullet\downarrow\mathfrak{B})\to \mathrm{N}\mathfrak{B}r$ such that the induced map of coends
\[
\mathrm{N}(\bullet\downarrow\mathfrak{B})\otimes_{\mathfrak{B}}X^{\bullet} \to \mathrm{N}\mathfrak{B}r\otimes_{\mathfrak{B}}X^{\bullet}
\]
is an equivalence. The above remarks together with the fact that the geometric realization of the debraiding $\mathrm{N}\mathfrak{B}r/\mathcal P$ is equivalent to the little 2-cubes operad $\mathcal C_2$ imply that there are equivalences
\[
|\mathrm{N}\mathfrak{B}r\otimes_{\mathfrak{B}}X^{\bullet}|\cong |\mathrm{N}\mathfrak{B}r/\mathcal P\otimes_{\mathcal I}X^{\bullet}|
\simeq \mathcal C_2\otimes_{\mathcal I}|X|^{\bullet}.
\]
For connected $X$ it therefore follows from \cite[Theorem~2.7]{may}
that the geometric realization of $X^{\bullet}_{h\mathfrak{B}}$ is homotopy equivalent to $\Omega^2\Sigma^2(|X|)$. We may interpret this as saying that the commutative $\mathfrak{B}$-space monoid $X^{\bullet}$ represents the 2-fold loop space $\Omega^2\Sigma^2(|X|)$.
\end{example}
\section{Classifying spaces for braided monoidal categories}\label{sec:classifying}
We consider a monoidal category $(\mathcal A, \otimes, I)$ and therein a monoid $A$, a right $A$-module $M$, and a left $A$-module $N$.
Suppressing a choice of parentheses from the notation, the two-sided bar construction $B^{^{_\otimes}}_\bullet (M,A,N)$ is the simplicial object defined by
\begin{equation*}
[k] \mapsto M\otimes A^{\otimes k}\otimes N
\end{equation*}
with structure maps as for the usual bar construction for spaces, see for instance \cite[Chapter~9]{may}. If the unit $I$ for the monoidal structure is
both a right and left $A$-module we can define the bar construction on $A$ as
$B^{^{_\otimes}}_\bullet(A)=B^{^{_\otimes}}_\bullet (I,A,I)$. This works in
particular when $I$ is a terminal object in $\mathcal A$.
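Explicitly, writing $\mu$ and $\eta$ for the multiplication and unit of $A$, and $\rho\colon M\otimes A\to M$, $\lambda\colon A\otimes N\to N$ for the module structures, the face and degeneracy operators in simplicial degree $k$ are the familiar ones (with the unit isomorphisms of $\mathcal A$ suppressed):
\begin{align*}
d_i &=
\begin{cases}
\rho\otimes \mathrm{id}_A^{\otimes(k-1)}\otimes \mathrm{id}_N, & i=0,\\
\mathrm{id}_M\otimes \mathrm{id}_A^{\otimes(i-1)}\otimes\mu\otimes \mathrm{id}_A^{\otimes(k-i-1)}\otimes \mathrm{id}_N, & 0<i<k,\\
\mathrm{id}_M\otimes \mathrm{id}_A^{\otimes(k-1)}\otimes\lambda, & i=k,
\end{cases}\\
s_j &= \mathrm{id}_M\otimes \mathrm{id}_A^{\otimes j}\otimes\eta\otimes \mathrm{id}_A^{\otimes(k-j)}\otimes \mathrm{id}_N, \qquad 0\leq j\leq k.
\end{align*}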
In order to say something about the multiplicative properties
of $B^{^{_\otimes}}_\bullet(A)$ we investigate how monoids behave with respect to the monoidal product.
If $\mathcal A$ is a braided monoidal category with braiding $b$ the monoidal product $A\otimes B$ of two monoids
$A$ and $B$ is again a monoid. Suppressing parentheses, the multiplication $\mu_{A\otimes B}$ is the morphism
$$A\otimes B\otimes A\otimes B \xrightarrow{\mathrm{id}_A\otimes b_{B,A}\otimes \mathrm{id}_B} A\otimes A\otimes B\otimes B
\xrightarrow{\mu_A\otimes\mu_B} A\otimes B$$
where $\mu_A$ and $\mu_B$ are the multiplications of the monoids $A$ and $B$ respectively. Unlike in a symmetric
monoidal category, the monoidal product of two commutative monoids in $\mathcal A$ is not necessarily a commutative monoid.
But it is straightforward to check that if $A$ is a commutative monoid, then the multiplication $\mu_A\colon\thinspace A\otimes A\rightarrow A$
is a monoid morphism.
Suppose given a monoid $A$ in $\mathcal A$ such that the unit $I$ is a right and left $A$-module. Then the above implies that for each
$k$, $B^{^{_\otimes}}_k(A)$ is a monoid. If in addition $A$ is commutative, the family of multiplication maps assembles into
a morphism $B^{^{_\otimes}}_\bullet(A)\otimes B^{^{_\otimes}}_\bullet(A)\rightarrow B^{^{_\otimes}}_\bullet(A)$ of simplicial objects,
where the monoidal product is taken degreewise. The bar construction on a commutative
monoid $A$ is a simplicial monoid in $\mathcal A$ with this multiplication.
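Concretely, in simplicial degree $k$ the resulting multiplication may be described, up to the suppressed unit isomorphisms, as the composite
\[
A^{\otimes k}\otimes A^{\otimes k}\xrightarrow{\ \mathrm{shuffle}\ } (A\otimes A)^{\otimes k}
\xrightarrow{\ \mu_A^{\otimes k}\ } A^{\otimes k},
\]
where the shuffle uses the braiding $b$ to move the $i$th factor of the second copy of $A^{\otimes k}$ past the last $k-i$ factors of the first copy.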
Now we specialize to the braided monoidal category $\mathcal S^\mathfrak{B}$ of $\mathfrak{B}$-spaces. Here we can realize a
simplicial object $Z_\bullet$ by taking the diagonal $|Z_\bullet|$ of the two simplicial
directions to obtain a $\mathfrak{B}$-space. We define the bar construction on a
$\mathfrak{B}$-space monoid $A$ as $B^{^{_\boxtimes}}(A)= |B^{^{_\boxtimes}}_\bullet (A)|$.
From now on we will refer to the simplicial version
as the simplicial bar construction. The above discussion about the multiplicative
properties of the simplicial bar
construction implies the following result.
\begin{lemma}
The bar construction $B^{^{_\boxtimes}}(A)$ on a commutative $\mathfrak{B}$-space monoid $A$ is a
(not necessarily commutative) monoid in $\mathcal S^\mathfrak{B}$.
\qed
\end{lemma}
Recall that the natural transformation $\nu_{A,B}\colon\thinspace A_{h\mathfrak{B}} \times B_{h\mathfrak{B}} \rightarrow (A\boxtimes B)_{h\mathfrak{B}}$ from Lemma~\ref{lemma X and Y flat implies weak equivalnece between times and boxtimes}
gives the homotopy colimit functor $(-)_{h\mathfrak{B}}\colon\thinspace {\mathcal{S}^\mathfrak{B}} \rightarrow\mathcal S$ the structure of a
lax monoidal functor. As usual when we have a lax monoidal functor,
it follows that if $A$ is a $\mathfrak{B}$-space monoid, then $A_{h\mathfrak{B}}$ inherits the structure of a monoid in
$\mathcal S$. If $M$ is a right $A$-module, then $M_{h\mathfrak{B}}$ inherits the structure of a right
$A_{h\mathfrak{B}}$-module in $\mathcal S$ and similarly for a left $A$-module $N$. We can then apply the two-sided
simplicial bar construction in $\mathcal S$ to $A_{h\mathfrak{B}}$, $M_{h\mathfrak{B}}$ and $N_{h\mathfrak{B}}$ and obtain
$B_\bullet(M_{h\mathfrak{B}}, A_{h\mathfrak{B}}, N_{h\mathfrak{B}})$. The natural transformation $\nu$ gives rise
to maps $B_k(M_{h\mathfrak{B}}, A_{h\mathfrak{B}}, N_{h\mathfrak{B}})\rightarrow B^{^{_\boxtimes}}_k(M, A, N)_{h\mathfrak{B}}$ that commute with the
simplicial structure maps. Hence we obtain a morphism
$$B(M_{h\mathfrak{B}}, A_{h\mathfrak{B}}, N_{h\mathfrak{B}})\rightarrow B^{^{_\boxtimes}}(M, A, N)_{h\mathfrak{B}}$$
in $\mathcal S$. By specializing to the case where $M$ and $N$ are the unit $U^{\mathfrak{B}}$ we can relate
$B^{^{_\boxtimes}}(A)_{h\mathfrak{B}}$ to $B(A_{h\mathfrak{B}})$ via $B(U^{\mathfrak{B}}_{h\mathfrak{B}}, A_{h\mathfrak{B}}, U^{\mathfrak{B}}_{h\mathfrak{B}})$.
The homotopy colimit of $U^{\mathfrak{B}}$ is isomorphic to $\mathrm{N}\mathfrak{B}$, which is a contractible simplicial set.
Hence the map
$B(\mathrm{N}\mathfrak{B}, A_{h\mathfrak{B}}, \mathrm{N}\mathfrak{B}) \rightarrow B(A_{h\mathfrak{B}})$ induced by the
projection $\mathrm{N}\mathfrak{B}\rightarrow \ast$ is a weak equivalence.
\begin{proposition}\label{proposition bar construction and homotopy colimit}
If $A$ is a $\mathfrak{B}$-space monoid with underlying flat $\mathfrak{B}$-space, then the maps defined above
$$B^{^{_\boxtimes}}(A)_{h\mathfrak{B}} \xleftarrow{\simeq} B(\mathrm{N}\mathfrak{B}, A_{h\mathfrak{B}}, \mathrm{N}\mathfrak{B}) \xrightarrow{\simeq} B(A_{h\mathfrak{B}})$$
are weak equivalences.
\end{proposition}
\begin{proof}
The argument for the right-hand map being a weak equivalence is given before the proposition.
The map $(A_{h\mathfrak{B}})^{\times k}\rightarrow (A^{\boxtimes k})_{h\mathfrak{B}}$ is a weak equivalence for
each $k\geq0$ since $A$ is flat,
see Lemma~\ref{lemma X and Y flat implies weak equivalnece between times and boxtimes}.
It follows that the left-hand map is the diagonal of a map of bisimplicial sets which is a
weak equivalence at each simplicial degree of the bar construction. Therefore it is itself
a weak equivalence.
\end{proof}
Our goal is to use the bar construction in $\mathfrak{B}$-spaces to give a double delooping of the group completion of $A_{h\mathfrak{B}}$
for a commutative $\mathfrak{B}$-space monoid $A$ with underlying flat $\mathfrak{B}$-space. In order to apply the previous proposition twice, we first show that the bar construction on a $\mathfrak{B}$-space monoid with flat underlying $\mathfrak{B}$-space is again flat.
\begin{lemma}\label{lemma bar construction flat}
If $A$ is a $\mathfrak{B}$-space monoid with underlying flat $\mathfrak{B}$-space, then the underlying
$\mathfrak{B}$-space of the bar construction
$B^{^{_\boxtimes}}(A)$ on $A$ is also flat.
\end{lemma}
\begin{proof}
When $A$ is flat, it follows from Lemma~\ref{lemma pushout-product axiom} that $B^{^{_\boxtimes}}_k(A)$ is flat for each $k\geq0$.
The criterion for flatness given in Proposition~\ref{prop flat B-space}
can be checked in each simplicial degree. Thus, $B^{^{_\boxtimes}}(A)$ is the diagonal of
a bisimplicial object which is flat at each simplicial degree of the bar construction
and is therefore flat.
\end{proof}
We use the well-known fact that the group completion of a homotopy commutative simplicial monoid $M$ may be modelled by the canonical map $M\rightarrow \Omega (B (M)^{\mathrm{fib}})$, where the fibrant replacement
$B(M)^{\mathrm{fib}}$ is the singular simplicial set of the geometric realization of $B(M)$.
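For instance, if $M$ is already group-like, e.g.\ a simplicial abelian group, then the canonical map
\[
M\longrightarrow \Omega\big(B(M)^{\mathrm{fib}}\big)
\]
is itself a weak equivalence, so that in this case the group completion does not change the homotopy type.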
By a double delooping of a simplicial set $K$ we mean a based simplicial set $L$ such that $\Omega^2(L^{\mathrm{fib}})\simeq K$.
\begin{proposition}
If $A$ is a commutative $\mathfrak{B}$-space monoid with underlying flat $\mathfrak{B}$-space,
then $B^{^{_\boxtimes}}(B^{^{_\boxtimes}}(A))_{h\mathfrak{B}}$ is a double delooping of the group completion of $A_{h\mathfrak{B}}$.
\end{proposition}
\begin{proof}
Applying Proposition~\ref{proposition bar construction and homotopy colimit} to $B^{^{_\boxtimes}}(A)$ in place of $A$, and using Lemma~\ref{lemma bar construction flat}, we get
\begin{equation*}
B^{^{_\boxtimes}}(B^{^{_\boxtimes}}(A))_{h\mathfrak{B}} \simeq B( B^{^{_\boxtimes}}(A)_{h\mathfrak{B}}).
\end{equation*}
Evaluating $\Omega((-)^{\mathrm{fib}})$ on this we get equivalences
\begin{equation*}
\Omega(B^{^{_\boxtimes}}(B^{^{_\boxtimes}}(A))_{h\mathfrak{B}}^{\mathrm{fib}}) \simeq \Omega( B( B^{^{_\boxtimes}}(A)_{h\mathfrak{B}})^{\mathrm{fib}})
\simeq B^{^{_\boxtimes}}(A)_{h\mathfrak{B}}^{\mathrm{fib}} \simeq B(A_{h\mathfrak{B}})^{\mathrm{fib}}
\end{equation*}
where the map in the middle is an equivalence since $B^{^{_\boxtimes}}(A)_{h\mathfrak{B}}$ is connected and hence group-like. Looping once more we see that $B^{^{_\boxtimes}}(B^{^{_\boxtimes}}(A))_{h\mathfrak{B}}$ is indeed a double delooping of the group completion of $A_{h\mathfrak{B}}$.
\end{proof}
Recall from Remark~\ref{rem:Phi-non-strict} that we can construct a commutative $\mathfrak{B}$-space monoid $\mathrm{N}\Phi(\mathcal A)$ for any braided (not necessarily strict) monoidal small category.
Next, we show that $\mathrm{N}\Phi(\mathcal A)$ has underlying flat $\mathfrak{B}$-space so we can apply the above result to the double bar construction on $\mathrm{N}\Phi(\mathcal A)$.
\begin{lemma}
Let $\mathcal A$ be a braided monoidal small category. The commutative $\mathfrak{B}$-space monoid $\mathrm{N}\Phi(\mathcal A)$ has underlying flat $\mathfrak{B}$-space.
\end{lemma}
\begin{proof}
Here we prove the result for a braided strict monoidal small category; the non-strict case is left to the reader. We use the criterion given in Proposition~\ref{prop flat B-space}. For each braided injection $\mathbf m\rightarrow \mathbf n$ the induced functor $\Phi(\mathcal A)(\mathbf m) \rightarrow \Phi(\mathcal A)(\mathbf n)$ is injective on both objects and morphisms. Thus the nerve of that map is a cofibration of simplicial sets.
The functor $\Phi(\mathcal A)(\mathbf m)\rightarrow \Phi(\mathcal A)(\mathbf m\sqcup \mathbf n)$ induced by the inclusion of $\mathbf m$ in $\mathbf m\sqcup \mathbf n$ takes an object $(\mathbf a_1,\ldots, \mathbf a_m)$ to
$(\mathbf a_1,\ldots, \mathbf a_m, \mathbf u, \ldots, \mathbf u)$, where $\mathbf u$ is the unit object of $\mathcal A$. Since we have a strict monoidal structure it takes a morphism $f$ to the morphism $f\otimes \mathrm{id}_{\mathbf u\otimes \cdots \otimes \mathbf u}= f$.
If we consider a diagram similar to \eqref{diagram condition for flatness} for the $\mathfrak{B}$-category $\Phi(\mathcal A)$ it is clear that the intersection of the images of $\Phi(\mathcal A)(\mathbf{l\sqcup m})$ and $\Phi(\mathcal A)(\mathbf{m\sqcup n})$
in $ \Phi(\mathcal A)(\mathbf{l\sqcup m\sqcup n})$ equals the image of $\Phi(\mathcal A)(\mathbf m)$. The same then holds for the $\mathfrak{B}$-space $\mathrm{N}\Phi(\mathcal A)$.
\end{proof}
\begin{corollary}
If $\mathcal A$ is a braided monoidal small category, then
$B^{^{_\boxtimes}}(B^{^{_\boxtimes}}(\mathrm{N}\Phi(\mathcal{A})))_{h\mathfrak{B}}$ is a double
delooping of the group completion of $\mathrm{N}\mathcal A$.
\end{corollary}
\begin{proof}
The underlying $\mathfrak{B}$-space of $\mathrm{N}\Phi(\mathcal A)$ is flat, so we can apply the proposition and get that $B^{^{_\boxtimes}}(B^{^{_\boxtimes}}(\mathrm{N}\Phi(\mathcal{A})))_{h\mathfrak{B}}$ is a double
delooping of the group completion of $\mathrm{N}\Phi(\mathcal A)_{h\mathfrak{B}}$. But by combining Propositions~\ref{prop:Thomason-diagram} and \ref{prop:Phi-Bint-equivalence}, the latter is weakly equivalent to $\mathrm{N}\mathcal A$.
\end{proof}
\section{$\mathcal{I}$-categories and $E_{\infty}$ spaces}\label{sec:I-section}
In this section we focus on diagrams indexed by the category $\mathcal I$ and we record the constructions and results analogous to those worked out for diagrams indexed by the category $\mathfrak{B}$ in the previous sections.
The proofs are completely analogous to those in the braided case (if not simpler) and will be omitted throughout. We then relate this material to the category of symmetric spectra.
Let $\mathit{Cat}^{\mathcal I}$ denote the category of $\mathcal I$-categories with the symmetric monoidal convolution product inherited from $\mathcal I$. The Grothendieck construction defines a functor $\mathcal I\!\int\colon \mathit{Cat}^{\mathcal I}\to \mathit{Cat}$ and a map of $\mathcal I$-categories $X\to Y$ is said to be an
\emph{$\mathcal I$-equivalence} if the induced functor $\mathcal I\!\int\! X\to\mathcal I\!\int\! Y$ is a weak equivalence. We write $\mathsf{Sym}$ for the symmetric monoidal analogue of the $\mathit{Cat}$-operad $\mathfrak{B}r$. Thus, the category $\mathsf{Sym}(k)$ has as its objects the elements $a$ in $\Sigma_k$ and a morphism
$\alpha\colon a\to b$ is an element $\alpha\in \Sigma_k$ such that $\alpha a=b$. It is proved in \cite{may2} that a $\mathsf{Sym}$-algebra in $\mathit{Cat}$ is the same thing as a permutative (i.e., symmetric strict monoidal) category and that the nerve $\mathrm{N}\mathsf{Sym}$ can be identified with the Barratt-Eccles operad. The latter is an $E_{\infty}$ operad in the sense that $\mathrm{N}\mathsf{Sym}(k)$ is $\Sigma_k$-free and contractible for all $k$. As in Proposition~\ref{prop:Bint-Delta--braided-equivalence} one checks that there is an equivalence of localized categories
\begin{equation*}
\textstyle\mathcal I\!\int \colon \text{$\mathsf{Sym}$-$\mathit{Cat}^{\mathcal I}$}[w_{\mathcal I}^{-1}] \simeq \text{$\mathsf{Sym}$-$\mathit{Cat}$}[w^{-1}] :\!\Delta.
\end{equation*}
The rectification functor $\Phi$ from Section~\ref{subsec:rectification} also has a symmetric monoidal version, now in the form of a functor $\Phi\colon \text{$\mathsf{Sym}$-$\mathit{Cat}$}\to \mathcal C(\mathit{Cat}^{\mathcal I})$, where the codomain is the category of commutative $\mathcal I$-category monoids. The composite functor
\begin{equation}\label{eq:I-rectification-equivalence}
\text{$\mathsf{Sym}$-$\mathit{Cat}$}\xrightarrow{\Phi} \mathcal C(\mathit{Cat}^{\mathcal I}) \xrightarrow{} \text{$\mathsf{Sym}$-$\mathit{Cat}^{\mathcal I}$}\xrightarrow{\mathcal I\!\int} \text{$\mathsf{Sym}$-$\mathit{Cat}$}
\end{equation}
is weakly equivalent to the identity functor and arguing as in the proof of Theorem~\ref{thm:Braided-B-Cat-rectification} we get the following result.
\begin{theorem}
Every $\mathsf{Sym}$-algebra in $\mathit{Cat}^{\mathcal I}$ is related to a strictly commutative $\mathcal I$-category monoid by a chain of $\mathcal I$-equivalences in
\text{$\mathsf{Sym}$-$\mathit{Cat}^{\mathcal I}$}.\qed
\end{theorem}
In particular, every symmetric monoidal category is weakly equivalent to one of the form $\mathcal I\!\int\! A$ for $A$ a strictly commutative $\mathcal I$-category monoid. Now let $\mathcal S^{\mathcal I}$ be the category of $\mathcal I$-spaces equipped with the symmetric monoidal convolution product inherited from $\mathcal I$. A map of $\mathcal I$-spaces $X\to Y$ is an \emph{$\mathcal I$-equivalence} if the induced map of homotopy colimits $X_{h\mathcal I}\to Y_{h\mathcal I}$ is a weak equivalence and the $\mathcal I$-space version of Proposition~\ref{prop:hocolim-B-equivalence} gives an equivalence of the localized categories
\begin{equation}\label{NSym-I-equivalence}
(-)_{h\mathcal I}\colon\text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S^{\mathcal I}$}[w_{\mathcal I}^{-1}]\simeq \text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S$}[w^{-1}] :\!\Delta.
\end{equation}
Furthermore, one checks that the $\mathcal I$-category version of Thomason's equivalence \eqref{eq:Thomason-equivalence} gives a natural weak equivalence relating the two compositions in the diagram
\begin{equation}\label{eq:I-Thomason-commutativity}
\xymatrix{
\text{$\mathsf{Sym}$-$\mathit{Cat}^{\mathcal I}$} \ar[r]^{\mathrm{N}} \ar[d]_{\mathcal I\!\int} &\text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S^{\mathcal I}$} \ar[d]^{(-)_{h\mathcal I}}\\
\text{$\mathsf{Sym}$-$\mathit{Cat}$} \ar[r]^{\mathrm{N}} &\text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S$}.
}
\end{equation}
Arguing as in the proof of Theorem~\ref{thm:Br-SB-rectification} one can use this to show that every $\mathrm{N}\mathsf{Sym}$-algebra in $\mathcal S^{\mathcal I}$ is $\mathcal I$-equivalent to one that is strictly commutative. However, a stronger form of this statement has been proved in \cite{Sagave-Schlichtkrull}: There is a model structure on
$\text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S^{\mathcal I}$}$ such that the equivalence \eqref{NSym-I-equivalence} can be derived from a Quillen equivalence, and a further model structure on
$\mathcal C(\mathcal S^{\mathcal I})$ (the category of commutative $\mathcal I$-space monoids) making the latter Quillen equivalent to $\text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S^{\mathcal I}$}$.
\subsection{Symmetric spectra and $E_{\infty}$ spaces} Let $\mathit{Sp}^{\Sigma}$ be the category of symmetric spectra as defined in \cite{HSS}. The smash product of symmetric spectra makes this a symmetric monoidal category with monoidal unit the sphere spectrum. Given an (unbased) space $X$ we write $\Sigma^{\infty}(X_+)$ for the suspension spectrum with $n$th space $X_+\wedge S^n$ where $X_+$ denotes the union of $X$ with a disjoint base point. If $X$ is an $E_{\infty}$ space (i.e., an algebra for an $E_{\infty}$ operad in $\mathcal S$), then $\Sigma^{\infty}(X_+)$ is an $E_{\infty}$ symmetric ring spectrum for the same operad. It is proved in \cite{Elmendorf-M_infinite-loop} that in general an $E_{\infty}$ symmetric ring spectrum is stably equivalent to a strictly commutative symmetric ring spectrum. However, the proof of this fact is not very constructive and it is of interest to find more memorable
commutative models of the $E_{\infty}$ ring spectra in common use. Here we shall do this for $E_{\infty}$ symmetric ring spectra of the form $\Sigma^{\infty}(\mathrm{N} \mathcal A_+)$ for a permutative category $\mathcal A$. The relevant operad is the Barratt-Eccles operad $\mathrm{N}\mathsf{Sym}$ as explained above. In order to make use of the rectification functor $\Phi$ we recall from \cite[Section~3]{Sagave-Schlichtkrull} that the suspension spectrum functor extends to a strong symmetric monoidal functor $\mathbb S^{\mathcal I}\colon \mathcal S^{\mathcal I}\to \mathit{Sp}^{\Sigma}$ taking an $\mathcal I$-space $X$ to the symmetric spectrum $\mathbb S^{\mathcal I}[X]$ with $n$th space $X(\mathbf n)_+\wedge S^n$. Given a permutative category $\mathcal A$ we may apply this functor to the commutative $\mathcal I$-space monoid $\mathrm{N}\Phi(\mathcal A)$ to get the commutative symmetric ring spectrum $\mathbb S^{\mathcal I}[\mathrm{N}\Phi(\mathcal A)]$.
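Unwinding this description (a sketch of the standard structure), the spectrum structure map in level $n$ is induced by the inclusion $\mathbf n\to\mathbf n\sqcup\mathbf 1$ in $\mathcal I$ together with the canonical identification $S^{n}\wedge S^{1}\cong S^{n+1}$,
\[
X(\mathbf n)_+\wedge S^{n}\wedge S^{1}\longrightarrow X(\mathbf n\sqcup\mathbf 1)_+\wedge S^{n+1},
\]
and the symmetric group $\Sigma_n$ acts diagonally on $X(\mathbf n)$ and the sphere coordinates.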
\begin{proposition}\label{prop:S[NA]-model}
Given a permutative category $\mathcal A$, the commutative symmetric ring spectrum
$\mathbb S^{\mathcal I}[\mathrm{N}\Phi(\mathcal A)]$ is related to
$\Sigma^{\infty}(\mathrm{N} \mathcal A_+)$ by a chain of natural stable equivalences of $E_{\infty}$ symmetric ring spectra.
\end{proposition}
\begin{proof}
Composing the natural weak equivalence relating the composite functor \eqref{eq:I-rectification-equivalence} to the identity functor with Thomason's weak equivalence relating the two compositions in \eqref{eq:I-Thomason-commutativity}, we get a weak equivalence of $\mathrm{N}\mathsf{Sym}$-algebras
\[
\textstyle \mathrm{N}\mathcal A \xleftarrow{\simeq} \mathrm{N}(\mathcal I\!\int\!\Phi(\mathcal A)) \xleftarrow{\simeq} (\mathrm{N}\Phi(\mathcal A))_{h\mathcal I}.
\]
Furthermore, using the bar resolution $\overline{(-)}$ as in Section~\ref{subsec:E2-rectification} we get a chain of $\mathcal I$-equivalences
\[
\Delta(\mathrm{N}\Phi(\mathcal A)_{h\mathcal I}) \xleftarrow{\simeq} \overline{\mathrm{N}\Phi(\mathcal A)}\xrightarrow{\simeq} \mathrm{N}\Phi(\mathcal A)
\]
in $\text{$\mathrm{N}\mathsf{Sym}$-$\mathcal S^{\mathcal I}$}$. This gives the result since the functor $\mathbb S^{\mathcal I}[-]$
takes $\mathcal I$-equivalences to stable equivalences.
\end{proof}
We also note that the symmetric spectrum $\mathbb S^{\mathcal I}[\mathrm{N}\Phi(\mathcal A)]$ has several of the pleasant properties discussed in \cite[Section~5]{HSS}: The fact that it is $S$-cofibrant (this is what some authors call flat) ensures that it is homotopically well-behaved with respect to the smash product, and the fact that it is semistable ensures that its spectrum homotopy groups can be identified with the stable homotopy groups of $\mathrm{N} \mathcal A$.
\begin{example}
The underlying infinite loop space $Q(S^0)$ of the sphere spectrum plays a fundamental role in stable homotopy theory. In order to realize the $E_{\infty}$ ring spectrum $\Sigma^{\infty}(Q(S^0)_+)$ as a commutative symmetric ring spectrum, we use that $Q(S^0)$ is weakly equivalent to the classifying space of Quillen's localization construction $\Sigma^{-1}\Sigma$, where as usual $\Sigma$ denotes the category of finite sets and bijections. (We refer to \cite{Grayson-higher} for a general discussion of Quillen's localization construction and to \cite{Sagave-Schlichtkrull} for an explicit description of $\Sigma^{-1}\Sigma$.) The category $\Sigma^{-1}\Sigma$ inherits a permutative structure from $\Sigma$ and it follows from Proposition~\ref{prop:S[NA]-model} that the commutative symmetric ring spectrum $\mathbb S^{\mathcal I}[\mathrm{N}\Phi(\Sigma^{-1}\Sigma)]$ is a model of $\Sigma^{\infty}(Q(S^0)_+)$.
\end{example}
\appendix
\section{Localization of categories}\label{app:localization}
We make some elementary remarks on localization of categories. Let $\mathcal C$ be a (not necessarily small) category and let $\mathcal V$ be a class of morphisms in $\mathcal C$. Recall that a \emph{localization of $\mathcal C$ with respect to $\mathcal V$} is a category $\mathcal C'$ together with a functor $L\colon \mathcal C\to \mathcal C'$ that maps the morphisms in $\mathcal V$ to isomorphisms in $\mathcal C'$ and is initial with this property: Given a category $\mathcal D$ and a functor $F\colon \mathcal C\to \mathcal D$ that maps the morphisms in $\mathcal V$ to isomorphisms in $\mathcal D$, there exists a unique functor $F'\colon \mathcal C'\to \mathcal D$ such that $F=F'\circ L$. Clearly a localization of $\mathcal C$ with respect to $\mathcal V$ is uniquely determined up to isomorphism if it exists. We sometimes use the notation $\mathcal C\to \mathcal C[\mathcal V^{-1}]$ for such a localization. It will be convenient to assume that $\mathcal C$ and $\mathcal C[\mathcal V^{-1}]$ always have the same objects.
Let again $\mathcal C$ be a category equipped with a class of morphisms $\mathcal V$ and consider a category $\mathcal A$ together with a pair of functors $F,G\colon \mathcal A\to \mathcal C$. In this situation we say that $F$ and $G$ are \emph{related by a chain of natural transformations in $\mathcal V$}, written $F\simeq_{\mathcal V}G$, if there exists a finite sequence of functors $H_1,\dots, H_n$ from $\mathcal A$ to $\mathcal C$ with $H_1=F$ and $H_n=G$, and for each $1\leq i<n$ either a natural transformation $H_i\to H_{i+1}$ with values in $\mathcal V$ or a natural transformation $H_{i+1}\to H_i$ with values in $\mathcal V$. In the next proposition we write $I_{\mathcal C}$ for the identity functor on $\mathcal C$.
\begin{proposition}\label{prop:localization-equivalence}
Let $\mathcal C$ and $\mathcal D$ be categories related by the functors $F\colon \mathcal C\to \mathcal D$ and $G\colon \mathcal D\to \mathcal C$.
Suppose that $\mathcal V$ is a class of morphisms in $\mathcal C$ and that $\mathcal W$ is a class of morphisms in $\mathcal D$ such that $F(\mathcal V)\subseteq \mathcal W$, $G(\mathcal W)\subseteq \mathcal V$, $G\circ F\simeq_{\mathcal V} I_{\mathcal C}$, and $F\circ G\simeq_{\mathcal W} I_{\mathcal D}$. Then the localization of $\mathcal C$ with respect to $\mathcal V$ exists if and only if the localization of $\mathcal D$ with respect to $\mathcal W$ exists, and in this case $F$ and $G$ induce an equivalence of categories $F\colon \mathcal C[\mathcal V^{-1}]\rightleftarrows \mathcal D[\mathcal W^{-1}] :\!G$.
\end{proposition}
\begin{proof}
Suppose that a localization of $\mathcal D$ with respect to $\mathcal W$ exists and write $L\colon\mathcal D\to \mathcal D'$ for such a localization. Then we define a category $\mathcal C'$ with the same objects as $\mathcal C$ and morphism sets
$\mathcal C'(C_1,C_2)=\mathcal D'(LF(C_1),LF(C_2))$. The composition in $\mathcal C'$ is inherited from the composition in
$\mathcal D'$. We claim that the canonical functor $L^{\mathcal C}\colon \mathcal C\to \mathcal C'$, which is the identity on objects and given by $LF$ on morphisms, is a localization of $\mathcal C$ with respect to $\mathcal V$. Thus, consider a category $\mathcal E$ and a functor $H\colon \mathcal C\to \mathcal E$ that maps the morphisms in $\mathcal V$ to isomorphisms in $\mathcal E$. We must define a functor $H'\colon \mathcal C'\to \mathcal E$ such that $H=H'\circ L^{\mathcal C}$, and it is clear that we must have $H'(C)=H(C)$ for all objects $C$ in $\mathcal C'$. In order to define the action on morphisms, we factor the composite functor
$K=H\circ G\colon \mathcal D\to \mathcal E$ over the localization $\mathcal D'$ to get a functor $K'\colon\mathcal D'\to \mathcal E$. The relation $G\circ F\simeq_{\mathcal V}I_{\mathcal C}$ gives a natural isomorphism $\phi_C\colon HGF(C)\to H(C)$ and we define the action of $H'$ on the morphism set $\mathcal C'(C_1,C_2)$ to be the composition
\[
\mathcal D'(LF(C_1),LF(C_2))\xrightarrow{K'}\mathcal E(HGF(C_1),HGF(C_2))\xrightarrow{\phi_{C_2}\circ(-)\circ \phi_{C_1}^{-1}}
\mathcal E(H(C_1),H(C_2)).
\]
It is immediate from the definition that $H'$ satisfies the required conditions and it remains to show that it is uniquely determined. The composition $L^{\mathcal C}\circ G$ factors over $\mathcal D'$ to give the left hand square in the commutative diagram
\[
\xymatrix@-1pt{
\mathcal D\ar[r]^G \ar[d]_L & \mathcal C \ar[r]^F \ar[d]_{L^{\mathcal C}}& \mathcal D \ar[d]_L\\
\mathcal D' \ar[r]^{G'} & \mathcal C' \ar[r] & \mathcal D'.
}
\]
Notice that the relation $F\circ G\simeq_{\mathcal W} I_{\mathcal D}$ gives a natural isomorphism relating the composition $L\circ F\circ G$ to $L$. Let $\mathcal J$ be the category with objects $0$ and $1$, and two non-identity arrows $i\colon 0\to 1$ and $j\colon 1\to 0$. (Thus, $\mathcal J$ is a groupoid with inverse isomorphisms $i$ and $j$.) Then we may interpret the natural isomorphism in question as a functor $\mathcal D\times \mathcal J\to \mathcal D'$, or, by adjointness, a functor $\mathcal D\to (\mathcal D')^{\mathcal J}$. The latter factors over $\mathcal D'$ to give a natural isomorphism relating the composition in the bottom row of the diagram to the identity functor on $\mathcal D'$. It follows that $G'\colon \mathcal D'\to \mathcal C'$ is fully faithful and consequently that $H'$ is uniquely determined on the full subcategory of $\mathcal C'$ generated by objects of the form $G(D)$ for $D$ in $\mathcal D$. Furthermore, the relation $G\circ F\simeq_{\mathcal V}I_{\mathcal C}$ implies that any morphism in $\mathcal C'$ can be written as a composition of morphisms in this subcategory with morphisms in the image of $L^{\mathcal C}$ and inverses of morphisms in the image of
$L^{\mathcal C}$. This shows that $H'$ is uniquely determined on the whole category $\mathcal C'$. The last statement in the proposition is an immediate consequence.
\end{proof}
\begin{example}\label{ex:FSV-example}
The work of Fiedorowicz-Stelzer-Vogt \cite{FSV} fits into this framework. Let $\mathsf M$ be a $\mathit{Cat}$-operad that is $\Sigma$-free in the sense that $\Sigma_k$ acts freely on $\mathsf M(k)$ for all $k$. In \cite[C.2]{FSV} the authors define a functor $F\colon\thinspace\text{$\mathrm{N} \mathsf M$-$\mathcal S$}\to
\text{$\mathsf M$-$\mathit{Cat}$}$ and show that if $\mathsf M$ satisfies a certain ``factorization condition'', then there are chains of weak equivalences $\mathrm{N}\circ F\simeq_wI$ and $F\circ \mathrm{N} \simeq_wI$ with $I$ the respective identity functors. By \cite[Lemma~8.12]{FSV} this applies in particular to the (unbraided) operad $\mathfrak{B}r$ discussed in Section~\ref{subsec:operads}. It is well-known that the localization of $\mathrm{N}\mathfrak{B}r$-$\mathcal S$ with respect to the weak equivalences exists, since it can be realized as the homotopy category of a suitable model category. Thus, it follows from Proposition~\ref{prop:localization-equivalence} that also the localization of $\mathfrak{B}r$-$\mathit{Cat}$ with respect to the weak equivalences exists and that these localized categories are equivalent. This is shown in \cite[Proposition~7.4]{FSV} except that the discussion of Grothendieck universes and ``localization up to equivalence'' is not really needed in order to state this result.
\end{example}
\end{document}
\begin{document}
\title[Jain Operators]{On Sz\'{a}sz-Mirakyan-Jain Operators preserving exponential functions}
\maketitle
\begin{center}
{\bf G. C. Greubel} \\
Newport News, VA, United States \\
[email protected]
\end{center}
\noindent \textbf{Abstract.} In the present article we define the Jain type modification of the generalized
Sz{\'a}sz-Mirakjan operators that preserve constant and exponential mappings. Moments, recurrence formulas, and other
identities are established for these operators. Approximation properties are also obtained with use of the
Bohman-Korovkin theorem.
\noindent \textbf{Keywords.} Sz{\'a}sz-Mirakjan operators, Jain basis functions, Jain operators,
Lambert W-function, Bohman-Korovkin theorem.
\noindent \textbf{2010 Mathematics Subject Classification}: 33E20, 41A25, 41A36.
\section{Introduction}
In approximation theory, positive linear operators have been studied with the test functions $\{1, x, x^2 \}$ in order
to determine the convergence of a function. Of interest are the Sz\'{a}sz-Mirakjan operators, based on the Poisson
distribution, which are useful in approximating functions on $[0, \infty)$ and are defined as, \cite{GMM}, \cite{OS},
\begin{align}\label{e1}
S_{n}(f;x) = \sum_{k=0}^{\infty} \frac{(n x)^{k}}{k!} \, e^{- n x} \, f\left(\frac{k}{n}\right).
\end{align}
In 1972, Jain \cite{GCJ} used the Lagrange expansion formula
\begin{align}\label{e2}
\phi(z) = \phi(0) + \sum_{k=1}^{\infty} \frac{1}{k!} \, \left[ D^{k-1} \, \left( f^{k}(z) \, \phi'(z) \right)
\right]_{z=0} \, \left(\frac{z}{f(z)}\right)^{k}
\end{align}
with $\phi(z) = e^{\alpha \, z}$ and $f(z) = e^{\beta \, z}$ to determine that
\begin{align}\label{e3}
1 = \alpha \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (\alpha + \beta k)^{k-1} \, z^{k} \, e^{- (
\alpha + \beta \, k) \, z}.
\end{align}
Jain established the basis functions
\begin{align}\label{e4}
L_{n,k}^{(\beta)}(x) = \frac{n x \, (n x + \beta k)^{k-1}}{k!} \, e^{-(n x + \beta \, k)}
\end{align}
with the normalization
\begin{align*}
\sum_{k=0}^{\infty} L_{n,k}^{(\beta)}(x) = 1
\end{align*}
and considered the operators
\begin{align}\label{e5}
B_{n}^{\beta}(f,x) = \sum_{k=0}^{\infty} L_{n,k}^{(\beta)}(x) \, f\left(\frac{k}{n}\right) \hspace{15mm}
x \in [0,\infty).
\end{align}
When $\beta = 0$ the Jain operators reduce to the Sz\'{a}sz-Mirakjan operators.
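As a quick numerical sanity check of the normalization stated above, the following short Python sketch (an illustration added here, not taken from \cite{GCJ}; the values of $n$, $\beta$, $x$ and the truncation level are arbitrary choices) sums the basis functions \eqref{e4} in log-scale.
\begin{verbatim}
# Illustrative check that the Jain basis functions L_{n,k}^{(beta)}(x) of
# (e4) sum to one; n, beta, x and the truncation level are arbitrary.
import math

n, beta, x = 10, 0.3, 1.5
total = 0.0
for k in range(400):
    log_term = (math.log(n * x) + (k - 1) * math.log(n * x + beta * k)
                - math.lgamma(k + 1) - (n * x + beta * k))
    total += math.exp(log_term)
print(total)  # approximately 1
\end{verbatim}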
Recently Acar, Aral, and Gonska \cite{AAG} considered the Sz\'{a}sz-Mirakjan operators which preserve the
test functions $\{1, e^{a x} \}$ and established the operators
\begin{align}\label{e6}
R_{n}^{*}(f;x) = e^{-n \, \gamma_{n}(x)} \, \sum_{k=0}^{\infty} \frac{(n \, \gamma_{n}(x))^{k}}{k!} \,
f\left(\frac{k}{n}\right)
\end{align}
for functions $f \in C[0,\infty)$, $x \geq 0$, and $n \in \mathbb{N}$ with the preservation property
\begin{align}\label{e7}
R_{n}^{*}(e^{2 a t};x) = e^{2 a x}.
\end{align}
Here the Jain basis is used to extend the class of operators for the test functions $\{1, e^{- \lambda x}\}$ by
defining Sz\'{a}sz-Mirakyan-Jain operators which preserve the mapping of $e^{- \lambda x}$, for $\lambda, x > 0$.
In the case of $\lambda=0$ the Sz\'{a}sz-Mirakyan-Jain operators are constant preserving operators. Moments, recurrence
formulas, and other identities are established for these new operators. Approximation properties are also
obtained with use of the Bohman-Korovkin theorem. The Lambert W-function and related properties are used
in the analysis of the properties obtained for the Sz\'{a}sz-Mirakyan-Jain operators.
\section{Sz\'{a}sz-Mirakyan-Jain Operators}
The Sz\'{a}sz-Mirakyan-Jain operators, (SMJ), which are a generalization of the Sz\'{a}sz-Mirakyan operators, are defined by
\begin{align}\label{e8}
R_{n}^{(\beta)}(f;x) = n \, \alpha_{n}(x) \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \, \alpha_{n}(x) + \beta \, k)^{k-1}
\, e^{-(n \, \alpha_{n}(x) + \beta \, k)} \, f\left(\frac{k}{n}\right)
\end{align}
for $f \in C[0,\infty)$. It is required that these operators preserve the mapping of $e^{- \lambda \, x}$, as
given by
\begin{align}\label{e9}
R_{n}^{(\beta)}(e^{ - \lambda t} ; x) = e^{- \lambda x}
\end{align}
where $x \geq 0$, $\lambda \geq 0$, and $n \in \mathbb{N}$. When $\beta = 0$ in \eqref{e8} the operator reduces to that defined
by Acar, Aral, and Gonska \cite{AAG}. When $\beta = 0$ and $\alpha_{n}(x) = x$ the operator reduces to the well
known Sz\'{a}sz-Mirakyan operators given by \eqref{e1}. For $0 \leq \beta < 1$ and $\alpha_{n}(x) = x$ these operators
reduce to the Sz\'{a}sz-Mirakyan-Durrmeyer operators defined by Gupta and Greubel in \cite{GG1}.
\begin{lemma} \label{L1}
For $x \geq 0, \lambda \geq 0$, we have
\begin{align}\label{e10}
\alpha_{n}(x) = \frac{ - \lambda \, x}{n \, (z(\lambda/n, \beta) - 1)},
\end{align}
where $-\beta \, z(t, \beta) = W(-\beta \, e^{-\beta - t})$ and $W(x)$ is the Lambert W-function.
\end{lemma}
\begin{proof}
Considering the mapping \eqref{e9} it is required that
\begin{align}\label{e11}
e^{- \lambda x} &= n \, \alpha_{n}(x) \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \, \alpha_{n}(x) + \beta \, k)^{k-1}
\, e^{-(n \, \alpha_{n}(x) + \beta k)} \, e^{- \lambda k/n}
\end{align}
Making use of \eqref{e3} in the form
\begin{align}\label{e12}
e^{n \, \alpha_{n}(x) \, z} = n \, \alpha_{n}(x) \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \, \alpha_{n}(x) +
\beta k )^{k-1} \, e^{-(\beta z - \ln(z)) k}
\end{align}
and letting $\beta \, z - \ln(z) = \beta + \frac{\lambda}{n}$, it follows that
\begin{align*}
e^{n \, \alpha_{n}(x) \, z} = n \, \alpha_{n}(x) \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \, \alpha_{n}(x) +
\beta k)^{k-1} \, e^{-(\beta + \lambda/n) \, k}
\end{align*}
which provides
\begin{align*}
e^{-\lambda x} = e^{n \, \alpha_{n}(x) \, (z-1)}
\end{align*}
or
\begin{align*}
\alpha_{n}(x) = - \frac{\lambda \, x}{n \, (z(\lambda/n, \beta) - 1)}.
\end{align*}
The value of $z$ is determined by the equation $\beta \, z - \ln(z) = \beta + \frac{\lambda}{n}$, which can be written
in the form
\begin{align*}
z \, e^{- \beta \, z} = e^{- \beta - \lambda/n}
\end{align*}
and has the solution
\begin{align}\label{e13}
z(\lambda/n, \beta) = - \frac{1}{\beta} \, W(- \beta \, e^{- \beta - \lambda/n}),
\end{align}
where $W(x)$ is the Lambert W-function.
\end{proof}
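The representation \eqref{e10} is easy to check numerically. The following Python sketch (an illustration only; the parameter values and the truncation level are arbitrary choices, and \texttt{scipy.special.lambertw} supplies the Lambert W-function) evaluates the series in \eqref{e11} with $\alpha_{n}(x)$ taken from \eqref{e10} and compares the result with $e^{-\lambda x}$.
\begin{verbatim}
# Illustrative check of Lemma 1: with alpha_n(x) from (e10), the series for
# R_n^{(beta)}(e^{-lambda t}; x) should reproduce e^{-lambda x}.
import math
from scipy.special import lambertw

beta, lam, n, x = 0.3, 1.0, 10, 2.0

z = -lambertw(-beta * math.exp(-beta - lam / n)).real / beta   # eq. (e13)
alpha = -lam * x / (n * (z - 1.0))                             # eq. (e10)

total = 0.0
for k in range(400):
    log_term = (math.log(n * alpha) + (k - 1) * math.log(n * alpha + beta * k)
                - math.lgamma(k + 1) - (n * alpha + beta * k) - lam * k / n)
    total += math.exp(log_term)
print(total, math.exp(-lam * x))   # the two printed values should agree
\end{verbatim}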
\begin{remark}
For the case $\lambda \to 0$ the resulting $\alpha_{n}(x)$ is
\begin{align*}
\lim_{\lambda \to 0} \, \alpha_{n}(x) = (1-\beta) \, x.
\end{align*}
\end{remark}
\begin{proof}
For the case $\lambda \to 0$ the resulting $z = z(\lambda/n, \beta)$ of \eqref{e13} yields $z(0, \beta) = 1$.
Consider
\begin{align*}
\frac{\partial z}{\partial \lambda} = - \frac{1}{\beta} \, \frac{\partial}{\partial \lambda} \, W(- \beta \, e^{- \beta - \lambda/n})
= \frac{W(- \beta \, e^{- \beta - \lambda/n})}{n \, \beta \, ( 1 + W(- \beta \, e^{- \beta - \lambda/n}))}
\end{align*}
so that
\begin{align*}
\lim_{\lambda \to 0} \, \frac{\partial z}{\partial \lambda} = - \frac{1}{n \, ( 1 - \beta)}.
\end{align*}
Now, by use of L'Hospital's rule,
\begin{align*}
\lim_{\lambda \to 0} \, \alpha_{n}(x) &= -\frac{x}{n} \, \lim_{\lambda \to 0} \frac{\lambda}{z - 1} = -\frac{x}{n}
\, \lim_{\lambda \to 0} \frac{1}{\frac{\partial z}{\partial \lambda}} = (1-\beta) \, x
\end{align*}
as claimed.
\end{proof}
By taking the case of $\lambda \to 0$ the operators $R_{n}^{(\beta)}(f;x)$ reduce from exponential preserving to
constant preserving operators. In this case the operators $R_{n}^{(\beta)}(f;x)|_{\lambda \to 0}$ are related to the
Jain operators, \eqref{e5}, by $R_{n}^{(\beta)}(f; x) = B_{n}^{\beta}(f;(1- \beta) \, x)$.
The SMJ operators are now completely defined by
\begin{equation}\label{e14}
\left \{
\begin{aligned}
R_{n}^{(\beta)}(f;x) &= n \, \alpha_{n}(x) \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \, \alpha_{n}(x) + \beta \, k)^{k-1}
\, e^{-(n \, \alpha_{n}(x) + \beta \, k)} \, f\left(\frac{k}{n}\right), \\
\alpha_{n}(x) &= - \frac{\lambda \, x}{n \, (z(\lambda/n, \beta) - 1)}, \\
z(t, \beta) &= - \frac{1}{\beta} \, W(- \beta \, e^{- \beta - t})
\end{aligned} \right.
\end{equation}
and the requirement that $R_{n}^{(\beta)}(e^{- \lambda t} ; x) = e^{-\lambda x}$, for $x \geq 0$, $\lambda \geq 0$ and
$n \in \mathbb{N}$.
\section{Moment Estimations}
\begin{lemma} \label{L2}
The moments for the SMJ operators are given by:
\begin{align}\label{e15}
R_{n}^{(\beta)}(1; x) &= 1 \nonumber\\
R_{n}^{(\beta)}(t; x) &= \frac{\alpha_{n}(x)}{1-\beta} \nonumber\\
R_{n}^{(\beta)}(t^{2}; x) &= \frac{\alpha_{n}^{2}(x)}{(1-\beta)^{2}} + \frac{\alpha_{n}(x)}{n \, (1-\beta)^{3}} \\
R_{n}^{(\beta)}(t^{3}; x) &= \frac{\alpha_{n}^{3}(x)}{(1-\beta)^{3}} + \frac{3 \, \alpha_{n}^{2}(x)}{n \, (1-\beta)^{4}} +
(1 + 2 \, \beta) \, \frac{\alpha_{n}(x)}{n^{2} \, (1-\beta)^{5}} \nonumber\\
R_{n}^{(\beta)}(t^{4}; x) &= \frac{\alpha_{n}^{4}(x)}{(1-\beta)^{4}} + \frac{6 \, \alpha_{n}^{3}(x)}{n \, (1-\beta)^{5}} +
(7 + 8 \, \beta) \, \frac{\alpha_{n}^{2}(x)}{n^{2} \, (1-\beta)^{6}} + (1 + 8 \beta + 6 \beta^{2}) \,
\frac{\alpha_{n}(x)}{n^{3} \, (1-\beta)^{7}} \nonumber\\
R_{n}^{(\beta)}(t^{5}; x) &= \frac{\alpha_{n}^{5}(x)}{(1-\beta)^{5}} + \frac{10 \, \alpha_{n}^{4}(x)}{n \, (1-\beta)^{6}} +
5 \, (5 + 4 \, \beta) \, \frac{\alpha_{n}^{3}(x)}{n^{2} \, (1-\beta)^{7}} \nonumber\\
& \hspace{10mm} + 15 \, (1 + 4 \beta + 2 \beta^{2}) \, \frac{\alpha_{n}^{2}(x)}{n^{3} \, (1-\beta)^{8}}
+ (1 + 22 \beta + 58 \beta^{2} + 24 \beta^{3}) \, \frac{\alpha_{n}(x)}{n^{4} \, (1-\beta)^{9}}.
\nonumber
\end{align}
\end{lemma}
The proof follows directly from earlier work of the author dealing with moments for the Jain basis;
see \cite{G1, GG1, GG2}.
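The moment formulas can also be confirmed numerically; the sketch below (illustrative only, with $n$, $\beta$ and the value substituted for $\alpha_{n}(x)$ chosen arbitrarily and the series truncated) compares the first three formulas of \eqref{e15} with direct summation.
\begin{verbatim}
# Illustrative check of the first moments in Lemma 2; here alpha stands for
# alpha_n(x) and is treated as a free positive number.
import math

n, beta, alpha = 10, 0.3, 1.2

def smj_moment(m, terms=600):
    s = 0.0
    for k in range(terms):
        log_w = (math.log(n * alpha) + (k - 1) * math.log(n * alpha + beta * k)
                 - math.lgamma(k + 1) - (n * alpha + beta * k))
        s += math.exp(log_w) * (k / n) ** m
    return s

print(smj_moment(0), 1.0)
print(smj_moment(1), alpha / (1 - beta))
print(smj_moment(2), alpha**2 / (1 - beta)**2 + alpha / (n * (1 - beta)**3))
\end{verbatim}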
\begin{lemma} \label{L3}
Let $\phi = t - x$; then the central moments of the SMJ operators are:
\begin{align}\label{e16}
R_{n}^{(\beta)}(\phi^{0}; x) &= 1 \nonumber\\
R_{n}^{(\beta)}(\phi^{1}; x) &= \frac{\alpha_{n}(x)}{1-\beta} - x \nonumber\\
R_{n}^{(\beta)}(\phi^{2}; x) &= \left(\frac{\alpha_{n}(x)}{1-\beta} - x \right)^{2} + \frac{\alpha_{n}(x)}{n \, (1-\beta)^{3}} \\
R_{n}^{(\beta)}(\phi^{3}; x) &= \left(\frac{\alpha_{n}(x)}{1-\beta} - x \right)^{3} + \frac{3 \, \alpha_{n}(x)}{n \, (1-\beta)^{3}}
\, \left(\frac{\alpha_{n}(x)}{1-\beta} - x \right) + (1 + 2 \, \beta) \, \frac{\alpha_{n}(x)}{n^{2} \, (1-\beta)^{5}}
\nonumber
\end{align}
\begin{align*}
R_{n}^{(\beta)}(\phi^{4}; x) &= \left(\frac{\alpha_{n}(x)}{1-\beta} -x \right)^{4} + \frac{6 \, \alpha_{n}(x)}{n \, (1-\beta)^{3}}
\, \left(\frac{\alpha_{n}(x)}{1-\beta} - x \right)^{2} +
(7 + 8 \, \beta) \, \frac{\alpha_{n}(x)}{n^{2} \, (1-\beta)^{5}} \nonumber\\
& \hspace{10mm} \cdot \left(\frac{\alpha_{n}(x)}{1-\beta} - x \right) + (1 + 8 \beta + 6 \beta^{2}) \,
\frac{\alpha_{n}(x)}{n^{3} \, (1-\beta)^{7}} + \frac{3 \, \alpha_{n}(x) \, x}{n^{2} \, (1-\beta)^{5}} \nonumber\\
R_{n}^{(\beta)}(\phi^{5}; x) &= \left(\frac{\alpha_{n}(x)}{1-\beta} - x\right)^{5} + \frac{10 \, \alpha_{n}(x)}{n \, (1-\beta)^{3}}
\left( \frac{\alpha_{n}(x)}{1-\beta} - x \right)^{3} + \frac{5 \,\alpha_{n}(x)}{n^{2} \, (1-\beta)^{5}} \left(
\frac{\alpha_{n}(x)}{1-\beta} - x\right) \cdot \mu_{1} \nonumber\\
& \hspace{15mm} + \frac{5 \, \alpha_{n}(x)}{n^{3} \, (1-\beta)^{7}} \cdot \mu_{2} + (1 + 22 \beta + 58 \beta^{2}
+ 24 \beta^{3}) \, \frac{\alpha_{n}(x)}{n^{4} \, (1-\beta)^{9}},
\nonumber
\end{align*}
where
\begin{align*}
\mu_{1} &= (5 + 4 \beta) \, \left( \frac{\alpha_{n}(x)}{1 - \beta} - x \right) + 3 \, x \\
\mu_{2} &= 3 \, (1 + 4 \beta + 2 \beta^{2}) \, \left( \frac{\alpha_{n}(x)}{1-\beta} - x \right) + 2 \, (1 + 2
\beta) \, x.
\end{align*}
\end{lemma}
\begin{proof}
Utilizing the binomial expansion
\begin{align*}
\phi^{m} = (t - x)^{m} = \sum_{k=0}^{m} (-1)^{k} \, \binom{m}{k} \, t^{m-k} \, x^{k}
\end{align*}
then
\begin{align}\label{e17}
R_{n}^{(\beta)}(\phi^{m}; x) = \sum_{k=0}^{m} (-1)^{k} \, \binom{m}{k} \, x^{k} \, R_{n}^{(\beta)}(t^{m-k}; x).
\end{align}
With the use of \eqref{e15} the first few values of $m$ are:
\begin{align*}
R_{n}^{(\beta)}(\phi^{0}; x) &= R_{n}^{(\beta)}(t^{0}; x) = 1 \\
R_{n}^{(\beta)}(\phi^{1}; x) &= R_{n}^{(\beta)}(t; x) - x \, R_{n}^{(\beta)}(t^{0}; x) = \frac{\alpha_{n}(x)}{1-\beta} - x \\
R_{n}^{(\beta)}(\phi^{2}; x) &= R_{n}^{(\beta)}(t^{2}; x) - 2 x \, R_{n}^{(\beta)}(t;x) + x^{2} \, R_{n}^{(\beta)}(t^{0}; x) \\
&= \frac{\alpha_{n}^{2}(x)}{(1-\beta)^{2}} + \frac{\alpha_{n}(x)}{n \, (1-\beta)^{3}} - 2x \, \frac{\alpha_{n}(x)}
{1-\beta} + x^{2} \\
&= \left( \frac{\alpha_{n}(x)}{1 - \beta} - x\right)^{2} + \frac{\alpha_{n}(x)}{n \, (1-\beta)^{3}}
\end{align*}
The remainder of the central moments follow from \eqref{e15} and \eqref{e17}.
\end{proof}
\begin{lemma} \label{L4}
The central moments, given in Lemma~\ref{L3}, lead to the limits:
\begin{equation}
\begin{aligned}\label{e18}
\lim_{n \to \infty} \, n \, R_{n}^{(\beta)}(\phi; x) &= \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \\
\lim_{n \to \infty} \, n \, R_{n}^{(\beta)}(\phi^{2}; x) &= \frac{x}{(1-\beta)^{2}}
\end{aligned}
\end{equation}
\end{lemma}
\begin{proof}
Setting $t = -\lambda/n$ in \eqref{a4}, so that $z(t)$ there coincides with $z(\lambda/n, \beta)$, gives
\begin{align*}
& \frac{(- \lambda)}{n \, (1-\beta) \, (z(\lambda/n,\beta) - 1)} = 1 + \frac{v}{2!} + 2\, (1-4\beta) \, \frac{v^{2}}{4!} +
6 \beta^{2} \, \frac{v^{3}}{4!} \nonumber\\
& \hspace{10mm} - (1 - 8\beta + 88\beta^{2} + 144\beta^{3}) \, \frac{v^{4}}{6!} + 840 \beta^{2}\,(1 + 12\beta +
8\beta^{2}) \, \frac{v^{5}}{8!} + O(v^{6}),
\end{align*}
where $n \, (1-\beta)^{2} \, v = \lambda$. This expansion may be placed into the form
\begin{align*}
\frac{\alpha_{n}(x)}{1-\beta} - x &= \frac{v \, x}{2!} \, \left( 1 + (1-4\beta) \, \frac{v}{3!} + 12\beta^{2} \,
\frac{v^{2}}{4!} - O(v^{3}) \right).
\end{align*}
Multiplying by $n$ and letting $n \to \infty$, the resulting value is
\begin{align*}
\lim_{n \to \infty} \, n \, R_{n}^{(\beta)}(\phi; x) = \frac{\lambda \, x}{2! \, (1-\beta)^{2}}.
\end{align*}
It is evident that
\begin{align*}
\left(\frac{\alpha_{n}(x)}{1-\beta} - x \right)^{2} &= \left(\frac{v \, x}{2!}\right)^{2} \, \left( 1 + 2(1-4\beta)
\, \frac{v}{3!} + 20 \, (1-8\beta + 52\beta^{2}) \, \frac{v^{2}}{6!} - O(v^{3}) \right)
\end{align*}
for which
\begin{align*}
\left( \frac{\alpha_{n}(x)}{1-\beta} - x\right)^{2} &+ \frac{\alpha_{n}(x)}{n \, (1-\beta)^{3}} \\
&= \left(\frac{v \, x}{2!}\right)^{2} \, \left( 1 + 2(1-4\beta) \, \frac{v}{3!} + 20 \, (1-8\beta + 52\beta^{2}) \,
\frac{v^{2}}{6!} - O(v^{3}) \right) \\
& \hspace{5mm} + \frac{x}{n \, (1-\beta)^{2}} \, \left(1 + \frac{v}{2!} + 2\, (1-4\beta) \, \frac{v^{2}}{4!} - O(v^{3}) \right)
\end{align*}
Multiplying by $n$ and taking the limit yields
\begin{align*}
\lim_{n \to \infty} \, n \, R_{n}^{(\beta)}(\phi^{2}; x) = \frac{x}{(1-\beta)^{2}}.
\end{align*}
\end{proof}
\begin{remark}
Other limits may be determined by extending the work of Lemma~\ref{L4}, such as:
\begin{equation}\label{e19}
\begin{aligned}
\lim_{n\to \infty} R_{n}^{(\beta)}(\phi^{m}; x) &= 0, \mbox{ for } m \geq 1 \\
\lim_{n \to \infty} n \, R_{n}^{(\beta)}(\phi^{m}; x) &= 0, \mbox{ for } m \geq 3 \\
\lim_{n \to \infty} n^{2} \, R_{n}^{(\beta)}(\phi^{3}; x) &= \frac{2(1 + 2 \beta) \, x + 3 \lambda \, x^{2}}{2! \,
(1-\beta)^{4}} \\
\lim_{n \to \infty} n^{2} \, R_{n}^{(\beta)}(\phi^{4}; x) &= \frac{3 \, x^{2}}{(1-\beta)^{4}}
\end{aligned}
\end{equation}
\end{remark}
\begin{lemma} \label{L5}
The action of the SMJ operators on a general exponential weight is given by
\begin{align*}
R_{n}^{(\beta)}(e^{-\mu \, t} ; x) = e^{n \, \alpha_{n}(x) \, (z(\mu/n, \beta) - 1)},
\end{align*}
or
\begin{align}\label{e20}
R_{n}^{(\beta)}(e^{- \mu \, t} ; x) = \exp\left[ -\lambda \, x \, \left(\frac{z(\mu/n,\beta) - 1}{z(\lambda/n, \beta)-1} \right)
\right] = \exp\left[ - \mu \, x \cdot \frac{\lambda}{\mu} \, \frac{z(\mu/n, \beta) - 1}{z(\lambda/n, \beta) - 1} \right]
\end{align}
for $\mu \geq 0$ and has the expansion
\begin{equation}\label{e21}
\begin{aligned}
R_{n}^{(\beta)}(e^{-\mu \, t}; x) &= e^{-\mu \, x} \, \left( 1 + \frac{\mu (\mu - \lambda) x}{2! \, n (1-\beta)^{2}} +
( (3 \mu x - 4 - 8 \beta) \mu \right. \\
& \hspace{10mm} \left. - (3 \mu x - 2 + 8 \beta) \lambda) \, \frac{\mu (\mu - \lambda) x}{4! \, n^{2} (1-\beta)^{4}}
+ \mathcal{O}\left(\frac{\mu (\mu - \lambda) x}{6! \, n^{3} (1-\beta)^{6}}\right) \right)
\end{aligned}
\end{equation}
where $-\beta \, z(\mu/n,\beta) = W(- \beta \, e^{-\beta - \mu/n})$, $-\beta \, z(\lambda/n, \beta) = W(- \beta \,
e^{- \beta - \lambda/n})$. In the limit as $n \to \infty$ it is evident that
\begin{align}\label{e22}
\begin{split}
\lim_{n \to \infty} R_{n}^{(\beta)}(e^{-\mu t}; x) &= e^{-\mu x} \\
\lim_{n \to \infty} n \, \left[ R_{n}^{(\beta)}(e^{- \mu t}; x) - e^{- \mu x} \right] &= \frac{\mu (\mu - \lambda) \, x}{
2! \, (1-\beta)^{2}} \, e^{- \mu x}.
\end{split}
\end{align}
\end{lemma}
\begin{proof}
It is fairly evident that
\begin{align*}
R_{n}^{(\beta)}(e^{-\mu t}; x) &= n \alpha_{n}(x) \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \, \alpha_{n}(x) + \beta k)^{k-1} \,
e^{-n \, \alpha_{n}(x) - (\beta + \mu/n) k}
\end{align*}
which, by comparison to \eqref{e12}, leads to
\begin{align*}
R_{n}^{(\beta)}(e^{-\mu t}; x) = e^{n \, \alpha_{n}(x) \, (z(\mu/n, \beta) - 1)} = \exp\left[- \lambda \, x \,
\left(\frac{z(\mu/n,\beta)-1}{z(\lambda/n,\beta)-1}\right) \right].
\end{align*}
The expansion of \eqref{e20}, with use of \eqref{a5}, is given by
\begin{align*}
R_{n}^{(\beta)}(e^{- \mu t}; x) &= \sum_{k=0}^{\infty} \frac{(-\mu x)^{k}}{k!} \, \left(\frac{\lambda}{\mu} \, \frac{z(\mu/n,
\beta) - 1}{z(\lambda/n, \beta) - 1} \right)^{k} \\
&= \sum_{k=0}^{\infty} \frac{(-\mu x)^{k}}{k!} \, \left(1 - \frac{k (\mu - \lambda)}{2! \, (1-\beta)^{2}} + k ( (3k + 1 +
8 \beta) \mu \right. \\
& \hspace{5mm} \left. + (3k -1 - 8 \beta) \lambda ) \, \frac{\mu - \lambda}{4! \, (1-\beta)^{4}} + \mathcal{O}\left(
\frac{\mu - \lambda}{6! \, (1-\beta)^{6}} \right) \right) \\
&= e^{- \mu x} \, \left( 1 + \frac{\mu (\mu - \lambda) x}{2! \, n (1-\beta)^{2}} + ( (3 \mu x - 4 - 8 \beta) \mu \right. \\
& \hspace{10mm} \left. - (3 \mu x - 2 + 8 \beta) \lambda) \, \frac{\mu (\mu - \lambda) x}{4! \, n^{2} (1-\beta)^{4}}
+ \mathcal{O}\left(\frac{\mu (\mu - \lambda) x}{6! \, n^{3} (1-\beta)^{6}}\right) \right).
\end{align*}
Taking the appropriate limits yields the desired results.
\end{proof}
\begin{remark}
By use of Lemma~\ref{L5} it may be stated that:
\begin{align}\label{e23}
\lim_{n\to \infty} n^{2} \, R_{n}^{(\beta)}((e^{-t} - e^{-x})^{4}; x) = \frac{3 \, x^{2} \, e^{-4 x}}{(1 - \beta)^{4}}.
\end{align}
\end{remark}
\begin{proof}
Since
\begin{align*}
R_{n}^{(\beta)}((e^{-t} - e^{-x})^{4};x) &= R_{n}^{(\beta)}(e^{-4 t}; x) - 4 \, e^{-x} \, R_{n}^{(\beta)}(e^{-3 t}; x) + 6 \,
e^{-2 x} \, R_{n}^{(\beta)}(e^{-2 t}; x) \nonumber\\
& \hspace{10mm} - 4 \, e^{-3 x} \, R_{n}^{(\beta)}(e^{-t}; x) + e^{-4 x} \, R_{n}^{(\beta)}(1; x)
\end{align*}
then, by making use of \eqref{e21}, it becomes evident that
\begin{align*}
R_{n}^{(\beta)}((e^{-t} - e^{-x})^{4}; x) = \frac{3 \, x^{2} \, e^{-4 x}}{n^{2} \, (1-\beta)^{4}} + \mathcal{O}\left(
\frac{1}{n^{3} \, (1-\beta)^{6}}
\right).
\end{align*}
Multiplying by $n^{2}$ and taking the limit as $n \to \infty$ yields the desired result.
\end{proof}
\section{Analysis}
\begin{theorem}
Given a sequence $A_{n} : C^{*}[0,\infty) \to C^{*}[0,\infty)$ of positive linear operators which satisfies
the conditions
\begin{align*}
\lim_{n \to \infty} A_{n}(e^{-k t}; x) = e^{-k x}, \hspace{5mm} k=0,1,2
\end{align*}
uniformly in $[0,\infty)$, then
\begin{align*}
\lim_{n \to \infty} A_{n}(f; x) = f(x)
\end{align*}
uniformly in $[0,\infty)$ for every $f \in C^{*}[0,\infty)$.
\end{theorem}
The proof of Theorem 1 can be found in \cite{Alt-Camp,Boy-Ves,Holhos} and has, in essence, been demonstrated by \eqref{e21} for
$\mu \geq 0$. An estimate of the rate of convergence for the SMJ operators will require the use of the modulus of continuity
\begin{align*}
\omega(F,\delta) = \sup_{|t - x| \leq \delta} \, |F(t) - F(x)|
\end{align*}
which, in the case where $F(e^{-t}) = f(t)$, takes the form
\begin{align*}
\omega^{*}(f;\delta) = \sup_{|e^{-t} - e^{-x}| \leq \delta} \, |f(t) - f(x)|
\end{align*}
and is well defined for $\delta \geq 0$ and all functions $f \in C^{*}[0,\infty)$. In the present case the modulus of continuity
has the property
\begin{align}\label{e24}
|f(t) - f(x)| \leq \left(1 + \frac{(e^{-x} - e^{-t})^{2}}{\delta^{2}}\right) \, \omega^{*}(f;\delta), \hspace{5mm} \delta > 0.
\end{align}
Further properties and use of the modulus of continuity can be found in \cite{Boy-Ves, Holhos}. The following theorem can also
be found in the latter.
\begin{theorem}
If a sequence of positive linear operators $A_{n} : C^{*}[0,\infty) \to C^{*}[0,\infty)$ satisfies the equalities:
\begin{align*}
\|A_{n}(1 ;x) - 1 \|_{[0,\infty)} &= a_{n} \\
\|A_{n}(e^{-t};x) - e^{-x} \|_{[0,\infty)} &= b_{n} \\
\|A_{n}(e^{-2 t};x) - e^{-2 x} \|_{[0,\infty)} &= c_{n},
\end{align*}
where $a_{n}$, $b_{n}$, and $c_{n}$ remain bounded and finite as $n \to \infty$, then
\begin{align*}
\| A_{n}(f; x) - f(x) \|_{[0, \infty)} \leq a_{n} \, |f(x)| + (2 + a_{n}) \, \omega^{*}(f, \sqrt{a_{n} + 2 \, b_{n} + c_{n}}),
\end{align*}
for every function $f \in C^{*}[0,\infty)$, and satisfies
\begin{align*}
\| A_{n}(f; x) - f(x) \|_{[0, \infty)} \leq 2 \, \omega^{*}(f, \sqrt{2 \, b_{n} + c_{n}})
\end{align*}
for constant preserving operators.
\end{theorem}
\begin{proof}
Since
\begin{align*}
A_{n}((e^{-t} - e^{-x})^{2}; x) = [A_{n}(e^{-2 t}; x) - e^{-2 x}] - 2 \, e^{-x} \, [A_{n}(e^{-t}; x) - e^{-x}] + e^{-2x}
\, [A_{n}(1;x) - 1]
\end{align*}
then, by use of \eqref{e24},
\begin{align*}
A_{n}(|f(t) - f(x)|; x) &\leq \left( A_{n}(1;x) + \frac{1}{\delta^{2}} \, A_{n}((e^{-t} - e^{-x})^{2}; x) \right) \,
\omega^{*}(f,\delta) \\
&\leq \left( 1 + a_{n} + \frac{a_{n} + 2 \, b_{n} + c_{n}}{\delta^{2}} \right) \, \omega^{*}(f,\delta).
\end{align*}
By choosing $\delta = \sqrt{a_{n} + 2 \, b_{n} + c_{n}}$ then
\begin{align*}
A_{n}(|f(t) - f(x)|; x) &\leq ( 2 + a_{n}) \, \omega^{*}(f,\sqrt{a_{n} + 2 \, b_{n} + c_{n}}).
\end{align*}
Now, making use of
\begin{align*}
|A_{n}(f;x) - f(x)| \leq |f(x)| \, |A_{n}(1;x) - 1| + A_{n}(|f(t) - f(x)|; x)
\end{align*}
leads to the uniform estimation of convergence in the form
\begin{align*}
\| A_{n}(f; x) - f(x) \|_{[0, \infty)} \leq a_{n} \, |f(x)| + (2 + a_{n}) \, \omega^{*}(f, \sqrt{a_{n} + 2 \, b_{n} + c_{n}}).
\end{align*}
For constant preserving operators the property $\|A_{n}(1;x) - 1\|_{[0,\infty)} = a_{n} = 0$ holds and leads to
\begin{align*}
\| A_{n}(f; x) - f(x) \|_{[0, \infty)} \leq 2 \, \omega^{*}(f, \sqrt{2 \, b_{n} + c_{n}}).
\end{align*}
\end{proof}
\begin{remark}
The SMJ operators satisfy
\begin{align*}
\| R_{n}^{(\beta)}(f; x) - f(x) \|_{[0, \infty)} \leq 2 \, \omega^{*}(f, \sqrt{2 \, b_{n} + c_{n}}).
\end{align*}
\end{remark}
\begin{proof}
By using Lemma~\ref{L2} it is evident that $R_{n}^{(\beta)}(1; x) = 1$, which yields $a_{n} = 0$. By using \eqref{e21} of
Lemma~\ref{L5}, it is seen that
\begin{align*}
R_{n}^{(\beta)}(e^{-\mu \, t}; x) - e^{-\mu x} &= e^{-\mu \, x} \, \left( \frac{\mu (\mu - \lambda)x}{2! \, n \, (1-\beta)^{2}}
- \frac{\Lambda(x, \mu, \lambda) \, \mu (\mu - \lambda) x}{4! \, n^{2} \, (1-\beta)^{4}} + \mathcal{O}\left( \frac{1}{n^{3} \,
(1-\beta)^{6}}
\right) \right),
\end{align*}
where $\Lambda(x, \mu, \lambda) = (3 \mu x - 4 - 8 \beta) \, \mu - (3 \mu x - 2 + 8 \beta) \lambda$, and provides
\begin{align*}
\| R_{n}^{(\beta)}(e^{-\mu \, t}; x) - e^{-\mu x} \| = \left\| \frac{\mu (\mu - \lambda) \, x \, e^{-\mu \, x}}{
2! \, n (1-\beta)^{2}} \, \left( 1 + \frac{2 \, \Lambda(x, \mu, \lambda)}{4! \, n \, (1-\beta)^{2}} + \mathcal{O}\left(
\frac{1}{n^{2} \, (1-\beta)^{4}} \right) \right) \right\|
\end{align*}
which shows, for $\mu \in \{1,2\}$, that the remaining quantities $b_{n}$ and $c_{n}$ are bounded and
finite. It is also evident that, as $n \to \infty$, both $b_{n}$ and $c_{n}$ tend to zero. By the
resulting statements of Theorem 2 it is determined that
\begin{align*}
\| R_{n}^{(\beta)}(f; x) - f(x) \|_{[0, \infty)} \leq 2 \, \omega^{*}(f, \sqrt{2 \, b_{n} + c_{n}}).
\end{align*}
as claimed.
\end{proof}
For the SMJ operators a quantitative Voronovskaya-type theorem can be defined in the following way.
\begin{theorem}
Let $f, f', f'' \in C^{*}[0,\infty)$; then
\begin{align*}
&\left| n \, [R_{n}^{(\beta)}(f;x) - f(x)] - \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \, f'(x) - \frac{x}{2! \, (1-\beta)^{2}}
\, f''(x) \right| \\
& \hspace{5mm} \leq |\mu_{n}(x,\beta)| \, |f'(x)|+ |\nu_{n}(x,\beta)| \, |f''(x)| \\
& \hspace{15mm} + 2 \, (2 \, \nu_{n}(x,\beta) + \frac{x}
{(1-\beta)^{2}} + \zeta_{n}(x,\beta) ) \, \omega^{*}\left(f''; \frac{1}{\sqrt{n}}\right)
\end{align*}
where
\begin{align*}
\mu_{n}(x, \beta) &= n \, R_{n}^{(\beta)}(\phi; x) - \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \\
\nu_{n}(x, \beta) &= \frac{1}{2!} \, \left(n \, R_{n}^{(\beta)}(\phi^{2}; x) - \frac{x}{(1-\beta)^{2}} \right) \\
\zeta_{n}(x, \beta) &= n^{2} \, \sqrt{R_{n}^{(\beta)}((e^{-x} - e^{-t})^{4}; x)} \, \sqrt{R_{n}^{(\beta)}(\phi^{4}; x)}.
\end{align*}
\end{theorem}
\begin{proof}
The Taylor expansion of $f$ about the point $x$ is
\begin{align}\label{e25}
f(t) = f(x) + f'(x) \, (t-x) + \frac{f''(x)}{2!} \, (t-x)^{2} + \theta(t,x) \, (t-x)^{2}
\end{align}
where $2 \, \theta(t,x) = f''(\eta) - f''(x)$ for $x \leq \eta \leq t$. Applying the SMJ operator to the Taylor
expansion it is determined that
\begin{align*}
& |R_{n}^{(\beta)}(f(t);x) - f(x) \, R_{n}^{(\beta)}(1;x) - f'(x) \, R_{n}^{(\beta)}(\phi;x) - \frac{f''(x)}{2!}
\, R_{n}^{(\beta)}(\phi^{2};x)| \\
& \hspace{30mm}\leq |R_{n}^{(\beta)}(\theta(t,x) \, \phi^{2};x)|.
\end{align*}
Using the results of Lemmas~\ref{L4} and \ref{L5} this can be written as
\begin{align*}
& \left|n \, \left( R_{n}^{(\beta)}(f;x) - f(x) \right) - \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \, f'(x) - \frac{x}{2! \,
(1-\beta)^{2}} \, f''(x) \right| \\
& \hspace{10mm} \leq \left| n \, R_{n}^{(\beta)}(\phi;x) - \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \right| \, |f'(x)|+ \frac{1}{2!}
\, \left| n \, R_{n}^{(\beta)}(\phi^{2};x) - \frac{x}{(1-\beta)^{2}} \right| \, |f''(x)| \\
& \hspace{20mm} + |n \, R_{n}^{(\beta)}(\theta(t,x) \, \phi^{2}; x)|
\end{align*}
or
\begin{align*}
& \left|n \, \left( R_{n}^{(\beta)}(f;x) - f(x) \right) - \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \, f'(x) - \frac{x}{2! \,
(1-\beta)^{2}} \, f''(x) \right| \\
& \hspace{10mm} \leq |\mu_{n}(x,\beta)| \, |f'(x)|+ |\nu_{n}(x,\beta)| \, |f''(x)| + |n \, R_{n}(\theta(t,x) \, \phi^{2}; x)|
\end{align*}
where
\begin{align*}
\mu_{n}(x, \beta) &= n \, R_{n}^{(\beta)}(\phi; x) - \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \\
\nu_{n}(x, \beta) &= \frac{1}{2!} \, \left(n \, R_{n}^{(\beta)}(\phi^{2}; x) - \frac{x}{(1-\beta)^{2}} \right).
\end{align*}
By using \eqref{e24} it is given that
\begin{align*}
|\theta(t,x)| \leq \left( 1 + \frac{(e^{-t} - e^{-x})^{2}}{\delta^{2}} \right) \, \omega^{*}(f''; \delta)
\end{align*}
which becomes, when $|e^{-t} - e^{-x}| \leq \delta$ is taken into consideration, $|\theta(t,x)| \leq 2 \, \omega^{*}
(f''; \delta)$. If $|e^{-t} - e^{-x}| > \delta$ then $|\theta(t,x)| \leq (2/\delta^{2}) \, (e^{-t} - e^{-x})^{2} \,
\omega^{*}(f''; \delta)$. Therefore, it can be concluded that
\begin{align*}
|\theta(t,x)| \leq 2 \, \left( 1 + \frac{(e^{-t} - e^{-x})^{2}}{\delta^{2}} \right) \, \omega^{*}(f''; \delta).
\end{align*}
The term $n \, R_{n}^{(\beta)}(\theta(t,x) \, \phi^{2}; x)$ becomes
\begin{align*}
n \, R_{n}^{(\beta)}(\theta(t,x) \, \phi^{2}; x) \leq 2 \, n \, \left( R_{n}^{(\beta)}(\phi^{2}; x) + \frac{1}{\delta^{2}} \,
R_{n}^{(\beta)}((e^{-t} - e^{-x})^{2} \, \phi^{2}; x) \right) \, \omega^{*}(f''; \delta)
\end{align*}
which, by applying the Cauchy-Schwarz inequality, becomes
\begin{align*}
n \, R_{n}^{(\beta)}(\theta(t,x) \, \phi^{2}; x) \leq 2 \, n \, \left( R_{n}^{(\beta)}(\phi^{2}; x) + \frac{1}{\delta^{2}} \,
\zeta_{n}(x, \beta) \right) \, \omega^{*}(f''; \delta),
\end{align*}
where
\begin{align*}
\zeta_{n}(x, \beta) &= n^{2} \, \sqrt{R_{n}^{(\beta)}((e^{-x} - e^{-t})^{4}; x)} \, \sqrt{R_{n}^{(\beta)}(\phi^{4}; x)}.
\end{align*}
Now, by choosing $\delta = 1/\sqrt{n}$, the desired result is obtained.
\end{proof}
\begin{remark}
By use of Lemma~\ref{L4} it is clear that $\mu_{n}(x, \beta) \to 0$ and $\nu_{n}(x,\beta) \to 0$ as $n \to \infty$.
Using \eqref{e19} and \eqref{e23} the limit of $\zeta_{n}(x,\beta)$ becomes
\begin{align*}
\lim_{n \to \infty} \zeta_{n}(x,\beta) = \frac{3 \, x^{2} \, e^{-2 x}}{(1-\beta)^{4}}
\end{align*}
and yields
\begin{align*}
\lim_{n \to \infty} \left( 2 \, \nu_{n}(x,\beta) + \frac{x}{(1-\beta)^{2}} + \zeta_{n}(x,\beta) \right) =
\frac{x}{(1-\beta)^{2}} + \frac{3 \, x^{2} \, e^{-2 x}}{(1-\beta)^{4}}.
\end{align*}
\end{remark}
\begin{corollary}
Let $f, f', f'' \in C^{*}[0, \infty)$; then the limit
\begin{align*}
\lim_{n \to \infty} \, n \, \left[ R_{n}^{(\beta)}(f;x) - f(x) \right] = \frac{\lambda \, x}{2! \, (1-\beta)^{2}} \, f'(x)
+ \frac{x}{2! \, (1-\beta)^{2}} \, f''(x)
\end{align*}
holds for all $x \in [0, \infty)$.
\end{corollary}
\section{Further Considerations}
Having established several results for the Sz\'{a}sz-Mirakyan-Jain operators, further directions can be explored. One
such consideration could be an application of a theorem found in a recent work of Gupta and Tachev, \cite{GT}. In order to
do so the following results are required.
\begin{lemma} \label{L6}
Let $z_{\mu} = z(\mu/n, \beta)$, $\phi = t-x$, and $f = \exp[n \, \alpha_{n}(x) \, (z_{\mu} - 1)]$. The exponentially
weighted moments are then given by:
\begin{align}\label{e51}
\begin{split}
R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{0}; x) &= f \\
R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{1}; x) &= \left[ \frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}} - x \right] \, f \\
R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{2}; x) &= \left[ \left(\frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}}
- x \right)^{2} + \frac{\alpha_{n}(x) \, z_{\mu}}{n \, (1-\beta \, z_{\mu})^{3}} \right] \, f \\
R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{3}; x) &= \left[ \left(\frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}}
- x \right)^{3} + \frac{3 \, \alpha_{n}(x) \, z_{\mu}}{n \, (1-\beta \, z_{\mu})^{3}}
\, \left(\frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}} - x \right) \right. \\
& \hspace{15mm} \left. + (1 + 2 \, \beta \, z_{\mu}) \, \frac{\alpha_{n}(x) \, z_{\mu}}{n^{2} \, (1-\beta \, z_{\mu})^{5}}
\right] \, f \\
R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{4}; x) &= \left[ \left(\frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}} -x \right)^{4} +
\frac{6 \,
\alpha_{n}(x) \, z_{\mu}}{n \, (1-\beta \, z_{\mu})^{3}} \, \left(\frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}} - x \right)^{2}
\right. \\
& \hspace{5mm} \left. + (7 + 8 \, \beta \, z_{\mu}) \, \frac{\alpha_{n}(x) \, z_{\mu}}{n^{2} \, (1-\beta \, z_{\mu})^{5}}
\cdot \left(\frac{\alpha_{n}(x) \, z_{\mu}}{1-\beta \, z_{\mu}} - x \right) + (1 + 8 \beta \, z_{\mu} + 6 \beta^{2}
\, z_{\mu}^{2}) \right. \\
& \hspace{15mm} \left. \cdot \frac{\alpha_{n}(x) \, z_{\mu}}{n^{3} \, (1-\beta \, z_{\mu})^{7}} + \frac{3 \, \alpha_{n}(x) \,
z_{\mu} \, x}{n^{2} \, (1-\beta \, z_{\mu})^{5}} \right] \, f
\end{split}
\end{align}
\end{lemma}
\begin{proof}
By using \eqref{e14} we have
\begin{align*}
R_{n}^{(\beta)}(e^{- \mu t} \phi^{m}; x) &= n \, \alpha_{n} \, \sum_{k=0}^{\infty} \frac{1}{k!} \, (n \alpha_{n} + \beta k)^{k-1}
\, e^{-(n \alpha_{n} + \beta k)} \, e^{- \mu k/n} \, \left(\frac{k}{n} - x\right)^{m} \\
&= (-1)^{m} \, \left(\frac{d}{d \mu } + x\right)^{m} \, e^{n \alpha_{n}(x) \, (z_{\mu} - 1)}.
\end{align*}
For the case $m =1$ it is given that
\begin{align*}
R_{n}^{(\beta)}(e^{- \mu t} \, \phi; x) &= - \left(\frac{d}{d\mu} + x\right) \, e^{n \alpha_{n}(x) \, (z_{\mu} - 1)}
= \left[ \frac{\alpha_{n}(x) \, z_{\mu}}{1 - \beta \, z_{\mu}} - x \right] \, e^{n \alpha_{n}(x) \, (z_{\mu} - 1)}.
\end{align*}
The remainder of the moments follow.
\end{proof}
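As with the unweighted moments, the formulas of \eqref{e51} can be confirmed numerically; the sketch below (illustrative only; all parameter values are arbitrary choices, and $\alpha_{n}(x)$ is treated as a free positive number) compares the cases $m = 0, 1, 2$ with direct summation.
\begin{verbatim}
# Illustrative check of the first exponentially weighted moments in Lemma 6.
import math
from scipy.special import lambertw

n, beta, alpha, x, mu = 10, 0.3, 1.2, 1.5, 0.7
z = -lambertw(-beta * math.exp(-beta - mu / n)).real / beta    # z_mu
f = math.exp(n * alpha * (z - 1.0))

def weighted_moment(m, terms=600):
    s = 0.0
    for k in range(terms):
        log_w = (math.log(n * alpha) + (k - 1) * math.log(n * alpha + beta * k)
                 - math.lgamma(k + 1) - (n * alpha + beta * k) - mu * k / n)
        s += math.exp(log_w) * (k / n - x) ** m
    return s

g = alpha * z / (1 - beta * z)
print(weighted_moment(0), f)
print(weighted_moment(1), (g - x) * f)
print(weighted_moment(2), ((g - x)**2 + alpha * z / (n * (1 - beta * z)**3)) * f)
\end{verbatim}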
\begin{remark} \label{R6}
The ratio of $R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{4}; x)$ and $R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{2}; x)$ as $n \to \infty$
is
\begin{align}
\lim_{n \to \infty} \frac{R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{4}; x)}{R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{2}; x)} = 0,
\end{align}
with order of convergence $\mathcal{O}(n^{-2})$.
\end{remark}
\begin{proof}
Consider the expansion of
\begin{align*}
\frac{\alpha_{n}(x) \, z_{\mu}}{1- \beta \, z_{\mu}} = z_{\mu} \cdot \frac{1-\beta}{1 - \beta \, z_{\mu}} \cdot \frac{\alpha_{n}(x)}
{1-\beta}
\end{align*}
by making use of the expansion used in the proof of Lemma \ref{L4}, \eqref{a3}, and by
\begin{align*}
\frac{1-\beta}{1-\beta \, z_{\mu}} = 1 - \frac{\beta \, \mu}{n(1-\beta)^{2}} + \frac{3 \, \beta^{2} \, \mu^{2}}{2! \, n^{2} (1-
\beta)^{4}} - \frac{(\beta + 14 \beta^2) \, \mu^{3}}{3! \, n^{3} (1-\beta)^{6}} + \mathcal{O}\left(\frac{\mu^{4}}{n^{4} (1-
\beta)^{8}}\right)
\end{align*}
then
\begin{align}
\frac{\alpha_{n}(x) \, z_{\mu}}{1 - \beta \, z_{\mu}} - x = \frac{x}{2 \, n (1-\beta)^{2}} \, \left( (\lambda - 2 \mu) +
\frac{\sigma(\lambda, \mu)}{3! \, n (1-\beta)^{2}} + \mathcal{O}\left(\frac{1}{n^{2} (1-\beta)^{4}}\right) \right).
\end{align}
where $\sigma(\lambda, \mu) = (1-4\beta) \lambda - 6 \lambda \mu + 6(1-2\beta + 3 \beta^{2}) \mu^{2}$. By squaring this result
and taking the limit it is determined that
\begin{align*}
\lim_{n \to \infty} \frac{R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{4}; x)}{R_{n}^{(\beta)}(e^{- \mu t} \, \phi^{2}; x)} =
\lim_{n \to \infty} \frac{(\lambda - 2 \mu)^{2} \, x^{2}}{4 \, n^{2} (1-\beta)^{4}} \, \left( 1 + \mathcal{O}\left( \frac{1}{
n} \right) \right) = 0.
\end{align*}
\end{proof}
With Lemma \ref{L6} and Remark \ref{R6} use could be made of Theorem 5 of Gupta and Tachev, \cite{GT}, which can be stated
as
\begin{theorem}
Let $E$ be a subspace of $C[0,\infty)$ which contains the polynomials and suppose $L_{n} : E \to C[0,\infty)$ is a
sequence of linear positive operators preserving linear functions. Suppose that for each constant $\mu > 0$, and fixed
$x \in [0, \infty)$, the operators $L_{n}$ satisfy
\begin{align*}
L_{n}( e^{- \mu t} \, (t-x)^{2}; x) \leq Q(\mu, x) \, R_{n}^{(\beta)}(e^{- \mu t} (t-x)^{2}; x).
\end{align*}
Additionally, if $f \in C^{2}[0, \infty) \cap E$ and $f'' \in \mathrm{Lip}(\alpha, \mu)$, for $0 < \alpha \leq 1$, then,
for $x \in [0, \infty)$,
\begin{align*}
& \left| L_{n}(f; x) - f(x) - \frac{f^{''}(x)}{2} \, \mu_{n,2}^{R^{(\beta)}} \right| \\
& \hspace{5mm} \leq \left[ e^{-\mu x} + \frac{Q(\mu, x)}{2} + \sqrt{\frac{Q(2 \mu ,x)}{4}} \right] \, \mu_{n,2}^{R^{(\beta)}}
\cdot \omega_{1}\left( f'', \sqrt{\frac{\mu_{n,4}^{R^{(\beta)}}}{\mu_{n,2}^{R^{(\beta)}}}}, \mu \right)
\end{align*}
where $\mu_{n,2}^{R^{(\beta)}} = R_{n}^{(\beta)}(e^{- \mu t} (t-x)^{2}; x)$.
\end{theorem}
\section{Appendix}
Expansion of the function $f(a e^{t})$ in powers of $t$ is given by
\begin{align}\label{a1}
f(a e^{t}) = \sum_{k=0}^{\infty} \left[ D^{k}_{t} \, f(a e^{t}) \right]_{t=0} \, \frac{t^{k}}{k!} = f(a) +
\sum_{k=1}^{\infty} p_{k}(a) \, \frac{t^{k}}{k!},
\end{align}
where
\begin{align}\label{a2}
p_{n}(a) = \left[ D^{n}_{t} \, f(a e^{t}) \right]_{t=0} = \sum_{r=1}^{n} S(n, n-r) \, a^{r} \, f^{(r)}(a),
\end{align}
with $S(n,m)$ being the Stirling numbers of the second kind. Applying this expansion to the Lambert W-function
the formula $W(x e^{x}) = x$ and the $n^{th}$-derivative coefficients, OEIS A042977, \cite{Oeis1, LWM}, are
required to obtain
\begin{align}\label{a3}
- \frac{1}{\beta} \, W(- \beta \, e^{-\beta + t}) &= 1 + (1-\beta) \, \sum_{n=1}^{\infty} \frac{B_{n-1}(\beta) \, u^{n}}{n!},
\end{align}
where $(1-\beta)^{2} \, u = t$ and $B_{n}(x)$ are the Eulerian polynomials of the second kind. Let $z(t)$ be the
left-hand side of \eqref{a3}, $- \beta \, z(t) = W(- \beta \, e^{-\beta + t})$, to obtain
\begin{align}\label{a4}
\frac{t}{(1-\beta) \, (z(t)-1)} &= 1 - \frac{u}{2!} + 2\, (1-4\beta) \, \frac{u^{2}}{4!} - 6 \beta^{2} \, \frac{u^{3}}
{4!} - (1 - 8\beta + 88\beta^{2} + 144\beta^{3}) \, \frac{u^{4}}{6!} \nonumber\\
& \hspace{5mm} - 840 \beta^{2}\,(1 + 12\beta + 8\beta^{2}) \, \frac{u^{5}}{8!} + O(u^{6}).
\end{align}
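The low-order coefficients in \eqref{a4} can be verified numerically; the following sketch (an illustration only, with arbitrary choices of $\beta$ and of a small value of $t$) compares the left-hand side with the first three terms of the series.
\begin{verbatim}
# Illustrative check of the first three terms of expansion (a4).
import math
from scipy.special import lambertw

beta, t = 0.3, 1e-3
z = -lambertw(-beta * math.exp(-beta + t)).real / beta
u = t / (1 - beta) ** 2

lhs = t / ((1 - beta) * (z - 1.0))
rhs = 1 - u / 2 + 2 * (1 - 4 * beta) * u ** 2 / 24
print(lhs, rhs)   # should agree up to O(u^3)
\end{verbatim}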
The ratio of $z(x)-1$ to $z(t)-1$ is given by
\begin{align}\label{a5}
\frac{t}{x} \, \frac{z(x)-1}{z(t)-1} &= 1 + \frac{(x-t)}{2! \, (1-\beta)^{2}} + \delta_{1} \, \frac{(x-t)}{4! \,
(1-\beta)^{4}} + \delta_{2} \, \frac{(x-t)}{4! \, (1-\beta)^{6}}
+ \mathcal{O}\left(\frac{(x-t)}{8! \, (1-\beta)^{8}} \right),
\end{align}
where
\begin{align*}
\delta_{1} &= 4(1+2 \beta) x - 2(1-4\beta) \, t \\
\delta_{2} &= (1 + 8\beta + 6\beta^{2}) \, x^{2} - (1-4\beta - 6\beta^{2}) \, x t + 6 \beta^{2} \, t^{2}.
\end{align*}
\end{document}
\begin{document}
\maketitle
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\footnotetext[1]{
Department of Computer Science, University of British Columbia.
Email: \texttt{[email protected]}.
Supported by an NSERC Discovery Grant and a Sloan Foundation Fellowship.
}
\renewcommand{\thefootnote}{\arabic{footnote}}
\begin{abstract}
A folklore result uses the Lov\'asz local lemma to analyze the discrepancy of
hypergraphs with bounded degree and edge size.
We generalize this result to the context of
real matrices with bounded row and column sums.
\end{abstract}
\section{Introduction}
In combinatorics, discrepancy theory is the study of red-blue colorings
of a hypergraph's vertices such that every hyperedge contains a roughly equal number of red and blue
vertices.
A classic survey on this topic is \cite{BeckSos}.
Many combinatorial discrepancy results have a more general form as a geometric statement about
discrepancy of real vectors \cite[\S 4]{BeckSos}.
Some examples include the Beck-Fiala theorem \cite{BeckFiala}
and Spencer's ``six standard deviations'' theorem \cite{Spencer}.
One exception is the following folklore result on the discrepancy of hypergraphs of bounded degree
and edge size \cite[pp.~693]{Srinivasan} \cite[Proposition 12]{BPRS}.
\begin{theorem}
\TheoremName{folklore}
Let $H$ be a hypergraph of maximum degree $\Delta$ and maximum edge size $R$.
Then there is a red-blue coloring of the vertices such that, for every edge $e$,
the numbers of red and blue vertices in $e$ differ by at most $2 \sqrt{R \ln(R \Delta)}$.
\end{theorem}
The proof is a short exercise using the Lov\'asz local lemma.
We show that this theorem also has a more general form as a geometric statement
about discrepancy of real vectors.
\Theorem{newthm} recovers \Theorem{folklore} (up to constants)
by letting $V_{i,j} \in \set{0,1}$ indicate whether vertex $j$ is contained in edge $i$.
Let $v^i$ denote the $i^{\textrm{th}}$ row of $V$ and $v_j$ denote the $j^{\textrm{th}}$ column of $V$.
As usual, let $[n] = \set{1,\ldots,n}$ and let $\norm{\cdot}_p$ denote the $\ell_p$-norm.
\begin{theorem}
\TheoremName{newthm}
Let $V$ be an $n \times m$ real matrix with $\abs{V_{i,j}} \leq 1$,
$\norm{ v^i }_1 \leq R$, and $\norm{v_j}_1 \leq \Delta$ for all $i \in [n], j \in [m]$.
Assume that $R \geq \max \set{ \Delta, 4 }$ and $\Delta \geq 2$.
There exists $y \in \set{-1,+1}^m$ with
$\norm{ V y }_\infty \leq O(\sqrt{R \log(R \Delta)})$.
\end{theorem}
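For concreteness, here is a tiny illustrative sketch (not part of the formal argument) of the reduction just described: $V$ is built from a hypergraph given as a list of edges, so that $(Vy)_i$ is exactly the imbalance between red and blue vertices in edge $i$ under the coloring $y$.
\begin{verbatim}
# Illustrative construction of V from a hypergraph edge list: V[i][j] = 1 iff
# vertex j lies in edge i, so (V y)_i is the color imbalance of edge i.
def incidence_matrix(edges, num_vertices):
    return [[1.0 if j in e else 0.0 for j in range(num_vertices)]
            for e in edges]

edges = [{0, 1, 2}, {1, 3}]
V = incidence_matrix(edges, 4)
y = [1, -1, 1, -1]
print([sum(V[i][j] * y[j] for j in range(4)) for i in range(len(edges))])
\end{verbatim}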
\section{The Proof}
\SectionName{LLL}
\Theorem{newthm} follows as an easy corollary of the next theorem,
by rescaling the vectors and separately considering the positive and negative coordinates.
Let $\lg x$ denote the base-2 logarithm of $x$.
\begin{theorem}
\TheoremName{weakbeckfiala}
Let $A$ be a non-negative real matrix of size $n \times m$,
and let $a_1,\ldots,a_m \in \mathbb{R}^n_{\geq 0}$ denote its columns.
Assume that $\beta \leq \min \set{ \delta/2, 1/4 }$ and $\delta \leq 1$.
Suppose that
\begin{itemize}
\item $\norm{\sum_j a_j}_\infty \leq 1$,
\item $\norm{a_j}_\infty \leq \beta$ for every $j$, and
\item $\norm{a_j}_1 \leq \delta$ for every $j$.
\end{itemize}
Define $\alpha := \sqrt{\lg(\delta/\beta^2)} \geq \sqrt{2}$.
Then there exists a vector $y \in \set{-1,+1}^m$ such that
$$
\norm{ A y }_\infty ~\leq~ 16 \alpha \sqrt{\beta}.
$$
\end{theorem}
\comment{
\begin{proofof}{\Theorem{newthm}}
First we rescale each $v_j$ and divide it into its non-positive and non-negative parts.
Define $v_j^+, v_j^- \in \mathbb{R}^n$ by
\begin{align*}
(v_j^+)_i &~=~ \max \set{ (v_j)_i/R, 0 } \\
(v_j^-)_i &~=~ \max \set{ -(v_j)_i/R, 0 }
\end{align*}
Let $a_j \in \mathbb{R}^{2n}$ be the vector obtained by concatenating $v_j^+$ and $v_j^-$,
and let $A$ be the matrix whose $j\ifmmode{^{\textrm{th}}}\else{\textsuperscript{th}\ }\fi$ column is $a_j$.
Then $\norm{ a_j }_\infty \leq 1/R$, $\norm{ a_j }_1 = \norm{ v_j }_1 / R \leq \Delta/R$,
and $\norm{ A_i }_1 \leq 1$.
Thus we may set $\delta = \Delta/R$ and $\beta = 1/R$.
By \Theorem{weakbeckfiala}, we get a vector $y \in \set{-1,+1}^m$ with
$$
\norm{ Ay }_\infty
~=~ O( \alpha \sqrt{\beta} )
~=~ O( \sqrt{ \lg( R \Delta ) / R } ).
$$
Scaling up by $R$ and using the triangle inequality,
we obtain a discrepancy bound for $v_1,\ldots,v_m$ that is at most twice as large:
$ \norm{ Vy }_\infty = O( \sqrt{ R \lg( R \Delta )} ) $.
\end{proofof}
}
We now prove \Theorem{weakbeckfiala}.
Suppose we choose the vector $y \in \set{-1,+1}^m$ uniformly at random.
The discrepancy of row $i$ is the value $\abs{\sum_j A_{i,j} y_j}$.
Our goal is to bound $\norm{Ay}_\infty = \max_i \, \abs{\sum_j A_{i,j} y_j}$,
which is the maximum discrepancy of any row.
One annoyance in analyzing $\norm{ A y }_\infty$ is that
the entries of $A$ can have wildly differing magnitudes.
The natural approach is to stratify:
to partition each row of $A$ into sets whose entries all have roughly the same magnitude.
Define $b := \floor{-\lg \beta} \geq 2$,
so that every entry of $A$ is at most $2^{-b}$.
For $k \geq b$, let
$$
S_{i,k} ~=~ \setst{ j }{ \floor{-\lg A_{i,j}} = k }
$$
be the locations of the entries in row $i$ that take values in $(2^{-(k+1)},2^{-k}]$.
To bound the discrepancy of row $i$, we will actually bound the discrepancy of each
set $S_{i,k}$ (i.e., $\abs{ \sum_{j \in S_{i,k}} A_{i,j} y_j }$).
By the triangle inequality,
the total discrepancy of row $i$ is at most the sum of the discrepancies of each $S_{i,k}$.
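As an aside, this stratification is straightforward to compute; the following sketch (illustrative only) collects the sets $S_{i,k}$ for a given non-negative matrix, ignoring zero entries since they contribute nothing to the discrepancy.
\begin{verbatim}
# Illustrative computation of the buckets S_{i,k}: column indices j of row i
# whose entries lie in (2^{-(k+1)}, 2^{-k}].
import math

def buckets(A):
    S = {}
    for i, row in enumerate(A):
        for j, a in enumerate(row):
            if a > 0.0:
                k = math.floor(-math.log2(a))
                S.setdefault((i, k), set()).add(j)
    return S

# Entries 0.2 and 0.15 land in bucket k = 2, the entry 0.05 in bucket k = 4.
print(buckets([[0.2, 0.15, 0.05]]))
\end{verbatim}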
Define
\begin{equation}
\EquationName{epsdef}
\epsilon ~:=~ 8 \alpha \sqrt{\beta} ~>~ 8 \sqrt{\beta}.
\end{equation}
Let $\mathcal{E}_{i,k}$ be the event that the discrepancy of $S_{i,k}$ exceeds
\begin{equation}
\EquationName{TDef}
T_k ~:=~ \epsilon \sum_{j \in S_{i,k}} A_{i,j} + \alpha 2^{-k/2}.
\end{equation}
We can analyze the probability of $\mathcal{E}_{i,k}$ by a Hoeffding bound:
if $\set{X_i}_{i \leq \ell}$ are independent, mean-zero random variables,
each taking values in $[-1,+1]$,
and $X = X_1+\cdots+X_\ell$,
then $\prob{|X|>a} \leq 2 e^{-a^2/2\ell}$.
Applying this bound to the discrepancy of $S_{i,k}$, we get that
\begin{align}
\nonumber
\prob{\mathcal{E}_{i,k}}
&~\leq~ 2\exp\big( - (T_k 2^k)^2 / 2 \card{S_{i,k}} \big) \\\nonumber
&~<~ 2\exp\Bigg(
- \frac{\epsilon^2}{2 \card{S_{i,k}} } \Big(2^k \smallsum{j \in S_{i,k}}{} A_{i,j}\Big)^2
- \frac{2 \epsilon}{2 \card{S_{i,k}} } \alpha 2^{k/2}
\Big(2^k \smallsum{j \in S_{i,k}}{} A_{i,j}\Big)
\Bigg) \\
&~\leq~ 2\exp\Big(
- \frac{\epsilon^2}{8} \card{S_{i,k}} - \frac{\epsilon}{2} \alpha 2^{k/2}
\Big)
~=:~ p_{i,k},
\EquationName{pikdef}
\end{align}
where the last inequality uses
$\smallsum{j \in S_{i,k}}{} A_{i,j} \geq 2^{-(k+1)} \card{S_{i,k}}$.
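As a quick side illustration of the Hoeffding bound used above (not needed for the argument; the values of $\ell$, $a$ and the number of trials are arbitrary choices), one can compare the empirical tail of a sum of independent random signs with $2 e^{-a^2/2\ell}$.
\begin{verbatim}
# Illustrative Monte Carlo comparison of the Hoeffding tail bound with the
# empirical tail probability for a sum of independent +/-1 signs.
import math
import random

random.seed(0)
ell, a, trials = 50, 15.0, 100000
hits = sum(1 for _ in range(trials)
           if abs(sum(random.choice((-1, 1)) for _ in range(ell))) > a)
print(hits / trials, 2 * math.exp(-a * a / (2 * ell)))  # empirical <= bound
\end{verbatim}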
\subsection{Discrepancy assuming no events occur}
Suppose that none of the events $\mathcal{E}_{i,k}$ happen.
Then the total discrepancy of row $i$ is at most
\begin{align}
\nonumber
\sum_{k \geq b} T_k
&~=~ \epsilon
\sum_{k \geq b} \sum_{j \in S_{i,k}} A_{i,j}
+ \alpha \sum_{k \geq b} 2^{-k/2} \\\nonumber
&~\leq~ \epsilon + \alpha \sum_{k \geq b} 2^{-k/2}
\qquad\text{(since we assume $\smallsum{j=1}{m} A_{i,j} \leq 1$)} \\\nonumber
&~=~ \epsilon + \alpha \frac{2^{-b/2}}{1-2^{-1/2}} \\\nonumber
&~\leq~ \epsilon + 4 \alpha \sqrt{2 \beta}
\qquad\text{(since $2^{-b} \leq 2^{-(\lg(1/\beta)-1)} = 2\beta$)}
\\\EquationName{nobad}
&~\leq~ 16 \alpha \sqrt{\beta}.
\end{align}
\subsection{Avoiding the events}
We will use the local lemma to show that, with positive probability,
none of the events $\mathcal{E}_{i,k}$ occur.
To do so, we must show that these events have limited dependence.
Consider $\mathcal{E}_{i,k}$, which is the event
that the elements in row $i$ of value roughly $2^{-k}$ have large discrepancy.
This event depends only on the random values $\setst{ y_j }{ j \in S_{i,k} }$.
We will bound the total failure probability of the events that depend on those random values.
The local lemma can be stated as follows \cite[Theorem 5.1.1]{AlonSpencer}:
\begin{theorem}
Let $\mathcal{E}_1,\ldots,\mathcal{E}_m$ be events in a probability space.
Let $\Gamma(\mathcal{E}_i)$ be the events (other than $\mathcal{E}_i$ itself)
which are not independent of $\mathcal{E}_i$.
If one can associate a value $x(\mathcal{E}_i) \in (0,1)$ with each event $\mathcal{E}_i$ such that
\begin{equation}
\EquationName{LLL}
\prob{\mathcal{E}_i} ~\leq~ x(\mathcal{E}_i) \cdot \prod_{\mathcal{F} \in \Gamma(\mathcal{E}_i)} \big(1-x(\mathcal{F})\big)
\end{equation}
then, with positive probability, no event $\mathcal{E}_i$ occurs.
\end{theorem}
The weight that we assign to $\mathcal{E}_{i,k}$ is
\begin{equation}
\EquationName{weightdef}
x(\mathcal{E}_{i,k})
~:=~ 2 \exp\big( - \epsilon^2 \card{S_{i,k}}/16 - \epsilon \alpha 2^{k/2} /2 \big).
\end{equation}
Comparing to \eqref{eq:pikdef}, we see that this value is closely related to
(but slightly larger than) $p_{i,k}$,
which is our upper bound on the probability of $\mathcal{E}_{i,k}$.
\begin{claim}
\ClaimName{xsmall}
$x(\mathcal{E}_{i,k}) < 1/2$ for every $i \in [n]$ and $k \geq b$.
\end{claim}
\begin{proof}
By \eqref{eq:epsdef} we have $\epsilon > 4 \sqrt{\beta}$, so
$$
\epsilon 2^{k/2}
~\geq~ \epsilon \sqrt{2^b}
~\geq~ \epsilon \sqrt{2^{\lg(1/\beta)-1}}
~\geq~ \epsilon \sqrt{1/2\beta}
~>~ 2 \sqrt{2}.
$$
It follows that $x(\mathcal{E}_{i,k}) \leq 2 \exp( - \epsilon 2^{k/2}/2) < 2 \exp( - \sqrt{2} ) < 1/2$.
\end{proof}
Our next step is to characterize $\Gamma(\mathcal{E}_{i,k})$,
the events that are dependent on $\mathcal{E}_{i,k}$.
We let $\mathcal{C}_{j,k}$ be the events corresponding to all entries
of value roughly $2^{-k}$ in the $j^{\textrm{th}}$ column.
$$
\mathcal{C}_{j,k} ~\,:=\,~ \setst{ \mathcal{E}_{i,k} }{ \floor{-\lg A_{i,j}} = k }
\qquad(\text{for } j \in [m],~ k \geq b)
$$
Next, $\mathcal{Y}_j$ contains all events corresponding to all entries
in the $j^{\textrm{th}}$ column. In other words, $\mathcal{Y}_j$ is the set of all events
that depend on the random variable $y_j$.
$$
\mathcal{Y}_{j}
~\,:=\,~ \bigcup_{k \geq b} \mathcal{C}_{j,k}
~=~ \setst{ \mathcal{E}_{i,\floor{-\lg A_{i,j}}} }{ i \in [n] }
\qquad(\text{for } j \in [m])
$$
Finally, since $\mathcal{E}_{i,k}$ depends only on
the random labels of elements in $S_{i,k}$,
the set $\Gamma(\mathcal{E}_{i,k})$ consists of all events that depend on any of those labels.
$$
\Gamma(\mathcal{E}_{i,k}) ~=~ \bigcup_{j \in S_{i,k}} \mathcal{Y}_{j}.
$$
\begin{claim}
\ClaimName{LLLworks}
For every event $\mathcal{E}_{i,k}$, inequality \eqref{eq:LLL} is satisfied.
\end{claim}
\begin{proof}
The main goal of the proof is to give a good lower bound for
$\prod_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} (1-x(\mathcal{F}))$.
\Claim{xsmall} shows that $x(\mathcal{F}) \leq 1/2$, so
\begin{equation}
\EquationName{prodtoexp}
\prod_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} (1-x(\mathcal{F}))
~\geq~ \prod_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} \exp( -2 x(\mathcal{F}))
~=~ \exp\Bigg( - 2 \sum_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} x(\mathcal{F}) \Bigg).
\end{equation}
So it suffices to give a good upper bound for $\sum_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} x(\mathcal{F})$.
First we need to derive an inequality that is rather brutal, but suffices for our proof.
\begin{align}\nonumber
\epsilon \cdot \alpha 2^{k/2} / 2
&~=~ 8 \alpha \sqrt{\beta} \cdot \alpha 2^{k/2} /2
\qquad\text{(by \eqref{eq:epsdef})} \\\nonumber
&~=~ \alpha^2 \cdot 2 \sqrt{\beta} \cdot 2^{1+b/2 + (k-b)/2} \\\nonumber
&~=~ \lg(\delta/\beta^2) \cdot \big(2 \sqrt{\beta} 2^{b/2} \big) \cdot 2^{1+(k-b)/2} \\\nonumber
&~\geq~ \big(b+\lg(\delta/\beta)\big) \cdot 2^{1+(k-b)/2}
\qquad\text{(since $\lg(1/\beta) \geq b$ and $2^{b/2} \geq \sqrt{1/2\beta}$)}
\\\nonumber
&~\geq~ \big(b+\lg(\delta/\beta)\big) + 2^{1+(k-b)/2}
\qquad\text{(since $xy \geq x+y$ if $x,y \geq 2$)}\\\nonumber
&~\geq~ \big(b+\lg(\delta/\beta)\big) + (k-b)
\qquad\text{(since $2^{1+i/2} \geq i$ for all $i \geq 0$)}
\\\EquationName{eps2k}
&~=~ k + \lg(\delta/\beta)
\end{align}
Next, consider all the events that depend on $y_j$.
Then
\begin{align*}
\sum_{\mathcal{F} \in \mathcal{Y}_j} x(\mathcal{F})
&~=~ \sum_{k \geq b} \sum_{\mathcal{F} \in \mathcal{C}_{j,k}} x(\mathcal{F}) \\
&~\leq~ \sum_{k \geq b} \sum_{\mathcal{F} \in \mathcal{C}_{j,k}} \exp( - \epsilon \alpha 2^{k/2} /2 )
\qquad\text{(by \eqref{eq:weightdef})}\\
&~\leq~ \sum_{k \geq b} \card{\mathcal{C}_{j,k}} \cdot e^{-(k+\lg(\delta/\beta))}
\qquad\text{(by \eqref{eq:eps2k})}\\
&~\leq~ \sum_{k \geq b}
\Big| \setst{i}{A_{i,j} \in (2^{-k-1},2^{-k}] } \Big| \cdot 2^{-(k+\lg(\delta/\beta))} \\
&~\leq~ (\beta/\delta) \cdot (2\delta) ~=~ 2 \beta,
\end{align*}
since the $j^{\textrm{th}}$ column sums to at most $\delta$.
Therefore
$$
\sum_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} x(\mathcal{F})
~=~ \sum_{j \in S_{i,k}} \sum_{\mathcal{F} \in \mathcal{Y}_j} x(\mathcal{F})
~\leq~ 2 \card{S_{i,k}} \beta.
$$
Combining this with \eqref{eq:prodtoexp}, we obtain the lower bound
\begin{align*}
x(\mathcal{E}_{i,k}) \cdot \prod_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} (1-x(\mathcal{F}))
&~\geq~ x(\mathcal{E}_{i,k}) \cdot \exp\Bigg( - 2 \sum_{\mathcal{F} \in \Gamma(\mathcal{E}_{i,k})} x(\mathcal{F}) \Bigg) \\
&~\geq~ 2 \exp\big( - \epsilon^2 \card{S_{i,k}} /16 - \epsilon \alpha 2^{k/2} /2 \big)
\cdot \exp\big( - 4 \card{S_{i,k}} \beta \big) \\
&~=~ 2 \exp\Big( - \card{S_{i,k}} (\epsilon^2 /16 + 4 \beta)
- \epsilon \alpha 2^{k/2} /2 \Big) \\
&~\geq~ 2 \exp\Big( - \card{S_{i,k}} \epsilon^2/8
- \epsilon \alpha 2^{k/2} /2 \Big) \\
&~=~ p_{i,k} ~\geq~ \prob{\mathcal{E}_{i,k}}
\end{align*}
where the penultimate inequality holds because $\epsilon^2/8 \geq \epsilon^2/16 + 4 \beta$,
which follows because $\epsilon \geq 8 \sqrt{\beta}$ (cf.~\eqref{eq:epsdef}).
This proves \eqref{eq:LLL}.
\end{proof}
The previous claim shows that the hypotheses of the local lemma are satisfied.
So there exists a vector $y \in \set{-1,+1}^m$
such that none of the events $\mathcal{E}_{i,k}$ hold.
As in \eqref{eq:nobad}, this implies that every row has discrepancy
at most $16 \alpha \sqrt{\beta}$.
In other words, $\norm{A y}_\infty \leq 16 \alpha \sqrt{\beta}$.
This completes the proof of \Theorem{weakbeckfiala}.
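The argument above is purely existential, but each event $\mathcal{E}_{i,k}$ is determined by the independent signs $\setst{y_j}{j \in S_{i,k}}$, so a Moser--Tardos style resampling search is a natural way to look for such a $y$ in practice. The following rough Python sketch (an illustration under these assumptions, not part of the proof, and with no claim about its running time made here) resamples the variables of a violated event until none remains, using the thresholds $T_k$ from \eqref{eq:TDef}.
\begin{verbatim}
# Illustrative Moser-Tardos style resampling for the events E_{i,k}: while
# some bucket exceeds its threshold T_k, resample the signs of that bucket.
import math
import random

def resample_coloring(A, beta, delta, max_rounds=100000, seed=0):
    rng = random.Random(seed)
    m = len(A[0])
    alpha = math.sqrt(math.log2(delta / beta ** 2))
    eps = 8.0 * alpha * math.sqrt(beta)
    y = [rng.choice((-1, 1)) for _ in range(m)]

    # the buckets S_{i,k} and their thresholds T_k
    S, T = {}, {}
    for i, row in enumerate(A):
        for j, a in enumerate(row):
            if a > 0.0:
                S.setdefault((i, math.floor(-math.log2(a))), []).append(j)
    for (i, k), js in S.items():
        T[(i, k)] = eps * sum(A[i][j] for j in js) + alpha * 2.0 ** (-k / 2.0)

    for _ in range(max_rounds):
        bad = next(((i, k) for (i, k), js in S.items()
                    if abs(sum(A[i][j] * y[j] for j in js)) > T[(i, k)]), None)
        if bad is None:
            return y                      # no event E_{i,k} holds
        for j in S[bad]:                  # resample only the violated bucket
            y[j] = rng.choice((-1, 1))
    return None                           # give up after max_rounds
\end{verbatim}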
\section{Conclusion}
Many discrepancy theorems on hypergraphs have a
more general statement about the discrepancy of real-valued matrices \cite[\S 4]{BeckSos}.
We have provided another occurrence of this phenomenon
by proving \Theorem{newthm}, which generalizes \Theorem{folklore}.
We are not aware of any result showing that
either Theorem~\ref{thm:folklore} or \ref{thm:newthm}
is optimal. It seems conceivable that the logarithmic factor could be removed.
\begin{conj}
Let $V$ be an $n \times m$ real matrix with $\abs{V_{i,j}} \leq 1$,
$\norm{ v^i }_1 \leq R$, and $\norm{v_j}_1 \leq \Delta$ for all $i \in [n], j \in [m]$.
Assume $R \geq \Delta$.
There exists $y \in \set{-1,+1}^m$ with $\norm{ V y }_\infty \leq O(\sqrt{R})$.
\end{conj}
Let us now mention the recent discrepancy result of Marcus et al.~\cite{MSS},
which implies a solution to the long-standing Kadison-Singer problem.
\begin{theorem}[Corollary 1.3 of Marcus et al.~\cite{MSS}]
\TheoremName{MSS}
Let $u_1,\ldots,u_m \in \mathbb{C}^n$ satisfy $\sum_{i=1}^m u_i u_i^* = I$
and $\norm{u_i}_2^2 \leq \delta$ for all $i$.
Then there exists $y \in \set{-1,+1}^m$ such that
$
\norm{ \sum_{i=1}^m y_i u_i u_i^* } ~\leq~ O(\sqrt{\delta}),
$
where $\norm{\cdot}$ is the $\ell_2$-operator norm.
\end{theorem}
There is a relationship between
Theorems~\ref{thm:weakbeckfiala} and \ref{thm:MSS},
in the sense that both are implied by the following conjecture.
\Theorem{MSS} is the special case where each $A_i$ has rank one,
and \Theorem{weakbeckfiala} implies (ignoring the additional logarithmic factor $\alpha$)
the special case where each $A_i$ is a diagonal matrix.
\begin{conj}
Let $A_1,\ldots,A_m$ be Hermitian, positive semi-definite matrices of the same size
satisfying $\sum_{i=1}^m A_i = I$ and $\operatorname{tr} A_i \leq \delta$ for all $i$.
There exists $y \in \set{-1,+1}^m$ with $\norm{ \sum_{i=1}^m y_i A_i } \leq O(\sqrt{\delta})$.
\end{conj}
\end{document}
\begin{document}
\title{
Conditional Copula Models for Right-Censored Clustered Event Time Data
}
\author{
Candida Geerdens$^{1}$,
Elif F. Acar$^{2,\ast}$ and
Paul Janssen$^{1}$\\
\small{$^{1}$ Center for Statistics, Universiteit Hasselt}\\
\small{$^{2}$Department of Statistics, University of Manitoba}\\
\footnotesize{$^{\ast}$ Corresponding Author: \href{mailto:[email protected]}{[email protected]}}
}
\date{}
\maketitle
\begin{abstract}
This paper proposes a modelling strategy to infer the impact of a covariate on the dependence structure of right-censored clustered event time data.
The joint survival function of the event times is modelled using a parametric conditional copula whose parameter depends on a cluster-level covariate in a functional way.
We use a local likelihood approach to estimate the form of the copula parameter and outline a generalized likelihood ratio-type test strategy to formally test its constancy. A bootstrap procedure is employed to obtain an approximate $p$-value for the test.
The performance of the proposed estimation and testing methods is evaluated in simulations under different rates of right-censoring and for various parametric copula families, considering both parametrically and nonparametrically estimated margins.
We apply the methods to data from the Diabetic Retinopathy Study to assess the impact of disease onset age on the loss of visual acuity.
\end{abstract}
{\bf Keywords:} {\em
Beran's estimator, conditional copulas, generalized likelihood ratio test, local likelihood, right-censoring.
}
\blfootnote{Submitted on April 10, 2016.}
\section{Introduction}
\label{sec:1}
Many biomedical studies involve clustered time-to-event data, which can be right-censored and which may exhibit strong dependence.
For instance, lifetimes of twins or married couples are often dependent due to shared genetic or environmental factors, and characterizing these dependencies helps in making informed decisions in health research.
Other examples include time to failure of matched organs, such as eyes or kidneys, and occurrence times of linked diseases.
In such studies, the data analysis should be directed towards unraveling the within-cluster dependence, or one should at least account for its presence in the applied modelling strategy.
Copula models are well-suited for this task.
Copulas are dependence functions that link together the marginal survival functions to form the joint survival function.
Their use in survival analysis has a long history dating back to \cite{Clayton:1978}, followed by \cite{Oakes:1982}, \cite{Hougaard:1986}, and more recently, \cite{Shih/Louis:1995} and \cite{Chen/Fan/Douzo/Ying:2010}, among others.
In these papers, the focus is mainly on the unconditional dependence structure of event times and not on the presence of covariates that could provide additional information on the joint survival function.
One exception is \cite{Clayton:1978}, which devotes a section to strategies for including covariates in the association analysis of bivariate failure times and suggests adjusting both the marginal survival functions and the dependence parameter for covariates, but without any elaborate treatment.
Despite Clayton's suggestion, most commonly used approaches in survival analysis incorporate covariates only in the marginal models, and neglect their potential impact on the association structure.
For instance, in an effort to perform covariate adjustment, \cite{Huster/Brookmeyer/Self:1989} proposed a parametric analysis of paired right-censored event time data in the presence of binary covariates, with an application to diabetic retinopathy data.
In this analysis, the type of diabetes, classified into juvenile or adult groups based on age at onset, is considered as the covariate; and its impact is accounted for only in the marginal models for the time to loss of visual acuity in the laser-treated and untreated eyes, but not in the association structure.
This amounts to an implicit assumption that the dependence parameter is the same for the juvenile and adult groups, which may not be realistic or at least needs to be verified.
Note that, based on a visual representation of the data, it is difficult to tell whether the dependence parameters
of the two groups differ or not, mainly due to the high rate of right-censoring (see Figure~\ref{fig:drs1}).
\begin{figure}
\caption{Scatter plots of time to blindness (in years) of treated and untreated eyes in the juvenile and adult diabetes groups.}
\label{fig:drs1}
\end{figure}
While there exist many tools to account for covariates in the marginal survival functions of clustered right-censored time-to-event data, there is a need to extend copula-based models to include covariate information in the association structure.
This paper proposes covariate-adjusted dependence analysis for clustered right-censored event time data using a parametric conditional copula whose parameter is allowed to depend on a cluster-level covariate.
When the latter is binary or discrete with few categories, one can form two or more strata according to the covariate value and fit a copula to each stratum separately.
The impact of a continuous covariate on the dependence parameter is notoriously more difficult to formulate, as it should be specified in functional terms and is typically data specific.
This invites the use of nonparametric techniques for function estimation.
In the case of complete data, i.e. when there is no censoring, nonparametric estimation of the copula parameter function has been previously studied in \cite{Acar/Craiu/Yao:2011} and \cite{Abegaz/Gijbels/Veraverbeke:2012} for parametrically and nonparametrically estimated margins, respectively.
These proposals are built on local likelihood methods \citep{Tibshirani/Hastie:1987} combined with local polynomial estimation \citep{Fan/Gijbels:1996}.
They are, however, not directly applicable to right-censored event times.
The presence of right-censoring in the event times greatly challenges the statistical analysis, and its incorporation in the copula parameter estimation is necessary.
A recent work in this domain is \cite{Ding/Hsieh/Wang:2015}, which proposes a nonparametric estimator for the concordance probability as a function of covariates. However, this approach does not readily generalize to a likelihood-based model formulation.
Here, the first contribution is an extension of the conditional copula framework in \cite{Acar/Craiu/Yao:2011} and \cite{Abegaz/Gijbels/Veraverbeke:2012} to handle right-censored event time data, for both parametrically and nonparametrically estimated marginal survival functions. For the former, we focus on the Weibull model as employed in \cite{Huster/Brookmeyer/Self:1989}, and for the latter we consider the Beran's estimator \citep{Beran:1981}.
The second contribution is a test strategy for the constancy of the conditional copula parameter across the range of a cluster-level covariate. In the case of a discrete covariate, one can employ the traditional likelihood ratio test to assess whether the dependence parameters for different strata can be deemed the same.
However, for a continuous covariate, one is required to test the constancy of the whole dependence parameter function.
Here, this is achieved by adopting the test strategy in \cite{Acar/Craiu/Yao:2013}. The test is built on the generalized likelihood ratio statistic of \cite{Fan/Zhang/Zhang:2001} for testing a parametric or a nonparametric null hypothesis versus a nonparametric alternative hypothesis.
For conditional copulas with complete data, \cite{Acar/Craiu/Yao:2013} derived the asymptotic null distribution of the test statistic and used it to obtain a decision rule.
The presence of right-censoring complicates the development of the asymptotic null distribution. Therefore, we alternatively propose a bootstrap procedure to obtain an approximate p-value for the test.
The proposed estimation and testing methods are detailed in Sections~\ref{sec:2} and \ref{sec:3}, respectively.
Section~\ref{sec:4} contains the results from our simulations under different rates of right-censoring and for various parametric copula families, considering both parametrically and nonparametrically estimated margins.
In Section~\ref{sec:5}, we revisit the diabetic retinopathy data and assess the impact of age at onset on the time to loss of visual acuity.
The paper concludes with a brief discussion.
The bootstrap algorithms are provided in the Appendix.
Part of the simulation and data analysis results are collected in the Supplemental Material.
\vspace*{-8pt}
\section{Conditional Copula Model for Right-Censored Event Time Data}
\label{sec:2}
In this section, we introduce the notation and describe the proposed conditional copula approach for right-censored clustered event times.
To ease the presentation, we focus on the bivariate setting. However, the results can be extended to settings with clusters of higher (but equal) size using a multivariate copula.
Let $(T_1, T_2)$ be a vector of bivariate event times, and let $X$ be a continuous cluster-level covariate.
Then, for each $x$ in the support of $X$, the conditional joint survival function $S_X(t_1,t_2|x)=P(T_1>t_1,T_2>t_2|X=x)$ of the vector $(T_1,T_2)|X=x$ has a unique representation (Patton, 2002) given by
\begin{equation}
\label{eq1}
S_X(t_1,t_2 \mid X= x) = \mathbb{C}_{X}\left( S_{1\mid x}(t_1 \mid x), S_{2 \mid x}(t_2 \mid x) \mid X=x \right),
\end{equation}
where $\mathbb{C}_{X}$ is the conditional copula of the event times, and $S_{k \mid x}(t_k|x)=P(T_k>t_k|X=x)$ is the conditional marginal survival function of $T_k|X=x$ ($k=1,2$).
A major complication in fitting the model in \eqref{eq1} is that for right-censored data, the true event time is not always recorded, but instead, a lower time, called the censoring time, is observed.
Let $(C_1, C_2)$ be a vector of bivariate censoring times, independent of $(T_1,T_2)$.
We observe the minima $(Y_1,Y_2)= \left(\min\{T_1,C_1\},\min\{T_2,C_2\}\right)$, together with the corresponding censoring indicators $(\delta_1,~\delta_2)=(I\{T_1\leq C_1\},~I\{T_2\leq C_2\})$. In the special case where the same censoring time applies to all members of a cluster, we have $C= C_1=C_2$, a situation referred to as univariate censoring.
Given a random sample $\{(Y_{1i}, Y_{2i}, \delta_{1i}, \delta_{2i}, X_i)\}_{i=1}^n$, the fitting of the model in \eqref{eq1} is typically performed in two stages: first for the conditional marginal survival functions $S_{k\mid x}$, and second for the conditional copula $\mathbb{C}_{X}$.
To estimate $S_{k\mid x}$, one can employ parametric or nonparametric strategies.
These are outlined briefly in Section \ref{sec:2.1}.
We then describe the proposed nonparametric strategy for fitting the conditional copula in Section \ref{sec:2.2}. The details on the bandwidth selection for the nonparametric methods are given in Section \ref{sec:2.3}.
\vspace*{-8pt}
\subsection{Estimation of the conditional marginal survival functions}
\label{sec:2.1}
In the case of parametric conditional margins, such as Weibull, we have $ S_{k\mid x}(t_k \mid x) = S_{k\mid x}(t_k \mid x, \boldsymbol{{\gamma}}_k )$, with $\boldsymbol{{\gamma}}_k$ an unknown parameter vector ($k=1,2$).
One can then use maximum likelihood estimation to obtain
$$\widehat{S}_{k\mid x}(t_k \mid x) = S_{k\mid x}(t_k \mid x, \boldsymbol{\hat{\gamma}}_k ),$$
where
$
\boldsymbol{\hat{\gamma}}_k = \operatornamewithlimits{argmax}_{\boldsymbol{\gamma}_k} \sum_{i=1}^n \left[ \delta_{ki} \ln f_{k\mid x}(Y_{ki} \mid X_i , \boldsymbol{\gamma}_k) + (1- \delta_{ki}) \ln S_{k\mid x}(Y_{ki} \mid X_i , \boldsymbol{\gamma}_k) \right],
$
is the maximum likelihood estimate of the vector of marginal parameters ($k=1,2$).
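As a purely illustrative aside (not part of the original methodology), this censored Weibull likelihood can be maximized numerically. The short Python sketch below assumes the parametrization $S_{k\mid x}(t\mid x)=\exp\{-\lambda\,e^{\beta x}\,t^{\rho}\}$, which may differ from the parametrization actually used in the paper, and all function and variable names are ours.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def weibull_censored_mle(Y, delta, X):
    # ML fit of a Weibull margin with S(t|x) = exp(-lam * exp(beta*x) * t**rho);
    # this parametrization is an assumption made for illustration only
    def neg_loglik(par):
        log_lam, log_rho, beta = par
        lam, rho = np.exp(log_lam), np.exp(log_rho)
        scale = lam * np.exp(beta * X)
        log_f = np.log(scale) + np.log(rho) + (rho - 1.0) * np.log(Y) - scale * Y**rho
        log_S = -scale * Y**rho
        return -np.sum(delta * log_f + (1.0 - delta) * log_S)
    fit = minimize(neg_loglik, x0=np.zeros(3), method="Nelder-Mead")
    log_lam, log_rho, beta = fit.x
    return np.exp(log_lam), np.exp(log_rho), beta
\end{verbatim}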
In the absence of a suitable parametric model, the conditional margins can be estimated nonparametrically using the Beran's estimator \citep{Beran:1981}, also called the conditional Kaplan-Meier estimator, which takes the form
$$
\widetilde{S}_{k\mid x}(t_k \mid x) = \prod_{Y_{ki} \leq t_k, \delta_{ki} = 1} \left( 1- \dfrac{ w_{nki} (x; h_{k}) } { \sum_{j=1}^{n} I\{Y_{kj}\geq Y_{ki}\} w_{nkj} (x; h_{k}) } \right).
$$
The weights $w_{nki}$ are typically defined as
$$
w_{nki} (x; h_{k}) = \dfrac{ K_{h_k}\left( X_{i} - x \right) }{\sum_{j=1}^n K_{h_k}\left( X_{j}- x \right) },
$$
where $K_{h_k} (\cdot)= K(\cdot /h_k) /h_k $, with $K$ the kernel function and $h_k$ the bandwidth parameter ($k=1,2$).
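For concreteness, a minimal Python sketch of the Beran estimator with the weights above is given next; the kernel choice (Epanechnikov, as used later in the simulations) and all names are ours, and the code is meant only as an illustration of the displayed formula.
\begin{verbatim}
import numpy as np

def epanechnikov(u):
    # Epanechnikov kernel on [-1, 1]
    return 0.75 * (1.0 - u**2) * (np.abs(u) <= 1.0)

def beran_survival(t, x, Y, delta, X, h):
    # Beran (conditional Kaplan-Meier) estimate of P(T > t | X = x)
    w = epanechnikov((X - x) / h)
    w = w / w.sum()                        # Nadaraya-Watson weights w_{nki}(x; h)
    surv = 1.0
    for i in np.argsort(Y):                # product over uncensored Y_i <= t
        if Y[i] <= t and delta[i] == 1:
            at_risk = w[Y >= Y[i]].sum()   # sum_j I{Y_j >= Y_i} w_{nkj}(x; h)
            if at_risk > 0.0:
                surv *= 1.0 - w[i] / at_risk
    return surv
\end{verbatim}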
\vspace*{-8pt}
\subsection{Estimation of the conditional copula}
\label{sec:2.2}
Given the estimated margins, and assuming that for each value of $x$, the conditional copula $\mathbb{C}_{X}$ belongs to the same parametric copula family, one can fit $\mathbb{C}_{X}$ within the likelihood framework.
In this case, the impact of a covariate is considered to be solely on the strength of dependence, which is captured by the copula parameter $\theta(X)$ of $\mathbb{C}_{X}$.
Due to the restricted parameter range of some copula families, instead of directly modelling $\theta(X)$, we consider the reparametrization $ \theta(x) = g^{-1}(\eta(x))$, where $\eta(\cdot)$ is called the \emph{calibration function} and $g^{-1}: \mathbb{R} \rightarrow \Theta $ is a prespecified inverse-link function with $\Theta$ being the parameter space of a given copula family. For some commonly used copula families, the inverse link functions are provided in Table \ref{table:Copulas}.
\begin{table}[h] \footnotesize
\begin{centering}
\caption{Inverse link functions and Kendall's tau conversions for some copula families.}
\label{table:Copulas}
\begin{tabular}{ l l c c c c c c}
&Family && $\mathbb{C}(u_{1},u_{2})$ && $\Theta$ & $g^{-1}(\eta)$ & $\tau$
\\ [1ex] \hline
&Clayton && $ (u_{1}^{-\theta}+u_{2}^{-\theta}-1)^{-\frac{1}{\theta}} $ && $(0,\infty)$ & $\exp(\eta)$ &$\frac{\theta}{\theta +2}$
\\ [2ex]
&Frank && $- \frac{1}{\theta} \ln \Big\{ 1+ \frac{(e^{-\theta u_{1}}-1) (e^{-\theta u_{2}}-1)}{ e^{-\theta }-1} \Big\} $ && $(-\infty,\infty) \setminus \{0\}$ & $\eta$ & $1+ \frac{4}{\theta} [D_{1}(\theta)-1]$
\\ [2ex]
&Gumbel && $ \exp \Big\{ - [(-\ln u_{1})^{\theta} + (- \ln u_{2})^{\theta} ]^{\frac{1}{\theta}} \Big\} $ && $[1,\infty) $ & $\exp(\eta)+1$ & $1- \frac{1}{\theta}$
\\ [0.5ex]
\hline
\end{tabular}
\end{centering}
\\
{\footnotesize where $D_{1}(\theta) = \frac{1}{\theta} \int_{0}^{\theta} \frac{t}{e^{t}-1} dt $ is the Debye function.}
\end{table}
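As a small numerical illustration of these conversions (our own example, not taken from the data), a calibration value $\eta(x)=1$ under the Clayton copula corresponds to
\begin{equation*}
\theta(x)=\exp(1)\approx 2.718, \qquad \tau(x)=\frac{\theta(x)}{\theta(x)+2}\approx 0.576,
\end{equation*}
while the same calibration value under the Gumbel copula gives $\theta(x)=\exp(1)+1\approx 3.718$ and $\tau(x)=1-1/\theta(x)\approx 0.731$.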
Letting $U_k \equiv S_{k\mid x}(T_k \mid x)$ for $k=1,2$, the model in \eqref{eq1} becomes
\begin{equation}
\label{eq2}
\left(U_{1},U_{2}\right) \mid X= x \sim \mathbb{C}_{X} \left( u_{1},u_{2} \mid g^{-1}(\eta(x)) \right).
\end{equation}
Hence, the loglikelihood function takes the form \citep{Shih/Louis:1995, Massonnet:2009}
\begin{equation}
\label{eq3}
\displaystyle \sum_{i=1}^n \ell \left( g^{-1}(\eta(X_i)), {U}_{1i}, {U}_{2i} \right),
\end{equation}
where
\begin{eqnarray*}
\ell (v, u_1,u_2 ) &=& (1-\delta_{1}) (1-\delta_{2}) \ln \mathbb{C}_{X}(u_1,u_2 \mid v ) \\
&& + \quad \delta_{1} (1-\delta_{2}) \ln \left[\frac{\partial \mathbb{C}_{X}(u_1,u_2 \mid v )}{\partial u_1} \right] \\
&& + \quad (1-\delta_{1}) \delta_{2} \ln \left[\frac{\partial \mathbb{C}_{X}(u_1,u_2 \mid v)}{\partial u_2} \right] \\
&& + \quad \delta_{1} \delta_{2} \ln \left[\frac{\partial^2 \mathbb{C}_{X}(u_1,u_2 \mid v)}{\partial u_1 \partial u_2} \right].
\end{eqnarray*}
Note that due to right-censoring, the loglikelihood contributions of the data vectors are non-trivial, i.e., they involve the copula function $\mathbb{C}_{X}$ as well as its first and second order derivatives.
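To make the structure of these contributions explicit, the following sketch evaluates $\ell$ for the Clayton copula, for which $\mathbb{C}$, its first-order partial derivatives and its density are available in closed form. This is our own Python illustration, and the function name is not from the original text.
\begin{verbatim}
import numpy as np

def clayton_loglik_contrib(theta, u1, u2, d1, d2):
    # censored loglikelihood contribution ell(theta, u1, u2) for the Clayton
    # copula; d1, d2 are the censoring indicators (1 = event observed)
    A = u1**(-theta) + u2**(-theta) - 1.0
    logC  = (-1.0 / theta) * np.log(A)                                    # log C
    logC1 = (-theta - 1.0) * np.log(u1) + (-1.0/theta - 1.0) * np.log(A)  # log dC/du1
    logC2 = (-theta - 1.0) * np.log(u2) + (-1.0/theta - 1.0) * np.log(A)  # log dC/du2
    logc  = (np.log(1.0 + theta) + (-theta - 1.0) * (np.log(u1) + np.log(u2))
             + (-1.0/theta - 2.0) * np.log(A))                            # log density
    return ((1 - d1) * (1 - d2) * logC + d1 * (1 - d2) * logC1
            + (1 - d1) * d2 * logC2 + d1 * d2 * logc)
\end{verbatim}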
To fit the conditional copula, one can use maximum likelihood estimation by specifying a parametric form for $\eta(X)$. However, as noted before, the impact of the covariate on the dependence strength is difficult to predetermine in most applications. Therefore, it is often advised to employ nonparametric strategies \citep{Acar/Craiu/Yao:2011, Abegaz/Gijbels/Veraverbeke:2012}.
Suppose that $\eta(\cdot)$ is sufficiently smooth, i.e., $\eta(\cdot)$ has the $(p+1)$th derivative at the point $x$. Then, for a covariate value $X_i$ in a neighborhood of $x$, we can approximate $\eta(X_i)$ by a Taylor expansion of order $p$:
\begin{eqnarray*}
\eta(X_i) &\approx& \eta(x) + \eta^{(1)}(x) (X_i - x) + \ldots + \displaystyle \frac{\eta^{(p)}(x)}{p!} (X_i - x)^p \\
& \equiv & \beta_{0,x} + \beta_{1,x} (X_i - x) + \ldots + \beta_{p,x} (X_i - x)^p \equiv {\boldsymbol{x}}_{i,x}^{T}~\boldsymbol{\beta}_x
\end{eqnarray*}
where ${\boldsymbol x}_{i, x}= (1, X_{i}-x,\ldots, (X_{i}-x)^{p} ) ^{T} $ and
$\boldsymbol{\beta}_x = (\beta_{0,x},\beta_{1,x},\ldots,\beta_{p,x})^{T}$ with $\beta_{r,x}=\eta^{(r)}(x) / r!$.
We then estimate $\boldsymbol{\beta}_x$ by maximizing a local version of \eqref{eq3}, which is given by
\begin{equation}
\label{eq4}
\sum_{i=1}^n \ell \left( g^{-1}( \boldsymbol{x}_{i,x}^{T} \boldsymbol{\beta}_x), {U}_{1i}, {U}_{2i} \right) \; K_{h_\mathbb{C}}(X_i - x),
\end{equation}
where $K_{h_\mathbb{C}} (\cdot)= K(\cdot /h_\mathbb{C}) /h_\mathbb{C} $, with $K$ the kernel function, $h_\mathbb{C}$ the bandwidth parameter and ${U}_{ki} \equiv {S}_{k\mid x}(Y_{ki} \mid X_i) $ ($k=1,2$).
In practice, the conditional survival margins $U_{ki}$ in \eqref{eq4} are replaced by either parametric estimates, $\widehat{U}_{ki} \equiv \widehat{S}_{k\mid x}(Y_{ki} \mid X_i) $, or nonparametric estimates $\widetilde{U}_{ki} \equiv \widetilde{S}_{k\mid x}(Y_{ki}~\mid~X_i)$ ($k=1,2$).
The resulting local maximum likelihood estimates are denoted by $\boldsymbol{\widehat{\beta}}_x = (\hat{\beta}_{0,x},\hat{\beta}_{1,x}, \ldots, \hat{\beta}_{p,x})^{T} $ and $\boldsymbol{\widetilde{\beta}}_x= (\tilde{\beta}_{0,x},\tilde{\beta}_{1,x}, \ldots, \tilde{\beta}_{p,x})^{T}$, respectively.
From these, one can obtain $\hat{\eta}(x) = \hat{\beta}_{0,x}$ and $\tilde{\eta}(x) = \tilde{\beta}_{0,x}$, which in turn yield the estimates of the copula parameter at covariate value $x$ via $\hat{\theta}(x) = g^{-1} ( \hat{\eta}(x) )$ and $\tilde{\theta}(x) = g^{-1} ( \tilde{\eta}(x) )$.
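A possible implementation of this local polynomial fit, reusing the helper functions sketched above and fixing, for illustration, the Clayton family with $g^{-1}=\exp$, is outlined below; the optimizer choice and all names are our own assumptions rather than part of the original methodology.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def local_calibration_estimate(x, U1, U2, d1, d2, X, h, p=1):
    # local polynomial estimate of eta(x) maximizing the weighted loglikelihood (4)
    Z = np.column_stack([(X - x)**r for r in range(p + 1)])   # rows x_{i,x}^T
    K = epanechnikov((X - x) / h) / h                         # kernel weights K_h

    def neg_local_loglik(beta):
        theta_i = np.exp(Z @ beta)                            # theta(X_i) = exp(eta)
        return -np.sum(clayton_loglik_contrib(theta_i, U1, U2, d1, d2) * K)

    fit = minimize(neg_local_loglik, x0=np.zeros(p + 1), method="Nelder-Mead")
    return fit.x[0]                                           # eta_hat(x) = beta_{0,x}
\end{verbatim}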
\vspace*{-8pt}
\subsection{Bandwidth selection}
\label{sec:2.3}
The choice of the bandwidth parameter $h_\mathbb{C} $ plays an important role in the local likelihood estimation given in \eqref{eq4}.
If the conditional marginal survival functions are estimated parametrically, the leave-one-out cross-validated loglikelihood criterion in \cite{Acar/Craiu/Yao:2011} can be employed to obtain a data-driven bandwidth. In this case, we select the bandwidth value that maximizes the leave-one-out cross-validated loglikelihood function
\begin{equation}
\label{eq:5}
B(h_\mathbb{C} ) = \displaystyle \sum_{i=1}^n \ell \left( \widehat{\theta}^{(-i)}_{h_\mathbb{C}}(X_i) , \widehat{U}_{1i}, \widehat{U}_{2i} \right),
\end{equation}
where $\widehat{\theta}^{(-i)}_{h_\mathbb{C}}(X_i)$ is the estimated copula parameter at the bandwidth value ${h_\mathbb{C}}$, obtained by leaving the $i^{\rm{th}}$ data point ($\widehat{U}_{1i},\widehat{U}_{2i}, \delta_{1i}, \delta_{2i}, X_i$) out and using the remaining data points ($\widehat{U}_{1j},\widehat{U}_{2j}, \delta_{1j}, \delta_{2j}, X_j$) with $j = 1,\ldots, i-1, i+1, \ldots, n$.
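In code, the criterion in \eqref{eq:5} amounts to a simple leave-one-out loop over the data, as in the sketch below (again for the Clayton family and with our own function names); the bandwidth is then chosen by maximizing this criterion over a candidate grid, for instance the grid used later in the simulation study.
\begin{verbatim}
import numpy as np

def cv_loglik(h, U1, U2, d1, d2, X):
    # leave-one-out cross-validated loglikelihood B(h) of (5)
    n, total = len(X), 0.0
    for i in range(n):
        keep = np.arange(n) != i
        eta_i = local_calibration_estimate(X[i], U1[keep], U2[keep],
                                           d1[keep], d2[keep], X[keep], h)
        total += clayton_loglik_contrib(np.exp(eta_i), U1[i], U2[i], d1[i], d2[i])
    return total

# candidate bandwidths on a logarithmic scale; U1, U2, d1, d2, X are the
# pseudo-observations, censoring indicators and covariate values at hand
candidates = np.geomspace(0.3, 3.0, 6)
h_opt = max(candidates, key=lambda h: cv_loglik(h, U1, U2, d1, d2, X))
\end{verbatim}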
If the Beran's estimator is used to obtain the conditional marginal survival functions, the bandwidth selection involves two additional parameters, $h_1$ and $h_2$.
Since the model fitting is performed in two stages, one could first choose $h_1$ and $h_2$, separately, and then determine $h_\mathbb{C}$.
However, the few available bandwidth selectors for the Beran's estimator are either not easy to implement or not data-driven \citep{VanKeilegom:1998, Gang/Datta:2001, Demin/Chimitova:2014}.
Therefore, we propose to choose the bandwidth values $(h_1, h_2, h_\mathbb{C})$ jointly,
exploiting the loglikelihood framework. This amounts to maximizing the following function
\begin{equation}
\label{eq:6}
B^{\ast}(h_1, h_2, h_\mathbb{C} ) = \displaystyle \sum_{i=1}^n \ell \left( \widetilde{\theta}^{(-i)}_{h_\mathbb{C}}(X_i) , \widetilde{U}_{1i (h_1)}, \widetilde{U}_{2i (h_2)} \right).
\end{equation}
Here $\widetilde{U}_{ki (h_k)}$ denotes the nonparametric estimate of the $k^{\rm{th}}$ conditional margin at the bandwidth value $h_k$ ($k=1,2$), and $\widetilde{\theta}^{(-i)}_{h_\mathbb{C}}(X_i)$ is the leave-one-out cross-validated copula parameter estimate for the $i^{\rm{th}}$ observation based on the fitted conditional margins and using the bandwidth value ${h_\mathbb{C}}$.
Note that, due to joint selection of $(h_1, h_2, h_\mathbb{C})$, the estimates of the conditional margins and the copula parameter depend on all three bandwidth values. Nevertheless, for notational simplicity, we use only the corresponding bandwidth value in each component.
\vspace*{-8pt}
\section{Generalized Likelihood Ratio Test}
\label{sec:3}
A relevant question in applications is whether a covariate has a significant impact on the underlying dependence structure of the clustered event times. This is equivalent to testing the constancy of the conditional copula parameter as formalized by
\begin{equation}
\label{eq:null}
\rm{H}_0 : \theta(\cdot) \in \mathfrak{f}_c \qquad \quad \text{versus} \qquad \quad \rm{H}_1 : \theta(\cdot) \notin \mathfrak{f}_c,
\end{equation}
where $\mathfrak{f}_c = \{ \theta(\cdot): \exists \; \theta_0 \in \Theta \mbox{ such that } \theta(X)=\theta_0 \; \; \forall X\in \mathcal{X} \}$ is the set of all constant functions on $\mathcal{X}$.
Note that the null hypothesis corresponds to the so-called simplifying assumption in pair-copula constructions \citep{HobaekHaff/Aas/Frigessi:2010, Acar/Genest/Neslehova:2012}.
The hypothesis testing problem in \eqref{eq:null} can be evaluated through the difference \citep{Acar/Craiu/Yao:2013}
$$
\lambda_n = \sup_{\theta(\cdot) \notin \mathfrak{f}_c } \{\mathbb{L}_n (\rm{H}_1) \} - \sup_{\theta(\cdot) \in \mathfrak{f}_c} \{\mathbb{L}_n (\rm{H}_0)\},
$$
where
$$
\mathbb{L}_n ({\rm{H}}_0) =\sum_{i=1}^n \ell \left( \theta_0 , U_{1i}, U_{2i} \right)
\qquad \text{and} \qquad
\mathbb{L}_n ({\rm{H}}_1) = \sum_{i=1}^n \ell \left( {\theta}(X_i), U_{1i}, U_{2i} \right).
$$
The statistic $\lambda_n$ is referred to as the \emph{generalized likelihood ratio} (GLR), and differs from the canonical likelihood ratio in that the model under the alternative hypothesis is specified nonparametrically. Hence, the distribution of the test statistic depends on the bandwidth parameter and the kernel function used in the nonparametric estimation.
Further, the presence of right-censoring complicates the loglikelihood expressions, making the assessment of the asymptotic null distribution of the test statistic quite cumbersome.
Even when available, the convergence of the null distribution to the asymptotic distribution might be slow; hence a bootstrap estimate is typically used in finite samples to approximate the null distribution \citep{Fan/Zhang:2004, Fan/Jiang:2005}.
Here, we follow a similar strategy and propose a bootstrap algorithm to obtain an approximate p-value for the test.
We distinguish two cases according to whether the conditional marginal survival functions are estimated parametrically or nonparametrically.
When the conditional marginal survival functions are estimated parametrically, the supremum of the loglikelihood function under the null hypothesis is given by
$$
\mathbb{L}_n ({\rm{H}}_0, \widehat{\theta}_0)= \sum_{i=1}^n \ell \left( \widehat{\theta}_0, \widehat{U}_{1i}, \widehat{U}_{2i} \right),
$$
where $ \widehat{\theta}_0$ is the maximum likelihood estimator of the constant conditional copula parameter $\theta_0$ based on observations $(\widehat{U}_{1i}, \widehat{U}_{2i}, \delta_{1i}, \delta_{2i})$, $i=1,\ldots,n$.
For the alternative hypothesis, we use the local likelihood estimator $\widehat \theta_{h_\mathbb{C}}$ at each covariate value (Section \ref{sec:2.2}) and obtain
$$
\mathbb{L}_n ({\rm{H}}_1,\widehat \theta_{h_\mathbb{C}} ) = \sum_{i=1}^n \ell \left( \widehat{\theta}_{h_\mathbb{C}} (X_i), \widehat{U}_{1i}, \widehat{U}_{2i} \right),
$$
where $\widehat{\theta}_{h_\mathbb{C}}$ is the estimated copula parameter obtained by maximizing the local likelihood \eqref{eq4} at the optimal bandwidth value $h_\mathbb{C}$ selected via \eqref{eq:5}. The generalized likelihood ratio statistic then becomes
$$
\lambda_n (h_\mathbb{C}) = \sum_{i=1}^n \ell \left( \widehat{\theta}_{h_\mathbb{C}} (X_i), \widehat{U}_{1i}, \widehat{U}_{2i} \right) - \sum_{i=1}^n \ell \left( \widehat{\theta}_0, \widehat{U}_{1i}, \widehat{U}_{2i} \right).
$$
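A sketch of this computation for parametrically estimated margins and the Clayton family, built on the helpers sketched in Section \ref{sec:2} (again our own illustration, not the original implementation), is:
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def glr_statistic(U1, U2, d1, d2, X, h):
    # generalized likelihood ratio statistic lambda_n(h)
    # null model: constant copula parameter, ordinary maximum likelihood
    null = minimize(lambda b: -np.sum(clayton_loglik_contrib(np.exp(b[0]),
                                                             U1, U2, d1, d2)),
                    x0=[0.0], method="Nelder-Mead")
    L0 = -null.fun
    # alternative model: local likelihood fit at each covariate value
    L1 = sum(clayton_loglik_contrib(
                 np.exp(local_calibration_estimate(X[i], U1, U2, d1, d2, X, h)),
                 U1[i], U2[i], d1[i], d2[i])
             for i in range(len(X)))
    return L1 - L0
\end{verbatim}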
To obtain the null distribution of $\lambda_n (h_\mathbb{C})$, we use the bootstrap procedure outlined in Algorithm \ref{alg:1} (see the Appendix)
\citep{DavisonHinkley:1997, Geerdens2013, BGeerdens2015}.
In the case of nonparametrically estimated conditional margins, a similar strategy is followed to obtain
$$
\lambda_n (h_1, h_2, h_\mathbb{C}) = \sum_{i=1}^n \ell \left( \widetilde{\theta}_{h_\mathbb{C}} (X_i), \widetilde{U}_{1i}, \widetilde{U}_{2i} \right) - \sum_{i=1}^n \ell \left( \widetilde{\theta}_0, \widetilde{U}_{1i}, \widetilde{U}_{2i} \right),
$$
where $\widetilde{\theta}_0$ is the maximum likelihood estimator of the constant conditional copula parameter $\theta_0$ based on observations $(\widetilde{U}_{1i}, \widetilde{U}_{2i}, \delta_{1i}, \delta_{2i})$, $i=1,\ldots,n$. The latter are obtained using
$h_1$ and $h_2$, the optimal bandwidth values maximizing \eqref{eq:6}, jointly with the bandwidth value $h_\mathbb{C}$.
For the alternative model, we use the local likelihood estimator $\widetilde{\theta}_{h_\mathbb{C}}$ at each covariate value (Section \ref{sec:2.2}).
To obtain the null distribution of $\lambda_n (h_1, h_2, h_\mathbb{C})$, we employ Algorithm \ref{alg:2} (see the Appendix),
which differs from Algorithm \ref{alg:1} mainly in that $(\widehat{U}_{1i}, \widehat{U}_{2i})$ and $\widehat{S}_{k|x}$ are replaced by $(\widetilde{U}_{1i}, \widetilde{U}_{2i})$ and $\widetilde{S}_{k|x}$.
In the bootstrap, the bandwidth values are taken to be the same as the ones obtained for the original data.
In both algorithms, the null distribution of the test statistic is only approximate. Each bootstrap sample, although generated using the constant conditional copula parameter value under the null hypothesis, involves additional variation/uncertainty arising from the estimation of the conditional marginal survival functions.
This variation is more pronounced when the Beran's estimator is used (see Section \ref{sec:4}).
\vspace*{-8pt}
\section{Simulation Results}
\label{sec:4}
We investigate the finite sample performance of the proposed estimation and testing methods in a simulation study.
We consider the Clayton, Frank and Gumbel copulas, with dependence structures given by the following scenarios:
\begin{enumerate}
\item[] Constant model:~~~~$ \tau(X) = 0.6$
\item[] Convex model:~~~~~~$\tau(X) = 0.1(X-3)^2 +0.3$
\item[] Concave model:~~~~~$\tau(X) = -0.1 (X-3)^2 +0.7$
\end{enumerate}
The models for the covariate effect are specified in terms of Kendall's tau to allow comparisons across different copulas. The conversions between the copula parameter $\theta$ and Kendall's tau $\tau$ are given in Table \ref{table:Copulas} for the considered copulas.
In the constant model, the covariate has no effect on the strength of dependence, while in the convex and concave models, the covariate effect has the respective form with Kendall's tau varying from $0.3$ to $0.7$.
Under each scenario, we generate the copula data $\{ (U_{1i},U_{2i} \mid X_{i}): i=1,2,\ldots,n \}$ as outlined in \cite{Acar/Craiu/Yao:2011}. That is, we first obtain covariate values $X_{i}$ from Uniform~$(2, 5)$. Then, for each $i=1,2,\ldots,n$, we calculate the corresponding Kendall's tau $\tau_i \equiv\tau(X_i)$ and the corresponding copula parameter $\theta_i \equiv \theta(X_i)$.
To obtain the event times, we apply the inverse-cdf method to the copula data using the Weibull model with parameters $\lambda_T=0.5$, $\rho_T=1.5$, and $\beta_T=0.8$. To introduce right-censoring, we generate the (univariate) censoring times from the Weibull model with parameters $\lambda_C=1.5$ and $\rho_C=1.5$ for the case of low censoring (approximately 20\% censoring rate), and with parameters $\lambda_C=1.5$ and $\rho_C=0.5$ for the case of moderate censoring (approximately 50\% censoring rate). The observed data are then given by the minima of the event times and the censoring times.
We also consider non-censored event time data to assess the impact of censoring on the results.
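For concreteness, the data-generating step can be sketched as follows; the conditional-inverse sampler for the Clayton copula is standard, while the Weibull inversion below omits the covariate effect $\beta_T$ of the parametrization used in the paper and is therefore only indicative of the general idea, not a reproduction of the exact design.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n = 250
X = rng.uniform(2.0, 5.0, size=n)
tau = 0.1 * (X - 3.0)**2 + 0.3                     # convex Kendall's tau model
theta = 2.0 * tau / (1.0 - tau)                    # Clayton: tau = theta/(theta+2)

# conditional-inverse sampling of (U1, U2) from the Clayton copula
U1 = rng.uniform(size=n)
W = rng.uniform(size=n)
U2 = ((W**(-theta/(1.0 + theta)) - 1.0) * U1**(-theta) + 1.0)**(-1.0/theta)

# inverse-cdf step for the event times from S(t) = exp(-lam * t**rho)
lam_T, rho_T = 0.5, 1.5
T1 = (-np.log(U1) / lam_T)**(1.0 / rho_T)
T2 = (-np.log(U2) / lam_T)**(1.0 / rho_T)

# univariate right-censoring from a second Weibull distribution
lam_C, rho_C = 1.5, 1.5
C = (-np.log(rng.uniform(size=n)) / lam_C)**(1.0 / rho_C)
Y1, Y2 = np.minimum(T1, C), np.minimum(T2, C)
d1, d2 = (T1 <= C).astype(int), (T2 <= C).astype(int)
\end{verbatim}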
We estimate the conditional marginal survival function parametrically, using the Weibull model, and nonparametrically, using the Beran's estimator. Based on the resulting estimates, we perform local linear estimation under the correct copula and obtain the estimates of the calibration function. The local weights are defined using the Epanechnikov kernel.
For the bandwidth parameter(s), we consider $6$ candidate values, ranging from $0.3$ to $3$, equally spaced on a logarithmic scale.
The bandwidth selection for the Beran's estimator is based on a comparison with the true marginal survival function, while the bandwidth selection for the conditional copula estimation is based on the cross-validated loglikelihood criterion, for each simulated data set.
All results reported are based on the local calibration estimates at the chosen optimal bandwidth. These calibration estimates are converted into the copula parameter via the link functions in Table~\ref{table:Copulas}.
We evaluate the estimation strategy through the integrated Mean Square Error (IMSE) along with the integrated square Bias (IBIAS$^{2}$) and integrated Variance (IVAR), given by
\begin{eqnarray}
&& \text{IBIAS}^{2}(\hat{\tau}) = \int_{\mathcal{X}} \big[ E [ \hat{\tau}(x)] - \tau(x) \big]^{2} \; dx, \nonumber \\
&& \text{IVAR}(\hat{\tau}) = \int_{\mathcal{X}} E \big[ \{ \hat{\tau}(x) - E [ \hat \tau(x)] \}^{2} \big] \; dx, \nonumber \\
&& \text{IMSE}(\hat{\tau}) = \int_{\mathcal{X}} E \big[ \{ \hat{\tau}(x) - \tau(x) \}^{2} \big] \; dx \; = \; \text{IBIAS}^{2}(\hat{\tau}) + \text{IVAR}(\hat{\tau}). \nonumber
\end{eqnarray}
These quantities are approximated by a Riemann sum over a grid of points $\{2,2.1,\ldots,4.9,5\}$ in the covariate range.
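Given the Monte-Carlo estimates on this grid, the three quantities can be approximated as in the short sketch below (our own helper, with the estimates stored as an $M\times G$ array of replicates by grid points).
\begin{verbatim}
import numpy as np

def imse_components(tau_hat, tau_true, grid):
    # tau_hat: (M, G) array of estimates; tau_true: (G,) true values
    dx = grid[1] - grid[0]                     # equally spaced grid
    mean_hat = tau_hat.mean(axis=0)
    ibias2 = np.sum((mean_hat - tau_true)**2) * dx
    ivar = np.sum(tau_hat.var(axis=0)) * dx
    return ibias2, ivar, ibias2 + ivar         # IBIAS^2, IVAR, IMSE
\end{verbatim}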
We use the Kendall's tau scale to compare the performance of the local linear estimator across different copulas and at different censoring rates.
Under each scenario, we conduct experiments with sample sizes $n=250$ and $500$, each replicated $M=500$ times.
The results under the Clayton copula are displayed in Table~\ref{MISE_Clayton}, while the results under the Frank and Gumbel copulas are deferred to the Supplemental Material.
\begin{table}[h!]
\footnotesize
\caption{Integrated Squared Bias, Integrated Variance and Integrated Mean Square Error (multiplied by 100) of the Kendall's tau estimates under the Clayton copula.}
\label{MISE_Clayton}
\centering
\begin{tabular}{cccccccccccccc}
\toprule \\ [-3.5ex]
&&&&& \multicolumn{6}{c}{} &&& \\[-1.75ex]
&&&& \multicolumn{3}{c}{Parametric Margins}&&&\multicolumn{3}{c}{Nonparametric Margins} \\[0.5ex]
\cmidrule(r){5-8} \cmidrule(r){10-13}
& censoring rate & n && IBIAS$^{2}$ & IVAR & IMSE &&& IBIAS$^{2}$ & IVAR & IMSE \\
\midrule
\multirow{6}{*}{Constant}
& 0 \% & 250 && 0.002 & 0.605 & 0.607 &&& 0.489 & 1.246 & 1.735\\
& & 500 && 0.000 & 0.304 & 0.304 &&& 0.207 & 0.550 & 0.757 \\
& 20 \% & 250 && 0.002 & 0.764 & 0.766 &&& 0.717 & 1.914 & 2.631 \\
& & 500 && 0.001 & 0.392 & 0.393 &&& 0.318 & 0.798 & 1.116 \\
& 50 \% & 250 && 0.005 & 1.261 & 1.266 &&& 1.299 & 3.664 & 4.963 \\
& & 500 && 0.005 & 0.638 & 0.643 &&& 0.675 & 1.547 & 2.222
\\ [0.8ex] \midrule
\multirow{6}{*}{Convex}
& 0 \% & 250 && 0.097 & 1.383 & 1.479 &&& 0.711 & 1.941 & 2.652\\
& & 500 && 0.056 & 0.681 & 0.737 &&& 0.317 & 0.922 & 1.239 \\
& 20 \% & 250 && 0.117 & 1.950 & 2.066 &&& 1.003 & 2.780 & 3.783 \\
& & 500 && 0.060 & 0.964 & 1.024 &&& 0.554 & 1.360 & 1.914 \\
& 50 \%& 250 && 0.175 & 3.303 & 3.478 &&& 1.516 & 4.623 & 6.140 \\
& & 500 && 0.077 & 1.612 & 1.688 &&& 0.913 & 2.044 & 2.957
\\ [0.8ex] \midrule
\multirow{6}{*}{Concave}
& 0 \% & 250 && 0.084 & 0.908 & 0.992 &&& 0.736 & 1.677 & 2.413\\
& & 500 && 0.041 & 0.477 & 0.518 &&& 0.332 & 0.754 & 1.085 \\
& 20 \% & 250 && 0.097 & 1.113 & 1.210 &&& 0.924 & 2.446 & 3.371 \\
& & 500 && 0.056 & 0.571 & 0.627 &&& 0.475 & 1.059 & 1.534 \\
& 50 \%& 250 && 0.149 & 1.805 & 1.954 &&& 1.586 & 4.081 & 5.667 \\
& & 500 && 0.101 & 0.888 & 0.989 &&& 0.890 & 1.953 & 2.843
\\ [0.8ex]
\bottomrule
\end{tabular}
\end{table}
From Table~\ref{MISE_Clayton}, it can be seen that the estimation performance deteriorates with increasing censoring rate. Since right-censoring causes loss of information, this result is to be expected.
Further, Table~\ref{MISE_Clayton} shows that the parametrically estimated marginal survival functions yield better precision and accuracy in the estimation results compared to the nonparametrically estimated marginal survival functions.
This observation confirms the additional uncertainty induced by the Beran's estimator.
Table~\ref{MISE_Clayton} also indicates that the estimation performance improves with increasing sample size.
Similar conclusions are reached in the simulations under the Frank and Gumbel copulas (see Tables \ref{MISE_Frank} and \ref{MISE_Gumbel}).
A graphical representation of the results is provided in Figure \ref{fig:estimation_Clayton} for the convex model with sample size $n=250$ under the Clayton copula.
We observe that, on average, the underlying functional form is estimated successfully, with slightly wider confidence intervals for nonparametrically estimated margins and at higher censoring rates.
\begin{figure}
\caption{The Kendall's tau estimates for the convex model with sample size $n=250$ under the Clayton copula. Parametric margins (left panel) and nonparametric margins (right panel) - no censoring (top panel), $20\%$ of censoring (middle panel) and $50\%$ of censoring (bottom).
The grey line represents the true Kendall's tau; the dashed line represents the Kendall's tau estimates averaged over 500 Monte-Carlo samples; and the dotted lines represent the $5^{\rm{th}}$ and $95^{\rm{th}}$ percentiles of these estimates.}
\label{fig:estimation_Clayton}
\end{figure}
Next, we evaluate the proposed testing strategy through the empirical type I error and the empirical power attained at the significance level $\alpha = 0.05$. Under each of the considered scenarios, we perform the test focusing on the cases with sample size $n=250$ and considering $M=200$ replicates. For each replicate, $B=100$ bootstrap samples are drawn to obtain an approximate $p$-value. The rejection rates are reported in Table~\ref{test_Clayton}.
\begin{table}[h!]
\footnotesize
\caption{The empirical type I error (constant model) and the empirical power (convex and concave models) at significance level $5\%$ under the Clayton copula. }
\label{test_Clayton}
\centering
\begin{tabular}{ccccccc}
\toprule \\ [-3.5ex]
&&&&& \multicolumn{2}{c}{} \\[-1.75ex]
&&&& Parametric Margins && Nonparametric Margins \\[0.5ex]
\cmidrule(r){5-5} \cmidrule(r){7-7}
& censoring rate & n && rejection rate && rejection rate\\
\midrule
\multirow{3}{*}{Constant}
& 0 \% & 250 && 0.080 && 0.085\\
& 20 \% & 250 && 0.085 && 0.150\\
& 50 \% & 250 && 0.090 && 0.170
\\ [0.8ex] \midrule
\multirow{3}{*}{Convex}
& 0 \% & 250 && 0.975 && 0.845\\
& 20 \% & 250 && 0.980 && 0.815\\
& 50 \%& 250 && 0.765 && 0.630
\\ [0.8ex] \midrule
\multirow{3}{*}{Concave}
& 0 \% & 250 && 0.990 && 0.605\\
& 20 \% & 250 && 0.990 && 0.500\\
& 50 \%& 250 && 0.865 && 0.305
\\ [0.8ex]
\bottomrule
\end{tabular}
\end{table}
As can be seen in Table~\ref{test_Clayton}, the empirical type I error rates, obtained under the constant model, are higher than the nominal level $\alpha=0.05$, which indicates that the proposed test is liberal. Further, the empirical type I error tends to be higher when the conditional margins are estimated nonparametrically and when the censoring rate is higher.
The rejection rates reported under the convex and concave models allow the assessment of the empirical power of the test.
Overall, we observe a higher empirical power for the parametrically estimated margins than for the nonparametrically estimated margins. The empirical power decreases as the censoring rate increases. The test results under the Frank and Gumbel copulas provide further evidence for these conclusions (see Tables \ref{test_Frank} and \ref{test_Gumbel}).
A comparison of the results across different copula families indicates a better performance in the estimation and in the testing for the Frank and Gumbel copulas than for the Clayton copula.
Note that the Clayton copula has lower tail dependence, while the Frank and Gumbel copulas exhibit no tail dependence and upper tail dependence, respectively.
In the context of joint survival models, a copula with lower (upper) tail dependence represents the association between late (early) event times.
Typically, late event times are subject to right-censoring; hence the loss of information occurs mainly in the lower tail area.
It follows that the Clayton copula is affected more by the right-censoring, leading to a relatively inferior performance in the estimation and in the testing strategy.
\vspace*{-8pt}
\section{Data Analysis}
\label{sec:5}
In this section, we analyze a subset of the data from the Diabetic Retinopathy Study, which was considered in \cite{Huster/Brookmeyer/Self:1989}, among others.
This study was conducted to investigate the effectiveness of laser photocoagulation in delaying the onset of blindness in diabetic retinopathy patients.
One eye of each patient was randomly selected for treatment and the other eye was observed without treatment.
The patients were followed up until their visual acuity got below a threshold at two consecutive visits.
The analysis subset consists of $n=197$ high risk patients, with censoring rates 73\% for the treated eyes and 49\% for the untreated eyes.
The data on the first and the last patients are given by $(Y_{11},Y_{21},\delta_{11},\delta_{21},X_1)=(46.23,46.23,0,0,28)$ and $(Y_{1n},Y_{2n},\delta_{1n},\delta_{2n},X_n)=(41.93,41.93,0,0,32)$, respectively.
Note that the event times are subject to univariate censoring.
More details on the dataset can be found in \cite{Huster/Brookmeyer/Self:1989}.
Besides assessing the effectiveness of the laser photocoagulation treatment, of interest was also to understand the dependence between the times to blindness of the treated and untreated eyes.
For our purposes, we consider the age at onset of the diabetic retinopathy as a continuous covariate and investigate if it has a significant impact on this dependence.
We fit the conditional joint survival function of the event times given the age at onset in two stages, as outlined in Section \ref{sec:2}.
Following \cite{Huster/Brookmeyer/Self:1989}, we use the Weibull model for the conditional marginal survival functions and the Clayton copula for the conditional dependence between the event times given the age at onset.
The parameter estimates for the conditional marginal survival functions under the Weibull model are given in Table \ref{table:DRS Weibull}.
These estimates are based on the Weibull parametrization in \cite{Duchateau/Janssen:2008}, and are in close correspondence with the ones given in \cite{Huster/Brookmeyer/Self:1989}.
We also employ the Beran's estimator with the Epanechnikov kernel and obtain nonparametric estimates of each conditional margin at $10$ bandwidth values, ranging from $3$ to $57$ on a logarithmic scale.
The same $10$ bandwidth values are considered in the local likelihood estimation.
For the conditional copula model with parametrically estimated conditional margins, the optimal bandwidth value is $h_\mathbb{C} =42$.
When the conditional margins are estimated nonparametrically using the Beran's estimator, we performed bandwidth selection over a grid of $10 \times 10\times 10$ candidate values and obtained the optimal bandwidth vector $(h_1, h_2,h_\mathbb{C} ) = (3, 3, 42)$.
The local likelihood estimates of Kendall's tau at the selected bandwidths under each type of conditional margins are shown in Figure \ref{fig:drs2}, together with the $90\%$ bootstrap confidence intervals.
The latter are obtained by resampling the original data points $B=1000$ times, and fitting a joint model for each bootstrap sample at the bandwidth values selected for the original data.
The results from the parametric and nonparametric conditional margins both suggest an increasing linear pattern in the strength of dependence with the age at onset of diabetic retinopathy.
For comparisons, we also fit constant and linear calibration models using maximum likelihood. The resulting estimates are displayed in Figure \ref{fig:drs2} in the Kendall's tau scale.
As can be seen, the local likelihood estimates are in close agreement with the parametric estimates under the linear calibration model.
Furthermore, we observe slightly wider bootstrap confidence intervals, hence a larger variation in the Kendall's tau estimates when the Beran's estimator is used.
This observation is in line with our findings in Section \ref{sec:4}.
There is more uncertainty in the local likelihood estimates when the age at onset is greater than 40. This is mainly due to the limited number of patients (31 out of 197) with high onset age, for which most of the observations are censored for at least one eye.
\begin{figure}
\caption{Local likelihood estimates of the Kendall's tau (dashed lines) as a function of the age at onset of diabetic retinopathy obtained from parametrically (left panel) and nonparametrically (right panel) estimated conditional survival functions, along with the $90\%$ bootstrap confidence intervals (dotted lines) under the Clayton copula. Also shown are the maximum likelihood estimates of the Kendall's tau (grey solid lines) obtained under the constant and linear calibration models.}
\label{fig:drs2}
\end{figure}
To decide whether the observed variation in the strength of dependence (ranging approximately from $0.2$ to $0.7$ in the Kendall's tau scale) is significant or not, we perform the generalized likelihood ratio test as outlined in Section \ref{sec:3}.
The $p$-values based on $B=1000$ bootstrap samples under the null hypothesis are $0.138$ for the parametric conditional margins and $0.540$ for the nonparametric conditional margins.
Hence, there is not enough evidence in the data to reject the constant conditional copula model.
Note that the traditional likelihood ratio test between the constant and linear calibration models also supports this conclusion with $p$-values $0.111$ and $0.275$ for parametrically and nonparametrically estimated conditional margins, respectively.
These results together suggest that the impact of the age at onset on the dependence between the times-to-blindness in the treated and untreated eyes is not statistically significant.
Nevertheless, the observed increasing pattern in the strength of dependence may be of clinical interest, and would be worth further investigation in a larger sample.
\vspace*{-8pt}
\section{Discussion}
\label{sec:6}
In this paper we outline an estimation and a testing strategy to assess the impact of a continuous cluster-level covariate on the strength of within-cluster dependence of right-censored event time data, as modelled via a conditional copula.
A local likelihood approach is used to estimate the functional form of the conditional copula parameter and a generalized likelihood ratio test is described to test its constancy.
A bootstrap procedure is employed to obtain an approximate $p$-value for the test.
The performance of the estimation and the testing method is evaluated in a simulation study, under different rates of right-censoring and for various parametric copula families, considering both parametrically and nonparametrically estimated margins.
The results indicate that the local likelihood approach leads to on-target estimation, with more uncertainty for nonparametrically estimated margins than for parametrically estimated margins, and that, depending on the considered parametric copula family, the testing strategy has reasonable to high power.
The simulation results further suggest that the proposed bootstrap strategy is not optimal in terms of the accuracy of the test.
The high type 1 error rates may be a result of the bandwidth parameter choice in the bootstrap samples.
To reduce the computational burden, we use the bandwidth values for the original data, as selected by the cross-validated loglikelihood criterion, also in the bootstrap samples.
Alternatively, one can perform bandwidth selection for each bootstrap sample, and approximate the null distribution of the test statistic with the corresponding bootstrap test values.
This would, however, incur a rather high computational cost, and does not guarantee an accurate test decision.
A more feasible option is to perform the test at a specific (but not data-driven) bandwidth value for both the original data and the bootstrap samples.
The higher the value of the bandwidth, the more conservative the test becomes.
Our investigations for the simulated data suggest that when a global bandwidth value is used, the rejection rate of the test under the null model is much closer to the nominal type 1 error rate, yet still attains a reasonable power under the alternative models, especially when the conditional margins are estimated parametrically.
As noted before, the presence of censoring and the nonparametric convergence rates of the conditional copula estimator as well as of the Beran's estimator, together make an assessment of the asymptotic null distribution quite cumbersome.
Nevertheless, the simulation study suggests that the use of a bootstrap is a valid alternative.
In the implementation of the proposed methods, it is assumed that the copula family is correctly specified. However, in practice, one has to select a copula family appropriate for the within-cluster dependence of the right-censored event time data being analyzed. This selection can be based on a likelihood-based criterion, such as the Akaike information criterion as typically employed in copula modelling, or by comparing the predictive performance of competing copula models as suggested in \cite{Acar/Craiu/Yao:2011}. In our analysis of the diabetic retinopathy data, we followed \cite{Huster/Brookmeyer/Self:1989} and employed the Clayton copula. The estimation results under the Frank and Gumbel copulas yield a similar pattern for the impact of age at onset on the strength of dependence between the time to blindness of treated and untreated eyes (see Figures \ref{fig:drsF} and \ref{fig:drsG}).
The procedures in this paper are developed for clustered right-censored event time data, assuming that the censoring times are independent of the event times and the covariate.
This assumption is required to obtain the likelihood function in \eqref{eq3}, which contains only the distribution of the event times but not the censoring distribution.
Extending the methods to event time data subject to dependent censoring requires careful consideration, due to the problem of non-identifiability of the copula \citep{Wang:2012}.
On the other hand, extensions of the methods to other types of event time data with independent censoring are possible with some additional effort, i.e., adjustments are needed in the estimation of the conditional margins as well as in the estimation of the conditional copula parameter.
A particular future research direction is to develop conditional copula models for interval censored data.
\vspace*{-8pt}
\section*{Appendix}
\begin{algorithm}
{\footnotesize
\begin{algorithmic}
\State {\bf Step 1.} Obtain $\lambda_n (h_\mathbb{C})$ from the sample.
\State {\bf Step 2.} For $b=1,\ldots,B$ generate resamples in the following way:
\begin{itemize}
\setlength{\itemindent}{3.8em}
\item[{\bf Step 2.1.}] Generate $(U_{1i}^{b}, U_{2i}^{b})$ from the copula $\mathbb{C}$ with $\widehat{\theta}_0$ as the copula parameter value.
\item[{\bf Step 2.2.}] Obtain $(T_{1i}^{b}, T_{2i}^{b})$ via $T_{ki}^{b}=\widehat{S}_{k|x}^{-1}(U_{ki}^{b}|X_i)$, for $k=1,2$.
\item[{\bf Step 2.3.}] Generate $(C_{1i}^{b}, C_{2i}^{b})$ under one of the following scenarios:
\begin{itemize}
\setlength{\itemindent}{5.4em}
\item[{\bf Step 2.3a.}] {\bf Non-univariate censoring:} If $\delta_{ki}=0$, set $C_{ki}^{b}=Y_{ki}$. If $\delta_{ki}=1$, generate $C_{ki}^{b}$ from the conditional censoring distribution given that $C_{ki}>Y_{ki}$, i.e. generate $C_{ki}^{b}$ from
$$
\frac{\widetilde{G}_k(t)-\widetilde{G}_k(Y_{ki})}{1-\widetilde{G}_k(Y_{ki})},
$$
where $\widetilde{G}_k$ is the Kaplan-Meier estimator of the censoring distribution based on the observations $(Y_{ki},1-\delta_{ki})$ for $k=1,2$ and $i = 1, \ldots,n$.
\item[{\bf Step 2.3b.}] {\bf Univariate censoring:} if $\delta_{ki}=0$ for at least one $k$ ($k=1,2$), set $C_{i}^{b}= \max(Y_{1i},Y_{2i})$; if $\delta_{ki}=1$ for all $k$ ($k=1,2$), generate $C_{i}^{b}$ from the conditional censoring distribution given that $C_i> Y_{i,\rm{max}} = \max\{Y_{1i},Y_{2i}\}$, i.e. generate $C_{i}^{b}$ from
$$
\displaystyle \frac{\widetilde{G}(t)-\widetilde{G}(Y_{i,\rm{max}})}{1-\widetilde{G}(Y_{i,\rm{max}})},
$$
where $\widetilde{G}$ is the Kaplan-Meier estimator of the common censoring distribution based on the observations $(Y_{i,\max},1-\delta_{1i}\delta_{2i})$, for $i = 1, \ldots,n$.
\end{itemize}
\item[{\bf Step 2.4.}] Set $Y_{ki}^{b}=\min\{T_{ki}^{b},C_{ki}^{b}\}$ and $\delta_{ki}^{b}=I(T_{ki}^{b} \leq C_{ki}^{b})$.
\item[{\bf Step 2.5.}]Set $\widehat{U}_{ki}^{b}=\widehat{S}_{k|x}^{b}(Y_{ki}^{b}|X_i)$ with $\widehat{S}_{k|x}^{b}$ the estimate of $S_{k|x}$ based on the observations $(Y_{ki}^{b}, \delta_{ki}^{b})$, for $k=1,2$ and $i = 1, \ldots,n$.
\item[{\bf Step 2.6.}] Fit the copula $\mathbb{C}$ to $(\widehat{U}_{1i}^{b}, \widehat{U}_{2i}^{b})$ and obtain the bootstrap value of the generalized likelihood ratio statistic: $\lambda_n^{b} (h_\mathbb{C})$.
\end{itemize}
\State {\bf Step 3.} Calculate the approximate p-value via
$$
p_{\rm{boot}}=\sum_{b=1}^B I\left( \lambda_n^{b} (h_\mathbb{C}) \geq \lambda_n (h_\mathbb{C})\right) /B.
$$
\end{algorithmic}
\caption{\small Bootstrap algorithm for the GLR statistic under parametrically estimated conditional marginal survival functions.}
\label{alg:1}
}
\end{algorithm}
\begin{algorithm}
{\footnotesize
\begin{algorithmic}
\State {\bf Step 1.} Obtain $\lambda_n (h_1, h_2, h_\mathbb{C})$ from the sample.
\State {\bf Step 2.} For $b=1,\ldots,B$ generate resamples in the following way:
\begin{itemize}
\setlength{\itemindent}{3.8em}
\item[{\bf Step 2.1.}] Generate $(U_{1i}^{b}, U_{2i}^{b})$ from the copula $\mathbb{C}$ with $\widetilde{\theta}_0$ as the copula parameter value.
\item[{\bf Step 2.2.}] Obtain $(T_{1i}^{b}, T_{2i}^{b})$ via $T_{ki}^{b}=\widetilde{S}_{k|x}^{-1}(U_{ki}^{b}|X_i)$, for $k=1,2$.
\item[{\bf Step 2.3.}] Generate $(C_{1i}^{b}, C_{2i}^{b})$ under one of the following scenarios:
\begin{itemize}
\setlength{\itemindent}{5.4em}
\item[{\bf Step 2.3a.}] {\bf Non-univariate censoring:} If $\delta_{ki}=0$, set $C_{ki}^{b}=Y_{ki}$. If $\delta_{ki}=1$, generate $C_{ki}^{b}$ from the conditional censoring distribution given that $C_{ki}>Y_{ki}$, i.e. generate $C_{ki}^{b}$ from
$$
\frac{\widetilde{G}_k(t)-\widetilde{G}_k(Y_{ki})}{1-\widetilde{G}_k(Y_{ki})},
$$
where $\widetilde{G}_k$ is the Kaplan-Meier estimator of the censoring distribution based on the observations $(Y_{ki},1-\delta_{ki})$ for $k=1,2$ and $i = 1, \ldots,n$.
\item[{\bf Step 2.3b.}] {\bf Univariate censoring:} if $\delta_{ki}=0$ for at least one $k$ ($k=1,2$), set $C_{i}^{b}= \max(Y_{1i},Y_{2i})$; if $\delta_{ki}=1$ for all $k$ ($k=1,2$), generate $C_{i}^{b}$ from the conditional censoring distribution given that $C_i> Y_{i,\rm{max}} = \max\{Y_{1i},Y_{2i}\}$, i.e. generate $C_{i}^{b}$ from
$$
\displaystyle \frac{\widetilde{G}(t)-\widetilde{G}(Y_{i,\rm{max}})}{1-\widetilde{G}(Y_{i,\rm{max}})},
$$
where $\widetilde{G}$ is the Kaplan-Meier estimator of the common censoring distribution based on the observations $(Y_{i,\max},1-\delta_{1i}\delta_{2i})$, for $i = 1, \ldots,n$.
\end{itemize}
\item[{\bf Step 2.4.}] Set $Y_{ki}^{b}=\min\{T_{ki}^{b},C_{ki}^{b}\}$ and $\delta_{ki}^{b}=I(T_{ki}^{b} \leq C_{ki}^{b})$.
\item[{\bf Step 2.5.}]Set $\widetilde{U}_{ki}^{b}=\widetilde{S}_{k|x}^{b}(Y_{ki}^{b}|X_i)$ with $\widetilde{S}_{k|x}^{b}$ the estimate of $S_{k|x}$ based on the observations $(Y_{ki}^{b}, \delta_{ki}^{b})$, for $k=1,2$ and $i = 1, \ldots,n$.
\item[{\bf Step 2.6.}] Fit the copula $\mathbb{C}$ to $(\widetilde{U}_{1i}^{b}, \widetilde{U}_{2i}^{b})$ and obtain the bootstrap value of the generalized likelihood ratio statistic: $\lambda_n^{b} (h_1,h_2,h_\mathbb{C})$.
\end{itemize}
\State {\bf Step 3.} Calculate the approximate p-value via
$$
p_{\rm{boot}}=\sum_{b=1}^B I\left( \lambda_n^{b} (h_1, h_2, h_\mathbb{C}) \geq \lambda_n (h_1, h_2, h_\mathbb{C}) \right) /B.
$$\end{algorithmic}
\caption{Bootstrap algorithm for the GLR statistic under nonparametrically estimated conditional marginal survival functions.}
\label{alg:2}
}\end{algorithm}
\setcounter{table}{0}
\renewcommand{\thetable}{S\arabic{table}}
\setcounter{figure}{0}
\renewcommand{\thefigure}{S\arabic{figure}}
\section*{Supplemental Material}
{\bf \large Simulation Results under the Frank and Gumbel copulas}
\begin{table}[h!] \footnotesize
\caption{Integrated Squared Bias, Integrated Variance and Integrated Mean Square Error (multiplied by 100) of the Kendall's tau estimates under the Frank copula.}
\label{MISE_Frank}
\centering
\begin{tabular}{cccccccccccccc}
\toprule \\ [-1.5ex]
&&&&& \multicolumn{6}{c}{} &&& \\[-1.75ex]
&&&& \multicolumn{3}{c}{Parametric Margins}&&&\multicolumn{3}{c}{Nonparametric Margins} \\[0.5ex] \cmidrule(r){5-8} \cmidrule(r){10-13}
& $\%$ cens & n && IBIAS$^{2}$ & IVAR & IMSE &&& IBIAS$^{2}$ & IVAR & IMSE \\
\midrule
\multirow{6}{*}{Constant}
& 0 & 250 && 0.003 & 0.544 & 0.547 &&& 0.014 & 0.639 & 0.653\\
& & 500 && 0.002 & 0.264 & 0.266 &&& 0.005 & 0.307 & 0.311 \\
& 20 & 250 && 0.002 & 0.657 & 0.659 &&& 0.022 & 0.792 & 0.813 \\
& & 500 && 0.001 & 0.329 & 0.330 &&& 0.010 & 0.375 & 0.385 \\
& 50 & 250 && 0.005 & 1.069 & 1.074 &&& 0.031 & 1.162 & 1.194 \\
& & 500 && 0.002 & 0.501 & 0.503 &&& 0.010 & 0.579 & 0.589
\\ [0.8ex] \midrule
\multirow{6}{*}{Convex}
& 0 & 250 && 0.173 & 1.556 & 1.729 &&& 0.258 & 1.587 & 1.845\\
& & 500 && 0.098 & 0.756 & 0.854 &&& 0.144 & 0.795 & 0.938 \\
& 20 & 250 && 0.180 & 1.857 & 2.037 &&& 0.261 & 1.937 & 2.198 \\
& & 500 && 0.097 & 0.932 & 1.029 &&& 0.150 & 0.959 & 1.109 \\
& 50 & 250 && 0.308 & 2.853 & 3.161 &&& 0.404 & 2.934 & 3.339 \\
& & 500 && 0.152 & 1.556 & 1.708 &&& 0.201 & 1.527 & 1.728
\\ [0.8ex] \midrule
\multirow{6}{*}{Concave}
& 0 & 250 && 0.078 & 1.045 & 1.123 &&& 0.104 & 1.224 & 1.328\\
& & 500 && 0.034 & 0.562 & 0.596 &&& 0.037 & 0.603 & 0.640 \\
& 20 & 250 && 0.093 & 1.122 & 1.215 &&& 0.115 & 1.349 & 1.464 \\
& & 500 && 0.048 & 0.615 & 0.663 &&& 0.049 & 0.661 & 0.710 \\
& 50 & 250 && 0.142 & 1.683 & 1.825 &&& 0.214 & 1.898 & 2.111 \\
& & 500 && 0.073 & 0.920 & 0.993 &&& 0.086 & 1.009 & 1.095
\\ [0.8ex]
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h!] \footnotesize
\caption{Integrated Squared Bias, Integrated Variance and Integrated Mean Square Error (multiplied by 100) of the Kendall's tau estimates under the Gumbel copula.}
\label{MISE_Gumbel}
\centering
\begin{tabular}{cccccccccccccc}
\toprule \\ [-1.5ex]
&&&&& \multicolumn{6}{c}{} &&& \\[-1.75ex]
&&&& \multicolumn{3}{c}{Parametric Margins}&&&\multicolumn{3}{c}{Nonparametric Margins} \\[0.5ex] \cmidrule(r){5-8} \cmidrule(r){10-13}
& $\%$ cens & n && IBIAS$^{2}$ & IVAR & IMSE &&& IBIAS$^{2}$ & IVAR & IMSE \\
\midrule
\multirow{6}{*}{Constant}
& 0 & 250 && 0.005 & 0.581 & 0.586 &&& 0.015 & 0.802 & 0.817\\
& & 500 && 0.001 & 0.286 & 0.287 &&& 0.008 & 0.370 & 0.378 \\
& 20 & 250 && 0.006 & 0.690 & 0.696 &&& 0.010 & 0.973 & 0.974 \\
& & 500 && 0.001 & 0.334 & 0.336 &&& 0.004 & 0.440 & 0.444 \\
& 50 & 250 && 0.006 & 1.176 & 1.182 &&& 0.022 & 1.420 & 1.442 \\
& & 500 && 0.003 & 0.504 & 0.507 &&& 0.005 & 0.651 & 0.656
\\ [0.8ex] \midrule
\multirow{6}{*}{Convex}
& 0 & 250 && 0.089 & 1.688 & 1.777 &&& 0.239 & 1.946 & 2.184 \\
& & 500 && 0.068 & 0.812 & 0.880 &&& 0.181 & 0.853 & 1.035 \\
& 20 & 250 && 0.102 & 2.570 & 2.672 &&& 0.202 & 2.889 & 3.092 \\
& & 500 && 0.074 & 0.981 & 1.056 &&& 0.181 & 1.055 & 1.236 \\
& 50 & 250 && 0.169 & 3.847 & 4.016 &&& 0.328 & 3.740 & 4.068 \\
& & 500 && 0.086 & 1.497 & 1.583 &&& 0.236 & 1.585 & 1.820
\\ [0.8ex] \midrule
\multirow{6}{*}{Concave}
& 0 & 250 && 0.108 & 0.988 & 1.096 &&& 0.121 & 1.219 & 1.341\\
& & 500 && 0.051 & 0.506 & 0.557 &&& 0.046 & 0.620 & 0.666 \\
& 20 & 250 && 0.125 & 1.109 & 1.234 &&& 0.123 & 1.368 & 1.491 \\
& & 500 && 0.056 & 0.549 & 0.605 &&& 0.054 & 0.674 & 0.728 \\
& 50 & 250 && 0.145 & 1.591 & 1.736 &&& 0.166 & 1.895 & 2.062 \\
& & 500 && 0.087 & 0.837 & 0.924 &&& 0.084 & 0.964 & 1.048
\\ [0.8ex]
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h!] \footnotesize
\caption{The empirical type I error (constant model) and the empirical power (convex and concave models) at significance level $5\%$ under the Frank copula. }
\label{test_Frank}
\centering
\begin{tabular}{ccccccc}
\toprule \\ [-1.5ex]
&&&&& \multicolumn{2}{c}{} \\[-1.75ex]
&&&& Parametric Margins && Nonparametric Margins \\[0.5ex] \cmidrule(r){1-7}
& censoring rate & n && rejection rate && rejection rate\\
\midrule
\multirow{3}{*}{Constant}
& 0 \% & 250 && 0.110 && 0.105\\
& 20 \% & 250 && 0.085 && 0.085\\
& 50 \% & 250 && 0.095 && 0.100
\\ [0.8ex] \midrule
\multirow{3}{*}{Convex}
& 0 \% & 250 && 0.960 && 0.935\\
& 20 \% & 250 && 0.945 && 0.915\\
& 50 \%& 250 && 0.805 && 0.745
\\ [0.8ex] \midrule
\multirow{3}{*}{Concave}
& 0 \% & 250 && 0.985 && 0.975\\
& 20 \% & 250 && 0.970 && 0.920\\
& 50 \%& 250 && 0.850 && 0.735
\\ [0.8ex]
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h!] \footnotesize
\caption{The empirical type I error (constant model) and the empirical power (convex and concave models) at significance level $5\%$ under the Gumbel copula. }
\label{test_Gumbel}
\centering
\begin{tabular}{ccccccc}
\toprule \\ [-1.5ex]
&&&&& \multicolumn{2}{c}{} \\[-1.75ex]
&&&& Parametric Margins && Nonparametric Margins \\[0.5ex] \cmidrule(r){1-7}
& censoring rate & n && rejection rate && rejection rate\\
\midrule
\multirow{3}{*}{Constant}
& 0 \% & 250 && 0.090 && 0.090\\
& 20 \% & 250 && 0.080 && 0.090\\
& 50 \% & 250 && 0.115 && 0.125
\\ [0.8ex] \midrule
\multirow{3}{*}{Convex}
& 0 \% & 250 && 0.980 && 0.875\\
& 20 \% & 250 && 0.950 && 0.845\\
& 50 \%& 250 && 0.835 && 0.730
\\ [0.8ex] \midrule
\multirow{3}{*}{Concave}
& 0 \% & 250 && 0.985 && 0.935\\
& 20 \% & 250 && 0.990 && 0.875\\
& 50 \%& 250 && 0.880 && 0.755
\\ [0.8ex]
\bottomrule
\end{tabular}
\end{table}
{\bf \large Details on the Analysis of Diabetic Retinopathy Data}
\begin{table}[h!] \footnotesize
\caption{Parameter estimates and their standard errors (in parentheses) for the conditional marginal survival functions of the event times given the age at onset under the Weibull model.}
\label{table:DRS Weibull}
\centering
\begin{tabular}{ccc cc ccc}
\toprule \\ [-2ex]
\multicolumn{3}{c}{Treated Eye} && \multicolumn{3}{c}{Untreated Eye} \\[0.5ex]
$\rho_1$ & $\lambda_1$ & $\beta_1$ && $\rho_2$ & $\lambda_2$ & $\beta_2$ \\
\midrule
0.788 & 0.021 & -0.015 && 0.830 & 0.022 & 0.014 \\
(0.099) & (0.009)& (0.010) && (0.074) & (0.007)& (0.007) \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Local likelihood estimates of the Kendall's tau (dashed lines) as a function of the age at onset of diabetic retinopathy obtained from parametrically (left panel) and nonparametrically (right panel) estimated conditional survival functions, along with the $90\%$ bootstrap confidence intervals (dotted lines) under the Frank copula. Also shown are the maximum likelihood estimates of the Kendall's tau (grey solid lines) obtained under the constant and linear calibration models.}
\label{fig:drsF}
\end{figure}
\begin{figure}
\caption{Local likelihood estimates of the Kendall's tau (dashed lines) as a function of the age at onset of diabetic retinopathy obtained from parametrically (left panel) and nonparametrically (right panel) estimated conditional survival functions, along with the $90\%$ bootstrap confidence intervals (dotted lines) under the Gumbel copula. Also shown are the maximum likelihood estimates of the Kendall's tau (grey solid lines) obtained under the constant and linear calibration models.}
\label{fig:drsG}
\end{figure}
\begin{table}[h!] \footnotesize
\caption{Test results for the impact of the age at onset of diabetic retinopathy on the dependence between the times-to-blindness in the treated and untreated eyes under the Clayton, Frank and Gumbel copulas. Provided below are the bootstrap $p$-values for the generalized likelihood ratio test and the approximate $p$-values for the traditional likelihood ratio test when the conditional survival functions are estimated parametrically (left panel) and nonparametrically (right panel). Under each case, the selected bandwidth values are also reported. }
\label{table:DRS Test}
\centering
\begin{tabular}{c ccc cc ccc}
\toprule \\ [-2ex]
& \multicolumn{3}{c}{Parametric Margins} && \multicolumn{3}{c}{Nonparametric Margins} \\[1ex]
& GLRT & LRT & $h_\mathbb{C} $ && GLRT & LRT & $(h_1, h_2,h_\mathbb{C} )$ \\[2ex]
\midrule
Clayton & 0.138 & 0.111 & 42 && 0.540 & 0.275 & (3, 3, 42) \\[1ex]
Frank & 0.125 & 0.120 & 23 && 0.440 & 0.221 & (3, 3, 57) \\[1ex]
Gumbel & 0.290 & 0.148& 42 && 0.515 &0.200& (5, 3, 42)\\
\bottomrule
\end{tabular}
\end{table}
\end{document}
\begin{document}
\title[One-phase Stokes flow by capillarity]{Capillarity driven Stokes flow:\\ the one-phase problem as small viscosity limit}
\thanks{Partially supported by DFG Research Training Group~2339 ``Interfaces, Complex Structures, and Singular Limits in Continuum Mechanics - Analysis and Numerics''}
\author{Bogdan--Vasile Matioc}
\address{Fakult\"at f\"ur Mathematik, Universit\"at Regensburg, 93040 Regensburg, Deutschland.}
\email{[email protected]}
\author{Georg Prokert}
\address{Faculty of Mathematics and Computer Science, Technical University Eindhoven, The Netherlands.}
\email{[email protected]}
\subjclass[2020]{76D07; 35R37; 35K55}
\keywords{Quasistationary Stokes problem; Singular integrals; Single layer potential.}
\begin{abstract}
We consider the quasistationary Stokes flow that describes the motion of a two-dimensional fluid body
under the influence of surface tension effects in an unbounded, infinite-bottom geometry.
We reformulate the problem as a fully nonlinear parabolic evolution problem for the function that parameterizes the boundary of the fluid, with the nonlinearities
expressed in terms of singular integrals.
We prove well-posedness of the problem in the subcritical Sobolev spaces $H^s(\mathbb{R})$ up to critical regularity
and establish parabolic smoothing properties for the solutions.
Moreover, we identify the problem as the singular limit of the two-phase quasistationary Stokes flow when the viscosity of one of the fluids vanishes.
\end{abstract}
\maketitle
\section{Introduction}
In this paper we consider the two-dimensional flow of a fluid layer $\Omega(t)$ of infinite depth
in the case when the motion of the incompressible fluid is governed by the quasistationary Stokes equations
and the motion is driven by surface tension at the free boundary $\Gamma(t)=\partial\Omega(t)$.
We consider the one-phase problem, i.e.\ no forces are exerted on the liquid by the medium above it.
The mathematical model is given by the following system of equations
\begin{subequations}\label{STOKES}
\begin{equation}\label{StP}
\left.
\begin{array}{rclll}
\mu\Delta v-\nabla p&=&0&\mbox{in $\Omega(t)$,}\\
\mathop{\rm div} v&=&0&\mbox{in $\Omega(t)$,}\\{}
T_\mu(v, p)\tilde\nu&=&\Sigma\tilde\kappa\tilde\nu&\mbox{on $\Gamma(t)$,}\\
(v, p)(x)&\to&0&\mbox{for $|x|\to\infty$,}\\
V_n&=& v\cdot\tilde \nu&\mbox{on $\Gamma(t)$}
\end{array}\right\}
\end{equation}
for $t>0$, where the interface $\Gamma(t)$ at time $t$ is given as the graph of a function $f(t,\cdot):{\mathbb R}\longrightarrow{\mathbb R}$, i.e.\ the fluid domain $\Omega(t)$ and its boundary $\Gamma(t)$ are defined by
\begin{align*}
\Omega(t)&:=\{x=(x_1,x_2)\in{\mathbb R}^2\,:\, x_2<f(t,x_1)\},\\[1ex]
\Gamma(t)&:=\partial\Omega(t):=\{(\xi, f(t,\xi))\,:\, \xi\in{\mathbb R}\}.
\end{align*}
Additionally, the interface $\Gamma(t)$ is assumed to be known at time~${t=0}$, i.e.
\begin{align}\label{IC}
f(0,\cdot)=f^{(0)}.
\end{align}
\end{subequations}
In Eq.~\eqref{StP} above, $v=v(t):\Omega(t)\longrightarrow{\mathbb R}^2$ and $p=p(t):\Omega(t)\longrightarrow{\mathbb R}$
are the velocity and the pressure of the Newtonian fluid, $\tilde\nu=(\tilde\nu^1, \tilde\nu^2)$ is the unit exterior normal to~$\partial\Omega(t)$,
$\tilde\kappa$ denotes the curvature of the interface (negative where $\Omega(t)$ is convex), and
$T_\mu(v,p)=(T_{\mu,ij}(v,p))_{1\leq i,\, j\leq 2}$ is the stress tensor, which is given by
\begin{align}\label{defT}
T_{\mu}(v, p):=- pE_2+\mu\big[\nabla v+(\nabla v)^\top\big], \quad (\nabla v)_{ij}:=\partial_jv_i.
\end{align}
Moreover, $V_n$ is the normal velocity of the interface~$\Gamma(t)$, $a\cdot b$ denotes the Euclidean scalar product of two vectors $a,\, b\in{\mathbb R}^2$, $E_2\in {\mathbb R}^{2\times 2}$ is the identity matrix,
and the positive constants~$\mu$ and~$\Sigma$ are the
dynamic viscosity of the fluid and the surface tension coefficient at the interface~$\Gamma(t)$, respectively.
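Written out in components, with the convention $(\nabla v)_{ij}=\partial_jv_i$ fixed in \eqref{defT}, the stress tensor reads
\[
T_\mu(v,p)=\begin{pmatrix}
-p+2\mu\partial_1v_1 & \mu(\partial_2v_1+\partial_1v_2)\\[1ex]
\mu(\partial_1v_2+\partial_2v_1) & -p+2\mu\partial_2v_2
\end{pmatrix},
\]
an expression which we record here merely for the reader's convenience.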
Previous analysis related to \eqref{StP} considered mainly the case of a sufficiently regular bounded fluid domain~$\Omega(t)$.
More precisely, in \cite{PG97} the authors studied the quasistationary motion of a free capillary liquid drop in ${\mathbb R}^d$ for initial data in $H^{s+1}(\Sigma)$, $s\geq s_1,$
$s_1$ being the smallest integer that satisfies $s_1>3+(d-1)/2$ and $\Sigma\subset{\mathbb R}^d$ the smooth boundary of a strictly star-shaped domain in ${\mathbb R}^d$.
The authors established in \cite{PG97} the well-posedness of the problem and they also showed that the equilibria of the problem, which are balls, are exponentially stable.
In the case when $\Sigma$ is the boundary of the unit ball, it is proven in \cite{Fr02a} (for $d=2$) that solutions corresponding to small data in $H^5(\Sigma)$ exist globally and converge to a ball,
while in \cite{FR02b} (for $d=3$) the authors established the same result for small data in $H^6(\Sigma).$
Finally, in three space dimensions and for an initially bounded geometry possessing a~${\rm C}^{3+\alpha}$-boundary, $\alpha>0$, it was shown in \cite{S99} that the quasistationary Stokes flow is well-posed,
and in \cite{Solo99} the same author rigorously justified this problem as the singular limit of the Navier-Stokes flow when the Reynolds number vanishes.
The local well-posedness and the stability issue for the two-phase quasistationary Stokes flow (with or without phase transitions) in a bounded geometry in ${\mathbb R}^d$, with $d\geq2$, have been recently studied in \cite{PS16}
in the phase space~$W^{2+\mu-2/p}_p(\Sigma)$, with~$1\geq \mu>(d+2)/p$, by using a maximal $L_p$-regularity approach.
In the context of \cite{PS16},~$\Sigma$ is a real analytic hypersurface over which the boundary between the two fluid phases is parameterized.
We emphasize that in the references discussed above the moving interface is at least of class ${\rm C}^2$,
whereas the critical $L_2$-Sobolev space for \eqref{STOKES} is~$H^{3/2}({\mathbb R})$, see \cite{MP2021, MP2022}.
Our goal is to establish the well-posedness of \eqref{STOKES} in the subcritical spaces $H^{s}({\mathbb R})$ with~$s\in(3/2,2)$, see Theorem~\ref{MT1}.
One of the obvious difficulties lies in the fact that, for $f\in H^{s}({\mathbb R})$, the curvature term in $\eqref{StP}_3$ is merely a distribution.
To handle this issue we use a strategy inspired by the approach in the papers \cite{BaDu98, MP2021, MP2022},
where, for the corresponding two-phase problem, potential theory was used to determine
the velocity and pressure fields in terms of $f$.
Such a strategy was applied also in the context of the Muskat problem, see the surveys \cite{G17, GL20}, and it provides
rather optimal results, as the mathematical reformulations of the problems obtained in this way require weaker
smallness and regularity assumptions on the data compared to other approaches based on Lagrangian or Hanzawa transformations.
The first goal of this paper is to show that, at each time instant $t>0$, the free boundary, given via~${f=f(t)}$, determines the velocity field~$v=v(t)$ and the pressure~$p=p(t)$ uniquely.
More precisely, as shown in Theorem~\ref{T:1}, if $f\in H^3({\mathbb R})$, then $(v,p)$ is given by the hydrodynamic single layer potential with a density $\beta=(\beta_1,\beta_2)^\top$ which satisfies
\begin{equation}\label{iINT}
\Big(\frac{1}{2}- \mathbb{D}(f)^*\Big)[\beta']=\Sigma \Big(-\Big(\frac{f'^2}{\omega+\omega^2}\Big)',\Big(\frac{f'}{\omega}\Big)'\Big)^\top=\Sigma g',
\end{equation}
where $(\cdot)'$ is the derivative with respect to the spatial coordinate $\xi\in{\mathbb R}$ and $g=g(f)$ is defined in \eqref{defgn} below.
Furthermore, $\mathbb{D}(f)^*$ is the $L_2$-adjoint of the double layer potential~$\mathbb{D}(f)$, see~\eqref{defD}, and $\omega:=(1+f'^2)^{1/2}$, see \eqref{nutau}.
Concerning \eqref{iINT}, the following issues need to be clarified:
\begin{itemize}
\item[(i)] the invertibility of the operators $\pm 1/2- \mathbb{D}(f)$ (and $\pm 1/2- \mathbb{D}(f)^*$) in $\mathcal{L}(H^1({\mathbb R})^2)$;
\item[(ii)] the question whether $(\frac{1}{2}- \mathbb{D}(f)^*)^{-1}g'$ is the derivative of some $\beta\in (H^2({\mathbb R}))^2$.
\end{itemize}
We remark that these issues are new compared to the treatment of the two-phase problem.
With respect to (i), the main step is performed in Theorem~\ref{T:L2spec}, where the invertibility in~${\mathcal{L}(L_2({\mathbb R})^2)}$ is established for each~$f\in{\rm BUC}^1({\mathbb R})$.
At this point we rely on the Rellich identities~\eqref{RELLICH1}-\eqref{RELLICH5} for the Stokes boundary value problem, which have been
exploited, in a bounded geometry in ${\mathbb R}^n$ with $n\geq3$, also in~\cite{FKV88}.
In the unbounded two-dimensional setting considered in the present paper we provide new arguments which use, among others,
also a Rellich identity obtained in \cite{MBV18} in the context of the Muskat problem.
Based on Theorem~\ref{T:L2spec}, we then show that these operators are invertible in $\mathcal{L}(H^k({\mathbb R})^2)$, ${k=1,\, 2}$, provided that~${f\in H^{k+1}({\mathbb R})}$, see Lemma~\ref{L:3}, and in
$\mathcal{L}(H^{s-1}({\mathbb R})^2)$ when $f\in H^{s}({\mathbb R})$, see Lemma~\ref{L:4}.
Concerning (ii), we prove in Lemma~\ref{L:A1} that, given $f\in H^s({\mathbb R})$ and $\beta\in H^1({\mathbb R})^2$, the function~${\mathbb{D}(f)[\beta]}$ belongs to~${H^1({\mathbb R})^2}$ and
\begin{equation}\label{icomder}
(\mathbb{D}(f)[\beta])'=-\mathbb{D}(f)^*[\beta'].
\end{equation}
This relation and the observation that the right side of \eqref{iINT} is a derivative enable us to essentially replace, for $f\in H^3({\mathbb R})$, Eq.~\eqref{iINT} by
\begin{equation}\label{iINT2}
\Big(\frac{1}{2}+ \mathbb{D}(f)\Big)[\beta]=\Sigma g,
\end{equation}
see Corollary~\ref{C:1}.
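Let us record the short computation behind this equivalence: if $\beta\in H^2({\mathbb R})^2$ satisfies \eqref{iINT2}, then \eqref{icomder} yields
\[
\Sigma g'=\Big(\Big(\frac{1}{2}+\mathbb{D}(f)\Big)[\beta]\Big)'=\frac{1}{2}\beta'+(\mathbb{D}(f)[\beta])'=\Big(\frac{1}{2}-\mathbb{D}(f)^*\Big)[\beta'],
\]
so that $\beta$ also solves \eqref{iINT}; the precise equivalence statement is provided in Corollary~\ref{C:1}.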
These properties, in particular Lemma~\ref{L:3}
and the equivalence of \eqref{iINT} and \eqref{iINT2}, are then used to reformulate the one-phase Stokes flow \eqref{STOKES} as the evolution problem~\eqref{NNEP1}, which has only $f$ as unknown.
Its well-posedness properties are summarized in Theorem~\ref{MT1} below.
Our second main result concerns the limit behavior for $\mu^+\to 0$ of the two-phase quasistationary Stokes problem
\begin{subequations}\label{2STOKES}
\begin{equation}\label{probint2}
\left.
\begin{array}{rclll}
\mu^\pm\Delta w^\pm-\nabla q^\pm&=&0&\mbox{in $\Omega^\pm(t)$,}\\
\mathop{\rm div} w^\pm&=&0&\mbox{in $\Omega^\pm(t)$,}\\{}
[w]&=&0&\mbox{on $\Gamma(t)$,}\\{}
[T_\mu(w, q)]\tilde\nu&=&-\Sigma\tilde\kappa\tilde\nu&\mbox{on $\Gamma(t)$,}\\
(w^\pm, q^\pm)(x)&\to&0&\mbox{for $|x|\to\infty$,}\\
V_n&=& w^\pm\cdot\tilde \nu&\mbox{on $\Gamma(t)$}
\end{array}\right\}
\end{equation}
for $t>0$ and
\begin{align}\label{IC2}
f(0)= f^{(0)},
\end{align}
\end{subequations}
with $\mu^-=\mu$ fixed.
In \eqref{probint2} it is again assumed that $\Gamma(t)$ is the graph of a function~$f(t)$,
\[
\Omega^\pm(t):=\{x=(x_1,x_2)\in{\mathbb R}^2\,:\, x_2\gtrless f(t,x_1)\},
\]
and $\tilde\nu$ is the unit exterior normal to~$\partial\Omega^-(t)$.
Moreover, $w^\pm(t)$ and $q^\pm(t)$ represent the velocity and pressure fields in $\Omega^\pm(t)$, respectively, and~$[w]$ (respectively $[T_\mu(w, q)]$)
is the jump of the velocity (respectively stress tensor) across the moving interface, see \eqref{defjump} below.
We emphasize that the limit $\mu^+\to 0$ in the formulation \eqref{2STOKES} is singular because the ellipticity of the underlying boundary value problem is lost in this limit.
In \cite{MP2022} we reformulated the two-phase Stokes problem~\eqref{2STOKES} as a nonlinear evolution equation for~$f$, see \eqref{NNEP2} below.
In Sections \ref{Sec:4.3} and \ref{Sec:4.4} of the present paper we prove that the right side of \eqref{NNEP2} has a limit for $\mu^+\to 0$, and that this limit is the right side of \eqref{NNEP1}.
In this sense, we show that the moving boundary problem~\eqref{STOKES} represents the ``regular limit'' of~\eqref{2STOKES} for $\mu^-=\mu$ and $\mu^+\to 0$.
This property is used in Section~\ref{Sec:4.4} to introduce the common formulation \eqref{NNEP} that contains both evolution problems.
It reads
\begin{equation}\label{NNEP*}
\frac{df}{dt}=\Phi(\mu^+,f),\quad t\geq 0,\qquad f(0)=f^{(0)},
\end{equation}
where $\mu^+\geq0$ is viewed as a parameter.
We point out that, though this common formulation has been derived from the Stokes flow equations under the assumption that~${f(t)\in H^3({\mathbb R})}$,
the nonlinear and nonlocal operator $\Phi$ is well-defined when assuming only $f\in H^s({\mathbb R})$, ${s\in(3/2,2)}$,
and this allows us to consider \eqref{NNEP*} under these lower smoothness assumptions.
The regularity of the limit is now seen in the fact that $\Phi$ is smooth on $[0,\infty)\times H^s({\mathbb R})$.
For any fixed~${\mu^+>0}$ we investigated the problem \eqref{NNEP*} in \cite{MP2022}.
In particular, we showed in \cite[Theorem~1.1]{MP2022} that, given $f^{(0)}\in H^{s}({\mathbb R})$, there exists a
unique maximal solution~$(f_{\mu^+},w_{\mu^+}^\pm,q_{\mu^+}^\pm)$ to~\eqref{2STOKES} such that
\begin{itemize}
\item[$\bullet$] $f_{\mu^+}=f_{\mu^+}(\cdot; f^{(0)})\in {\rm C}([0,T_{+,\mu^+}), H^{s}(\mathbb{R}))\cap {\rm C}^1([0,T_{+,\mu^+}), H^{s-1}(\mathbb{R})),$\\[-2ex]
\item[$\bullet$] $w_{\mu^+}^\pm(t)\in {\rm C}^2(\Omega^\pm(t))\cap {\rm C}^1(\overline{\Omega^\pm(t)})$, $q_{\mu^+}^\pm(t)\in {\rm C}^1(\Omega^\pm(t))\cap {\rm C}(\overline{\Omega^\pm(t)})$
for all ${t\in(0,T_{+,\mu^+})}$,\\[-2ex]
\item[$\bullet$] $ w_{\mu^+}^\pm(t)|_{\Gamma(t)}\circ\Xi_{f(t)}\in H^2({\mathbb R})^2$ for all $t\in(0,T_{+,\mu^+})$,\\[-2ex]
\end{itemize}
where $T_{+,\mu^+}=T_{+,\mu^+}( f^{(0)})\in (0,\infty]$ is the maximal existence time and $\Xi_{f(t)}(\xi):=(\xi, f(t,\xi))$ for~$\xi\in{\mathbb R}.$
Our first main result is based on the fact that the properties of $\Phi(\mu^+,\cdot)$ and
of its Fr\'echet derivative~$\partial_f\Phi(\mu^+,\cdot)$ that were used to prove \cite[Theorem 1.1]{MP2022} remain valid also when~${\mu^+=0}$.
\begin{thm}\label{MT1} Let $s\in(3/2,2)$ be given.
Then, the following statements hold true:
\begin{itemize}
\item[(i)] {\em (Well-posedness)} Given $f^{(0)}\in H^{s}(\mathbb{R})$, there exists a unique maximal solution~$(f,v,p)$ to \eqref{STOKES} such that
\begin{itemize}
\item[$\bullet$] $f=f(\cdot;f^{(0)})\in {\rm C}([0,T_+), H^{s}(\mathbb{R}))\cap {\rm C}^1([0,T_+), H^{s-1}(\mathbb{R})),$
\item[$\bullet$] $v(t)\in {\rm C}^2(\Omega(t))\cap {\rm C}^1(\overline{\Omega(t)})$, $p(t)\in {\rm C}^1(\Omega(t))\cap {\rm C}(\overline{\Omega(t)})$ for all ${t\in(0,T_+)}$,
\item[$\bullet$] $ v(t)|_{\Gamma(t)}\circ\Xi_{f(t)}\in H^2({\mathbb R})^2$ for all $t\in(0,T_+)$,
\end{itemize}
where $T_+=T_+( f^{(0)})\in (0,\infty]$ is the maximal existence time.
Moreover, the set
$$\mathcal{M}:=\{(t, f^{(0)})\,:\, f^{(0)}\in H^s({\mathbb R}),\,0< t<T_+( f^{(0)})\}$$ is open in~${(0,\infty)\times H^s({\mathbb R})}$,
and $[(t, f^{(0)})\longmapsto f(t; f^{(0)})]$ defines a semiflow on $H^s({\mathbb R})$ which is smooth in~$\mathcal{M}$.\\[-2.2ex]
\item[(ii)] {\em (Parabolic smoothing)}
\begin{itemize}
\item[(iia)] The map $[(t,\xi)\longmapsto f(t,\xi)]:(0,T_+)\times\mathbb{R}\longrightarrow\mathbb{R}$ is a ${\rm C}^\infty$-function. \\[-2ex]
\item[(iib)] For any $k\in{\mathbb N}$, we have $f\in {\rm C}^\infty ((0,T_+), H^k(\mathbb{R})).$\\[-2ex]
\end{itemize}
\pagebreak
\item[(iii)] {\em (Global existence)} If
$$\sup_{[0,T]\cap [0,T_+( f^{(0)}))} \|f(t)\|_{H^s}<\infty$$
for each $T>0$, then $T_+(f^{(0)})=\infty.$
\end{itemize}
\end{thm}
Observe, in particular, that by Theorem~\ref{MT1}~(iib) we have $f(t)\in H^3({\mathbb R})$ for~${t>0}$, which justifies the assumptions that were made when deriving the reformulation~\eqref{NNEP1}.
Thus, the solutions we construct correspond to one-phase Stokes flows starting from initial domains whose boundaries may have a curvature in the distributional sense only.
Our second main result gives a precise formulation of the limit result announced above.
We recall the notation $(f_{\mu^+}(\cdot; f^{(0)}),w^\pm_{\mu^+},q^\pm_{\mu^+})$ for solutions to the two-phase problem \eqref{2STOKES}.
\begin{thm}\label{MT2} Let $s\in(3/2,2)$ and $ f^{(0)}\in H^{s}(\mathbb{R})$ be given.
Let further $(f(\cdot; f^{(0)}),v,p)$ denote the maximal solution to \eqref{STOKES} identified in Theorem~\ref{MT1} and choose $T\in (0,T_+( f^{(0)}))$.
Then, there exist constants $\varepsilon>0$ and $M>0$ such that for all $\mu^+\in (0, \varepsilon]$ we have $T<T_{+,\mu^+}( f^{(0)})$ and
\[
\big\|f(\cdot ; f^{(0)})-f_{\mu^+}(\cdot;f^{(0)})\big\|_{{\rm C}([0,T], H^s({\mathbb R}))}+\Big\|\frac{d}{dt}\big(f(\cdot ; f^{(0)})-f_{\mu^+}(\cdot; f^{(0)})\big)\Big\|_{{\rm C}([0,T], H^{s-1}({\mathbb R}))}\leq M\mu^+.
\]
\end{thm}
The proofs of the main results are presented in Section~\ref{Sec:4.5}, which concludes the paper.
\section{The Stokes boundary value problem in a fixed domain}\label{Sec:2}
In this section we fix $f\in H^3({\mathbb R})$ and consider the Stokes boundary value problem
\begin{equation}\label{SBVP}
\left.
\begin{array}{rclll}
\mu\Delta v-\nabla p&=&0&\mbox{in $\Omega$,}\\
\mathop{\rm div} v&=&0&\mbox{in $\Omega$,}\\{}
T_\mu(v, p)\tilde\nu&=&\Sigma\tilde\kappa\tilde\nu&\mbox{on $\Gamma$,}\\
(v, p)(x)&\to&0&\mbox{for $|x|\to\infty$,}
\end{array}\right\}
\end{equation}
where $\Omega:=\{x=(x_1,x_2)\in{\mathbb R}^2\,:\, x_2<f(x_1)\}$ and $\Gamma:=\{(\xi, f(\xi))\,:\, \xi\in{\mathbb R}\}$.
The main goal is to show that \eqref{SBVP} has a unique solution $(v,p)$, see Theorem~\ref{T:1} below.
We start by introducing some notation.
Since $\Gamma$ is a graph over ${\mathbb R}$, it is natural to view $\Gamma$ as the image of ${\mathbb R}$ under the diffeomorphism $\Xi:=\Xi_f:=({\rm id}_\mathbb{R},f).$
Let now $\nu$ and $\tau$ denote the componentwise pull-backs under~$\Xi$ of the unit normal~$\tilde\nu$ on~$\Gamma$ exterior to~$\Omega$ and of the unit tangent vector $\tilde\tau$ to~$\Gamma$, i.e.
\begin{align}\label{nutau}
\nu:=\nu(f):=\frac{1}{\omega}(-f',1)^\top,\qquad\tau:=\tau(f):=\frac{1}{\omega}(1,f')^\top,\qquad \omega:=\omega(f):=(1+f'^2)^{1/2}.
\end{align}
Observe that the pull-back~$\kappa:=\omega^{-3}f''\in H^1({\mathbb R})$ of the curvature $\tilde\kappa$ satisfies
\begin{equation}\label{fundfor}
\omega\kappa\nu=g',
\end{equation}
where $g:=g(f)$ is given by
\begin{equation}\label{defgn}
g:=(g_1,g_2)^\top:=(\omega^{-1}-1,\omega^{-1}f')^\top=\Big(-\frac{{f'}^2}{\omega+\omega^2},\frac{f'}{\omega}\Big)^\top.
\end{equation}
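For the reader's convenience we include the short computation behind \eqref{fundfor}, which uses only \eqref{nutau} together with the identities $\omega'=f'f''/\omega$ and $\omega^2-f'^2=1$:
\begin{align*}
g_1'&=\big(\omega^{-1}-1\big)'=-\frac{\omega'}{\omega^2}=-\frac{f'f''}{\omega^3}=\omega\,\frac{f''}{\omega^3}\Big(-\frac{f'}{\omega}\Big)=\omega\kappa\nu_1,\\[1ex]
g_2'&=\Big(\frac{f'}{\omega}\Big)'=\frac{f''}{\omega}-\frac{f'^2f''}{\omega^3}=\frac{(\omega^2-f'^2)f''}{\omega^3}=\omega\,\frac{f''}{\omega^3}\,\frac{1}{\omega}=\omega\kappa\nu_2.
\end{align*}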
We further recall that the fundamental solutions $(\mathcal{U}^k,\mathcal{P}^k):\mathbb{R}^2\setminus\{0\}\longrightarrow\mathbb{R}^2\times\mathbb{R},$ $k=1,\,2,$
with $\mathcal{U}^k=(\mathcal{U}^k_1, \mathcal{U}^k_2)^\top$, to the Stokes equations
\begin{equation}\label{inhstosy}
\left.\begin{array}{rllll}
\mu \Delta U-\nabla P&=&0,\\[1ex]
\mathop{\rm div} U&=&0
\end{array}\right\}\qquad\text{in ${\mathbb R}^2\setminus\{0\}$}
\end{equation}
are given by
\begin{equation}\label{fundup}
\begin{aligned}
\mathcal{U}_j^k(y)&=-\frac{1}{4\pi\mu}\left(\delta_{jk}\ln\frac{1}{|y|}+\frac{y_jy_k}{|y|^2}\right),\quad j=1,\,2,\\[1ex]
\mathcal{P}^k(y)&=-\frac{1}{2\pi}\frac{y_k}{|y|^2}
\end{aligned}
\end{equation}
for $ y=(y_1,y_2)\in{\mathbb R}^2\setminus\{0\}$, see \cite{Lad63}.
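As a consistency check, which we only sketch, one may verify directly from \eqref{fundup} that $(\mathcal{U}^k,\mathcal{P}^k)$ solves \eqref{inhstosy}: since $\ln(1/|y|)$ is harmonic in ${\mathbb R}^2\setminus\{0\}$ and $\Delta\big(y_jy_k/|y|^2\big)=2\delta_{jk}/|y|^2-4y_jy_k/|y|^4$, we have for $y\neq0$
\begin{align*}
\mathop{\rm div}\mathcal{U}^k(y)&=-\frac{1}{4\pi\mu}\Big(\partial_k\ln\frac{1}{|y|}+\sum_{j=1}^2\partial_j\frac{y_jy_k}{|y|^2}\Big)=-\frac{1}{4\pi\mu}\Big(-\frac{y_k}{|y|^2}+\frac{y_k}{|y|^2}\Big)=0,\\[1ex]
\mu\Delta\mathcal{U}^k_j(y)&=-\frac{\delta_{jk}}{2\pi|y|^2}+\frac{y_jy_k}{\pi|y|^4}=\partial_j\mathcal{P}^k(y).
\end{align*}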
Finally, defining the mapping $r:=(r_1,r_2):{\mathbb R}^2\longrightarrow{\mathbb R}^2$ by the formula
\begin{equation}\label{rxs}
r:=r(\xi,s):=(\xi-s,f(\xi)-f(s)), \quad (\xi,s)\in{\mathbb R}^2,
\end{equation}
we introduce the double layer potential $\mathbb{D}(f)$ for the Stokes equations associated with the hypersurface $\Gamma$ and its $L_2$-adjoint $\mathbb{D}(f)^\ast$ by the formulas
\begin{equation}\label{defD}
\begin{aligned}
\mathbb{D}(f)[\beta](\xi)&:=\frac{1}{\pi}\mathop{\rm PV}\nolimits\int_{\mathbb R}\frac{r_1 f'(s)- r_2}{ |r|^4}
\begin{pmatrix}
r_1^2&r_1 r_2\\
r_1 r_2& r_2^2
\end{pmatrix}\beta\,ds,\\
\mathbb{D}(f)^\ast[\beta](\xi)&:=\frac{1}{\pi}\mathop{\rm PV}\nolimits\int_{\mathbb R}\frac{-r_1 f'(\xi)+ r_2}{|r|^4}
\begin{pmatrix}
r_1^2&r_1 r_2\\
r_1r_2&r_2^2
\end{pmatrix}\beta\,ds
\end{aligned}
\end{equation}
for $\beta=(\beta_1,\beta_2)^\top\in L_2({\mathbb R})^2$ and $\xi\in{\mathbb R}$.
In \eqref{defD}, the integrals are absolutely convergent whenever $f'$ is H\"older continuous.
We prefer the definition as principal value integrals because we will later consider $f\in {\rm BUC}^1({\mathbb R})$.
Given~${k\in{\mathbb N}}$, ${\rm BUC}^k({\mathbb R})$ is the Banach space consisting of functions with bounded and uniformly continuous derivatives up to order $k$.
It is well-known that the intersection of all these spaces, denoted by ${\rm BUC}^\infty({\mathbb R})$,
is a dense subspace of ${\rm BUC}^k({\mathbb R})$ for each $k\in{\mathbb N}$.
Both operators $\mathbb{D}(f)$ and $\mathbb{D}(f)^*$ can be expressed in terms of the family~${\{B_{n,m}^0(f)\,:\, n,\, m\in{\mathbb N}\}}$
of singular integral operators introduced in \cite{MBV18,MBV19}, see \eqref{defB0} and~\eqref{DFB} below.
We now introduce these operators in a more general context.
More precisely, given~${n,\,m\in{\mathbb N}}$ and Lipschitz continuous functions~${a_1,\ldots, a_{m},\, b_1, \ldots, b_n:\mathbb{R}\longrightarrow\mathbb{R}}$, we define
\begin{equation}\label{BNM}
B_{n,m}(a_1,\ldots, a_m)[b_1,\ldots,b_n,h](\xi):=\frac{1}{\pi}\mathop{\rm PV}\nolimits\int_\mathbb{R} \frac{h(\xi-\eta)}{\eta}\cfrac{\prod_{i=1}^{n}\big(\delta_{[\xi,\eta]} b_i /\eta\big)}{\prod_{i=1}^{m}\big[1+\big(\delta_{[\xi,\eta]} a_i /\eta\big)^2\big]}\, d\eta,
\end{equation}
where $\delta_{[\xi,\eta]}u:=u(\xi)-u(\xi-\eta)$.
For brevity we set
\begin{equation}\label{defB0}
B^0_{n,m}(f):=B_{n,m}(f,\ldots ,f)[f,\ldots,f,\cdot].
\end{equation}
We note that $B^0_{0,0}=H$, where $H$ denotes the Hilbert transform.
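Indeed, for $n=m=0$ both products in \eqref{BNM} are empty (and thus equal to $1$), so that
\[
B^0_{0,0}[h](\xi)=\frac{1}{\pi}\mathop{\rm PV}\nolimits\int_\mathbb{R}\frac{h(\xi-\eta)}{\eta}\,d\eta=\frac{1}{\pi}\mathop{\rm PV}\nolimits\int_\mathbb{R}\frac{h(s)}{\xi-s}\,ds=H[h](\xi),
\]
in accordance with the standard convention for the Hilbert transform.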
We now prove that the boundary value problem \eqref{SBVP} is uniquely solvable and that the solution is given by the hydrodynamic single layer potential with a suitable density function~$\beta$.
\begin{thm}\label{T:1}
Given $f\in H^3({\mathbb R})$, the boundary value problem \eqref{SBVP} has a unique solution~$(v,p)$ such that
\[
v\in {\rm C}^2(\Omega)\cap {\rm C}^1 (\overline\Omega),\quad p\in {\rm C}^1(\Omega)\cap {\rm C} (\overline\Omega),\quad v|_\Gamma \circ\Xi\in H^2({\mathbb R})^2.
\]
Moreover, letting $\beta=(\beta_1,\beta_2)^\top\in H^2({\mathbb R})^2$ denote the unique solution to the equation
\begin{equation}\label{invertcom}
\Big(\frac{1}{2}- \mathbb{D}(f)^*\Big)[\beta']=\Sigma g',
\end{equation}
where $g\in H^2({\mathbb R})^2$ is defined in \eqref{defgn}, we have
\begin{equation}\label{Solutions}
\begin{aligned}
v(x)&:=\displaystyle\int_\mathbb{R}\partial_s\big(\mathcal{U}^k(x-(s,f(s)))\big)\beta_k(s)\,ds,\\[1ex]
p( x)&:=\displaystyle-\int_\mathbb{R}\mathcal{P}^k( x-(s,f(s)))\beta_k'(s)\,ds=\displaystyle\int_\mathbb{R}\partial_s\big(\mathcal{P}^k( x-(s,f(s)))\big)\beta_k(s)\,ds,\quad x\in\Omega.
\end{aligned}
\end{equation}
\end{thm}
\begin{equation}gin{proof}
The unique solvability of Eq. \varepsilonqref{invertcom} is established in Section~\mathop{\rm Re}\nolimitsf{Sec:4} and is taken for granted in this proof.
We divide the proof in two steps.
\noindent{\varepsilonm Step 1: Uniqueness.}
It suffices to show that the homogeneous boundary value problem \varepsilonqref{SBVP} (with the right side of \varepsilonqref{SBVP}$_3$ set to be zero) has only the trivial solution.
Let thus $(v,p) $ be a solution to the homogeneous system \varepsilonqref{SBVP} with regularity as prescribed above.
We then set~${\Omega^-:=\Omega}$, $\Omega^+:={\mathbb R}^2\mathbb Setminus\overline \Omega$, and we define~${(w^\partialm,q^\partialm):\Omega^\partialm\longrightarrow {\mathbb R}^2\times{\mathbb R}}$ by
\[
(w^-,w^+):=(\mu v,0)\qquad\text{and} \qquad (q^-,q^+):=(p,0).
\]
Clearly, it holds
\[
w^\partialm\in {\rm C}^2(\Omega^\partialm)\cap {\rm C}^1 (\overline{\Omega^\partialm}),\quad q^\partialm\in {\rm C}^1(\Omega^\partialm)\cap {\rm C} (\overline{\Omega^\partialm}),\quad w^\partialm|_\Gamma \circ\Xi\in H^2({\mathbb R})^2.
\]
Moreover, it can be easily checked that $(w^\partialm,q^\partialm)$ solves the boundary value problem
\begin{equation}\label{bvpaux}
\left.\begin{equation}gin{array}{rcll}
\Delta w^\partialm-\nabla q^\partialm&=&0&\mbox{in $\Omega^\partialm$,}\\
\mathop{\rm div} w^\partialm&=&0&\mbox{in $\Omega^\partialm$,}\\{}
[w]&=&\gamma\circ\Xi^{-1}&\mbox{on $\Gammaamma$,}\\
{}[T_1(w,q)](\nu\circ \Xi^{-1})&=&0&\mbox{on $\Gammaamma$,}\\
(w^\partialm,q^\partialm)(x)&\to&0&\mbox{for $|x|\to\infty$,}
\varepsilonnd{array}\right\}
\varepsilone
with $\gamma:=-w^-|_\Gammaamma\circ\Xi\in H^2({\mathbb R})^2$.
Given $z^\partialm\in {\rm C} (\overline{\Omega^\partialm}),$ we define $[z]$ as being the jump
\begin{equation}gin{equation}\label{defjump}
[z] (x):=z^+(x)-z^-(x),\qquad x\in\Gammaamma.
\varepsilonnd{equation}
According to \cite[Proposition 2.1]{MP2022}, the system \varepsilonqref{bvpaux} has a unique solution.
Moreover, we have
\begin{equation}gin{equation}\label{eqsupi}
w^\partialm|_\Gammaamma\circ\Xi=\mathcal{B}ig(\partialm\frac{1}{2}-\mathbb{D}(f)\mathcal{B}ig)[\gamma],
\varepsilonnd{equation}
see \cite[Lemma A.1]{MP2022}.
Since $w^+=0$ and $1/2-\mathbb{D}(f)\in\mathcal{L}(L_2({\mathbb R})^2)$ is invertible, see~Theorem~\mathop{\rm Re}\nolimitsf{T:L2spec} below, we conclude that $\gamma=0$.
Consequently $(w^\partialm,q^\partialm)$ is the trivial solution and this establishes the uniqueness claim.
\noindent{\varepsilonm Step 2: Existence.} We are going to verify that $(v,p)$ from \varepsilonqref{Solutions} with $\begin{equation}ta$ from \varepsilonqref{invertcom} has the announced regularity and satisfies \varepsilonqref{SBVP}.
Recalling \varepsilonqref{fundup}, we have
\begin{equation}gin{equation}\label{stokesdiff}
\begin{equation}gin{aligned}
\partialartial_1\mathcal{U}^1(y)&=\frac{1}{4\partiali\mu |y|^4}\begin{equation}gin{pmatrix} y_1(y_1^2-y_2^2)\\[1ex]y_2(y_1^2-y_2^2)\varepsilonnd{pmatrix},& \partialartial_1\mathcal{P}^1(y)&=\frac{y_1^2-y_2^2}{2\partiali|y|^4},\\[1ex]
\partialartial_2\mathcal{U}^1(y)&=\frac{1}{4\partiali\mu |y|^4}\begin{equation}gin{pmatrix} y_2(y_2^2+3y_1^2)\\[1ex] y_1(y_2^2-y_1^2)\varepsilonnd{pmatrix},& \partialartial_2\mathcal{P}^1(y)&=\frac{2y_1y_2}{ 2\partiali|y|^4},\\[1ex]
\partialartial_1\mathcal{U}^2(y)&=\frac{1}{4\partiali\mu |y|^4}\begin{equation}gin{pmatrix} y_2(y_1^2-y_2^2)\\[1ex] y_1(y_1^2+3y_2^2)\varepsilonnd{pmatrix},& \partialartial_1\mathcal{P}^2(y)&=\frac{2y_1y_2}{2\partiali|y|^4},\\[1ex]
\partialartial_2\mathcal{U}^2(y)&=\frac{1}{4\partiali\mu |y|^4}\begin{equation}gin{pmatrix} y_1(y_2^2-y_1^2)\\[1ex] y_2(y_2^2-y_1^2)\varepsilonnd{pmatrix},& \partialartial_2\mathcal{P}^2(y)&=\frac{y_2^2-y_1^2}{ 2\partiali|y|^4}
\varepsilonnd{aligned}
\varepsilonnd{equation}
for $y\neq 0$.
A direct consequence of \varepsilonqref{Solutions} is that $(v,p)$ is defined as an integral of the form
\[(v,p)(x)=\int_{\mathbb R} K(x,s)\begin{equation}ta(s)\,ds,\qquad x\in\Omega,\]
where, for every $\alpha\in{\mathbb N}^2,$ we have $\partialartial^\alpha_x K(x,s)=O(s^{-1})$ for $|s|\to\infty$ and locally uniformly in~${x\in \Omega}$.
This shows that $v$ and $p$ are well-defined by \varepsilonqref{Solutions}, and that integration and differentiation with respect to $x$ may be interchanged.
Hence, $(v,p)\in {\rm C}^\infty(\Omega,{\mathbb R}^2\times{\mathbb R})$, and, since~$\partial_j(\mathcal{U}^{k},\mathcal{P}^{k}),$ $j=1,\, 2$, solve \varepsilonqref{inhstosy}, we deduce that $(v,p)$ is
a solution to \varepsilonqref{SBVP}$_1$-\varepsilonqref{SBVP}$_2$.
In view of \cite[Lemma A.1]{BM22} it holds that $p\in {\rm C}(\overline \Omega)$ and
\begin{equation}gin{equation}\label{tracep}
p|_\Gamma\circ\Xi=\frac{B_{0,1}^0(f)[\begin{equation}ta_1']+B_{1,1}^0(f)[\begin{equation}ta_2']}{2}-\frac{\begin{equation}ta'\cdot\nu}{2\omega}.
\varepsilonnd{equation}
Given $\partialhi\in H^1({\mathbb R})$, let~$Z_j[\partialhi]:\Omega\longrightarrow{\mathbb R}$, $j=0,\ldots,3,$ be given by
\[
Z_j[\partialhi](x):= \int_{\mathbb R} \frac{(x_1-s)^{3-j}(x_2-f(s))^j}{((x_1-s)^2+(x_2-f(s))^2)^2}\partialhi(s)\,ds, \quad x\in\Omega.
\]
Since
\[
\partial_i v(x)=-\int_\mathbb{R}\partialartial_i\mathcal{U}^k(x-(s,f(s)))\begin{equation}ta_k'(s)\,ds,\qquad i=1,\, 2, \, x\in\Omega,
\]
we obtain, due to \varepsilonqref{stokesdiff}, the following formulas
\begin{equation}gin{align*}
&\partial_1v_1=-\partial_2v_2=-\frac{(Z_0-Z_2)[\begin{equation}ta_1']+(Z_1-Z_3)[\begin{equation}ta_2']}{4\partiali\mu },\\[1ex]
&\partial_2v_1=-\frac{(Z_3+3Z_1)[\begin{equation}ta_1']+(Z_2-Z_0)[\begin{equation}ta_2']}{4\partiali\mu },\\[1ex]
&\partial_1v_2=-\frac{(Z_1-Z_3)[\begin{equation}ta_1']+(Z_0+3Z_2)[\begin{equation}ta_2']}{4\partiali\mu }.
\varepsilonnd{align*}
Since $ Z_j[\partialhi]\in {\rm C}(\overlineerline\Omega)$, see the proof of \cite[Lemma A.1]{MP2021}, we obtain in view of the latter identities that~${v\in {\rm C}^1(\overline \Omega)}$.
Moreover, the formula derived in the proof of \cite[Lemma A.1]{MP2021} for the traces~${Z_j[\partialhi]|_\Gammaamma}$, $j=0,\ldots,3,$ leads us to
\begin{equation}gin{equation}\label{tracenv}
\begin{equation}gin{aligned}
& \partial_1v_1|_\Gamma\circ \Xi=-\frac{(B_{0,2}^0(f)-B_{2,2}^0(f))[\begin{equation}ta_1']+(B_{1,2}^0(f)-B_{3,2}^0(f))[\begin{equation}ta_2']}{4\mu}-\frac{f'\begin{equation}ta'\cdot \tau}{2\mu\omega^3},\\[1ex]
&\partial_2v_1|_\Gamma\circ \Xi=-\frac{(B_{3,2}^0(f)+3B_{1,2}^0(f))[\begin{equation}ta_1']+(B_{2,2}^0(f)-B_{0,2}^0(f))[\begin{equation}ta_2']}{4\mu }+\frac{\begin{equation}ta'\cdot \tau}{2\mu\omega^3},\\[1ex]
&\partial_1v_2|_\Gamma\circ \Xi=-\frac{(B_{1,2}^0(f)-B_{3,2}^0(f))[\begin{equation}ta_1']+(B_{0,2}^0(f)+3B_{2,2}^0(f))[\begin{equation}ta_2']}{4\mu }-\frac{f'^2\begin{equation}ta'\cdot \tau}{2\mu\omega^3}.
\varepsilonnd{aligned}
\varepsilonnd{equation}
It is now a matter of direct computation to infer from \varepsilonqref{defT}, \varepsilonqref{fundfor}, \varepsilonqref{defD}, \varepsilonqref{tracep}, and \varepsilonqref{tracenv}
that the equation $\varepsilonqref{SBVP}_3$ is equivalent to \varepsilonqref{invertcom},
hence also $\varepsilonqref{SBVP}_3$ is satisfied.
It remains to check that the far field boundary condition $\varepsilonqref{SBVP}_4$ holds true.
To this end we infer directly from \cite[Lemma A.4]{BM22} and \varepsilonqref{Solutions}$_2$ that $p$ vanishes at infinity.
Moreover, since by \varepsilonqref{Solutions}$_1$ we have
\[
v(x)=\frac{1}{4\partiali\mu}
\int_\mathbb{R}\frac{1}{|R|^2}
\left(
\begin{equation}gin{array}{ccc}
-R_2^2&& R_1R_2\\
R_1R_2&& R_2^2
\varepsilonnd{array}\right)\begin{equation}ta'(s)\,ds
-\frac{1}{4\partiali\mu}\int_\mathbb{R}\frac{R_1+f'(s)R_2}{|R|^2}\begin{equation}ta(s)\,ds\]
for $x\in\Omega$, where $R=(R_1,R_2)$ is given by
\[
R:=R(s,x):=(x_1-s,x_2-f(s)), \qquad s\in{\mathbb R},\, x\in\Omega,
\]
we infer from \cite[Lemma A.4]{BM22} and \cite[Lemma B.2]{MP2021} that also $v$ vanishes at infinity.
In order to show that $v|_\Gamma\circ\Xi\in H^2({\mathbb R})^2$ we conclude from \varepsilonqref{Solutions}$_1$, \varepsilonqref{stokesdiff}, and the formula derived in the proof of \cite[Lemma A.1]{MP2021} for the traces~${Z_j[\partialhi]|_\Gammaamma}$, $j=0,\ldots,3,$ that
\begin{equation}gin{equation}\label{vgamma}
\begin{equation}gin{aligned}
v_1|_\Gammaamma\circ\Xi&=\frac{(B_{2,2}^0(f)-B_{0,2}^0(f))[\begin{equation}ta_1-f'\begin{equation}ta_2]-B_{1,2}^0(f)[3f'\begin{equation}ta_1+\begin{equation}ta_2] -B_{3,2}^0(f)[f'\begin{equation}ta_1-\begin{equation}ta_2]}{4\mu},\\[1ex]
v_2|_\Gammaamma\circ\Xi&=\frac{B_{0,2}^0(f)[f'\begin{equation}ta_1-\begin{equation}ta_2]+(B_{3,2}^0(f)-B_{1,2}^0(f))[\begin{equation}ta_1-f'\begin{equation}ta_2]-B_{2,2}^0(f)[f'\begin{equation}ta_1+3\begin{equation}ta_2]}{4\mu}.
\varepsilonnd{aligned}
\varepsilonnd{equation}
Since $B_{n,m}^0(f)\in\mathcal{L}(H^2({\mathbb R}))$, see Lemma~\mathop{\rm Re}\nolimitsf{L:MP0}~(iv) below, we immediately deduce from \varepsilonqref{vgamma} that indeed $v|_\Gamma\circ\Xi\in H^2({\mathbb R})^2$.
\varepsilonnd{proof}
\mathbb Section{On the invertibility of $\partialm1/2+\mathbb{D}(f)$ and $\partialm1/2+\mathbb{D}(f)^*$}\label{Sec:3}
In this section we establish the invertibility of the operators $\partialm1/2-\mathbb{D}(f)$ and~${\partialm1/2-\mathbb{D}(f)^*}$ in $\mathcal{L}(H^k({\mathbb R})^2)$, $k=0,\, 1,\, 2$,
and $\mathcal{L}(H^{s-1}({\mathbb R})^2)$,~${s\in(3/2,2)},$ under suitable regularity assumptions on $f$.
These properties are needed on the one hand in the proof of Theorem~\mathop{\rm Re}\nolimitsf{T:1}, see~\varepsilonqref{invertcom}, and on the other hand when formulating the Stokes flow as an evolution problem for~$f$,
see~Section~\mathop{\rm Re}\nolimitsf{Sec:4}.
The main step is provided by Theorem~\mathop{\rm Re}\nolimitsf{T:L2spec} below.
\begin{equation}gin{thm}\label{T:L2spec}
Given $\delta\in(0,1)$, there exists a constant~$C_0=C_0(\delta)\geq1$ such that for all~${f\in {\rm BUC}^1({\mathbb R})}$ with~${\|f'\|_\infty\leq 1/\delta}$ and all $\begin{equation}ta\in L_2({\mathbb R})^2$ we have
\begin{equation}gin{align}\label{DEest}
C_0\min\mathcal{B}ig\{\mathcal{B}ig\|\mathcal{B}ig(\partialm\frac{1}{2}-\mathbb{D}(f)\mathcal{B}ig)[\begin{equation}ta]\mathcal{B}ig\|_2,\mathcal{B}ig\|\mathcal{B}ig(\partialm\frac{1}{2}-\mathbb{D}(f)^\ast\mathcal{B}ig)[\begin{equation}ta]\mathcal{B}ig\|_2\mathcal{B}ig\}\geq \|\begin{equation}ta\|_2.
\varepsilonnd{align}
Moreover, $\partialm1/2-\mathbb{D}(f)^\ast$ and $\partialm1/2-\mathbb{D}(f)$ are invertible in $\mathcal{L}(L_2({\mathbb R}))^2$.
\varepsilonnd{thm}
The present section is devoted mainly to the proof of this theorem, which is split up in a number of steps.
\mathbb Subsection{Preliminaries}
To start, we reexpress the operators $\mathbb{D}(f)$ and $\mathbb{D}(f)^*$ by using the family of singular integral operators defined in \varepsilonqref{defB0} as follows
\begin{equation}gin{equation}\label{DFB}
\begin{equation}gin{aligned}
\mathbb{D}(f)[\begin{equation}ta]
&=\begin{equation}gin{pmatrix}
B_{0,2}^0(f)&B_{1,2}^0(f)\\[1ex]
B_{1,2}^0(f)&B_{2,2}^0(f)
\varepsilonnd{pmatrix}
\begin{equation}gin{pmatrix}
f'\begin{equation}ta_1\\[1ex]
f'\begin{equation}ta_2
\varepsilonnd{pmatrix}
-\begin{equation}gin{pmatrix}
B_{1,2}^0(f)&B_{2,2}^0(f)\\[1ex]
B_{2,2}^0(f)&B_{3,2}^0(f)
\varepsilonnd{pmatrix}
\begin{equation}gin{pmatrix}
\begin{equation}ta_1\\[1ex]
\begin{equation}ta_2
\varepsilonnd{pmatrix},\\[1ex]
\mathbb{D}(f)^*[\begin{equation}ta]
&=-f'\begin{equation}gin{pmatrix}
B_{0,2}^0(f)&B_{1,2}^0(f)\\[1ex]
B_{1,2}^0(f)&B_{2,2}^0(f)
\varepsilonnd{pmatrix}
\begin{equation}gin{pmatrix}
\begin{equation}ta_1\\[1ex]
\begin{equation}ta_2
\varepsilonnd{pmatrix}
+\begin{equation}gin{pmatrix}
B_{1,2}^0(f)&B_{2,2}^0(f)\\[1ex]
B_{2,2}^0(f)&B_{3,2}^0(f)
\varepsilonnd{pmatrix}
\begin{equation}gin{pmatrix}
\begin{equation}ta_1\\[1ex]
\begin{equation}ta_2
\varepsilonnd{pmatrix}
\varepsilonnd{aligned}
\varepsilonnd{equation}
for $\begin{equation}ta=(\begin{equation}ta_1,\begin{equation}ta_2)^\top\in L_2({\mathbb R})^2$.
Since the operators $B_{n,m}$ are well-studied by now, mapping properties for the operators~$\mathbb{D}(f)$ and $\mathbb{D}(f)^*$ can be obtained by using the representation~\varepsilonqref{DFB} and Lemma~\mathop{\rm Re}\nolimitsf{L:MP0} below
(which collects some important properties of the operators $B_{n,m}$).
In the following, for $n\in{\mathbb N}$ and Banach spaces $E$ and $F$, we define $\mathcal{L}^n_{\rm sym}(E,F)$ as the Banach space of $n$-linear, bounded, and symmetric maps $A: E^n\to F$.
Moreover, ${\rm C}^{1-}(E,F)$ (resp.~${{\rm C}^{\infty}(E,F)}$) is the space of locally Lipschitz continuous (resp. smooth) mappings from~$E$ to~$F$.
\begin{equation}gin{lemma}\label{L:MP0}\,
\begin{equation}gin{itemize}
\item[(i)] Given Lipschitz continuous functions $a_1,\ldots, a_{m},\, b_1, \ldots, b_n:\mathbb{R}\longrightarrow\mathbb{R}$, there exists a constant~$C$ depending only
on $n,\, m$ and $\max_{i=1,\ldots, m}\|a_i'\|_{\infty}$, such that
$$\|B_{n,m}(a_1,\ldots, a_m)[b_1,\ldots,b_n,\,\cdot\,]\|_{\mathcal{L}(L_2(\mathbb{R}))}\leq C\partialrod_{i=1}^{n} \|b_i'\|_{\infty}.$$
Moreover, $B_{n,m}\in {\rm C}^{1-}((W^1_\infty(\mathbb{R}))^{m},\mathcal{L}^n_{\rm sym}(W^1_\infty(\mathbb{R}), \mathcal{L}(L_2(\mathbb{R})))).$
\item[(ii)] Let $n\geq1,$ $s\in(3/2,2),$ and $a_1,\ldots, a_m\in H^s({\mathbb R})$ be given.
Then, there exists a constant~$C$, depending only on $n,\, m$, $s$, and $\max_{1\leq i\leq m}\|a_i\|_{H^s}$, such that
\begin{equation}gin{align}
&\| B_{n,m}(a_1,\ldots, a_{m})[b_1,\ldots, b_n,h]\|_2\leq C\|b_1\|_{H^1}\|h\|_{H^{s-1}}\partialrod_{i=2}^{n}\|b_i\|_{H^s} \label{REF1}
\varepsilonnd{align}
for all $b_1,\ldots, b_n\in H^s({\mathbb R})$ and $h\in H^{s-1}({\mathbb R}).$
\item[(iii)] Given $s\in(3/2 ,2)$ and $a_1,\ldots, a_m, b_1,\ldots, b_n\in H^s(\mathbb{R})$, there exists a constant $C,$
depending only on $n,\, m,\, s$, and $\max_{1\leq i\leq m}\|a_i\|_{H^s},$ such that
\begin{equation}gin{align*}
\| B_{n,m}(a_1,\ldots, a_{m})[b_1,\ldots, b_n,\cdot]\|_{\mathcal{L}(H^{s-1}(\mathbb{R}))}\leq C \partialrod_{i=1}^{n}\|b_i\|_{H^{s}}.
\varepsilonnd{align*}
Moreover, $ B_{n,m}\in {\rm C}^{1-}((H^s(\mathbb{R}))^m, \mathcal{L}^n_{\rm sym}(H^s(\mathbb{R}), \mathcal{L}(H^{s-1}(\mathbb{R})))).$ \\[-1ex]
\item[(iv)] Let $a_1,\ldots, a_m\in H^2({\mathbb R})$ be given.
Then, there exists a constant~$C$, depending only on~$n,\, m$, and $\max_{1\leq i\leq m}\|a_i\|_{H^2}$, such that
\begin{equation}gin{align}
\| B_{n,m}(a_1,\ldots, a_{m})[b_1,\ldots, b_n,h]\|_{H^1}\leq C \|h\|_{H^1}\partialrod_{i=1}^{n}\|b_i\|_{H^2} \label{FER1}
\varepsilonnd{align}
for all $b_1,\ldots, b_n\in H^2({\mathbb R})$ and $h\in H^1({\mathbb R}) $, with
\begin{equation}gin{equation}\label{FDER}
\begin{equation}gin{aligned}
&\hspace{-1cm}(B_{n,m}(a_1,\ldots, a_{m})[b_1,\ldots, b_n,h])'\\[1ex]
&=B_{n,m}( a_1,\ldots, a_m) [b_1,\ldots , b_n, h' ]\\[1ex]
&\hspace{0,45cm}+\mathbb Sum_{i=1}^nB_{n,m}(a_1,\ldots,a_m)[b_1,\ldots,b_{i-1}, b_i',b_{i+1},\ldots b_n, h]\\[1ex]
&\hspace{0,45cm}-2\mathbb Sum_{i=1}^mB_{n+2,m+1}( a_1,\ldots, a_i, a_i,\ldots,a_m) [b_1,\ldots,b_n, a_i',a_i, h ].
\varepsilonnd{aligned}
\varepsilonnd{equation}
Moreover, $B_{n,m}\in {\rm C}^{1-}(H^2({\mathbb R})^m,\mathcal{L}^{n}_{\rm sym}(H^2({\mathbb R}),\mathcal{L}(H^1({\mathbb R})))).$
\varepsilonnd{itemize}
\varepsilonnd{lemma}
\begin{equation}gin{proof}
The claims (i) and (ii) are established in \cite[Lemmas~3.1 and~3.2]{MBV18}, the property~(iii) is proven in \cite[Lemmas~5]{AM22},
and~(iv) is established in \cite[Lemma 4.3]{MP2022}.
\varepsilonnd{proof}
As a direct consequence of \varepsilonqref{DFB} and Lemma~\mathop{\rm Re}\nolimitsf{L:MP0}~(i) we obtain that
\begin{equation}gin{equation}\label{CDD*}
[f\longmapsto \mathbb{D}(f)],\, [f\longmapsto \mathbb{D}(f)^*]\in {\rm C}^{1-}(W^1_\infty({\mathbb R}),\mathcal{L}(L_2({\mathbb R})^2)).
\varepsilonnd{equation}
Moreover, by Lemma~\mathop{\rm Re}\nolimitsf{L:MP0}~(iv), we have
\begin{equation}gin{equation}\label{Bnmk}
[f\longmapsto B_{n,m}^0(f)]\in {\rm C}^{1-}(H^{k+1}({\mathbb R}),\mathcal{L}(H^k({\mathbb R})))\qquad\text{for $k=1,\, 2$.}
\varepsilonnd{equation}
\mathbb Subsection{Rellich identities on $\Gammaamma$}
The proof of Theorem~\mathop{\rm Re}\nolimitsf{T:L2spec} relies on several Rellich identities for the Stokes problem, \varepsilonqref{RELLICH1}-\varepsilonqref{RELLICH5} below,
which hold also in a bounded geometry~in ${\mathbb R}^n$,~$n\geq3$,
see~\cite{FKV88}.
Let $f\in{\rm BUC}^\infty({\mathbb R}RM)$ and $\begin{equation}ta=(\begin{equation}ta_1,\begin{equation}ta_2)^\top\in {\rm C}_c^\infty({\mathbb R}RM)^2$.
Using the notation from Section~\mathop{\rm Re}\nolimitsf{Sec:2}, we set~${\Omega^-:=\Omega}$, $\Omega^+:={\mathbb R}^2\mathbb Setminus\overline\Omega$, and we define the hydrodynamic single-layer potential $(u, \Pi)$ by the formula
\begin{equation}gin{equation}\label{iupi}
\left.\begin{equation}gin{aligned}
u(x)&:=u(f)[\begin{equation}ta](x):=-\int_{\mathbb R}\mathcal{U}^k( x-(s,f(s)))\begin{equation}ta_k(s)\, ds\\[1ex]
\Pi(x)&:= \Pi(f)[\begin{equation}ta](x):=-\int_{\mathbb R}\mathcal{P}^k( x-(s,f(s)))\begin{equation}ta_k(s)\, ds
\varepsilonnd{aligned}
\right\} \qquad\text{for $x\in{\mathbb R}^2\mathbb Setminus\Gammaamma$,}
\varepsilonnd{equation}
where $\mathcal{U}^k$ and $\mathcal{P}^k$ are defined in \varepsilonqref{fundup} (with $\mu=1$).
Since $\begin{equation}ta$ is compactly supported, it is not difficult to see that the functions $(u,\Pi)$ are well-defined, smooth in~$\Omega^\partialm$, and satisfy
\begin{equation}gin{equation}\label{Sto34}
\left.
\begin{equation}gin{array}{rllll}
\Delta u-\nabla \Pi&=&0,\\[1ex]
\mathop{\rm div} u&=&0
\varepsilonnd{array}\right\}\qquad\text{in $\Omega^\partialm$},
\varepsilonnd{equation}
as well as
\begin{equation}gin{equation}\label{decay}
\Pi,\,\nabla u=O(|x|^{-1})\qquad \mbox{for $|x|\to\infty$}.
\varepsilonnd{equation}
Moreover, \cite[Lemma~A.1]{BM22} and the arguments in the proof of \cite[Lemma~A.1]{MP2021} enable us to conclude that $ \Pi^\partialm:=\Pi|_{\Omega^\partialm}$ and $u^\partialm:=u|_{\Omega^\partialm}$ satisfy
$\Pi^\partialm\in {\rm C}(\overlineerline{\Omega^\partialm})$ and
$u^\partialm\in {\rm C}^1(\overlineerline{\Omega^\partialm})$, with
\begin{equation}gin{equation}\label{repp}
\begin{equation}gin{aligned}
\partialartial_iu_j^\partialm|_\Gamma \circ\Xi(\xi) &=-\mathop{\rm PV}\nolimits\int_{\mathbb R}\partialartial_i\mathcal{U}_j^k(r)\begin{equation}ta_k\,ds\partialm\frac{-\begin{equation}ta_j\nu^i+\nu^i\nu^j\begin{equation}ta\cdot\nu}{2\omega}(\xi),\quad i,\, j=1,\, 2,\\
\Pi^\partialm|_\Gamma\circ\Xi(\xi) &= -\mathop{\rm PV}\nolimits\int_{\mathbb R}\mathcal{P}^k(r)\begin{equation}ta_k\,ds\partialm\frac{\begin{equation}ta\cdot\nu}{2\omega}(\xi)
\varepsilonnd{aligned}
\varepsilonnd{equation}
for $\xi\in{\mathbb R}RM$, with $r=r(\xi,s)$ defined in \varepsilonqref{rxs}.
Recalling the definition \varepsilonqref{defT} of the stress tensor, we then compute in view of \varepsilonqref{Sto34}
\begin{equation}gin{equation*}
\begin{equation}gin{aligned}
{\rm div\, }\begin{equation}gin{pmatrix}
0\\[1ex]
\|\nabla u+(\nabla u)^\top\|_F^2
\varepsilonnd{pmatrix}&=4 \,{\rm div\, } \big(T_1(u,\Pi)\partial_2 u\big)\qquad\text{in ${\mathbb R}^2\mathbb Setminus\Gammaamma$},\\[1ex]
{\rm div\,}\begin{equation}gin{pmatrix}
0\\[1ex]
\|\nabla u\|_F^2
\varepsilonnd{pmatrix}&
=2\, {\rm div\, } \big(((\nabla u)^\top -\Pi E_2)\partial_2 u\big)\qquad\text{in ${\mathbb R}^2\mathbb Setminus\Gammaamma$},\\[1ex]
{\rm div\,}\begin{equation}gin{pmatrix}
0\\[1ex]
\Pi^2
\varepsilonnd{pmatrix}&=2\,{\rm div\,}\big( (\partial_1u_2-\partial_2u_1)\partial_1u+ \Pi\nabla u_2\big)\qquad\text{in ${\mathbb R}^2\mathbb Setminus\Gammaamma$,}
\varepsilonnd{aligned}
\varepsilonnd{equation*}
where $\|\cdot\|_F$ denotes as usual the Frobenius norm of matrices.
Using \varepsilonqref{decay}, we may integrate the latter identities over $\Omega^\partialm$ to obtain, in view of Gauss' theorem, the Rellich identities
\begin{equation}gin{align}
\int_\Gammaamma \|\nabla u^\partialm+(\nabla u^\partialm)^\top\|_F^2\tilde\nu^2\, d\Gammaamma&=4\int_\Gammaamma\partial_2 u^\partialm \cdot T_1(u,\Pi)^\partialm \tilde \nu\, d\Gammaamma,\label{RELLICH1}\\[1ex]
\int_\Gammaamma \|\nabla u^\partialm\|_F^2\tilde\nu^2\, d\Gammaamma&=2\int_\Gammaamma \partial_2 u^\partialm \cdot ( \nabla u^\partialm -\Pi^\partialm E_2)\tilde\nu\, d\Gammaamma,\label{RELLICH2}\\[1ex]
\int_\Gammaamma |\Pi^\partialm|^2\tilde\nu^2\, d\Gammaamma&=2\int_\Gammaamma (\partial_1u_2^\partialm-\partial_2u_1^\partialm)\partial_{\tilde\tau}u_2^\partialm+\Pi^\partialm \partial_{\tilde\nu}u_2^\partialm\, d\Gammaamma.\label{RELLICH3}
\varepsilonnd{align}
We now subtract \varepsilonqref{RELLICH1} from \varepsilonqref{RELLICH2} multiplied by $4$ to get
\begin{equation}gin{align}
\int_\Gammaamma |\partial_1u_2^\partialm-\partial_2u_1^\partialm|^2\tilde\nu^2\, d\Gammaamma=2\int_\Gammaamma \Pi^\partialm\partial_{\tilde\tau} u_1^\partialm-(\partial_1u_2^\partialm-\partial_2u_1^\partialm) \partial_{\tilde\nu} u_1^\partialm\, d\Gammaamma.\label{RELLICH4}
\varepsilonnd{align}
Furthermore, from
\[
(T_1(u^\partialm,\Pi^\partialm)+\Pi^\partialm E_2)\tilde\nu=\big(\nabla u^\partialm+(\nabla u^\partialm)^\top\big)\tilde\nu=\begin{equation}gin{pmatrix}
\partial_{\tilde \nu}u_1^\partialm+\partial_{\tilde\tau} u_2^\partialm\\[1ex]
\partial_{\tilde \nu}u_2^\partialm-\partial_{\tilde\tau} u_1^\partialm
\varepsilonnd{pmatrix},
\]
we obtain, after taking the difference of \varepsilonqref{RELLICH3} and \varepsilonqref{RELLICH4},
\begin{equation}gin{equation}\label{RELLICH5}
\begin{equation}gin{aligned}
&{\hspace{-1cm}} \int_\Gammaamma |\partial_1u_2^\partialm-\partial_2u_1^\partialm|^2\tilde\nu^2\, d\Gammaamma\\[1ex]
&=\int_\Gammaamma |\Pi^\partialm|^2\tilde\nu^2\, d\Gammaamma
-2\int_\Gammaamma
\begin{equation}gin{pmatrix}
\partial_1u_2^\partialm-\partial_2u_1^\partialm\\[1ex]
\Pi^\partialm
\varepsilonnd{pmatrix}\cdot \big(T_1(u^\partialm,\Pi^\partialm)+\Pi^\partialm E_2\big)\tilde\nu\, d\Gammaamma.
\varepsilonnd{aligned}
\varepsilonnd{equation}
\mathbb Subsection{Transformation to the real axis}\
To represent the pull-backs of the one-sided traces of $(\nabla u)_{ij}$ and $\Pi$ on $\Gamma$ as singular integral operators,
we define, for~$f\in W^1_\infty({\mathbb R}RM)$, ${\begin{equation}ta\in L_2}({\mathbb R}RM)^2$,
and $\theta\in L_2({\mathbb R})$, the singular integral operators ${\mathbb T}_i$, $\mathbb{B}_i$, $i=1,\,2$, as follows:
\begin{equation}gin{align*}
{\mathbb T}_1(f)[\begin{equation}ta](\xi)&:=\frac{1}{4\partiali}\mathop{\rm PV}\nolimits\int_{\mathbb R}\frac{1}{|r|^4}
\begin{equation}gin{pmatrix}
r_1r_2^2-r_1^3& r_2^3- r_1^2 r_2\\
r_2^3-r_1^2 r_2& -r_1^3 -3r_1r_2^2
\varepsilonnd{pmatrix}\begin{equation}ta\,ds,\\[1ex]
{\mathbb T}_2(f)[\begin{equation}ta](\xi)&:=\frac{1}{4\partiali}\mathop{\rm PV}\nolimits\int_{\mathbb R}\frac{1}{|r|^4}
\begin{equation}gin{pmatrix}
- r_2^3-3r_1^2r_2& r_1^3- r_1 r_2^2\\
r_1^3-r_1 r_2^2& r_1^2 r_2- r_2^3
\varepsilonnd{pmatrix}\begin{equation}ta\,ds,\\[1ex]
\mathbb{B}_1(f)[\theta](\xi)&:=\frac{1}{\partiali}\mathop{\rm PV}\nolimits\int_{\mathbb R}\frac{- r_1 f'+ r_2}{ |r|^2}\,\theta\,ds,\\[1ex]
\mathbb{B}_2(f)[\theta](\xi)&:=\frac{1}{\partiali}\mathop{\rm PV}\nolimits\int_{\mathbb R}\frac{r_1 + r_2 f'}{|r|^2}\,\theta\,ds,
\varepsilonnd{align*}
in the notation introduced in~\varepsilonqref{rxs}. Since the components of these operators may be expressed by using only the singular operators~${B^{0}_{n,m}(f)}$, we infer from Lemma~\mathop{\rm Re}\nolimitsf{L:MP0}~(i) that
\begin{equation}gin{equation}\label{regTB}
{\mathbb T}_i\in {\rm C}^{1-}(W^1_\infty({\mathbb R}),\mathcal{L}(L_2({\mathbb R})^2)),\quad \mathbb{B}_i\in {\rm C}^{1-}(W^1_\infty({\mathbb R}),\mathcal{L}(L_2({\mathbb R}))).
\varepsilonnd{equation}
It follows from \varepsilonqref{fundup}, \varepsilonqref{stokesdiff} (with~${\mu=1}$) and~\varepsilonqref{repp} that for~${f\in{\rm BUC}^\infty({\mathbb R}RM)}$ and $\begin{equation}ta\in {\rm C}_c^\infty({\mathbb R}RM)^2$
we have (in matrix notation)
\begin{equation}\label{identitiesR}
\begin{equation}gin{array}{rl}
\nabla u^\partialm(f)[\begin{equation}ta]\big|_\Gamma\circ\Xi&=\mathcal{B}ig({\mathbb T}_1(f)[\begin{equation}ta]\;{\mathbb T}_2(f)[\begin{equation}ta]\mathcal{B}ig)\mp\cfrac{(\begin{equation}ta\cdot\tau)}{2\omega}\tau\,\nu^\top
=:\widetilde{(\nabla u)^\pm}(f)[\begin{equation}ta],\\[2ex]
\Pi^\partialm(f)[\begin{equation}ta]\big|_\Gamma\circ\Xi&=\displaystyle\frac{(\partialm1+\mathbb{B}_1(f))[\omega^{-1}\begin{equation}ta\cdot\nu]+\mathbb{B}_2(f)[\omega^{-1}\begin{equation}ta\cdot\tau]}{2}
=:\widetilde{\Pi^\pm}(f)[\begin{equation}ta],
\varepsilonnd{array}
\varepsilone
the right sides of \varepsilonqref{identitiesR} being meaningful whenever $f\in W^1_\infty({\mathbb R}RM)$ and $\begin{equation}ta\in L_2({\mathbb R}RM)^2$.
To translate the Rellich identities of the previous subsection to identities for integral operators on ${\mathbb R}$ it is convenient to additionally introduce the operators $\widetilde{T_1^\pm}$ and $\widetilde{\p_2u^\pm}$ by
\begin{equation}gin{align*}
\widetilde{T_1^\pm}(f)[\begin{equation}ta]&:=\widetilde{(\nabla u)^\pm}(f)[\begin{equation}ta]+\widetilde{(\nabla u)^\pm}(f)[\begin{equation}ta]^\top-\widetilde{\Pi^\pm}(f)[\begin{equation}ta] E_2,\\
\widetilde{\p_2u^\pm}(f)[\begin{equation}ta]&:=\widetilde{(\nabla u)^\pm}(f)[\begin{equation}ta]e_2,
\varepsilonnd{align*}
where $e_2:=(0,1)^\top$.
From \varepsilonqref{regTB} we immediately get
\begin{equation}\label{regpullback}
\left.\begin{equation}gin{array}{rl}
\widetilde{(\nabla u)^\pm},\widetilde{T_1^\pm}&\in {\rm C}^{1-}(W^1_\infty({\mathbb R}),\mathcal{L}(L_2({\mathbb R})^2,L_2({\mathbb R})^{2\times 2})),\\
\widetilde{\p_2u^\pm}&\in {\rm C}^{1-}(W^1_\infty({\mathbb R}),\mathcal{L}(L_2({\mathbb R})^2,L_2({\mathbb R})^2)),\\
\widetilde{\Pi^\pm}&\in {\rm C}^{1-}(W^1_\infty({\mathbb R}),\mathcal{L}(L_2({\mathbb R})^2,L_2({\mathbb R}))).
\varepsilonnd{array}\right\}
\varepsilone
It is not difficult to check that
\begin{equation}gin{equation}\label{ffff}
\omega \widetilde{T_1^\pm}(f)[\begin{equation}ta]\nu=\mathcal{B}ig(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\mathcal{B}ig)[\begin{equation}ta].
\varepsilonnd{equation}
Parameterizing $\Gammaamma$ over ${\mathbb R}$ via $[s\longmapsto(s,f(s))]$
and using \varepsilonqref{identitiesR} and \varepsilonqref{ffff}, we find from~\varepsilonqref{RELLICH1} that
\begin{equation}gin{equation}\label{RELLICH1'}
\mathcal{B}ig\|\widetilde{T_1^\pm}(f)[\begin{equation}ta]+\widetilde{\Pi^\pm}(f)[\begin{equation}ta]E_2\mathcal{B}ig\|_2^2=4\mathcal{B}ig\langle\widetilde{\p_2u^\pm}(f)[\begin{equation}ta]\,\mathcal{B}ig|\,\mathcal{B}ig(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\mathcal{B}ig)[\begin{equation}ta]\mathcal{B}ig\rangle_2,
\varepsilonnd{equation}
where $\langle\cdot|\cdot\rangle$ denotes the $L_2({\mathbb R})^2$ scalar product.
Similarly, from \varepsilonqref{RELLICH2} and \varepsilonqref{RELLICH5} we get
\begin{equation}gin{equation} \label{RELLICH2'}
\mathcal{B}ig\|\widetilde{(\nabla u)^\pm}(f)[\begin{equation}ta]\mathcal{B}ig\|_2^2=2 \mathcal{B}ig\langle\widetilde{\p_2u^\pm}(f)[\begin{equation}ta] \mathcal{B}ig| \omega^{-1}\mathcal{B}ig( \widetilde{(\nabla u)^\pm}(f)[\begin{equation}ta] -\widetilde{\Pi^\pm}(f)[\begin{equation}ta] E_2\mathcal{B}ig)\nu \mathcal{B}ig\rangle_2
\varepsilonnd{equation}
and
\begin{equation}gin{equation}\label{RELLICH5'}
\begin{equation}gin{aligned}
&{\hspace{-1cm}}2\left\langle
\begin{equation}gin{pmatrix}
\mathcal{B}ig(\widetilde{(\nabla u)^\partialm_{21}}(f)-\widetilde{(\nabla u)^\partialm_{12}}(f)\mathcal{B}ig)[\begin{equation}ta]\\[1ex]
\widetilde{\Pi^\pm}(f)[\begin{equation}ta]
\varepsilonnd{pmatrix} \right| \left.\mathcal{B}ig(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\mathcal{B}ig)[\begin{equation}ta]+\widetilde{\Pi^\pm}(f)[\begin{equation}ta] \begin{equation}gin{pmatrix}
-f'\\[1ex]
1
\varepsilonnd{pmatrix} \right\rangle_2\\[1ex]
&=\mathcal{B}ig\|\widetilde{\Pi^\pm}(f)[\begin{equation}ta]\mathcal{B}ig\|_2^2 -\mathcal{B}ig\|\mathcal{B}ig(\widetilde{(\nabla u)^\partialm_{21}}(f)-\widetilde{(\nabla u)^\partialm_{12}}(f)\mathcal{B}ig)[\begin{equation}ta]\mathcal{B}ig\|_2^2,
\varepsilonnd{aligned}
\varepsilonnd{equation}
respectively.
By a standard density argument, it follows from \eqref{CDD*} and \eqref{regpullback} that \eqref{RELLICH1'}--\eqref{RELLICH5'}
hold for any~${f\in{\rm BUC}^1({\mathbb R})}$ and~$\beta\in L_2({\mathbb R})^2$.
\subsection{Completion of the proof of Theorem \ref{T:L2spec}} We divide the remaining arguments in the proof of Theorem \ref{T:L2spec} into three steps.
\noindent{\em Step 1.} Fix $\delta\in (0,1)$ and ${f\in{\rm BUC}^1({\mathbb R})}$ such that $\|f'\|_\infty\leq 1/\delta$.
In the sequel, we are going to write $C(\delta)$ for different positive constants that depend on $\delta$ only.
Let $\beta\in L_2({\mathbb R})^2$.
Using Lemma~\ref{L:MP0}~(i), we find a constant $C(\delta)$ such that the right side of \eqref{RELLICH1'} satisfies
\begin{equation*}
4\Big\langle\widetilde{\partial_2u^\pm}(f)[\beta]\,\Big|\,\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\rangle_2\leq C(\delta)\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2\|\beta\|_2.
\end{equation*}
For the left side of \eqref{RELLICH1'} we have, in view of \eqref{ffff} and Lemma~\ref{L:MP0}~(i),
\begin{align*}
\Big\|\widetilde{T_1^\pm}(f)[\beta]+\widetilde{\Pi^\pm}(f)[\beta]E_2\Big\|_2^2\geq& \frac{\delta^2}{2}\int_{\mathbb R} \Big|\omega \widetilde{T_1^\pm}(f)[\beta]\nu+\omega\widetilde{\Pi^\pm}(f)[\beta]\nu\Big|^2 \, dx\\[1ex]
\geq&\frac{\delta^2}{4}\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2-\frac{\delta^2}{2}\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2\\[1ex]
\geq&\frac{\delta^2}{4}\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2-C(\delta)\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2\|\beta\|_2.
\end{align*}
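For the reader's convenience we record the elementary facts behind the first two inequalities (a brief sketch; it relies on the conventions introduced earlier in the paper, in particular that $E_2$ denotes the $2\times 2$ identity matrix and that $\omega=(1+f'^2)^{1/2}$, so that $E_2\nu=\nu$, $|\nu|=1$, and $\omega^2\leq 1+\delta^{-2}\leq 2\delta^{-2}$): for every matrix $M\in{\mathbb R}^{2\times 2}$ we have
\begin{equation*}
|\omega M\nu|^2\leq\omega^2\|M\|_F^2\leq\frac{2}{\delta^2}\,\|M\|_F^2,
\end{equation*}
with $\|\cdot\|_F$ the Frobenius norm, which yields the first inequality when applied to $M=\widetilde{T_1^\pm}(f)[\beta]+\widetilde{\Pi^\pm}(f)[\beta]E_2$; the second inequality follows from \eqref{ffff} together with the pointwise estimate $|a+b|^2\geq\frac{1}{2}|a|^2-|b|^2$, applied with $a=\omega\widetilde{\Pi^\pm}(f)[\beta]\nu$ and $b=\omega\widetilde{T_1^\pm}(f)[\beta]\nu$, and with $\omega\geq1$.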
These estimates show that there exists a constant $ C(\delta)$ with the property that
\begin{equation}\label{L32}
C(\delta)\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2\|\beta\|_2\geq\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2
\end{equation}
for all $\beta\in L_2({\mathbb R})^2$.
\noindent
{\em Step 2.} It follows from \eqref{RELLICH5'} that
\begin{equation*}
\begin{aligned}
&\Big\|\Big(\widetilde{(\nabla u)^\pm_{21}}(f)-\widetilde{(\nabla u)^\pm_{12}}(f)\Big)[\beta]\Big\|_2^2\\[1ex]
&\leq C(\delta)\bigg[\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2\\[1ex]
&\hspace{1.5cm}+\Big\|\Big(\widetilde{(\nabla u)^\pm_{21}}(f)-\widetilde{(\nabla u)^\pm_{12}}(f)\Big)[\beta]\Big\|_2
\Big(\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2\Big)\bigg],
\end{aligned}
\end{equation*}
hence
\begin{equation}\label{estrot}
\Big\|\Big(\widetilde{(\nabla u)^\pm_{21}}(f)-\widetilde{(\nabla u)^\pm_{12}}(f)\Big)[\beta]\Big\|_2^2\leq C(\delta)
\Big(\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2\Big).
\end{equation}
Furthermore, as
\[
2\widetilde{(\nabla u)^\pm}(f)[\beta]\nu=\frac{1}{\omega}\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]
+\widetilde{\Pi^\pm}(f)[\beta]\nu-\Big(\widetilde{(\nabla u)^\pm_{21}}(f)-\widetilde{(\nabla u)^\pm_{12}}(f)\Big)[\beta]\tau,
\]
we infer from \eqref{estrot} that
\begin{equation}\label{estjacnorm}
\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\nu\Big\|_2^2\leq C(\delta)
\Big(\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2
+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2\Big).
\end{equation}
The identity \eqref{RELLICH2'} implies the estimate
\[\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\Big\|_2^2\leq C(\delta)\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\Big\|_2
\Big(\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\nu\Big\|_2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2\Big),\]
and together with \eqref{estjacnorm} this yields
\begin{equation}\label{estjac}
\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\Big\|_2^2\leq C(\delta)
\Big(\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2
+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2\Big).
\end{equation}
Multiplying the identity \eqref{identitiesR}$_1$ by $e_2$ and taking subsequently the scalar product with $\beta$, we observe that
\[\|\beta\cdot\tau\|_2^2=\mp2\omega^2\Big(\Big\langle\beta\,\Big|\,\widetilde{(\nabla u)^\pm}(f)[\beta]e_2\Big\rangle
-\langle\beta\,|\,{\mathbb T}_2(f)[\beta]\rangle\Big).\]
The second term on the right vanishes as ${\mathbb T}_2(f)^\ast=-{\mathbb T}_2(f)$, and thus
\begin{equation}\label{estbtan}
\|\beta\cdot\tau\|_2^2\leq C(\delta)\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\Big\|_2\|\beta\|_2.
\end{equation}
Next, we rewrite \eqref{identitiesR}$_2$ as
\[(\pm1+\mathbb{B}_1(f))[\omega^{-1}\beta\cdot\nu]=2\widetilde{\Pi^\pm}(f)[\beta]-\mathbb{B}_2(f)[\omega^{-1}\beta\cdot\tau].\]
Letting ${\mathbb A}(f):=\mathbb{B}_1(f)^\ast$, it follows from the Rellich identity for the Muskat problem established in the proof of~\cite[Theorem~3.5]{MBV18} that the operator~${(\pm 1+{\mathbb A}(f))\in\mathcal{L}(L_2({\mathbb R}))}$ is an isomorphism with
\[\|\left(\pm 1+{\mathbb A}(f)\right)^{-1}\|_{\mathcal{L}(L_2({\mathbb R}))}\leq C(\delta).\]
This implies that also its adjoint $(\pm 1+\mathbb{B}_1(f))\in\mathcal{L}(L_2({\mathbb R}))$ is an isomorphism and
\[ \|(\pm 1+\mathbb{B}_1(f))^{-1}\|_{ \mathcal{L}(L_2({\mathbb R}))}\leq C(\delta).\]
Using this and Lemma~\ref{L:MP0}~(i) we get
\[\|\beta\cdot\nu\|_2\leq C(\delta)\Big(\|\beta\cdot\tau\|_2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2\Big),\]
and together with \eqref{estbtan} and Young's inequality we arrive at
\[\|\beta\|_2^2=\|\beta\cdot\nu\|_2^2+\|\beta\cdot\tau\|_2^2\leq C(\delta)\Big(\Big\|\widetilde{(\nabla u)^\pm}(f)[\beta]\Big\|_2^2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2\Big).\]
In view of \eqref{estjac} we infer from the latter inequality that
\[\|\beta\|_2^2\leq C(\delta)\Big(\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2+\Big\|\widetilde{\Pi^\pm}(f)[\beta]\Big\|_2^2\Big),\]
and together with \eqref{L32} and Young's inequality we finally obtain
\begin{equation}\label{DEest1}
\|\beta\|_2\leq C(\delta)\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2.
\end{equation}
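For clarity, we record the form of Young's inequality used in this last step: for every $\varepsilon>0$,
\begin{equation*}
C(\delta)\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2\|\beta\|_2\leq \frac{C(\delta)^2}{2\varepsilon}\Big\|\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_2^2+\frac{\varepsilon}{2}\|\beta\|_2^2,
\end{equation*}
and choosing $\varepsilon$ sufficiently small (depending only on $\delta$) allows one to absorb the term proportional to $\|\beta\|_2^2$, which arises when \eqref{L32} is inserted into the previous inequality, into the left-hand side; this yields \eqref{DEest1}.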
\noindent{\em Step 3.} In view of the identity
\[
(\lambda-\mathbb{D}(f)^\ast)[\beta]=\Big(\mp\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]+\Big(\lambda\pm\frac{1}{2}\Big)\beta, \quad \lambda\in\mathbb C,\, \beta\in L_2({\mathbb R})^2,
\]
we deduce from \eqref{DEest1} that
\[C(\delta)\|(\lambda-\mathbb{D}(f)^\ast)[\beta]\|_2\geq (1-C(\delta)|\lambda\pm1/2|)\|\beta\|_2 , \quad \lambda\in\mathbb C,\, \beta\in L_2({\mathbb R})^2,\]
and therefore
\begin{equation}\label{DEest2}
\|\beta\|_2\leq C(\delta)\|(\lambda-\mathbb{D}(f)^\ast)[\beta]\|_2\quad
\text{for $\lambda$ sufficiently close to $\pm1/2$ and $\beta\in L_2({\mathbb R})^2$.}
\end{equation}
Now \eqref{DEest2} together with the estimate \cite[(3.15)]{MP2022} shows there exists a constant~${C_0=C_0(\delta)\geq 1}$ such that
\[
C_0\|(\lambda-\mathbb{D}(f)^*)[\beta]\|_2\geq \|\beta\|_2\qquad\text{for all $\beta\in L_2({\mathbb R})^2$ and all $\lambda\in{\mathbb R}\setminus(-1/2,1/2)$.}
\]
As $\mathbb{D}(f)^\ast$ is in $\mathcal{L}(L_2({\mathbb R})^2),$ the shift $\lambda-\mathbb{D}(f)^\ast\in\mathcal{L}(L_2({\mathbb R})^2)$ is an isomorphism if~$|\lambda|$ is sufficiently large.
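For completeness, we recall the standard Neumann series argument behind the last assertion: if $|\lambda|>\|\mathbb{D}(f)^\ast\|_{\mathcal{L}(L_2({\mathbb R})^2)}$, then
\begin{equation*}
(\lambda-\mathbb{D}(f)^\ast)^{-1}=\frac{1}{\lambda}\sum_{j=0}^\infty \lambda^{-j}\big(\mathbb{D}(f)^\ast\big)^j\in\mathcal{L}(L_2({\mathbb R})^2)
\qquad\text{with}\qquad
\big\|(\lambda-\mathbb{D}(f)^\ast)^{-1}\big\|_{\mathcal{L}(L_2({\mathbb R})^2)}\leq\frac{1}{|\lambda|-\|\mathbb{D}(f)^\ast\|_{\mathcal{L}(L_2({\mathbb R})^2)}}.
\end{equation*}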
The method of continuity, cf. e.g. \cite[Proposition~I.1.1.1]{Am95}, implies now that~${\pm1/2-\mathbb{D}(f)^*}$, and hence also $\pm1/2-\mathbb{D}(f)$, are isomorphisms as well.
This completes the proof of Theorem \ref{T:L2spec}.
\subsection{Spectral properties in Sobolev spaces}
In Lemma~\ref{L:3} we establish the invertibility of the operators considered in Theorem~\ref{T:L2spec} in the Banach algebras $\mathcal{L}(H^k({\mathbb R})^2)$, $k=1,\, 2$.
\begin{lemma}\label{L:3} \
For $f\in H^{k+1}({\mathbb R})$, $k=1,\, 2$, the operators~${\pm1/2-\mathbb{D}(f)}$ and~${\pm1/2-\mathbb{D}(f)^*}$ are invertible in $\mathcal{L}(H^k({\mathbb R})^2)$.
\end{lemma}
\begin{proof}
Fix $f\in H^{k+1}({\mathbb R})$.
The representation \eqref{DFB} and Lemma~\ref{L:MP0}~(iv) then immediately imply that~$\mathbb{D}(f)$ and $\mathbb{D}(f)^*$ belong to $\mathcal{L}(H^k({\mathbb R})^2)$.
Let first $k=1$. Using \eqref{FDER}, we compute that the components of
\begin{align*}
T[\beta]:=(\mathbb{D}(f)[\beta])'-\mathbb{D}(f)[\beta'], \qquad\beta=(\beta_1,\beta_2)^\top\in H^1({\mathbb R})^2,
\end{align*}
are (finite) linear combinations of terms of the form
\begin{align*}
B_{n,m}(f,\ldots,f)[f',f,\ldots,f,f'^{\ell}\beta_i]\quad\text{and} \qquad B_{n,m}^0(f)[f''\beta_i]
\end{align*}
with $n,\,m\leq 5$, $\ell=0,\, 1,$ and $i=1,\, 2$.
Choosing $s\in(3/2,2)$, it follows from Lemma~\ref{L:MP0}~(i)-(ii) that there exists a constant $C_1>0$ such that
\[
\|T[\beta]\|_2\leq C_1\|\beta\|_{H^{s-1}}, \qquad\beta=(\beta_1,\beta_2)^\top\in H^1({\mathbb R})^2.
\]
This property together with \eqref{DEest} now leads to
\begin{align*}
\|(\pm1/2-\mathbb{D}(f))[\beta]\|_{H^1}^2&=\|(\pm1/2-\mathbb{D}(f))[\beta]\|_{2}^2+\|((\pm1/2-\mathbb{D}(f))[\beta])'\|_{2}^2\\[1ex]
&\geq\|(\pm1/2-\mathbb{D}(f))[\beta]\|_{2}^2+\frac{1}{2}\|(\pm1/2-\mathbb{D}(f))[\beta']\|_{2}^2-\|T[\beta]\|_2^2\\[1ex]
&\geq \frac{1}{2C_0^2} \|\beta\|_{H^1}^2 - C_1^2\|\beta\|_{H^{s-1}}^2.
\end{align*}
The latter estimate, an interpolation argument, and Young's inequality imply there exists a further constant~${C_2=C_2(\delta)\geq1}$ such that
\begin{align*}
C_2\big(\|\beta\|_2^2+\|(\pm1/2-\mathbb{D}(f))[\beta]\|_{H^1}^2\big)\geq \|\beta\|_{H^1}^2
\end{align*}
for all $\beta\in H^1({\mathbb R})^2$.
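For the reader's convenience, we sketch the interpolation step (using the Fourier description of the $H^t({\mathbb R})$-norms): since $s-1\in(1/2,1)$, H\"older's inequality gives
\begin{equation*}
\|\beta\|_{H^{s-1}}\leq\|\beta\|_{2}^{2-s}\,\|\beta\|_{H^1}^{s-1},\qquad \beta\in H^1({\mathbb R})^2,
\end{equation*}
and therefore, by Young's inequality, for each $\varepsilon>0$ there is a constant $C(\varepsilon)>0$ with
\begin{equation*}
C_1^2\|\beta\|_{H^{s-1}}^2\leq\varepsilon\|\beta\|_{H^1}^2+C(\varepsilon)\|\beta\|_{2}^2;
\end{equation*}
the choice $\varepsilon=1/(4C_0^2)$ then leads to the estimate just displayed.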
This estimate combined with \eqref{DEest} now yields
\begin{align*}
C_2(C_0^2+1)\|(\pm1/2-\mathbb{D}(f))[\beta]\|_{H^1}^2\geq \|\beta\|_{H^1}^2
\end{align*}
for all $\beta\in H^1({\mathbb R})^2$.
The invertibility of $\pm1/2-\mathbb{D}(f)$ in $\mathcal{L}(H^1({\mathbb R})^2)$ follows from this estimate and the invertibility property in $\mathcal{L}(L_2({\mathbb R})^2)$.
The invertibility of $\pm1/2-\mathbb{D}(f)^*$ in $\mathcal{L}(H^1({\mathbb R})^2)$ may be established by using the same arguments and therefore we omit the details.
Finally, when $k=2$, the invertibility of $\pm1/2-\mathbb{D}(f)$ and $\pm1/2-\mathbb{D}(f)^*$ in $\mathcal{L}(H^2({\mathbb R})^2)$ may be obtained by arguing along the same lines as above
(see the proof of \cite[Theorem 4.5]{MP2022} for some details).
\end{proof}
The next invertibility result is used in Section~\ref{Sec:4.4} when we consider our evolution problems
in~${H^{s-1}({\mathbb R})}$ with~${s\in(3/2,2)}$.
\begin{lemma}\label{L:4}
Given $\delta\in(0,1)$ and $s\in(3/2,2),$ there exists a positive constant~${C=C(\delta,s)\geq 1}$ such that
for all~${f\in H^s({\mathbb R})}$ with~${\|f \|_{H^s}\leq 1/\delta}$ and all $\beta\in H^{s-1}({\mathbb R})^2$ we have
\begin{align}\label{DEests}
C\min\Big\{\Big\|\Big(\pm\frac{1}{2}-\mathbb{D}(f)\Big)[\beta]\Big\|_{H^{s-1}},\Big\|\Big(\pm\frac{1}{2}-\mathbb{D}(f)^\ast\Big)[\beta]\Big\|_{H^{s-1}}\Big\}\geq \|\beta\|_{H^{s-1}}.
\end{align}
Moreover, $\pm1/2-\mathbb{D}(f)^\ast$ and $\pm1/2-\mathbb{D}(f)$ are invertible in $\mathcal{L}(H^{s-1}({\mathbb R})^2)$.
\end{lemma}
\begin{proof}
As a direct consequence of Lemma~\ref{L:MP0}~(iii) we get $\mathbb{D}(f),\,\mathbb{D}(f)^*\in\mathcal{L}(H^{s-1}({\mathbb R})^2)$.
The remaining claims follow from Lemma~\ref{L:MP0}, \eqref{DEest}, and Theorem~\ref{T:L2spec}, by arguing as in the proof of \cite[Theorem~4.2]{MP2022} and Lemma~\ref{L:3}.
\end{proof}
\section{Equivalent formulation and proof of the main results}\label{Sec:4}
In this section we formulate the quasistationary Stokes flow \eqref{STOKES} as an evolution problem for $f$.
The main step is established in Corollary~\ref{C:1}, which provides the unique solvability of Eq.~\eqref{invertcom}, as announced in Theorem~\ref{T:1}.
Using this, in Section~\ref{Sec:4.2} we derive the evolution problem \eqref{NNEP1} for the Stokes flow~\eqref{STOKES}.
This is in analogy to the problem \eqref{NNEP2} obtained in \cite{MP2022} for the corresponding
two-phase Stokes flow \eqref{2STOKES}.
In Section~\ref{Sec:4.3}, Problem~\eqref{NNEP1} is then shown to
be the limit $\mu^+\to0$ of \eqref{NNEP2}.
This is based on a commutator type identity provided in Proposition~\ref{P:2}.
Finally, in Section~\ref{Sec:4.4}, we introduce the general evolution problem \eqref{NNEP} with parameter $\mu^+\geq 0$. This formulation enables us to treat both one- and two-phase flows simultaneously
for initial data in~$H^s({\mathbb R})$, ${s\in(3/2,2)}$, and to establish the main results.
\subsection{A relation connecting $(\mathbb{D}(f)[\beta])'$ and $\mathbb{D}(f)^*[\beta']$}\label{Sec:4.1}\
The following identity is, besides Lemma~\ref{L:3}, the main ingredient in the proof of Corollary~\ref{C:1}.
\begin{lemma}\label{L:A1}
Given $f\in H^{\tau}({\mathbb R})$, $\tau\in(3/2,2)$, and $\beta\in H^1({\mathbb R})^2$, we have~${\mathbb{D}(f)[\beta]\in H^1({\mathbb R})^2}$ with
\begin{equation}\label{comder}
(\mathbb{D}(f)[\beta])'=-\mathbb{D}(f)^*[\beta'].
\end{equation}
\end{lemma}
In order to prepare the proof of Lemma~\ref{L:A1}, which is presented below, we set
\[
D:=\{(\xi,\xi)\in{\mathbb R}^2\,:\,\xi\in{\mathbb R}\}
\]
and define
\begin{equation}\label{kernel}
K(\xi,s):=\frac{r_1f'(s)-r_2}{|r|^4}
\left(\begin{array}{cc}
r_1^2&r_1r_2\\
r_1r_2&r_2^2
\end{array}\right),\qquad (\xi,s)\in{\mathbb R}^2\setminus D,
\end{equation}
where $r=r(\xi,s)$ is defined in \eqref{rxs}.
The double layer potential $\mathbb{D}(f)$ and its $L_2$-adjoint~$\mathbb{D}(f)^*$ can now be expressed as follows:
\begin{align*}
\mathbb{D}(f)[\beta](\xi)&=\int_{\mathbb R} K(\xi,s)\beta(s)\,ds,\\
\mathbb{D}(f)^\ast[\beta](\xi)&=\int_{\mathbb R} K(s,\xi)^\top\beta(s)\,ds=\int_{\mathbb R} K(s,\xi)\beta(s)\,ds
\end{align*}
for $\beta\in L_2({\mathbb R})^2$, see \eqref{defD}.
Both integrals converge when $f\in H^\tau({\mathbb R})$, with $\tau\in(3/2,2),$ since there exists a constant $C>0$ such that
\begin{equation}\label{eq:BBG}
\|K(\xi,s)\|_F\leq C |\xi-s|^{\tau-3/2} \qquad\text{for all $(\xi,\, s)\in{\mathbb R}^2\setminus D$.}
\end{equation}
Here, $\|\cdot\|_F$ is again the Frobenius norm.
Motivated by \eqref{eq:BBG}, we establish the following auxiliary result.
\begin{lemma}\label{exchange}
Let $A\in {\rm C}({\mathbb R}^2)\cap {\rm C}^1({\mathbb R}^2\setminus D)$, $u\in {\rm C}_c({\mathbb R})$, and assume there exist constants~${C>0}$ and $\alpha\in(0,1)$ such that
\[|\partial_\xi A(\xi,s)|\leq C|\xi-s|^{-\alpha} \quad\text{for all $(\xi,s)\in{\mathbb R}^2\setminus D$.}\]
Then, the function $\psi:{\mathbb R}\longrightarrow{\mathbb R}$ given by
\[\psi(\xi):=\int_{{\mathbb R}}A(\xi,s)u(s)\,ds,\qquad \xi\in{\mathbb R},\]
belongs to ${\rm C}^1({\mathbb R})$ and
\[\psi'(\xi)=\int_{\mathbb R}\partial_\xi A(\xi,s)u(s)\,ds,\qquad \xi\in{\mathbb R}.\]
\end{lemma}
\begin{proof}
Given~${\varepsilon\in(0,1)}$, let $\psi_\varepsilon\in {\rm C}({\mathbb R})$ be given by
\[\psi_\varepsilon(\xi):=\int_{\{|\xi-s|>\varepsilon\}} A(\xi,s)u(s)\,ds.\]
Since $u$ has compact support and $A\in {\rm C}({\mathbb R}^2)\cap {\rm C}^1({\mathbb R}^2\setminus D)$, we have $\psi_\varepsilon\in {\rm C}^1({\mathbb R})$ and $\psi_\varepsilon\to \psi$ as $\varepsilon\to0$, uniformly on compact subsets of ${\mathbb R}$.
Since differentiation is a closed operator, the lemma is proved once we show that
\[\psi_\varepsilon'\to \int_{\mathbb R}\partial_\xi A(\cdot,s)u(s)\,ds=:\varphi\quad\text{as $\varepsilon\to 0$,}\]
uniformly on compact subsets of ${\mathbb R}$.
Indeed, given $\xi\in{\mathbb R}$, it holds that
\[\psi_\varepsilon'(\xi)=\int_{\{|\xi-s|>\varepsilon\}} \partial_\xi A(\xi,s)u(s)\,ds+A(\xi,\xi-\varepsilon)u(\xi-\varepsilon)-A(\xi,\xi+\varepsilon)u(\xi+\varepsilon),\]
and therefore
\[|(\psi_\varepsilon'-\varphi)(\xi)|\leq C\int_{\{|\xi-s|<\varepsilon\}}|\xi-s|^{-\alpha}\,ds+|A(\xi,\xi-\varepsilon)u(\xi-\varepsilon)-A(\xi,\xi+\varepsilon)u(\xi+\varepsilon)|,\]
which implies the announced convergence.
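Indeed, the first term on the right-hand side can be computed explicitly,
\begin{equation*}
C\int_{\{|\xi-s|<\varepsilon\}}|\xi-s|^{-\alpha}\,ds=\frac{2C}{1-\alpha}\,\varepsilon^{1-\alpha}\longrightarrow 0\qquad\text{as $\varepsilon\to0$,}
\end{equation*}
uniformly in $\xi\in{\mathbb R}$, while the second term converges to zero uniformly on compact subsets of ${\mathbb R}$ by the continuity of $A$ and $u$.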
\end{proof}
We are now in a position to prove Lemma~\ref{L:A1}.
\begin{proof}[Proof of Lemma~\ref{L:A1}]
We first establish the result for $\beta\in {\rm C}_c^\infty({\mathbb R})^2$.
To this end we define the function $S:=S(f)\in {\rm C}({\mathbb R}^2)$ by
\[S(f)(\xi,s):=\left\{
\begin{array}{ccl}
\displaystyle\frac{f(\xi)-f(s)}{\xi-s},&&\text{if $\xi\neq s$,}\\[2ex]
f'(\xi),&&\text{if $\xi=s$.}
\end{array}\right.\]
The function $S$ is continuously differentiable in ${\mathbb R}^2\setminus D$, where again~${D:=\{(\xi,\xi)\,:\,\xi\in{\mathbb R}\}}$, with partial derivatives expressed, in the notation~\eqref{rxs}, as
\begin{equation*}
\partial_\xi S(\xi,s)=\frac{f'(\xi)r_1-r_2}{r_1^2}\qquad\text{and}\qquad\partial_sS(\xi,s)=\frac{-f'(s)r_1+r_2}{r_1^2}.
\end{equation*}
Now (with arguments $(\xi,s)$ partly suppressed), the kernel $K$ defined in \eqref{kernel} can be expressed as
\begin{align*}
K(\xi,s)&=\frac{1}{(1+S^2)^2}
\left(\begin{array}{cc}
1&S\\
S&S^2
\end{array}\right)\,\frac{r_1f'(s)-r_2}{r_1^2}
=-\frac{1}{(1+S^2)^2}
\left(\begin{array}{cc}
1&S\\
S&S^2
\end{array}\right)\partial_s S(\xi,s)\\[1ex]
&=-\partial_s G\circ S(\xi,s),
\end{align*}
where $G\in {\rm C}^\infty({\mathbb R},{\mathbb R}^{2\times2})$ is a primitive of the smooth matrix valued function
\[\Big[r\longmapsto\frac{1}{(1+r^2)^2}
\left(\begin{array}{cc}
1&r\\
r&r^2
\end{array}
\right)
\Big]:{\mathbb R}\longrightarrow{\mathbb R}^{2\times 2}.\]
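For concreteness, one admissible choice of $G$ (primitives differ only by an additive constant matrix) is
\begin{equation*}
G(r)=\frac{1}{2}
\left(\begin{array}{cc}
\dfrac{r}{1+r^2}+\arctan r& -\dfrac{1}{1+r^2}\\[2ex]
-\dfrac{1}{1+r^2}&\arctan r-\dfrac{r}{1+r^2}
\end{array}\right),\qquad r\in{\mathbb R},
\end{equation*}
which is, in particular, bounded on ${\mathbb R}$; this is consistent with the continuity of $A=G\circ S$ on ${\mathbb R}^2$ observed below.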
Thus, after integration by parts,
\[\mathbb{D}(f)[\beta](\xi)=\int_{\mathbb R} G(S(\xi,s))\beta'(s)\,ds.\]
We next observe that $A:=G\circ S$ is continuous on ${\mathbb R}^2$ and $A\in {\rm C}^1({\mathbb R}^2\setminus D)$ with
\begin{align*}
\partial_\xi A(\xi,s)&=\frac{1}{(1+S^2)^2}
\left(\begin{array}{cc}
1&S\\
S&S^2
\end{array}\right)\partial_\xi S(\xi,s)=-
\frac{1}{(1+S^2)^2}
\left(\begin{array}{cc}
1&S\\
S&S^2
\end{array}\right)\,\frac{-r_1f'(\xi)+r_2}{r_1^2}\\[1ex]
&=-K(s,\xi)=-K(s,\xi)^\top.
\end{align*}
Moreover, since $f\in {\rm BUC}^{\tau-1/2}({\mathbb R})$, setting $\alpha:=\tau-3/2\in(0,1)$, we obtain from~\eqref{eq:BBG} that there exists $C>0$ such that
\begin{equation*}
\|\partial_\xi A(\xi,s)\|_F\leq C |\xi-s|^{-\alpha} \qquad\text{for all $(\xi,\, s)\in{\mathbb R}^2\setminus D$.}
\end{equation*}
We may now infer from Lemma \ref{exchange} that $\mathbb{D}(f)[\beta]\in{\rm C^1}({\mathbb R})$ and
\[(\mathbb{D}(f)[\beta])'(\xi)=\int_{\mathbb R}\partial_\xi A(\xi,s) \beta'(s)\,ds=-\int_{\mathbb R} K(s,\xi)^\top\beta'(s)\,ds=-\mathbb{D}(f)^\ast[\beta'](\xi),\quad \xi\in{\mathbb R}.\]
Let now $\beta\in H^1({\mathbb R})^2$ be arbitrary, and let $(\beta_n)$ be a sequence in ${\rm C}^\infty_c({\mathbb R})^2$ with
$\beta_n\to\beta$ in~${H^1({\mathbb R})^2}$.
Then, by $L_2$-continuity of $\mathbb{D}(f)$, see~\eqref{CDD*},
\[\mathbb{D}(f)[\beta_n]\to\mathbb{D}(f)[\beta]\quad\text{in $L_2({\mathbb R})^2$}\]
and, by $L_2$-continuity of $\mathbb{D}(f)^\ast$, see~\eqref{CDD*},
\[(\mathbb{D}(f)[\beta_n])'= -\mathbb{D}(f)^\ast[\beta'_n]\to-\mathbb{D}(f)^\ast[\beta']\quad\text{in $L_2({\mathbb R})^2$}.\]
Now the result follows by closedness of the derivative operator.
\end{proof}
We are now in a position to establish the solvability of \eqref{invertcom}.
\begin{cor}\label{C:1}
Given $f\in H^3({\mathbb R})$ and $g\in H^2({\mathbb R})^2$, let $\beta\in H^2({\mathbb R})^2$ denote the unique solution to the equation
\begin{equation}\label{Dual1}
\Big(\frac{1}{2}+ \mathbb{D}(f)\Big)[\beta]= g.
\end{equation}
Then $\alpha:=\beta'\in H^1({\mathbb R})^2$ is the unique solution to
\begin{equation}\label{Dual2}
\Big(\frac{1}{2}- \mathbb{D}(f)^*\Big)[\alpha]=g'.
\end{equation}
\end{cor}
\begin{proof}
The claim is a direct consequence of Lemma~\ref{L:3} and Lemma~\ref{L:A1}.
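Indeed, Lemma~\ref{L:A1} ensures that $\mathbb{D}(f)[\beta]\in H^1({\mathbb R})^2$, and differentiating \eqref{Dual1} and using \eqref{comder} yields
\begin{equation*}
g'=\frac{1}{2}\beta'+(\mathbb{D}(f)[\beta])'=\Big(\frac{1}{2}-\mathbb{D}(f)^*\Big)[\beta'],
\end{equation*}
which is \eqref{Dual2} with $\alpha=\beta'$; the uniqueness follows since $1/2-\mathbb{D}(f)^*$ is invertible in $\mathcal{L}(H^1({\mathbb R})^2)$ by Lemma~\ref{L:3}.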
\end{proof}
\subsection{The evolution problem for $f$}\label{Sec:4.2}
Let $ T_+>0$ and $(f,v,p)$ be a solution to \eqref{STOKES} such that for all~$t\in(0,T_+)$ we have $f(t)\in H^3({\mathbb R})$ and
\[
v(t)\in {\rm C}^2(\Omega(t))\cap {\rm C}^1 (\overline{\Omega(t)}),\quad p(t)\in {\rm C}^1(\Omega(t))\cap {\rm C} (\overline{\Omega(t)}),\quad v(t)|_{\Gamma(t)} \circ\Xi_{f(t)}\in H^2({\mathbb R})^2.
\]
Define $\beta(t):=\beta(f(t))\in H^{2}({\mathbb R})^2$ by
\begin{equation}\label{Phi2}
\beta(t):= \Big(\frac{1}{2}+ \mathbb{D}(f(t))\Big)^{-1}[g(t)],
\end{equation}
with $g(t)=g(f(t))$ as defined in \eqref{defgn}.
Then, by Corollary~\ref{C:1}, $\beta(t)'$ is the unique solution to
\[\Big(\frac{1}{2}- \mathbb{D}(f(t))^\ast\Big)[\beta(t)']=g(t)',\]
with the prime denoting the spatial derivative along ${\mathbb R}$.
Theorem~\ref{T:1}, in particular \eqref{vgamma}, then implies that
\begin{equation}\label{veltra}
v(t)|_{\Gamma(t)} \circ\Xi_{f(t)}=\frac{\sigma}{\mu}{\mathbb V}(f(t))[\beta(t)],
\end{equation}
where, given $f\in H^3({\mathbb R})$, the operator ${\mathbb V}(f)\in\mathcal{L}(H^{2}({\mathbb R})^2)$ (see \eqref{Bnmk}) is defined by
\begin{equation}\label{trv}
\begin{aligned}
\mathbb{V}(f)[\beta]&:=\frac{1}{4}\begin{pmatrix}
B_{2,2}^0(f)-B_{0,2}^0(f) &B_{3,2}^0(f)-B_{1,2}^0(f)\\[1ex]
B_{3,2}^0(f)-B_{1,2}^0(f)&-3B_{2,2}^0(f)-B_{0,2}^0(f)
\end{pmatrix}\begin{pmatrix}
\beta_1\\
\beta_2
\end{pmatrix}\\[1ex]
&\qquad+\frac{1}{4}\begin{pmatrix}
-B_{3,2}^0(f)-3B_{1,2}^0(f)&-B_{2,2}^0(f)+B_{0,2}^0(f)\\[1ex]
-B_{2,2}^0(f)+B_{0,2}^0(f)&-B_{3,2}^0(f)+B_{1,2}^0(f)
\end{pmatrix}\begin{pmatrix}
f'\beta_1\\
f'\beta_2
\end{pmatrix}
\end{aligned}
\end{equation}
for $\beta=(\beta_1,\beta_2)^\top\in H^{2}({\mathbb R})^2 $.
Recalling \eqref{StP}$_5$ and \eqref{IC}, we may thus recast~\eqref{STOKES} as the following evolution problem
\begin{equation}\label{NNEP1}
\frac{df}{dt}=\frac{\sigma}{\mu}\mathbb{V}(f)[\beta]\cdot (-f',1),\quad t>0,\qquad f(0)=f^{(0)},
\end{equation}
where the evolution equation should be satisfied pointwise with values in $H^2({\mathbb R})$.
\subsection{Problem \eqref{NNEP1} as the limit $\mu^+\to 0$ of the two-phase Stokes problem}\label{Sec:4.3}
In \cite{MP2022} it is shown that if $T_{+}>0$ and $(f, w^\pm,q^\pm)$ is a solution to the two-phase quasistationary Stokes flow~\eqref{2STOKES} such that for all
$t\in(0,T_{+})$ we have $f(t)\in H^3({\mathbb R})$, $ w^\pm(t)|_{\Gamma(t)}\circ\Xi_{f(t)}\in H^2({\mathbb R})^2$, and
\begin{align*}
w^\pm(t)\in {\rm C}^2(\Omega^\pm(t))\cap {\rm C}^1(\overline{\Omega^\pm(t)}),\quad q^\pm(t)\in {\rm C}^1(\Omega^\pm(t))\cap {\rm C}(\overline{\Omega^\pm(t)}),
\end{align*>
then $f=f(t)$ solves the evolution problem
\begin{equation}\label{NNEP2}
\frac{df}{dt}=\frac{\sigma}{\mu^++\mu^-}\gamma\cdot (-f',1),\quad t>0,\qquad f(0)=f^{(0)},
\end{equation}
where $\gamma(t):=\gamma(f(t))\in H^2({\mathbb R})^2$ is given by
\begin{align}\label{TOSO}
\gamma(t):=\Big(\frac{1}{2}+a_\mu\mathbb{D}(f(t))\Big)^{-1}[{\mathbb V}(f(t))[g(t)]]
\end{align}
and
\[a_\mu:=\frac{\mu^+-\mu^-}{\mu^++\mu^-}\in(-1,1).\]
In \eqref{TOSO}, $g(f(t))\in H^2({\mathbb R})^2$ and ${\mathbb V}(f(t))\in\mathcal{L}(H^{2}({\mathbb R})^2)$, $t\in(0,T_+)$, are defined in \eqref{defgn} and~\eqref{trv},
respectively, and the evolution equation should be again satisfied pointwise with values in~$H^2({\mathbb R}), $ see~\cite[Theorem 4.5]{MP2022}.
We next prove that the formulation~\eqref{NNEP1} coincides with the limit $\mu^+\to0$ of~\eqref{NNEP2} with~${\mu^-=\mu}$ fixed.
Then $a_\mu\to -1$, and as Lemma~\ref{L:3} and~\cite[Theorem 4.5]{MP2022} show, this limit can be taken by continuity in \eqref{NNEP2}.
\begin{prop}\label{P:2}
Given $f\in H^3({\mathbb R})$, it holds that
\begin{equation}\label{eq:co}
\mathbb{V}(f)\Big(\frac{1}{2}+ \mathbb{D}(f)\Big)^{-1} =\Big(\frac{1}{2}- \mathbb{D}(f)\Big)^{-1}\mathbb{V}(f).
\end{equation}
\end{prop}
\begin{proof}
We are going to prove the more compact identity
\begin{equation}\label{eq:co2}
{\mathbb V}(f)\mathbb{D}(f)+\mathbb{D}(f) {\mathbb V}(f)=0,
\end{equation}
which is equivalent to \eqref{eq:co} in view of Lemma~\ref{L:3}.
Let $\beta\in H^2({\mathbb R})^2$ be arbitrary and let $(v,p):=(v,p)(f)[\beta]$ be defined by \eqref{Solutions} (with $\mu=1$).
Then, as shown in Theorem~\ref{T:1}, it holds that
\[
v\in {\rm C}^2(\Omega)\cap {\rm C}^1 (\overline\Omega),\quad p\in {\rm C}^1(\Omega)\cap {\rm C} (\overline\Omega),\quad v|_\Gamma\circ\Xi={\mathbb V}(f)[\beta] \in H^2({\mathbb R})^2,
\]
and, recalling also Corollary~\ref{C:1}, $(v,p)$ solves the boundary value problem
\begin{equation}\label{SBVP21}
\left.
\begin{array}{rclll}
\Delta v-\nabla p&=&0&\mbox{in $\Omega$,}\\
\mathop{\rm div} v&=&0&\mbox{in $\Omega$,}\\{}
T_1(v, p)\tilde\nu&=&(\gamma'/\omega)\circ\Xi^{-1}&\mbox{on $\Gamma$,}\\
(v, p)(x)&\to&0&\mbox{for $|x|\to\infty$,}
\end{array}\right\}
\end{equation}
where
\begin{equation}\label{ran0}
\gamma:= \Big(\frac{1}{2}+\mathbb{D}(f)\Big)[\beta]\in H^2({\mathbb R})^2.
\end{equation}
We next define~${(w^\pm,q^\pm):\Omega^\pm\longrightarrow{\mathbb R}^2\times{\mathbb R}}$ by $(w^-,w^+)=( v,0)$ and $(q^-,q^+)=(p,0)$.
Then
\begin{equation}\label{regwq}
w^\pm\in {\rm C}^2(\Omega^\pm)\cap {\rm C}^1 (\overline{\Omega^\pm}),\quad q^\pm\in {\rm C}^1(\Omega^\pm)\cap {\rm C} (\overline{\Omega^\pm}),\quad w^\pm|_\Gamma \circ\Xi\in H^2({\mathbb R})^2,
\end{equation}
and $(w^\pm,q^\pm)$ solves the boundary value problem
\begin{equation}\label{bvpaux21}
\left.\begin{array}{rcll}
\Delta w^\pm-\nabla q^\pm&=&0&\mbox{in $\Omega^\pm$,}\\
\mathop{\rm div} w^\pm&=&0&\mbox{in $\Omega^\pm$,}\\{}
[w]&=&-v|_\Gamma&\mbox{on $\Gamma$,}\\
{}[T_1(w,q)]\tilde \nu &=&-(\gamma'/\omega)\circ\Xi^{-1}&\mbox{on $\Gamma$,}\\
(w^\pm,q^\pm)(x)&\to&0&\mbox{for $|x|\to\infty$.}
\end{array}\right\}
\end{equation}
Similarly as in the proof of \cite[Proposition 5.1]{MP2022}, we decompose $(w^\pm,q^\pm)$ as a sum
\[
(w^\pm,q^\pm)=(w^\pm_s,q^\pm_s)+(w^\pm_d,q^\pm_d),
\]
where $(w^\pm_s,q^\pm_s)$ solves
\begin{equation}\label{bvpaux21a}
\left.\begin{array}{rcll}
\Delta w_s^\pm-\nabla q_s^\pm&=&0&\mbox{in $\Omega^\pm$,}\\
\mathop{\rm div} w_s^\pm&=&0&\mbox{in $\Omega^\pm$,}\\{}
[w_s]&=&0&\mbox{on $\Gamma$,}\\
{}[T_1(w_s,q_s)]\tilde \nu&=&-(\gamma'/\omega)\circ\Xi^{-1}&\mbox{on $\Gamma$,}\\
(w_s^\pm,q_s^\pm)(x)&\to&0&\mbox{for $|x|\to\infty$.}
\end{array}\right\}
\end{equation}
The system \eqref{bvpaux21a} has been studied in \cite{MP2021} and, according to \cite[Theorem~2.1 and Lemma~A.1]{MP2021}, it has a unique solution which satisfies
\begin{equation}\label{ran1}
w_s^\pm|_\Gamma\circ\Xi={\mathbb V}(f)[\gamma]
\end{equation}
and which has the same regularity as $(w,q)$, see \eqref{regwq}.
Consequently, $(w^\pm_d,q^\pm_d)$ enjoys also the regularity \eqref{regwq} and moreover it solves
\begin{equation}\label{bvpaux22}
\left.\begin{array}{rcll}
\Delta w^\pm_d-\nabla q^\pm_d&=&0&\mbox{in $\Omega^\pm$,}\\
\mathop{\rm div} w^\pm_d&=&0&\mbox{in $\Omega^\pm$,}\\{}
[w_d]&=&-v|_\Gamma&\mbox{on $\Gamma$,}\\
{}[T_1(w_d,q_d)]\tilde \nu&=&0&\mbox{on $\Gamma$,}\\
(w_d^\pm,q_d^\pm)(x)&\to&0&\mbox{for $|x|\to\infty$.}
\end{array}\right\}
\end{equation}
Since $v|_\Gamma\circ\Xi \in H^2({\mathbb R})^2$, also \eqref{bvpaux22} has a unique solution, see \cite[Proposition 2.1]{MP2022}, and, according to \cite[Lemma A.1]{MP2022} we have
\begin{equation}\label{ran2}
w^-_d|_\Gamma\circ \Xi=\Big(\frac{1}{2}+\mathbb{D}(f)\Big)[v|_\Gamma\circ\Xi].
\end{equation}
Since ${\mathbb V}(f)[\beta]=v|_\Gamma\circ\Xi=(w^-_s+w^-_d)|_\Gamma\circ\Xi,$ we infer from \eqref{ran0}, \eqref{ran1}, and \eqref{ran2} that
\[
{\mathbb V}(f)[\beta]={\mathbb V}(f)\Big(\frac{1}{2}+\mathbb{D}(f)\Big)[\beta]+\Big(\frac{1}{2}+\mathbb{D}(f)\Big)[{\mathbb V}(f)[\beta]] \qquad\text{for all $\beta\in H^2({\mathbb R})^2$,}
\]
and \eqref{eq:co2} is now a direct consequence of this identity.
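More explicitly, the above identity states that
\begin{equation*}
{\mathbb V}(f)[\beta]={\mathbb V}(f)[\beta]+\big({\mathbb V}(f)\mathbb{D}(f)+\mathbb{D}(f){\mathbb V}(f)\big)[\beta],\qquad \beta\in H^2({\mathbb R})^2,
\end{equation*}
so that \eqref{eq:co2} indeed holds in $\mathcal{L}(H^2({\mathbb R})^2)$.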
\end{proof}
\subsection{The common equivalent formulation for \eqref{STOKES} and \eqref{2STOKES}}\label{Sec:4.4}\
We are now able to simultaneously consider the evolution equations \eqref{NNEP1} and \eqref{NNEP2} that respectively encode the one-phase problem \eqref{STOKES} and the two-phase problem \eqref{2STOKES}.
For this purpose we set
\[
\mu^-=\mu
\]
and view $\mu^+\in[0,\infty)$ as a parameter.
For $f\in H^3({\mathbb R})$, we set
\begin{equation}\label{Phi}
\Phi(\mu^+,f):=\frac{\sigma}{\mu^++\mu}\Big(\frac{1}{2}+\frac{\mu^+-\mu}{\mu^++\mu}\mathbb{D}(f)\Big)^{-1}[{\mathbb V}(f)[g(f)]]\cdot (-f',1),
\end{equation}
with $g(f)$ defined in \eqref{defgn} and ${\mathbb V}(f)$ in \eqref{trv}.
Now, in view of Proposition~\ref{P:2}, the parameter dependent evolution equation
\begin{equation}\label{NNEP}
\frac{df}{dt}=\Phi(\mu^+,f),\quad t\geq 0,\qquad f(0)=f^{(0)},
\end{equation}
is identical to \eqref{NNEP1} for $\mu^+=0$ and to \eqref{NNEP2} for $\mu^+>0$.
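Indeed, for $\mu^+=0$ we have $a_\mu=-1$, and \eqref{eq:co} together with \eqref{Phi2} gives
\begin{equation*}
\Phi(0,f)=\frac{\sigma}{\mu}\Big(\frac{1}{2}-\mathbb{D}(f)\Big)^{-1}[{\mathbb V}(f)[g(f)]]\cdot(-f',1)
=\frac{\sigma}{\mu}{\mathbb V}(f)\Big(\frac{1}{2}+\mathbb{D}(f)\Big)^{-1}[g(f)]\cdot(-f',1)
=\frac{\sigma}{\mu}{\mathbb V}(f)[\beta]\cdot(-f',1),
\end{equation*}
with $\beta=(1/2+\mathbb{D}(f))^{-1}[g(f)]$ as in \eqref{Phi2}, which is the right-hand side of \eqref{NNEP1}; for $\mu^+>0$ and $\mu^-=\mu$, the definition \eqref{Phi} is just the right-hand side of \eqref{NNEP2} written by means of \eqref{TOSO}.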
Though \eqref{NNEP} has been derived under the assumption that the solution lies in $ H^3({\mathbb R})$ for positive times, this equation may be viewed in a more general analytic setting.
Indeed, given~$f\in H^{s}({\mathbb R})$, $s\in(3/2,2)$, we first deduce from \cite[Lemma~3.5]{MP2021} that~${g(f)\in H^{s-1}({\mathbb R})^2}$ and
\begin{equation}\label{regg}
g\in {\rm C}^\infty(H^{s}({\mathbb R}),H^{s-1}({\mathbb R})^2).
\end{equation}
Moreover, since
\[
[f\longmapsto B^0_{n,m}(f)]\in{\rm C}^\infty(H^s({\mathbb R}),\mathcal{L}( H^{s-1}({\mathbb R}))),\qquad n,\, m\in{\mathbb N},
\]
see \cite[Corollary C.5]{MP2021},
\eqref{DFB}$_1$ and \eqref{trv} yield
\begin{equation}\label{regBV}
[f\longmapsto \mathbb{D}(f)],\, [f\longmapsto {\mathbb V}(f)] \in{\rm C}^\infty(H^s({\mathbb R}),\mathcal{L}( H^{s-1}({\mathbb R})^2)).
\end{equation}
The properties \eqref{regg}, \eqref{regBV}, Lemma~\ref{L:4} (for $\mu^+=0$) and \cite[Theorem 4.2]{MP2022} (for $\mu^+>0$) ensure now that the operator~${\Phi:[0,\infty)\times H^s({\mathbb R})\longrightarrow H^{s-1}({\mathbb R})}$ is well-defined.
Moreover, the smoothness of the function which maps an isomorphism to its inverse together with \eqref{regg} and \eqref{regBV} implies that
\begin{equation}\label{regPhi}
\Phi\in {\rm C}^\infty([0,\infty)\times H^s({\mathbb R}), H^{s-1}({\mathbb R})).
\end{equation}
We conclude this section with the observation that $\Phi(\mu^+,\cdot)$ maps bounded sets of $H^s({\mathbb R})$ to bounded sets of $H^{s-1}({\mathbb R}).$
This property is a consequence of the fact that $g$ maps bounded sets of~$H^{s}({\mathbb R})$ to bounded sets of $H^{s-1}({\mathbb R})^2$, of Lemma~\ref{L:MP0}~(iii),
and of Lemma~\ref{L:4} (for $\mu^+=0$) or \cite[Theorem 4.2]{MP2022} (for $\mu^+>0$).
\subsection{The proofs of the main results}\label{Sec:4.5}
In the case $\mu^+>0$, it was shown in \cite[Theorem~6.1]{MP2022}
that the Fr\'echet derivative $\partial_f\Phi(\mu^+,f)$ is, for each $f\in H^{s}({\mathbb R}),$ the generator of an analytic semigroup in $\mathcal{L}(H^{s-1}({\mathbb R})).$
This property, the observation that $\Phi(\mu^+,\cdot)$ maps bounded sets of~${H^s({\mathbb R})}$ to bounded sets of $H^{s-1}({\mathbb R}),$
the smoothness of $\Phi(\mu^+,\cdot)$, the fully nonlinear parabolic theory from \cite{L95}, and a parameter
trick used also for other problems, see e.g. \cite{An90, ES96, PSS15, MBV19},
were then exploited in \cite{MP2022} to establish the well-posedness of the two-phase quasistationary Stokes flow \eqref{2STOKES}, see \cite[Theorem~1.1]{MP2022}.
All these properties are satisfied also when $\mu^+=0$.
Indeed, using Lemma~\ref{L:4} instead of \cite[Theorem~4.2]{MP2022}, the arguments in the proof of
\cite[Theorem~6.1]{MP2022} remain valid also if $\mu^+=0$, hence also $\partial_f\Phi(0,f)$ is the generator of an analytic semigroup in~${\mathcal{L}(H^{s-1}({\mathbb R}))}$ for each $f\in H^{s}({\mathbb R})$.
In view of these facts we have:
\begin{proof}[Proof of Theorem~\ref{MT1}]
The proof is identical to that of \cite[Theorem~1.1]{MP2022} and therefore we omit the details.
\end{proof}
For the proof of Theorem~\ref{MT2} we consider $\mu^+$ as a parameter and we use a result on the continuous dependence of the solutions to abstract parabolic problems on parameters
provided in \cite[Theorem 8.3.2]{L95}.
\begin{proof}[Proof of Theorem~\ref{MT2}]
In view of \eqref{regPhi} and of the fact that the Fr\'echet derivative $\partial_f\Phi(\mu^+,f)$ is, for each $(\mu^+,f)\in[0,\infty)\times H^{s}({\mathbb R}),$ the generator of an analytic semigroup in $\mathcal{L}(H^{s-1}({\mathbb R}))$, we find that all the assumptions of
\cite[Theorem 8.3.2]{L95} are satisfied in the context of \eqref{NNEP}.
Let thus~$f(\cdot;f^{(0)}):[0,T_+( f^{(0)}))\longrightarrow{\mathbb R}$ denote the maximal solution to \eqref{NNEP} with $\mu^+=0$ and fix~${T\in(0,T_+( f^{(0)}))}$.
In view of \cite[Theorem 8.3.2]{L95} there exist constants $\varepsilon>0$ and~${M>0}$ such that for each $\mu^+ \in(0,\varepsilon]$
the solution $f_{\mu^+}(\cdot; f^{(0)})$ to~\eqref{NNEP} found in \cite[Theorem 6.1]{MP2022} satisfies $T_{+,\mu^+}( f^{(0)})>T$ and
\[
\big\|f(\cdot ;f^{(0)})- f_{\mu^+}(\cdot; f^{(0)})\big\|_{{\rm C}([0,T], H^s({\mathbb R}))}
+\Big\|\frac{d}{dt}\big(f(\cdot; f^{(0)})- f_{\mu^+}(\cdot; f^{(0)})\big)\Big\|_{{\rm C}([0,T], H^{s-1}({\mathbb R}))}\leq M\mu^+.
\]
This completes the proof.
\end{proof}
\end{document}
\begin{document}
\title{Near field linear sampling method for an inverse problem in an electromagnetic waveguide}
\author{Peter Monk$^1$ \and Virginia Selgas$^2$ \and Fan Yang$^3$}
\date{
\small{$^1$Dept. of Mathematical Sciences, University of Delaware, Newark DE 19716, USA\\%
$^2$Dept. de Matem\'aticas, Universidad de Oviedo, EPIG, c/Luis Ortiz Berrocal s/n, 33203 Gij\'on, Spain\\%
$^3$Dept. of Mathematics, California Polytechnic State University, San Luis Obispo, CA 93407-0403, USA\\
email: \texttt{[email protected]}, \texttt{[email protected]}, \texttt{[email protected]}\\[2ex]}
\today
}
\maketitle
\begin{abstract}
We consider the problem of determining the shape and location of an unknown penetrable object in a perfectly conducting electromagnetic waveguide. The inverse problem is posed in the frequency domain and uses multistatic data in the near field. In particular, we assume that we are given measurements of the electric scattered field due to point sources on a cross-section of the waveguide and measured on the same cross-section, which is away from the scatterer but not in the far field.
The problem is solved by using the Linear Sampling Method (LSM) and we also discuss the generalized LSM. We start by
giving a brief discussion of the direct problem and its associated interior transmission problem. Then, we adapt and analyze the LSM to deal with the inverse problem. This {extends the work on the LSM for perfectly conducting scatterers in a waveguide by one of us (Yang) to the detection of penetrable objects}. We provide several useful results concerning reciprocity and
the density of fields due to single layer potentials. We also prove the standard results for the LSM in the waveguide context. Finally we give numerical results to show the performance of the method for simple shapes.
\end{abstract}
\section{Introduction}
The detection or characterization of inaccessible objects in closed waveguides has received considerable attention in
recent years. Much of the work has focused on the scalar acoustic problem. Examples include the use of time-reversal imaging~\cite{RouxFinck-utrasonicWG}, linearization methods~\cite{DediuMacLaughlin}, near-field measurements inside periodic waveguides~\cite{SunZheng-periodicWG}, and volume integral equations with fixed point iteration~\cite{Yurys-IPguides}. We apply the Linear Sampling Method (LSM) in the frequency domain (see \cite{ColtonKress-IP} for background to this method). In the context of waveguides, the use of the LSM was initiated by
L. Bourgeois and E. Luneville~\cite{BLFD} who demonstrated the possibility of using the LSM to detect impenetrable obstacles. Two of us extended this work to penetrable obstacles and three dimensions~\cite{MonkSelgasFD}. {Then, one of us, F. Yang~\cite{FanPhD}, showed that the LSM can be applied to
reconstruct PEC scatterers in a closed waveguide.} It is the latter that is the main background for our current paper. The LSM can also be used with time domain multistatic data, either by using the Fourier transform to move to the frequency domain~\cite{BourgTD} or by working in the time domain directly~\cite{MonkSelgasTD} but we shall not discuss the time domain here.
It should be noted that far field data does not uniquely determine the scatterer in an acoustic waveguide~\cite{ASBinvisible}. {Using near field measurements as in this paper, we will prove that the inverse scattering problem has a unique solution}. Although we are working in the near field, the decay of evanescent modes implies that higher modes cannot be observed in the presence of noise. So non-uniqueness may be a practical problem even in the near field. However, at least for the simple shapes examined in the papers {discussed in the previous paragraph}, this does not seem to cause an issue (also multi-frequency or time domain data might ameliorate the problem). It is reasonable to conjecture that uniqueness is also an issue for the electromagnetic inverse problem using far field measurements, but so far this has not been studied to our knowledge.
This paper is devoted to extending{~\cite{BLFD,MonkSelgasFD,FanPhD} to penetrable }electromagnetic scattering in the frequency domain using single-frequency multistatic data. In particular, we study the model problem of determining the shape and the location of a bounded penetrable obstacle located in a tubular waveguide. By a tubular waveguide we mean that the waveguide has a cross-section represented by a convex, open, bounded domain
$\Sigma\subseteq \mathbb{R}^{2}$ and the waveguide is the infinite domain $W=\Sigma\times\mathbb{R}$ having boundary $\Gamma$. The unknown scatterer occupies a bounded, open and Lipschitz domain $D\subset\Sigma\times\mathbb{R}$. We assume that a probing
electromagnetic field is due to point sources with arbitrary polarization {that are} located on a surface
\[\Sigma_r= \{ x\in\mathbb{R}^3\; ; \;(x_1,x_2)\in\Sigma , \,
x_3=r \} ,
\]
where $r$ is such that $\Sigma_r\cap \overline{D}=\emptyset$. We define $\boldsymbol{\nu}_0$ to be the normal to $\Sigma_r$ pointing in the direction of increasing $x_3$.
We also assume that measurements of the polarization, phase and amplitude of the resulting scattered field can be made on this surface. From these multistatic data, we seek to determine the boundary of $D$ denoted $\partial D$. We shall define the problem in more detail in the next section, but, to summarize the inverse problem we assume that the
scattered field $\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{p})$ due to a point source at $\mathbf{y}\in\Sigma_r$ with polarization $\mathbf{p}$
and measured at $\mathbf{x}\in\Sigma_r$ is known (the data may also be corrupted with random noise, and in practice is only known for a finite number of source and receiver points on $\Sigma_r$). From this data it is desired to determine $\partial D$.
The LSM uses the near field operator defined for
\[
\mathbf{g}\in L^2_T(\Sigma_r)=\{\mathbf{g}\in L^2(\Sigma_r)\;; \; \mathbf{g}\cdot\boldsymbol{\nu}_0 =0\mbox{ a.e. on }\Sigma_r\}
\]
by
\begin{equation}\label{inv:nearfieldop}
N\mathbf{g}(\mathbf{x}) \, = \, \int_{\Sigma_r}\boldsymbol{\nu}_0(\mathbf{x})\times\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{g}(\mathbf{y}))\, dS_{\mathbf{y}} \quad\mbox{for a.e. }\mathbf{x}\in\Sigma _r \, .
\end{equation}
We then consider the Near Field Equation (NFE)
for $\mathbf{g}_z\in L^2_T(\Sigma_r)$ given by
\begin{equation} \label{inv:lsm:nfe}
N\mathbf{g}_{\mathbf{z}}(\mathbf{x})
= \boldsymbol{\nu}_0\times\mathbf{u}^i(\mathbf{x};\mathbf{z},\mathbf{q})
\quad\mbox{a.e. } \mathbf{x}\in\Sigma _r\, ,
\end{equation}
where $\mathbf{u}^i(\mathbf{x};\mathbf{z},\mathbf{q})$ is the field due to an auxiliary source point $\mathbf{z}\in W$ with polarization $\mathbf{q}$ in the empty
waveguide. Of course (\ref{inv:lsm:nfe}) is ill-posed but as usual for the LSM we shall show that there exists
an approximate solution to this equation such that $\mathbf{z}\mapsto\Vert \mathbf{g}_{\mathbf{z}}\Vert_{L_T^2(\Sigma_r)}$ can be used as an indicator
function for $\partial D$ (as usual this is only a partial justification of the LSM, see \cite{ColtonKress-IP}).
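In computations, the ill-posedness of (\ref{inv:lsm:nfe}) is usually handled by Tikhonov regularization; as a sketch (the precise discretization and parameter choice used in Section~\ref{Sec-Numresult} may differ), for a regularization parameter $\alpha>0$ one solves
\begin{equation*}
\left(\alpha I+N^{\ast}N\right)\mathbf{g}_{\mathbf{z}}^{\alpha} \, = \, N^{\ast}\big(\boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot\,;\mathbf{z},\mathbf{q})\big)
\end{equation*}
for each sampling point $\mathbf{z}$ on a grid covering the region of interest, where $N^{\ast}$ denotes the $L^2_T(\Sigma_r)$-adjoint of $N$, and uses $\Vert \mathbf{g}_{\mathbf{z}}^{\alpha}\Vert_{L^2_T(\Sigma_r)}$ (or its reciprocal) as the indicator: this norm is expected to remain moderate for $\mathbf{z}\in D$ and to become large as $\mathbf{z}$ leaves $D$.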
We intend
our inverse problem to be a simplified model for applications of {inverse scattering in a waveguide (see for example the engineering papers}~\cite{Dalarsson-PhD,SjobergLarsson}) although these applications are more complex. Of course we are not the first to apply inverse scattering techniques to electromagnetic waveguides. For example L. Borcea and D.-L. Nguyen~\cite{BorceaNguyen}
used reverse time migration and $\ell_1$ optimization to image objects in terminating waveguides. This could be a
problem amenable to the application of the LSM but is not considered in our paper. Also of interest J. Chen and G. Huang~\cite{ChenHuang} have applied a reverse time migration approach. The closest work to this paper is the
thesis of one of us (F. Yang~\cite{FanPhD}) in which the use of the LSM to detect impenetrable objects in a waveguide
is analyzed and implemented. {These results have not been published in an academic journal, and some} of the results from the current paper are taken from the thesis with acknowledgement.
An outline of the paper is as follows. In the next section (Sec.~\ref{forw}) we briefly discuss the forward problem. In Section \ref{subsec:forward:modal} we
give more details of the {well known} fundamental solution for an electromagnetic waveguide, as well as an analysis of the
\emph{blocked waveguide} problem (or semi-infinite waveguide) in Section~\ref{wg:forward:half_pipe}.
Existence and uniqueness for the forward problem is then summarized in Section~\ref{wg:forward:weak_vari:bdd}.
We then move on to the inverse problem in Section~\ref{wg:inverse}. We start by recalling the dyadic Green's function for the standard scattering problem, and then in Section~\ref{wg:inverse:uniqueness} give a uniqueness result for the electromagnetic waveguide. Section~\ref{wg:inverse:nearfieldop} is devoted to factoring the near field operator and deriving mapping properties, while Section~\ref{wg:inverse:lsm} presents the main result of the paper justifying the LSM for electromagnetic waveguides. We then make some observations concerning the Generalized LSM (GLSM) in Section~\ref{glsm}. This is followed by our last major section, Section~\ref{Sec-Numresult}, in which we present some numerical results, and
we end with a brief conclusion and discussion (Section~\ref{Sec-Concl}).
Throughout this paper, we will distinguish vectors by means of boldface. Moreover, we will denote the divergence and the rotational of a regular enough vector field $\mathbf{u}$ by $\nabla\cdot\mathbf{u}$ and $\nabla\times\mathbf{u}$, respectively.
\section{Forward problem}\langlebel{forw}
As discussed in the introduction, we consider an infinite tubular waveguide $W$, generated by translates
of its cross-section $\Sigma\subseteq \mathbb{R}^{2}$.
Note that the axis of the waveguide is parallel to the $x_3$-axis, and we can identify points in $\mathbb{R}^3=\mathbb{R}^2\times\mathbb{R}$ by writing $\mathbf{x}=(x_1,x_2,x_3)=(\hat{\mathbf{x}},x_3)$.
We then denote by $\boldsymbol{\nu} _0 := (\hat{\boldsymbol{0}},1)$, $\boldsymbol{\nu}:=(\hat{\boldsymbol{\nu}},0)$ and $\boldsymbol{\nu} _D $ the unit vector fields that are normal a.e. to $\Sigma_s:=\Sigma\times\{ s\}$ ($s\in\mathbb{R}$), $\Gamma :=\partial W=\partial\Sigma\times \mathbb{R}$ and $\partial D$, and directed to the right and outwards, respectively; see Figure \ref{fig:domain}.
We shall also denote by ${\mathbf{e}}_1,{\mathbf{e}}_2,{\mathbf{e}}_3$ the standard unit vectors in $\mathbb{R}^3$.
\begin{figure}
\caption{A schematic 2d-view of the problem geometry: The penetrable obstacle occupies an unknown region $D$ inside the waveguide $W$.}
\label{fig:domain}
\end{figure}
We assume that the waveguide is filled with air or vacuum; in particular, the background electric permittivity and magnetic permeability are
$$
\varepsilon _0=1/(\mu_0 c_0^2)\,{\rm F\,m}^{-1} \quad\mbox{ and } \quad \mu_0=4\pi\cdot 10^{-7}\,{\rm H\,m}^{-1} \, ,
$$
respectively. Here and in the sequel $c_0$ stands for the speed of light in vacuum.
Concerning the material which fills the scatterer $D$, we denote by $\tilde\mu (\mathbf{x} )$, $\tilde\varepsilon (\mathbf{x})$ and $\tilde\sigma (\mathbf{x})$ its magnetic permeability, electric permittivity and conductivity, respectively. We assume that the magnetic permeability is constant, $\tilde\mu (\mathbf{x} )=\mu_0$, and that the conductivity is non-negative, $\tilde\sigma (\mathbf{x})\geq 0$.
In general, $\tilde\varepsilon$ is a matrix function of position, but here we understand it as a scalar function (which is the case when the material is isotropic and uniform in all directions). The anisotropic case would involve no extra mathematical
difficulties. We further assume that it is piecewise smooth and bounded so there are constants $\tilde\varepsilon _0$
and $\tilde\varepsilon _1$ such that
\begin{equation*}
0<\tilde\varepsilon _0 < \tilde\varepsilon < \tilde\varepsilon _1 \quad\mbox{a.e. in } W \, .
\end{equation*}
Let us consider the time-harmonic case, and denote by $\omega$ and $k:=\omega\sqrt{\varepsilon_0\mu_0}$ the angular frequency and the wavenumber for the background medium, respectively. We define the relative quantities $\displaystyle\varepsilon = (\frac{\tilde\varepsilon}{\varepsilon_0} + i\frac{\tilde\sigma}{\omega\varepsilon_0} )$ and $\displaystyle\mu = \frac{\tilde\mu}{\mu_0}$; notice that $\varepsilon=\mu=1$ in the background $W\setminus\overline{D}$. Then, the time harmonic system of Maxwell's equations consists of finding the total electric and
magnetic fields denoted by the
complex valued vector fields $\mathbf{E}\equiv \mathbf{E} (\mathbf{x})$ and $\mathbf{H}\equiv \mathbf{H} (\mathbf{x})$, which satisfy the following Maxwell system in a weak sense:
\begin{eqnarray}
-ik\varepsilon \mathbf{E} - \nabla\times \mathbf{H} &=& \boldsymbol{0} \mbox{ in }W\, , \label{time_harmonic_clH} \\[1ex]
-ik\mu \mathbf{H} + \nabla\times \mathbf{E} &=& \boldsymbol{0} \mbox{ in } W\, . \label{time_harmonic_clE}
\end{eqnarray}
Since we have assumed that the boundary of the waveguide is a perfect electric conductor (PEC, e.g. made of metal), then
\begin{equation}\label{fwd-total:bdry_shell}
\boldsymbol{\nu}\times\mathbf{E} = \boldsymbol{0} \quad\mbox{ on } \Gamma \, .
\end{equation}
Using (\ref{time_harmonic_clE}) we can eliminate the unknown $\mathbf{H}$ from (\ref{time_harmonic_clH}) and rewrite the time harmonic Maxwell's system as a second order system of equations in terms of $\mathbf{E}$:
\begin{equation}
\nabla\times \nabla\times\mathbf{E} - k^2\varepsilon\mathbf{E} = \boldsymbol{0} \quad\mbox{in } W \, , \label{fwd-total:maxwell_govern}
\end{equation}
together with the boundary condition (\ref{fwd-total:bdry_shell}). Notice that we could similarly eliminate $\mathbf{E}$ and rewrite the system in terms of $\mathbf{H}$.
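Concretely, once $\mathbf{E}$ is known, the magnetic field is recovered from (\ref{time_harmonic_clE}) by
\begin{equation*}
\mathbf{H} \, = \, \frac{1}{ik\mu}\,\nabla\times\mathbf{E} \quad\mbox{in } W \, .
\end{equation*}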
Let $\mathbf{E}^i$ denote a given incident field, which satisfies the Maxwell's system in the absence of the scatterer $D$:
\begin{eqnarray*}
\nabla\times\nabla\times \mathbf{E}^i - k^2 \mathbf{E}^i = \boldsymbol{0} \quad
&\mbox{in } W_{(-R,R)} \, , \\[1ex]
\boldsymbol{\nu}\times\mathbf{E}^i = \boldsymbol{0} &\mbox{on } \Gamma_{(-R,R)} \, ,
\end{eqnarray*}
where $W_{(-R,R)}=
\Sigma\times (-R,R)$ and $\Gamma_{(-R,R)}=
\partial\Sigma\times (-R,R)$, with $R>0$ big enough so that $\overline{D}\subset W_{(-R,R)}$. Of particular interest for this paper, we shall consider the incident field excited by an electric point source at $\mathbf{y} $ with polarization vector $\mathbf{p} \in\mathbb{R}^3$ ($|\mathbf{p}|=1$); typically, the sources will be located on a cross-section $\Sigma_{r}$ with $|r|>R$ (notice that then $\mathbf{y}\in W\setminus \overline{D}$).
This means that
\begin{eqnarray*}
\nabla\times\nabla\times \mathbf{E}^i - k^2 \mathbf{E}^i = \mathbf{p}\delta_{\mathbf{y}} \quad
&\mbox{in } W\, , \\[1ex]
\boldsymbol{\nu}\times\mathbf{E}^i = \boldsymbol{0} &\mbox{on } \Gamma \, ,
\end{eqnarray*}
where $\delta_{\mathbf{y}}$ denotes the Dirac delta distribution centered at $\mathbf{y}$.
The total field $\mathbf{E}$ is then decomposed into the incident and scattered fields, $\mathbf{E}=\mathbf{E}^i+\mathbf{E}^s$ in $W$. To close the system (\ref{fwd-total:maxwell_govern}-\ref{fwd-total:bdry_shell}) we need a suitable radiation condition on the scattered field $\mathbf{E}^s(\mathbf{x})$ as $x_3 \to\pm\infty$ that imposes the physical restriction that it is \emph{an outgoing wave} in the waveguide. We will formalize this condition by using waveguide modes
in the next section.
\subsection{Modal Solutions to Maxwell's Equations in the Waveguide}\label{subsec:forward:modal}
The waveguide supports modes obtained from either of the following families (c.f. \cite{ChenToTai}, \cite[Appendix A]{FanPhD}):
\begin{itemize}
\item The first family consists of the waveguide modes
$$ \mathbf{M}_m (\mathbf{x}) = \nabla\times \big(u_m(\hat{x})e^{ih_mx_3}{\mathbf{e}}_3\big) = \left(\!\begin{array}{c} \frac{\partial u_m}{\partial x_2} \\[1ex] -\frac{\partial u_m}{\partial x_1} \\[1ex] 0 \end{array} \!\right) e^{ih_mx_3} \quad\mbox{for } m=1,2,\ldots\, , $$
where $h_m^2+\lambda_m^2=k^2$, and $\{(\lambda_m,u_m)\}_{m=0}^{\infty}$ are the eigenpairs of the Neumann problem for the negative surface Laplacian $-\Delta_{\Sigma}$ on the cross-section of the waveguide.
We sort the values $\{\lambda_m\}_{m=0}^{\infty}$ in ascending order, in particular $\lambda_m>0$ for $m=1,2,\ldots$ (notice that $\lambda_0=0$ is not considered because then $u_0$ is constant and $\mathbf{M}_0=\boldsymbol{0}$). We also rescale the eigenfunctions to have $\{u_m\}_{m=0}^{\infty}$ orthonormal in $L^2(\Sigma)$, in which case $\{ \frac{u_m}{\sqrt{\lambda_m+1}}\}_{m=0}^{\infty}$ defines an orthonormal basis of $H^1(\Sigma)$.
\item The second family is given by
$$ \mathbf{N}_n (\mathbf{x} ) = \frac{1}{k}\,\nabla\times\nabla\times\big(v_n(\hat{x})e^{ig_nx_3}{\mathbf{e}}_3\big) = \frac{1}{k} \left(\!\begin{array}{c} ig_n \frac{\partial v_n}{\partial x_1}\\[1ex] ig_n \frac{\partial v_n}{\partial x_2} \\[1ex] \mu_n^2 v_n \end{array} \!\right) e^{ig_nx_3} $$
for $n=1,2,\ldots\, ,$ where $k^2 = \mu_n^2 + g_n^2$, and $\{(\mu_n,v_n)\}_{n=1}^{\infty}$ is the set of eigenpairs of the Dirichlet problem for $-\Delta_{\Sigma}$ on the cross-section of the waveguide.
Here again, we sort $\{\mu_n\}_{n=1}^{\infty}$ increasing and we rescale the eigenfunctions so that $\{v_n\}_{n=1}^{\infty}$ is orthonormal in $L^2(\Sigma)$, in which case $\{ \frac{v_n}{\sqrt{\mu_n}}\}_{n=1}^{\infty}$ is an orthonormal basis of $H^1_0(\Sigma)$.
\end{itemize}
Notice that, since we have assumed that $\Sigma $ is convex, these eigenfunctions have the further regularity $u_m,v_n\in H^2(\Sigma)$; see \cite[Theorems 3.2.1.2-3.2.1.3]{GrisvardBook}.
Here and in the sequel we avoid the cut-off wavenumbers $k\in \{ \lambda_m\} _{m=1}^{\infty} \cup \{ \mu_n \} _{n=1}^{\infty}$. With this assumption, $h_m=\sqrt{k^2-\lambda^2_m}\neq 0$ and $g_n=\sqrt{k^2-\mu^2_n}\neq 0$ for all $m,n=1,2,\ldots$, and we can define them by choosing the square root branch with non-negative real and imaginary parts.
The behavior of the waveguide modes depends on the coefficients $h_m$ and $g_n$:
\begin{itemize}
\item Modes for which $h_m$ (or $g_n$) are real are said to be \emph{traveling waves}. They satisfy a Sommerfeld type outgoing radiation condition along the axis of the waveguide; for example, for $x_3>0$,
$$ \frac{\partial \mathbf{M}_m}{\partial x_3} - ih_m\mathbf{M}_m = \boldsymbol{0} \, . $$
\item Modes for which $h_m$ (or $g_n$) are purely imaginary are said to be \emph{evanescent}. They decay or grow exponentially along the axis of the waveguide; for example, for $x_3>0$,
$$ \mathbf{M}_m = \nabla\times\big(u_me^{ih_m x_3}{\mathbf{e}}_3\big) = \nabla\times\big(u_me^{- | h_m | x_3}{\mathbf{e}}_3\big) \to\boldsymbol{0} \quad\mbox{ as }x_3 \to+\infty\, . $$
\item Recall that we assume that $k$ is not a cut-off frequency (also called a Rayleigh frequency) for the waveguide, so that $h_m\not=0$ and $g_n\not =0$ for all $m,n$; hence every mode is either traveling or evanescent.
\end{itemize}
It is clear that, for a fixed wavenumber $k$, the number of traveling modes is finite and the remaining modes are evanescent. In contrast to a sound-hard acoustic waveguide, there may be no traveling modes at all if the wavenumber $k$ is too small.
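To make the classification above concrete, the following minimal Python sketch (ours, not taken from \cite{FanPhD}; the rectangular cross-section, its side lengths and the wavenumber are illustrative assumptions) lists the spectral values for a hypothetical rectangular cross-section and counts how many modes of each family are traveling at a given wavenumber, using the square-root branch described above.
\begin{verbatim}
import numpy as np

# Minimal sketch (assumption: rectangular cross-section Sigma = (0,a) x (0,b),
# with the convention -Delta_Sigma u_m = lambda_m^2 u_m, so h_m = sqrt(k^2 - lambda_m^2)).
a, b, k = 1.0, 0.7, 9.0      # illustrative side lengths and wavenumber
P = 12                       # truncation order per direction

def spectral_values(dirichlet):
    start = 1 if dirichlet else 0
    vals = [np.pi * np.hypot(p / a, q / b)
            for p in range(start, P) for q in range(start, P)
            if dirichlet or (p, q) != (0, 0)]      # skip the constant Neumann mode
    return np.sort(np.array(vals))

for name, lam in [("TE (Neumann)", spectral_values(False)),
                  ("TM (Dirichlet)", spectral_values(True))]:
    h = np.sqrt(k**2 - lam**2 + 0j)                # branch with Re, Im >= 0
    traveling = int(np.sum(np.abs(h.imag) < 1e-12))
    print(f"{name}: {traveling} traveling, {lam.size - traveling} evanescent "
          f"(within truncation)")
\end{verbatim}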
The constant factor $1/k$ in the definition of $\mathbf{N}_n$ is convenient for the following relations:
\begin{equation}\label{wg:forward:modal:explicit-relation-M-N}
\mathbf{N}_m = \frac1k\nabla\times \mathbf{M}_m\qquad\mbox{and}\qquad \mathbf{M}_m = \frac1k\nabla\times \mathbf{N}_m \qquad\forall m=1,2,\ldots \, .
\end{equation}
For later use, let us consider a bounded section of the waveguide $W_{(s_1,s_2)}=\Sigma\times (s_1,s_2)$ and introduce the space
$$
{\mathbf{X}}_{(s_1,s_2)}\, = \, \{\mathbf{v}\in H(\mathbf{curl},W_{(s_1,s_2)}) ; \, \boldsymbol{\nu}\times\mathbf{v}=0\,\mbox{ on } \Gamma _{(s_1,s_2)} \} \, ,
$$
where $\Gamma _{(s_1,s_2)}=\partial\Sigma\times (s_1,s_2) $. Moreover, on any Lipschitz surface $S$ contained in $W_{(s_1,s_2)}$ with normal vector field $\boldsymbol{\nu}_S$, we consider $L^{2}_T(S) $ to be the subspace of fields in $(L^2(S))^3$ tangential to $S$. In addition,
the standard dual space of $H^{1/2}(S) $ is denoted $\widetilde{H}^{-1/2}(S) $. We also consider the following space of traces:
$$
\widetilde{H}^{-1/2}(\mathrm{div},S) \, = \,
\{ \mathbf{f}\in \widetilde{H}^{-1/2}(S)^3; \, \mathbf{f}=\boldsymbol{\nu}_{S}\times\mathbf{v}|_{S} \,\mbox{ for some }\mathbf{v}\in {\mathbf{X}} _{(s_1,s_2)}\} \, ;
$$
and denote by $\widetilde{H}^{-1/2}(\mathbf{curl},S)$ its dual space.
In particular, when $S=\Sigma_s$ with $s\in [s_1,s_2]$, these spaces can be characterized in terms of modes:
$$
\begin{array}{c}
\mathbf{f}\in\widetilde{H}^{-1/2}(\mathbf{curl},\Sigma_s) \quad \mbox{if, and only if,} \quad \displaystyle\sum _{m=1}^{\infty} |\alpha _m |^2 |\lambda_m | + \sum _{n=1}^{\infty} |\beta_n|^2 |\mu_n |^{3} <\infty \, ,
\\
\mathbf{f}\in\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s) \quad \mbox{if, and only if,} \quad \displaystyle\sum _{m=1}^{\infty} |\alpha _m |^2 | \lambda_m | ^{3} + \sum _{n=1}^{\infty} |\beta_n |^2 |\mu_n | <\infty \, ,
\end{array}
$$
for each $\mathbf{f}=\displaystyle\sum _{m=1}^{\infty} \alpha _m \nabla_{\Sigma} u_m+ \sum _{n=1}^{\infty} \beta_n \nabla_{\Sigma} \times v_n\in \widetilde{H}^{-1/2}(\Sigma _s)^3$;
indeed, the natural norms on $\widetilde{H}^{-1/2}(\mathbf{curl},\Sigma_s) $ and $\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s) $ are equivalent to
\begin{equation}
\begin{array}{c}
|| \mathbf{f} ||_{\widetilde{H}^{-1/2}(\mathbf{curl},\Sigma_s)} = ( \displaystyle\sum _{m=1}^{\infty} |\alpha _m |^2 |\lambda_m | + \sum _{n=1}^{\infty} |\beta_n|^2 |\mu_n |^{3} )^{1/2} ,
\\
|| \mathbf{f} ||_{\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s)} = ( \displaystyle\sum _{m=1}^{\infty} |\alpha _m |^2 | \lambda_m | ^{3} + \sum _{n=1}^{\infty} |\beta_n |^2 |\mu_n | ) ^{1/2} ,
\end{array}\label{norms}
\end{equation}
respectively, see \cite[Paragraph 3.1.3.2]{FanPhD}. Moreover, for any $t\in\mathbb{R}$, the space $H^{t}_T(\Sigma_s)$ consists of tangential fields $\mathbf{f}$ on $\Sigma_s$ such that
$$
\sum _{m=1}^{\infty} |\alpha _m |^2 |\lambda_m |^{2(t+1)} + \sum _{n=1}^{\infty} |\beta_n | ^2 |\mu_n | ^{2(t+1)} <+\infty \, ,
$$
and may be endowed with the norm
$$
|| \mathbf{f} ||^2_{H^{t}_T(\Sigma_s)} = \sum _{m=1}^{\infty} |\alpha _m |^2 |\lambda_m | ^{2 (t+1)} + \sum _{n=1}^{\infty} |\beta_n |^2 |\mu_n| ^{2 (t+1)}\, .
$$
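Since the three norms above are weighted $\ell^2$-sums of the modal coefficients, they are straightforward to evaluate once an expansion of $\mathbf{f}$ is available. The following minimal sketch (ours, not from \cite{FanPhD}; the coefficient arrays and spectral values are assumed to be supplied) simply implements the sums.
\begin{verbatim}
import numpy as np

# Minimal sketch: modal evaluation of the norms in (norms) and of the H^t_T norm.
# alpha, beta are the coefficients of f; lam, mu the Neumann/Dirichlet spectral values.
def modal_norms(alpha, beta, lam, mu, t=0.0):
    a2, b2 = np.abs(alpha)**2, np.abs(beta)**2
    curl_norm = np.sqrt(np.sum(a2 * np.abs(lam)) + np.sum(b2 * np.abs(mu)**3))
    div_norm  = np.sqrt(np.sum(a2 * np.abs(lam)**3) + np.sum(b2 * np.abs(mu)))
    ht_norm   = np.sqrt(np.sum(a2 * np.abs(lam)**(2*(t+1)))
                        + np.sum(b2 * np.abs(mu)**(2*(t+1))))
    return curl_norm, div_norm, ht_norm

# usage with rapidly decaying random coefficients
rng = np.random.default_rng(0)
m = np.arange(1, 200)
print(modal_norms(rng.standard_normal(199) / m**2,
                  rng.standard_normal(199) / m**2, m, m, t=-0.5))
\end{verbatim}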
\subsection{The Blocked Waveguide and the Dirichlet-to-Neumann Map}\label{wg:forward:half_pipe}
The radiation condition, which is yet to be defined, must {constrain} any scattered field to be an \emph{outgoing wave} in the waveguide: when represented by the waveguide modes, each contributing mode must either propagate outwards or decay exponentially away from the scatterer. With this in mind, let us consider a solution $\mathbf{U}$ of Maxwell's system (\ref{fwd-total:maxwell_govern}-\ref{fwd-total:bdry_shell}) in an unbounded section of the waveguide
of the form $W_I=\Sigma\times I$ where $I=(-\infty ,s)$ or $(s,\infty)$ with $s\in\mathbb{R}$.
For $I=(s,\infty)$, we say that $\mathbf{U}$ satisfies the outgoing radiation condition (ORC) if $\mathbf{U} \in H_{loc}(\mathbf{curl} ,W_I) $ and, for $|x_3|$ big enough, it can be written in terms of the waveguide modes as
$$ \mathbf{U} = \sum_{m=1}^{\infty}A_m\mathbf{M}_m + \sum_{n=1}^{\infty}B_n\mathbf{N}_n \, . $$
The following lemma is shown in \cite[Lemma 3.1.3]{FanPhD} and states the well-posedness of the blocked (or semi-infinite) waveguide problem in the absence of any scatterer.
\begin{lemma}\label{fwd:half_pipe:unbdd} Given $\mathbf{Q}\in\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s)$, there exists a unique solution $\mathbf{U}\in H_{loc}(\mathbf{curl},W_{(s,\infty)})$ to the following problem:
\begin{equation} \label{fwd:half_pipe:unbdd-problem}
\begin{array}{rl}
\nabla\times\nabla\times\mathbf{U} - k^2\mathbf{U} = \boldsymbol{0} & \mbox{in } W_{(s,\infty)}\, , \\[1ex]
\boldsymbol{\nu}\times\mathbf{U} = \boldsymbol{0} & \mbox{on } \Gamma_{(s,\infty)}=\partial\Sigma\times (s,+\infty)\, , \\[1ex]
\boldsymbol{\nu}_0\times\mathbf{U} = \mathbf{Q} & \mbox{on } \Sigma_s\, , \\[1ex]
\mbox{$\mathbf{U}$ satisfies the ORC} & \mbox{for } x_3 \to +\infty \, .
\end{array}
\end{equation}
Moreover, the solution has the expansion
\begin{equation} \mathbf{U} = \sum_{m=1}^{\infty} A_m \mathbf{M}_m + \sum_{n=1}^{\infty} B_n \mathbf{N}_n \, , \label{modal}
\end{equation}
when $\mathbf{Q} = \displaystyle\sum_{m=1}^{\infty} A_m \nabla_{\Sigma} u_m - \frac{i}{k} \sum_{n=1}^{\infty} B_n g_n \nabla_{\Sigma} \times v_n$ on $\Sigma_s$.
The same result holds for $W_{(-\infty,s)}$ if $h_m$ and $g_n$ are replaced by $-h_m$ and $-g_n$ in the expansions.
\end{lemma}
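As an illustration of the coefficient relation in Lemma~\ref{fwd:half_pipe:unbdd}, the following sketch (ours; the rectangular cross-section and the normalizations stated in the comments are assumptions) recovers the coefficient $A_m$ of a single TE mode from a sampled tangential trace $\mathbf{Q}$ by an $L^2(\Sigma_s)$ projection onto $\nabla_{\Sigma}u_m$; the TM coefficients $B_n$ could be treated analogously via $\nabla_{\Sigma}\times v_n$.
\begin{verbatim}
import numpy as np

# Minimal sketch (assumptions: Sigma = (0,a) x (0,b), u_m normalized in L^2,
# -Delta_Sigma u_m = lambda_m^2 u_m; only the TE part of Q is treated).
a, b, N = 1.0, 0.7, 64
x = (np.arange(N) + 0.5) * a / N
y = (np.arange(N) + 0.5) * b / N
X, Y = np.meshgrid(x, y, indexing="ij")
dA = (a / N) * (b / N)

def neumann_grad(p, q):          # gradient of the normalized Neumann eigenfunction
    c = np.sqrt((2 - (p == 0)) * (2 - (q == 0)) / (a * b))
    ux = -c * (p*np.pi/a) * np.sin(p*np.pi*X/a) * np.cos(q*np.pi*Y/b)
    uy = -c * (q*np.pi/b) * np.cos(p*np.pi*X/a) * np.sin(q*np.pi*Y/b)
    return ux, uy

def coeff_A(Qx, Qy, p, q):       # A = <Q, grad u_pq> / lambda_pq^2, (p,q) != (0,0)
    ux, uy = neumann_grad(p, q)
    lam2 = (p*np.pi/a)**2 + (q*np.pi/b)**2
    return np.sum(Qx*ux + Qy*uy) * dA / lam2

ux, uy = neumann_grad(2, 1)
print(coeff_A(3.0*ux, 3.0*uy, 2, 1))    # recovers the coefficient 3.0
\end{verbatim}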
We can use Lemma~\ref{fwd:half_pipe:unbdd} to define an important operator for our upcoming analysis, denoted by $T^{\pm}_{s}$, which is the analogue of the Dirichlet-to-Neumann (DtN) map for the Helmholtz equation. Specifically, for some fixed $s\in\mathbb{R}$ and any tangential field $\mathbf{Q}\in\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s)$, we take
\begin{eqnarray}
T^+_{s}(\mathbf{Q}) = \boldsymbol{\nu}_0\times(\nabla\times\mathbf{U})|_{\Sigma_s} \, , \label{fwd-dtn-general-form}
\end{eqnarray}
where $\mathbf{U}$ solves (\ref{fwd:half_pipe:unbdd-problem}) in $W_{(s,\infty)}$.
A similar operator can be defined by considering the analogue on $W_{(-\infty,s)}$, and we identify the operator on each specific cross-section by means of a superscript: $T^{+}_{s}$ and $T^{-}_{s}$ on $\Sigma_s$ when using $W_{(s,\infty )}$ and $W_{(-\infty,s)}$, respectively. The analysis of the two operators $T^{\pm}_{s}$ is analogous and, accordingly, we only give details for $T^+_{s}$.
To derive a series representation of $T^+_{s}$ in terms of the waveguide modes, we can make use of the explicit form of the solution $\mathbf{U}$ provided in Lemma \ref{fwd:half_pipe:unbdd}:
\begin{equation} \label{fwd-dtn-explicit-form}
\begin{array}{rl}
T^{+}_{s}(\boldsymbol{\nu}_0\times \mathbf{U}|_{\Sigma_s}) \, =&\! -\, i\,\displaystyle\sum_{m=1}^{\infty} \Big\langle \boldsymbol{\nu}_0\times \mathbf{U}|_{\Sigma_s}, \left(\!\!\begin{array}{c} \nabla_{\Sigma} u_m \\ 0 \end{array}\!\!\right) \!\Big\rangle_{\!\Sigma_s}\, \frac{h_m}{\lambda_m^2} \left(\! \!\begin{array}{c} \vec{\nabla}_{\Sigma}\times u_m \\ 0\end{array}\!\!\right) \nonumber \\
& \, +\, i\,k^2\, \displaystyle\sum_{n=1}^{\infty} \Big\langle \boldsymbol{\nu}_0\times \mathbf{U}|_{\Sigma_s}, \left(\!\!\begin{array}{c} \vec{\nabla}_{\Sigma}\times v_n \\ 0 \end{array}\!\!\right) \!\Big\rangle_{\!\Sigma_s} \, \frac{1}{g_n\mu_n^2}\! \left(\!\!\begin{array}{c} \nabla_{\Sigma} v_n \\ 0 \end{array}\!\!\right)\! .
\end{array}
\end{equation}
This expression is explicitly derived in \cite[Section 3.1.3.5]{FanPhD} and then used to deduce the following properties of the operator $T^{+}_{s}$ using the characterization of the norms in (\ref{norms}) (see \cite[Lemmas 3.1.4 and 3.1.5]{FanPhD}).
\begin{lemma}\label{fwd-dtn-bdd&analytic}
The operator $T^{+}_{s}$ is bounded from $\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s)$ to $\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_s)$. Moreover, there exists a neighborhood $\mathfrak{B}\subset \mathbb{C}$ of $k$ where $T^{+}_{s}$ depends analytically on $k$.
\end{lemma}
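In coefficient space, (\ref{fwd-dtn-explicit-form}) says that $T^{+}_{s}$ acts diagonally on the modal expansion of the tangential trace. The following minimal sketch (ours, not from \cite{FanPhD}) applies exactly the multipliers appearing in that formula; its inputs are the duality pairings in (\ref{fwd-dtn-explicit-form}), and no attempt is made to normalize the bases.
\begin{verbatim}
import numpy as np

# Minimal sketch: diagonal action of T^+_s in coefficient space.
# pm = pairings of nu_0 x U with (grad_Sigma u_m, 0),
# pn = pairings of nu_0 x U with (curl_Sigma v_n, 0);
# the outputs multiply (curl_Sigma u_m, 0) and (grad_Sigma v_n, 0), respectively.
def dtn_plus(pm, pn, lam, mu, k):
    h = np.sqrt(k**2 - lam**2 + 0j)         # branch with Re, Im >= 0
    g = np.sqrt(k**2 - mu**2 + 0j)
    te_out = -1j * pm * h / lam**2
    tm_out = 1j * k**2 * pn / (g * mu**2)
    return te_out, tm_out

lam = np.array([3.1, 4.4, 6.3]); mu = np.array([5.4, 7.0, 8.9])
print(dtn_plus(np.array([1.0, 0.5, 0.2]), np.array([0.3, 0.1, 0.05]), lam, mu, k=9.0))
\end{verbatim}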
Let us notice that, by means of $T^{+}_{s}$, the blocked waveguide problem (\ref{fwd:half_pipe:unbdd-problem}) can be rewritten in a bounded section of waveguide. Accordingly, the following result is the counterpart of Lemma~\ref{fwd:half_pipe:unbdd}, and we refer to \cite[Corollary 3.1.1]{FanPhD} for more details.
\begin{corollary}\label{fwd:half_pipe:bdd} For $s_1<s_2$ and $\mathbf{Q}\in\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_{s_1})$, there exists a unique $\mathbf{U}\in H(\mathbf{curl},W_{(s_1,s_2)})$ such that
\begin{equation}
\begin{array}{rcl}
\nabla\times\nabla\times\mathbf{U} - k^2\mathbf{U} = \boldsymbol{0} & \mbox{in} & W_{(s_1,s_2)} \, , \\[1ex]
\boldsymbol{\nu}\times\mathbf{U} = \boldsymbol{0} & \mbox{on} & \Gamma_{(s_1,s_2)}\, , \\[1ex]
\boldsymbol{\nu}_0\times\mathbf{U} = \mathbf{Q} & \mbox{on} & \Sigma_{s_1}\, , \\[1ex]
\boldsymbol{\nu}_0\times(\nabla\times\mathbf{U}) = T^{+}_{s_2}(\boldsymbol{\nu}_0\times\mathbf{U}) & \mbox{on} & \Sigma_{s_2} \, . \label{fwd:half_pipe:bdd-problem}
\end{array}
\end{equation}
\end{corollary}
\subsection{Analysis of the Forward Problem}\label{wg:forward:weak_vari:bdd}
Now we have all the tools we need both to impose a suitable radiation condition on the scattered field and to analyze the forward problem (\ref{fwd-total:bdry_shell})-(\ref{fwd-total:maxwell_govern}) closed with such a radiation condition.
Let the scatterer $D$ be illuminated by a point source at $\mathbf{y}\in W$ located sufficiently far below $D$. By this we mean that we can choose $R>0$ such that $\overline{D}\subset W_{(-R,R)}=\Sigma\times (-R,R)$ and $y_3<R$. We then write the forward problem
as the equivalent problem of finding the total field $\mathbf{E}\in H(\mathbf{curl},W_{(-R,R)})$ such that
\begin{equation}\label{fwd-total:problem_truncated}
\begin{array}{rcl}
\nabla\times\nabla\times\mathbf{E} - k^2\varepsilon\mathbf{E} = \boldsymbol{0} & \mbox{in} &W_{(-R,R)}\, , \\[1ex]
\boldsymbol{\nu}\times\mathbf{E} = \boldsymbol{0} & \mbox{on} & \Gamma_{(-R,R)}\, , \\[1ex]
\pm\boldsymbol{\nu}_0\times(\nabla\times (\mathbf{E}-\mathbf{E}^i)) = T^{\pm}_{R}(\boldsymbol{\nu}_0\times(\mathbf{E}-\mathbf{E}^i)) & \mbox{on} & \Sigma_{\pm R} \, .
\end{array}
\end{equation}
This problem can be equivalently rewritten in terms of the scattered field as the problem of finding
$\mathbf{E}^s\in H(\mathbf{curl},W_{(-R,R)})$ such that
\begin{equation}\label{fwd-sc:problem_truncated}
\begin{array}{rcl}
\nabla\times\nabla\times\mathbf{E}^s - k^2\varepsilon\mathbf{E}^s = k^2(\varepsilon - 1) \mathbf{E}^i& \mbox{in} & W_{(-R,R)}\, , \\[1ex]
\boldsymbol{\nu}\times\mathbf{E}^s = \boldsymbol{0} & \mbox{on} & \Gamma_{(-R,R)}\, , \\[1ex]
\pm\boldsymbol{\nu}_0\times(\nabla\times\mathbf{E}^s) = T^{\pm }_{R}(\boldsymbol{\nu}_0\times\mathbf{E}^s) & \mbox{on} & \Sigma_{\pm R} \, .
\end{array}
\end{equation}
In order to write the scattering problem in weak form, we define
the trace operator $\boldsymbol{\gamma} _T:H(\mathbf{curl},W_{(-R,R)})\to H^{-1/2}( \mathbf{curl} ,\Sigma_{ \pm R})$ for smooth vector fields on $W_{(-R,R)}$ by
$\boldsymbol{\gamma}_T\mathbf{v} =\boldsymbol{\nu}_0\times (\mathbf{v} |_{\Sigma_{ \pm R}} \times\boldsymbol{\nu}_0)$. We also denote by $(\cdot, \cdot )_{W_{(-R,R)}}$ the inner product in $L^2(W_{(-R,R)})^3$, and by $\langle \cdot , \cdot \rangle_{\Sigma_{ \pm R}}$ the duality product in $\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_{\pm R})\times \widetilde{H}^{-1/2}(\mathbf{curl},\Sigma_{\pm R})$ so that:
$$
\begin{array}{l}
\displaystyle (\mathbf{u},\mathbf{v})_{W_{(-R,R)}} = \int_{W_{(-R,R)} }\mathbf{u}\cdot\overline{\mathbf{v}}\, d\mathbf{x} \quad\forall \mathbf{u},\mathbf{v}\in L^2(W_{(-R,R)})^3\, ,
\\
\displaystyle \langle\mathbf{u},\mathbf{v}\rangle_{\Sigma _{\pm R}} = \int_{\Sigma _{ \pm R}} \mathbf{u}\cdot\overline{\mathbf{v}}\, dS \quad\forall \mathbf{u},\mathbf{v}\in L^2(\Sigma _{ \pm R})^3\, .
\end{array}
$$
Formally, multiplying the first equation of (\ref{fwd-total:problem_truncated}) by the complex conjugate of a smooth test function $\mathbf{v}\in C^\infty(W_{(-R,R)})^3$ and applying Green's identity, we have that $\mathbf{E}\in H(\mathbf{curl},W_{(-R,R)})$
satisfies
\begin{equation}\label{fwd-total:weak_form}
\begin{array}{l}
\displaystyle ( \nabla\times\mathbf{E},\nabla\times {\mathbf{v}} )_{W_{(-R,R)}}\! - k^2 (\varepsilon\mathbf{E}, {\mathbf{v}} )_{W_{(-R,R)}} \!
+ \langle T^{\pm }_{R}(\boldsymbol{\nu}_0\times\mathbf{E}) ,\boldsymbol{\gamma}_T {\mathbf{v}} \rangle_{\Sigma_{\pm R}} =
\mathcal{F}({\mathbf{v}}) \, .
\end{array}
\end{equation}
Here the antilinear functional $\mathcal{F}$ is given by
$$
\mathcal{F}(\mathbf{v}) =
\langle ( T^{\pm }_{ R}(\boldsymbol{\nu}_0\times\mathbf{E}^i) \mp\boldsymbol{\nu}_0\times (\nabla\times\mathbf{E}^i ) ) , \boldsymbol{\gamma}_T {\mathbf{v}} \rangle _{\Sigma_{ \pm R}}
\, .
$$
A weak formulation of the scattered field problem (\ref{fwd-sc:problem_truncated}) is obtained in the same way.
Given an incident field $\mathbf{E}^i $, the total field $\mathbf{E}\in\mathbf{X}_{(-R,R)}$ is a weak solution to the forward scattering problem if it satisfies (\ref{fwd-total:weak_form}) for all $\mathbf{v}\in\mathbf{X}_{(-R,R)}$. Problem (\ref{fwd-total:weak_form}) can then be analyzed using the analytic Fredholm theory to prove the following result.
\begin{theorem} \label{fwd:vari_well-posedness}
If $\tilde{\sigma}\geq\sigma_0>0$ in $D$ (or in some open bounded subdomain of $D$ with non-zero measure), then the forward scattering problem (\ref{fwd-total:weak_form}) is well-posed for any real wavenumber $k$. If $\tilde{\sigma}=0$ in $D$, then the problem is well-posed except for, at most, a discrete set of real $k$ values whose only possible accumulation point is $\infty$.
\end{theorem}
\begin{remark}
In the remainder of the paper we assume that $k$ is such that the forward problem is well-posed.
\end{remark}
\begin{proof}
We give a sketch of the proof; for more details of a similar argument in the PEC case see
\cite[Section 3.2]{FanPhD}.
First note that the space $\mathbf{X}_{(-R,R)}$ admits the Helmholtz decomposition $\mathbf{X}_{(-R,R)}= \nabla S_{(-R,R)} \oplus \mathbf{X}^+_{(-R,R)} $, where
$$
\begin{array}{l}
S_{(-R,R)}= \{ p\in H^1(W_{(-R,R)}) ; \, p=0 \,\mbox{on } \Gamma _{(-R,R)} \} \, ,\\[1ex]
\mathbf{X}_{(-R,R)}^+=\big\{ \mathbf{v}^+\!\!\in\! \mathbf{X}_{(-R,R)} ; \, k^2 ( \varepsilon\mathbf{v}^+ ,\nabla q)_{W_{(-R,R)}} \\[.5ex]
\hspace*{5cm} = \langle T^{\pm }_{R}(\boldsymbol{\nu}_0\times\mathbf{v}^+) , \boldsymbol{\gamma}_T (\nabla q)\rangle _{\Sigma _{\pm R}} \;\forall q\in S_{(-R,R)} \big\} \, ;
\end{array}
$$
see \cite[Lemma 3.2.2]{FanPhD} for a similar result. Then
we can
write $\mathbf{E}=\nabla p+\mathbf{E}^+$ with $p\in S_{(-R,R)}$ and $\mathbf{E}^+\in\mathbf{X}_{(-R,R)}^+$. The problem for $p$ decouples by taking the test function in (\ref{fwd-total:weak_form}) with the form $\mathbf{v}=\nabla q$, and $p\in S_{(-R,R)}$ must solve
\begin{equation*}
k^2 ( \varepsilon\nabla p ,\nabla q )_{W_{(-R,R)}} -\langle T^{\pm }_{R}(\boldsymbol{\nu}_0\times\nabla p) , \boldsymbol{\gamma}_T ({\nabla q}) \rangle _{\Sigma _{\pm R}}
=
\mathcal{F} (\nabla q)
\quad\forall q\in S_{(-R,R)} \, .
\end{equation*}
This auxiliary problem satisfies the hypotheses of the Lax-Milgram lemma (see \cite[Lemma $3.2.1$]{FanPhD}) and, in consequence, $p\in S_{(-R,R)}$ is uniquely determined.
Using the function $p$ from the previous step, the forward scattering problem can be rewritten in terms of $\mathbf{E}^+\in\mathbf{X}_{(-R,R)}^+$ as follows:
\begin{equation}\label{fwd-total:weak_form+}
\begin{array}{l}
\displaystyle\big( \nabla\times\mathbf{E}^+,\nabla\times {\mathbf{v}}^+\big)_{W_{(-R,R)}} - k^2\big(\varepsilon\mathbf{E}^+, {\mathbf{v}}^+\big)_{W_{(-R,R)}}
+\, \big\langle T^{ \pm }_{R}(\boldsymbol{\nu}_0\times\mathbf{E}^+) ,\boldsymbol{\gamma}_T {\mathbf{v}}^+ \big\rangle_{\Sigma_{\pm R}} \, = \,
\tilde{\mathcal{F}}(\mathbf{v}^+)
\end{array}
\end{equation}
for all $\mathbf{v}^+\in\mathbf{X}_{(-R,R)}^+$, where $\displaystyle \tilde{\mathcal{F}}(\mathbf{v}^+)
=
\mathcal{F}(\mathbf{v}^+)
+ k^2 \big(\varepsilon \nabla p ,{\mathbf{v}}^+\big)_{W_{(-R,R)}}
- \big\langle T^{\pm}_{ R}(\boldsymbol{\nu}_0\times\nabla p) , \boldsymbol{\gamma}_T {\mathbf{v}}^+ \big\rangle _{\Sigma_{\pm R}}$.
Notice that
\begin{itemize}
\item the sesquilinear form $a_k^+:\mathbf{X}_{(-R,R)}^+\times \mathbf{X}_{(-R,R)}^+\to \mathbb{C}$ defined by
\begin{equation*}
\begin{array}{l}
a_k^+(\mathbf{u}^+,\mathbf{v}^+)\, = \,\displaystyle ( \nabla\times\mathbf{u}^+,\nabla\times {\mathbf{v}}^+ )_{W_{(-R,R)}} + k^2 (\varepsilon\mathbf{u}^+, {\mathbf{v}}^+ )_{W_{(-R,R)}}
+ \langle T^{\pm ,0}_{R}(\boldsymbol{\nu}_0\times\mathbf{u}^+) ,\boldsymbol{\gamma}_T {\mathbf{v}}^+ \rangle_{\Sigma_{\pm R}}
\, ,
\end{array}
\end{equation*}
is bounded and coercive;
\item $\mathbf{X}_{(-R,R)}^+$ is compactly embedded in $L^2(W_{(-R,R)})^3$;
\item the DtN map $T^{ \pm }_{R} : \widetilde{H}^{-1/2}(\mathrm{div},\Sigma_{\pm R}) \to \widetilde{H}^{-1/2}(\mathrm{div},\Sigma_{\pm R}) $ is the sum of a positive operator $T^{\pm ,0 }_{R}$ and a compact operator $T^{\pm ,c }_{R}$ (see \cite[Lemma 3.2.4]{FanPhD}).
\end{itemize}
This allows us to rewrite (\ref{fwd-total:weak_form+}) in operator form as \[
(I + B_k) \,\mathbf{E}^+ = \mathbf{f},
\] where the operator $B_k:\mathbf{X}_{(-R,R)}^+\to \mathbf{X}^+_{(-R,R)}$ is compact and analytic with respect to $k$ in a suitable subdomain of the complex plane containing the real line (with the cut-off frequencies removed, see Section \ref{subsec:forward:modal}). Now we can see that
this operator equation admits at most one solution $\mathbf{E}^+\in \mathbf{X}_{(-R,R)}^+$ in the following two cases:
\begin{itemize}
\item for any real wavenumber $k\in\mathbb{R}$, if $\tilde{\sigma}\geq\sigma_0>0$ in $D$ (or in some open bounded subdomain of $D$ with non-zero measure);
\item for any purely imaginary $k = ic$ with $c>0$ small enough, if $\tilde{\sigma}=0$ in $D$;
\end{itemize}
see the proof of \cite[Theorem 3.2.1]{FanPhD} for an analogous result.
Then, by applying the analytic Fredholm theory~\cite{ColtonKress-IP}, we conclude that the forward problem is well-posed except for (at most) a countable set of real wavenumbers.
\end{proof}
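The structure exploited in the proof (the identity perturbed by a compact family depending analytically on $k$) can be illustrated numerically. The following minimal sketch (ours; $B_k=k^2C$ with a fixed random matrix $C$ is a synthetic surrogate, not a discretization of the actual operator in (\ref{fwd-total:weak_form+})) scans real wavenumbers and reports where $I+B_k$ comes closest to being singular, mimicking the discrete exceptional set predicted by the analytic Fredholm theory.
\begin{verbatim}
import numpy as np

# Minimal sketch: the smallest singular value of I + B_k along the real k-axis
# dips towards zero only near isolated wavenumbers.
rng = np.random.default_rng(1)
n = 40
C = rng.standard_normal((n, n)) / n            # fixed "compact" part (synthetic)
ks = np.linspace(0.1, 15.0, 300)
smin = np.array([np.linalg.svd(np.eye(n) + k**2 * C, compute_uv=False)[-1]
                 for k in ks])
print("smallest value of sigma_min(I + B_k) over the scan:", smin.min())
print("attained near k =", ks[smin.argmin()])
\end{verbatim}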
\section{Inverse Problem}\label{wg:inverse}
In this section, we shall provide a theoretical basis for the Linear Sampling Method (LSM) approach to the inverse problem in the waveguide geometry. There are two important results here: the uniqueness of the solution of the inverse problem, and the justification of the LSM for the reconstruction of the shape of the scatterer.
To this end, we recall some results about the background Green's function in the waveguide. It is well-known that, for electromagnetic waves, the Green's functions are dyadic functions (second order tensors that can be written as $3\times3$ matrices) with appropriate boundary conditions on $\Gamma$. Specifically, we here consider the electric Green's function $\mathbb{G}_{e}$ which satisfies a PEC condition on the boundary $\Gamma$ of the waveguide:
\begin{eqnarray*}
\nabla_{\mathbf{x}}\times\nabla_{\mathbf{x}}\times\mathbb{G}_{e}(\mathbf{x},\mathbf{y}) - k^2\mathbb{G}_{e}(\mathbf{x},\mathbf{y}) \, = \,\delta_{\mathbf{x}-\mathbf{y}} \, \mathbb{I} \quad\mbox{in } W\, ,\\[1ex]
\boldsymbol{\nu}\times\mathbb{G}_{e}(\mathbf{x},\mathbf{y}) \, = \, \boldsymbol{0}\quad\mbox{on }\Gamma \, ,
\end{eqnarray*}
where $\mathbf{y}\in W$ represents the point source and $\mathbf{x}\in W$ any evaluation point. Moreover, $\mathbb{I}$ is the identity matrix, whereas $\mathbf{a}\times\mathbb{B}$ and $\nabla\times\mathbb{B}$ denote the matrices whose $l$-th columns are $\mathbf{a}\times\mathbf{b}_l$ and $\nabla\times \mathbf{b}_l$, respectively, for any column vector function $\mathbf{a}$ and any dyadic function $\mathbb{B}$ (written as a matrix with columns $\mathbf{b}_l$). In particular,
an incident wave due to a point source at $\mathbf{z}\in W_R\setminus\overline{D}$ with polarization $\mathbf{p}\ ( |\mathbf{p} |\not=0)$ is given by $\mathbf{u}^i(\mathbf{x};\mathbf{z},\mathbf{p})=\mathbb{G}_e(\mathbf{x},\mathbf{z})\mathbf{p}$; and we recall that $\mathbf{u}^s (\mathbf{x};\mathbf{z},\mathbf{p})$ stands for its associated scattered field (that is, the solution of the forward problem (\ref{fwd-sc:problem_truncated}) for such an incident wave), and similar notation is used for the associated total field $ \mathbf{u} (\mathbf{x};\mathbf{z},\mathbf{p}) = \mathbf{u}^s (\mathbf{x};\mathbf{z},\mathbf{p} ) + \mathbf{u}^i(\mathbf{x};\mathbf{z},\mathbf{p})$.
We wish to compare the above electric Green's function in the waveguide with the one in free space, which solves
\begin{equation*}
\nabla_{\mathbf{x}}\times\nabla_{\mathbf{x}}\times\mathbb{G}_{0}(\mathbf{x},\mathbf{y}) - k^2\mathbb{G}_{0}(\mathbf{x},\mathbf{y}) \, = \,\delta_{\mathbf{x}-\mathbf{y}} \, \mathbb{I} \quad\mbox{in } \mathbb{R}^3\, ,
\end{equation*}
together with a tensor form of the Silver--M\"uller radiation condition. This can be written explicitly as
\begin{eqnarray}
\mathbb{G}_0(\mathbf{x},\mathbf{y}) = \Phi(\mathbf{x},\mathbf{y})\mathbb{I} + \frac{1}{k^2}\nabla_\mathbf{y}\nabla_\mathbf{y}\Phi(\mathbf{x},\mathbf{y})\quad \forall \mathbf{x},\mathbf{y}\in\mathbb{R}^3 \mbox{ with }\mathbf{x}\ne\mathbf{y} \, , \label{inv-green-free-space}
\end{eqnarray}
where $\Phi$ stands for the fundamental solution of the Helmholtz equation:
$$
\displaystyle \Phi (\mathbf{x},\mathbf{y}) = \frac{\exp(ik |\mathbf{x}-\mathbf{y} |)}{4\pi |\mathbf{x}-\mathbf{y} |} \quad \forall \mathbf{x},\mathbf{y}\in\mathbb{R}^3 \mbox{ with }\mathbf{x}\ne\mathbf{y}\, ,
$$
and $\nabla_\mathbf{y}\nabla_\mathbf{y}\Phi(\mathbf{x},\mathbf{y})$ denotes its Hessian matrix.
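For reference, the following minimal sketch (ours) evaluates $\mathbb{G}_0(\mathbf{x},\mathbf{y})\mathbf{p}$ directly from (\ref{inv-green-free-space}); to keep it short, the Hessian of $\Phi$ is approximated by central differences rather than by its closed-form expression.
\begin{verbatim}
import numpy as np

# Minimal sketch: G_0(x,y) p = Phi(x,y) p + (1/k^2) Hess_y Phi(x,y) p,
# with the Hessian approximated by second-order central differences.
def Phi(x, y, k):
    r = np.linalg.norm(x - y)
    return np.exp(1j * k * r) / (4 * np.pi * r)

def G0_times_p(x, y, p, k, h=1e-4):
    e = np.eye(3)
    hess = np.empty((3, 3), dtype=complex)
    for i in range(3):
        for j in range(3):
            hess[i, j] = (Phi(x, y + h*(e[i]+e[j]), k) - Phi(x, y + h*(e[i]-e[j]), k)
                          - Phi(x, y - h*(e[i]-e[j]), k) + Phi(x, y - h*(e[i]+e[j]), k)
                          ) / (4 * h**2)
    return Phi(x, y, k) * p + hess @ p / k**2

x = np.array([0.3, 0.1, 2.0]); y = np.array([0.5, 0.4, -1.0])
print(G0_times_p(x, y, np.array([0.0, 0.0, 1.0]), k=9.0))
\end{verbatim}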
The following result is proven in \cite[Lemma 3.3.1]{FanPhD} and provides a decomposition of the Green's function in the waveguide in terms of that in free space. This gives a clear statement of the singularity present in the
waveguide fundamental solution (the result in \cite{FanPhD} was proved when $\Sigma$ has a smooth boundary; here the regularity of the remainder term is determined by the cross-section $\Sigma$).
\begin{lemma}\label{inv:green-decompose} In any bounded segment $W_{(s_1,s_2)}$ of the waveguide $W$, the electric type dyadic Green's function for the waveguide can be decomposed into $\mathbb{G}_e = \mathbb{G}_0 + \mathbb{J}$, where the dyadic function $\mathbb{J}$ is in $H^1_{\mathrm{loc}}$.
\end{lemma}
\subsection{Uniqueness Result}\label{wg:inverse:uniqueness}
In order to prove the uniqueness result for the inverse problem, we start by stating two lemmas: The first gives us a \emph{representation formula} of the solution of the forward problem in the waveguide excluding the scatterer $D$; this result is analogous to the well-known Stratton-Chu formula in free-space and is proven in \cite[Lemma 3.3.2]{FanPhD}.
\begin{lemma}\label{inv:uniqueness:rep_fml}(Representation Formula)
Let $\mathbf{U}$ be a solution of Maxwell's system
\begin{equation*}
\begin{array}{ll}
\nabla\times\nabla\times\mathbf{U}- k^2 \mathbf{U} \, = \, \boldsymbol{0} & \quad\mbox{in } W\setminus\overline{D}\, ,\\[1ex]
\boldsymbol{\nu}\times\mathbf{U} \, =\, \boldsymbol{0} &\quad\mbox{on }\Gamma \, , \\[1ex]
\mbox{$\mathbf{U}$ satisfies the ORC} & \quad\mbox{for } \vert x_3\vert \to +\infty \, .
\end{array}
\end{equation*}
Then
$$
\mathbf{U}(\mathbf{x})
= \int_{\partial {D} } \!\!\! \Big(
\big( \boldsymbol{\nu}_{D}\times (\nabla\times\mathbf{U}(\mathbf{y})) \big)\cdot\mathbb{G}_e(\mathbf{x},\mathbf{y})
\,-\, \mathbf{U}(\mathbf{y})\cdot\big( \boldsymbol{\nu}_{D}\times (\nabla_{\mathbf{y}}\times\mathbb{G}_e(\mathbf{x},\mathbf{y})) \big)
\Big) dS_{\mathbf{y}} \, .
$$
\end{lemma}
\begin{remark}\begin{enumerate}\item
Above, the dot-product is understood as vector-matrix or matrix-vector multiplication depending on the position of the dyadic and vector functions.
\item This result is proved in the same way as the corresponding Stratton-Chu formula in free space~\cite{Kirsch-Hettlich}.
\end{enumerate}
\end{remark}
The next result states that the scattered field satisfies the standard reciprocity relation.
\begin{lemma}\label{inv:uniqueness:reciprocity}(Reciprocity relation)
For any points $\mathbf{x},\mathbf{z}\in W\backslash\overline{D}$ and polarization vectors $\mathbf{p},\mathbf{q}\in\mathbb{R}^3$ ($ |\mathbf{p} |\not=0,\; |\mathbf{q} |\not=0$), it holds
$$ \mathbf{u}^s(\mathbf{x};\mathbf{z},\mathbf{p})\cdot\mathbf{q} = \mathbf{u}^s(\mathbf{z};\mathbf{x},\mathbf{q})\cdot\mathbf{p}\, . $$
\end{lemma}
\begin{remark} This result also holds for a bounded perfectly conducting scatterer (incorrectly stated in \cite[Lemma 3.3.3]{FanPhD}). The following proof is closely related to the proof of \cite[Lemma 3.3.3]{FanPhD}, but correcting the
statement of the result.
\end{remark}
\begin{proof}
By using the representation formula in Lemma~\ref{inv:uniqueness:rep_fml}, we have
\begin{equation*}
\begin{array}{l}
\mathbf{u}^s(\mathbf{x};\mathbf{z},\mathbf{p}) = \displaystyle\int_{\partial D} \!\Big( -\mathbf{u}^s (\mathbf{y};\mathbf{z},\mathbf{p} )\cdot \big( \boldsymbol{\nu}_D\times(\nabla_{\mathbf{y}}\times\mathbb{G}_e(\mathbf{x},\mathbf{y}) ) \big)
+ \big( \boldsymbol{\nu}_D\times(\nabla\times\mathbf{u}^s(\mathbf{y};\mathbf{z},\mathbf{p})) \big) \cdot\mathbb{G}_e(\mathbf{x},\mathbf{y})\Big) \, dS_{\mathbf{y}}\, .
\end{array}
\end{equation*}
In addition, by using Green's second identity and the properties of the electric type dyadic Green's function,
\begin{equation*}
\begin{array}{l}
\boldsymbol{0} = \displaystyle\int_{\partial D} \!\Big( -\mathbb{G}_e (\mathbf{y},\mathbf{z})\mathbf{p} \cdot \big( \boldsymbol{\nu}_D\times(\nabla_{\mathbf{y}}\times\mathbb{G}_e(\mathbf{x},\mathbf{y}) ) \big)
+ \big( \boldsymbol{\nu}_D\times(\nabla\times\mathbb{G}_e(\mathbf{y},\mathbf{z})\mathbf{p}) \big) \cdot\mathbb{G}_e(\mathbf{x},\mathbf{y})\Big) \, dS_{\mathbf{y}}\, ;
\end{array}
\end{equation*}
see \cite[eq. (3.48)]{FanPhD}. Adding both results leads to
\begin{equation*}
\begin{array}{l}
\mathbf{u}^s(\mathbf{x};\mathbf{z},\mathbf{p}) =\displaystyle \int_{\partial D} \Big(- \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} )\cdot \big( \boldsymbol{\nu}_D\times(\nabla_{\mathbf{y}}\times\mathbb{G}_e(\mathbf{x},\mathbf{y}) ) \big)
+ \big( \boldsymbol{\nu}_D\times(\nabla\times \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p})) \big) \cdot\mathbb{G}_e(\mathbf{x},\mathbf{y})\Big) \, dS_{\mathbf{y}}\, ;
\end{array}
\end{equation*}
which, making use of dyadic identities and the second vector-dyadic Green's identity, can be rewritten as
\begin{equation}\label{r_c1tilde}
\begin{array}{l}
\mathbf{u}^s(\mathbf{x};\mathbf{z},\mathbf{p}) =\displaystyle \int_{D} \Big( \big(\nabla\times\nabla\times \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} )\big) \cdot \mathbb{G}_e(\mathbf{x},\mathbf{y})
- \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p}) \cdot \big(\nabla_{\mathbf{y}}\times\nabla_{\mathbf{y}}\times\mathbb{G}_e(\mathbf{x},\mathbf{y})\big) \Big) \, d\mathbf{y}\, .
\end{array}
\end{equation}
Similarly, one can show that
\begin{equation*}
\begin{array}{l}
\mathbf{u}^s(\mathbf{z};\mathbf{x},\mathbf{q}) \cdot \mathbf{p}
=\displaystyle \int_{\partial D} \Big( -{\mathbf{u}}^s (\mathbf{y};\mathbf{x},\mathbf{q} )\cdot \big( \boldsymbol{\nu}_D\times(\nabla\times {\mathbf{u}} (\mathbf{y};\mathbf{z},\mathbf{p} ) ) \big) \Big. \\
\hspace*{3.5cm} \Big. + \big( \boldsymbol{\nu}_D\times(\nabla\times {\mathbf{u}}^s(\mathbf{y};\mathbf{x},\mathbf{q})) \big) \cdot {\mathbf{u}} (\mathbf{y};\mathbf{z},\mathbf{p} )\Big) \, dS_{\mathbf{y}}\, ,
\end{array}
\end{equation*}
and deduce that
\begin{equation}\label{r_c1tildebis}
\begin{array}{l}
\mathbf{u}^s(\mathbf{z};\mathbf{x},\mathbf{q}) \cdot \mathbf{p}
= \displaystyle\int_{D} \Big( \big(\nabla_{\mathbf{y}}\times\nabla_{\mathbf{y}}\times\mathbf{u}^s (\mathbf{y};\mathbf{x},\mathbf{q} )\big)\cdot \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} ) \Big. \\
\hspace*{3.5cm} \Big. - {\mathbf{u}}^s(\mathbf{y};\mathbf{x},\mathbf{q}) \cdot \big(\nabla\times\nabla\times \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} )\big) \Big) \, d\mathbf{y} \, .
\end{array}
\end{equation}
In consequence, from (\ref{r_c1tilde}) and (\ref{r_c1tildebis}) we have
\begin{equation*}
\begin{array}{l}
\mathbf{u}^s(\mathbf{x};\mathbf{z},\mathbf{p}) \cdot\mathbf{q} - \mathbf{u}^s(\mathbf{z};\mathbf{x},\mathbf{q}) \cdot \mathbf{p} \, = \\
\qquad =\, \displaystyle\int_{D} \!\Big( \big(\nabla\times\nabla\times \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} )\big) \cdot \mathbf{u} (\mathbf{y};\mathbf{x},\mathbf{q} )
- \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} ) \cdot \big(\nabla\times\nabla\times \mathbf{u} (\mathbf{y};\mathbf{x},\mathbf{q} )\big)
\Big) d\mathbf{y} \, ,
\end{array}
\end{equation*}
from which the result follows by noticing that $ \nabla\times\nabla\times \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} ) = \varepsilon k^2 \mathbf{u} (\mathbf{y};\mathbf{z},\mathbf{p} ) $ and $ \nabla\times\nabla\times \mathbf{u} (\mathbf{y};\mathbf{x},\mathbf{q} ) = \varepsilon k^2 \mathbf{u} (\mathbf{y};\mathbf{x},\mathbf{q} ) $ in $D$.
\end{proof}
The above reciprocity relation is the basic tool we need to prove the uniqueness of solution for the inverse problem we are dealing with. We state the result in slightly more generality than is needed for this paper. More precisely, let us consider two cross-sections $\Sigma_s$ and $\Sigma_r$ located on the same side {(i.e. both above or both below)} of the scatterer and with the receivers not further from the target than the sources; for the sake of simplicity, in the sequel we assume that both surfaces are below the scatterer, that is, $D\subseteq W_{(r,R)}$ and $s\leq r$.
\begin{theorem}\label{inv:uniqueness} Let $D_1$ and $D_2$ be two penetrable scatterers completely contained in the waveguide and away from its boundary, and with relative electric permittivities $\varepsilon_1$ and $\varepsilon_2$, respectively.
If
the tangential components of the fields $\mathbf{u}_1^s(\cdot;\mathbf{y},\mathbf{p})$ and $\mathbf{u}_2^s(\cdot;\mathbf{y},\mathbf{p})$, scattered by $D_1$ and $D_2$ respectively, coincide on $\Sigma_r$ for all
sources $\mathbf{y}\in\Sigma_s$ and polarizations $\mathbf{p}\in\mathbb{R}^3$ ($ |\mathbf{p} |=1$), then $D_1=
D_2$.
\end{theorem}
\begin{proof}
The following proof is a modification of that of \cite[Theorem 5.6]{ColtonKress-IP}.
Let us suppose that $\mathbf{w}(\cdot;\mathbf{x}_0,\mathbf{p}) := \mathbf{u}_1^s(\cdot;\mathbf{x}_0,\mathbf{p}) - \mathbf{u}_2^s(\cdot;\mathbf{x}_0,\mathbf{p})$ has null tangential component $\boldsymbol{\nu}_0\times\mathbf{w} (\cdot;\mathbf{x}_0,\mathbf{p}) = \boldsymbol{0}$ on $\Sigma_r$. Then, by Lemma~\ref{fwd:half_pipe:unbdd}, $\mathbf{w} (\cdot;\mathbf{x}_0,\mathbf{p})=\boldsymbol{0}$ in $W_{(-\infty,r)}$; and, by the unique continuation principle (see \cite[Theorem 4.13 and Remark 4.14]{MonkBook}), also $\mathbf{w} (\cdot;\mathbf{x}_0,\mathbf{p}) = \boldsymbol{0}$ in $\Omega= W\backslash\overline{(D_1\cup D_2)}$. Applying the reciprocity relation (see Lemma~\ref{inv:uniqueness:reciprocity} above), from $\mathbf{u}_1^s(\mathbf{y} ;\mathbf{x}_0,\mathbf{p}) = \mathbf{u}_2^s(\mathbf{y} ;\mathbf{x}_0,\mathbf{p})$ for $\mathbf{y}\in\Omega$ and $\mathbf{x}_0\in\Sigma_s$ we deduce that
$$
\mathbf{u}_1^s(\mathbf{x}_0;\mathbf{y},\mathbf{p}) = \mathbf{u}_2^s(\mathbf{x}_0;\mathbf{y},\mathbf{p}) \quad \forall \mathbf{x}_0\in\Sigma_s \, , \; \mathbf{y}\in\Omega\, , \;\mathbf{p}\in\mathbb{R}^3 \, .
$$
In case $D_1\ne D_2$, without loss of generality we may consider some point $\mathbf{y}^*\in\partial D_1$ such that $\mathbf{y}^*\not\in\overline{D_2}$ and use it to build the sequence
$$ \mathbf{y}_n^* = \mathbf{y}^* + \frac1n\,\boldsymbol{\nu}_{D_1}^* \quad\mbox{for } n = 1,2,\ldots \, ,$$
where $\boldsymbol{\nu}_{D_1}^*$ is the unit outward normal to $\partial D_1$ at $\mathbf{y}^*$. Notice that $\mathbf{y}_n^*\in\Omega$ for $n$ big enough, so that we have already shown
$$
\mathbf{u}_1^s(\mathbf{x}_0;\mathbf{y}_n^*,\mathbf{p}) = \mathbf{u}_2^s(\mathbf{x}_0;\mathbf{y}_n^*,\mathbf{p}) \quad \forall \mathbf{x}_0\in\Sigma_s\, ,\; \mathbf{p}\in\mathbb{R}^3 \, .
$$
We can use here again Lemma~\ref{fwd:half_pipe:unbdd} and the unique continuation principle to deduce that $\mathbf{u}_1^s(\cdot;\mathbf{y}_n^*,\mathbf{p}) = \mathbf{u}_2^s(\cdot;\mathbf{y}_n^*,\mathbf{p})$ in $\Omega$, which leads to a contradiction when $n\to\infty$ because $\mathbf{y}_n^*\to\mathbf{y}^*\in \partial D_1\setminus\overline{D_2}$ (notice that $\mathbf{u}^i(\cdot;\mathbf{y}^*,\mathbf{p})$ is singular in $\overline{D_1}$ but not in $\overline{D_2}$). \end{proof}
\subsection{The Near Field Operator and its Basic Properties}\label{wg:inverse:nearfieldop}
In the sequel we consider the sources and receivers placed on the same cross-section.
As suggested by Theorem \ref{inv:uniqueness}, we could allow for different surfaces for the sources and the receivers, but the usual choice for the LSM is to have only one.
Recall that the {Near Field operator} $N: L_T^2(\Sigma _r)\to L_T^2(\Sigma _r)$ is given by (\ref{inv:nearfieldop}).
Note that, in general, given a function $\mathbf{v}\in H(\mathbf{curl},W_{(-R,R)}\setminus\Sigma_r)$ its tangential trace $\boldsymbol{\nu}_0\times\mathbf{v}$ is only in $\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_r)$. Nevertheless, we can define the near field operator from $L_T^2(\Sigma_r)$ into $L_T^2(\Sigma_r)$ thanks to the following lemma (see \cite[Lemma 3.3.4]{FanPhD}, here restated for a bounded section of the waveguide).
\begin{lemma}\label{inv:hhdv-to-lt} Given $\mathbf{Q}\in\widetilde{H}^{-1/2}(\mathrm{div},\Sigma_{s_1})$, let $\mathbf{U}$ denote the solution to the blocked waveguide problem (\ref{fwd:half_pipe:bdd-problem}) posed in $W_{(s_1,R)}$. Then the tangential component $\boldsymbol{\nu}_0\times\mathbf{U}$
on any cross-section $\Sigma_{s_2}$ ($s_1<s_2<R$) belongs to $L_T^2(\Sigma_{s_2})$.
\end{lemma}
More precisely,
we can rewrite the near field operator by means of two auxiliary operators: On the one hand, we define
$$
\mathcal{H}: \mathbf{g}\in L^2_T(\Sigma _r) \mapsto \mathbf{w}^i_{\mathbf{g}}|_D\in H_{inc}(D) \, ,
$$
where $\, H_{inc}(D)= \{ \mathbf{w}\in L^2(D)^3 ; \, \nabla\times\nabla\times\mathbf{w} -k^2\mathbf{w} = \boldsymbol{0} \,\,\mbox{in } D\} \, $ and
\begin{equation}\label{inv:wgi}
\mathbf{w}^i_{\mathbf{g}}(\mathbf{x})=\int_{\Sigma_r} \mathbf{u}^i(\mathbf{x};\mathbf{y},\mathbf{g}(\mathbf{y}))\, dS_{\mathbf{y}} \quad\mbox{for a.e. }\mathbf{x}\in W_{(-R,R)}\setminus\Sigma_r \,
\end{equation}
is
the \emph{electric single layer potential} on $\Sigma_r$ with density $\mathbf{g}$; this operator is linear and bounded.
On the other hand, we define the \emph{incident-to-measurement} operator
$$
\mathcal{N}: \mathbf{w}^i\in H_{inc}(D)\mapsto \boldsymbol{\nu}_0\times\mathbf{w}^s|_{\Sigma_r}\in L^2_T(\Sigma_r) \,
$$
where $\mathbf{w}^s$ is the solution of the forward problem (\ref{fwd-sc:problem_truncated}) for the incident field $ \mathbf{w}^i$; notice that, by Theorem \ref{fwd:vari_well-posedness} and Lemma \ref{inv:hhdv-to-lt}, we know that this operator is well-defined, linear and bounded.
Then we have the factorization $N=\mathcal{N}\mathcal{H}$. Indeed, we can understand $\mathbf{w}^i_{\mathbf{g}}=\mathcal{H}\mathbf{g}$ as the superposition of the incident fields due to point sources $\mathbf{y}\in\Sigma_r$ with polarizations $\mathbf{g} (\mathbf{y})$; by linearity of the forward problem, the corresponding scattered field is $$\mathbf{w}^s_{\mathbf{g}} (\mathbf{x})=\int_{\Sigma_r} \mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{g}(\mathbf{y}))\, dS_{\mathbf{y}} \quad\mbox{for a.e. } \mathbf{x}\in W_{(-R,R)}$$ and, in particular, its tangential component on $\Sigma_r$ is just $\boldsymbol{\nu}_0\times\mathbf{w}_{\mathbf{g}}^s|_{\Sigma_r}= N\mathbf{g}$.
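In a numerical realization, the factorization above is mirrored by assembling a matrix approximation of $N$ from simulated or measured scattered data. The following minimal sketch (ours; the routine \texttt{scattered\_field} and the quadrature rule are hypothetical placeholders) illustrates one such assembly when $\Sigma_r$ is a horizontal cross-section, so that $\boldsymbol{\nu}_0=\mathbf{e}_3$ and the tangential directions are $\mathbf{e}_1,\mathbf{e}_2$.
\begin{verbatim}
import numpy as np

# Minimal sketch: discrete near field operator.  points are quadrature nodes on
# Sigma_r with weights, and scattered_field(x, y, p) is assumed to return
# u^s(x; y, p) in C^3 (from a forward solver or from measurements).
def assemble_near_field_matrix(points, weights, scattered_field):
    M = len(points)
    e = np.eye(3)
    Nmat = np.zeros((2 * M, 2 * M), dtype=complex)
    for i, (src, w) in enumerate(zip(points, weights)):    # sources on Sigma_r
        for l in range(2):                                 # polarizations e_1, e_2
            for j, rec in enumerate(points):               # receivers on Sigma_r
                us = scattered_field(rec, src, e[l])
                tangential = np.cross(e[2], us)            # nu_0 x u^s with nu_0 = e_3
                Nmat[2*j:2*j+2, 2*i+l] = w * tangential[:2]
    return Nmat
\end{verbatim}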
In order to analyze the properties of the near field operator, we recall the following standard homogeneous \emph{Interior Transmission Problem} (ITP):
\begin{equation}\label{inv:itp}
\begin{array}{rcl}
\nabla\times\nabla\times\mathbf{U}_1 - k^2\mathbf{U}_1 = \boldsymbol{0} & \mbox{in} & D\, , \\[1ex]
\nabla\times\nabla\times\mathbf{U}_2 - k^2\varepsilon\mathbf{U}_2 = \boldsymbol{0} & \mbox{in} & D\, , \\[1ex]
\boldsymbol{\nu}_D\times\mathbf{U}_1 = \boldsymbol{\nu}_D\times\mathbf{U}_2 & \mbox{on} & \partial D \, , \\[1ex]
\boldsymbol{\nu}_D\times(\nabla\times\mathbf{U}_1) = \boldsymbol{\nu}_D\times(\nabla\times\mathbf{U}_2) & \mbox{on} & \partial D \, .
\end{array}
\end{equation}
The values of $k$ for which this problem has a nontrivial solution are known as \emph{interior transmission eigenvalues}. Notice that this interior transmission problem is the same as the one that arises in the analysis of the inverse problem in free space, and has been analyzed in \cite[Section 4.2]{CakoniColtonMonk-IP}. In particular, the set of real transmission
eigenvalues is countable.
\begin{lemma}\label{inv:NFO:1-1}
If $k$ is not an interior transmission eigenvalue with an eigenfunction of the form of a single layer potential $\mathbf{U}_1=\mathcal{H}\mathbf{g}$ ($\mathbf{g}\in L^2_T(\Sigma_r)$), then the near field operator $N: L_T^2(\Sigma _r) \to L_T^2(\Sigma _r)$ is one-to-one.
\end{lemma}
\begin{proof}
Let us consider some $\mathbf{g}\in L_T^2(\Sigma _r) $ such that $N\mathbf{g} =\boldsymbol{0}$ on $\Sigma_r$. By the definition of $N$, this means that
$$
\int_{\Sigma_r}\boldsymbol{\nu}_0(\mathbf{x})\times\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{g}(\mathbf{y}))\, dS_{\mathbf{y}}=\boldsymbol{0}\quad \mbox{for a.e. } \mathbf{x}\in\Sigma _r \, .
$$
In terms of $\mathbf{w}_{\mathbf{g}}^i=\mathcal{H}\mathbf{g}$
and its associated scattered field $\mathbf{w}_{\mathbf{g}}^s$, the property above means that $\boldsymbol{\nu}_0\times\mathbf{w}^s_{\mathbf{g}}=\boldsymbol{0}$ on $\Sigma_r$. Then Lemma \ref{fwd:half_pipe:unbdd} (rewritten for the lower section of the waveguide $W_{(-R,r)}$, that is, with the radiation condition on $\Sigma_{-R}$ and the boundary condition on $\Sigma_r$) guarantees that $\mathbf{w}^s_{\mathbf{g}}=\boldsymbol{0}$ in $W_{(-R,r)}$ and, by the unique continuation principle, also in $W_R\setminus \overline{D}$. Therefore, $\mathbf{U}_1=\mathbf{w}^i_{\mathbf{g}}|_D$ and $\mathbf{U}_2=(\mathbf{w}^i_{\mathbf{g}}+\mathbf{w}^s_{\mathbf{g}})|_D$ solve the ITP with $\mathbf{U}_1=\mathcal{H}\mathbf{g}$. By the assumption that $k$ is not a transmission eigenvalue with an eigenfunction of this form, we have $\mathbf{U}_1=\mathbf{U}_2=\boldsymbol{0}$ and hence $\mathbf{g}=\boldsymbol{0}$. This completes the proof. \end{proof}
\begin{lemma}\label{inv:NFO:denserange}
Under the hypothesis of Lemma \ref{inv:NFO:1-1} on the wavenumber $k$, the range of the near field operator $N: L_T^2(\Sigma _r) \to L_T^2(\Sigma _r)$ is dense in $L_T^2(\Sigma _r)$.
\end{lemma}
\begin{proof}
Equivalently, we study the injectivity of the $L^2$-adjoint of the near field operator, which we denote $N^*: L_T^2(\Sigma _r) \to L_T^2(\Sigma _r)$. To this end, let us consider $\mathbf{g}\in L_T^2(\Sigma _r)$ such that, for all $\mathbf{h}\in L_T^2(\Sigma _r)$,
$$
0=\langle N^*\mathbf{g},\mathbf{h}\rangle_{\Sigma _r} = \langle \mathbf{g},N\mathbf{h}\rangle _{\Sigma _r} = \int_{\Sigma_r} \mathbf{g}(\mathbf{x}) \cdot (\boldsymbol{\nu}_0(\mathbf{x})\times\int_{\Sigma _r} \overline{\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{h}(\mathbf{y}))}\, dS_{\mathbf{y}} ) dS_{\mathbf{x}}\, .
$$
Changing (formally) the order of integration,
$$
0= \int_{\Sigma_r} ( \int_{\Sigma _r} \mathbf{g}(\mathbf{x}) \cdot (\boldsymbol{\nu}_0(\mathbf{x})\times\overline{\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{h}(\mathbf{y}))} ) dS_{\mathbf{x}} ) dS_{\mathbf{y}}\, .
$$
Notice that, making use of the definition of $L^2_T(\Sigma_r)$ and the reciprocity relation,
$$
\mathbf{g}(\mathbf{x}) \cdot (\boldsymbol{\nu}_0(\mathbf{x})\times\overline{\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{h}(\mathbf{y}))} ) = - \mathbf{g}(\mathbf{x}) \cdot \overline{\mathbf{u}^s(\mathbf{x};\mathbf{y},\mathbf{h}(\mathbf{y}))} =
- \overline{\mathbf{h}(\mathbf{y})} \cdot \overline{\mathbf{u}^s(\mathbf{y};\mathbf{x},\overline{\mathbf{g}(\mathbf{x})})} \, ,
$$
so that
$$
0= \int_{\Sigma_r} \overline{\mathbf{h}(\mathbf{y})} \cdot ( \int_{\Sigma _r} \overline{\mathbf{u}^s(\mathbf{y};\mathbf{x},\overline{\mathbf{g}(\mathbf{x})})} dS_{\mathbf{x}} ) dS_{\mathbf{y}}\, .
$$
Since this holds for all $\mathbf{h}\in L_T^2(\Sigma _r)$, we deduce that
$$
\boldsymbol{\nu}_0 ( \mathbf{y}) \times \int_{\Sigma _r} \mathbf{u}^s(\mathbf{y};\mathbf{x},\overline{\mathbf{g} (\mathbf{x})}) \, dS_{\mathbf{x} } = \boldsymbol{0} \quad\mbox{for a.e. } \mathbf{y}
\in \Sigma _r \, .
$$
Reasoning as for Lemma \ref{inv:NFO:1-1}, we conclude that $\mathbf{g}=\boldsymbol{0}$ on $\Sigma _r$.\end{proof}
In order to study the compactness of the near field operator, we consider the following volume integral representation of the scattered field associated to any incident field $\mathbf{w}^i\in H_{inc}(D)$:
\begin{equation}\label{ip:volintrep}
\mathbf{w}^s (\mathbf{x}) = -k^2\int_D (1-\varepsilon (\mathbf{y})) \mathbb{G}_e (\mathbf{x},\mathbf{y}) (\mathbf{w}^i(\mathbf{y})+\mathbf{w}^s(\mathbf{y})) \, d\mathbf{y} \quad\mbox{for }\mathbf{x}\in W\, .
\end{equation}
Notice that Lemma \ref{inv:green-decompose} guarantees that $\mathbb{G}_e (\mathbf{x},\mathbf{y})$ is smooth for $\mathbf{x}\in\Sigma _r$ and $\mathbf{y}\in D$, from which it follows
that $\mathcal{N}:H_{inc}(D)\to L^2_T(\Sigma_r)$ is compact; thus, the following result is straightforward by the factorization $N=\mathcal{N}\mathcal{H}:L^2_T(\Sigma _r)\to L^2_T(\Sigma_r)$.
\begin{lemma}\label{inv:NFO:compact}
The near field operator $N: L_T^2(\Sigma _r) \to L_T^2(\Sigma _r)$ is compact.
\end{lemma}
\subsection{Justification of the Linear Sampling Method}\label{wg:inverse:lsm}
We now give details {of} the Linear Sampling Method (LSM) outlined in the introduction. We assume that the wavenumber $k$ is not a transmission eigenvalue or an exceptional frequency for the forward problem. More precisely, let us consider a sampling point $\mathbf{y}$ in the section of the waveguide $ W_{(r,R)}$, away from its boundary $\Gamma_{(r,R)}$ and in the vicinity of $D$. In order to study if this point $\mathbf{y}$ belongs to $D$, we
fix an artificial polarization $\mathbf{p}\in\mathbb{R}^3\setminus\{\boldsymbol{0}\}$
and seek a function $\mathbf{g}_{\mathbf{y}}\in L^2_T(\Sigma_r)$ that solves the so-called Near Field equation (\ref{inv:lsm:nfe}).
We then claim that $\mathbf{y}\in D$ if $\Vert\mathbf{g}_{\mathbf{y}}\Vert_{L^2_T(\Sigma _r)}$ is small.
To provide a justification of this approach, we first characterize the points in $D$ in terms of the range of $\mathcal{N}$.
\begin{lemma}\label{inv:lsm:inout}
The tangential trace $\boldsymbol{\nu}_0\times\mathbb{G}_e(\cdot,\mathbf{y})\mathbf{p}|_{\Sigma_r}$ is in the range of $\mathcal{N}$ if, and only if, $\mathbf{y}\in D$.
Furthermore, when $\{\mathbf{y}_n\}_n\subset D$ approaches a point on the boundary of $D$,
the unique solutions of the associated near field equations $\mathcal{N}\mathbf{U}^i_{\mathbf{y}_n}=\boldsymbol{\nu}_0\times \mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})
|_{\Sigma_r}$
blow up in the $H(\mathbf{curl},D)$-norm as $n\to\infty$.
\end{lemma}
\begin{proof}
Let us consider $\mathbf{y}\in D$. Assuming that $k$ is not an interior transmission eigenvalue, there exists a unique solution of the following non-homogeneous ITP:
\begin{equation}\label{inv:itpaux}
\begin{array}{rcl}
\nabla\times\nabla\times\mathbf{U}_{\mathbf{y}}^1 - k^2\mathbf{U}^1_{\mathbf{y}} = \boldsymbol{0} & \mbox{in} & D\, , \\[1ex]
\nabla\times\nabla\times\mathbf{U}_{\mathbf{y}}^2 - k^2\varepsilon\mathbf{U}_{\mathbf{y}}^2 = \boldsymbol{0} & \mbox{in} & D\, , \\[1ex]
\boldsymbol{\nu}_D\times(\mathbf{U}^2_{\mathbf{y}}-\mathbf{U}^1_{\mathbf{y}}) = \boldsymbol{\nu}_D\times\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p}) & \mbox{on} & \partial D \, , \\[1ex]
\boldsymbol{\nu}_D\times (\nabla\times (\mathbf{U}^2_{\mathbf{y}}-\mathbf{U}^1_{\mathbf{y}}) )= \boldsymbol{\nu}_D\times(\nabla\times \mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})) & \mbox{on} & \partial D \, .
\end{array}
\end{equation}
Notice that
\begin{equation}\label{nfe:defUsy}
\mathbf{U} ^s_{\mathbf{y}} = \left\{
\begin{array}{ll}
\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p}) & \mbox{in } W_R\setminus\overline{D}\, ,\\[1ex]
\mathbf{U}^2_{\mathbf{y}}-\mathbf{U}^1_{\mathbf{y}} \quad & \mbox{in } {D}\, ,
\end{array} \right.
\end{equation}
solves the forward problem (\ref{fwd-sc:problem_truncated}) for the incident field $\mathbf{U}_{\mathbf{y}}^1\in H_{inc}(D)$. Therefore
$$
\mathcal{N} \mathbf{U}_{\mathbf{y}}^1 = \boldsymbol{\nu}_0\times \mathbf{U}^s_{\mathbf{y}}|_{\Sigma _r} = \boldsymbol{\nu}_0\times \mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})|_{\Sigma _r} \, ,
$$
and we deduce that $\boldsymbol{\nu}_0\times \mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})|_{\Sigma _r}\in \mathcal{N} (H_{inc}(D))$.
Reciprocally, let us take $\mathbf{y}\in W\backslash\overline{D}$ and suppose that there exists $\mathbf{U}_{\mathbf{y}}^0\in H_{inc}(D) $ such that $\mathcal{N}\mathbf{U}_{\mathbf{y}}^0 = \boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})
|_{\Sigma_r}$. If $\mathbf{U}^s_{\mathbf{y}}\in \mathbf{X}_{(-R,R)}$ denotes the associated scattered wave, then
$$
\boldsymbol{\nu}_0\times\mathbf{U}^s_{\mathbf{y}}|_{\Sigma _r} =
\mathcal{N}\mathbf{U}_{\mathbf{y}}^0 =
\boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})
|_{\Sigma_r}
\, .
$$
Therefore, Corollary \ref{fwd:half_pipe:bdd} rewritten on the section $W_{(-R,r)}$
guarantees that $\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})
= \mathbf{U}^s_{\mathbf{y}}$ in $W_{(-R,r)}$ and, by the unique continuation principle, also in $W_{R}\setminus(\overline{D}\cup \{\mathbf{y}\})$. This leads to a contradiction when approaching $\mathbf{y}\in W_{(-R,R)}\setminus\overline{D}$, since $\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})$ is singular at $\mathbf{y}$ whereas $\mathbf{U}^s_{\mathbf{y}}\in H(\mathbf{curl},W_{(-R,R)})$ is not, and hence $\boldsymbol{\nu}_0\times \mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})|_{\Sigma _r}\not\in \mathcal{N} (H_{inc}(D))$.
We next study the behavior of the solutions of
$$\mathcal{N}\mathbf{U}^i_{\mathbf{y}_n}=\boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})
|_{\Sigma_r}$$
for a sequence $\{\mathbf{y}_n\}_n\subset D$ that approaches a point on the boundary of $D$. To this end, we recall that we have built these solutions using the incident and scattered fields $\mathbf{U}^i_{\mathbf{y}_n}=\mathbf{U}_{\mathbf{y}_n}^1$ and $\mathbf{U} ^s_{\mathbf{y}_n} $ which satisfy (\ref{inv:itpaux}) and (\ref{nfe:defUsy}), respectively; then
$$
\mathcal{N} \mathbf{U}_{\mathbf{y}_n}^1 = \boldsymbol{\nu}_0\times \mathbf{U}^s_{\mathbf{y}_n}|_{\Sigma _r} = \boldsymbol{\nu}_0\times \mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})|_{\Sigma _r} \, .
$$
To study the behavior of these functions, let us fix two auxiliary Lipschitz domains ${D}_1$ and ${D}_2$ such that
$$
\overline{D}\subset D_1 \, , \quad \overline{D_1}\subset D_2 \, , \quad \overline{D_2}\subset W_{(-R,R)} \, .
$$
Notice that
\begin{equation*}
\begin{array}{rcl}
\nabla\times\nabla\times\mathbf{U}^s_{\mathbf{y}_n} - k^2\varepsilon\mathbf{U}^s_{\mathbf{y}_n} = - k^2 (1-\varepsilon) \mathbf{U}_{\mathbf{y}_n}^1 & \mbox{in} & D_2 \, , \\[1ex]
\boldsymbol{\nu}_{D_2} \times \mathbf{U}^s_{\mathbf{y}_n} = \boldsymbol{\nu}_{D_2}\times\mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p}) & \mbox{on} & \partial D_2 \, , \\[1ex]
\boldsymbol{\nu}_{D_2} \times (\nabla\times \mathbf{U}^s_{\mathbf{y}_n} )= \boldsymbol{\nu}_{D_2} \times(\nabla\times \mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})) & \mbox{on} & \partial D_2 \, ,
\end{array}
\end{equation*}
where $\boldsymbol{\nu}_{D_2} $ stands for the unit normal field on $\partial D_2$ directed outwards. The above is a well-posed forward problem, in particular
$$
\begin{array}{l}
\Vert \mathbf{U}^s_{\mathbf{y}_n} \Vert _{H(\mathbf{curl},D_2)}\, \leq \, C\, \Big(
\Vert \mathbf{U}^1_{\mathbf{y}_n} \Vert _{H(\mathbf{curl},D)}
+ \Vert \boldsymbol{\nu}_{D_2}\times\mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p}) \Vert _{H^{-1/2}_T (\partial D_2)} \Big. \\
\hspace*{6cm} \Big. + \Vert \boldsymbol{\nu}_{D_2}\times (\nabla\times\mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})) \Vert _{H^{-1/2}_T (\partial D_2)}
\Big) ;
\end{array}
$$
hence, by the continuity of the tangential trace and the fact that
$\nabla\times\nabla\times\mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})=k^2\mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p})$ in $D_2\setminus\overline{D_1}$,
$$
\Vert \mathbf{U}^s_{\mathbf{y}_n} \Vert _{H(\mathbf{curl},D_2)} \leq C \left(
\Vert \mathbf{U}^1_{\mathbf{y}_n} \Vert _{H(\mathbf{curl},D)}
+ \Vert \mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p}) \Vert _{H(\mathbf{curl}, D_2\setminus\overline{D_1})}
\right) .
$$
Finally notice that, when $\{\mathbf{y}_n\}_n\subset D$ approaches a point on the boundary of $D$, the sequence $\Vert \mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p}) \Vert _{H(\mathbf{curl}, D_2\setminus\overline{D_1})}$ remains bounded whereas $\Vert \mathbf{U}^s_{\mathbf{y}_n} \Vert _{H(\mathbf{curl},D_2)} \geq \Vert \mathbf{u}^i(\cdot;\mathbf{y}_n,\mathbf{p}) \Vert _{H(\mathbf{curl},D_2\setminus\overline{D})} $ blows up; therefore, we conclude that $ \Vert \mathbf{U}^1_{\mathbf{y}_n} \Vert _{H(\mathbf{curl},D)} $ must also blow up.
\end{proof}
We continue our analysis of the LSM with a result which justifies the usage of single layer potentials in the near field equation. {In this proof, we use the following explicit expression of the electric Green's function in terms of the waveguide modes introduced in Section \ref{subsec:forward:modal}, cf. \cite[Subsection 3.3.1]{FanPhD} and \cite{ChenToTai}:
\begin{equation}\label{ip:GeModes}
\mathbb{G}_{e}(\mathbf{y},\mathbf{x}) \, = \, \displaystyle\sum_{m=1}^{\infty} c_m \mathbf{M}_m(\mathbf{y}^-){\mathbf{M}_m(\mathbf{x})}^T
+ \sum_{n=1}^{\infty} d_n \mathbf{N}_n(\mathbf{y}^-){\mathbf{N}_n(\mathbf{x})}^T \quad\mbox{for } y_3<x_3\, .
\end{equation}
In this expression, the superindex $^-$ represents the reflection of a point with respect to the plane $x_3=0$, that is, $\mathbf{y}^-:=(\hat{\mathbf{y}},-y_3)$ when $\mathbf{y}=(\hat{\mathbf{y}},y_3)$. Moreover, the coefficients $c_m,d_n\ (m,n=1,2,\ldots)$ depend on
the shape of $\Sigma$, and the terms $\mathbf{M}_m(\mathbf{y}^-)\,{\mathbf{M}_m(\mathbf{x})}^T$ and $\mathbf{N}_n(\mathbf{y}^-)\,{\mathbf{N}_n(\mathbf{x})}^T$ denote the $3\times 3$ matrices obtained by column-row multiplication.}
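Before turning to the lemma, we note that a truncation of (\ref{ip:GeModes}) is straightforward to evaluate numerically. The following minimal sketch (ours) shows the pattern; the mode routines and the coefficients $c_m,d_n$ are assumed to be supplied for the cross-section at hand.
\begin{verbatim}
import numpy as np

# Minimal sketch: truncated evaluation of G_e(y, x) for y_3 < x_3 via (ip:GeModes).
# Mmode(m, x) and Nmode(n, x) are assumed to return the modes as vectors in C^3,
# and c, d are the (finitely many retained) expansion coefficients.
def Ge_modal(y, x, c, d, Mmode, Nmode):
    assert y[2] < x[2], "the expansion is used for y_3 < x_3"
    y_ref = np.array([y[0], y[1], -y[2]])        # reflection through x_3 = 0
    G = np.zeros((3, 3), dtype=complex)
    for m, cm in enumerate(c, start=1):
        G += cm * np.outer(Mmode(m, y_ref), Mmode(m, x))   # M_m(y^-) M_m(x)^T
    for n, dn in enumerate(d, start=1):
        G += dn * np.outer(Nmode(n, y_ref), Nmode(n, x))   # N_n(y^-) N_n(x)^T
    return G
\end{verbatim}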
\begin{lemma}\label{inv:densHinc}
Let us assume that
the exterior problem in $W\setminus\overline{D}$ for the waveguide with a PEC boundary condition on $\partial D$ and the interior Maxwell problem on $D$ (for $\varepsilon = 1$) are both well-posed. We also assume that all the coefficients in the expansion (\ref{ip:GeModes}) are nonzero. Then, the operator $\mathcal{H}: L^2_T(\Sigma _r) \to H_{inc}(D)$ has dense range.
\end{lemma}
\begin{remark}Using the results in \cite[pages 108 and 141]{ChenToTai}, we know that the assumption on the
coefficients in the expansion (\ref{ip:GeModes}) is satisfied for waveguides with a rectangular or circular cross-section.
\end{remark}
\begin{proof}
We show that the
adjoint of $\mathcal{H}: L^2_T(\Sigma _r) \to H_{inc}(D)$ is one-to-one. To this end, we consider $\mathbf{U}\in H_{inc}(D)'$ such that $\mathcal{H}^*\mathbf{U}=\boldsymbol{0}$ in $L^2_T(\Sigma_r)$, that is, for all $\mathbf{g}\in L^2_T(\Sigma_r) $ it holds
$$
0= \langle \mathcal{H}^*\mathbf{U},\mathbf{g} \rangle_{\Sigma _r} = (\mathbf{U},\mathcal{H}\mathbf{g} )_{D}
=
\int_D \mathbf{U} (\mathbf{x}) \cdot ( \int_{\Sigma_r} \overline{ \mathbb{G}_e (\mathbf{x},\mathbf{y}) \mathbf{g}(\mathbf{y})} \, dS_{\mathbf{y}} ) d\mathbf{x}
\, ;
$$
using that $\mathbf{U} (\mathbf{x}) \cdot ( \overline{ \mathbb{G}_e (\mathbf{x},\mathbf{y}) } \overline{\mathbf{g}(\mathbf{y})} ) = ( \overline{ \mathbb{G}_e (\mathbf{x},\mathbf{y}) }^T \mathbf{U} (\mathbf{x}) ) \cdot \overline{\mathbf{g}(\mathbf{y})} $ and changing (formally) the order of integration,
$$
\int_{\Sigma_r}
( \int_D \overline{ \mathbb{G}_e (\mathbf{x},\mathbf{y}) }^T \mathbf{U} (\mathbf{x}) \, d\mathbf{x} )
\cdot \overline{\mathbf{g}(\mathbf{y})}\, dS_{\mathbf{y}}
\, =\, 0 \, ;
$$
since this holds for all $\mathbf{g}\in L^2_T(\Sigma_r)$, we deduce that $\boldsymbol{\nu}_0\times\mathbf{V}_{\mathbf{U}}|_{\Sigma_r}=\boldsymbol{0}$ on $\Sigma_r$,
where
$$
\mathbf{V}_{\mathbf{U}} (\mathbf{y} ) = \int_D \mathbb{G}_e (\mathbf{x},\mathbf{y}) \overline{ \mathbf{U} (\mathbf{x}) } \, d\mathbf{x} \quad\mbox{for }\mathbf{y}\in W\setminus\overline{D}\, .
$$
{Substituting (\ref{ip:GeModes}) in the definition of $\mathbf{V}_{\mathbf{U}} $,} we have
$$
\mathbf{V}_{\mathbf{U}} (\mathbf{y} ) =
\displaystyle\sum_{m=1}^{\infty}\! c_m \,\mathbf{M}_m(\mathbf{y}^-) \!\int_D\! {\mathbf{M}_m(\mathbf{x})}^T \overline{ \mathbf{U} (\mathbf{x}) } \, d\mathbf{x}
\, +\, \sum_{n=1}^{\infty} \! d_n\, \mathbf{N}_n(\mathbf{y}^-) \!\int_D \!{\mathbf{N}_n(\mathbf{x})}^T \overline{ \mathbf{U} (\mathbf{x}) } \, d\mathbf{x}\, .
$$
This allows us to rewrite the boundary condition $\boldsymbol{\nu}_0\times\mathbf{V}_{\mathbf{U}}|_{\Sigma_r}=\boldsymbol{0}$ on $\Sigma_r$ as
\begin{equation}\langlebel{ip:Ucoefnull}
\int_D \mathbf{M}_m(\mathbf{x})^T \overline{\mathbf{U} (\mathbf{x})} \, d\mathbf{x} = 0 \, ,
\quad
\int_D \mathbf{N}_n(\mathbf{x})^T \overline{\mathbf{U} (\mathbf{x}) } \, d\mathbf{x} = 0
\qquad\forall m,n=1,2,\ldots
\end{equation}
thanks to the definition of $\mathbf{M}_m, \mathbf{N}_n$ (see Section \ref{subsec:forward:modal}) and the fact that $\{ \nabla_{\Sigma} u_m\}_{m=1}^{\infty} \cup \{ \vec{\nabla}_{\Sigma}\times v_n\}_{n=1}^{\infty}$ defines an orthonormal basis of $ L^2_T(\Sigma _r)$ (see \cite[Lemma 3.1.2]{FanPhD}); notice that this reasoning also requires the further assumption that $c_m\neq 0$ and $d_n\neq 0$ for all $m,n=1,2,\ldots$
In order to finally conclude that $\mathbf{U}$ {vanishes, note} that the operator
$$
\mathcal{S}_{\partial D}: \mathbf{h} \in H^{-1/2}(\mathbf{curl},\partial D) \mapsto \mathcal{S}_{\partial D} \mathbf{h}=\boldsymbol{\nu}_D \times\int_{\partial D} \mathbb{G}_e(\cdot,\mathbf{z}) \, \mathbf{h}(\mathbf{z}) \, d\mathbf{z} \in H^{-1/2}(\mathrm{div}, \partial D) \, ,
$$
defines an isomorphism, cf. \cite[Lemma 3.3.5]{FanPhD}. In consequence, by the well-posedness of the interior problem that characterizes the space $H_{inc}(D)$, we know that the linear operator
$$
\mathcal{H}_{\partial D}: \mathbf{h} \in H^{-1/2}(\mathbf{curl},\partial D) \mapsto \mathcal{H}_{\partial D} \mathbf{h}=\int_{\partial D} \mathbb{G}_e(\cdot,\mathbf{z}) \, \mathbf{h}(\mathbf{z}) \, d\mathbf{z} \in H_{inc}(D) \,
$$
has dense range; and, in particular, from the expression of the fundamental solution in terms of modes (\ref{ip:GeModes}) it follows that
$$
\mathrm{span}\{ \mathbf{M}_m,\mathbf{N}_n ; \, m,n=1,2,\ldots \}
$$
is dense in $H_{inc} (D)$. Therefore, (\ref{ip:Ucoefnull}) already guarantees that $\mathbf{U}\in H_{inc}(D)'$ vanishes.
\end{proof}
We now prove the main result justifying the LSM, under the additional assumptions on the wavenumber $k$ made in Lemma \ref{inv:densHinc}.
\begin{theorem}\label{inv:lsm:justification}
Let us fix any polarization $\mathbf{p}\in\mathbb{R}^3\setminus\{\boldsymbol{0}\}$. \begin{enumerate}
\item For each $\mathbf{y}\in D$ and $\epsilon>0$, there exists $\mathbf{g}_{\mathbf{y}}^{\epsilon}\in L_T^2(\Sigma_r)$ such that
\begin{equation}
\Vert N\mathbf{g}_{\mathbf{y}}^{\epsilon} - \boldsymbol{\nu}_0\times\mathbf{u}^i (\cdot;\mathbf{y},\mathbf{p})
\Vert_{L_T^2(\Sigma_r)} < \epsilon \, , \label{inv:lsm:indicator-norm}
\end{equation}
and for which
the associated scattered fields
$$\mathbf{w}^s_{\mathbf{g}_{\mathbf{y}}^{\epsilon}}(\mathbf{z}) = \int_{\Sigma_r}\mathbf{u}^s(\mathbf{z};\mathbf{x},\mathbf{g}_{\mathbf{y}}^{\epsilon}(\mathbf{x}))\,dS_{\mathbf{x}}$$
converge to
$\mathbf{u}^s(\cdot ;\mathbf{y},\mathbf{p})$
in $H(\mathbf{curl},W_R)$ as $\epsilon \to 0$; moreover, if a sequence $\{\mathbf{y}_n \}_n\subset D$ approaches a point on $\partial D$, then the corresponding densities necessarily blow up:
$$ \lim_{n}\Vert\mathbf{g}_{\mathbf{y}_n}^{\epsilon}\Vert_{L_T^2(\Sigma_r)} = \infty\, . $$
\item If $\mathbf{y}\not\in D$, any sequence $\{\mathbf{g}_{\mathbf{y}}^{\epsilon}\}_{\epsilon>0}\subset L_T^2(\Sigma_r)$ that satisfies (\ref{inv:lsm:indicator-norm}) must also blow up:
$$ \lim_{\epsilon \to 0}\Vert\mathbf{g}_{\mathbf{y}}^{\epsilon}\Vert_{L_T^2(\Sigma_r)} = \infty\, . $$
\end{enumerate}
\end{theorem}
\begin{proof}
Thanks to the factorization $N=\mathcal{N}\mathcal{H}$, the first statement follows from the characterization of points $\mathbf{y}\in D$ in terms of $ \boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot ;\mathbf{y},\mathbf{p})
\in \mathcal{N}(H_{inc}(D)) $ given in Lemma \ref{inv:lsm:inout} and the density of $\mathcal{H}(L_T^2(\Sigma_r))$ in $H_{inc}(D)$ shown in Lemma \ref{inv:densHinc}.
On the other hand, let us consider a point $\mathbf{y}\in W_R\setminus\overline{D}$ and a bounded sequence $\{\mathbf{g}_{\mathbf{y}}^{\epsilon}\}_{\epsilon>0}\subset L_T^2(\Sigma_r)$ satisfying (\ref{inv:lsm:indicator-norm}). Then there exists a subsequence of $\{\mathbf{g}_{\mathbf{y}}^{\epsilon}\}_{\epsilon>0}$ that converges weakly to some $\mathbf{g}_{\mathbf{y}}$ in $L_T^2(\Sigma_r)$; since $N\mathbf{g}_{\mathbf{y}}^{\epsilon}\to \boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})$ by (\ref{inv:lsm:indicator-norm}), we obtain $ \boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})
=N\mathbf{g}_{\mathbf{y}} =\mathcal{NH}\mathbf{g}_{\mathbf{y}}\in\mathcal{N}(H_{inc}(D))$, which contradicts Lemma \ref{inv:lsm:inout} because $\mathbf{y}\not\in D$.
\end{proof}
\subsection{Some remarks on the Generalized Linear Sampling Method}\label{glsm}
In this subsection we provide some remarks about the Generalized Linear Sampling Method (GLSM) for the inverse problem under study. More precisely, recall that the LSM makes use of an approximate solution of the NFE
in the sense of (\ref{inv:lsm:indicator-norm}); usually this is computed by means of a Tikhonov regularization, that is, by minimizing
\begin{equation*}
\Vert N\mathbf{g}_{\mathbf{y}}^{\alpha} - \boldsymbol{\nu}_0\times\mathbf{u}^i (\cdot;\mathbf{y},\mathbf{p})
\Vert_{L_T^2(\Sigma_r)}^2 + \alpha^2 \,\Vert \mathbf{g}_{\mathbf{y}}^{\alpha} \Vert_{L_T^2( \Sigma _r)}^2 \, ,
\end{equation*}
for $\mathbf{g}_{\mathbf{y}}^{\alpha}\in L^2_T(\Sigma _r)$.
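In the discrete setting, this Tikhonov minimizer is characterized by the normal equations. The following NumPy sketch (all names are hypothetical and not taken from the code used in Section \ref{Sec-Numresult}) illustrates the computation for a given near field matrix and sampled right-hand side.
\begin{verbatim}
import numpy as np

def tikhonov_solve(N_mat, f, alpha):
    """Minimize ||N g - f||^2 + alpha^2 ||g||^2 via the normal equations.

    N_mat : (M, M) complex discrete near field matrix (hypothetical name)
    f     : (M,) samples of nu_0 x u^i(.; y, p) at the receivers
    alpha : regularization parameter
    """
    M = N_mat.shape[1]
    A = N_mat.conj().T @ N_mat + alpha**2 * np.eye(M)
    b = N_mat.conj().T @ f
    return np.linalg.solve(A, b)
\end{verbatim}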
In contrast, the noise-free
GLSM that we consider here approximately solves the NFE
by minimizing
\begin{equation}\label{inv:glsm:nfe}
\Vert N\mathbf{g}_{\mathbf{y}}^{\alpha} - \boldsymbol{\nu}_0\times\mathbf{u}^i (\cdot;\mathbf{y},\mathbf{p})
\Vert_{L_T^2(\Sigma_r)}^2 + \alpha^2\, |\langle N \mathbf{g}_{\mathbf{y}}^{\alpha} , \mathbf{g}_{\mathbf{y}}^{\alpha} \rangle_{L_T^2( \Sigma _r)} | \, .
\end{equation}
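For completeness, we also sketch how (\ref{inv:glsm:nfe}) could be minimized numerically once the near field operator has been discretized; the SciPy-based code below treats the real and imaginary parts of the density as optimization variables. It is only an illustrative sketch (the function and variable names are hypothetical), not the scheme used for the experiments reported later.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def glsm_minimize(N_mat, f, alpha):
    """Minimize ||N g - f||^2 + alpha^2 |<N g, g>| over complex vectors g."""
    M = N_mat.shape[1]

    def cost(x):
        # Reassemble the complex density from its real and imaginary parts.
        g = x[:M] + 1j * x[M:]
        residual = N_mat @ g - f
        return (np.linalg.norm(residual)**2
                + alpha**2 * abs(np.vdot(g, N_mat @ g)))

    x0 = np.zeros(2 * M)
    result = minimize(cost, x0, method="L-BFGS-B")
    return result.x[:M] + 1j * result.x[M:]
\end{verbatim}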
To analyze this strategy, we first notice that, for all $\mathbf{v}\in L^2(D)^3$ and $\mathbf{f}\in L^2_T(\Sigma_r)$
\begin{equation*}
(\mathcal{H}^*\mathbf{v},\mathbf{f})_{\Sigma _r} = \int _D \mathbf{v}\cdot \overline{\mathcal{H}\mathbf{f}} \, d\mathbf{x}
= \int_{\Sigma _r} \overline{ \mathbf{f} (\mathbf{y}) } \cdot ( \boldsymbol{\nu}_0 \times\int_D \overline{\mathbb{G}_e (\mathbf{y},\mathbf{x})} \mathbf{v} (\mathbf{x}) \,d\mathbf{x} ) dS_{\mathbf{y}} \, ,
\end{equation*}
where we have changed the order of integration and made use of the symmetry of the dyadic function $\mathbb{G}_e(\cdot,\cdot)$. The above means that, for all $\mathbf{v}\in L^2(D)^3$,
\begin{equation*}
{\mathcal{H}^*\mathbf{v}} = \boldsymbol{\nu}_0 \times \int_D \overline{\mathbb{G}_e (\cdot,\mathbf{x}) } \mathbf{v} (\mathbf{x}) \, d\mathbf{x} \quad \mbox{on } \Sigma _r \, ;
\end{equation*}
therefore, taking into account the volume integral representation (\ref{ip:volintrep}) and the definition of the auxiliary operator $\mathcal{N}: \mathbf{w}^i\in H_{inc}(D)\mapsto \boldsymbol{\nu}_0\times\mathbf{w}^s|_{\Sigma_r}\in L^2_T(\Sigma_r)$, we deduce that
\begin{equation*}
-\overline{\mathcal{H}^*(k^2\, \overline{(1-\varepsilon)\mathbf{w}})} = \boldsymbol{\nu}_0 \times \mathbf{w}^s = \mathcal{N} \mathbf{w}^i \quad \mbox{on } \Sigma _r \, ,
\end{equation*}
for each $\mathbf{w}^i\in H_{inc}(D)$, where $\mathbf{w}^s$ and $\mathbf{w}=\mathbf{w}^i+\mathbf{w}^s$ denote the corresponding scattered and total fields. In particular, using the factorization of the near field operator $N=\mathcal{N}\mathcal{H}$, we have that
\begin{equation}\label{eq:Nfactization}
N\mathbf{g} \, = \, \mathcal{N}\mathbf{w}^i_{\mathbf{g}} =
\overline{\mathcal{H}^*(\overline{\mathcal{T}\mathbf{w}^i_{\mathbf{g}}})} =
\overline{\mathcal{H}^*(\overline{\mathcal{T}\mathcal{H}\mathbf{g}})}
\quad\mbox{for all } \mathbf{g}\in L^2_T(\Sigma_r) \, ,
\end{equation}
where we have used a third auxiliary operator, defined by
\begin{equation*}
\mathcal{T}: \mathbf{w}^i \in H_{inc}(D) \mapsto \mathcal{T}\mathbf{w}^i = -k^2 \, (1-\varepsilon ) \, \mathbf{w}|_D
\in L^2(D)^3 \, .
\end{equation*}
The key property for the analysis of the GLSM is the coercivity of this operator, see \cite{ComputELectromag-Cetraro}.
In order to study such coercivity, we notice that, for all $\mathbf{w}^i\in H_{inc}(D)$,
\begin{equation}\label{ip:productTwiwi}
(\mathcal{T}\mathbf{w}^i,\mathbf{w}^i)_D = -k^2 \int_D (1-\varepsilon) \, |\mathbf{w}^i|^2 \, d\mathbf{x} -k^2 \int_D (1-\varepsilon) \, \mathbf{w}^s\cdot\overline{\mathbf{w}^i} \, d\mathbf{x} \, ;
\end{equation}
the second term on the right-hand side can be analyzed using the equation (\ref{fwd-total:weak_form}) that characterizes the scattered field $\mathbf{E}=\mathbf{w}^s$ for the test function $\mathbf{v}=\mathbf{w}^i$:
\begin{equation*}
\begin{array}{l}
\displaystyle -k^2 \int_D (1-\varepsilon) \, \mathbf{w}^s\cdot\overline{\mathbf{w}^i} \, d\mathbf{x}
=
\int_{W_{(-R,R)}}\!\! ( |\nabla\times\mathbf{w}^s|^2-k^2\,\overline{\varepsilon} \, |\mathbf{w}^s|^2 ) d\mathbf{x}
+ \displaystyle\int_{\Sigma_{\pm R}} \!\!\overline{T^{ \pm }_{R} (\boldsymbol{\nu}_0 \times \mathbf{w}^s) } \cdot \boldsymbol{\gamma}_T \mathbf{w}^s \, dS_{\mathbf{x}}
\, ;
\end{array}
\end{equation*}
moreover, we can write explicitly the imaginary part of the last term using the modal expansion (\ref{fwd-dtn-explicit-form}):
\begin{equation*}
\begin{array}{l}
\Im \big(\!\displaystyle\int_{\Sigma_{\pm R}}\!\!\overline{T^{ \pm }_{R} (\boldsymbol{\nu}_0 \times \mathbf{w}^s) } \cdot \boldsymbol{\gamma}_T \mathbf{w}^s \, dS_{\mathbf{x}}\big) =
-\displaystyle\sum_{m=1}^{m_0} |\langle\boldsymbol{\nu}_0\times\mathbf{w}^s , \left(\!\!\begin{array}{c} \nabla_{\Sigma} u_m \\ 0 \end{array}\!\!\right) \rangle _{\Sigma_{ \pm R}}|^2 \,\frac{h_m}{\lambda_m^2}\\[1ex]
\hspace*{2cm}-\displaystyle\sum_{n=1}^{n_0} |\langle\boldsymbol{\nu}_0\times\mathbf{w}^s , \left(\!\!\begin{array}{c} \vec{\nabla}_{\Sigma}\times v_n \\ 0 \end{array}\!\!\right) \rangle _{\Sigma_{\pm R}}|^2 \,\frac{k^2}{\mu_n^2\,g_n}
\, ,
\end{array}
\end{equation*}
where $m_0$ and $n_0$ denote the largest indices for which
$k^2>\lambda_m^2$ and $k^2>\mu_n^2$, respectively; in other words, the imaginary part of this term captures the contribution of the traveling (propagating) modes. Inserting these expressions back into (\ref{ip:productTwiwi}), we deduce that
\begin{equation*}
\begin{array}{l}
\displaystyle
\Im ((\mathcal{T}\mathbf{w}^i,\mathbf{w}^i)_D) \, = \, k^2\int_D \Im ( \varepsilon ) \, ( |\mathbf{w}^i|^2 +\, |\mathbf{w}^s|^2 ) \, d\mathbf{x} +
\displaystyle\sum_{m=1}^{m_0} |\langle\boldsymbol{\nu}_0\times\mathbf{w}^s , \left(\!\!\begin{array}{c} \nabla_{\Sigma} u_m \\ 0 \end{array}\!\!\right) \rangle _{\Sigma_{ \pm R}}|^2 \, \frac{h_m}{\lambda_m^2}
\\
\hspace*{4cm}
+
\displaystyle\sum_{n=1}^{n_0} |\langle\boldsymbol{\nu}_0\times\mathbf{w}^s , \left(\!\!\begin{array}{c} \vec{\nabla}_{\Sigma}\times v_n \\ 0 \end{array}\!\!\right) \rangle _{\Sigma_{ \pm R}}|^2 \, \frac{k^2}{\mu_n^2\, g_n} \,
\end{array}
\end{equation*}
from which we conclude that $\mathcal{T}$ is coercive whenever the imaginary part of $\varepsilon$ is strictly positive on some subdomain of $D$ of non-zero measure. We are now ready to prove the following justification of the GLSM for our problem.
\begin{theorem}
Let us assume
that the imaginary part of $\varepsilon$
is strictly positive in $D$ (or on a subdomain of $D$ with non-zero measure). Then, for any polarization $\mathbf{p}\in\mathbb{R}^3\setminus\{\boldsymbol{0}\}$ and $\mathbf{y}\in W$, it holds that $\mathbf{y}\in D$ if, and only if, any sequence $\{ \mathbf{g}^{\alpha}_{\mathbf{y}}\}_{\alpha>0}\subset L^2_T(\Sigma_r)$ of minimizers of
(\ref{inv:glsm:nfe}) is bounded.
\end{theorem}
\begin{proof}
On one hand, points $\mathbf{y}\in D$ are characterized by the property $ \boldsymbol{\nu}_0\times\mathbf{u}^i(\cdot;\mathbf{y},\mathbf{p})
|_{\Sigma_r}
\in \mathcal{N}(H_{inc}(D)) $ (see Lemma \ref{inv:lsm:inout}). On the other hand, the near field operator can be factorized both as $N=\mathcal{N}\mathcal{H}$ and as in (\ref{eq:Nfactization}); moreover, $\mathcal{N}:H_{inc}(D)\to L^2_T(\Sigma_r)$ has dense range and is compact (see Lemmas \ref{inv:NFO:denserange} and \ref{inv:NFO:compact}, respectively), whereas $\mathcal{T}:H_{inc}(D)\to L^2(D)^3$ is coercive when the imaginary part of $\varepsilon$ is strictly positive (see the discussion preceding this theorem). Therefore, the result follows from \cite[Chapter 4, Theorem 8]{ComputELectromag-Cetraro}.
\end{proof}
\section{Numerical results}\label{Sec-Numresult}
In this section we describe some numerical simulations of the reconstruction of scattering objects in order to investigate the application of the LSM to inverse electromagnetic scattering in a waveguide. Specifically, we use NGSolve~\cite{netgen} to implement a forward scattering code that generates synthetic scattering data, collected at receivers located on a cross-section of the waveguide below the scatterer. In particular, we use quadratic edge finite elements to approximate $\mathbf{E}^s$ on a finite section of the waveguide, and terminate this section at both ends using the non-standard Perfectly Matched Layer (PML) proposed in~\cite{Rivas} (with the parameters used there). This PML is singular and accounts for both traveling and evanescent components of the solution. The electric field
is extended to the entire waveguide by using a truncated modal expansion (\ref{modal}) on one side of the scatterer. The expansion coefficients are computed by fitting the finite element solution on a cross-section
of the waveguide taken to be an interface in the mesh; we always use Fourier modes up to order 7 in $x$ and $y$, giving 63 modes for each polarization. The mesh size suggested to NGSolve is $2\pi/(7 k)$ in the air
and $2\pi/(7 k \sqrt{\varepsilon})$ in $D$.
The waveguide is taken to have a square cross-section $\Sigma=(0, 1)\times(0,1)$, and the scatterer $D$ has a constant electric permittivity $\varepsilon=4$. We consider two wavenumbers: $k=20$ and $k=25$. When $k=20$ we have 38 propagating modes, and when $k=25$ we have 55 such modes (in this case the highest Fourier order for a propagating mode is 7). For all experiments we use an $8\times 8$ grid of transducers (the same points are used to place the sources and to take measurements) at the tensor product Gauss-Legendre quadrature points in $\Sigma_r$ where we choose $r=-5$. At each source point we use successively each of the three polarizations parallel to the coordinate axes, and assume knowledge of all three polarizations of the scattered field at the measurement points. Using the product Gauss-Legendre quadrature scheme to discretize the near field operator results in a $192\times 192$ \emph{near field matrix}.
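The quoted numbers of propagating modes can be checked by counting the cross-sectional cutoffs below the wavenumber. The short script below is our own sanity check (assuming that the counts refer to index pairs $(m,n)$ with $m,n\geq 0$, not both zero, and cutoff value $\pi\sqrt{m^2+n^2}$ on the unit square cross-section); it returns 38 for $k=20$ and 55 for $k=25$.
\begin{verbatim}
import numpy as np

def propagating_mode_count(k, max_order=50):
    """Count index pairs (m, n), m, n >= 0 and not both zero, whose
    cutoff wavenumber pi*sqrt(m^2 + n^2) lies below k (unit square)."""
    count = 0
    for m in range(max_order + 1):
        for n in range(max_order + 1):
            if (m, n) == (0, 0):
                continue
            if np.pi**2 * (m**2 + n**2) < k**2:
                count += 1
    return count

print(propagating_mode_count(20.0))  # expected: 38
print(propagating_mode_count(25.0))  # expected: 55
\end{verbatim}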
In this paper we use a simple spectral cutoff regularization, which appears
sufficient for the examples presented here, although the more standard Tikhonov-Morozov scheme~\cite{CakoniColtonMonk-IP} might be preferable in practice. We choose the spectral cutoff manually as described below.
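A minimal sketch of this spectral cutoff (truncated SVD) regularization is given below; the names are hypothetical, and the snippet is not the implementation used to produce the figures.
\begin{verbatim}
import numpy as np

def tsvd_solve(N_mat, f, n_keep):
    """Spectral cutoff solution of the discrete NFE N g = f:
    keep only the first n_keep singular vectors of N."""
    U, s, Vh = np.linalg.svd(N_mat)
    # Invert only the retained singular values.
    coeffs = (U[:, :n_keep].conj().T @ f) / s[:n_keep]
    return Vh[:n_keep].conj().T @ coeffs
\end{verbatim}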
In some cases noise is added to the data entry by entry, as described in \cite{CakoniColtonMonk-IP}. In particular, if $N$ is the matrix representing the near field operator after discretization using Gauss-Legendre quadrature (in our case $N$ is a $192\times 192$ matrix), then, for a given noise parameter $\eta$, we add noise by computing a new matrix $N_{\eta}$ using
\[
(N_{\eta})_{i,j}=N_{i,j}(1+\eta \xi_{i,j})\mbox{ for all }i,j.
\]
Here the $\xi_{i,j}$ are independent random numbers uniformly distributed in $(-1,1)$ (generated using the NumPy \verb+random.uniform+ command).
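A sketch of this noise model in NumPy (the function name is ours) reads:
\begin{verbatim}
import numpy as np

def add_noise(N_mat, eta):
    """Entrywise perturbation (N_eta)_{i,j} = N_{i,j} (1 + eta * xi_{i,j}),
    with xi_{i,j} uniform in (-1, 1)."""
    xi = np.random.uniform(-1.0, 1.0, size=N_mat.shape)
    return N_mat * (1.0 + eta * xi)
\end{verbatim}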
Having computed a regularized solution of the discrete near field equation for a given source point $\mathbf{z}$ and each of three linearly independent auxiliary polarizations (taken to be the three standard unit vectors successively), we use the reciprocal of the average of the discrete $\ell^2$-norms of the corresponding approximations to $\mathbf{g}_{\mathbf{z}}$ as the indicator function for identifying the shape of the scatterer.
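In code, the indicator at a given point can be assembled as follows (a sketch; \verb+g_list+ is a hypothetical name for the three regularized densities computed at that point):
\begin{verbatim}
import numpy as np

def indicator(g_list):
    """Reciprocal of the average discrete l2-norm of the regularized
    densities obtained for the three auxiliary polarizations."""
    norms = [np.linalg.norm(g) for g in g_list]
    return 1.0 / np.mean(norms)
\end{verbatim}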
We shall present isosurface plots of the indicator function as well as detailed contour plots on cross-sections of the
domain. The isosurface to be drawn is chosen by fixing a constant $0<C<1$; it is then given by all
$\mathbf{x}$ such that
\begin{equation}
\psi(\mathbf{x})=C ( \max _{\mathbf{y}\in Z} \psi(\mathbf{y})-\min_{\mathbf{y}\in Z} \psi(\mathbf{y}) )+\min _{\mathbf{y}\in Z} \psi(\mathbf{y}) \, ,
\label{level}
\end{equation}
where $\psi$ is the indicator function and $Z$ is the set of source points used for the sampling method.
The constant $C$ may have to be modified for different scatterers and noise levels.
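Equivalently, the isovalue in (\ref{level}) is computed from the indicator values sampled on $Z$ as in the short sketch below (variable names are ours):
\begin{verbatim}
import numpy as np

def isosurface_level(psi_vals, C):
    """Isovalue C*(max(psi) - min(psi)) + min(psi) over the sampling grid Z."""
    psi_vals = np.asarray(psi_vals)
    return C * (psi_vals.max() - psi_vals.min()) + psi_vals.min()
\end{verbatim}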
\begin{figure}
\caption{Cross-sections of the computational domains used for the forward problems in this study. Left: the single spherical scatterer. Right: two smaller spheres. The source points and measurement surface are below the scatterers. The boxed regions at the top and bottom of the domain are the PML regions (in this case $k=20$ and the PML regions are one wavelength thick). The surface between the obstacle or obstacles and the lower PML is used to fit a modal expansion for extending the solution outside the computational domain.}
\label{domains}
\end{figure}
We consider two examples motivated by previous works in the area; they are three-dimensional analogues of those in \cite{BLFD} (see Fig.~\ref{domains}). The scatterer $D$ is chosen to be either:
\begin{itemize}
\item A single sphere centered at $(0.5,0.6,0)$ with radius 0.2.
\item Two spheres, the first centered at $(0.5,0.7,0)$ of radius 0.05, and the second centered at $(0.5,0.5,0.5)$ of radius 0.07.
\end{itemize}
\subsection{A single sphere}
First we consider the single sphere of radius 0.2 centered at $(0.5,0.6,0)$.
Of the two examples considered in this paper, this is the more difficult to reconstruct. As discussed above, regularization
is via the truncated singular value decomposition. In Fig.~\ref{S20} we show the singular values when $k=20$. In the left panel no extra noise has been added to the data computed by the finite element method. In the right-hand panel we have added random noise with noise parameter $\eta=0.001$, which produces a relative error in the discrete near field matrix of 0.06\% in the Frobenius norm. We also show examples for $\eta=0.01$, which produces an error of 0.6\%. When $k=20$ there are 38 propagating modes. We choose a spectral cutoff larger than this, restricting to the
first 51 SVD vectors. With this choice the reconstructions are shown in Fig.~\ref{onesphk20}. The position of the scatterer along the waveguide is predicted well, but the shape is not obvious from the isovalue plots even when no noise is added.
A higher wavenumber results in more propagating modes, and hence potentially more data. Using $k=25$, when there are 55 propagating modes,
gives the singular values shown in Fig.~\ref{S25}. In this case more singular vectors are significant than for $k=20$. For the noise-free case shown in Fig.~\ref{onesphk25} (left panel) we used 81 singular vectors; the position and approximate shape of the scatterer are clearly visible. The center and right panels of Fig.~\ref{onesphk25} compare the effect of noise. To compute the results shown in Fig.~\ref{onesphk25} (center panel), we again use 81 singular values, which results in an improved reconstruction compared to Fig.~\ref{onesphk20}.
\begin{figure}
\caption{Singular values for the single sphere example shown in Fig.~\ref{domains} when $k=20$.}
\label{S20}
\end{figure}
\begin{figure}
\caption{Reconstructions of the single sphere example shown in Fig.~\ref{domains} when $k=20$.}
\label{onesphk20}
\end{figure}
\begin{figure}
\caption{Singular values for the single sphere example shown in Fig.~\ref{domains} when $k=25$.}
\label{S25}
\end{figure}
\begin{figure}
\caption{Reconstructions of the single sphere example shown in Fig.~\ref{domains} when $k=25$.}
\label{onesphk25}
\end{figure}
\subsection{Two spheres}
Next we consider the two spheres example where the exact scatterer is shown in the right-hand panel of Fig.~\ref{domains}. Perhaps surprisingly, this example can be reconstructed using a lower wavenumber than for the single sphere. We show results of
reconstructing this scatterer using $k=20$ in Fig.~\ref{twosph}.
\begin{figure}
\caption{Reconstructions of the two spheres example shown in Fig.~\ref{domains} when $k=20$.}
\label{twosph}
\end{figure}
\section{Conclusions}\label{Sec-Concl}
Our analysis and numerical evidence suggest that the LSM can be used to identify the position and size of penetrable obstacles
in an electromagnetic waveguide. Clearly the model problem we have examined requires considerable
elaboration before being useful in applications. The case
in which measurements are made on
a surface on the opposite side of the obstacle from the sources could also be investigated (the theory we have presented holds in that case as well, but the numerical results in this paper are only for measurements and sources on one side of the obstacle). However, we expect that the one-sided measurement setup considered here would be simpler
in practice.
Although we did not discuss PEC scatterers, exactly the same LSM applies to a PEC or a penetrable scatterer. Theory and numerical results for the PEC case can be found in \cite{FanPhD}.
\section*{Acknowledgments} The research of P. Monk was partially supported by the Air Force Office of Scientific Research under award number FA9550-17-1-0147, and that of V. Selgas by project MTM2017-87162-P.
\end{document}